| code (string, 3–1.05M chars) | repo_name (string, 5–104 chars) | path (string, 4–251 chars) | language (1 class) | license (15 classes) | size (int64, 3–1.05M bytes) |
|---|---|---|---|---|---|
# coding=utf-8
from data_packer import RequiredField, converter
from common import demo_run

cvt = converter.TypeConverter(str)
fields = [
    RequiredField('a', 'a', converter=cvt)
]
demo_run(fields, 'Convert type to str')
| ideascf/data-packer | example/demo/demo_converter.py | Python | mit | 234 |
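A minimal stand-in for what the demo above exercises (a hypothetical sketch, not the data_packer API: SimpleTypeConverter and pack only loosely mirror TypeConverter, RequiredField, and demo_run): a required source key is copied to a destination key, with a converter applied on the way through.

# Hypothetical sketch of the converter/required-field idea shown above.
class SimpleTypeConverter(object):
    def __init__(self, target_type):
        self.target_type = target_type
    def __call__(self, value):
        return self.target_type(value)

def pack(fields, src):
    dst = {}
    for src_key, dst_key, convert in fields:
        if src_key not in src:  # "required" means absence is an error
            raise KeyError('missing required field: %s' % src_key)
        dst[dst_key] = convert(src[src_key])
    return dst

print(pack([('a', 'a', SimpleTypeConverter(str))], {'a': 123}))  # {'a': '123'}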
from django.conf.urls import patterns, url
from rest_framework.urlpatterns import format_suffix_patterns
from . import api_views
from . import views
urlpatterns = patterns(
'',
url(r'^munger_builder_index/', views.munger_builder_index, name='munger_builder_index'),
url(r'^new_munger_builder/', views.new_munger_builder, name='new_munger_builder'),
url(r'^pivot_builder/(?P<munger_builder_id>[0-9]+)$', views.pivot_builder, name='pivot_builder'),
url(r'^download_test_data/(?P<munger_builder_id>[0-9]+)$', views.download_test_data, name='download_test_data'),
url(r'^poll_for_download/', views.poll_for_download, name='poll_for_download'),
url(r'^mungers/$', api_views.Mungers.as_view(), name='mungerbuilder-list'),
url(r'^mungers/(?P<pk>[0-9]+)$', api_views.Mungers.as_view(), name='mungerbuilder-detail'),
url(r'^data_fields/$', api_views.DataFields.as_view(), name='datafield-list'),
url(r'^data_fields/(?P<pk>[0-9]+)$', api_views.DataFields.as_view(), name='datafield-detail'),
url(r'^pivot_fields/$', api_views.PivotFields.as_view(), name='pivotfield-list'),
url(r'^pivot_fields/(?P<pk>[0-9]+)$', api_views.PivotFields.as_view(), name='pivotfield-detail'),
url(r'^field_types/$', api_views.FieldTypes.as_view(), name='fieldtype-list'),
url(r'^field_types/(?P<pk>[0-9]+)$', api_views.FieldTypes.as_view(), name='fieldtype-detail'),
url(r'^', views.munger_builder_index, name='default'),
)
urlpatterns = format_suffix_patterns(urlpatterns)
| cscanlin/munger-builder | script_builder/urls.py | Python | mit | 1,506 |
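A note on API drift: patterns() was deprecated in Django 1.8 and removed in 1.10. A sketch of the same urlconf in the plain-list form (same views assumed; only the first routes shown):

# Sketch: the same routes without patterns(), for Django >= 1.10.
from django.conf.urls import url
from . import views

urlpatterns = [
    url(r'^munger_builder_index/', views.munger_builder_index,
        name='munger_builder_index'),
    url(r'^new_munger_builder/', views.new_munger_builder,
        name='new_munger_builder'),
    # ... remaining url() entries carry over unchanged ...
]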
# Dealer.py
# controls the actual game flow and logic
import Game
import CardDeck
class Dealer():
def __init__(self):
self.currentGame = self.setupNewGame()
self.currentDeck = CardDeck.CardDeck()
self.button = 0 # human starts on the button until card flip is implemented
def setupNewGame(self):
# choose num of human and ai players
print("Welcome to the poker room!\n")
numPlayers = int(raw_input("How many players will be in this game? \n"))  # cast: raw_input returns a string, Game needs a count
newGame = Game.Game(numPlayers)
return newGame
def dealHand(self):
# set blinds, dealer and deal hand
print("Dealing hand.\n")
self.currentDeck.shuffle()
self.dealCards()
def dealCards(self):
#players = self.currentGame.playerArray
playerIndex = self.getNextPlayerIndex(self.button)
for i in range(0, len(self.currentGame.playerArray) * 2):
#currentPlayer = self.currentGame.playerArray[playerIndex]
if (i < len(self.currentGame.playerArray)):
# Deal first card
self.currentGame.playerArray[playerIndex].set_firstCard(self.currentDeck.cards[i])
else:
# Deal second card
print(self.currentDeck.cards[i].suit + self.currentDeck.cards[i].value)
self.currentGame.playerArray[playerIndex].set_secondCard(self.currentDeck.cards[i])
playerIndex = self.getNextPlayerIndex(playerIndex)
print(self.currentGame.playerArray[0].firstCard)
print("Your hand is " + self.currentGame.playerArray[0].firstCard.suit
+ self.currentGame.playerArray[0].firstCard.value + " "
+ self.currentGame.playerArray[0].secondCard.suit
+ self.currentGame.playerArray[0].secondCard.value + ".")
def getNextPlayerIndex(self, currentPos):
arrayLength = len(self.currentGame.playerArray)
nextPos = currentPos + 1
if (nextPos > arrayLength - 1):
return 0
else:
return nextPos
def getPreviousPlayerIndex(self, currentPos):
arrayLength = len(self.currentGame.playerArray)
prevPos = currentPos - 1
if (prevPos < 0):
return arrayLength - 1
else:
return prevPos
| KristianL1415/python-poker | Dealer.py | Python | mit | 2,358 |
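The two index helpers above are ring traversal; modulo arithmetic expresses the same wrap-around in one line each (a sketch, assuming a non-empty player array):

# Equivalent ring traversal using modulo (assumes array_length > 0).
def next_player_index(current_pos, array_length):
    return (current_pos + 1) % array_length

def previous_player_index(current_pos, array_length):
    # Python's % returns a non-negative result for a positive modulus,
    # so -1 % 3 == 2 handles the wrap below zero.
    return (current_pos - 1) % array_length

assert next_player_index(2, 3) == 0
assert previous_player_index(0, 3) == 2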
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from nose.tools import assert_equals, assert_raises
from ..log.mixin import LoggerMixin
class LoggableStub(LoggerMixin, object):  # mixin first avoids an MRO conflict if LoggerMixin derives from object
pass
def test_logger_mixin():
obj = LoggableStub()
from logging.handlers import MemoryHandler
import logging
log = logging.getLogger()
handler = MemoryHandler(999)
log.setLevel(logging.DEBUG)
log.addHandler(handler)
obj.debug("This is a DEBUG message")
obj.info("This is an INFORMATIVE message")
obj.warning("This is a WARNING")
obj.error("This is an ERROR")
obj.critical("This is a CRITICAL error")
obj.exception("This is an exception")
obj.exception()
assert_equals(len(handler.buffer), 7)
assert_equals(handler.buffer[2].name, "loggablestub")
assert_equals(handler.buffer[2].msg, "This is a WARNING")
log.removeHandler(handler)
def test_logger_raises_on_invalid_name_type():
class BrokenLoggableStub(LoggerMixin, object):
def _logger_name(self):
return 123
broken_logger = BrokenLoggableStub()
assert_raises(
TypeError,
broken_logger.debug,
"This shouldn't work")
| dimagi/rapidsms-core-dev | lib/rapidsms/tests/test_logger.py | Python | bsd-3-clause | 1,194 |
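The capture pattern in the test stands alone: buffer records with a MemoryHandler on the root logger, assert against handler.buffer, then detach. A minimal self-contained version using only the standard library:

# Minimal log-capture sketch with logging.handlers.MemoryHandler.
import logging
from logging.handlers import MemoryHandler

root = logging.getLogger()
handler = MemoryHandler(999)  # buffer up to 999 records, no flush target
root.setLevel(logging.DEBUG)
root.addHandler(handler)
logging.getLogger("demo").warning("captured")
assert handler.buffer[-1].msg == "captured"
root.removeHandler(handler)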
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import os
import re
import traceback
from .printer import print_err, colors
from typing import cast, Any, Callable, Dict, List, Optional, Tuple
RuleList = List[Dict[str, Any]] # mypy currently requires Aliases at global scope
# https://github.com/python/mypy/issues/3145
def build_custom_checkers(by_lang):
# type: (Dict[str, List[str]]) -> Tuple[Callable[[], bool], Callable[[], bool]]
def custom_check_file(fn, identifier, rules, color, skip_rules=None, max_length=None):
# type: (str, str, RuleList, str, Optional[Any], Optional[int]) -> bool
failed = False
line_tups = []
for i, line in enumerate(open(fn)):
line_newline_stripped = line.strip('\n')
line_fully_stripped = line_newline_stripped.strip()
skip = False
for rule in skip_rules or []:
if re.match(rule, line):
skip = True
if line_fully_stripped.endswith(' # nolint'):
continue
if skip:
continue
tup = (i, line, line_newline_stripped, line_fully_stripped)
line_tups.append(tup)
rules_to_apply = []
fn_dirname = os.path.dirname(fn)
for rule in rules:
exclude_list = rule.get('exclude', set())
if fn in exclude_list or fn_dirname in exclude_list:
continue
if rule.get("include_only"):
found = False
for item in rule.get("include_only", set()):
if item in fn:
found = True
if not found:
continue
rules_to_apply.append(rule)
for rule in rules_to_apply:
exclude_lines = {
line for
(exclude_fn, line) in rule.get('exclude_line', set())
if exclude_fn == fn
}
pattern = rule['pattern']
for (i, line, line_newline_stripped, line_fully_stripped) in line_tups:
if line_fully_stripped in exclude_lines:
exclude_lines.remove(line_fully_stripped)
continue
try:
line_to_check = line_fully_stripped
if rule.get('strip') is not None:
if rule['strip'] == '\n':
line_to_check = line_newline_stripped
else:
raise Exception("Invalid strip rule")
if re.search(pattern, line_to_check):
print_err(identifier, color, '{} at {} line {}:'.format(
rule['description'], fn, i+1))
print_err(identifier, color, line)
failed = True
except Exception:
print("Exception with %s at %s line %s" % (rule['pattern'], fn, i+1))
traceback.print_exc()
if exclude_lines:
print('Please remove exclusions for file %s: %s' % (fn, exclude_lines))
lastLine = None
for (i, line, line_newline_stripped, line_fully_stripped) in line_tups:
if isinstance(line, bytes):
line_length = len(line.decode("utf-8"))
else:
line_length = len(line)
if (max_length is not None and line_length > max_length and
'# type' not in line and 'test' not in fn and 'example' not in fn and
not re.match("\[[ A-Za-z0-9_:,&()-]*\]: http.*", line) and
not re.match("`\{\{ external_api_uri_subdomain \}\}[^`]+`", line) and
"#ignorelongline" not in line and 'migrations' not in fn):
print("Line too long (%s) at %s line %s: %s" % (len(line), fn, i+1, line_newline_stripped))
failed = True
lastLine = line
if lastLine and ('\n' not in lastLine):
print("No newline at the end of file. Fix with `sed -i '$a\\' %s`" % (fn,))
failed = True
return failed
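# Rule schema, as consumed by custom_check_file above: each rule is a dict
# with a required 'pattern' (a regex run with re.search against the stripped
# line) and a 'description' printed on a hit, plus optional keys:
#   'strip': '\n'    - match against the newline-stripped line instead
#   'exclude'        - set of file paths/directories the rule skips
#   'exclude_line'   - set of (filename, stripped line) pairs to skip
#   'include_only'   - apply only when some entry is a substring of the path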
trailing_whitespace_rule = {
'pattern': '\s+$',
'strip': '\n',
'description': 'Fix trailing whitespace'
}
whitespace_rules = [
# This linter should be first since bash_rules depends on it.
trailing_whitespace_rule,
{'pattern': '\t',
'strip': '\n',
'exclude': set(['zerver/lib/bugdown/codehilite.py',
'tools/travis/success-http-headers.txt']),
'description': 'Fix tab-based whitespace'},
] # type: RuleList
markdown_whitespace_rules = list([rule for rule in whitespace_rules if rule['pattern'] != '\s+$']) + [
# Two spaces trailing a line with other content is okay--it's a markdown line break.
# This rule finds one space trailing a non-space, three or more trailing spaces, and
# spaces on an empty line.
{'pattern': '((?<!\s)\s$)|(\s\s\s+$)|(^\s+$)',
'strip': '\n',
'description': 'Fix trailing whitespace'},
{'pattern': '^#+[A-Za-z0-9]',
'strip': '\n',
'description': 'Missing space after # in heading'},
] # type: RuleList
js_rules = cast(RuleList, [
{'pattern': '[^_]function\(',
'description': 'The keyword "function" should be followed by a space'},
{'pattern': '.*blueslip.warning\(.*',
'description': 'The module blueslip has no function warning, try using blueslip.warn'},
{'pattern': '[)]{$',
'description': 'Missing space between ) and {'},
{'pattern': '["\']json/',
'description': 'Relative URL for JSON route not supported by i18n'},
# This rule is constructed with + to avoid triggering on itself
{'pattern': " =" + '[^ =>~"]',
'description': 'Missing whitespace after "="'},
{'pattern': '^[ ]*//[A-Za-z0-9]',
'description': 'Missing space after // in comment'},
{'pattern': 'if[(]',
'description': 'Missing space between if and ('},
{'pattern': 'else{$',
'description': 'Missing space between else and {'},
{'pattern': '^else {$',
'description': 'Write JS else statements on same line as }'},
{'pattern': '^else if',
'description': 'Write JS else statements on same line as }'},
{'pattern': 'console[.][a-z]',
'exclude': set(['static/js/blueslip.js',
'frontend_tests/zjsunit',
'frontend_tests/casper_lib/common.js',
'frontend_tests/node_tests',
'static/js/debug.js']),
'description': 'console.log and similar should not be used in webapp'},
{'pattern': '[.]text\(["\'][a-zA-Z]',
'description': 'Strings passed to $().text should be wrapped in i18n.t() for internationalization'},
{'pattern': 'compose_error\(["\']',
'description': 'Argument to compose_error should be a literal string enclosed '
'by i18n.t()'},
{'pattern': 'ui.report_success\(',
'description': 'Deprecated function, use ui_report.success.'},
{'pattern': 'report.success\(["\']',
'description': 'Argument to report_success should be a literal string enclosed '
'by i18n.t()'},
{'pattern': 'ui.report_error\(',
'description': 'Deprecated function, use ui_report.error.'},
{'pattern': 'report.error\(["\']',
'description': 'Argument to report_error should be a literal string enclosed '
'by i18n.t()'},
]) + whitespace_rules
python_rules = cast(RuleList, [
{'pattern': '^(?!#)@login_required',
'description': '@login_required is unsupported; use @zulip_login_required'},
{'pattern': '".*"%\([a-z_].*\)?$',
'description': 'Missing space around "%"'},
{'pattern': "'.*'%\([a-z_].*\)?$",
'exclude': set(['analytics/lib/counts.py',
'analytics/tests/test_counts.py',
]),
'exclude_line': set([
('zerver/views/users.py',
"return json_error(_(\"Email '%(email)s' not allowed for realm '%(realm)s'\") %"),
('zproject/settings.py',
"'format': '%(asctime)s %(levelname)-8s %(message)s'"),
('static/templates/settings/bot-settings.handlebars',
"'https://hostname.example.com/bots/followup'"),
]),
'description': 'Missing space around "%"'},
# This rule is constructed with + to avoid triggering on itself
{'pattern': " =" + '[^ =>~"]',
'description': 'Missing whitespace after "="'},
{'pattern': '":\w[^"]*$',
'description': 'Missing whitespace after ":"'},
{'pattern': "':\w[^']*$",
'description': 'Missing whitespace after ":"'},
{'pattern': "^\s+[#]\w",
'strip': '\n',
'description': 'Missing whitespace after "#"'},
{'pattern': "assertEquals[(]",
'description': 'Use assertEqual, not assertEquals (which is deprecated).'},
{'pattern': "== None",
'description': 'Use `is None` to check whether something is None'},
{'pattern': "type:[(]",
'description': 'Missing whitespace after ":" in type annotation'},
{'pattern': "type: ignore$",
'exclude': set(['tools/tests',
'zerver/lib/test_runner.py',
'zerver/tests']),
'description': '"type: ignore" should always end with "# type: ignore # explanation for why"'},
{'pattern': "# type [(]",
'description': 'Missing : after type in type annotation'},
{'pattern': "#type",
'description': 'Missing whitespace after "#" in type annotation'},
{'pattern': 'if[(]',
'description': 'Missing space between if and ('},
{'pattern': ", [)]",
'description': 'Unnecessary whitespace between "," and ")"'},
{'pattern': "% [(]",
'description': 'Unnecessary whitespace between "%" and "("'},
# This next check could have false positives, but it seems pretty
# rare; if we find any, they can be added to the exclude list for
# this rule.
{'pattern': ' % [a-zA-Z0-9_.]*\)?$',
'exclude_line': set([
('tools/tests/test_template_parser.py', '{% foo'),
]),
'description': 'Used % formatting without a tuple'},
{'pattern': '.*%s.* % \([a-zA-Z0-9_.]*\)$',
'description': 'Used % formatting without a tuple'},
{'pattern': 'django.utils.translation',
'include_only': set(['test/']),
'description': 'Test strings should not be tagged for translation'},
{'pattern': 'json_success\({}\)',
'description': 'Use json_success() to return nothing'},
# To avoid json_error(_variable) and json_error(_(variable))
{'pattern': '\Wjson_error\(_\(?\w+\)',
'exclude': set(['zerver/tests']),
'description': 'Argument to json_error should be a literal string enclosed by _()'},
{'pattern': '\Wjson_error\([\'"].+[),]$',
'exclude': set(['zerver/tests']),
'exclude_line': set([
# We don't want this string tagged for translation.
('zerver/views/compatibility.py', 'return json_error("Client is too old")'),
]),
'description': 'Argument to json_error should be a literal string enclosed by _()'},
# To avoid JsonableError(_variable) and JsonableError(_(variable))
{'pattern': '\WJsonableError\(_\(?\w.+\)',
'exclude': set(['zerver/tests']),
'description': 'Argument to JsonableError should be a literal string enclosed by _()'},
{'pattern': '\WJsonableError\(["\'].+\)',
'exclude': set(['zerver/tests']),
'description': 'Argument to JsonableError should be a literal string enclosed by _()'},
{'pattern': '([a-zA-Z0-9_]+)=REQ\([\'"]\\1[\'"]',
'description': 'REQ\'s first argument already defaults to parameter name'},
{'pattern': 'self\.client\.(get|post|patch|put|delete)',
'exclude': set(['zilencer/tests.py']),
'description': \
'''Do not call self.client directly for put/patch/post/get.
See WRAPPER_COMMENT in test_helpers.py for details.
'''},
# Directly fetching Message objects in e.g. views code is often a security bug.
{'pattern': '[^r][M]essage.objects.get',
'exclude': set(["zerver/tests",
"zerver/lib/onboarding.py",
"zilencer/management/commands/add_mock_conversation.py",
"zerver/worker/queue_processors.py"]),
'description': 'Please use access_message() to fetch Message objects',
},
{'pattern': '[S]tream.objects.get',
'include_only': set(["zerver/views/"]),
'description': 'Please use access_stream_by_*() to fetch Stream objects',
},
{'pattern': 'get_stream[(]',
'include_only': set(["zerver/views/", "zerver/lib/actions.py"]),
'exclude_line': set([
# This is a check for whether a stream rename is invalid because it already exists
('zerver/lib/actions.py', 'get_stream(new_name, stream.realm)'),
# This one in check_message is kinda terrible, since it's
# how most instances are written, but better to exclude something than nothing
('zerver/lib/actions.py', 'stream = get_stream(stream_name, realm)'),
('zerver/lib/actions.py', 'get_stream(signups_stream, admin_realm)'),
# Here we need get_stream to access streams you've since unsubscribed from.
('zerver/views/messages.py', 'stream = get_stream(operand, self.user_profile.realm)'),
]),
'description': 'Please use access_stream_by_*() to fetch Stream objects',
},
{'pattern': '[S]tream.objects.filter',
'include_only': set(["zerver/views/"]),
'description': 'Please use access_stream_by_*() to fetch Stream objects',
},
{'pattern': '^from (zerver|analytics|confirmation)',
'include_only': set(["/migrations/"]),
'exclude': set(['zerver/migrations/0032_verify_all_medium_avatar_images.py',
'zerver/migrations/0041_create_attachments_for_old_messages.py',
'zerver/migrations/0060_move_avatars_to_be_uid_based.py']),
'description': "Don't import models or other code in migrations; see docs/schema-migrations.md",
},
{'pattern': 'datetime[.](now|utcnow)',
'include_only': set(["zerver/", "analytics/"]),
'description': "Don't use datetime in backend code.\n"
"See https://zulip.readthedocs.io/en/latest/code-style.html#naive-datetime-objects",
},
{'pattern': 'render_to_response\(',
'description': "Use render() instead of render_to_response().",
},
{'pattern': '(^|\s)open\s*\(',
'description': 'open() should not be used in Zulip\'s bots. Use functions'
' provided by the bots framework to access the filesystem.',
'include_only': set(['api/bots/']),
'exclude': set(['api/bots/john/john.py'])},
]) + whitespace_rules
bash_rules = [
{'pattern': '#!.*sh [-xe]',
'description': 'Fix shebang line with proper call to /usr/bin/env for Bash path, change -x|-e switches'
' to set -x|set -e'},
] + whitespace_rules[0:1] # type: RuleList
css_rules = cast(RuleList, [
{'pattern': '^[^:]*:\S[^:]*;$',
'description': "Missing whitespace after : in CSS"},
{'pattern': '[a-z]{',
'description': "Missing whitespace before '{' in CSS."},
{'pattern': 'https://',
'description': "Zulip CSS should have no dependencies on external resources"},
{'pattern': '^[ ][ ][a-zA-Z0-9]',
'description': "Incorrect 2-space indentation in CSS",
'exclude': set(['static/third/thirdparty-fonts.css']),
'strip': '\n'},
{'pattern': '{\w',
'description': "Missing whitespace after '{' in CSS (should be newline)."},
{'pattern': ' thin[; ]',
'description': "thin CSS attribute is under-specified, please use 1px."},
{'pattern': ' medium[; ]',
'description': "medium CSS attribute is under-specified, please use pixels."},
{'pattern': ' thick[; ]',
'description': "thick CSS attribute is under-specified, please use pixels."},
]) + whitespace_rules # type: RuleList
prose_style_rules = [
{'pattern': '[^\/\#\-\"]([jJ]avascript)', # exclude usage in hrefs/divs
'description': "javascript should be spelled JavaScript"},
{'pattern': '[^\/\-\.\"\'\_\=\>]([gG]ithub)[^\.\-\_\"\<]', # exclude usage in hrefs/divs
'description': "github should be spelled GitHub"},
{'pattern': '[oO]rganisation',
'description': "Organization is spelled with a z"},
{'pattern': '!!! warning',
'description': "!!! warning is invalid; it's spelled '!!! warn'"},
] # type: RuleList
html_rules = whitespace_rules + prose_style_rules + [
{'pattern': 'placeholder="[^{]',
'description': "`placeholder` value should be translatable.",
'exclude_line': [('templates/zerver/register.html', 'placeholder="acme"'),
('templates/zerver/register.html', 'placeholder="Acme or Aκμή"'),
('static/templates/settings/realm-domains-modal.handlebars',
'<td><input type="text" class="new-realm-domain" placeholder="acme.com"></input></td>')],
'exclude': set(["static/templates/settings/emoji-settings-admin.handlebars",
"static/templates/settings/realm-filter-settings-admin.handlebars",
"static/templates/settings/bot-settings.handlebars"])},
{'pattern': "placeholder='[^{]",
'description': "`placeholder` value should be translatable."},
{'pattern': "aria-label='[^{]",
'description': "`aria-label` value should be translatable."},
{'pattern': 'aria-label="[^{]',
'description': "`aria-label` value should be translatable."},
{'pattern': 'script src="http',
'description': "Don't directly load dependencies from CDNs. See docs/front-end-build-process.md"},
{'pattern': "title='[^{]",
'description': "`title` value should be translatable."},
{'pattern': 'title="[^{\:]',
'exclude_line': set([
('templates/zerver/markdown_help.html',
'<td><img alt=":heart:" class="emoji" src="/static/generated/emoji/images/emoji/heart.png" title=":heart:" /></td>')
]),
'exclude': set(["templates/zerver/emails"]),
'description': "`title` value should be translatable."},
{'pattern': '\Walt=["\'][^{"\']',
'description': "alt argument should be enclosed by _() or it should be an empty string.",
'exclude': set(['static/templates/settings/display-settings.handlebars',
'templates/zerver/keyboard_shortcuts.html',
'templates/zerver/markdown_help.html']),
},
{'pattern': '\Walt=["\']{{ ?["\']',
'description': "alt argument should be enclosed by _().",
},
] # type: RuleList
handlebars_rules = html_rules + [
{'pattern': "[<]script",
'description': "Do not use inline <script> tags here; put JavaScript in static/js instead."},
{'pattern': '{{ t ("|\')',
'description': 'There should be no spaces before the "t" in a translation tag.'},
{'pattern': "{{t '.*' }}[\.\?!]",
'description': "Period should be part of the translatable string."},
{'pattern': '{{t ".*" }}[\.\?!]',
'description': "Period should be part of the translatable string."},
{'pattern': "{{/tr}}[\.\?!]",
'description': "Period should be part of the translatable string."},
{'pattern': '{{t ("|\') ',
'description': 'Translatable strings should not have leading spaces.'},
{'pattern': "{{t '[^']+ ' }}",
'description': 'Translatable strings should not have trailing spaces.'},
{'pattern': '{{t "[^"]+ " }}',
'description': 'Translatable strings should not have trailing spaces.'},
]
jinja2_rules = html_rules + [
{'pattern': "{% endtrans %}[\.\?!]",
'description': "Period should be part of the translatable string."},
{'pattern': "{{ _(.+) }}[\.\?!]",
'description': "Period should be part of the translatable string."},
]
json_rules = [
# Since most json files are fixtures containing 3rd party json code,
# we allow tab-based whitespaces.
trailing_whitespace_rule,
]
markdown_rules = markdown_whitespace_rules + prose_style_rules + [
{'pattern': '\[(?P<url>[^\]]+)\]\((?P=url)\)',
'description': 'Linkified markdown URLs should use cleaner <http://example.com> syntax.'}
]
help_markdown_rules = markdown_rules + [
{'pattern': '[a-z][.][A-Z]',
'description': "Likely missing space after end of sentence"},
{'pattern': '[rR]ealm',
'description': "Realms are referred to as Organizations in user-facing docs."},
]
txt_rules = whitespace_rules
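# Composition note: per-language rule lists are plain list concatenations
# (e.g. handlebars_rules = html_rules + [...]), so a handlebars file is
# checked against the whitespace, prose-style, and html rules as well as
# its own; txt_rules above reuses whitespace_rules unchanged.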
def check_custom_checks_py():
# type: () -> bool
failed = False
color = next(colors)
for fn in by_lang['py']:
if 'custom_check.py' in fn:
continue
if custom_check_file(fn, 'py', python_rules, color, max_length=140):
failed = True
return failed
def check_custom_checks_nonpy():
# type: () -> bool
failed = False
color = next(colors)
for fn in by_lang['js']:
if custom_check_file(fn, 'js', js_rules, color):
failed = True
color = next(colors)
for fn in by_lang['sh']:
if custom_check_file(fn, 'sh', bash_rules, color):
failed = True
color = next(colors)
for fn in by_lang['css']:
if custom_check_file(fn, 'css', css_rules, color):
failed = True
color = next(colors)
for fn in by_lang['handlebars']:
if custom_check_file(fn, 'handlebars', handlebars_rules, color):
failed = True
color = next(colors)
for fn in by_lang['html']:
if custom_check_file(fn, 'html', jinja2_rules, color):
failed = True
color = next(colors)
for fn in by_lang['json']:
if custom_check_file(fn, 'json', json_rules, color):
failed = True
color = next(colors)
markdown_docs_length_exclude = {
"api/bots/converter/readme.md",
"docs/bots-guide.md",
"docs/dev-env-first-time-contributors.md",
"docs/webhook-walkthrough.md",
"docs/life-of-a-request.md",
"docs/logging.md",
"docs/migration-renumbering.md",
"docs/readme-symlink.md",
"README.md",
"zerver/webhooks/helloworld/doc.md",
"zerver/webhooks/trello/doc.md",
"templates/zerver/integrations/perforce.md",
}
for fn in by_lang['md']:
max_length = None
if fn not in markdown_docs_length_exclude:
max_length = 120
rules = markdown_rules
if fn.startswith("templates/zerver/help"):
rules = help_markdown_rules
if custom_check_file(fn, 'md', rules, color, max_length=max_length):
failed = True
color = next(colors)
for fn in by_lang['txt'] + by_lang['text']:
if custom_check_file(fn, 'txt', txt_rules, color):
failed = True
color = next(colors)
for fn in by_lang['yaml']:
if custom_check_file(fn, 'yaml', txt_rules, color):
failed = True
return failed
return (check_custom_checks_py, check_custom_checks_nonpy)
| vaidap/zulip | tools/linter_lib/custom_check.py | Python | apache-2.0 | 24,661 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A client interface for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import threading
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow as tf_session
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import session_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import nest
class SessionInterface(object):
"""Base class for implementations of TensorFlow client sessions."""
@property
def graph(self):
"""The underlying TensorFlow graph, to be used in building Operations."""
raise NotImplementedError('graph')
@property
def sess_str(self):
"""The TensorFlow process to which this session will connect."""
raise NotImplementedError('sess_str')
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Runs operations in the session. See `BaseSession.run()` for details."""
raise NotImplementedError('run')
def partial_run_setup(self, fetches, feeds=None):
"""Sets up the feeds and fetches for partial runs in the session."""
raise NotImplementedError('partial_run_setup')
def partial_run(self, handle, fetches, feed_dict=None):
"""Continues the execution with additional feeds and fetches."""
raise NotImplementedError('partial_run')
def _get_indexed_slices_value_from_fetches(fetched_vals):
return ops.IndexedSlicesValue(fetched_vals[0], fetched_vals[1],
fetched_vals[2]
if len(fetched_vals) == 3 else None)
def _get_feeds_for_indexed_slices(feed, feed_val):
return list(zip([feed.values, feed.indices] if feed.dense_shape is None else
[feed.values, feed.indices, feed.dense_shape], feed_val))
# List of extensions supported to convert run arguments into actual fetches and
# feeds.
#
# Each element in the list is a tuple of (Type, fetch_fn, feed_fn1, feed_fn2),
# where the function signatures are:
# fetch_fn : Type -> (list of Tensors,
# lambda: list of fetched np.ndarray -> TypeVal)
# feed_fn1 : Type, TypeVal -> list of (Tensor, value)
# feed_fn2 : Type -> list of Tensors
#
# `fetch_fn` describes how to expand fetch into its
# component Tensors and how to contract the fetched results back into
# a single return value.
#
# Each feed function describes how to unpack a single fed value and map it to
# feeds of one or more tensors and their corresponding values: `feed_fn1` is
# used to feed a run, `feed_fn2` to set up a partial run.
#
# TODO(touts): We could reimplement these as specialized _FeedMapper
# implementations after we refactor the feed handling code to use them.
#
# Eventually, this registration could be opened up to support custom Tensor
# expansions.
# pylint: disable=g-long-lambda
_REGISTERED_EXPANSIONS = [
# SparseTensors are fetched as SparseTensorValues. They can be fed
# SparseTensorValues or normal tuples.
(sparse_tensor.SparseTensor,
lambda fetch: (
[fetch.indices, fetch.values, fetch.shape],
lambda fetched_vals: sparse_tensor.SparseTensorValue(*fetched_vals)),
lambda feed, feed_val: list(zip(
[feed.indices, feed.values, feed.shape], feed_val)),
lambda feed: [feed.indices, feed.values, feed.shape]),
# IndexedSlices are fetched as IndexedSlicesValues. They can be fed
# IndexedSlicesValues or normal tuples.
(ops.IndexedSlices,
lambda fetch: (
[fetch.values, fetch.indices] if fetch.dense_shape is None
else [fetch.values, fetch.indices, fetch.dense_shape],
_get_indexed_slices_value_from_fetches),
_get_feeds_for_indexed_slices,
lambda feed: [feed.values, feed.indices] if feed.dense_shape is None
else [feed.values, feed.indices, feed.dense_shape]),
# The default catches all other types and performs no expansions.
(object,
lambda fetch: ([fetch], lambda fetched_vals: fetched_vals[0]),
lambda feed, feed_val: [(feed, feed_val)],
lambda feed: [feed])]
# pylint: enable=g-long-lambda
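# Example of the default entry at work: for a plain Tensor t, the
# (object, ...) tuple expands the fetch to [t] and contracts the fetched
# list back to its single ndarray; feeding {t: val} likewise maps straight
# through as [(t, val)].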
def register_session_run_conversion_functions(tensor_type, fetch_function,
feed_function=None, feed_function_for_partial_run=None):
"""Register fetch and feed conversion functions for `tf.Session.run()`.
This function registers a triple of conversion functions for fetching and/or
feeding values of user-defined types in a call to tf.Session.run().
An example
```python
class SquaredTensor(object):
def __init__(self, tensor):
self.sq = tf.square(tensor)
#you can define conversion functions as follows:
fetch_function = lambda squared_tensor:([squared_tensor.sq],
lambda val: val[0])
feed_function = lambda feed, feed_val: [(feed.sq, feed_val)]
feed_function_for_partial_run = lambda feed: [feed.sq]
#then after invoking this register function, you can use it as follows:
session.run(squared_tensor1,
feed_dict = {squared_tensor2 : some_numpy_array})
```
Args:
tensor_type: The type for which you want to register a conversion function.
fetch_function: A callable that takes an object of type `tensor_type` and
returns a tuple, where the first element is a list of `tf.Tensor` objects,
and the second element is a callable that takes a list of ndarrays and
returns an object of some value type that corresponds to `tensor_type`.
fetch_function describes how to expand fetch into its component Tensors
and how to contract the fetched results back into a single return value.
feed_function: A callable that takes feed_key and feed_value as input, and
returns a list of tuples (feed_tensor, feed_val), feed_key must have type
`tensor_type`, and feed_tensor must have type `tf.Tensor`. Each feed
function describes how to unpack a single fed value and map it to feeds
of one or more tensors and their corresponding values.
feed_function_for_partial_run: A callable for specifying tensor values to
feed when setting up a partial run, which takes a `tensor_type` type
object as input, and returns a list of Tensors.
"""
for conversion_function in _REGISTERED_EXPANSIONS:
if issubclass(conversion_function[0], tensor_type):
raise ValueError(
'%s has already been registered so ignore it.' % tensor_type)
_REGISTERED_EXPANSIONS.insert(0,
(tensor_type, fetch_function, feed_function, feed_function_for_partial_run))
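# Note: insert(0, ...) puts new registrations at the front of the list, so
# they are matched before earlier ones by _FetchMapper.for_fetch and the
# _feed_fn helpers; the catch-all (object, ...) entry stays last and only
# applies when nothing more specific matched.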
class _FetchMapper(object):
"""Definition of the interface provided by fetch mappers.
Fetch mappers are utility classes used by the _FetchHandler to handle
arbitrary structures for the `fetch` argument to `Session.run()`.
The `fetch` argument can be of various shapes: single tensor or op, list of
fetches, tuple of fetches, namedtuple of fetches, or dict of fetches. The
structures can be arbitrarily nested.
The low level run() API only wants a list of tensor or op names. The various
`_FetchMapper` subclasses below take care of handling the different shapes:
uniquifying the fetches, and constructing results with the original shape.
"""
def unique_fetches(self):
"""Return the list of unique tensors or ops needed by this fetch mapper.
Returns:
A list of tensors or ops.
"""
raise NotImplementedError('Must be implemented by subclasses')
def build_results(self, values):
"""Build results that match the original shape of the fetch.
Args:
values: List of values returned by run(). The values correspond
exactly to the list tensors or ops returned by unique_fetches().
Returns:
A struct of the same shape as the original fetch object handled by
this fetch mapper. In the returned struct, the original fetches are
replaced by their fetched values.
"""
raise NotImplementedError('Must be implemented by subclasses')
@staticmethod
def for_fetch(fetch):
"""Creates fetch mapper that handles the structure of `fetch`.
The default graph must be the one from which we want to fetch values when
this function is called.
Args:
fetch: An arbitrary fetch structure: singleton, list, tuple,
namedtuple, or dict.
Returns:
An instance of a subclass of `_FetchMapper` that handles the shape.
"""
if fetch is None:
raise TypeError('Fetch argument %r has invalid type %r' %
(fetch, type(fetch)))
elif isinstance(fetch, (list, tuple)):
# NOTE(touts): This is also the code path for namedtuples.
return _ListFetchMapper(fetch)
elif isinstance(fetch, dict):
return _DictFetchMapper(fetch)
else:
# Look for a handler in the registered expansions.
for tensor_type, fetch_fn, _, _ in _REGISTERED_EXPANSIONS:
if isinstance(fetch, tensor_type):
fetches, contraction_fn = fetch_fn(fetch)
return _ElementFetchMapper(fetches, contraction_fn)
# Did not find anything.
raise TypeError('Fetch argument %r has invalid type %r' %
(fetch, type(fetch)))
class _ElementFetchMapper(_FetchMapper):
"""Fetch mapper for singleton tensors and ops."""
def __init__(self, fetches, contraction_fn):
"""Creates an _ElementFetchMapper.
This is the fetch mapper used for leaves in the fetch struct. Because of
the expansions mechanism, a leaf can actually fetch more than one tensor.
Also note that the fetches here can be just strings (tensor or op names) or
any other object that the graph knows how to convert to a tensor, such as a
Variable. So we have to run each fetch through `as_graph_element()` to get
the corresponding tensor or op.
Args:
fetches: List of objects, as returned by a fetch_fn defined
in _REGISTERED_EXPANSIONS.
contraction_fn: Callable as returned by a fetch_fn.
"""
self._unique_fetches = []
for fetch in fetches:
try:
self._unique_fetches.append(ops.get_default_graph().as_graph_element(
fetch, allow_tensor=True, allow_operation=True))
except TypeError as e:
raise TypeError('Fetch argument %r has invalid type %r, '
'must be a string or Tensor. (%s)'
% (fetch, type(fetch), str(e)))
except ValueError as e:
raise ValueError('Fetch argument %r cannot be interpreted as a '
'Tensor. (%s)' % (fetch, str(e)))
except KeyError as e:
raise ValueError('Fetch argument %r cannot be interpreted as a '
'Tensor. (%s)' % (fetch, str(e)))
self._contraction_fn = contraction_fn
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
if not values:
# 'Operation' case
return None
else:
return self._contraction_fn(values)
def _uniquify_fetches(fetch_mappers):
"""Uniquifies fetches from a list of fetch_mappers.
This is a utility function used by _ListFetchMapper and _DictFetchMapper. It
gathers all the unique fetches from a list of mappers and builds a list
containing all of them but without duplicates (unique_fetches).
It also returns a 2-D list of integers (values_indices) indicating at which
index in unique_fetches the fetches of the mappers are located.
This list is as follows:
values_indices[mapper_index][mapper_fetch_index] = unique_fetches_index
Args:
fetch_mappers: list of fetch mappers.
Returns:
A list of fetches.
A 2-D list of integers.
"""
unique_fetches = []
value_indices = []
seen_fetches = {}
for m in fetch_mappers:
m_value_indices = []
for f in m.unique_fetches():
j = seen_fetches.get(f)
if j is None:
j = len(seen_fetches)
seen_fetches[f] = j
unique_fetches.append(f)
m_value_indices.append(j)
value_indices.append(m_value_indices)
return unique_fetches, value_indices
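# Worked example: mappers with unique_fetches() of [a, b] and [b, c]
# produce unique_fetches == [a, b, c] and value_indices == [[0, 1], [1, 2]];
# the shared fetch b is deduplicated and both mappers index slot 1.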
class _ListFetchMapper(_FetchMapper):
"""Fetch mapper for lists, tuples, and namedtuples."""
def __init__(self, fetches):
"""Creates a _ListFetchMapper.
Args:
fetches: List, tuple, or namedtuple of fetches.
"""
self._fetch_type = type(fetches)
self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
# Create the list of results for each mapper.
results = []
for m, vi in zip(self._mappers, self._value_indices):
results.append(m.build_results([values[j] for j in vi]))
# Return a value of the original type of the fetches.
if self._fetch_type == list:
return results
elif self._fetch_type == tuple:
return tuple(results)
else:
# This is the code path for namedtuple.
return self._fetch_type(*results)
class _DictFetchMapper(_FetchMapper):
"""Fetch mapper for dicts."""
def __init__(self, fetches):
"""Creates a _DictFetchMapper.
Args:
fetches: Dict of fetches.
"""
self._fetch_type = type(fetches)
self._keys = fetches.keys()
self._mappers = [_FetchMapper.for_fetch(fetch)
for fetch in fetches.values()]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
results = self._fetch_type()
for k, m, vi in zip(self._keys, self._mappers, self._value_indices):
results[k] = m.build_results([values[j] for j in vi])
return results
class _FetchHandler(object):
"""Handler for structured fetches.
Given a graph, a user-provided structure for fetches, and a feed dict, this
class takes care of generating a list of tensor names to fetch and op names
to run for a low level `run()` call.
Given the results of the low level run call, this class can also rebuild a
result structure matching the user-provided structure for fetches, but
containing the corresponding results.
"""
# TODO(touts): Make this class also take care of destructuring the feed
# dict instead of doing it in the callers.
def __init__(self, graph, fetches, feeds):
"""Creates a fetch handler.
Args:
graph: Graph of the fetches. Used to check for fetchability
and to convert all fetches to tensors or ops as needed.
fetches: An arbitrary fetch structure: singleton, list, tuple,
namedtuple, or dict.
feeds: A feed dict where keys are fully resolved tensor names.
"""
with graph.as_default():
self._fetch_mapper = _FetchMapper.for_fetch(fetches)
self._fetches = []
self._targets = []
self._feeds = feeds
self._ops = []
self._fetch_handles = {}
for fetch in self._fetch_mapper.unique_fetches():
fetch_name = compat.as_bytes(fetch.name)
if isinstance(fetch, ops.Operation):
self._assert_fetchable(graph, fetch)
self._targets.append(fetch_name)
self._ops.append(True)
else:
self._assert_fetchable(graph, fetch.op)
self._fetches.append(fetch_name)
self._ops.append(False)
# Remember the fetch if it is for a tensor handle.
if isinstance(fetch, ops.Tensor) and fetch.op.type == 'GetSessionHandle':
self._fetch_handles[fetch_name] = fetch.op.inputs[0].dtype
self._final_fetches = [x for x in self._fetches if x not in feeds]
def _assert_fetchable(self, graph, op):
if not graph.is_fetchable(op):
raise ValueError(
'Operation %r has been marked as not fetchable.' % op.name)
def fetches(self):
"""Return the unique names of tensors to fetch.
Returns:
A list of strings.
"""
return self._final_fetches
def targets(self):
"""Return the unique names of ops to run.
Returns:
A list of strings.
"""
return self._targets
def build_results(self, session, tensor_values):
"""Build results matching the original fetch shape.
`tensor_values` must be a list of the same length as
the one returned by `fetches()`, and holding the requested
fetch values.
This method builds a struct with the same shape as the original `fetches`
passed to the constructor, in which the fetches are replaced by their
fetched value.
Args:
session: The enclosing session. Used for tensor handles.
tensor_values: List of values matching the list returned
by fetches().
Returns:
A structure of the same shape as the original `fetches` argument but
containing tensors or None (for fetched ops).
"""
full_values = []
assert len(self._final_fetches) == len(tensor_values)
i = 0
j = 0
for is_op in self._ops:
if is_op:
full_values.append(None)
else:
# If the fetch was in the feeds, use the fed value, otherwise
# use the returned value.
value = self._feeds.get(self._fetches[i])
if value is None:
value = tensor_values[j]
j += 1
dtype = self._fetch_handles.get(self._fetches[i])
if dtype:
full_values.append(session_ops.TensorHandle(value, dtype, session))
else:
full_values.append(value)
i += 1
assert j == len(tensor_values)
return self._fetch_mapper.build_results(full_values)
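# Worked example: for fetches [op, t1, t2] with t1 fed directly,
# self._ops == [True, False, False] and _final_fetches == [t2]; the loop
# emits None for the op, the fed value for t1 (j does not advance), and
# tensor_values[0] for t2.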
class BaseSession(SessionInterface):
"""A class for interacting with a TensorFlow computation.
The BaseSession enables incremental graph building with inline
execution of Operations and evaluation of Tensors.
"""
def __init__(self, target='', graph=None, config=None):
"""Constructs a new TensorFlow session.
Args:
target: (Optional) The TensorFlow execution engine to connect to.
graph: (Optional) The graph to be used. If this argument is None,
the default graph will be used.
config: (Optional) ConfigProto proto used to configure the session.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
creating the TensorFlow session.
TypeError: If one of the arguments has the wrong type.
"""
if graph is None:
self._graph = ops.get_default_graph()
else:
if not isinstance(graph, ops.Graph):
raise TypeError('graph must be a tf.Graph, but got %s' % type(graph))
self._graph = graph
self._opened = False
self._closed = False
self._current_version = 0
self._extend_lock = threading.Lock()
if target is not None:
try:
self._target = compat.as_bytes(target)
except TypeError:
raise TypeError('target must be a string, but got %s' % type(target))
else:
self._target = None
self._delete_lock = threading.Lock()
self._dead_handles = []
if config is not None:
if not isinstance(config, config_pb2.ConfigProto):
raise TypeError('config must be a tf.ConfigProto, but got %s'
% type(config))
self._config = config
self._add_shapes = config.graph_options.infer_shapes
else:
self._config = None
self._add_shapes = False
self._session = None
opts = tf_session.TF_NewSessionOptions(target=self._target, config=config)
try:
with errors.raise_exception_on_not_ok_status() as status:
self._session = tf_session.TF_NewDeprecatedSession(opts, status)
finally:
tf_session.TF_DeleteSessionOptions(opts)
def close(self):
"""Closes this session.
Calling this method frees all resources associated with the session.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
closing the TensorFlow session.
"""
with self._extend_lock:
if self._opened and not self._closed:
self._closed = True
with errors.raise_exception_on_not_ok_status() as status:
tf_session.TF_CloseDeprecatedSession(self._session, status)
def __del__(self):
# cleanly ignore all exceptions
try:
self.close()
except Exception: # pylint: disable=broad-except
pass
if self._session is not None:
try:
status = tf_session.TF_NewStatus()
tf_session.TF_DeleteDeprecatedSession(self._session, status)
finally:
tf_session.TF_DeleteStatus(status)
self._session = None
@property
def graph(self):
"""The graph that was launched in this session."""
return self._graph
@property
def graph_def(self):
"""A serializable version of the underlying TensorFlow graph.
Returns:
A graph_pb2.GraphDef proto containing nodes for all of the Operations in
the underlying TensorFlow graph.
"""
return self._graph.as_graph_def(add_shapes=self._add_shapes)
@property
def sess_str(self):
return self._target
def as_default(self):
"""Returns a context manager that makes this object the default session.
Use with the `with` keyword to specify that calls to
[`Operation.run()`](../../api_docs/python/framework.md#Operation.run) or
[`Tensor.eval()`](../../api_docs/python/framework.md#Tensor.eval) should be
executed in this session.
```python
c = tf.constant(...)
sess = tf.Session()
with sess.as_default():
assert tf.get_default_session() is sess
print(c.eval())
```
To get the current default session, use
[`tf.get_default_session()`](#get_default_session).
*N.B.* The `as_default` context manager *does not* close the
session when you exit the context, and you must close the session
explicitly.
```python
c = tf.constant(...)
sess = tf.Session()
with sess.as_default():
print(c.eval())
# ...
with sess.as_default():
print(c.eval())
sess.close()
```
Alternatively, you can use `with tf.Session():` to create a
session that is automatically closed on exiting the context,
including when an uncaught exception is raised.
*N.B.* The default graph is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
Returns:
A context manager using this session as the default session.
"""
return ops.default_session(self)
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Runs operations and evaluates tensors in `fetches`.
This method runs one "step" of TensorFlow computation, by
running the necessary graph fragment to execute every `Operation`
and evaluate every `Tensor` in `fetches`, substituting the values in
`feed_dict` for the corresponding input values.
The `fetches` argument may be a single graph element, or an arbitrarily
nested list, tuple, namedtuple, dict, or OrderedDict containing graph
elements at its leaves. A graph element can be one of the following types:
* An [`Operation`](../../api_docs/python/framework.md#Operation).
The corresponding fetched value will be `None`.
* A [`Tensor`](../../api_docs/python/framework.md#Tensor).
The corresponding fetched value will be a numpy ndarray containing the
value of that tensor.
* A [`SparseTensor`](../../api_docs/python/sparse_ops.md#SparseTensor).
The corresponding fetched value will be a
[`SparseTensorValue`](../../api_docs/python/sparse_ops.md#SparseTensorValue)
containing the value of that sparse tensor.
* A `get_tensor_handle` op. The corresponding fetched value will be a
numpy ndarray containing the handle of that tensor.
* A `string` which is the name of a tensor or operation in the graph.
The value returned by `run()` has the same shape as the `fetches` argument,
where the leaves are replaced by the corresponding values returned by
TensorFlow.
Example:
```python
a = tf.constant([10, 20])
b = tf.constant([1.0, 2.0])
# 'fetches' can be a singleton
v = session.run(a)
# v is the numpy array [10, 20]
# 'fetches' can be a list.
v = session.run([a, b])
# v is a Python list with 2 numpy arrays: the numpy array [10, 20] and the
# 1-D array [1.0, 2.0]
# 'fetches' can be arbitrary lists, tuples, namedtuple, dicts:
MyData = collections.namedtuple('MyData', ['a', 'b'])
v = session.run({'k1': MyData(a, b), 'k2': [b, a]})
# v is a dict with
# v['k1'] is a MyData namedtuple with 'a' the numpy array [10, 20] and
# 'b' the numpy array [1.0, 2.0]
# v['k2'] is a list with the numpy array [1.0, 2.0] and the numpy array
# [10, 20].
```
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. Each key in `feed_dict` can be
one of the following types:
* If the key is a [`Tensor`](../../api_docs/python/framework.md#Tensor), the
value may be a Python scalar, string, list, or numpy ndarray
that can be converted to the same `dtype` as that
tensor. Additionally, if the key is a
[placeholder](../../api_docs/python/io_ops.md#placeholder), the shape of
the value will be checked for compatibility with the placeholder.
* If the key is a
[`SparseTensor`](../../api_docs/python/sparse_ops.md#SparseTensor),
the value should be a
[`SparseTensorValue`](../../api_docs/python/sparse_ops.md#SparseTensorValue).
* If the key is a nested tuple of `Tensor`s or `SparseTensor`s, the value
should be a nested tuple with the same structure that maps to their
corresponding values as above.
Each value in `feed_dict` must be convertible to a numpy array of the dtype
of the corresponding key.
The optional `options` argument expects a [`RunOptions`] proto. The options
allow controlling the behavior of this particular step (e.g. turning tracing
on).
The optional `run_metadata` argument expects a [`RunMetadata`] proto. When
appropriate, the non-Tensor output of this step will be collected there. For
example, when users turn on tracing in `options`, the profiled info will be
collected into this argument and passed back.
Args:
fetches: A single graph element, a list of graph elements,
or a dictionary whose values are graph elements or lists of graph
elements (described above).
feed_dict: A dictionary that maps graph elements to values
(described above).
options: A [`RunOptions`] protocol buffer
run_metadata: A [`RunMetadata`] protocol buffer
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list, or a dictionary with the
same keys as `fetches` if that is a dictionary (described above).
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
ValueError: If `fetches` or `feed_dict` keys are invalid or refer to a
`Tensor` that doesn't exist.
"""
run_metadata_ptr = tf_session.TF_NewBuffer()
if options:
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(options.SerializeToString()))
else:
options_ptr = None
try:
result = self._run(None, fetches, feed_dict, options_ptr,
run_metadata_ptr)
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
if options:
tf_session.TF_DeleteBuffer(options_ptr)
return result
def partial_run(self, handle, fetches, feed_dict=None):
"""Continues the execution with more feeds and fetches.
This is EXPERIMENTAL and subject to change.
To use partial execution, a user first calls `partial_run_setup()` and
then a sequence of `partial_run()`. `partial_run_setup` specifies the
list of feeds and fetches that will be used in the subsequent
`partial_run` calls.
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. See run() for more information.
Below is a simple example:
```python
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
res = sess.partial_run(h, r2, feed_dict={c: res})
```
Args:
handle: A handle for a sequence of partial runs.
fetches: A single graph element, a list of graph elements,
or a dictionary whose values are graph elements or lists of graph
elements (see documentation for `run`).
feed_dict: A dictionary that maps graph elements to values
(described above).
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list, or a dictionary with the
same keys as `fetches` if that is a dictionary
(see documentation for `run`).
Raises:
tf.errors.OpError: Or one of its subclasses on error.
"""
# TODO(touts): Support feeding and fetching the same tensor.
return self._run(handle, fetches, feed_dict, None, None)
def partial_run_setup(self, fetches, feeds=None):
"""Sets up a graph with feeds and fetches for partial run.
This is EXPERIMENTAL and subject to change.
Note that, in contrast to `run`, `feeds` only specifies the graph elements.
The tensors will be supplied by the subsequent `partial_run` calls.
Args:
fetches: A single graph element, or a list of graph elements.
feeds: A single graph element, or a list of graph elements.
Returns:
A handle for partial run.
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
tf.errors.OpError: Or one of its subclasses if a TensorFlow error happens.
"""
def _feed_fn(feed):
for tensor_type, _, _, feed_fn in _REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed)
raise TypeError('Feed argument %r has invalid type %r'
% (feed, type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
# Create request.
feed_list = []
# Validate and process feed_list.
is_list_feed = isinstance(feeds, (list, tuple))
if not is_list_feed:
feeds = [feeds]
for feed in feeds:
for subfeed in _feed_fn(feed):
try:
subfeed_t = self.graph.as_graph_element(subfeed, allow_tensor=True,
allow_operation=False)
feed_list.append(compat.as_bytes(subfeed_t.name))
except Exception as e:
e.message = ('Cannot interpret feed_list key as Tensor: '
+ e.message)
e.args = (e.message,)
raise e
# Validate and process fetches.
# TODO(touts): Support feeding and fetching the same tensor.
fetch_handler = _FetchHandler(self._graph, fetches, {})
# Set up a graph with feeds and fetches for partial run.
def _setup_fn(session, feed_list, fetch_list, target_list):
self._extend_graph()
with errors.raise_exception_on_not_ok_status() as status:
return tf_session.TF_PRunSetup(session, feed_list, fetch_list,
target_list, status)
return self._do_call(_setup_fn, self._session, feed_list,
fetch_handler.fetches(), fetch_handler.targets())
def _run(self, handle, fetches, feed_dict, options, run_metadata):
"""Perform either run or partial_run, depending the presence of `handle`."""
def _feed_fn(feed, feed_val):
for tensor_type, _, feed_fn, _ in _REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed, feed_val)
raise TypeError('Feed argument %r has invalid type %r'
% (feed, type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
# Create request.
feed_dict_string = {}
feed_map = {}
# Validate and process feed_dict.
if feed_dict:
feed_dict = nest.flatten_dict_items(feed_dict)
for feed, feed_val in feed_dict.items():
for subfeed, subfeed_val in _feed_fn(feed, feed_val):
try:
subfeed_t = self.graph.as_graph_element(subfeed, allow_tensor=True,
allow_operation=False)
except Exception as e:
raise TypeError('Cannot interpret feed_dict key as Tensor: '
+ e.args[0])
if isinstance(subfeed_val, ops.Tensor):
raise TypeError('The value of a feed cannot be a tf.Tensor object. '
'Acceptable feed values include Python scalars, '
'strings, lists, or numpy ndarrays.')
subfeed_dtype = subfeed_t.dtype.as_numpy_dtype
if isinstance(subfeed_val,
int) and subfeed_dtype(subfeed_val) != subfeed_val:
raise TypeError(
'Type of feed value ' + str(subfeed_val) + ' is not'
' compatible with Tensor type ' + str(subfeed_dtype) + '.'
' Try explicitly setting the type of the feed tensor'
' to a larger type (e.g. int64).')
np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
if not subfeed_t.get_shape().is_compatible_with(np_val.shape):
raise ValueError(
'Cannot feed value of shape %r for Tensor %r, '
'which has shape %r'
% (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
if not self.graph.is_feedable(subfeed_t):
raise ValueError('Tensor %s may not be fed.' % subfeed_t)
subfeed_name = compat.as_bytes(subfeed_t.name)
feed_dict_string[subfeed_name] = np_val
feed_map[subfeed_name] = (subfeed_t, subfeed_val)
# Create a fetch handler to take care of the structure of fetches.
fetch_handler = _FetchHandler(self._graph, fetches, feed_dict_string)
# Run request and get response.
# We need to keep the movers alive for the following _do_run().
# These movers are no longer needed when _do_run() completes, and
# are deleted when `movers` goes out of scope when this _run() ends.
# TODO(yuanbyu, keveman): Revisit whether we should just treat feeding
# of a handle from a different device as an error.
movers = self._update_with_movers(feed_dict_string, feed_map)
final_fetches = fetch_handler.fetches()
final_targets = fetch_handler.targets()
if final_fetches or final_targets:
results = self._do_run(handle, final_targets, final_fetches,
feed_dict_string, options, run_metadata)
else:
results = []
return fetch_handler.build_results(self, results)
# Captures the name of a node in an error status.
_NODEDEF_NAME_RE = re.compile(r'\[\[Node: ([^ ]*?) =')
def _do_run(self, handle, target_list, fetch_list, feed_dict,
options, run_metadata):
"""Runs a step based on the given fetches and feeds.
Args:
handle: a handle for partial_run. None if this is just a call to run().
      target_list: A list of byte arrays corresponding to names of tensors
        or operations to be run, but not fetched.
fetch_list: A list of byte arrays corresponding to names of tensors to
be fetched and operations to be run.
feed_dict: A dictionary that maps tensor names (as byte arrays) to
numpy ndarrays.
options: A (pointer to a) [`RunOptions`] protocol buffer, or None
run_metadata: A (pointer to a) [`RunMetadata`] protocol buffer, or None
Returns:
A list of numpy ndarrays, corresponding to the elements of
`fetch_list`. If the ith element of `fetch_list` contains the
name of an operation, the first Tensor output of that operation
will be returned for that element.
Raises:
tf.errors.OpError: Or one of its subclasses on error.
"""
def _run_fn(session, feed_dict, fetch_list, target_list, options,
run_metadata):
# Ensure any changes to the graph are reflected in the runtime.
self._extend_graph()
with errors.raise_exception_on_not_ok_status() as status:
return tf_session.TF_Run(session, options,
feed_dict, fetch_list, target_list,
status, run_metadata)
def _prun_fn(session, handle, feed_dict, fetch_list):
if target_list:
raise RuntimeError('partial_run() requires empty target_list.')
with errors.raise_exception_on_not_ok_status() as status:
return tf_session.TF_PRun(session, handle, feed_dict, fetch_list,
status)
if handle is None:
return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
target_list, options, run_metadata)
else:
return self._do_call(_prun_fn, self._session, handle, feed_dict,
fetch_list)
def _do_call(self, fn, *args):
try:
return fn(*args)
except errors.OpError as e:
message = compat.as_text(e.message)
m = BaseSession._NODEDEF_NAME_RE.search(message)
node_def = None
op = None
if m is not None:
node_name = m.group(1)
try:
op = self._graph.get_operation_by_name(node_name)
node_def = op.node_def
except KeyError:
pass
raise type(e)(node_def, op, message)
def _extend_graph(self):
# Ensure any changes to the graph are reflected in the runtime.
with self._extend_lock:
if self._graph.version > self._current_version:
# pylint: disable=protected-access
graph_def, self._current_version = self._graph._as_graph_def(
from_version=self._current_version,
add_shapes=self._add_shapes)
# pylint: enable=protected-access
with errors.raise_exception_on_not_ok_status() as status:
tf_session.TF_ExtendGraph(
self._session, graph_def.SerializeToString(), status)
self._opened = True
# The threshold to run garbage collection to delete dead tensors.
_DEAD_HANDLES_THRESHOLD = 10
def _register_dead_handle(self, handle):
    # Register a dead handle in the session. Delete the dead tensors when
    # the number of dead tensors reaches a certain threshold.
tensors_to_delete = None
with self._delete_lock:
self._dead_handles.append(handle)
if len(self._dead_handles) == BaseSession._DEAD_HANDLES_THRESHOLD:
tensors_to_delete = self._dead_handles
self._dead_handles = []
# Delete the dead tensors.
# TODO(yuanbyu): For now we use a sequence of runs to minimize the graph
# size and the overhead of graph construction/partitioning.
if tensors_to_delete:
for tensor_handle in tensors_to_delete:
feeds = {}
fetches = []
holder, deleter = session_ops._get_handle_deleter(self.graph,
tensor_handle)
feeds[holder] = tensor_handle
fetches.append(deleter)
self.run(fetches, feed_dict=feeds)
def _update_with_movers(self, feed_dict, feed_map):
    # If a tensor handle is fed to a device-incompatible placeholder, we move
    # the tensor to the right device, generate a new tensor handle, and
    # update `feed_dict` to use the new handle.
handle_movers = []
for feed_name, val in feed_map.items():
mover = session_ops._get_handle_mover(self.graph, *val)
if mover:
handle_movers.append((feed_name, val[1], mover))
# Transfer a tensor to the right device if needed.
if not handle_movers:
return []
else:
feeds = {}
fetches = []
for _, handle, mover in handle_movers:
feeds[mover[0]] = handle
fetches.append(mover[1])
handles = self.run(fetches, feed_dict=feeds)
for handle_mover, handle in zip(handle_movers, handles):
np_val = np.array(handle.handle, dtype=np.object)
feed_dict[handle_mover[0]] = np_val
return handles
class Session(BaseSession):
"""A class for running TensorFlow operations.
A `Session` object encapsulates the environment in which `Operation`
objects are executed, and `Tensor` objects are evaluated. For
example:
```python
# Build a graph.
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# Launch the graph in a session.
sess = tf.Session()
# Evaluate the tensor `c`.
print(sess.run(c))
```
A session may own resources, such as
[variables](../../api_docs/python/state_ops.md#Variable), [queues](../../api_docs/python/io_ops.md#QueueBase),
and [readers](../../api_docs/python/io_ops.md#ReaderBase). It is important to release
these resources when they are no longer required. To do this, either
invoke the [`close()`](#Session.close) method on the session, or use
the session as a context manager. The following two examples are
equivalent:
```python
# Using the `close()` method.
sess = tf.Session()
sess.run(...)
sess.close()
# Using the context manager.
with tf.Session() as sess:
sess.run(...)
```
The [`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer exposes various configuration options for a
session. For example, to create a session that uses soft constraints
for device placement, and log the resulting placement decisions,
create a session as follows:
```python
# Launch the graph in a session that allows soft device placement and
# logs the placement decisions.
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
log_device_placement=True))
```
@@__init__
@@run
@@close
@@graph
@@as_default
@@reset
"""
def __init__(self, target='', graph=None, config=None):
"""Creates a new TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
    using more than one graph (created with `tf.Graph()`) in the same
    process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to.
Defaults to using an in-process engine. See
[Distributed Tensorflow](https://www.tensorflow.org/how_tos/distributed/index.html)
for more examples.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional.) A [`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer with configuration options for the session.
"""
super(Session, self).__init__(target, graph, config=config)
# NOTE(mrry): Create these on first `__enter__` to avoid a reference cycle.
self._default_graph_context_manager = None
self._default_session_context_manager = None
def __enter__(self):
if self._default_graph_context_manager is None:
self._default_graph_context_manager = self.graph.as_default()
else:
raise RuntimeError('Session context managers are not re-entrant. '
'Use `Session.as_default()` if you want to enter '
'a session multiple times.')
if self._default_session_context_manager is None:
self._default_session_context_manager = self.as_default()
self._default_graph_context_manager.__enter__()
return self._default_session_context_manager.__enter__()
def __exit__(self, exec_type, exec_value, exec_tb):
if exec_type is errors.OpError:
logging.error('Session closing due to OpError: %s', (exec_value,))
self._default_session_context_manager.__exit__(
exec_type, exec_value, exec_tb)
self._default_graph_context_manager.__exit__(exec_type, exec_value, exec_tb)
self._default_session_context_manager = None
self._default_graph_context_manager = None
self.close()
@staticmethod
def reset(target, containers=None, config=None):
"""Resets resource containers on `target`, and close all connected sessions.
A resource container is distributed across all workers in the
same cluster as `target`. When a resource container on `target`
is reset, resources associated with that container will be cleared.
In particular, all Variables in the container will become undefined:
they lose their values and shapes.
NOTE:
(i) reset() is currently only implemented for distributed sessions.
(ii) Any sessions on the master named by `target` will be closed.
If no resource containers are provided, all containers are reset.
Args:
target: The execution engine to connect to.
      containers: A list of resource container name strings, or `None` if all
        the containers are to be reset.
config: (Optional.) Protocol buffer with configuration options.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
resetting containers.
"""
if target is not None:
target = compat.as_bytes(target)
if containers is not None:
containers = [compat.as_bytes(c) for c in containers]
else:
containers = []
tf_session.TF_Reset(target, containers, config)
class InteractiveSession(BaseSession):
"""A TensorFlow `Session` for use in interactive contexts, such as a shell.
The only difference with a regular `Session` is that an `InteractiveSession`
installs itself as the default session on construction.
The methods [`Tensor.eval()`](../../api_docs/python/framework.md#Tensor.eval)
and [`Operation.run()`](../../api_docs/python/framework.md#Operation.run)
will use that session to run ops.
This is convenient in interactive shells and [IPython
notebooks](http://ipython.org), as it avoids having to pass an explicit
`Session` object to run ops.
For example:
```python
sess = tf.InteractiveSession()
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# We can just use 'c.eval()' without passing 'sess'
print(c.eval())
sess.close()
```
Note that a regular session installs itself as the default session when it
is created in a `with` statement. The common usage in non-interactive
programs is to follow that pattern:
```python
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
with tf.Session():
# We can also use 'c.eval()' here.
print(c.eval())
```
@@__init__
@@close
"""
def __init__(self, target='', graph=None, config=None):
"""Creates a new interactive TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
    using more than one graph (created with `tf.Graph()`) in the same
    process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to.
Defaults to using an in-process engine.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional) `ConfigProto` proto used to configure the session.
"""
if not config:
config = config_pb2.ConfigProto()
# Interactive sessions always place pruned graphs.
config.graph_options.place_pruned_graph = True
super(InteractiveSession, self).__init__(target, graph, config)
self._default_session = self.as_default()
self._default_session.enforce_nesting = False
self._default_session.__enter__()
self._explicit_graph = graph
if self._explicit_graph is not None:
self._default_graph = graph.as_default()
self._default_graph.enforce_nesting = False
self._default_graph.__enter__()
def close(self):
"""Closes an `InteractiveSession`."""
super(InteractiveSession, self).close()
if self._explicit_graph is not None:
self._default_graph.__exit__(None, None, None)
self._default_session.__exit__(None, None, None)
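# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how the feed
# validation in `BaseSession._run` above behaves from user code. It assumes
# the public TF 1.x API (`tf.placeholder`, `tf.Session`) and is meant to be
# run as a standalone script, not from inside this module.
#
#   import numpy as np
#   import tensorflow as tf
#
#   x = tf.placeholder(tf.float32, shape=[2])  # a feedable tensor
#   y = x * 2.0
#   with tf.Session() as sess:
#     # Acceptable feed values: Python scalars, strings, lists, numpy ndarrays.
#     print(sess.run(y, feed_dict={x: [1.0, 2.0]}))        # [2. 4.]
#     # sess.run(y, feed_dict={x: np.zeros(3)})            # ValueError: shape
#     # sess.run(y, feed_dict={x: tf.constant([1., 2.])})  # TypeError: Tensor
# ---------------------------------------------------------------------------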
|
laosiaudi/tensorflow
|
tensorflow/python/client/session.py
|
Python
|
apache-2.0
| 50,538
|
"""
slothpal.context
~~~~~~~~~~~~~~~~
"""
from peak.util.proxies import ObjectProxy
from slothpal import constants
from slothpal.attributes import AttributeDict
context = ObjectProxy(None)
class PayPalContext(AttributeDict):
def __init__(self, **kwargs):
self.update(kwargs)
def push(self):
"""
Push the configuration to the global proxy
"""
if context.__subject__ is not self:
context.__subject__ = self
return self
def pop(self):
"""
Reset the configuration object to its initial state
"""
if context.__subject__ is self:
context.__subject__ = None
def __enter__(self):
return self.push()
def __exit__(self, type, val, tb):
self.pop()
def add_urls(config):
"""
    Add PayPal-specific URLs to the configuration dict. If the sandbox
    environment is desired, point the URL endpoint at the sandbox.
"""
if config.get("is_sandbox"):
config.update(endpoint=constants.SANDBOX_ENDPOINT)
else:
config.update(endpoint=constants.LIVE_ENDPOINT)
def paypal_context(config):
"""
Initialize the context manager that will handle all PayPal configuration
"""
add_urls(config)
return PayPalContext(paypal=config)
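# Minimal usage sketch (illustrative, not part of the original module): the
# config keys below are hypothetical, and it assumes `AttributeDict` exposes
# dict keys as attributes so that `context.paypal` resolves via the proxy.
if __name__ == "__main__":
    with paypal_context({"is_sandbox": True, "client_id": "example-id"}):
        # Inside the block the global proxy points at this configuration.
        print(context.paypal["endpoint"])  # the sandbox endpoint
    # On exit, pop() detaches the configuration from the proxy again.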
|
hahnicity/slothpal
|
slothpal/context.py
|
Python
|
unlicense
| 1,294
|
#!/usr/bin/env python
import os.path
import setuptools
import sprockets.mixins.cors
def read_requirements(file_name):
requirements = []
try:
with open(os.path.join('requires', file_name)) as req_file:
for req_line in req_file:
req_line = req_line.strip()
if '#' in req_line:
req_line = req_line[0:req_line.find('#')].strip()
if req_line.startswith('-r'):
req_line = req_line[2:].strip()
requirements.extend(read_requirements(req_line))
else:
requirements.append(req_line)
except IOError:
pass
return requirements
install_requires = read_requirements('install.txt')
setup_requires = read_requirements('setup.txt')
tests_require = read_requirements('testing.txt')
setuptools.setup(
name='sprockets.mixins.cors',
version=sprockets.mixins.cors.__version__,
description=('Tornado RequestHandler mix-in for implementing '
'a CORS enabled endpoint.'),
long_description='\n'+open('README.rst').read().strip(),
url='https://github.com/sprockets/sprockets.mixins.cors.git',
author='AWeber Communications, Inc.',
author_email='api@aweber.com',
license='BSD',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
],
packages=setuptools.find_packages(),
namespace_packages=['sprockets', 'sprockets.mixins'],
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
test_suite='nose.collector',
zip_safe=True)
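# Example of the requires/*.txt layout that read_requirements() understands
# (hypothetical contents; '#' starts a comment, and '-r <file>' pulls in
# another requirements file from the same 'requires' directory):
#
#   requires/install.txt:
#       tornado>=4.0  # web framework
#       -r common.txt
#   requires/common.txt:
#       six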
|
sprockets/sprockets.mixins.cors
|
setup.py
|
Python
|
bsd-3-clause
| 1,977
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './Preferences/ProgramsDialog.ui'
#
# Created: Tue Nov 18 17:53:56 2014
# by: PyQt5 UI code generator 5.3.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_ProgramsDialog(object):
def setupUi(self, ProgramsDialog):
ProgramsDialog.setObjectName("ProgramsDialog")
ProgramsDialog.resize(700, 570)
self.vboxlayout = QtWidgets.QVBoxLayout(ProgramsDialog)
self.vboxlayout.setObjectName("vboxlayout")
self.programsList = QtWidgets.QTreeWidget(ProgramsDialog)
self.programsList.setRootIsDecorated(False)
self.programsList.setObjectName("programsList")
self.vboxlayout.addWidget(self.programsList)
self.buttonBox = QtWidgets.QDialogButtonBox(ProgramsDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Close)
self.buttonBox.setObjectName("buttonBox")
self.vboxlayout.addWidget(self.buttonBox)
self.retranslateUi(ProgramsDialog)
self.buttonBox.accepted.connect(ProgramsDialog.close)
self.buttonBox.rejected.connect(ProgramsDialog.close)
QtCore.QMetaObject.connectSlotsByName(ProgramsDialog)
def retranslateUi(self, ProgramsDialog):
_translate = QtCore.QCoreApplication.translate
ProgramsDialog.setWindowTitle(_translate("ProgramsDialog", "External Programs"))
self.programsList.setSortingEnabled(True)
self.programsList.headerItem().setText(0, _translate("ProgramsDialog", "Path"))
self.programsList.headerItem().setText(1, _translate("ProgramsDialog", "Version"))
|
davy39/eric
|
Preferences/Ui_ProgramsDialog.py
|
Python
|
gpl-3.0
| 1,757
|
import logging
import subprocess as sp
import threading
from . import stats
from .frame import Frame
from .service import Service
class Reader(Service):
def __init__(self, name, queue, source, shape, capture_options=None, bufsize=10 ** 8):
super().__init__()
self.name = name
self.queue = queue
self.source = source
self.shape = shape
self.capture_options = capture_options
self.bufsize = bufsize
self.frames = 0
self.frame_lock = threading.Condition()
def run(self):
command = ['ffmpeg'] + \
(self.capture_options or []) + \
['-i', self.source,
'-f', 'image2pipe',
'-pix_fmt', {3: 'rgb24'}[self.shape[2]],
'-vcodec', 'rawvideo', '-']
self.running = True
pipe = None
stats_tpl = 'capture.{}.{{}}'.format(self.name)
stats_loop = stats_tpl.format('loop')
stats_frame = stats_tpl.format('frame')
stats_invalid = stats_tpl.format('invalid')
stats_queue = stats_tpl.format('queue')
while self.running:
with stats.timer(stats_loop):
raw_image = None
if pipe is not None:
if pipe.poll() is None:
with stats.timer(stats_frame):
raw_image = pipe.stdout.read(self.shape[0] * self.shape[1] * self.shape[2])
if not raw_image:
logging.debug('invalid frame data')
stats.incr(stats_invalid)
continue
else:
logging.error('subprocess has died')
stdout, stderr = pipe.communicate()
                        if stderr:
                            logging.warning('stderr: %s', stderr)
                        if stdout:
                            logging.debug('stdout: %s', stdout)
pipe = None
continue
else:
pipe = sp.Popen(command, stdout=sp.PIPE, stderr=sp.DEVNULL, bufsize=self.bufsize)
logging.info('recording started')
continue
if raw_image:
try:
with stats.timer(stats_queue):
frame = Frame(self.shape, raw_image=raw_image)
with self.frame_lock:
self.queue.push(frame)
self.frames += 1
self.frame_lock.notify()
except ValueError:
logging.exception('invalid frame')
        logging.info('process terminated')
        if pipe is not None:
            stdout, stderr = pipe.communicate()
            if stdout:
                logging.debug('stdout: %s', stdout)
            if stderr:
                logging.debug('stderr: %s', stderr)
def wait_first_frame(self):
with self.frame_lock:
return self.frame_lock.wait_for(lambda: self.frames)
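# Minimal wiring sketch (illustrative): the queue only needs a push() method,
# and Service is assumed to provide a start() that runs run() on a thread.
# The source URL and frame shape below are hypothetical.
if __name__ == "__main__":
    class ListQueue:
        def __init__(self):
            self.items = []

        def push(self, item):
            self.items.append(item)

    reader = Reader("cam0", ListQueue(), "rtsp://example/stream", (480, 640, 3))
    reader.start()              # assumed Service API; spawns run() in a thread
    reader.wait_first_frame()   # blocks until at least one frame is queued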
|
bkmeneguello/surveillance
|
surveillance/reader.py
|
Python
|
unlicense
| 3,126
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-29 01:58
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('magic', '0006_auto_20170928_2238'),
]
operations = [
migrations.RemoveField(
model_name='card',
name='card_type',
),
migrations.AddField(
model_name='card',
name='card_types',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='archetype',
name='colors',
field=models.ManyToManyField(related_name='archetype_colors', to='magic.Color'),
),
migrations.AlterField(
model_name='card',
name='card_subtypes',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='card',
name='card_supertypes',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='card',
name='cmc',
field=models.IntegerField(blank=True, null=True, verbose_name='converted mana cost'),
),
migrations.AlterField(
model_name='card',
name='color_identity',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
),
migrations.RemoveField(
model_name='card',
name='colors',
),
migrations.AddField(
model_name='card',
name='colors',
field=models.ManyToManyField(related_name='card_colors', to='magic.Color', verbose_name='colors'),
),
migrations.AlterField(
model_name='card',
name='foreign_names',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='card',
name='power',
field=models.IntegerField(blank=True, null=True, verbose_name='power'),
),
migrations.AlterField(
model_name='card',
name='printings',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='card',
name='toughness',
field=models.IntegerField(blank=True, null=True, verbose_name='toughness'),
),
]
|
Angoreher/xcero
|
magic/migrations/0007_auto_20170928_2258.py
|
Python
|
mit
| 2,691
|
'''
Forms for Prizes.
Created on Nov 4, 2012
@author: Cam Moore
'''
from django import forms
from apps.managers.challenge_mgr.models import RoundSetting
class ChangePrizeRoundForm(forms.Form):
"""change prize round form."""
round_choice = forms.ModelChoiceField(queryset=RoundSetting.objects.all(), required=True)
|
KendyllD/boukenda-project
|
makahiki/apps/widgets/prizes/forms.py
|
Python
|
mit
| 326
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
config = {
"suite_definitions": {
"mochitest": {
"options": [
"--total-chunks=%(total_chunks)s",
"--this-chunk=%(this_chunk)s",
"--profile=%(gaia_profile)s",
"--app=%(application)s",
"--desktop",
"--utility-path=%(utility_path)s",
"--certificate-path=%(cert_path)s",
"--symbols-path=%(symbols_path)s",
"--browser-arg=%(browser_arg)s",
"--quiet",
"--log-raw=%(raw_log_file)s",
"--screenshot-on-fail",
],
"run_filename": "runtestsb2g.py",
"testsdir": "mochitest"
},
"reftest": {
"options": [
"--desktop",
"--profile=%(gaia_profile)s",
"--appname=%(application)s",
"--total-chunks=%(total_chunks)s",
"--this-chunk=%(this_chunk)s",
"--browser-arg=%(browser_arg)s",
"--symbols-path=%(symbols_path)s",
"%(test_manifest)s"
],
"run_filename": "runreftestsb2g.py",
"testsdir": "reftest"
}
}
}
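# Illustration (hypothetical values): the "%(name)s" tokens above are ordinary
# Python %-style placeholders that the test harness fills in from a dict.
if __name__ == "__main__":
    values = {
        "total_chunks": 4, "this_chunk": 1, "gaia_profile": "/tmp/gaia",
        "application": "b2g", "utility_path": "/tmp/bin",
        "cert_path": "/tmp/certs", "symbols_path": "/tmp/symbols",
        "browser_arg": "-foreground", "raw_log_file": "/tmp/raw.log",
    }
    opts = config["suite_definitions"]["mochitest"]["options"]
    print([opt % values for opt in opts])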
|
vladikoff/fxa-mochitest
|
tests/config/mozharness/b2g_desktop_config.py
|
Python
|
mpl-2.0
| 1,440
|
# Django settings for example project.
import os
PROJECT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(PROJECT_PATH, 'example.db'), # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_PATH, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.join(PROJECT_PATH, 'static')
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '20qagh*vr36)5)t@4ni1g_kvroyp8qxdmhok&g_e_$9sy60#-u'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'example.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'example.wsgi.application'
TEMPLATE_DIRS = (
os.path.join(PROJECT_PATH, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'template_graph',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
LOGIN_REDIRECT_URL = '/'
ALLOWED_HOSTS = ('*', )  # Overrides the empty ALLOWED_HOSTS defined above.
TEMPLATE_GRAPH_PATH = PROJECT_PATH
|
caktus/django-template-graph
|
example/example/settings.py
|
Python
|
bsd-2-clause
| 5,564
|
"""
Resources for equipment from `NKT Photonics <https://www.nktphotonics.com/>`_.
"""
from .nktpdll import (
NKT,
PortStatusCallback,
DeviceStatusCallback,
RegisterStatusCallback,
RegisterPriorityTypes,
RegisterDataTypes,
RegisterStatusTypes,
PortStatusTypes,
DeviceModeTypes,
DeviceStatusTypes,
ParamSetUnitTypes,
DateTimeType,
ParameterSetType,
)
|
MSLNZ/msl-equipment
|
msl/equipment/resources/nkt/__init__.py
|
Python
|
mit
| 402
|
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.contrib import admin
admin.autodiscover()
handler404 = 'pyquiz.views.page_not_found'
handler500 = 'pyquiz.views.internal_error'
urlpatterns = patterns('',
# Examples:
#url(r'^(.*)$', TemplateView.as_view(template_name='maintainenance.html'), name="maintainenance"),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/', include('allauth.urls')),
url(r'^pyquiz/', include("pyquiz.urls")),
url(r'^/?$', 'pyquiz.views.home', name='home'),
url(r'^gen_list/?$', 'pyquiz.views.generate_list', name='users'),
)
|
vivekhas3/python_quizzup
|
python_quizzup/urls.py
|
Python
|
mit
| 708
|
import requests
from lib.base import OpscenterAction
class GetClusterRepairStatusAction(OpscenterAction):
def run(self, cluster_id=None):
if not cluster_id:
cluster_id = self.cluster_id
url = self._get_full_url([cluster_id, 'services', 'repair'])
return requests.get(url).json()
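# Hedged sketch of what run() produces (base-class details are assumed:
# _get_full_url presumably joins the path parts onto the OpsCenter base URL):
#
#   action.run("cluster1")  # GET <base>/cluster1/services/repair -> JSON dict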
|
pidah/st2contrib
|
packs/opscenter/actions/get_repair_status.py
|
Python
|
apache-2.0
| 324
|
from wishbonedevice import WishBoneDevice
import fractions as _frac
import logging
import time
logging.getLogger(__name__).addHandler(logging.NullHandler())
class LMX2581(WishBoneDevice):
""" LMX2581 Frequency Synthesizer """
DICTS = [
#00
{
'ID' : 1 << 31,
'FRAC_DITHER' : 0b11 << 29,
'NO_FCAL' : 1 << 28,
'PLL_N' : 0b111111111111 << 16, # PLL[11:0] PLL Feedback Divider Value
'PLL_NUM_L' : 0b111111111111 << 4, # PLL_NUM[11:0] PLL Fractional Numerator
},
#01
{
'CPG' : 0b11111 << 27,
'VCO_SEL' : 0b11 << 25,
'PLL_NUM_H' : 0b1111111111 << 15, # PLL_NUM[21:12] PLL Fractional Numerator
'FRAC_ORDER' : 0b111 << 12,
'PLL_R' : 0b11111111 << 4, # PLL_R[7:0] divides the OSCin frequency
},
#02
{
'OSC_2X' : 1 << 29,
'CPP' : 1 << 27,
'PLL_DEN' : 0b1111111111111111111111 << 4, # PLL_DEN[21:0] PLL Fractional Denominator
},
#03
{
'VCO_DIV' : 0b11111 << 18, # VCO_DIV[4:0] VCO Divider Value
'OUTB_PWR' : 0b111111 << 12,
'OUTA_PWR' : 0b111111 << 6,
'OUTB_PD' : 1 << 5,
'OUTA_PD' : 1 << 4,
},
#04
{
'PFD_DLY' : 0b111 << 29,
'FL_FRCE' : 1 << 28,
'FL_TOC' : 0b111111111111 << 16,
'FL_CPG' : 0b11111 << 11,
'CPG_BLEED' : 0b111111 << 4,
},
#05
{
'OUT_LDEN' : 1 << 24,
'OSC_FREQ' : 0b111 << 21,
'BUFEN_DIS' : 1 << 20,
'VCO_SEL_MODE' : 0b11 << 15,
'OUTB_MUX' : 0b11 << 13,
'OUTA_MUX' : 0b11 << 11,
'0_DLY' : 1 << 10,
'MODE' : 0b11 << 8,
'PWDN_MODE' : 0b111 << 5,
'RESET' : 1 << 4,
},
#06
{
'RD_DIAGNOSATICS' : 0b11111111111111111111 << 11,
'RDADDR' : 0b1111 << 5,
'UWIRE_LOCK' : 1 << 4,
},
#07
{
'FL_SELECT' : 0b11111 << 26,
'FL_PINMODE' : 0b111 << 23,
'FL_INV' : 1 << 22,
'MUXOUT_SELECT' : 0b11111 << 17,
'MUX_INV' : 1 << 16,
'MUXOUT_PINMODE' : 0b111 << 13,
'LD_SELECT' : 0b11111 << 8,
'LD_INV' : 1 << 7,
'LD_PINMODE' : 0b111 << 4,
},
#08
None,
#09
None,
#10
None,
#11
None,
#12
None,
#13
{
'DLD_ERR_CNT' : 0b1111 << 28,
'DLD_PASS_CNT' : 0b1111111111 << 18,
'DLD_TOL' : 0b111 << 15,
},
#14
None,
#15
{
'VCO_CAP_MAN' : 1 << 12,
'VCO_CAPCODE' : 0b11111111 << 4,
},
]
MASK_DIAG = {
'VCO_SELECT': 0b11 << 18+11,
'FIN_DETECT': 0b1 << 17+11,
'OSCIN_DETECT': 0b1 << 16+11,
'VCO_DETECT': 0b1 << 15+11,
'CAL_RUNNING': 0b1 << 10+11,
'VCO_RAIL_HIGH': 0b1 << 9+11,
'VCO_RAIL_LOW': 0b1 << 8+11,
'VCO_TUNE_HIGH': 0b1 << 6+11,
'VCO_TUNE_VALID': 0b1 << 5+11,
'FLOUT_ON': 0b1 << 4+11,
'DLD': 0b1 << 3+11,
'LD_PINSTATE': 0b1 << 2+11,
'CE_PINSTATE': 0b1 << 1+11,
'BUFEN_PINSTATE': 0b1 << 0+11
}
CMD10 = 0b00100001000000000101000011001010 # Not disclosed to user
CMD09 = 0b00000011110001111100000000111001 # Not disclosed to user
CMD08 = 0b00100000011111011101101111111000 # Not disclosed to user
    # Using recommended parameter settings in the following datasheet
# http://www.ti.com/lit/ds/symlink/lmx2581.pdf
# Also borrow some lines from https://github.com/domagalski/snap-synth
def __init__(self, interface, controller_name, fosc=10):
super(LMX2581, self).__init__(interface, controller_name)
# A non-None address list
self.A_DICT_LIST = [self.DICTS.index(a) for a in self.DICTS if a != None]
self.FOSC = fosc # 10 MHz from GPS module
self.freq_pd = self.FOSC / 1
def init(self):
# Generated via TI http://www.ti.com/tool/clockdesigntool
# and via TI http://www.ti.com/tool/codeloader
        r05=0x40870015  # NOTE: overwritten by the r05 value assigned below
r15=0x021FE80F
r13=0x4082C10D
r10=0x210050CA
r09=0x03C7C039
r08=0x207DDBF8
r07=0x00082317
r06=0x000004C6
r05=0x0010A805
r04=0x00000004
r03=0x2004F3C3
r02=0x0C000642
r01=0xD0000011
r00=0x60C80000
self.reset()
self.write(r15)
self.write(r13)
self.write(r10)
self.write(r09)
self.write(r08)
self.write(r07)
self.write(r06)
self.write(r05)
self.write(r04)
self.write(r03)
self.write(r02)
self.write(r01)
self.write(r00)
time.sleep(0.02)
self.write(r00)
def powerOn(self):
self.setWord(0, "PWDN_MODE")
def powerOff(self):
self.setWord(1, "PWDN_MODE")
def outputPower(self,p=15):
self.setWord(p, "OUTA_PWR")
self.setWord(p, "OUTB_PWR")
def get_osc_values(self, synth_mhz, ref_signal):
""" This function gets oscillator values
"""
# Equation for the output frequency.
# f_out = f_osc * OSC_2X / PLL_R * (PLL_N + PLL_NUM/PLL_DEN) / VCO_DIV
# XXX Right now, I'm not going to use OSC_2X or PLL_R, so this becomes
# f_out = f_osc * (PLL_N + PLL_NUM/PLL_DEN) / VCO_DIV
# Get a good VCO_DIV. The minimum VCO frequency is 1800.
        # Although the datasheet minimum is 1800 MHz, the LMX2581 often fails
        # to lock there, so use 1900 MHz instead.
vco_min = 1900; vco_max = 3800
if synth_mhz > vco_min and synth_mhz < vco_max:
# Bypass VCO_DIV by properly setting OUTA_MUX and OUTB_MUX
VCO_DIV = None
else:
vco_guess = int(vco_min / synth_mhz) + 1
VCO_DIV = vco_guess + vco_guess%2
# Get PLLN, PLL_NUM, and PLL_DEN
pll = float(1 if VCO_DIV is None else VCO_DIV) * synth_mhz / ref_signal
PLL_N = int(pll)
frac = pll - PLL_N
if frac < 1.0/(1<<22): # smallest fraction on the synth
PLL_NUM = 0
PLL_DEN = 100
else:
fraction = _frac.Fraction(frac).limit_denominator(1<<22)
PLL_NUM = fraction.numerator
PLL_DEN = fraction.denominator
return (PLL_N, PLL_NUM, PLL_DEN, VCO_DIV)
def setFreq(self, synth_mhz):
self.setWord(1, 'NO_FCAL')
PLL_N, PLL_NUM, PLL_DEN, VCO_DIV = self.get_osc_values(synth_mhz,self.FOSC)
# Select the VCO frequency
        # VCO1: 1800 to 2270 MHz
# VCO2: 2135 to 2720 MHz
# VCO3: 2610 to 3220 MHz
# VCO4: 3075 to 3800 MHz
freq_vco = self.freq_pd * (PLL_N + float(PLL_NUM)/PLL_DEN)
if freq_vco >= 1800 and freq_vco <= 2270:
VCO_SEL = 0
elif freq_vco >= 2135 and freq_vco <= 2720:
VCO_SEL = 1
elif freq_vco >= 2610 and freq_vco <= 3220:
VCO_SEL = 2
elif freq_vco >= 3075 and freq_vco <= 3800:
VCO_SEL = 3
else:
raise ValueError('VCO frequency is out of range.')
self.setWord(VCO_SEL, 'VCO_SEL')
# Dithering is set in R0, but it is needed for R1 stuff.
if PLL_NUM and PLL_DEN > 200 and not PLL_DEN % 2 and not PLL_DEN % 3:
FRAC_DITHER = 2
else:
FRAC_DITHER = 3
self.setWord(FRAC_DITHER, 'FRAC_DITHER')
# Get the Fractional modulator order
if not PLL_NUM:
FRAC_ORDER = 0
elif PLL_DEN < 20:
FRAC_ORDER = 1
elif PLL_DEN % 3 and FRAC_DITHER == 3:
FRAC_ORDER = 3
else:
FRAC_ORDER = 2
self.setWord(FRAC_ORDER, 'FRAC_ORDER')
# Here is the booting sequence after changing frequency according to 8.5.3
# 1. (optional) If the OUTx_MUX State is changing, program Register R5
# 2. (optional) If the VCO_DIV state is changing, program Register R3.
        # See VCO_DIV[4:0] - VCO Divider Value if programming to a value of 4.
if VCO_DIV == None:
self.setWord(0, 'OUTA_MUX')
self.setWord(0, 'OUTB_MUX')
else:
self.setWord(1, 'OUTA_MUX')
self.setWord(1, 'OUTB_MUX')
VCO_DIV = VCO_DIV / 2 - 1
self.setWord(VCO_DIV, 'VCO_DIV')
# 3. (optional) If the MSB of the fractional numerator or charge pump gain
# is changing, program register R1
PLL_NUM_H = (PLL_NUM & 0b1111111111000000000000) >> 12
PLL_NUM_L = PLL_NUM & 0b0000000000111111111111
self.setWord(PLL_DEN, 'PLL_DEN')
self.setWord(PLL_NUM_H, 'PLL_NUM_H')
self.setWord(PLL_NUM_L, 'PLL_NUM_L')
self.setWord(PLL_N, 'PLL_N')
# 4. (Required) Program register R0
# Activate frequency calibration
self.setWord(0, 'NO_FCAL')
# Sleep 20ms
time.sleep(0.02)
self.setWord(0, 'NO_FCAL')
if self.getDiagnoses('LD_PINSTATE'):
return True
else:
logging.error('LMX2581 not locked')
return False
def write(self, data, addr=None, mask=None):
if mask != None and addr != None:
r = self.read(addr)
r = self._set(r, data, mask)
self.write(r, addr)
elif mask == None and addr != None:
cmd = (data & 0xfffffff0) | (addr & 0xf)
self._write(cmd)
elif mask == None and addr == None:
self._write(data)
else:
raise ValueError("Invalid parameters")
def read(self, addr):
rid = self.getRegId('RDADDR')
# Tell LMX2581 which register to read
r06 = self._set(0x400, addr, self.DICTS[rid]['RDADDR'])
self.write(r06, rid)
# Read the register by issuing a dummy write
self.write(self.CMD10)
return self._read()
def _set(self, d1, d2, mask=None):
# Update some bits of d1 with d2, while keep other bits unchanged
if mask:
d1 = d1 & ~mask
d2 = d2 * (mask & -mask)
return d1 | d2
def _get(self, data, mask):
data = data & mask
return data / (mask & -mask)
def reset(self):
self.setWord(1,'RESET')
def getDiagnoses(self,name=None):
diag = self.read(6)
if name:
if name not in self.MASK_DIAG:
raise ValueError("Invalid parameter")
mask = self.MASK_DIAG.get(name)
return self._get(diag, mask)
else:
result = {}
for name,mask in self.MASK_DIAG.items():
result[name] = self._get(diag, mask)
return result
def getRegister(self,rid=None):
if rid==None:
return [self.getRegister(regId) for regId in self.A_DICT_LIST]
elif rid in self.A_DICT_LIST:
rval = self.read(rid)
return {name: self._get(rval,mask) for name, mask in self.DICTS[rid].items()}
else:
raise ValueError("Invalid parameter")
def getWord(self,name):
rid = self.getRegId(name)
rval = self.read(rid)
return self._get(rval,self.DICTS[rid][name])
def setWord(self,value,name):
rid = self.getRegId(name)
self.write(value,rid,self.DICTS[rid][name])
def getRegId(self,name):
rid = None
for d in [self.DICTS[a] for a in self.A_DICT_LIST]:
if name in d:
rid = self.DICTS.index(d)
break
if rid == None:
raise ValueError("Invalid parameter")
return rid
def loadCfgFromFile(self,filename):
f = open(filename)
lines = [l.split("\t") for l in f.read().splitlines()]
regs = [int(l[1].rstrip(),0) for l in lines]
for reg in regs:
self.write(reg)
if self.getDiagnoses('LD_PINSTATE'):
return True
else:
logging.error('LMX2581 not locked')
return False
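# Worked example (standalone, illustrative values) of the frequency equation
# used by get_osc_values():
#     f_out = f_osc * (PLL_N + PLL_NUM/PLL_DEN) / VCO_DIV
if __name__ == "__main__":
    f_osc, target, vco_div = 10.0, 1234.0, 2  # MHz; VCO runs at 2468 (VCO2)
    pll = vco_div * target / f_osc            # 246.8
    pll_n = int(pll)                          # 246
    frac = _frac.Fraction(pll - pll_n).limit_denominator(1 << 22)
    print(pll_n, frac.numerator, frac.denominator)  # 246 4 5
    f_out = f_osc * (pll_n + float(frac.numerator) / frac.denominator) / vco_div
    print(f_out)                              # 1234.0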
|
ska-sa/casperfpga
|
src/synth.py
|
Python
|
gpl-2.0
| 10,000
|
from JumpScale import j
class system_infomgr(j.code.classGetBase()):
"""
this is an example actor
"""
def __init__(self):
self._te={}
self.actorname="infomgr"
self.appname="system"
#system_infomgr_osis.__init__(self)
def addInfo(self, info, **kwargs):
"""
can be multi line
        param:info dot notation of info e.g. 'water.white.level.sb 10' (as used in graphite)
result bool
"""
#put your code here to implement this method
raise NotImplementedError ("not implemented method addInfo")
def getInfo1h(self, id, start=0, stop=0, **kwargs):
"""
return raw info (resolution is 1h)
        param:id id in dot notation e.g. 'water.white.level.sb' (can be multiple; use comma as separator)
param:start epoch; 0 means all default=0
param:stop epoch; 0 means all default=0
result list(list)
"""
#put your code here to implement this method
raise NotImplementedError ("not implemented method getInfo1h")
def getInfo1hFromTo(self, id, start, stop, **kwargs):
"""
will not return more than 12 months of info, resolution = 1h
        param:id id in dot notation e.g. 'water.white.level.sb'
param:start epoch
param:stop epoch
result dict()
"""
#put your code here to implement this method
raise NotImplementedError ("not implemented method getInfo1hFromTo")
def getInfo5Min(self, id, start=0, stop=0, **kwargs):
"""
return raw info (resolution is 5min)
        param:id id in dot notation e.g. 'water.white.level.sb' (can be multiple; use comma as separator)
param:start epoch; 0 means all default=0
param:stop epoch; 0 means all default=0
result list(list)
"""
#put your code here to implement this method
raise NotImplementedError ("not implemented method getInfo5Min")
def getInfo5MinFromTo(self, id, start, stop, **kwargs):
"""
will not return more than 1 month of info
        param:id id in dot notation e.g. 'water.white.level.sb'
param:start epoch
param:stop epoch
result dict()
"""
#put your code here to implement this method
raise NotImplementedError ("not implemented method getInfo5MinFromTo")
def getInfoWithHeaders(self, id, start, stop, maxvalues=360, **kwargs):
"""
        param:id id in dot notation e.g. 'water.white.level.sb' (can be multiple; use comma as separator)
param:start epoch
param:stop epoch
param:maxvalues nr of values you want to return default=360
result list(list)
"""
#put your code here to implement this method
raise NotImplementedError ("not implemented method getInfoWithHeaders")
def reset(self, **kwargs):
"""
reset all stats
result bool
"""
#put your code here to implement this method
raise NotImplementedError ("not implemented method reset")
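# Illustrative in-memory subclass sketch (hypothetical; a real actor would
# persist to the JumpScale info store rather than a Python list):
class _DemoInfoMgr(system_infomgr):
    def __init__(self):
        system_infomgr.__init__(self)
        self._points = []

    def addInfo(self, info, **kwargs):
        # info is graphite-style dot notation, e.g. 'water.white.level.sb 10'
        for line in info.splitlines():
            key, value = line.rsplit(' ', 1)
            self._points.append((key, float(value)))
        return True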
|
Jumpscale/jumpscale6_core
|
apps/portalbase/system/system__infomgr/methodclass/system_infomgr.gen.py
|
Python
|
bsd-2-clause
| 3,098
|
"""Implements optimization and model problems."""
from cobra.flux_analysis.parsimonious import add_pfba
from micom.duality import fast_dual
from micom.solution import CommunitySolution
from micom.util import (_format_min_growth, _apply_min_growth,
check_modification)
from micom.logger import logger
from sympy.core.singleton import S
from functools import partial
def solve(community, fluxes=True, pfba=True):
"""Get all fluxes stratified by species."""
community.solver.optimize()
if community.solver.status == "optimal":
if fluxes and pfba:
add_pfba(community)
community.solver.optimize()
if fluxes:
sol = CommunitySolution(community)
else:
sol = CommunitySolution(community, slim=True)
return sol
return None
def add_linear_optcom(community, min_growth=0.1):
"""Add a linear version of optcom.
Adds constraints to the community such that each individual grows with
at least its minimal growth rate.
Arguments
---------
community : micom.Community
The community to modify.
min_growth : positive float or array-like object.
The minimum growth rate for each individual in the community. Either
a single value applied to all individuals or one value for each.
"""
check_modification(community)
species = list(community.objectives.keys())
min_growth = _format_min_growth(min_growth, species)
_apply_min_growth(community, min_growth)
community.modification = "linear optcom"
def add_lagrangian(community, tradeoff, linear=False):
"""Add a Lagrangian optimization target to a linear OptCom model.
Lagrangian forms in `micom` are usually objectives of the form
    (1 - tradeoff) * community_objective - tradeoff * cooperativity_cost.
Here, cooperativity cost specifies the "sacrifice" in growth rate an
individual has to make in order to maximize community growth. It is
calculated as the sum of squared differences between the individuals
current and maximal growth rate. In the linear case squares are substituted
by absolute values (Manhattan distance).
Arguments
---------
community : micom.Community
The community to modify.
tradeoff : float in [0,1]
The tradeoff between community growth and the individual "egoistic"
growth target. 0 means optimize only community growth and 1 means
optimize only the individuals own growth target.
linear : boolean
Whether to use a non-linear (sum of squares) or linear version of the
cooperativity cost. If set to False requires a QP-capable solver.
"""
logger.info("adding lagrangian objective to %s" % community.id)
species = list(community.objectives.keys())
max_gcs = community.optimize_all(progress=False)
prob = community.problem
com_expr = S.Zero
cost_expr = S.Zero
abundances = community.abundances
logger.info("adding expressions for %d species" % len(species))
for sp in species:
com_expr += abundances[sp] * community.objectives[sp]
v_max_gc = prob.Variable("gc_constant_" + sp, lb=max_gcs[sp],
ub=max_gcs[sp])
community.add_cons_vars([v_max_gc])
ex = v_max_gc - community.objectives[sp]
if not linear:
ex = ex**2
cost_expr += ex.expand()
community.objective = (S.One - tradeoff) * com_expr - tradeoff * cost_expr
if "with lagrangian" not in community.modification:
community.modification += " with lagrangian"
logger.info("finished adding lagrangian objective to %s" % community.id)
def add_dualized_optcom(community, min_growth):
"""Add dual Optcom variables and constraints to a community.
Uses the original formulation of OptCom and solves the following
multi-objective problem::
        maximize community_growth
        s.t. maximize growth_rate_i for all i
             s.t. Sv_i = 0
                  lb_i <= v_i <= ub_i
Notes
-----
This method will only find one arbitrary solution from the Pareto front.
There may exist several other optimal solutions.
Arguments
---------
community : micom.Community
The community to modify.
min_growth : positive float or array-like object.
The minimum growth rate for each individual in the community. Either
a single value applied to all individuals or one value for each.
"""
logger.info("adding dual optcom to %s" % community.id)
check_modification(community)
species = list(community.objectives.keys())
min_growth = _format_min_growth(min_growth, species)
prob = community.solver.interface
    # Temporarily substitute the objective with the sum of individual objectives
# for correct dual variables
old_obj = community.objective
community.objective = S.Zero
for expr in community.objectives.values():
community.objective += expr
_apply_min_growth(community, min_growth)
dual_coefs = fast_dual(community)
logger.info("adding expressions for %d species" % len(species))
for sp in species:
primal_const = community.constraints["objective_" + sp]
coefs = primal_const.get_linear_coefficients(primal_const.variables)
coefs.update({dual_var: -coef for dual_var, coef in
dual_coefs.items() if sp in dual_var.name})
obj_constraint = prob.Constraint(
S.Zero, lb=0, ub=0, name="optcom_suboptimality_" + sp)
community.add_cons_vars([obj_constraint])
community.solver.update()
obj_constraint.set_linear_coefficients(coefs)
community.objective = old_obj
community.modification = "dual optcom"
logger.info("finished adding dual optcom to %s" % community.id)
def add_moma_optcom(community, min_growth, linear=False):
"""Add a dualized MOMA version of OptCom.
Solves a MOMA (minimization of metabolic adjustment) formulation of OptCom
given by::
        minimize cooperativity_cost
        s.t. maximize community_objective
             s.t. Sv = 0
                  lb <= v <= ub
    where cooperativity_cost = sum (growth_rate - max_growth)**2
    if linear=False, or
    cooperativity_cost = sum |growth_rate - max_growth|
    if linear=True.
Arguments
---------
community : micom.Community
The community to modify.
min_growth : positive float or array-like object.
The minimum growth rate for each individual in the community. Either
a single value applied to all individuals or one value for each.
linear : boolean
Whether to use a non-linear (sum of squares) or linear version of the
cooperativity cost. If set to False requires a QP-capable solver.
"""
logger.info("adding dual %s moma to %s" % (
"linear" if linear else "quadratic", community.id))
check_modification(community)
species = list(community.objectives.keys())
min_growth = _format_min_growth(min_growth, species)
prob = community.solver.interface
old_obj = community.objective
coefs = old_obj.get_linear_coefficients(old_obj.variables)
# Get maximum individual growth rates
max_gcs = community.optimize_all(progress=False)
_apply_min_growth(community, min_growth)
dual_coefs = fast_dual(community)
coefs.update({
v: -coef for v, coef in
dual_coefs.items()})
obj_constraint = prob.Constraint(
S.Zero, lb=0, ub=0,
name="optcom_suboptimality")
community.add_cons_vars([obj_constraint])
community.solver.update()
obj_constraint.set_linear_coefficients(coefs)
obj_expr = S.Zero
logger.info("adding expressions for %d species" % len(species))
for sp in species:
v = prob.Variable("gc_constant_" + sp, lb=max_gcs[sp], ub=max_gcs[sp])
community.add_cons_vars([v])
ex = v - community.objectives[sp]
if not linear:
ex = ex**2
obj_expr += ex.expand()
community.objective = prob.Objective(obj_expr, direction="min")
community.modification = "moma optcom"
logger.info("finished dual moma to %s" % community.id)
_methods = {"linear": [add_linear_optcom],
"lagrangian": [add_linear_optcom, add_lagrangian],
"linear lagrangian": [add_linear_optcom,
partial(add_lagrangian, linear=True)],
"original": [add_dualized_optcom],
"moma": [add_moma_optcom],
"lmoma": [partial(add_moma_optcom, linear=True)]}
def optcom(community, strategy, min_growth, tradeoff, fluxes, pfba):
"""Run OptCom for the community.
OptCom methods are a group of optimization procedures to find community
solutions that provide a tradeoff between the cooperative community
growth and the egoistic growth of each individual [#p1]_. `micom`
provides several strategies that can be used to find optimal solutions:
- "linear": Applies a lower bound for the individual growth rates and
finds the optimal community growth rate. This is the fastest methods
but also ignores that individuals might strive to optimize their
individual growth instead of community growth.
- "lagrangian": Optimizes a joint objective containing the community
objective (maximized) as well as a cooperativity cost which
represents the distance to the individuals "egoistic" maximum growth
rate (minimized). Requires the `tradeoff` parameter. This method is
still relatively fast and does require only few additional variables.
- "linear lagrangian": The same as "lagrangian" only with a linear
representation of the cooperativity cost (absolute value).
- "moma": Minimization of metabolic adjustment. Simultaneously
optimizes the community objective (maximize) and the cooperativity
cost (minimize). This method finds an exact maximum but doubles the
number of required variables, thus being slow.
- "lmoma": The same as "moma" only with a linear
representation of the cooperativity cost (absolute value).
- "original": Solves the multi-objective problem described in [#p1]_.
      Here, the community growth rate is maximized simultaneously with all
individual growth rates. Note that there are usually many
Pareto-optimal solutions to this problem and the method will only
give one solution. This is also the slowest method.
Parameters
----------
community : micom.Community
The community to optimize.
strategy : str
The strategy used to solve the OptCom formulation. Defaults to
"lagrangian" which gives a decent tradeoff between speed and
correctness.
min_growth : float or array-like
The minimal growth rate required for each individual. May be a
single value or an array-like object with the same length as there
are individuals.
tradeoff : float in [0, 1]
Only used for lagrangian strategies. Must be between 0 and 1 and
describes the strength of the cooperativity cost / egoism. 1 means
optimization will only minimize the cooperativity cost and zero
means optimization will only maximize the community objective.
fluxes : boolean
Whether to return the fluxes as well.
pfba : boolean
Whether to obtain fluxes by parsimonious FBA rather than
"classical" FBA.
Returns
-------
micom.CommunitySolution
The solution of the optimization. If fluxes==False will only contain
the objective value, community growth rate and individual growth rates.
References
----------
.. [#p1] OptCom: a multi-level optimization framework for the metabolic
modeling and analysis of microbial communities.
Zomorrodi AR, Maranas CD. PLoS Comput Biol. 2012 Feb;8(2):e1002363.
doi: 10.1371/journal.pcbi.1002363, PMID: 22319433
"""
if strategy not in _methods:
raise ValueError("strategy must be one of {}!".format(
",".join(_methods)))
funcs = _methods[strategy]
with community as com:
# Add needed variables etc.
funcs[0](com, min_growth)
if "lagrangian" in strategy:
funcs[1](com, tradeoff)
return solve(community, fluxes=fluxes, pfba=pfba)
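# Minimal usage sketch (illustrative; assumes a `micom.Community` built
# elsewhere, e.g. from a taxonomy table):
#
#   from micom import Community
#   com = Community(taxonomy)   # taxonomy: a pandas DataFrame
#   sol = optcom(com, strategy="lagrangian", min_growth=0.1,
#                tradeoff=0.5, fluxes=False, pfba=False)
#   print(sol.growth_rate)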
|
cdiener/micom
|
micom/problems.py
|
Python
|
apache-2.0
| 12,424
|
'''
This file contains all the functions that constitute the "frontend of the backend",
i.e. native Python plotting functions. All actual plots are generated by plotting.py --
this is purely about displaying them. manualfit() and geogui() rely on PyQt4, which
is likely to fail, so it's sequestered.
Version: 2019aug06
'''
## Imports and globals...need Qt since matplotlib doesn't support edit boxes, grr!
from optima import OptimaException, dcp, printv, sigfig, makeplots, getplotselections, gridcolors, odict, isnumber, promotetolist, loadobj, sanitizeresults, reanimateplots
from pylab import figure, close, floor, ion, ioff, isinteractive, ceil, array, show, pause
from pylab import subplot, ylabel, transpose, legend, fill_between, xlim, title
from matplotlib.widgets import CheckButtons, Button
global panel, results, origpars, tmppars, parset, fulllabellist, fullkeylist, fullsubkeylist, fulltypelist, fullvallist, plotfig, panelfig, check, checkboxes, updatebutton, clearbutton, defaultsbutton, advancedbutton, closebutton, plusbutton, minusbutton, plotargs, scrwid, scrhei, globaladvanced # For manualfit GUI
if 1: panel, results, origpars, tmppars, parset, fulllabellist, fullkeylist, fullsubkeylist, fulltypelist, fullvallist, plotfig, panelfig, check, checkboxes, updatebutton, clearbutton, defaultsbutton, advancedbutton, closebutton, plusbutton, minusbutton, plotargs, scrwid, scrhei, globaladvanced = [None]*25
scrwid, scrhei = 24, 12 # Specify these here...if too large, should shrink anyway
def importpyqt():
''' Try to import pyqt, either PyQt4 or PyQt5, but allow it to fail '''
try:
from PyQt4 import QtGui as pyqt
except:
try: from PyQt5 import QtWidgets as pyqt
except: pyqt = Exception('QtGui could not be imported')
return pyqt
pyqt = importpyqt()
##############################################################################
### USER-VISIBLE FUNCTIONS
##############################################################################
def plotresults(results, toplot=None, fig=None, figargs=None, **kwargs):
'''
Does the hard work for updateplots() for pygui()
Keyword arguments if supplied are passed on to figure().
Usage:
results = P.runsim('default')
plotresults(results)
Version: 2016jan25
'''
if figargs is None: figargs = dict()
if fig is None: fig = makenewfigure(**figargs)
# Do plotting
wasinteractive = isinteractive() # You might think you can get rid of this...you can't!
if wasinteractive: ioff()
width,height = fig.get_size_inches()
# Actually create plots
if 'figsize' in kwargs: kwargs.pop('figsize', None)
plots = makeplots(results, toplot=toplot, die=True, figsize=(width, height), fig=fig, **kwargs)
naxes = len(plots[0].axes) # If done interactively, they're all in the first plot
# Calculate the rows and columns
aspectratio = 1.5 # The target aspect ratio for plots, for choosing how many rows and columns to use
nrows = 1
ncols = 1
while nrows*ncols < naxes:
if width/ncols/aspectratio > height/nrows: ncols += 1 # Height is more squashed: add a column
else: nrows += 1 # Width is more squashed: add a row
# Adjust margins
fig.subplots_adjust(left=0.07, bottom=0.05, right=0.85, top=0.95, wspace=0.9, hspace=0.7) # NB, 1.0 seems meaningless for wspace and hspace...
for a,ax in enumerate(plots[-1].axes):
ax.change_geometry(nrows, ncols, a+1)
# Handle interactivity like a boss
if wasinteractive: ion()
show()
return None
def pygui(tmpresults, toplot=None, advanced=False, verbose=2, figargs=None, **kwargs):
'''
PYGUI
Make a Python GUI for plotting results. Opens up a control window and a plotting window,
and when "Update" is clicked, will clear the contents of the plotting window and replot.
Usage:
pygui(results, [toplot])
where results is the output of e.g. runsim() and toplot is an optional list of form e.g.
toplot = ['prev-tot', 'inci-pop']
(see epiformatslist in plotting.py)
Warning: the plots won't resize automatically if the figure is resized, but if you click
"Update", then they will.
Version: 1.3 (2017feb07)
'''
global check, checkboxes, updatebutton, clearbutton, defaultsbutton, advancedbutton, closebutton, plusbutton, minusbutton, panelfig, results, plotargs, globaladvanced
plotargs = kwargs # Reset global to match function input
results = sanitizeresults(tmpresults)
globaladvanced = advanced
## Define options for selection
plotselections = getplotselections(results, advanced=globaladvanced)
checkboxes = plotselections['keys']
checkboxnames = plotselections['names']
isselected = []
toplot = promotetolist(toplot) # Ensure it's a list
if not toplot or toplot[0] is None or toplot[0]=='default':
if len(toplot): toplot.pop(0) # Remove the first element
defaultboxes = [checkboxes[i] for i,tf in enumerate(plotselections['defaults']) if tf] # Back-convert defaults from true/false list to list of keys
toplot.extend(defaultboxes)
if len(toplot):
tmptoplot = dcp(toplot) # Make a copy to compare arguments
for key in checkboxes:
if key in toplot:
isselected.append(True)
tmptoplot.remove(key)
else:
isselected.append(False)
if len(tmptoplot)!=0:
errormsg = 'Not all keys were recognized; mismatched ones were:\n'
errormsg += '%s\n' % tmptoplot
errormsg += 'Available keys are:\n'
errormsg += '%s' % checkboxes
if not globaladvanced: errormsg += '\nSet advanced=True for more options'
printv(errormsg, 1, verbose=verbose)
## Set up control panel
if advanced:
figwidth = 14
advwid = 0.6 # Adjust button width
else:
figwidth = 7
advwid = 1.0
figheight = 12
panelfig = figure(num='Optima control panel', figsize=(figwidth,figheight), facecolor=(0.95, 0.95, 0.95)) # Open control panel
xinit = 0.10*advwid
if advanced: cbapos = [xinit*advwid, 0.07, 0.9, 1.8] # cba="check box axes position": extra tall, for moving later
else: cbapos = [xinit, 0.07, 0.8, 0.9]
ypos = 0.02 # y-position of buttons
bwid = 0.14*advwid # x-width of buttons
bhei = 0.03 # y-height of buttons
sep = 0.165 # Separation between buttons
pmwid = 0.03*advwid # Width of plus/minus buttons
checkboxaxes = panelfig.add_axes(cbapos) # Create checkbox locations
updateaxes = panelfig.add_axes([xinit+0*sep, ypos, bwid, bhei]) # Create update button location
clearaxes = panelfig.add_axes([xinit+1*sep, ypos, bwid, bhei]) # Create clear button location
defaultsaxes = panelfig.add_axes([xinit+2*sep, ypos, bwid, bhei]) # Create defaults button location
    advancedaxes = panelfig.add_axes([xinit+3*sep, ypos, bwid, bhei]) # Create advanced button location
closeaxes = panelfig.add_axes([xinit+4*sep, ypos, bwid, bhei]) # Create close button location
plusaxes = panelfig.add_axes([xinit+5*sep, ypos+0.015, pmwid, 0.02]) # Create plus button location
    minusaxes = panelfig.add_axes([xinit+5*sep, ypos-0.005, pmwid, 0.02]) # Create minus button location
check = CheckButtons(checkboxaxes, checkboxnames, isselected) # Actually create checkboxes
# Reformat the checkboxes
stastr = ' - stacked'
perstr = ' - population'
nboxes = len(check.rectangles)
for b in range(nboxes):
label = check.labels[b]
labeltext = label.get_text()
labelpos = label.get_position()
label.set_position((labelpos[0]*0.3,labelpos[1])) # Not sure why by default the check boxes are so far away
        if labeltext.endswith(perstr): label.set_text('Per population') # Simplify label
        elif labeltext.endswith(stastr): label.set_text('Stacked') # Simplify label
else: label.set_weight('bold')
# If advanced, split into two columns -- messy since Matplotlib sucks! :(
if advanced:
for b in range(nboxes):
percol = floor(nboxes/2.0) # Number of boxes per column
            col = floor(b/percol) # Which column to plot in
labelpos = list(check.labels[b].get_position()) # Get label positions and convert tuple -> list
rectpos = list(check.rectangles[b].get_xy()) # Likewise for rectangles
line0pos = check.lines[b][0].get_data() # There are two lines, and they have data
line1pos = check.lines[b][1].get_data()
yoffset = 0.5 # Specify amount to move everything in column 0 down by
xoffset = 0.45 # Specify amount to move everything on column 1 over by
if col==0: # Left column: shift everything down
labelpos[1] -= yoffset
rectpos[1] -= yoffset
for i in range(2): # Start and end points
line0pos[1][i] -= yoffset
line1pos[1][i] -= yoffset
else: # Right column: shift everything over
labelpos[0] += xoffset
rectpos[0] += xoffset
for i in range(2): # Start and end points
line0pos[0][i] += xoffset
line1pos[0][i] += xoffset
check.labels[b].set_position(labelpos) # Actually set positions
check.rectangles[b].set_xy(rectpos)
check.lines[b][0].set_data(line0pos)
check.lines[b][1].set_data(line1pos)
if advanced: advlabel = 'Normal'
else: advlabel = 'Advanced'
blue = (0.4,0.7,1.0) # Also green = (0.2,0.7,0.1), red = (1.0,0.5,0.1)
white = (1.0,1.0,1.0)
black = (0.4,0.4,0.4)
darker = 0.7
updatebutton = Button(updateaxes, 'Update', color=blue, hovercolor=tuple(array(blue)*darker))
clearbutton = Button(clearaxes, 'Clear', color=blue, hovercolor=tuple(array(blue)*darker))
defaultsbutton = Button(defaultsaxes, 'Defaults', color=blue, hovercolor=tuple(array(blue)*darker))
advancedbutton = Button(advancedaxes, advlabel, color=blue, hovercolor=tuple(array(blue)*darker))
closebutton = Button(closeaxes, 'Close', color=blue, hovercolor=tuple(array(blue)*darker))
plusbutton = Button(plusaxes, '+', color=white, hovercolor=tuple(array(white)*darker))
minusbutton = Button(minusaxes, '-', color=black, hovercolor=tuple(array(black)*darker))
updatebutton.on_clicked(updateplots) # Update figure if button is clicked
clearbutton.on_clicked(clearselections) # Clear all checkboxes
defaultsbutton.on_clicked(defaultselections) # Return to default selections
    advancedbutton.on_clicked(advancedselections) # Toggle advanced options
closebutton.on_clicked(closegui) # Close figures
plusbutton.on_clicked(zoomin) # Zoom in on plots
    minusbutton.on_clicked(zoomout) # Zoom out of plots
updateplots(None) # Plot initially -- ACTUALLY GENERATES THE PLOTS
return None
def manualfit(project=None, parsubset=None, name=-1, ind=0, maxrows=25, verbose=2, advanced=False, figargs=None, **kwargs):
'''
Create a GUI for doing manual fitting via the backend. Opens up three windows:
results, results selection, and edit boxes.
parsubset can be a list of parameters the user can fit, e.g.
parsubset=['initprev','force']
maxrows is the number of rows (i.e. parameters) to display in each column.
Note: to get advanced parameters and plots, set advanced=True.
Version: 1.2 (2017feb10)
'''
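    # A minimal usage sketch (the project and parameter names below are illustrative):
    #     import optima as op
    #     P = op.defaults.defaultproject('simple')
    #     op.manualfit(project=P, parsubset=['force'], name=-1)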
## Random housekeeping
global panel, results, origpars, tmppars, parset, fulllabellist, fullkeylist, fullsubkeylist, fulltypelist, fullvallist, globaladvanced
globaladvanced = advanced
if figargs is None: figargs = dict()
fig = figure(**figargs); close(fig) # Open and close figure...dumb, no? Otherwise get "QWidget: Must construct a QApplication before a QPaintDevice"
ion() # We really need this here!
nsigfigs = 4
boxes = []
texts = []
## Get the list of parameters that can be fitted
parset = dcp(project.parsets[name])
tmppars = parset.pars
origpars = dcp(tmppars)
mflists = parset.manualfitlists(parsubset=parsubset, advanced=globaladvanced)
fullkeylist = mflists['keys']
fullsubkeylist = mflists['subkeys']
fulltypelist = mflists['types']
fullvallist = mflists['values']
fulllabellist = mflists['labels']
nfull = len(fulllabellist) # The total number of boxes needed
results = project.runsim(name)
pygui(results, **kwargs)
def closewindows():
''' Close all three open windows '''
closegui()
panel.close()
## Define update step
def manualupdate():
''' Update GUI with new results '''
global results, tmppars, fullkeylist, fullsubkeylist, fulltypelist, fullvallist
# Update parameter values from GUI values
for b,box in enumerate(boxes):
fullvallist[b] = eval(str(box.text()))
# Create lists for update
mflists = odict()
mflists['keys'] = fullkeylist
mflists['subkeys'] = fullsubkeylist
mflists['types'] = fulltypelist
mflists['values'] = fullvallist
parset.update(mflists)
# Rerun
simparslist = parset.interp(start=project.settings.start, end=project.settings.end, dt=project.settings.dt)
results = project.runsim(simpars=simparslist)
updateplots(tmpresults=results, **kwargs)
## Keep the current parameters in the project; otherwise discard
def keeppars():
''' Little function to reset origpars and update the project '''
global origpars, tmppars, parset
origpars = dcp(tmppars)
parset.pars = tmppars
project.parsets[name].pars = tmppars
print('Parameters kept')
return None
def resetpars():
''' Reset the parameters to the last saved version -- WARNING, doesn't work '''
global origpars, tmppars, parset
tmppars = dcp(origpars)
parset.pars = tmppars
for i in range(nfull): boxes[i].setText(sigfig(fullvallist[i], sigfigs=nsigfigs))
simparslist = parset.interp(start=project.settings.start, end=project.settings.end, dt=project.settings.dt)
results = project.runsim(simpars=simparslist)
updateplots(tmpresults=results)
return None
## Set up GUI
npars = len(fullkeylist)
leftmargin = 10
rowheight = 25
colwidth = 450
ncols = floor(npars/(maxrows+10*advanced))+1
nrows = ceil(nfull/float(ncols))
panelwidth = colwidth*ncols
panelheight = rowheight*(nfull/ncols+2)+50
buttonheight = panelheight-rowheight*1.5
boxoffset = 300+leftmargin
panel = pyqt.QWidget() # Create panel widget
panel.setGeometry(100, 100, panelwidth, panelheight)
spottaken = [] # Store list of existing entries, to avoid duplicates
for i in range(nfull):
row = (i % nrows) + 1
col = floor(i/float(nrows))
spot = (row,col)
if spot in spottaken:
errormsg = 'Cannot add a button to %s since there already is one!' % str(spot)
raise OptimaException(errormsg)
else: spottaken.append(spot)
texts.append(pyqt.QLabel(parent=panel))
texts[-1].setText(fulllabellist[i])
texts[-1].move(leftmargin+colwidth*col, rowheight*row)
boxes.append(pyqt.QLineEdit(parent = panel)) # Actually create the text edit box
boxes[-1].move(boxoffset+colwidth*col, rowheight*row)
printv('Setting up GUI checkboxes: %s' % [i, fulllabellist[i], boxoffset+colwidth*col, rowheight*row], 4, verbose)
boxes[-1].setText(sigfig(fullvallist[i], sigfigs=nsigfigs))
boxes[-1].returnPressed.connect(manualupdate)
keepbutton = pyqt.QPushButton('Keep', parent=panel)
resetbutton = pyqt.QPushButton('Reset', parent=panel)
closebutton = pyqt.QPushButton('Close', parent=panel)
keepbutton.move(1*panelwidth/4, buttonheight)
resetbutton.move(2*panelwidth/4, buttonheight)
closebutton.move(3*panelwidth/4, buttonheight)
keepbutton.clicked.connect(keeppars)
resetbutton.clicked.connect(resetpars)
closebutton.clicked.connect(closewindows)
panel.show()
def plotpeople(project=None, people=None, tvec=None, ind=None, simind=None, start=2, end=None, pops=None, animate=False, skipempty=True, verbose=2, toplot=None, **kwargs):
'''
A function to plot all people as a stacked plot
"Exclude" excludes the first N health states -- useful for excluding susceptibles.
Usage example:
import optima as op
P = op.defaults.defaultproject('simple')
P.runsim()
people = P.results[-1].raw[0]['people']
op.plotpeople(P, people)
    NB: for a multiresult, simind selects which simulation to plot (defaults to 1 if not supplied).
Version: 2018apr0
'''
if pops is None: pops = Ellipsis # This is a slice
elif isnumber(pops): pops = [pops]
if pops is not Ellipsis: plottitle = str(array(project.parsets[0].popkeys)[array(pops)])
else: plottitle = 'All populations'
legendsettings = {'loc':'upper left', 'bbox_to_anchor':(1.02, 1), 'fontsize':11, 'title':''}
nocolor = (0.9,0.9,0.9)
labels = project.settings.statelabels
if toplot is None: toplot = 'people'
if people is None:
if ind is None: ind=-1
try:
people = project.results[ind].raw[0][toplot] # Try to get default people to plot
except:
if simind is None: simind = 1
people = project.results[ind].raw[simind][0][toplot] # It's a multiresult: need another index
plotstyles = odict([
('susreg', ('|','|')),
('progcirc', ('+','|')),
('undx', ('O','o')),
('dx', ('.','o')),
('care', ('*','*')),
('lost', ('X','|')),
('usvl', ('.','o')),
('svl', ('*','*')),
])
hatchstyles = []
linestyles = []
for key in plotstyles.keys():
hatchstyles.extend([plotstyles[key][0] for lab in labels if lab.startswith(key)])
linestyles.extend([plotstyles[key][1] for lab in labels if lab.startswith(key)])
labels = labels[start:end]
hatchstyles = hatchstyles[start:end]
linestyles = linestyles[start:end]
ppl = people[start:end,:,:] # Exclude initial people
ppl = ppl[:,pops,:] # Filter selected populations
    ppl = ppl[:,:,:].sum(axis=1) # Sum over populations
ppl = transpose(ppl) # So time is plotted on x-axis
nstates = len(labels)
colors = gridcolors(nstates)
if tvec is None:
tvec = project.settings.maketvec() # Won't necessarily match this ppl, supply as argument if so
bottom = 0*tvec
makenewfigure(**kwargs)
ax = subplot(111)
ylabel('Number of people')
title(plottitle)
xlim((tvec[0], tvec[-1]))
for st in range(nstates-1,-1,-1):
this = ppl[:,st]
if sum(this):
thiscolor = colors[st]
haspeople = True
else:
thiscolor = nocolor
haspeople = False
if haspeople or not skipempty:
printv('State: %i/%i Hatch: %s Line: %s Color: %s' % (st, nstates, hatchstyles[st], linestyles[st], thiscolor), 4, verbose)
fill_between(tvec, bottom, this+bottom, facecolor=thiscolor, alpha=1, lw=0, hatch=hatchstyles[st])
bottom += this
# Legend stuff
ax.plot((0, 0), (0, 0), color=thiscolor, linewidth=10, label=labels[st], marker=linestyles[st]) # This loop is JUST for the legends! since fill_between doesn't count as a plot object... -- TODO: this is copied from plotepi(), perhaps streamline
handles, legendlabels = ax.get_legend_handles_labels()
legend(reversed(handles), reversed(legendlabels), **legendsettings)
if animate:
show()
pause(0.001)
return None
global plotparsbackbut, plotparsnextbut, plotparslider
def plotpars(parslist=None, start=None, end=None, verbose=2, rows=6, cols=5, figsize=(16,12), fontsize=8, die=True, **kwargs):
'''
A function to plot all parameters. 'pars' can be an odict or a list of pars odicts.
Version: 2016jan30
'''
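    # A minimal usage sketch (assumes an Optima project "P" is in scope):
    #     plotpars(P)                   # Plots the pars of the project's last parset
    #     plotpars(P.parsets[-1].pars)  # Equivalent: pass a pars odict directly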
from optima import Par, makesimpars, tic, toc
from numpy import array, vstack
import matplotlib.pyplot as plt
from matplotlib.widgets import Button, Slider
global position, plotparsbackbut, plotparsnextbut, plotparslider
position = 0
# In case the user tries to enter a project or parset -- TODO: make more flexible
tmp = parslist
try: parslist = tmp.parsets[-1].pars # If it's a project
except:
try: parslist = tmp.pars # If it's a parset
except: pass
parslist = promotetolist(parslist) # Convert to list
try:
for i in range(len(parslist)): parslist[i] = parslist[i].pars
except: pass # Assume it's in the correct form -- a list of pars odicts
allplotdata = []
for pars in parslist:
count = 0
simpars = makesimpars(pars, start=start, end=end)
tvec = simpars['tvec']
plotdata = array([['name','simpar','par_t', 'par_y']], dtype=object) # Set up array for holding plotting results
for i,key1 in enumerate(pars):
par = pars[key1]
if isinstance(par, Par):
if hasattr(par,'y'): pardata = par.y # TODO: consider adding par.m as well
elif hasattr(par,'p'): pardata = par.p # Population size
                else: raise OptimaException('Parameter "%s" has neither a y nor a p attribute' % key1)
if hasattr(pardata, 'keys') and len(pardata.keys())>0: # Only ones that don't have a len are temp pars
nkeys = len(pardata.keys())
for k,key2 in enumerate(pardata.keys()):
if hasattr(par, 't'): t = par.t[key2]
else: t = tvec[0] # For a constant
count += 1
if nkeys==1: thissimpar = simpars[key1]
else: thissimpar = simpars[key1][k]
thisplot = array(['%3i. %s - %s' % (count-1, key1, key2), thissimpar, t, pardata[key2]], dtype=object)
if array(thissimpar).sum()==0: thisplot[0] += ' (zero)'
plotdata = vstack([plotdata, thisplot])
else:
t = tvec[0] # For a constant
count += 1
thisplot = array(['%3i. %s' % (count-1, key1), simpars[key1], t, pardata], dtype=object)
plotdata = vstack([plotdata, thisplot])
plotdata = plotdata[1:,:] # Remove header
allplotdata.append(plotdata)
## Do plotting
nplots = len(plotdata)
if any([len(pltd)!=nplots for pltd in allplotdata]):
printv('Warning, not all pars are the same length, only plotting first', 2, verbose)
        allplotdata = allplotdata[:1] # Keep just the first, as a one-element list
nperscreen = rows*cols
plotparsfig = plt.figure(facecolor=(0.9,0.9,0.9), figsize=figsize)
plt.subplots_adjust(left=0.05, right=0.95, bottom=0.1, top=0.95, wspace=0.3, hspace=0.4)
plotparsaxs = []
count = 0
for row in range(rows):
for col in range(cols):
count += 1
plotparsaxs.append(plotparsfig.add_subplot(rows, cols, count))
backframe = plotparsfig.add_axes([0.1, 0.03, 0.1, 0.03])
sliderframe = plotparsfig.add_axes([0.3, 0.03, 0.4, 0.03])
nextframe = plotparsfig.add_axes([0.8, 0.03, 0.1, 0.03])
plotparsbackbut = Button(backframe, 'Back')
plotparsnextbut = Button(nextframe, 'Next')
plotparslider = Slider(sliderframe, '', 0, nplots, valinit=0, valfmt='%d')
def updateb(event=None):
global position
position -= nperscreen
position = max(0,position)
position = min(nplots-nperscreen, position)
plotparslider.set_val(position)
def updaten(event=None):
global position
position += nperscreen
position = max(0,position)
position = min(nplots-nperscreen, position)
plotparslider.set_val(position)
def update(tmp=0):
global position, plotparslider
position = tmp
position = max(0,position)
position = min(nplots-nperscreen, position)
t = tic()
for i,ax in enumerate(plotparsaxs):
ax.cla()
for item in ax.get_xticklabels() + ax.get_yticklabels(): item.set_fontsize(fontsize)
ax.hold(True)
nplt = i+position
if nplt<nplots:
for pd,plotdata in enumerate(allplotdata):
try:
this = plotdata[nplt,:]
ax.set_title(this[0])
if isinstance(this[1], dict):
if len(this[1].keys())==1: this[1] = this[1][0] # Actually needs to be an odict
elif len(this[1].keys())>1: raise OptimaException('Expecting a number or an array or even an odict with one key, but got an odict with multiple keys (%s)' % this[0])
if isnumber(this[1]): ax.plot(tvec, 0*tvec+this[1])
elif len(this[1])==0: ax.set_title(this[0]+' is empty')
elif len(this[1])==1: ax.plot(tvec, 0*tvec+this[1])
elif len(this[1])==len(tvec): ax.plot(tvec, this[1])
else: pass # Population size, doesn't use control points
printv('Plot %i/%i...' % (i*len(allplotdata)+pd+1, len(plotparsaxs)*len(allplotdata)), 2, verbose)
except Exception as E:
if die: raise E
                        else: print('Plotting failed: %s' % repr(E))
try:
if not(hasattr(this[3],'__len__') and len(this[3])==0): ax.scatter(this[2],this[3])
except Exception: pass # print('Problem with "%s": "%s"' % (this[0], repr(E)))
if pd==len(allplotdata)-1: # Do this for the last plot only
ax.set_ylim((0,1.1*ax.get_ylim()[1]))
ax.set_xlim((tvec[0],tvec[-1]))
toc(t)
update()
plotparsbackbut.on_clicked(updateb)
plotparsnextbut.on_clicked(updaten)
plotparslider.on_changed(update)
return allplotdata
def showplots(plots=None, figsize=None):
'''
    This function can be used to show plots (in separate figure windows), independently
    of generating them.
Example:
import optima as op
P = op.demo(0)
plot = plotcascade(results=P.result(), interactive=False)
op.showplots(plot) # Creates one plot
NOTE: This function is purely remedial; the same effect can be accomplished more easily via:
op.plotcascade(results=P.result(), interactive=True)
Version: 2017may29
'''
ion()
if figsize is None: figsize = (10,4)
reanimateplots(plots) # Reconnect the plots to the matplotlib backend so they can be rendered
nplots = len(plots)
figs = []
for p in range(nplots):
figs.append(figure(facecolor=(1,1,1),figsize=figsize))
thisfig = figs[p]
thisplot = plots[p].axes[0]
thisfig._axstack.add(thisfig._make_key(thisplot), thisplot) # Add a plot to the axis stack
thisplot.change_geometry(1, 1, 1) # Change geometry to be correct
orig = thisplot.get_position() # get the original position
widthfactor = 0.9
heightfactor = 0.9
pos2 = [orig.x0, orig.y0, orig.width*widthfactor, orig.height*heightfactor]
thisplot.set_position(pos2) # set a new position
if nplots>1: return figs
else: return figs[0] # Don't return a list if a single figure
def loadplot(filename=None):
'''
Load a plot from a file and reanimate it.
Example usage:
import optima as op
P = op.demo(0)
op.saveplots(P, toplot='cascade', filetype='fig')
Later:
cascadefig = op.loadplot('cascade.fig')
'''
ion() # Without this, it doesn't show up
fig = loadobj(filename)
reanimateplots(fig)
return fig
##############################################################################
### HELPER FUNCTIONS
##############################################################################
def makenewfigure(**figargs):
    ''' Create a new figure, using a default near-screen-sized figsize and a white background '''
global scrwid, scrhei
if 'figsize' not in figargs: figargs['figsize'] = (scrwid, scrhei)
if 'facecolor' not in figargs: figargs['facecolor'] = (1,1,1)
fig = figure(**figargs) # Create a figure based on supplied kwargs, if any
return fig
def closegui(event=None):
''' Close all GUI windows '''
global panelfig, plotfig
try: close(plotfig)
except: pass
try: close(panelfig)
except: pass
return None
def getchecked(check=None):
''' Return a list of whether or not each check box is checked or not '''
ischecked = []
for box in range(len(check.lines)): ischecked.append(check.lines[box][0].get_visible()) # Stupid way of figuring out if a box is ticked or not
return ischecked
def clearselections(event=None):
global check
for box in range(len(check.lines)):
for i in [0,1]: check.lines[box][i].set_visible(False)
updateplots()
return None
def defaultselections(event=None):
''' Reset to default options '''
global check, results, globaladvanced
plotselections = getplotselections(results, advanced=globaladvanced) # WARNING, assumes defaults don't change with advanced
for box,tf in enumerate(plotselections['defaults']):
if tf: # True if in defaults, false otherwise
for i in [0,1]: check.lines[box][i].set_visible(True) # Two lines...stupid
else:
for i in [0,1]: check.lines[box][i].set_visible(False)
updateplots()
return None
def advancedselections(event=None):
    ''' Toggle advanced options '''
global check, checkboxes, updatebutton, clearbutton, defaultsbutton, advancedbutton, closebutton, plotfig, panelfig, results, plotargs, globaladvanced
globaladvanced = not(globaladvanced) # Toggle
    try: close(plotfig) # These work better here than calling closegui() directly
except: pass
try: close(panelfig)
except: pass
check, checkboxes, updatebutton, clearbutton, defaultsbutton, advancedbutton, closebutton, plotfig, panelfig, plotargs = [None]*10 # Clear the bejesus out of everything
pygui(results, advanced=globaladvanced)
print('Switching to/from advanced; if GUI hangs, press enter in console') # Unfortunately, this happens from time to time
pause(0.2) # Without this, it doesn't work...siiiigh
return None
def zoomplots(event=None, ratio=1.0):
''' Zoom in or out '''
global plotfig
for ax in plotfig.axes:
axpos = ax.get_position()
x0 = axpos.x0
x1 = axpos.x1
y0 = axpos.y0
y1 = axpos.y1
xdiff = x1-x0
ydiff = y1-y0
xchange = xdiff*(1-ratio)/2.0
ychange = ydiff*(1-ratio)/2.0
ax.set_position([x0+xchange, y0+ychange, xdiff*ratio, ydiff*ratio])
return None
def zoomin(event=None):
''' Zoom into plots '''
zoomplots(event=event, ratio=1.1)
return None
def zoomout(event=None):
''' Zoom out of plots '''
zoomplots(event=event, ratio=0.9)
return None
def updateplots(event=None, tmpresults=None, **kwargs):
''' Close current window if it exists and open a new one based on user selections '''
global plotfig, check, checkboxes, results, plotargs
if tmpresults is not None: results = tmpresults
# If figure exists, get size, then close it
if plotfig is None: plotfig = makenewfigure()
width,height = plotfig.get_size_inches(); close(plotfig) # Get current figure dimensions
# Get user selections
ischecked = getchecked(check)
toplot = array(checkboxes)[array(ischecked)].tolist() # Use logical indexing to get names to plot
# Do plotting
if sum(ischecked): # Don't do anything if no plots
plotfig = makenewfigure(num='Optima results', figsize=(width, height), facecolor=(1,1,1)) # Create figure with correct number of plots
for key in ['toplot','fig','figsize']: kwargs.pop(key, None) # Remove duplicated arguments if they exist
plotresults(results, toplot=toplot, fig=plotfig, figsize=(width, height), **plotargs)
return None
##############################################################################################################################
### GEOSPATIAL GUI
##############################################################################################################################
"""
GEOSPATIAL
This file defines everything needed for the Python GUI for geospatial analysis.
Version: 2017mar22
"""
from optima import Project, Portfolio, loadproj, saveobj, defaultobjectives, makegeospreadsheet, makegeoprojects
from time import time
global geoguiwindow, globalportfolio, globalobjectives
geoguiwindow, globalportfolio, globalobjectives = [None]*3 # Initialize module-level state
## Global options
budgetfactor = 1e6 # Conversion between screen and internal
prjext = '.prj'
prtext = '.prt'
##############################################################################################################################
## Define functions
##############################################################################################################################
def resetbudget():
''' Replace current displayed budget with default from portfolio '''
global globalportfolio, objectiveinputs
totalbudget = 0
for project in globalportfolio.projects.values():
totalbudget += sum(project.progsets[0].getdefaultbudget().values())
objectiveinputs['budget'].setText(str(totalbudget/budgetfactor))
return None
def warning(message, usegui=True):
''' usegui kwarg is so this can be used in a GUI and non-GUI context '''
global geoguiwindow
if usegui:
pyqt.QMessageBox.warning(geoguiwindow, 'Message', message)
else:
print(message)
def gui_loadproj():
''' Helper function to load a project, since used more than once '''
filepath = pyqt.QFileDialog.getOpenFileName(caption='Choose project file', filter='*'+prjext)
project = None
if filepath:
try: project = loadproj(filepath, verbose=0)
except Exception as E: print('Could not load file "%s": "%s"' % (filepath, repr(E)))
if type(project)==Project: return project
else: print('File "%s" is not an Optima project file' % filepath)
else:
print('No filepath provided')
return project
def gui_makesheet():
''' Create a geospatial spreadsheet template based on a project file '''
## 1. Load a project file
    project = gui_loadproj() # Load the project on which the spreadsheet will be based
if project is None:
raise OptimaException('No project loaded.')
try: results = project.parsets[-1].getresults()
except: results = project.runsim(name=project.parsets[-1].name)
copies, ok = pyqt.QInputDialog.getText(geoguiwindow, 'GA Spreadsheet Parameter', 'How many variants of the chosen project do you want?')
try: copies = int(copies)
except: raise OptimaException('Input (number of project copies) cannot be converted into an integer.')
refyear, ok = pyqt.QInputDialog.getText(geoguiwindow, 'GA Spreadsheet Parameter', 'Select a reference year for which you have district data.')
try: refyear = int(refyear)
except: raise OptimaException('Input (reference year) cannot be converted into an integer.')
    if refyear not in [int(x) for x in results.tvec]:
raise OptimaException("Input not within range of years used by aggregate project's last stored calibration.")
## 2. Get destination filename
spreadsheetpath = pyqt.QFileDialog.getSaveFileName(caption='Save geospatial spreadsheet file', filter='*.xlsx')
    ## 3. Generate and save spreadsheet
try:
makegeospreadsheet(project=project, filename=spreadsheetpath, copies=copies, refyear=refyear, verbose=2)
warning('Multi-project template saved to "%s".' % spreadsheetpath)
except:
warning('Error: Template not saved due to a workbook error!')
return None
def gui_makeproj():
''' Create a series of project files based on a seed file and a geospatial spreadsheet '''
project = gui_loadproj()
spreadsheetpath = pyqt.QFileDialog.getOpenFileName(caption='Choose geospatial spreadsheet', filter='*.xlsx')
destination = pyqt.QFileDialog.getExistingDirectory(caption='Choose output folder')
makegeoprojects(project=project, spreadsheetpath=spreadsheetpath, destination=destination)
warning('Created projects from spreadsheet')
return None
def gui_create(filepaths=None, portfolio=None, doadd=False):
''' Create a portfolio by selecting a list of projects; silently skip files that fail '''
global globalportfolio, projectslistbox, objectiveinputs
projectpaths = []
projectslist = []
if globalportfolio is None:
globalportfolio = Portfolio()
if not doadd:
globalportfolio = Portfolio()
projectslistbox.clear()
    if doadd and portfolio is not None:
globalportfolio = portfolio
filepaths = pyqt.QFileDialog.getOpenFileNames(caption='Choose project files', filter='*'+prjext)
if filepaths:
if type(filepaths)==str: filepaths = [filepaths] # Convert to list
for filepath in filepaths:
tmpproj = None
try: tmpproj = loadproj(filepath, verbose=0)
except: print('Could not load file "%s"; moving on...' % filepath)
if tmpproj is not None:
if type(tmpproj)==Project:
projectslist.append(tmpproj)
projectpaths.append(filepath)
print('Project file "%s" loaded' % filepath)
else: print('File "%s" is not an Optima project file; moving on...' % filepath)
projectslistbox.addItems(projectpaths)
globalportfolio.addprojects(projectslist)
resetbudget() # And reset the budget
return None
def gui_addproj():
''' Add a project -- same as creating a portfolio except don't overwrite '''
gui_create(doadd=True)
resetbudget() # And reset the budget
return None
def gui_loadport():
''' Load an existing portfolio '''
global globalportfolio, projectslistbox
filepath = pyqt.QFileDialog.getOpenFileName(caption='Choose portfolio file', filter='*'+prtext)
tmpport = None
if filepath:
try: tmpport = loadobj(filepath, verbose=0)
except Exception as E:
warning('Could not load file "%s" because "%s"' % (filepath, repr(E)))
return None
if tmpport is not None:
if type(tmpport)==Portfolio:
globalportfolio = tmpport
projectslistbox.clear()
projectslistbox.addItems([proj.name for proj in globalportfolio.projects.values()])
print('Portfolio file "%s" loaded' % filepath)
else: print('File "%s" is not an Optima portfolio file' % filepath)
else:
warning('File path not provided. Portfolio not loaded.')
resetbudget() # And reset the budget
return None
def gui_rungeo():
''' Actually run geospatial analysis!!! '''
global globalportfolio, globalobjectives, objectiveinputs
starttime = time()
if globalobjectives is None:
globalobjectives = defaultobjectives()
globalobjectives['budget'] = 0.0 # Reset
for key in objectiveinputs.keys():
globalobjectives[key] = eval(str(objectiveinputs[key].text())) # Get user-entered values
globalobjectives['budget'] *= budgetfactor # Convert back to internal representation
BOCobjectives = dcp(globalobjectives)
try:
globalportfolio.genBOCs(objectives=BOCobjectives, maxtime=30, mc=0)
globalportfolio.runGA(objectives=globalobjectives, maxtime=30, reoptimize=True, mc=0, batch=True, verbose=2, die=False, strict=True)
except Exception as E:
warning('Geospatial analysis failed: %s' % repr(E))
warning('Geospatial analysis finished running; total time: %0.0f s' % (time() - starttime))
return None
def gui_plotgeo():
''' Actually plot geospatial analysis!!! '''
global globalportfolio
if globalportfolio is None:
warning('Please load a portfolio first')
return None
globalportfolio.plotBOCs(deriv=False)
return None
def gui_export():
''' Save the current results to Excel file '''
global globalportfolio
    # 1. Check that a portfolio has been loaded
    if type(globalportfolio)!=Portfolio:
        warning('Warning, must load portfolio first!')
        return None
    # 2. Create a new file dialog to save this spreadsheet
filepath = pyqt.QFileDialog.getSaveFileName(caption='Save geospatial analysis results file', filter='*.xlsx')
# 3. Generate spreadsheet according to David's template to store these data
if filepath:
        try:
            globalportfolio.export(filename=filepath)
            warning('Results saved to "%s".' % filepath)
        except Exception as E:
            warning('Results export failed: %s' % repr(E))
else:
warning('Filepath not supplied: %s' % filepath)
return None
def gui_saveport():
''' Save the current portfolio '''
global globalportfolio
filepath = pyqt.QFileDialog.getSaveFileName(caption='Save portfolio file', filter='*'+prtext)
saveobj(filepath, globalportfolio)
return None
def closewindow():
''' Close the control panel '''
global geoguiwindow
geoguiwindow.close()
return None
def geogui():
'''
Open the GUI for doing geospatial analysis.
Version: 2016jan23
'''
global geoguiwindow, globalportfolio, globalobjectives, objectiveinputs, projectslistbox, projectinfobox
globalportfolio = None
if globalobjectives is None:
globalobjectives = defaultobjectives()
globalobjectives['budget'] = 0.0 # Reset
## Set parameters
wid = 1200.0
hei = 600.0
top = 20
spacing = 40
left = 20.
## Housekeeping
fig = figure(); close(fig) # Open and close figure...dumb, no? Otherwise get "QWidget: Must construct a QApplication before a QPaintDevice"
geoguiwindow = pyqt.QWidget() # Create panel widget
geoguiwindow.setGeometry(100, 100, wid, hei)
geoguiwindow.setWindowTitle('Optima geospatial analysis')
##############################################################################################################################
## Define buttons
##############################################################################################################################
## Define buttons
buttons = odict()
buttons['makesheet'] = pyqt.QPushButton('Make geospatial spreadsheet from project', parent=geoguiwindow)
buttons['makeproj'] = pyqt.QPushButton('Auto-generate projects from spreadsheet', parent=geoguiwindow)
buttons['create'] = pyqt.QPushButton('Create portfolio from projects', parent=geoguiwindow)
buttons['add'] = pyqt.QPushButton('Add projects to portfolio', parent=geoguiwindow)
buttons['loadport'] = pyqt.QPushButton('Load existing portfolio', parent=geoguiwindow)
buttons['rungeo'] = pyqt.QPushButton('Run geospatial analysis', parent=geoguiwindow)
buttons['plotgeo'] = pyqt.QPushButton('Plot geospatial results', parent=geoguiwindow)
buttons['export'] = pyqt.QPushButton('Export results', parent=geoguiwindow)
buttons['saveport'] = pyqt.QPushButton('Save portfolio', parent=geoguiwindow)
buttons['close'] = pyqt.QPushButton('Close', parent=geoguiwindow)
## Define button functions
actions = odict()
actions['makesheet'] = gui_makesheet
actions['makeproj'] = gui_makeproj
actions['create'] = gui_create
actions['add'] = gui_addproj
actions['loadport'] = gui_loadport
actions['rungeo'] = gui_rungeo
actions['plotgeo'] = gui_plotgeo
actions['export'] = gui_export
actions['saveport'] = gui_saveport
actions['close'] = closewindow
## Set button locations
spacer = 0
for b,key in enumerate(buttons.keys()):
if key=='rungeo': spacer = 170
buttons[key].move(left, top+spacing*b+spacer)
## Define button functions
for key in buttons.keys():
buttons[key].clicked.connect(actions[key])
##############################################################################################################################
## Define other objects
##############################################################################################################################
def updateprojectinfo():
global globalportfolio, projectslistbox, projectinfobox
ind = projectslistbox.currentRow()
project = globalportfolio.projects[ind]
projectinfobox.setText(repr(project))
return None
def removeproject():
global projectslistbox, projectinfobox, globalportfolio
ind = projectslistbox.currentRow()
globalportfolio.projects.pop(globalportfolio.projects.keys()[ind]) # Remove from portfolio
projectslistbox.takeItem(ind) # Remove from list
return None
## List of projects
projectslistlabel = pyqt.QLabel(parent=geoguiwindow)
projectslistlabel.setText('Projects in this portfolio:')
projectslistbox = pyqt.QListWidget(parent=geoguiwindow)
projectslistbox.verticalScrollBar()
projectslistbox.currentItemChanged.connect(updateprojectinfo)
buttons['remove'] = pyqt.QPushButton('Remove selected project from portfolio', parent=geoguiwindow)
buttons['remove'].clicked.connect(removeproject)
projectslistlabel.move(330,20)
projectslistbox.move(330, 40)
buttons['remove'].move(330, hei-40)
projectslistbox.resize(300, hei-100)
## Project info
projectsinfolabel = pyqt.QLabel(parent=geoguiwindow)
projectsinfolabel.setText('Information about the selected project:')
projectinfobox = pyqt.QTextEdit(parent=geoguiwindow)
projectinfobox.setReadOnly(True)
projectinfobox.verticalScrollBar()
projectsinfolabel.move(640,20)
projectinfobox.move(640, 40)
projectinfobox.resize(530, hei-100)
## Objectives
objectivetext = odict()
objectivetext['start'] = 'Start year:'
objectivetext['end'] = 'End year:'
objectivetext['budget'] = 'Total budget (mil.):'
objectivetext['deathweight'] = 'Deaths weight:'
objectivetext['inciweight'] = 'Infections weight:'
objectivetextobjs = odict()
for k,key in enumerate(objectivetext.keys()):
objectivetextobjs[key] = pyqt.QLabel(parent=geoguiwindow)
objectivetextobjs[key].setText(str(objectivetext[key]))
objectivetextobjs[key].move(left+10, 235+k*30)
objectiveinputs = odict()
for k,key in enumerate(objectivetext.keys()):
objectiveinputs[key] = pyqt.QLineEdit(parent=geoguiwindow)
objectiveinputs[key].setText(str(globalobjectives[key]))
objectiveinputs[key].move(left+120, 230+k*30)
objectiveinputs['budget'].setText(str(globalobjectives['budget']/budgetfactor)) # So right units
geoguiwindow.show()
|
optimamodel/Optima
|
optima/gui.py
|
Python
|
lgpl-3.0
| 48,120
|
#!/usr/bin/env python
from setuptools import setup
setup(
name='hubsync',
packages=['hubsync'],
version='0.2.9',
description='Get your github workspace synced!',
author='Mario Corchero',
author_email='mariocj89@gmail.com',
url='https://github.com/Mariocj89/hubsync',
keywords=['github', 'sync', 'workspace'],
scripts=['bin/hubsync'],
test_suite='nose.collector',
use_2to3=True,
install_requires=['gitpython', 'requests', 'six'],
tests_require=['mock']
)
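# A typical local install (illustrative): run "pip install ." from the repository
# root; setuptools then places the "hubsync" script from bin/ on the PATH.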
|
Mariocj89/hubsync
|
setup.py
|
Python
|
mit
| 507
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course_modes', '0002_coursemode_expiration_datetime_is_explicit'),
]
operations = [
migrations.AlterField(
model_name='coursemode',
name='expiration_datetime_is_explicit',
field=models.BooleanField(default=False),
),
]
|
solashirai/edx-platform
|
common/djangoapps/course_modes/migrations/0003_auto_20151113_1443.py
|
Python
|
agpl-3.0
| 463
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'RestoreRequest.error_message'
db.add_column('backup_restorerequest', 'error_message', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True), keep_default=False)
# Adding field 'BackupRequest.error_message'
db.add_column('backup_backuprequest', 'error_message', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'RestoreRequest.error_message'
db.delete_column('backup_restorerequest', 'error_message')
# Deleting field 'BackupRequest.error_message'
db.delete_column('backup_backuprequest', 'error_message')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'backup.backuprequest': {
'Meta': {'object_name': 'BackupRequest'},
'creation_datetime': ('django.db.models.fields.DateTimeField', [], {}),
'error_message': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_state_datetime': ('django.db.models.fields.DateTimeField', [], {}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'backup.restorerequest': {
'Meta': {'object_name': 'RestoreRequest'},
'action': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'backup_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'creation_datetime': ('django.db.models.fields.DateTimeField', [], {}),
'error_message': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_state_datetime': ('django.db.models.fields.DateTimeField', [], {}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['backup']
|
fgaudin/aemanager
|
backup/migrations/0003_auto__add_field_restorerequest_error_message__add_field_backuprequest_.py
|
Python
|
agpl-3.0
| 5,669
|
# -*- coding: utf-8 -*-
""" skratchlib
Library Package for Skratch Tool
Author: Andrew Paxson
Created: 2017-09-01
"""
#TODO Add List Scratch Files
import argparse
import os
import re
import subprocess
SCRATCH_FILENAME_CONSTANT = "scratch"
SCRATCH_LOCATION_CONSTANT = os.path.expanduser("~")
SCRATCH_RE_PATTERN_CONSTANT = r"{0}(\d+)"
SCRATCH_DEFAULT_EDITOR = "sublime"
class CLIComponent(object):
def __init__(self, settings):
self.settings = settings
def action(self, args):
raise NotImplementedError("action method needs to be implemented on {0}".format(self.__class__.__name__))
class NewScratchDelegate(CLIComponent):
def action(self, args):
""" Create New Scratch File """
        create_new_scratch_file(args.name, self.settings, py_template_func)
def add_cli(self, subparser):
""" Creates New Scratch CLI Interface on the given argparse.subparser
Args:
subparser (argparse._SubParsersAction): action to add arguments too
Returns:
argparse._SubParsersAction
"""
new_parser = subparser.add_parser('create', help='create new scratch file')
new_parser.add_argument('name', nargs='?', default=None, help="Optional Name to be given to the file, "
"default name is an increment of 'scratch##'")
new_parser.set_defaults(func=self.action)
return subparser
class RunScratchDelegate(CLIComponent):
def action(self, args):
file_ = args.file
if not file_:
file_ = get_recently_modified_scratch_file(self.settings)
if not file_:
raise RuntimeError("could not location files to run!")
run_scratch_file(file_, self.settings)
def add_cli(self, subparser):
        new_parser = subparser.add_parser('run', help='run scratch file')
new_parser.add_argument('file', nargs='?', default="", help="Optional scratch filename to run, if not given "
"it will run skratch with the latest mtime")
new_parser.set_defaults(func=self.action)
return subparser
class SettingData(object):
def __init__(self):
self.location = SCRATCH_LOCATION_CONSTANT
self.editor = SCRATCH_DEFAULT_EDITOR
self.base_filename = SCRATCH_FILENAME_CONSTANT
        self._re_pattern = SCRATCH_RE_PATTERN_CONSTANT # Stored unformatted; the re_pattern property formats it on access
@property
def re_pattern(self):
return self._re_pattern.format(self.base_filename)
@re_pattern.setter
def re_pattern(self, value):
self._re_pattern = value
# ----------------------------------------------------------------------------
def _get_mtime(f):
return os.stat(f).st_mtime
def _next_file_number(files, pattern):
if not files:
return ""
numbers = []
for f in files:
num = re.findall(pattern, f)
if num:
numbers.append(int(num[0]))
if numbers:
return str(max(numbers) + 1)
return "1"
def _open_with_editor(path, editor):
    return os.system("%s %s" % (editor, path))
def py_template_func(settings):
return "#! /usr/bin/env python\n"
def create_new_scratch_file(file_name, settings, template_func=py_template_func):
""" Creates New Scratch File
Args:
file_name (str)
settings (SettingData)
template_func (types.FunctionType)
"""
if not file_name:
re_pattern = settings.re_pattern
def _filter_scratch_files(f):
return re.match(re_pattern, f)
files = os.listdir(settings.location)
scratch_files = filter(_filter_scratch_files, files)
num = _next_file_number(scratch_files, re_pattern)
file_name = settings.base_filename + num
    path = os.path.join(settings.location, file_name)
with open(path, 'w') as fh:
fh.write(template_func(settings))
    _open_with_editor(path, settings.editor)
return path
# paths = map(lambda x: os.path.join(settings.scratch_location, x), scratch_files)
def get_recently_modified_scratch_file(settings):
""" Returns the most recently modified scratch in skratch dir
Args:
settings (SettingData): settings to use for getting scratch dir
Returns:
filepath (str) or empty string is nothing was found
"""
dir_contents = os.listdir(settings.location)
full_paths = map(lambda f: os.path.join(settings.location, f), dir_contents)
files = filter(lambda f: os.path.isfile(str(f)), full_paths)
if not files:
return ""
files = sorted(files, key=_get_mtime)
return files[-1]
def run_scratch_file(file_name, settings):
""" Runs Given Scratch Filename with settings """
return subprocess.call([file_name])
def cli_parser(settings):
""" CLI Interface """
parser = argparse.ArgumentParser("Scratch File Manager")
subparser = parser.add_subparsers()
cli_components = [
NewScratchDelegate,
RunScratchDelegate
]
for component in cli_components:
item = component(settings)
item.add_cli(subparser)
return parser
|
paxsonsa/skratch
|
skratchlib/__init__.py
|
Python
|
mit
| 5,224
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-16 00:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0005_queue_name'),
]
operations = [
migrations.AlterField(
model_name='media',
name='media_service',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.MediaService'),
),
]
|
falcaopetri/enqueuer-api
|
api/migrations/0006_auto_20161015_2113.py
|
Python
|
bsd-3-clause
| 543
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import HTMLParser
import urllib
import urllib2
from tempest import config
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
class HorizonHTMLParser(HTMLParser.HTMLParser):
csrf_token = None
region = None
def _find_name(self, attrs, name):
for attrpair in attrs:
if attrpair[0] == 'name' and attrpair[1] == name:
return True
return False
def _find_value(self, attrs):
for attrpair in attrs:
if attrpair[0] == 'value':
return attrpair[1]
return None
def handle_starttag(self, tag, attrs):
if tag == 'input':
if self._find_name(attrs, 'csrfmiddlewaretoken'):
self.csrf_token = self._find_value(attrs)
if self._find_name(attrs, 'region'):
self.region = self._find_value(attrs)
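# Example (illustrative): feeding a login-form snippet populates the parsed fields:
#     parser = HorizonHTMLParser()
#     parser.feed('<input name="csrfmiddlewaretoken" value="abc123">')
#     assert parser.csrf_token == 'abc123'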
class TestDashboardBasicOps(manager.ScenarioTest):
"""
This is a basic scenario test:
* checks that the login page is available
* logs in as a regular user
* checks that the user home page loads without error
"""
@classmethod
def skip_checks(cls):
super(TestDashboardBasicOps, cls).skip_checks()
if not CONF.service_available.horizon:
raise cls.skipException("Horizon support is required")
@classmethod
def setup_credentials(cls):
cls.set_network_resources()
super(TestDashboardBasicOps, cls).setup_credentials()
def check_login_page(self):
response = urllib2.urlopen(CONF.dashboard.dashboard_url)
self.assertIn("id_username", response.read())
def user_login(self, username, password):
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
response = self.opener.open(CONF.dashboard.dashboard_url).read()
# Grab the CSRF token and default region
parser = HorizonHTMLParser()
parser.feed(response)
# Prepare login form request
req = urllib2.Request(CONF.dashboard.login_url)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
req.add_header('Referer', CONF.dashboard.dashboard_url)
params = {'username': username,
'password': password,
'region': parser.region,
'csrfmiddlewaretoken': parser.csrf_token}
self.opener.open(req, urllib.urlencode(params))
def check_home_page(self):
response = self.opener.open(CONF.dashboard.dashboard_url)
self.assertIn('Overview', response.read())
@test.idempotent_id('4f8851b1-0e69-482b-b63b-84c6e76f6c80')
@test.services('dashboard')
def test_basic_scenario(self):
creds = self.credentials()
self.check_login_page()
self.user_login(creds.username, creds.password)
self.check_home_page()
|
fengbeihong/tempest_automate_ironic
|
tempest/scenario/test_dashboard_basic_ops.py
|
Python
|
apache-2.0
| 3,454
|
# -*- coding: utf-8 -*-
"""
Class and program to colorize python source code for ANSI terminals.
Based on an HTML code highlighter by Jurgen Hermann found at:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52298
Modifications by Fernando Perez (fperez@colorado.edu).
Information on the original HTML highlighter follows:
MoinMoin - Python Source Parser
Title: Colorize Python source using the built-in tokenizer
Submitter: Jurgen Hermann
Last Updated:2001/04/06
Version no:1.2
Description:
This code is part of MoinMoin (http://moin.sourceforge.net/) and converts
Python source code to HTML markup, rendering comments, keywords,
operators, numeric and string literals in different colors.
It shows how to use the built-in keyword, token and tokenize modules to
scan Python source code and re-emit it with no changes to its original
formatting (which is the hard part).
"""
__all__ = ['ANSICodeColors','Parser']
_scheme_default = 'Linux'
# Imports
import StringIO
import keyword
import os
import optparse
import sys
import token
import tokenize
try:
generate_tokens = tokenize.generate_tokens
except AttributeError:
# Python 3. Note that we use the undocumented _tokenize because it expects
# strings, not bytes. See also Python issue #9969.
generate_tokens = tokenize._tokenize
from IPython.utils.coloransi import *
#############################################################################
### Python Source Parser (does Highlighting)
#############################################################################
_KEYWORD = token.NT_OFFSET + 1
_TEXT = token.NT_OFFSET + 2
#****************************************************************************
# Builtin color schemes
Colors = TermColors # just a shorthand
# Build a few color schemes
NoColor = ColorScheme(
'NoColor',{
token.NUMBER : Colors.NoColor,
token.OP : Colors.NoColor,
token.STRING : Colors.NoColor,
tokenize.COMMENT : Colors.NoColor,
token.NAME : Colors.NoColor,
token.ERRORTOKEN : Colors.NoColor,
_KEYWORD : Colors.NoColor,
_TEXT : Colors.NoColor,
'normal' : Colors.NoColor # color off (usu. Colors.Normal)
} )
LinuxColors = ColorScheme(
'Linux',{
token.NUMBER : Colors.LightCyan,
token.OP : Colors.Yellow,
token.STRING : Colors.LightBlue,
tokenize.COMMENT : Colors.LightRed,
token.NAME : Colors.Normal,
token.ERRORTOKEN : Colors.Red,
_KEYWORD : Colors.LightGreen,
_TEXT : Colors.Yellow,
'normal' : Colors.Normal # color off (usu. Colors.Normal)
} )
LightBGColors = ColorScheme(
'LightBG',{
token.NUMBER : Colors.Cyan,
token.OP : Colors.Blue,
token.STRING : Colors.Blue,
tokenize.COMMENT : Colors.Red,
token.NAME : Colors.Normal,
token.ERRORTOKEN : Colors.Red,
_KEYWORD : Colors.Green,
_TEXT : Colors.Blue,
'normal' : Colors.Normal # color off (usu. Colors.Normal)
} )
# Build table of color schemes (needed by the parser)
ANSICodeColors = ColorSchemeTable([NoColor,LinuxColors,LightBGColors],
_scheme_default)
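# A minimal usage sketch (the filename is illustrative): colorize source to a string.
#     parser = Parser()
#     colorized, error = parser.format2(open('some_file.py').read(), out='str', scheme='Linux')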
class Parser:
""" Format colored Python source.
"""
def __init__(self, color_table=None,out = sys.stdout):
""" Create a parser with a specified color table and output channel.
Call format() to process code.
"""
self.color_table = color_table and color_table or ANSICodeColors
self.out = out
def format(self, raw, out = None, scheme = ''):
return self.format2(raw, out, scheme)[0]
def format2(self, raw, out = None, scheme = ''):
""" Parse and send the colored source.
If out and scheme are not specified, the defaults (given to
constructor) are used.
        out should be a file-like object. Optionally, out can be given as the
string 'str' and the parser will automatically return the output in a
string."""
string_output = 0
if out == 'str' or self.out == 'str' or \
isinstance(self.out,StringIO.StringIO):
# XXX - I don't really like this state handling logic, but at this
# point I don't want to make major changes, so adding the
# isinstance() check is the simplest I can do to ensure correct
# behavior.
out_old = self.out
self.out = StringIO.StringIO()
string_output = 1
elif out is not None:
self.out = out
# Fast return of the unmodified input for NoColor scheme
if scheme == 'NoColor':
error = False
self.out.write(raw)
if string_output:
return raw,error
else:
return None,error
# local shorthands
colors = self.color_table[scheme].colors
self.colors = colors # put in object so __call__ sees it
# Remove trailing whitespace and normalize tabs
self.raw = raw.expandtabs().rstrip()
# store line offsets in self.lines
self.lines = [0, 0]
pos = 0
raw_find = self.raw.find
lines_append = self.lines.append
while 1:
pos = raw_find('\n', pos) + 1
if not pos: break
lines_append(pos)
lines_append(len(self.raw))
# parse the source and write it
self.pos = 0
text = StringIO.StringIO(self.raw)
error = False
try:
for atoken in generate_tokens(text.readline):
self(*atoken)
except tokenize.TokenError as ex:
msg = ex.args[0]
line = ex.args[1][0]
self.out.write("%s\n\n*** ERROR: %s%s%s\n" %
(colors[token.ERRORTOKEN],
msg, self.raw[self.lines[line]:],
colors.normal)
)
error = True
self.out.write(colors.normal+'\n')
if string_output:
output = self.out.getvalue()
self.out = out_old
return (output, error)
return (None, error)
def __call__(self, toktype, toktext, (srow,scol), (erow,ecol), line):
""" Token handler, with syntax highlighting."""
# local shorthands
colors = self.colors
owrite = self.out.write
# line separator, so this works across platforms
linesep = os.linesep
# calculate new positions
oldpos = self.pos
newpos = self.lines[srow] + scol
self.pos = newpos + len(toktext)
# send the original whitespace, if needed
if newpos > oldpos:
owrite(self.raw[oldpos:newpos])
# skip indenting tokens
if toktype in [token.INDENT, token.DEDENT]:
self.pos = newpos
return
# map token type to a color group
        if token.LPAR <= toktype <= token.OP:
toktype = token.OP
elif toktype == token.NAME and keyword.iskeyword(toktext):
toktype = _KEYWORD
color = colors.get(toktype, colors[_TEXT])
#print '<%s>' % toktext, # dbg
# Triple quoted strings must be handled carefully so that backtracking
# in pagers works correctly. We need color terminators on _each_ line.
if linesep in toktext:
toktext = toktext.replace(linesep, '%s%s%s' %
(colors.normal,linesep,color))
# send text
owrite('%s%s%s' % (color,toktext,colors.normal))
def main(argv=None):
"""Run as a command-line script: colorize a python file or stdin using ANSI
color escapes and print to stdout.
Inputs:
- argv(None): a list of strings like sys.argv[1:] giving the command-line
arguments. If None, use sys.argv[1:].
"""
usage_msg = """%prog [options] [filename]
Colorize a python file or stdin using ANSI color escapes and print to stdout.
If no filename is given, or if filename is -, read standard input."""
parser = optparse.OptionParser(usage=usage_msg)
newopt = parser.add_option
newopt('-s','--scheme',metavar='NAME',dest='scheme_name',action='store',
choices=['Linux','LightBG','NoColor'],default=_scheme_default,
help="give the color scheme to use. Currently only 'Linux'\
(default) and 'LightBG' and 'NoColor' are implemented (give without\
quotes)")
opts,args = parser.parse_args(argv)
if len(args) > 1:
parser.error("you must give at most one filename.")
if len(args) == 0:
fname = '-' # no filename given; setup to read from stdin
else:
fname = args[0]
if fname == '-':
stream = sys.stdin
else:
try:
stream = open(fname)
except IOError as msg:
print >> sys.stderr, msg
sys.exit(1)
parser = Parser()
# we need nested try blocks because pre-2.5 python doesn't support unified
# try-except-finally
try:
try:
# write colorized version to stdout
parser.format(stream.read(),scheme=opts.scheme_name)
except IOError as msg:
# if user reads through a pager and quits, don't print traceback
if msg.args != (32,'Broken pipe'):
raise
finally:
if stream is not sys.stdin:
stream.close() # in case a non-handled exception happened above
if __name__ == "__main__":
main()
|
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/IPython/utils/PyColorize.py
|
Python
|
lgpl-3.0
| 9,600
|
# Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from datetime import datetime, timedelta
import json
import shutil
import tempfile
from c7n import policy, manager
from c7n.resources.ec2 import EC2
from c7n.utils import dumps
from .common import BaseTest, Config, Bag
class DummyResource(manager.ResourceManager):
def resources(self):
return [
{'abc': 123},
{'def': 456}]
@property
def actions(self):
class _a(object):
def name(self):
return self.f.__name__
def __init__(self, f):
self.f = f
def process(self, resources):
return self.f(resources)
def p1(resources):
return [
{'abc': 456},
{'def': 321}]
def p2(resources):
return resources
return [_a(p1), _a(p2)]
class PolicyPermissions(BaseTest):
def test_policy_detail_spec_permissions(self):
policy = self.load_policy({
'name': 'kinesis-delete',
'resource': 'kinesis',
'actions': ['delete']})
perms = policy.get_permissions()
self.assertEqual(
perms,
set(('kinesis:DescribeStream',
'kinesis:ListStreams',
'kinesis:DeleteStream')))
def test_policy_manager_custom_permissions(self):
policy = self.load_policy({
'name': 'ec2-utilization',
'resource': 'ec2',
'filters': [
{'type': 'metrics',
'name': 'CPUUtilization',
'days': 3,
'value': 1.5}
]})
perms = policy.get_permissions()
self.assertEqual(
perms,
set(('ec2:DescribeInstances',
'ec2:DescribeTags',
'cloudwatch:GetMetricStatistics')))
def xtest_resource_filter_name(self):
# resources without a filter name won't play nice in
# lambda policies
missing = []
marker = object
for k, v in manager.resources.items():
if getattr(v.resource_type, 'filter_name', marker) is marker:
missing.append(k)
if missing:
self.fail("Missing filter name %s" % (', '.join(missing)))
def test_resource_augment_universal_mask(self):
# universal tagging had a potentially bad pattern of masking
# resource augmentation; scan resources to ensure it doesn't
for k, v in manager.resources.items():
if not getattr(v.resource_type, 'universal_taggable', None):
continue
if v.augment.__name__ == 'universal_augment' and getattr(
v.resource_type, 'detail_spec', None):
self.fail(
"%s resource has universal augment masking resource augment" % k)
def test_resource_permissions(self):
self.capture_logging('c7n.cache')
missing = []
cfg = Config.empty()
for k, v in manager.resources.items():
p = Bag({'name': 'permcheck', 'resource': k})
ctx = self.get_context(config=cfg, policy=p)
mgr = v(ctx, p)
perms = mgr.get_permissions()
if not perms:
missing.append(k)
for n, a in v.action_registry.items():
p['actions'] = [n]
perms = a({}, mgr).get_permissions()
found = bool(perms)
if not isinstance(perms, (list, tuple, set)):
found = False
if not found:
missing.append("%s.actions.%s" % (
k, n))
for n, f in v.filter_registry.items():
if n in ('and', 'or', 'not'):
continue
p['filters'] = [n]
perms = f({}, mgr).get_permissions()
if not isinstance(perms, (tuple, list, set)):
missing.append("%s.filters.%s" % (
k, n))
# in memory filters
if n in ('event', 'value', 'tag-count',
'marked-for-op', 'offhour', 'onhour', 'age',
'state-age', 'egress', 'ingress',
'capacity-delta', 'is-ssl', 'global-grants',
'missing-policy-statement', 'missing-statement',
'healthcheck-protocol-mismatch', 'image-age',
'has-statement', 'no-access',
'instance-age', 'ephemeral', 'instance-uptime'):
continue
if not perms:
missing.append("%s.filters.%s" % (
k, n))
if missing:
self.fail("Missing permissions %d on \n\t%s" % (
len(missing),
"\n\t".join(sorted(missing))))
class TestPolicyCollection(BaseTest):
def test_expand_partitions(self):
cfg = Config.empty(
regions=['us-gov-west-1', 'cn-north-1', 'us-west-2'])
original = policy.PolicyCollection.from_data(
{'policies': [
{'name': 'foo',
'resource': 'ec2'}]},
cfg)
collection = original.expand_regions(cfg.regions)
self.assertEqual(
sorted([p.options.region for p in collection]),
['cn-north-1', 'us-gov-west-1', 'us-west-2'])
def test_policy_account_expand(self):
original = policy.PolicyCollection.from_data(
{'policies': [
{'name': 'foo',
'resource': 'account'}]},
Config.empty(regions=['us-east-1', 'us-west-2']))
collection = original.expand_regions(['all'])
self.assertEqual(len(collection), 1)
def test_policy_region_expand_global(self):
original = policy.PolicyCollection.from_data(
{'policies': [
{'name': 'foo',
'resource': 's3'},
{'name': 'iam',
'resource': 'iam-user'}]},
Config.empty(regions=['us-east-1', 'us-west-2']))
collection = original.expand_regions(['all'])
self.assertEqual(len(collection.resource_types), 2)
self.assertEqual(len(collection), 15)
iam = [p for p in collection if p.resource_type == 'iam-user']
self.assertEqual(len(iam), 1)
self.assertEqual(iam[0].options.region, 'us-east-1')
collection = original.expand_regions(['eu-west-1', 'eu-west-2'])
iam = [p for p in collection if p.resource_type == 'iam-user']
self.assertEqual(len(iam), 1)
self.assertEqual(iam[0].options.region, 'eu-west-1')
self.assertEqual(len(collection), 3)
class TestPolicy(BaseTest):
def test_load_policy_validation_error(self):
invalid_policies = {
'policies':
[{
'name': 'foo',
'resource': 's3',
'filters': [{"tag:custodian_tagging": "not-null"}],
'actions': [{'type': 'untag',
'tags': {'custodian_cleanup': 'yes'}}],
}]
}
self.assertRaises(Exception, self.load_policy_set, invalid_policies)
def test_policy_validation(self):
policy = self.load_policy({
'name': 'ec2-utilization',
'resource': 'ec2',
'tags': ['abc'],
'filters': [
{'type': 'metrics',
'name': 'CPUUtilization',
'days': 3,
'value': 1.5}],
'actions': ['stop']})
policy.validate()
self.assertEqual(policy.tags, ['abc'])
self.assertFalse(policy.is_lambda)
self.assertTrue(
repr(policy).startswith(
"<Policy resource: ec2 name: ec2-utilization"))
def test_policy_name_filtering(self):
collection = self.load_policy_set(
{'policies': [
{'name': 's3-remediate',
'resource': 's3'},
{'name': 's3-global-grants',
'resource': 's3'},
{'name': 'ec2-tag-compliance-stop',
'resource': 'ec2'},
{'name': 'ec2-tag-compliance-kill',
'resource': 'ec2'},
{'name': 'ec2-tag-compliance-remove',
'resource': 'ec2'}]},
)
self.assertIn('s3-remediate', collection)
self.assertNotIn('s3-argle-bargle', collection)
# Make sure __iter__ works
for p in collection:
self.assertTrue(p.name is not None)
self.assertEqual(collection.resource_types, set(('s3', 'ec2')))
self.assertTrue('s3-remediate' in collection)
self.assertEqual(
[p.name for p in collection.filter('s3*')],
['s3-remediate', 's3-global-grants'])
self.assertEqual(
[p.name for p in collection.filter('ec2*')],
['ec2-tag-compliance-stop',
'ec2-tag-compliance-kill',
'ec2-tag-compliance-remove'])
def test_file_not_found(self):
self.assertRaises(
IOError, policy.load, Config.empty(), "/asdf12")
def test_lambda_policy_metrics(self):
session_factory = self.replay_flight_data('test_lambda_policy_metrics')
p = self.load_policy({
'name': 'ec2-tag-compliance-v6',
'resource': 'ec2',
'mode': {
'type': 'ec2-instance-state',
'events': ['running']},
'filters': [
{"tag:custodian_status": 'absent'},
{'or': [
{"tag:App": 'absent'},
{"tag:Env": 'absent'},
{"tag:Owner": 'absent'}]}]},
session_factory=session_factory)
end = datetime.utcnow()
start = end - timedelta(14)
period = 24 * 60 * 60 * 14
self.assertEqual(
json.loads(dumps(p.get_metrics(start, end, period), indent=2)),
{u'Durations': [],
u'Errors': [{u'Sum': 0.0,
u'Timestamp': u'2016-05-30T10:50:00+00:00',
u'Unit': u'Count'}],
u'Invocations': [{u'Sum': 4.0,
u'Timestamp': u'2016-05-30T10:50:00+00:00',
u'Unit': u'Count'}],
u'ResourceCount': [{u'Average': 1.0,
u'Sum': 2.0,
u'Timestamp': u'2016-05-30T10:50:00+00:00',
u'Unit': u'Count'}],
u'Throttles': [{u'Sum': 0.0,
u'Timestamp': u'2016-05-30T10:50:00+00:00',
u'Unit': u'Count'}]})
def test_policy_metrics(self):
session_factory = self.replay_flight_data('test_policy_metrics')
p = self.load_policy(
{'name': 's3-encrypt-keys',
'resource': 's3',
'actions': [
{'type': 'encrypt-keys'}]},
session_factory=session_factory)
end = datetime.now().replace(hour=0, minute=0, microsecond=0)
start = end - timedelta(14)
period = 24 * 60 * 60 * 14
self.maxDiff = None
self.assertEqual(
json.loads(dumps(p.get_metrics(start, end, period), indent=2)),
{
"ActionTime": [
{
"Timestamp": "2016-05-30T00:00:00+00:00",
"Average": 8541.752702140668,
"Sum": 128126.29053211001,
"Unit": "Seconds"
}
],
"Total Keys": [
{
"Timestamp": "2016-05-30T00:00:00+00:00",
"Average": 1575708.7333333334,
"Sum": 23635631.0,
"Unit": "Count"
}
],
"ResourceTime": [
{
"Timestamp": "2016-05-30T00:00:00+00:00",
"Average": 8.682969363532667,
"Sum": 130.24454045299,
"Unit": "Seconds"
}
],
"ResourceCount": [
{
"Timestamp": "2016-05-30T00:00:00+00:00",
"Average": 23.6,
"Sum": 354.0,
"Unit": "Count"
}
],
"Unencrypted": [
{
"Timestamp": "2016-05-30T00:00:00+00:00",
"Average": 10942.266666666666,
"Sum": 164134.0,
"Unit": "Count"
}
]})
def test_get_resource_manager(self):
collection = self.load_policy_set(
{'policies': [
{'name': 'query-instances',
'resource': 'ec2',
'filters': [
{'tag-key': 'CMDBEnvironment'}
]}]})
p = collection.policies[0]
self.assertTrue(
isinstance(p.get_resource_manager(), EC2))
def test_get_logs_from_group(self):
p_data = {
'name': 'related-rds-test',
'resource': 'rds',
'filters': [
{
'key': 'GroupName',
'type': 'security-group',
'value': 'default',
},
],
'actions': [{'days': 10, 'type': 'retention'}],
}
session_factory = self.replay_flight_data('test_logs_from_group')
config = {'log_group': 'test-logs'}
policy = self.load_policy(p_data, config, session_factory)
logs = list(
policy.get_logs('2016-11-01 00:00:00', '2016-11-30 11:59:59')
)
self.assertEqual(len(logs), 6)
# entries look reasonable
entry = logs[1]
self.assertIn('timestamp', entry)
self.assertIn('message', entry)
# none in range
logs = list(
policy.get_logs('2016-10-01 00:00:00', '2016-10-31 11:59:59')
)
self.assertEqual(len(logs), 0)
def xtest_policy_run(self):
manager.resources.register('dummy', DummyResource)
self.addCleanup(manager.resources.unregister, 'dummy')
self.output_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.output_dir)
collection = self.load_policy_set(
{'policies': [
{'name': 'process-instances',
'resource': 'dummy'}]},
{'output_dir': self.output_dir})
p = collection.policies[0]
p()
self.assertEqual(len(p.ctx.metrics.data), 3)
class PolicyExecutionModeTest(BaseTest):
def test_run_unimplemented(self):
self.assertRaises(NotImplementedError,
policy.PolicyExecutionMode({}).run)
def test_get_logs_unimplemented(self):
self.assertRaises(NotImplementedError,
policy.PolicyExecutionMode({}).get_logs, 1, 2)
class PullModeTest(BaseTest):
def test_skip_when_region_not_equal(self):
log_file = self.capture_logging('custodian.policy')
policy_name = 'rds-test-policy'
p = self.load_policy(
{'name': policy_name,
'resource': 'rds',
'region': 'us-east-1',
'filters': [
{'type': 'default-vpc'}]},
config={'region': 'us-west-2'},
session_factory=None)
p.run()
lines = log_file.getvalue().strip().split('\n')
self.assertIn(
"Skipping policy {} target-region: us-east-1 current-region: us-west-2".format(policy_name),
lines)
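# Sketch (illustrative) of the minimal policy document shape these tests load;
# keys beyond name/resource (filters, actions, mode, region) are optional.
#   example_policy_data = {
#       'policies': [
#           {'name': 'ec2-tag-compliance',
#            'resource': 'ec2',
#            'filters': [{'tag-key': 'CMDBEnvironment'}],
#            'actions': ['stop']}]}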
|
jdubs/cloud-custodian
|
tests/test_policy.py
|
Python
|
apache-2.0
| 16,608
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Billy Kimble <basslines@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: hall
short_description: Send notification to Hall
description:
- "The C(hall) module connects to the U(https://hall.com) messaging API and allows you to deliver notication messages to rooms."
version_added: "2.0"
author: Billy Kimble (@bkimble) <basslines@gmail.com>
options:
room_token:
description:
- "Room token provided to you by setting up the Ansible room integation on U(https://hall.com)"
required: true
msg:
description:
- The message you wish to deliver as a notification
required: true
title:
description:
- The title of the message
required: true
picture:
description:
- >
The full URL to the image you wish to use for the Icon of the message. Defaults to
U(http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627)
required: false
"""
EXAMPLES = """
- name: Send Hall notification
hall:
room_token: <hall room integration token>
title: Nginx
msg: 'Created virtual host file on {{ inventory_hostname }}'
delegate_to: localhost
- name: Send Hall notification if EC2 servers were created.
hall:
room_token: <hall room integration token>
title: Server Creation
msg: 'Created instance {{ item.id }} of type {{ item.instance_type }}.\\nInstance can be reached at {{ item.public_ip }} in the {{ item.region }} region.'
delegate_to: localhost
when: ec2.instances|length > 0
with_items: '{{ ec2.instances }}'
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
HALL_API_ENDPOINT = 'https://hall.com/api/1/services/generic/%s'
def send_request_to_hall(module, room_token, payload):
headers = {'Content-Type': 'application/json'}
payload = module.jsonify(payload)
api_endpoint = HALL_API_ENDPOINT % (room_token)
response, info = fetch_url(module, api_endpoint, data=payload, headers=headers)
if info['status'] != 200:
secure_url = HALL_API_ENDPOINT % ('[redacted]')
module.fail_json(msg=" failed to send %s to %s: %s" % (payload, secure_url, info['msg']))
def main():
module = AnsibleModule(
argument_spec=dict(
room_token=dict(type='str', required=True),
msg=dict(type='str', required=True),
title=dict(type='str', required=True),
picture=dict(type='str',
default='http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627'),
)
)
room_token = module.params['room_token']
message = module.params['msg']
title = module.params['title']
picture = module.params['picture']
payload = {'title': title, 'message': message, 'picture': picture}
send_request_to_hall(module, room_token, payload)
module.exit_json(msg="OK")
if __name__ == '__main__':
main()
|
rosmo/ansible
|
lib/ansible/modules/notification/hall.py
|
Python
|
gpl-3.0
| 3,365
|
# -*- coding: utf-8 -*-
# Copyright 2014-17 Neil Freeman
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Argument helper for cli tool."""
import argparse
import logging
import sys
def add_default_args(parser, version=None, include=None):
"""
Add default arguments to a parser. These are:
- config: argument for specifying a configuration file.
- user: argument for specifying a user.
- dry-run: option for running without side effects.
- verbose: option for running verbosely.
- quiet: option for running quietly.
- version: option for spitting out version information.
Args:
version (str): version to return on <cli> --version
include (Sequence): default arguments to add to cli. Default: (config, user, dry-run, verbose, quiet)
"""
include = include or ("config", "user", "dry-run", "verbose", "quiet")
if "config" in include:
parser.add_argument(
"-c",
"--config",
dest="config_file",
metavar="PATH",
default=None,
type=str,
help="bots config file (json or yaml)",
)
if "user" in include:
parser.add_argument("-u", "--user", dest="screen_name", type=str, help="Twitter screen name")
if "dry-run" in include:
parser.add_argument("-n", "--dry-run", action="store_true", help="Don't actually do anything")
if "verbose" in include:
parser.add_argument("-v", "--verbose", action="store_true", help="Run talkatively")
if "quiet" in include:
parser.add_argument("-q", "--quiet", action="store_true", help="Run quietly")
if version:
parser.add_argument("-V", "--version", action="version", version="%(prog)s " + version)
def parent(version=None, include=None):
"""
Return the default args as a parent parser, optionally adding a version
Args:
version (str): version to return on <cli> --version
include (Sequence): default arguments to add to cli. Default: (config, user, dry-run, verbose, quiet)
"""
parser = argparse.ArgumentParser(add_help=False)
add_default_args(parser, version=version, include=include)
return parser
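# Usage sketch (illustrative): because parent() is built with add_help=False,
# it can be composed into a bot's own CLI via the parents= argument.
#   cli = argparse.ArgumentParser(description="my bot", parents=[parent("1.0")])
#   args = cli.parse_args(["--dry-run", "--verbose"])
#   assert args.dry_run and args.verbose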
def add_logger(name, level=None, format=None):
"""
Set up a stdout logger.
Args:
name (str): name of the logger
level: defaults to logging.INFO
format (str): format string for logging output.
defaults to ``%(filename)-11s %(lineno)-3d: %(message)s``.
Returns:
The logger object.
"""
# pylint: disable=redefined-builtin
format = format or "%(filename)-11s %(lineno)-3d: %(message)s"
log = logging.getLogger(name)
# Set logging level.
log.setLevel(level or logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter(format))
log.addHandler(handler)
return log
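# Usage sketch (illustrative; "mybot" is a made-up name): a stdout logger
# using the default format string.
#   log = add_logger("mybot", logging.DEBUG)
#   log.info("posting tweet")  # -> "<filename>  <lineno>: posting tweet"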
|
fitnr/twitter_bot_utils
|
src/twitter_bot_utils/args.py
|
Python
|
gpl-3.0
| 3,523
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from rest_framework import serializers
from drf_haystack.serializers import HaystackSerializer
class ProductSearchSerializer(HaystackSerializer):
"""
The base serializer used to represent one or more product fields returned in the
result list during searches.
"""
price = serializers.SerializerMethodField()
class Meta:
fields = ('text', 'autocomplete', 'name', 'product_url', 'price', 'body')
ignore_fields = ('text', 'autocomplete',)
def get_price(self, search_result):
"""
The price can't be stored inside the search index, so it must be fetched from the resolved
model. If your product models have fixed prices, store and retrieve them
from the search index instead, because that's much faster.
"""
if search_result.object:
return search_result.object.get_price(self.context['request'])
def to_representation(self, instance):
representation = super(ProductSearchSerializer, self).to_representation(instance)
return representation
|
schacki/django-shop
|
shop/search/serializers.py
|
Python
|
bsd-3-clause
| 1,131
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2017 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2017 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2017 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# Copyright (C) 2014-2017 Anler Hernández <hello@anler.me>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from unittest import mock
from django.core.urlresolvers import reverse
from django.core import mail
from taiga.base.utils import json
from taiga.hooks.gogs import event_hooks
from taiga.hooks.gogs.api import GogsViewSet
from taiga.hooks.exceptions import ActionSyntaxException
from taiga.projects import choices as project_choices
from taiga.projects.epics.models import Epic
from taiga.projects.issues.models import Issue
from taiga.projects.tasks.models import Task
from taiga.projects.userstories.models import UserStory
from taiga.projects.models import Membership
from taiga.projects.history.services import get_history_queryset_by_model_instance, take_snapshot
from taiga.projects.notifications.choices import NotifyLevel
from taiga.projects.notifications.models import NotifyPolicy
from taiga.projects import services
from .. import factories as f
pytestmark = pytest.mark.django_db
def test_bad_signature(client):
project = f.ProjectFactory()
url = reverse("gogs-hook-list")
url = "%s?project=%s" % (url, project.id)
data = {
"secret": "badbadbad"
}
response = client.post(url, json.dumps(data),
content_type="application/json")
response_content = response.data
assert response.status_code == 400
assert "Bad signature" in response_content["_error_message"]
def test_ok_signature(client):
project = f.ProjectFactory()
f.ProjectModulesConfigFactory(project=project, config={
"gogs": {
"secret": "tpnIwJDz4e"
}
})
url = reverse("gogs-hook-list")
url = "%s?project=%s" % (url, project.id)
data = {"test:": "data", "secret": "tpnIwJDz4e"}
response = client.post(url, json.dumps(data),
content_type="application/json")
assert response.status_code == 204
def test_blocked_project(client):
project = f.ProjectFactory(blocked_code=project_choices.BLOCKED_BY_STAFF)
f.ProjectModulesConfigFactory(project=project, config={
"gogs": {
"secret": "tpnIwJDz4e"
}
})
url = reverse("gogs-hook-list")
url = "%s?project=%s" % (url, project.id)
data = {"test:": "data", "secret": "tpnIwJDz4e"}
response = client.post(url, json.dumps(data),
content_type="application/json")
assert response.status_code == 451
def test_push_event_detected(client):
project = f.ProjectFactory()
url = reverse("gogs-hook-list")
url = "%s?project=%s" % (url, project.id)
data = {
"commits": [
{
"message": "test message",
"author": {
"username": "test",
},
}
],
"repository": {
"html_url": "http://test-url/test/project"
}
}
GogsViewSet._validate_signature = mock.Mock(return_value=True)
with mock.patch.object(event_hooks.PushEventHook, "process_event") as process_event_mock:
response = client.post(url, json.dumps(data),
HTTP_X_GITHUB_EVENT="push",
content_type="application/json")
assert process_event_mock.call_count == 1
assert response.status_code == 204
def test_push_event_epic_processing(client):
creation_status = f.EpicStatusFactory()
role = f.RoleFactory(project=creation_status.project, permissions=["view_epics"])
f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
new_status = f.EpicStatusFactory(project=creation_status.project)
epic = f.EpicFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
payload = {
"commits": [
{
"message": """test message
test TG-%s #%s ok
bye!
""" % (epic.ref, new_status.slug),
"author": {
"username": "test",
},
}
],
"repository": {
"html_url": "http://test-url/test/project"
}
}
mail.outbox = []
ev_hook = event_hooks.PushEventHook(epic.project, payload)
ev_hook.process_event()
epic = Epic.objects.get(id=epic.id)
assert epic.status.id == new_status.id
assert len(mail.outbox) == 1
def test_push_event_issue_processing(client):
creation_status = f.IssueStatusFactory()
role = f.RoleFactory(project=creation_status.project, permissions=["view_issues"])
f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
new_status = f.IssueStatusFactory(project=creation_status.project)
issue = f.IssueFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
payload = {
"commits": [
{
"message": """test message
test TG-%s #%s ok
bye!
""" % (issue.ref, new_status.slug),
"author": {
"username": "test",
},
}
],
"repository": {
"html_url": "http://test-url/test/project"
}
}
mail.outbox = []
ev_hook = event_hooks.PushEventHook(issue.project, payload)
ev_hook.process_event()
issue = Issue.objects.get(id=issue.id)
assert issue.status.id == new_status.id
assert len(mail.outbox) == 1
def test_push_event_task_processing(client):
creation_status = f.TaskStatusFactory()
role = f.RoleFactory(project=creation_status.project, permissions=["view_tasks"])
f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
new_status = f.TaskStatusFactory(project=creation_status.project)
task = f.TaskFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
payload = {
"commits": [
{
"message": """test message
test TG-%s #%s ok
bye!
""" % (task.ref, new_status.slug),
"author": {
"username": "test",
},
}
],
"repository": {
"html_url": "http://test-url/test/project"
}
}
mail.outbox = []
ev_hook = event_hooks.PushEventHook(task.project, payload)
ev_hook.process_event()
task = Task.objects.get(id=task.id)
assert task.status.id == new_status.id
assert len(mail.outbox) == 1
def test_push_event_user_story_processing(client):
creation_status = f.UserStoryStatusFactory()
role = f.RoleFactory(project=creation_status.project, permissions=["view_us"])
f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
new_status = f.UserStoryStatusFactory(project=creation_status.project)
user_story = f.UserStoryFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
payload = {
"commits": [
{
"message": """test message
test TG-%s #%s ok
bye!
""" % (user_story.ref, new_status.slug),
"author": {
"username": "test",
},
}
],
"repository": {
"html_url": "http://test-url/test/project"
}
}
mail.outbox = []
ev_hook = event_hooks.PushEventHook(user_story.project, payload)
ev_hook.process_event()
user_story = UserStory.objects.get(id=user_story.id)
assert user_story.status.id == new_status.id
assert len(mail.outbox) == 1
def test_push_event_issue_mention(client):
creation_status = f.IssueStatusFactory()
role = f.RoleFactory(project=creation_status.project, permissions=["view_issues"])
f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
issue = f.IssueFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
take_snapshot(issue, user=creation_status.project.owner)
payload = {
"commits": [
{
"message": """test message
test TG-%s ok
bye!
""" % (issue.ref),
"author": {
"username": "test",
},
}
],
"repository": {
"html_url": "http://test-url/test/project"
}
}
mail.outbox = []
ev_hook = event_hooks.PushEventHook(issue.project, payload)
ev_hook.process_event()
issue_history = get_history_queryset_by_model_instance(issue)
assert issue_history.count() == 1
assert issue_history[0].comment.startswith("This issue has been mentioned by")
assert len(mail.outbox) == 1
def test_push_event_task_mention(client):
creation_status = f.TaskStatusFactory()
role = f.RoleFactory(project=creation_status.project, permissions=["view_tasks"])
f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
task = f.TaskFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
take_snapshot(task, user=creation_status.project.owner)
payload = {
"commits": [
{
"message": """test message
test TG-%s ok
bye!
""" % (task.ref),
"author": {
"username": "test",
},
}
],
"repository": {
"html_url": "http://test-url/test/project"
}
}
mail.outbox = []
ev_hook = event_hooks.PushEventHook(task.project, payload)
ev_hook.process_event()
task_history = get_history_queryset_by_model_instance(task)
assert task_history.count() == 1
assert task_history[0].comment.startswith("This task has been mentioned by")
assert len(mail.outbox) == 1
def test_push_event_user_story_mention(client):
creation_status = f.UserStoryStatusFactory()
role = f.RoleFactory(project=creation_status.project, permissions=["view_us"])
f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
user_story = f.UserStoryFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
take_snapshot(user_story, user=creation_status.project.owner)
payload = {
"commits": [
{
"message": """test message
test TG-%s ok
bye!
""" % (user_story.ref),
"author": {
"username": "test",
},
}
],
"repository": {
"html_url": "http://test-url/test/project"
}
}
mail.outbox = []
ev_hook = event_hooks.PushEventHook(user_story.project, payload)
ev_hook.process_event()
us_history = get_history_queryset_by_model_instance(user_story)
assert us_history.count() == 1
assert us_history[0].comment.startswith("This user story has been mentioned by")
assert len(mail.outbox) == 1
def test_push_event_multiple_actions(client):
creation_status = f.IssueStatusFactory()
role = f.RoleFactory(project=creation_status.project, permissions=["view_issues"])
f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
new_status = f.IssueStatusFactory(project=creation_status.project)
issue1 = f.IssueFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
issue2 = f.IssueFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
payload = {
"commits": [
{
"message": """test message
test TG-%s #%s ok
test TG-%s #%s ok
bye!
""" % (issue1.ref, new_status.slug, issue2.ref, new_status.slug),
"author": {
"username": "test",
},
}
],
"repository": {
"html_url": "http://test-url/test/project"
}
}
mail.outbox = []
ev_hook1 = event_hooks.PushEventHook(issue1.project, payload)
ev_hook1.process_event()
issue1 = Issue.objects.get(id=issue1.id)
issue2 = Issue.objects.get(id=issue2.id)
assert issue1.status.id == new_status.id
assert issue2.status.id == new_status.id
assert len(mail.outbox) == 2
def test_push_event_processing_case_insensitive(client):
creation_status = f.TaskStatusFactory()
role = f.RoleFactory(project=creation_status.project, permissions=["view_tasks"])
f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
new_status = f.TaskStatusFactory(project=creation_status.project)
task = f.TaskFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
payload = {
"commits": [
{
"message": """test message
test tg-%s #%s ok
bye!
""" % (task.ref, new_status.slug.upper()),
"author": {
"username": "test",
},
}
],
"repository": {
"html_url": "http://test-url/test/project"
}
}
mail.outbox = []
ev_hook = event_hooks.PushEventHook(task.project, payload)
ev_hook.process_event()
task = Task.objects.get(id=task.id)
assert task.status.id == new_status.id
assert len(mail.outbox) == 1
def test_push_event_task_bad_processing_non_existing_ref(client):
issue_status = f.IssueStatusFactory()
payload = {
"commits": [
{
"message": """test message
test TG-6666666 #%s ok
bye!
""" % (issue_status.slug),
"author": {
"username": "test",
},
}
],
"repository": {
"html_url": "http://test-url/test/project"
}
}
mail.outbox = []
ev_hook = event_hooks.PushEventHook(issue_status.project, payload)
with pytest.raises(ActionSyntaxException) as excinfo:
ev_hook.process_event()
assert str(excinfo.value) == "The referenced element doesn't exist"
assert len(mail.outbox) == 0
def test_push_event_us_bad_processing_non_existing_status(client):
user_story = f.UserStoryFactory.create()
payload = {
"commits": [
{
"message": """test message
test TG-%s #non-existing-slug ok
bye!
""" % (user_story.ref),
"author": {
"username": "test",
},
}
],
"repository": {
"html_url": "http://test-url/test/project"
}
}
mail.outbox = []
ev_hook = event_hooks.PushEventHook(user_story.project, payload)
with pytest.raises(ActionSyntaxException) as excinfo:
ev_hook.process_event()
assert str(excinfo.value) == "The status doesn't exist"
assert len(mail.outbox) == 0
def test_push_event_bad_processing_non_existing_status(client):
issue = f.IssueFactory.create()
payload = {
"commits": [
{
"message": """test message
test TG-%s #non-existing-slug ok
bye!
""" % (issue.ref),
"author": {
"username": "test",
},
}
],
"repository": {
"html_url": "http://test-url/test/project"
}
}
mail.outbox = []
ev_hook = event_hooks.PushEventHook(issue.project, payload)
with pytest.raises(ActionSyntaxException) as excinfo:
ev_hook.process_event()
assert str(excinfo.value) == "The status doesn't exist"
assert len(mail.outbox) == 0
def test_api_get_project_modules(client):
project = f.create_project()
f.MembershipFactory(project=project, user=project.owner, is_admin=True)
url = reverse("projects-modules", args=(project.id,))
client.login(project.owner)
response = client.get(url)
assert response.status_code == 200
content = response.data
assert "gogs" in content
assert content["gogs"]["secret"] != ""
assert content["gogs"]["webhooks_url"] != ""
def test_api_patch_project_modules(client):
project = f.create_project()
f.MembershipFactory(project=project, user=project.owner, is_admin=True)
url = reverse("projects-modules", args=(project.id,))
client.login(project.owner)
data = {
"gogs": {
"secret": "test_secret",
"html_url": "test_url",
}
}
response = client.patch(url, json.dumps(data), content_type="application/json")
assert response.status_code == 204
config = services.get_modules_config(project).config
assert "gogs" in config
assert config["gogs"]["secret"] == "test_secret"
assert config["gogs"]["webhooks_url"] != "test_url"
def test_replace_gogs_references():
ev_hook = event_hooks.BaseGogsEventHook
assert ev_hook.replace_gogs_references(None, "project-url", "#2") == "[Gogs#2](project-url/issues/2)"
assert ev_hook.replace_gogs_references(None, "project-url", "#2 ") == "[Gogs#2](project-url/issues/2) "
assert ev_hook.replace_gogs_references(None, "project-url", " #2 ") == " [Gogs#2](project-url/issues/2) "
assert ev_hook.replace_gogs_references(None, "project-url", " #2") == " [Gogs#2](project-url/issues/2)"
assert ev_hook.replace_gogs_references(None, "project-url", "#test") == "#test"
assert ev_hook.replace_gogs_references(None, "project-url", None) == ""
|
dayatz/taiga-back
|
tests/integration/test_hooks_gogs.py
|
Python
|
agpl-3.0
| 19,291
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"gui2py's Components Object Model: base & super classes, metaclasses and mixins"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2013- Mariano Reingart" # where applicable
# Initial implementation was based on PythonCard's component module, although
# most of it was redesigned and overhauled a lot, mainly:
# * ComponentBase (metaclass) & ComponentMeta were inspired by Django's models
# * Widget was renamed to Control (wx.Control, Component abstract wx.Window)
# * SubComponent and DesignerMixin & SizerMixin are completely new
# * ImageBackgroundMixin was refactored from PythonCard's Panel
# Note: PythonCard's spec and event model were almost completely discarded
import decimal
import datetime
import wx
from .event import FocusEvent, MouseEvent, KeyEvent
from .font import Font
from .graphic import Bitmap, Color
from .spec import Spec, EventSpec, InitSpec, DimensionSpec, StyleSpec, InternalSpec
from . import registry
DEBUG = False
COMPONENTS = {} # map all created objects (used to search parents)
STACK = [] # used by the context manager to store parent references
# WORKAROUND: 2.8 does not have WrapSizer, use custom FlowSizer instead:
if wx.VERSION >= (2, 9):
wx_WrapSizer = wx.WrapSizer
else:
from .flowsizer import FlowSizer as wx_WrapSizer
class ComponentMeta():
"Component Metadata"
def __init__(self, name, specs):
self.name = name
self.specs = specs
self.valid_children = []
self.facade = False # enable facade (fake screenshot) at design time
self.container = False # True if can contain controls
class ComponentBase(type):
"Component class constructor (creates metadata and register the component)"
def __new__(cls, name, bases, attrs):
super_new = super(ComponentBase, cls).__new__
# Create the class.
new_class = super_new(cls, name, bases, attrs)
specs = {}
# get specs of the base classes
for base in bases:
if hasattr(base, "_meta"):
specs.update(base._meta.specs)
# get all the specs
specs.update(dict([(attr_name, attr_value)
for attr_name, attr_value in attrs.items()
if isinstance(attr_value, Spec)]))
# insert a _meta attribute with the specs
new_class._meta = ComponentMeta(name, specs)
# register and return the new class:
if hasattr(new_class, "_registry"):
new_class._registry[name] = new_class
if name not in registry.ALL:
registry.ALL.append(name)
return new_class
class Component(object):
"The base class for all of our GUI elements"
# Each Component must bind itself to the wxPython event model.
# When it receives an event from wxPython, it will convert the event
# to a gui.event.Event (UIEvent, MouseEvent, etc.) and call the handler.
# This is the base class from which all GUI elements should derive
# (TopLevelWindows -Frame, Dialog, etc.- and Controls).
# This object maps to wx.Window, but avoid that name as it can be confusing.
__metaclass__ = ComponentBase
_wx_class = wx.Window # wx object constructor
_style = 0 # base style
_image = None # default icon for toolbox
def __init__(self, parent=None, **kwargs):
# create the wxpython kw arguments (based on specs and defaults)
wx_kwargs = dict(id=new_id(kwargs.get('id')))
# check if we are recreating the object (i.e., to apply a new style)
rebuild = hasattr(self, "wx_obj")
# if using context manager, use the parent reference from the creation stack:
if not parent and STACK:
parent = STACK[-1]
if DEBUG: print "using parent stack ", parent
# get current spec values (if we are re-creating the wx object)
if rebuild:
for spec_name, spec in self._meta.specs.items():
if spec_name in kwargs:
continue # use provided new value
if not isinstance(spec, (InternalSpec)):
# get the current value and store it in kwargs
kwargs[spec_name] = getattr(self, spec_name)
self.wx_obj.Visible = False
old_wx_obj = self.wx_obj
self.wx_obj.obj = None
del self.wx_obj
if DEBUG: print "kwargs", kwargs
if isinstance(self._parent, Component):
del self._parent[self._name] # remove old child reference
else:
self.set_parent(parent, init=True)
self._font = None
self._bgcolor = self._fgcolor = False
# container to hold children:
self._children_dict = {} # key and values for __setitem__
self._children_list = [] # ordered values for __iter__
old_wx_obj = None
self.wx_obj = None # set up a void wx object (needed by setters)
for spec_name, spec in self._meta.specs.items():
value = kwargs.get(spec_name, spec.default)
# do not apply a spec if we are re-creating the wx object:
if rebuild and spec_name not in kwargs:
continue # keep the previously stored value
if isinstance(spec, InitSpec):
if DEBUG: print "INIT: setting ", spec_name, value
if not spec.optional and value is None:
raise ValueError("%s: %s is not optional" %
(self._meta.name, spec_name))
if spec._name:
name = spec._name[1:] # use the internal name
if name == "name":
self._set_name(value)
else:
setattr(self, spec._name, value)
else:
name = spec_name # use the spec attribute name
if value is not None:
wx_kwargs[name] = value
if spec_name in kwargs:
del kwargs[spec_name]
if isinstance(spec, StyleSpec):
if DEBUG: print "setting", spec_name, value, spec
setattr(self, spec_name, value)
if spec_name in kwargs:
del kwargs[spec_name]
self._wx_kwargs = wx_kwargs
# create the actual wxpython object
if hasattr(self, "_kind"): # hack to support menu items (TODO: fix)
self._wx_kwargs['kind'] = self._kind
else:
self._wx_kwargs['style'] = style = self._style
if DEBUG: print "WX KWARGS: ", self._wx_kwargs
if DEBUG: print "creating", self._wx_class
if self._parent is None or isinstance(self._parent, wx.Object):
wx_parent = self._parent
else:
wx_parent = self._parent.wx_obj
# sanitize parameters
if 'id' in self._wx_kwargs and self._wx_kwargs['id'] <= 0:
self._wx_kwargs['id'] = -1
self.wx_obj = self._wx_class(wx_parent, **self._wx_kwargs)
# load specs from kwargs, use default if available
for spec_name, spec in sorted(self._meta.specs.items(),
key=lambda it: it[1].order):
if spec.read_only or isinstance(spec, (StyleSpec, InitSpec, InternalSpec)):
continue
# get the spec value for kwargs, if it is optional, get the default
if spec_name in kwargs:
# set the value passed to the constructor
setattr(self, spec_name, kwargs[spec_name])
elif spec.default is not None or isinstance(spec, EventSpec):
# set the default value
setattr(self, spec_name, spec.default)
elif not spec.optional:
raise ValueError("%s: %s is not optional" % (self._meta.name,
spec_name))
else:
# assign an empty value (just to set up internal properties)
setattr(self, spec_name, None)
# store gui reference inside of wx object
self.wx_obj.obj = self
if isinstance(self._parent, Component) and getattr(self, "_name", None):
self._parent[self._name] = self # add child reference
COMPONENTS[self._get_fully_qualified_name()] = self
# re-associate children (wx object hierarchy):
if rebuild:
for ctrl in self:
ctrl.set_parent(self)
# destroy previous wx object
# (after reparent so children are not destroyed)
if old_wx_obj:
old_wx_obj.Destroy()
# finally, set special internal spec (i.e. designer)
# (this must be done at last to overwrite other event handlers)
for spec_name, spec in sorted(self._meta.specs.items(),
key=lambda it: it[1].order):
if isinstance(spec, InternalSpec) and not spec.read_only:
if DEBUG: print "resetting internal specs (rebound...):", spec_name, self.name
value = kwargs.get(spec_name, getattr(self, spec_name, spec.default))
setattr(self, spec_name, value)
if isinstance(spec, InitSpec):
# override special dimension init values passed directly
if spec_name == 'pos' and 'pos' in wx_kwargs:
x, y = wx_kwargs['pos']
if x >= 0:
self._left = str(x)
if y >= 0:
self._top = str(y)
elif spec_name == 'size' and 'size' in wx_kwargs:
w, h = wx_kwargs['size']
if w >= 0:
self._width = str(w)
if h >= 0:
self._height = str(h)
def rebuild(self, recreate=True, force=False, **kwargs):
"Recreate (if needed) the wx_obj and apply new properties"
# detect if this involves a spec that needs to recreate the wx_obj:
needs_rebuild = any([isinstance(spec, (StyleSpec, InitSpec))
for spec_name, spec in self._meta.specs.items()
if spec_name in kwargs])
# validate if this gui object needs and support recreation
if (needs_rebuild and recreate) or force:
if DEBUG: print "rebuilding window!"
# recreate the wx_obj! warning: it will call Destroy()
self.__init__(**kwargs)
else:
if DEBUG: print "just setting attr!"
for name, value in kwargs.items():
setattr(self, name, value)
def __del__(self):
"Destructor: clean-up all references"
self.destroy()
def destroy(self):
"Remove event references and destroy wx object (and children)"
# unreference the obj from the components map and parent
if self._name:
del COMPONENTS[self._get_fully_qualified_name()]
if DEBUG: print "deleted from components!"
if isinstance(self._parent, Component):
del self._parent[self._name]
if DEBUG: print "deleted from parent!"
# destroy the wx_obj (only if sure that reference is not needed!)
if self.wx_obj:
self.wx_obj.Destroy()
for child in self:
print "destroying child",
child.destroy()
# destroy the designer selection marker (if any)
if hasattr(self, 'sel_marker') and self.sel_marker:
self.sel_marker.destroy()
if hasattr(self, 'facade') and self.facade:
self.facade.destroy()
def duplicate(self, new_parent=None):
"Create a new object exactly similar to self"
kwargs = {}
for spec_name, spec in self._meta.specs.items():
value = getattr(self, spec_name)
if isinstance(value, Color):
print "COLOR", value, value.default
if value.default:
value = None
if value is not None:
kwargs[spec_name] = value
del kwargs['parent']
new_id = wx.NewId()
kwargs['id'] = new_id
kwargs['name'] = "%s_%s" % (kwargs['name'], new_id)
new_obj = self.__class__(new_parent or self.get_parent(), **kwargs)
# recursively create a copy of each child (in the new parent!)
for child in self:
child.duplicate(new_obj)
return new_obj
def reindex(self, z=None):
"Raises/lower the component in the window hierarchy (Z-order/tab order)"
# z=0: lowers(first index), z=-1: raises (last)
# actually, only useful in design mode
if isinstance(self._parent, Component):
# get the current index (z-order)
if self not in self._parent._children_list:
return len(self._parent._children_list)
i = self._parent._children_list.index(self)
if z is None:
return i
if not hasattr(self, "designer") and not self.designer:
raise RuntimeError("reindexing can only be done on design mode")
# delete the element reference from the list
del self._parent._children_list[i]
# insert as last element
if z < 0:
self._parent._children_list.append(self)
else:
self._parent._children_list.insert(z, self)
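# Usage sketch (illustrative; only meaningful in design mode, see the guard
# above), via the index spec defined below:
#   ctrl.index       # current Z/tab-order position
#   ctrl.index = 0   # lower to the first position
#   ctrl.index = -1  # raise to the last position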
# Container methods:
def __iter__(self):
return self._children_list.__iter__()
def __setitem__(self, name, child):
self._children_dict[name] = child
if child not in self._children_list and hasattr(self, "_sizer_add"):
self._sizer_add(child)
self._children_list.append(child)
def __getitem__(self, key):
if isinstance(key, slice):
return self._children_list.__getitem__(key)
else:
return self._children_dict[key]
def __delitem__(self, name):
del self._children_list[self._children_list.index(self._children_dict[name])]
del self._children_dict[name]
def __contains__(self, name):
return name in self._children_dict
# context manager methods (with statement currently used to stack parents)
def __enter__(self):
STACK.append(self)
if DEBUG: print "pushed", self.name
return self
def __exit__(self, *exc_info):
obj = STACK.pop()
if DEBUG: print "popped", obj.name
# Public methods:
def redraw(self):
"Force an immediate redraw without waiting for an event handler to finish."
self.wx_obj.Refresh()
self.wx_obj.Update()
def set_focus(self):
self.wx_obj.SetFocus()
def set_parent(self, new_parent, init=False):
"Store the gui/wx object parent for this component"
# set init=True if this is called from the constructor
self._parent = get(new_parent, init) # store new parent
def get_parent(self):
"Return the object parent for this component (either gui or wx)"
return self._parent
def _get_parent_name(self):
"Return parent window name (used in __repr__ parent spec)"
parent = self.get_parent()
parent_names = []
while parent:
if isinstance(parent, Component):
parent_name = parent.name
# Top Level Windows has no parent!
if parent_name:
parent_names.insert(0, parent_name)
parent = parent.get_parent()
else:
break
if not parent_names:
return None
else:
return '.'.join(parent_names)
def _get_fully_qualified_name(self):
"return full parents name + self name (useful as key)"
parent_name = self._get_parent_name()
if not parent_name:
return self._name
else:
return "%s.%s" % (parent_name, self._name)
def _get_name(self):
return getattr(self, "_name", None)
def _set_name(self, value):
# check if we're changing the previous name (ie., design time)
if hasattr(self, "_name"):
key = self._get_fully_qualified_name()
else:
key = None
self._name = value
# delete old reference (if exists)
if key in COMPONENTS:
del COMPONENTS[key]
COMPONENTS[self._get_fully_qualified_name()] = self
def __repr__( self, prefix="gui", parent="", indent=0, context=False):
return represent(self, prefix, parent, indent, context)
# properties:
def _getId(self):
"return the id is generated using NewId by the base wxPython control"
return self.wx_obj.GetId()
def _setId(self, id):
pass
#raise AttributeError("id attribute is read-only")
def _getToolTip(self):
try:
return self.wx_obj.GetToolTip().GetTip()
except:
return ""
def _get_font(self):
if self._font is None:
return None
wx_font = self.wx_obj.GetFont()
# sanity check: avoid assertion error:
if not wx_font.IsOk():
wx_font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
self._font.set_wx_font(wx_font)
return self._font
def _set_fgcolor(self, color):
color = self._get_default_color(color, "foreground")
# if not color given, avoid change it to wx.NullColor (fix for OSX)
if color is not wx.NullColour:
self.wx_obj.SetForegroundColour(color)
self.wx_obj.Refresh() # KEA wxPython bug?
self._fgcolor = True
else:
self._fgcolor = False
def _set_bgcolor(self, color):
color = self._get_default_color(color, "background")
# if not color given, avoid change it to wx.NullColor (fix for OSX)
if color is not wx.NullColour:
self.wx_obj.SetBackgroundColour(color)
self.wx_obj.Refresh() # KEA wxPython bug?
self._bgcolor = True
else:
self._bgcolor = False
def _get_fgcolor(self):
color = self.wx_obj.GetForegroundColour()
if color:
color = self._get_default_color(color, "foreground")
c = Color(color.Red(), color.Green(), color.Blue(), color.Alpha())
c.default = not self._fgcolor
return c
def _get_bgcolor(self):
color = self.wx_obj.GetBackgroundColour()
if color:
color = self._get_default_color(color, "background")
c = Color(color.Red(), color.Green(), color.Blue(), color.Alpha())
c.default = not self._bgcolor
return c
def _setToolTip(self, aString):
toolTip = wx.ToolTip(aString)
self.wx_obj.SetToolTip(toolTip)
def _set_font(self, value):
if value is None:
self._font = None
return
if isinstance(value, dict):
self._font = Font(parent=self, **value)
else: # Bind the font to this component.
self._font = value
value._parent = self
self.wx_obj.SetFont( self._font.get_wx_font() )
def _getUserdata(self):
return self._userdata
def _setUserdata(self, aString):
self._userdata = aString
def _get_default_color(self, color, context="background"):
if color is None:
# warning: NullColour is ignored as it doesn't work properly in OSX
# use wx.SystemSettings.GetColour(wx.SYS_COLOUR_BACKGROUND) instead
c = wx.NullColour
else:
# KEA 2001-07-27
# is this the right place for this check?
if isinstance(color, tuple) and len(color) == 3:
c = Color(color[0], color[1], color[2])
else:
if isinstance(color, basestring):
c = wx.NamedColour(color)
else:
c = color
return c
def _getEnabled(self):
return self.wx_obj.IsEnabled()
def _setEnabled(self, aBoolean):
self.wx_obj.Enable(aBoolean)
def _getVisible(self):
return self.wx_obj.IsShown()
def _setVisible(self, aBoolean):
# do not show immediately to allow proper layout and avoid flickering
if aBoolean and self._parent is None:
wx.CallAfter(self.wx_obj.Show)
else:
self.wx_obj.Show(aBoolean)
def _get_drop_target(self):
# dt is a special case because the original wx_obj could be destroyed
return hasattr(self, "_drop_target") and self._drop_target or None
def _set_drop_target(self, dt):
# discard previous drop target, as it is associated to the old wx_obj!
if dt and self.drop_target == dt:
dt = dt.copy() # create a copy to avoid wx problems
old_dt = getattr(self, "_drop_target", None)
self._drop_target = dt
if (old_dt or dt) and hasattr(self.wx_obj, "SetDropTarget"):
self.wx_obj.SetDropTarget(dt)
name = InitSpec(_get_name, _set_name, optional=False, _name="_name", default="", type='string')
bgcolor = Spec(_get_bgcolor, _set_bgcolor, type='colour')
font = Spec(_get_font, _set_font, type='font')
fgcolor = Spec(_get_fgcolor, _set_fgcolor, type='colour')
enabled = Spec(_getEnabled, _setEnabled, default=True, type='boolean')
id = InitSpec(_getId, _setId, default=-1, type="integer")
helptext = Spec(optional=True, type="string")
tooltip = Spec(_getToolTip, _setToolTip, default='', type="string")
visible = Spec(_getVisible, _setVisible, default=True, type='boolean')
userdata = Spec(_name='_userdata')
parent = Spec(lambda self: self._get_parent_name(),
optional=False, default="",
doc="parent window (used internally)")
drop_target = InternalSpec(lambda self: self._get_drop_target(),
lambda self, value: self._set_drop_target(value),
doc="drag&drop handler (used in design mode)",
type='internal')
index = Spec(reindex, reindex, type="integer", default=None,
doc="Z Order (overlapping) / Tab Order (Tabbing navigation)")
class DesignerMixin(object):
"Designer support"
__metaclass__ = ComponentBase
def _set_designer(self, func):
if DEBUG: print "binding designer handler...", func, self._meta.name
if func:
# connect the mouse event handler of the designer:
self.wx_obj.Bind(wx.EVT_MOUSE_EVENTS, func)
# link menu selection (click) to the designer
self.wx_obj.Bind(wx.EVT_MENU, func)
# link key press event to the designer (move)
self.wx_obj.Bind(wx.EVT_KEY_DOWN, func)
self.wx_obj.Bind(wx.EVT_KEY_UP, func)
# bind top level window resizing and closing event:
if self._parent is None:
self.wx_obj.Bind(wx.EVT_SIZE, func, self.wx_obj)
self.wx_obj.Bind(wx.EVT_CLOSE, func, self.wx_obj)
# link repaint event (refresh) to the designer (draw grid)
self.wx_obj.Bind(wx.EVT_PAINT, func, self.wx_obj)
self._designer = func
for child in self:
child.designer = func
def snapshot(self):
"Capture the screen appearance of the control (to be used as facade)"
width, height = self.wx_obj.GetSize()
bmp = wx.EmptyBitmap(width, height)
wdc = wx.ClientDC(self.wx_obj)
mdc = wx.MemoryDC(bmp)
mdc.Blit(0, 0, width, height, wdc, 0, 0)
#bmp.SaveFile("test.bmp", wx.BITMAP_TYPE_BMP)
wdc.Destroy()
mdc.Destroy()
return bmp
def _set_facade(self, Facade):
if DEBUG: print "setting facade...", self._meta.name
if hasattr(self, "_facade") and self._facade:
self._facade.destroy()
# only set facade if it is a Class (not instantiated)
if Facade and callable(Facade):
self._facade = Facade(self._parent.wx_obj, obj=self, )
self._facade.Bind(wx.EVT_MOUSE_EVENTS, self.designer)
self._facade.Show()
else:
self._facade = None
def rebuild(self, **kwargs):
super(DesignerMixin, self).rebuild(**kwargs)
# refresh the fake image screenshot (if used as facade):
if self._meta.facade and self.facade:
self.facade.update()
designer = InternalSpec(lambda self: self._designer,
lambda self, value: self._set_designer(value),
doc="function to handle events in design mode",
type='internal')
facade = InternalSpec(lambda self: self._facade,
lambda self, value: self._set_facade(value),
doc="fake static superficial object in design mode",
type='internal')
sel_marker = InternalSpec(_name="_sel_marker",
doc="selection marker in design mode",
type='internal')
SIZERS_MAP = {'wrap': wx_WrapSizer,
'flexgrid': wx.FlexGridSizer,
'gridbag': wx.GridBagSizer,
'': None, }
class SizerMixin(object):
"Automatic layout & sizing support"
__metaclass__ = ComponentBase
def _set_sizer(self, sizer):
if DEBUG: print "set sizer", sizer, self._meta.name
if sizer:
# create a new sizer (flow / fluid) layout (requires wxPython 2.9)
self._sizer = SIZERS_MAP[sizer]()
self.wx_obj.SetSizer(self._sizer)
# add all the children to the sizer (in case we're on design time):
for child in self:
self._sizer_add(child)
elif hasattr(self, "_sizer"):
# remove the sizer
self.wx_obj.SetSizer(None)
del self._sizer
def _get_sizer(self):
if hasattr(self, "_sizer"):
return [k for k, v in SIZERS_MAP.items()
if v == self._sizer.__class__][0]
else:
return None
def _sizer_add(self, child):
"called when adding a control to the window"
if self.sizer:
if DEBUG: print "adding to sizer:", child.name
border = child.sizer_border
flags = child._sizer_flags
if child.sizer_align:
flags |= child._sizer_align
if child.sizer_expand:
flags |= wx.EXPAND
if 'grid' in self.sizer:
self._sizer.Add(child.wx_obj, flag=flags, border=border,
pos=(child.sizer_row, child.sizer_col),
span=(child.sizer_rowspan, child.sizer_colspan))
else:
self._sizer.Add(child.wx_obj, 0, flags, border)
def set_sizer_grow_row(self, row=-1, proportion=0):
if row >= 0:
self._sizer.AddGrowableRow(row, proportion)
def set_sizer_grow_col(self, col=-1, proportion=0):
if col >= 0:
self._sizer.AddGrowableCol(col, proportion)
sizer = DimensionSpec(lambda self: self._get_sizer(),
lambda self, value: self._set_sizer(value), group="sizer",
doc="use an automatic flow layout mechanism (wx.WrapSizer)",
mapping=dict([(k, id(v)) for k, v in SIZERS_MAP.items()]),
type='enum', default='')
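# A minimal usage sketch of the sizer support above ("panel" and "button"
# are hypothetical components; sizer_border/sizer_expand are the sizer
# specs declared further below on Control):
#
#   panel.sizer = "wrap"        # creates a wx.WrapSizer and re-adds children
#   button.sizer_border = 4     # empty space around the item
#   button.sizer_expand = True  # fill the space allotted by the sizer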
class ControlSuper(DesignerMixin, Component):
"This is the base class for a control and top level windows"
# A control is generally a small window which processes user input and/or
# displays one or more item of data (for more info see wx.Control)
    # Avoid the term 'widget' as it could be confusing (and it is already used in
# web2py)
_registry = registry.CONTROLS
def __init__(self, parent=None, **kwargs):
# set safe initial dimensions
if not hasattr(self, '_resizable'):
self._resizable = kwargs.get('resizable', True)
self._left = self._top = self._width = self._height = None
self._margins = [0] * 4 # left, top, right, bottom
        # call default constructor
Component.__init__(self, parent, **kwargs)
# Handle resize events to adjust absolute and relative dimensions
if self._resizable:
# postpone this so it doesn't catch initialization events:
wx.CallAfter(self.wx_obj.Bind, wx.EVT_SIZE, self.resize)
def set_parent(self, new_parent, init=False):
"Re-parent a child control with the new wx_obj parent"
Component.set_parent(self, new_parent, init)
# if not called from constructor, we must also reparent in wx:
if not init:
if DEBUG: print "reparenting", ctrl.name
if hasattr(self.wx_obj, "Reparent"):
self.wx_obj.Reparent(self._parent.wx_obj)
# Dimensions:
def _calc_dimension(self, dim_val, dim_max, font_dim):
"Calculate final pos and size (auto, absolute in pixels & relativa)"
if dim_val is None:
return -1 # let wx automatic pos/size
elif isinstance(dim_val, int):
return dim_val # use fixed pixel value (absolute)
elif isinstance(dim_val, basestring):
if dim_val.endswith("%"):
                # percentage, relative to parent max size:
dim_val = int(dim_val[:-1])
dim_val = dim_val / 100.0 * dim_max
elif dim_val.endswith("em"):
                # use current font size (supports fractions):
dim_val = float(dim_val[:-2])
dim_val = dim_val * font_dim
elif dim_val.endswith("px"):
# fixed pixels
dim_val = dim_val[:-2]
elif dim_val == "" or dim_val == "auto":
dim_val = -1
return int(dim_val)
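    # Worked examples for _calc_dimension above (assumed: parent 400px wide,
    # average char width 8px):
    #   _calc_dimension("50%", 400, 8)  -> 200  (percentage of parent size)
    #   _calc_dimension("2em", 400, 8)  -> 16   (multiples of the font size)
    #   _calc_dimension("10px", 400, 8) -> 10   (fixed pixels)
    #   _calc_dimension(None, 400, 8)   -> -1   (let wx decide automatically)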
def _get_pos(self):
# get the actual position, not (-1, -1)
return tuple(self.wx_obj.GetPosition())
def _set_pos(self, point):
# check parameters (and store user values for resize)
point = list(point)
# get parent or screen size (used to calc the percent)
if self.parent and self.wx_obj.GetParent():
parent_size = self.wx_obj.GetParent().GetSize()
else:
parent_size = wx.DisplaySize()
# get font metrics for "em" unit
font_width = self.wx_obj.GetCharWidth()
font_height = self.wx_obj.GetCharHeight()
# get current position:
x, y = self._get_pos()
# calculate actual position (auto, relative or absolute)
if point[0] is not None:
x = self._calc_dimension(point[0], parent_size[0], font_width)
self._left = str(point[0])
if point[1] is not None:
y = self._calc_dimension(point[1], parent_size[1], font_height)
self._top = str(point[1])
# actually move the object
self.wx_obj.Move((x, y))
# update the designer selection marker (if any)
if hasattr(self, 'sel_marker') and self.sel_marker:
self.sel_marker.update()
if hasattr(self, 'facade') and self.facade:
self.facade.update()
def _set_left(self, value):
self._set_pos([value, None])
def _set_top(self, value):
self._set_pos([None, value])
def _get_size(self):
# return the actual size, not (-1, -1)
if isinstance(self.wx_obj, wx.TopLevelWindow):
return tuple(self.wx_obj.GetClientSize())
else:
return tuple(self.wx_obj.GetSize())
def _set_size(self, size, new_size=None):
if DEBUG: print "SET SIZE", self._name, size, new_size
# check parameters (and store user values for resize)
size = list(size)
# get parent or screen size (used to calc the percent)
if new_size:
parent_size = new_size # use event size instead
elif self.parent and self.wx_obj.GetParent():
parent_size = self.wx_obj.GetParent().GetSize()
else:
parent_size = wx.DisplaySize()
# get font metrics for "em" unit
font_width = self.wx_obj.GetCharWidth()
font_height = self.wx_obj.GetCharHeight()
# get current size (to use if a dimension has not changed)
prev_size = self._get_size()
# calculate actual position (auto, relative or absolute)
if size[0] is not None:
w = self._calc_dimension(size[0], parent_size[0], font_width)
else:
w = prev_size[0]
if size[1] is not None:
h = self._calc_dimension(size[1], parent_size[1], font_height)
else:
h = prev_size[1]
# on windows set the client size (ignore title bar)
        # note: don't do it on controls (it doesn't work, at least for textbox)
if DEBUG: print "NEWSIZE", w, h
if isinstance(self.wx_obj, wx.TopLevelWindow):
self.wx_obj.SetClientSize((w, h))
else:
self.wx_obj.SetSize((w, h))
self.wx_obj.SetMinSize((w, h)) # needed for sizers
# update the designer selection marker (if any)
if hasattr(self, 'sel_marker') and self.sel_marker:
self.sel_marker.update()
if hasattr(self, 'facade') and self.facade:
self.facade.update()
def _set_width(self, value):
if DEBUG: print "SET WIDTH", self._name, value
self._width = str(value)
self._set_size([value, None])
def _set_height(self, value):
if DEBUG: print "SET HEIGHT", self._name, value
self._height = str(value)
self._set_size([None, value])
def _get_margin(self, index):
return self._margins[index] or 0
def _set_margin(self, value, index):
old_margin = self._margins[index]
self._margins[index] = value
if old_margin != value and value is not None:
self.resize()
def resize(self, evt=None):
"automatically adjust relative pos and size of children controls"
if DEBUG: print "RESIZE!", self.name, self.width, self.height
if not isinstance(self.wx_obj, wx.TopLevelWindow):
# check that size and pos is relative, then resize/move
if self._left and self._left[-1] == "%" or \
self._top and self._top[-1] == "%":
if DEBUG: print "MOVING", self.name, self._width
self._set_pos((self._left, self._top))
if self._width and self._width[-1] == "%" or \
self._height and self._height[-1] == "%":
if DEBUG: print "RESIZING", self.name, self._width, self._height
self._set_size((self._width, self._height))
for child in self:
if isinstance(child, Control):
child.resize(evt)
# call original handler (wx.HtmlWindow)
if evt:
evt.Skip()
def get_char_width(self):
"Returns the average character width for this window."
return self.wx_obj.GetCharWidth()
def get_char_height(self):
"Returns the character height for this window."
return self.wx_obj.GetCharHeight()
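    # Usage sketch of the relative-dimension machinery ("ctrl" is a
    # hypothetical control; spec names come from the DimensionSpecs below):
    #   ctrl.width = "50%"   # re-resolved against the parent on every resize
    #   ctrl.left = "2em"    # two average character widths from the left edge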
pos = InitSpec(_get_pos, _set_pos, default=[ -1, -1])
size = InitSpec(_get_size, _set_size, default=[ -1, -1])
client_size = Spec(lambda self: self.wx_obj.GetClientSize(),
lambda self, value: self.wx_obj.SetClientSize(value),
default=[ -1, -1])
width = DimensionSpec(lambda self: self._width,
lambda self, value: self._set_width(value),
default="", type="string", group="size")
height = DimensionSpec(lambda self: self._height,
lambda self, value: self._set_height(value),
default="", type="string", group="size")
left = DimensionSpec(lambda self: self._left,
lambda self, value: self._set_left(value),
default="", type="string", group="position")
top = DimensionSpec(lambda self: self._top,
lambda self, value: self._set_top(value),
default="", type="string", group="position")
border = StyleSpec({'default': wx.BORDER_DEFAULT,
'simple': wx.BORDER_SIMPLE,
'sunken': wx.BORDER_SUNKEN,
'raised': wx.BORDER_RAISED,
'static': wx.BORDER_STATIC,
'theme': wx.BORDER_THEME, # native
'none': wx.BORDER_NONE, },
doc="Kind of border to show (some will have no effect"
" depending on control and platform)",
default='default')
transparent = StyleSpec(wx.TRANSPARENT_WINDOW, default=False,
doc="will not receive paint events. Windows only")
# Events:
onfocus = EventSpec('focus', binding=wx.EVT_SET_FOCUS, kind=FocusEvent)
onblur = EventSpec('blur', binding=wx.EVT_KILL_FOCUS, kind=FocusEvent)
onmousemove = EventSpec('mousemove', binding=wx.EVT_MOTION, kind=MouseEvent)
onmouseover = EventSpec('mouseover', binding=wx.EVT_ENTER_WINDOW, kind=MouseEvent)
onmouseout = EventSpec('mouseout', binding=wx.EVT_LEAVE_WINDOW, kind=MouseEvent)
onmousewheel = EventSpec('mousewheel', binding=wx.EVT_MOUSEWHEEL, kind=MouseEvent)
onmousedown = EventSpec('mousedown', binding=(wx.EVT_LEFT_DOWN,
wx.EVT_MIDDLE_DOWN,
wx.EVT_RIGHT_DOWN),
kind=MouseEvent)
onmousedclick = EventSpec('mousedclick', binding=(wx.EVT_LEFT_DCLICK,
wx.EVT_MIDDLE_DCLICK,
wx.EVT_RIGHT_DCLICK),
kind=MouseEvent)
onmouseup = EventSpec('mouseup', binding=(wx.EVT_LEFT_UP,
wx.EVT_MIDDLE_UP,
wx.EVT_RIGHT_UP),
kind=MouseEvent)
onkeypress = EventSpec('keypress', binding=wx.EVT_CHAR, kind=KeyEvent)
onkeydown = EventSpec('keydown', binding=wx.EVT_KEY_DOWN, kind=KeyEvent)
onkeyup = EventSpec('keyup', binding=wx.EVT_KEY_UP, kind=KeyEvent)
class Control(ControlSuper):
"This is the base class for a control"
sizer_border = DimensionSpec(_name="_sizer_border", default=0,
type="integer", group="sizer",
doc="empty space arround a item, used by the sizer")
sizer_align = DimensionSpec(mapping={'left': wx.ALIGN_LEFT,
'top': wx.ALIGN_TOP,
'center': wx.ALIGN_CENTER,
'right': wx.ALIGN_RIGHT,
'bottom': wx.ALIGN_BOTTOM, },
default='left', _name="_sizer_align", type="enum", group="sizer",
doc="alignment within the space allotted to it by the sizer")
sizer_flags = DimensionSpec(mapping={'left': wx.LEFT,
'top': wx.TOP,
'right': wx.RIGHT,
'bottom': wx.BOTTOM,
'all': wx.ALL,
'horiz': wx.LEFT | wx.RIGHT,
'vert': wx.TOP | wx.BOTTOM,
},
default='all', _name="_sizer_flags", type="enum", group="sizer",
doc="Side(s) of the sizer item that the border width will apply to")
sizer_expand = DimensionSpec(_name="_sizer_expand",
default=False, type='boolean', group="sizer",
doc="The item will be expanded to fill the space allotted to it")
sizer_col = DimensionSpec(_name="_sizer_col", default=0,
type="integer", group="sizer.pos",
doc="Column index (postion) for the item in the virtual Grid Sizer")
sizer_row = DimensionSpec(_name="_sizer_row", default=0,
type="integer", group="sizer.pos",
doc="Row index (postion) for the item in the virtual Grid Sizer")
sizer_colspan = DimensionSpec(_name="_sizer_colspan", default=1,
type="integer", group="sizer.span",
doc="Columns that the item occupies one cell in each direction")
sizer_rowspan = DimensionSpec(_name="_sizer_rowpan", default=1,
type="integer", group="sizer.span",
doc="Row that the item occupies one cell in each direction")
class ImageBackgroundMixin(object):
"Tiled background image support"
__metaclass__ = ComponentBase
def _set_image(self, image):
if DEBUG: print "using image...", image, self._meta.name
self._image_filename = image
# KEA 2001-07-27
# Load the bitmap once and keep it around
# this could fail, so should be a try/except.
if image:
self._bitmap = Bitmap(image)
# bind only one time (but designer could had destroyed the wx_obj!)
if not hasattr(self.wx_obj, "image_bg_mixin_bound"):
            # only bind ourselves, don't catch children's erase/destroy
self.wx_obj.Bind(wx.EVT_ERASE_BACKGROUND,
self.__on_erase_background, self.wx_obj)
self.wx_obj.Bind(wx.EVT_WINDOW_DESTROY,
self.__on_destroy, self.wx_obj)
self.wx_obj.image_bg_mixin_bound = True
else:
# clean up the image
self._bitmap = None
def _get_image(self):
return getattr(self, "_image_filename", None) or ""
def __tile_background(self, dc):
"make several copies of the background bitmap"
sz = self.wx_obj.GetClientSize()
bmp = self._bitmap.get_bits()
w = bmp.GetWidth()
h = bmp.GetHeight()
if isinstance(self, wx.ScrolledWindow):
# adjust for scrolled position
spx, spy = self.wx_obj.GetScrollPixelsPerUnit()
vsx, vsy = self.wx_obj.GetViewStart()
dx, dy = (spx * vsx) % w, (spy * vsy) % h
else:
            dx, dy = (0, 0)  # no scroll offset
x = -dx
while x < sz.width:
y = -dy
while y < sz.height:
dc.DrawBitmap(bmp, x, y)
y = y + h
x = x + w
def __on_destroy(self, event):
if event.EventObject == self.wx_obj:
# memory leak cleanup
self._bitmap = None
def __on_erase_background(self, evt):
"Draw the image as background"
if self._bitmap:
dc = evt.GetDC()
if not dc:
dc = wx.ClientDC(self)
r = self.wx_obj.GetUpdateRegion().GetBox()
dc.SetClippingRegion(r.x, r.y, r.width, r.height)
if self._background_tiling:
self.__tile_background(dc)
else:
dc.DrawBitmapPoint(self._bitmap.get_bits(), (0, 0))
image = Spec(lambda self: self._get_image(),
lambda self, value: self._set_image(value),
doc="image (filename) used as background",
type="image_file")
tiled = Spec(_name="_background_tiling", default=False,
doc="image background tiling", type='boolean')
class SubComponent(object):
"Base class to use in complex controls (like ListView)"
__metaclass__ = ComponentBase
wx_obj = None # no wx object is related
def __iter__(self):
return [].__iter__() # we have no children (designer!)
def __getitem__(self, key):
return None
def __init__(self, parent=None, **kwargs):
# if using context manager, use the parent reference from the creation stack:
if not parent and STACK:
parent = STACK[-1]
if DEBUG: print "using parent stack ", parent
# set up the properties:
for spec_name, spec in self._meta.specs.items():
if not spec.read_only:
value = kwargs.get(spec_name, spec.default)
setattr(self, spec_name, value)
self.set_parent(parent, init=True)
def set_parent(self, new_parent, init=False):
"Associate the component to the control (it could be recreated)"
# store gui reference inside of wx object (this will enable rebuild...)
self._parent = get(new_parent, init=False) # store new parent
if init:
self._parent[self._name] = self # add child reference
def rebuild(self, **kwargs):
"Update a property value with (used by the designer)"
for name, value in kwargs.items():
setattr(self, name, value)
def destroy(self, **kwargs):
pass
def __repr__(self, prefix="gui", parent="", indent=0):
return represent(self, prefix, parent, indent)
parent = Spec(lambda self: self._parent.name,
optional=False, default="",
doc="parent window (used internally)")
# Auxiliary functions & classes:
def new_id(id=None):
if id is None or id == -1:
return wx.NewId()
else:
return id
sort_order_map = {InitSpec: 1, DimensionSpec: 3, StyleSpec: 2, EventSpec: 5, Spec: 4}
def get_sort_key((name, spec)):
return sort_order_map.get(spec.__class__, 6), name
def represent(obj, prefix, parent="", indent=0, context=False, max_cols=80):
"Construct a string representing the object"
try:
name = getattr(obj, "name", "")
class_name = "%s.%s" % (prefix, obj.__class__.__name__)
padding = len(class_name) + 1 + indent * 4 + (5 if context else 0)
params = []
for (k, spec) in sorted(obj._meta.specs.items(), key=get_sort_key):
if k == "index": # index is really defined by creation order
continue # also, avoid infinite recursion
if k == "parent" and parent != "":
v = parent
else:
v = getattr(obj, k, "")
if (not isinstance(spec, InternalSpec)
and v != spec.default
and (k != 'id' or v > 0)
and isinstance(v,
(basestring, int, long, float, bool, dict, list,
decimal.Decimal,
datetime.datetime, datetime.date, datetime.time,
Font, Color))
and repr(v) != 'None'
):
v = repr(v)
else:
v = None
if v is not None:
params.append("%s=%s" % (k, v))
param_lines = []
line = ""
for param in params:
if len(line + param) + 3 > max_cols - padding:
param_lines.append(line)
line = ""
line += param + ", "
param_lines.append(line)
param_str = ("\n%s" % (" " * padding)).join(param_lines)
return "%s(%s)" % (class_name, param_str)
    except Exception:
        # uninitialized, use standard representation to not break debuggers
        return object.__repr__(obj)
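# Illustrative output of represent() for a hypothetical button component
# (the exact parameters depend on which specs differ from their defaults):
#   gui.Button(name='ok', left='10', top='10', label='OK')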
def get(obj_name, init=False):
"Find an object already created"
wx_parent = None
# check if new_parent is given as string (useful for designer!)
if isinstance(obj_name, basestring):
# find the object reference in the already created gui2py objects
# TODO: only useful for designer, get a better way
obj_parent = COMPONENTS.get(obj_name)
if not obj_parent:
# try to find window (it can be a plain wx frame/control)
wx_parent = wx.FindWindowByName(obj_name)
if wx_parent:
# store gui object (if any)
obj_parent = getattr(wx_parent, "obj")
else:
# fallback using just object name (backward compatibility)
for obj in COMPONENTS.values():
if obj.name==obj_name:
obj_parent = obj
else:
obj_parent = obj_name # use the provided parent (as is)
return obj_parent or wx_parent # new parent
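# Usage sketch (hypothetical window name):
#   obj = get("mainwindow")  # gui2py component from COMPONENTS, or the
#                            # plain wx window found via wx.FindWindowByName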
if __name__ == "__main__":
# basic test until unit_test
app = wx.App(redirect=False)
frame = wx.Frame(None)
w = Component(frame, name="test")
# test representation:
print w
assert w.get_parent() is frame
assert w.id != -1 # wx should have assigned a new id!
assert w.name == "test"
w.font = dict(face="ubuntu")
assert w.font.face == "ubuntu"
# check container methods:
ct1 = Control(w, name="test_ctrl1")
ct2 = Control(w, name="test_ctrl2")
ct2.__init__(name="chau!") # recreate the control (!= name)
assert ct1.index == 0
assert ct2.index == 1
names = []
for c in w:
names.append(c.name)
assert names == ["test_ctrl1", "chau!"]
|
reingart/gui2py
|
gui/component.py
|
Python
|
lgpl-3.0
| 51,589
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-11-28 13:41
# flake8: noqa
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import normandy.recipes.validators
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('recipes', '0033_migrate_surveys'),
]
operations = [
migrations.CreateModel(
name='RecipeRevision',
fields=[
('id', models.CharField(max_length=64, primary_key=True, serialize=False)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('comment', models.TextField()),
('name', models.CharField(max_length=255)),
('arguments_json', models.TextField(default='{}', validators=[normandy.recipes.validators.validate_json])),
('filter_expression', models.TextField()),
('action', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recipe_revisions', to='recipes.Action')),
('parent', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='child', to='recipes.RecipeRevision')),
],
),
migrations.RemoveField(
model_name='approval',
name='creator',
),
migrations.RemoveField(
model_name='approvalrequest',
name='approval',
),
migrations.RemoveField(
model_name='approvalrequest',
name='creator',
),
migrations.RemoveField(
model_name='approvalrequest',
name='recipe',
),
migrations.RemoveField(
model_name='approvalrequestcomment',
name='approval_request',
),
migrations.RemoveField(
model_name='approvalrequestcomment',
name='creator',
),
migrations.AlterModelOptions(
name='recipe',
options={'ordering': ['-enabled', '-latest_revision__updated']},
),
migrations.RemoveField(
model_name='recipe',
name='approval',
),
migrations.DeleteModel(
name='Approval',
),
migrations.DeleteModel(
name='ApprovalRequest',
),
migrations.DeleteModel(
name='ApprovalRequestComment',
),
migrations.AddField(
model_name='reciperevision',
name='recipe',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='revisions', to='recipes.Recipe'),
),
migrations.AddField(
model_name='reciperevision',
name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='recipe_revisions', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='recipe',
name='latest_revision',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='latest_for_recipe', to='recipes.RecipeRevision'),
),
migrations.AlterField(
model_name='recipe',
name='action',
field=models.ForeignKey(to='recipes.Action', null=True),
),
migrations.AlterField(
model_name='recipe',
name='name',
field=models.CharField(max_length=255, unique=False, null=True),
),
]
|
Osmose/normandy
|
recipe-server/normandy/recipes/migrations/0034_recipe_revisions.py
|
Python
|
mpl-2.0
| 3,780
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
#Art Rand
"""
This is a module of FASTA/.qual and FASTQ parsers.
The functions in this module are intended for use with biological sequence data
(DNA, and amino acid) in the form of FASTA, .qual (which accompany FASTA),
and FASTQ files. There are two kinds of functions in this module, Readers and
Makers.
Readers:
In general, a reader function accepts an argument which points to the file(s)
to be parsed and then any options associated with that kind of file.
For example:
To parse a FASTQ file (ex, fake.fq64) that has phred-64/solexa quality encoding;
>>>read_fastq('fake.fq64', offset=64, solexa=True)
This function yields a tuple with the internal format:
('sequence identifier', 'comment', 'sequence', [Q-value])
'sequence identifier' = the string following the '@' or '>' in FASTA up
until the first space (' ')
'comment' = everything else on the first line after the sequence title
'sequence' = the sequence (nucleotide, FASTA, FASTQ or amino acid FASTA)
[Q-value] = all of the functions in this module convert to a common quality
score which is defined as Q = -10*log10(P(error))
Makers:
These functions are intended to be used with the Reader functions included in
this module. In general they accept an argument in the form of a generator
(the Reader function), an output destination (default is sys.stdout) and any
options associated with making that kind of file (ex. FASTQ offset).
For example:
To make a FASTA and .qual file from a FASTQ with phred-64 quality encoding that
prints to two files out.fasta and out.qual;
>>>make_fasta_and_qual(read_fastq('fake.fq64', offset=64),
outfasta='out.fasta',
                       outqual='out.qual')
Limitations (and expansions):
None of these parsers check to make sure the files make sense. In other words,
if you have a FASTQ with letters that aren't in the alphabet associated with
nucleotide sequence, the parser will not recognize them. Similarly with quality
values, these parsers will not guess what encoding the input file is using or
sanity check whether the quality values make sense.
Basically, if you put garbage in, you'll get garbage out. But hopefully it will
be correctly parsed and formatted garbage.
Some edge conditions are accepted; a missing identifier string, for example,
will simply propagate as an empty string for the title. If you're only
interested in converting from one format to another quickly, then this
shouldn't be too much
of a problem. If there is no sequence data, these entries will propagate as
empty entries.
"""
import sys
import re
from math import log10
from itertools import groupby
def read_fasta(fasta_file, ignore_case=True):
"""
Generic FASTA parser.
Input:
Takes one argument, which can point to a file-type or a generator that
yields one line at a time in FASTA format.
Output:
An entry is identified by alternating '>sequence_id comment' then 'sequence'
on a new line optionally followed by '>sequence2_id comment' and so forth.
For each entry, a tuple ('title', 'comment', 'sequence') is yielded.
This function is not used directly in the 'maker' functions that follow, but
is used in the read_fasta_with_quality() function which follows.
    This code was adopted from 'brentp'
URL: https://www.biostars.org/p/710/
"""
file = open(fasta_file, 'r')
# file=fasta_file
# fasta_iter groups each line from the input fasta file by whether or not
# the line starts with '>' , returning a tuple (bool, line) where bool
# indicates if the line started with '>' and line is a string from the
# file.
fasta_iter = (x[1] for x in groupby(file,
lambda line: line.startswith(">"))
)
# This for-loop takes the two groups of line(s) from fasta_iter. The ID
    # lines are separated into the 'title' and the 'comment'. The following group
# of lines (the sequence) is joined into a single line and assigned to the
# variable 'seq'. 'title', 'comment', and 'seq' are yielded as a tuple in
# that order.
for lgroup in fasta_iter:
lgroup = lgroup.next()[1:].strip()
lgroup = lgroup.split(' ', 1)
title = lgroup[0]
comment = ''
if len(lgroup) > 1:
comment = lgroup[1]
seq = "".join(s.strip() for s in fasta_iter.next())
seq = seq.replace(" ", "")
if ignore_case:
seq = seq.upper()
fasta = (title, comment, seq)
yield fasta
def read_fasta_with_quality(fasta_file, qual_file):
"""
Generic FASTA and .qual parser.
Input:
Takes two arguments; fasta_file which can be a file-type or a generator that
yields one line at a time in FASTA format and qual_file which contains the
quality data as whitespace separated integers for the associated FASTA file.
Output:
An entry is identified by alternating '>sequence_id comment' then
'sequence/qual-value' on a new line optionally followed by '>sequence2_id
comment' and so forth. For each entry a tuple ('title', 'comment',
'sequence', [Q-value]) is yielded.
This function is used by the 'maker' functions that follow.
    This code was adopted from brentp
URL: https://www.biostars.org/p/710/
"""
qual_file = open(qual_file, 'r')
quals = {}
# Identical to fast_iter in the read_fasta() function
qual_iter = (x[1] for x in groupby(qual_file,
lambda line: line.startswith(">"))
)
# This for-loop takes the two groups of lines from qual_iter. The ID lines
    # are separated into the 'title' and the 'comment' and the 'comment' is
# discarded. The following group of lines (quality data) is put into a
# list of integers called 'q'. 'q' is assigned to the value in a dict with
# 'title' as the key.
for group in qual_iter:
group = group.next()[1:].strip()
group = group.split(' ', 1)
qual_title = group[0]
q = " ".join(s.strip() for s in qual_iter.next())
q = [int(s) for s in q.split()]
quals[qual_title] = q
    # The quality values (as a list) are appended to the tuple yielded by the
# read_fasta() function to give the internal format
# ('title', 'comment', 'seq', [Q-value]) and this tuple is yielded
for fq in read_fasta(fasta_file):
fq = fq + (quals.get(fq[0]), )
yield fq
def read_fastq(fastq_file, offset=33, solexa=False):
"""
Generic FASTQ parser.
Input:
    Takes three arguments; fastq_file, which can be a file-type or a generator
that yields one line at a time in FASTQ format, an offset for phred-33 and
phred-64 (which is simply equal to whichever encoding the fastq_file has)
and an optional solexa=True/False argument for phred-64/solexa encoding.
Output:
    Each entry is identified by its '@SEQID comment', 'sequence',
'+optional_id comment', 'quality characters'. The quality characters are
converted into Q-values based on the offset/solexa arguments and the
'title', 'comment', 'sequence', and [Q-value] are yielded.
This function is used by the 'maker' functions that follow.
This code was adopted from 'Gareth Rees' on stackexchange
URL": http://codereview.stackexchange.com/questions/32897/efficient-parsing-of-fastq
"""
# These four lines are a set of regular expressions that are used to
# identify the lines from the generator by whether or not they match
# a pattern.
at_seqname_re = re.compile(r'@(.+)$')
sequence_re = re.compile(r'[!-*,-~]*$')
plus_seqname_re = re.compile(r'\+(.*)$')
quality_re = re.compile(r'[!-ÿ]*$')
lines = open(fastq_file)
# This is the main loop of the function that iterates through each line of
# the FASTQ file and groups the lines into 'title', 'comment', 'sequence'
# and [Q-Value].
for line in lines:
seqname, comment = [], ''
m = at_seqname_re.match(line)
if not m: #make title empty string in the case of no title in file
title = ''
if m: #split indentifier line into 'title' and 'comment'
seqname = m.group(1)
seqname = seqname.split(' ', 1)
title = seqname[0]
comment = ''
if len(seqname) > 1:
comment = seqname[1]
# identify lines that are sequence
try:
sequence = []
for line in lines:
m = sequence_re.match(line)
if not m:
break
sequence.append(m.group(0))
if not sequence:
break
# identify lines that are quality-identifier ('+SEQNAME')
m = plus_seqname_re.match(line)
# This is here to build the contingency that there is no '+'
# if not m:
# print "Expected +<seqname>"
# if m.group(1) not in ['', title]:
# print "Not the correct seqname for qual"
# identify lines that are quality characters
quality = []
n = sum(map(len, sequence))
while n > 0:
line = next(lines)
m = quality_re.match(line)
                if not m:
                    print >> sys.stderr, "No quality characters"
                    break
n -= len(m.group(0))
if n < 0:
print >> sys.stderr, "quality longer than sequence"
quality.append(m.group(0))
qlist = list(''.join(quality))
o = int(offset)
qlist = [ord(i)-o for i in qlist] # this is where conversion from
# character to phred takes place
            if solexa:  # this is where we convert from solexa to sanger
                qlist = [int(round(10*log10(10**(i/10.0) + 1))) for i in qlist]
# this is left for debugging
# print qlist
# print seqname, ''.join(sequence), qlist
# if not ''.join(sequence):
# break
fsq = (title, comment, ''.join(sequence), qlist)
# print fsq
yield fsq
except StopIteration:
print "End of input"
def make_fasta(input_fcn, outfasta=sys.stdout):
"""
FASTA 'Maker' function.
Input:
    Takes two arguments. The first, 'input_fcn', is a generator (a 'Reader'
    function) from above, although any generator that yields ('string',
'string', 'string', [list of integers]) will work. The second, 'outfasta'
is the destination FASTA file or defaults to sys.stdout.
Output:
Default output is to sys.stdout. Generates a string with the format:
>'title' 'comment'
sequence
>'title2' 'comment2'
...
    NOTE: the quality data from the 'input_fcn' is discarded. Also, this
    function will not work directly with the read_fasta() function, so:
    >>>make_fasta(read_fasta('f.fa'), outfasta='newfasta.fa') will NOT work.
A fasta_to_fasta function is in the works, the utility of which would only
be to re-format FASTA files for later use.
"""
# Check for non-default output
if outfasta != sys.stdout:
outfasta = open(outfasta, 'a')
# For each tuple yielded by the input_fcn, write a FASTA-formatted string
for title, comment, seq, qual in input_fcn:
print >> outfasta, ">%s %s\n%s" % (title, comment, seq)
def make_fasta_and_qual(input_fcn, outfasta=sys.stdout, outqual=sys.stdout):
"""
FASTA and .qual 'maker' function.
Input:
    Takes three arguments. The first, 'input_fcn', is a generator (a 'Reader'
    function) from above, although any generator that yields ('string',
'string', 'string', [list of integers]) will work. The second, 'outfasta'
is the destination FASTA file or defaults to sys.stdout. The third,
'outqual' is the destination quality file or defaults to sys.stdout
Output:
Default output is to sys.stdout. Generates a string with the format:
>'title' 'comment'
Sequence1
>'title' 'comment'
    Q-values separated by ' '
>'title2', 'comment2'
Sequence2
...
NOTE: In the example both quality and sequence information are sent to
sys.stdout, but they can be routed to different output locations.
"""
# Check for non-default output
if outfasta != sys.stdout:
outfasta = open(outfasta, 'a')
if outqual != sys.stdout:
outqual = open(outqual, 'a')
# For each tuple yielded by the input_fcn, write a FASTA-formatted string
# and a FASTA-formatted .qual file
for title, comment, seq, qual in input_fcn:
print >> outfasta, ">%s %s\n%s" % (title, comment, seq)
print >> outqual, ">%s %s\n%s" % (title, comment,
str(qual).strip('[]').
replace(',', ' '))
def make_fastq(input_fcn, outfastq=sys.stdout, offset=33):
"""
FASTQ 'maker' function.
Input:
    Takes three arguments. The first, 'input_fcn', is a generator (a 'Reader'
    function) from above. The second, 'outfastq' is the destination FASTQ file
or defaults to sys.stdout. The third, 'offset' is the phred-33 or phred-64
offset.
Output:
Default output is to sys.stdout. Generates a string with the format:
@'title' 'comment'
sequence
+'title'
Quality characters in phred-33 or phred-64 encoding
@'title2' 'comment2'
...
    NOTE: there is no option to encode in phred-64/solexa. The 'title' string is
    added to the quality identifier line (+'title') even if it does not occur in
    the input file; the comment is not appended.
"""
# Check for non-default output
if outfastq != sys.stdout:
outfastq = open(outfastq, 'a')
# For each tuple yielded by the input_fcn, write a FASTQ-formatted string
for title, comment, seq, qual in input_fcn:
qual = [chr(i+offset) for i in qual]
print >> outfastq, "@%s %s\n%s\n+%s\n%s" % (title, comment, seq,
title,
''.join(qual))
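# End-to-end sketch combining the readers and makers above (hypothetical
# filenames): convert a phred-64 FASTQ into a phred-33 FASTQ:
#
#   make_fastq(read_fastq('in.fq64', offset=64), outfastq='out.fq33', offset=33)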
|
mitenjain/signalAlign
|
src/signalalign/utils/parsers.py
|
Python
|
mit
| 14,540
|
import chaospy
import numpy
import pytest
@pytest.fixture
def collocation_model(expansion_small, samples_small, evaluations_small):
return chaospy.fit_regression(expansion_small, samples_small, evaluations_small)
def test_collocation_mean(collocation_model, joint, true_mean):
assert numpy.allclose(chaospy.E(collocation_model, joint), true_mean, rtol=1e-6)
def test_regression_variance(collocation_model, joint, true_variance):
assert numpy.allclose(chaospy.Var(collocation_model, joint), true_variance, rtol=1e-5)
|
jonathf/chaospy
|
tests/test_point_collocation.py
|
Python
|
mit
| 534
|
from __future__ import absolute_import, unicode_literals
import os
from django import VERSION as DJANGO_VERSION
from django.utils.translation import ugettext_lazy as _
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for conveniently
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
# ADMIN_MENU_ORDER = (
# ("Content", ("pages.Page", "blog.BlogPost",
# "generic.ThreadedComment", (_("Media Library"), "media-library"),)),
# ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
# ("Users", ("auth.User", "auth.Group",)),
# )
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
# ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
# ("comment_tags.recent_comments",),
# ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
# (1, _("Top navigation bar"), "pages/menus/dropdown.html"),
# (2, _("Left-hand tree"), "pages/menus/tree.html"),
# (3, _("Footer"), "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.models.db.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
# (
# # Dotted path to field.
# "mezzanine.blog.models.BlogPost.image",
# # Dotted path to field class.
# "somelib.fields.ImageField",
# # Positional args for field class.
# (_("Image"),),
# # Keyword args for field class.
# {"blank": True, "upload_to": "blog"},
# ),
# # Example of adding a field to *all* of Mezzanine's content types:
# (
# "mezzanine.pages.models.Page.another_field",
# "IntegerField", # 'django.db.models.' is implied if path is omitted.
# (_("Another name"),),
# {"blank": True, "default": 1},
# ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, the django-modeltranslation will be added to the
# INSTALLED_APPS setting.
USE_MODELTRANSLATION = False
########################
# MAIN DJANGO SETTINGS #
########################
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['localhost', '127.0.0.1', '111.222.333.444']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"
# Supported languages
LANGUAGES = (
('en', _('English')),
)
# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
DEBUG = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644
#############
# DATABASES #
#############
DATABASES = {
"default": {
# Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.",
# DB name or path to database file if using sqlite3.
"NAME": "cloudSolarDB",
# Not used with sqlite3.
"USER": "valia",
# Not used with sqlite3.
"PASSWORD": "scenetwork",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "localhost",
# Set to empty string for default. Not used with sqlite3.
"PORT": "5432",
}
}
#########
# PATHS #
#########
# Full filesystem path to the project.
PROJECT_APP_PATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_APP = os.path.basename(PROJECT_APP_PATH)
PROJECT_ROOT = BASE_DIR = os.path.dirname(PROJECT_APP_PATH)
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_APP
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_APP
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
os.path.join(PROJECT_ROOT, "templates")
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.static",
"django.template.context_processors.media",
"django.template.context_processors.request",
"django.template.context_processors.tz",
"mezzanine.conf.context_processors.settings",
"mezzanine.pages.context_processors.page",
],
"builtins": [
"mezzanine.template.loader_tags",
],
},
},
]
if DJANGO_VERSION < (1, 9):
del TEMPLATES[0]["OPTIONS"]["builtins"]
################
# APPLICATIONS #
################
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.redirects",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"mezzanine.boot",
"mezzanine.conf",
"mezzanine.core",
"mezzanine.generic",
"mezzanine.pages",
"mezzanine.blog",
"mezzanine.forms",
"mezzanine.galleries",
"mezzanine.twitter",
# "mezzanine.accounts",
# "mezzanine.mobile",
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
"mezzanine.core.middleware.UpdateCacheMiddleware",
'django.contrib.sessions.middleware.SessionMiddleware',
# Uncomment if using internationalisation or localisation
# 'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
"mezzanine.core.request.CurrentRequestMiddleware",
"mezzanine.core.middleware.RedirectFallbackMiddleware",
"mezzanine.core.middleware.TemplateForDeviceMiddleware",
"mezzanine.core.middleware.TemplateForHostMiddleware",
"mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
"mezzanine.core.middleware.SitePermissionMiddleware",
# Uncomment the following if using any of the SSL settings:
# "mezzanine.core.middleware.SSLRedirectMiddleware",
"mezzanine.pages.middleware.PageMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
)
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
"debug_toolbar",
"django_extensions",
"compressor",
PACKAGE_NAME_FILEBROWSER,
PACKAGE_NAME_GRAPPELLI,
)
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
# Instead of doing "from .local_settings import *", we use exec so that
# local_settings has full access to everything defined in this module.
# Also force into sys.modules so it's visible to Django's autoreload.
f = os.path.join(PROJECT_APP_PATH, "local_settings.py")
if os.path.exists(f):
import sys
import imp
module_name = "%s.local_settings" % PROJECT_APP
module = imp.new_module(module_name)
module.__file__ = f
sys.modules[module_name] = module
exec(open(f, "rb").read())
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
pass
else:
set_dynamic_settings(globals())
|
nikdval/cloudSolar
|
solarApp/solar/settings.py
|
Python
|
artistic-2.0
| 11,845
|
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y' # 04 نوفمبر 2020
TIME_FORMAT = 'g:i A'
# DATETIME_FORMAT =
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd/m/Y'
# SHORT_DATETIME_FORMAT =
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
kaedroho/django
|
django/conf/locale/ar_DZ/formats.py
|
Python
|
bsd-3-clause
| 728
|
from .navigation import Navigation, MenuItem
shared = MenuItem('Logout', 'account_logout')
member = MenuItem('Clients', 'users:list')
checkin = MenuItem('Check-ins', url='checkin:list')
template = MenuItem('Template', url='checkin:list')
leaderboard = MenuItem('Lifestyle Leaderboard', url='lifestyle:list')
library = MenuItem('Library', url='library:list')
# lease = MenuItem('Lease')
# lease.add_sub_menu_item('New Lease', 'account_login')
# lease.add_sub_menu_item('View Lease', 'invite:new')
# trip = MenuItem('Trip')
# trip.add_sub_menu_item('New Trip', 'trips:add')
# trip.add_sub_menu_item('View Trips', 'trips:view')
# wish = MenuItem('Wish')
# wish.add_sub_menu_item('Add Wish', 'wish:add')
# wish.add_sub_menu_item('View Wish', 'wish:list')
# properties = MenuItem('Property')
# properties.add_sub_menu_item('Add Property', 'property:add')
# side_agent = Navigation()
# side_agent.add_menu(member)
# side_agent.add_menu(lease)
# side_agent.add_menu(trip)
# side_agent.add_menu(wish)
# side_agent.add_menu(properties)
# top_agent = Navigation()
# top_agent.add_menu(shared)
# top_client = Navigation()
# top_client.add_menu(shared)
coach_navTop = Navigation()
coach_navTop.add_menu(shared)
coach_navLeft = Navigation()
coach_navLeft.add_menu(member)
coach_navLeft.add_menu(library)
coach_navLeft.add_menu(leaderboard)
############## Client ############
client_navTop = Navigation()
client_navTop.add_menu(shared)
client_navLeft = Navigation()
client_navLeft.add_menu(checkin)
client_navLeft.add_menu(library)
client_navLeft.add_menu(leaderboard)
# profile = MenuItem('Profile')
# profile.add_sub_menu_item('Account Information')
# profile.add_sub_menu_item('Measurments')
# profile.add_sub_menu_item('Payment Informaiton')
# Goal = MenuItem('Template')
# handbook = MenuItem('Bodzii Handbook')
# check = MenuItem('Check-in')
# check.add_sub_menu_item('Perform Checkin')
# check.add_sub_menu_item('Current week') # Template
# check.add_sub_menu_item('Previous week') # report of the checkin and any coachs comments
# meal = MenuItem('Meal Plan')
|
airportmarc/the416life
|
src/apps/utls/navigation/navigationBuild.py
|
Python
|
mit
| 2,085
|
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
# A reminder: The 2 fields, 'file_urls' and 'files' are special fields
# required by scrapy pipeline for storing files to local disk.
# See https://groups.google.com/forum/print/msg/scrapy-users/kzGHFjXywuY/O6PIhoT3thsJ
# for more details and usage.
from scrapy.item import Item, Field
class TypedItem(Item):
""" Quick extension so you can define type_name on an Item and
it will get spit out in the JSON format. Replace this if
you find a better way of achieving this.
"""
type = Field()
def __init__(self, *args, **kwargs):
super(TypedItem, self).__init__(*args, **kwargs)
self['type'] = self.type_name
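# Illustrative sketch (hypothetical subclass): any subclass that defines
# type_name automatically carries a 'type' field when exported, e.g.
#
#   class ExampleItem(TypedItem):
#       type_name = "ExampleItem"
#       title = Field()
#
#   ExampleItem(title="foo")  # -> {'type': 'ExampleItem', 'title': 'foo'}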
#########################################################
# ___ _ _
# / _ \ _ _ ___ ___| |_(_) ___ _ __ ___
# | | | | | | |/ _ \/ __| __| |/ _ \| '_ \/ __|
# | |_| | |_| | __/\__ \ |_| | (_) | | | \__ \
# \__\_\\__,_|\___||___/\__|_|\___/|_| |_|___/
#
# Note that these questions-related items are not used yet
class QuestionRecordQuestion(TypedItem):
type_name = "QuestionRecordQuestion"
question_no = Field()
# The member who raised the question
raised_by = Field()
# Was the question originally asked in English or translated?
was_translated = Field()
# Was a written reply expected?
written_reply = Field()
# The opening text of the question, before the specific articles
question_text = Field()
# The question articles. This is a list of items in the form
# [ (article_id, text), (article_id, text), ... ]
articles = Field()
# The officers who are to reply to this question
# Currently just a blob of text
officer_to_reply = Field()
class QuestionRecordSubsidiaryPapers(TypedItem):
type_name = "QuestionRecordSubsidiaryPapers"
item_no = Field()
title = Field()
ln_no = Field()
class QuestionRecordOtherPapers(TypedItem):
type_name = "QuestionRecordOtherPapers"
item_no = Field()
title = Field()
presented_by = Field()
#########################################################
# _ _ _
# | | | | __ _ _ __ ___ __ _ _ __ __| |
# | |_| |/ _` | '_ \/ __|/ _` | '__/ _` |
# | _ | (_| | | | \__ \ (_| | | | (_| |
# |_| |_|\__,_|_| |_|___/\__,_|_| \__,_|
#
class HansardAgenda(TypedItem):
    #### Deprecated ####
""" This will need developing, as it is structured in HTML """
type_name = "HansardAgenda"
date = Field()
source_url = Field()
file_urls = Field()
files = Field()
class HansardMinutes(TypedItem):
""" This contains a file of the mintues """
type_name = "HansardMinutes"
date = Field()
source_url = Field()
file_urls = Field()
files = Field()
class HansardRecord(TypedItem):
""" This will contain a DOC file for the record for the date """
type_name = "HansardRecord"
date = Field()
source_url = Field()
file_urls = Field()
files = Field()
#####
# Library related items, including agenda and hansard at the moment
#####
#Common item for Library database
class LibraryResultPage(TypedItem):
"""
Stores individual results pages, for debugging
"""
type_name = "LibraryResultPage"
# Title of the link
title = Field()
link = Field()
# Page the link was found on
browse_url = Field()
    # The type of document this page should be for
document_type = Field()
class LibraryAgenda(TypedItem):
"""
Library record for Council Meeting agendas
"""
type_name = "LibraryAgenda"
title_en = Field()
title_cn = Field()
# List of (title, link) pairs
links = Field()
file_urls = Field()
files = Field()
# The URL to the source page
source_url = Field()
class LibraryHansard(TypedItem):
"""
Library record for Council Meeting Hansard
"""
type_name = "LibraryHansard"
title_en = Field()
title_cn = Field()
# List of (title, link) pairs
links = Field()
file_urls = Field()
files = Field()
# The URL to the source page
source_url = Field()
|
comsaint/legco-watch
|
app/raw/scraper/items.py
|
Python
|
mit
| 4,319
|
##
# Copyright (C) 2018 Jessica Tallon & Matt Molyneaux
#
# This file is part of Inboxen.
#
# Inboxen is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inboxen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Inboxen. If not, see <http://www.gnu.org/licenses/>.
##
import ipaddress
IPV4_HOST_PREFIX = 32
IPV6_HOST_PREFIX = 64
def strip_ip(ip_addr, ipv4_host_prefix=IPV4_HOST_PREFIX, ipv6_host_prefix=IPV6_HOST_PREFIX):
"""Strip an IP address back to the largest routable part
For IPv4, this is the full 32 bits. For IPv6 this is the first 64 bits.
"""
ip = ipaddress.ip_address(ip_addr)
prefix_len = ip.max_prefixlen
if isinstance(ip, ipaddress.IPv4Address):
remove_bits = prefix_len - ipv4_host_prefix
elif isinstance(ip, ipaddress.IPv6Address):
remove_bits = prefix_len - ipv6_host_prefix
else:
raise ValueError("Not an IPv4 or IPv6 address")
# the netmask should be a series of 1s for the bits we want to
# keep followed by 0s for the bits we don't
netmask = 2**prefix_len - 2**remove_bits
prefix = int(ip) & netmask
prefix = ip.__class__(prefix)
return str(prefix)
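# Illustrative usage (hypothetical addresses, not part of the module):
#   >>> strip_ip("192.0.2.123")          # IPv4 keeps all 32 bits
#   '192.0.2.123'
#   >>> strip_ip("2001:db8::dead:beef")  # IPv6 keeps only the first /64
#   '2001:db8::'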
|
Inboxen/Inboxen
|
inboxen/utils/ip.py
|
Python
|
agpl-3.0
| 1,663
|
def test(fmt, *args):
print('{:8s}'.format(fmt) + '>' + fmt.format(*args) + '<')
test("{:10.4}", 123.456)
test("{:10.4e}", 123.456)
test("{:10.4e}", -123.456)
#test("{:10.4f}", 123.456)
#test("{:10.4f}", -123.456)
test("{:10.4g}", 123.456)
test("{:10.4g}", -123.456)
test("{:10.4n}", 123.456)
test("{:e}", 100)
test("{:f}", 200)
test("{:g}", 300)
test("{:10.4E}", 123.456)
test("{:10.4E}", -123.456)
#test("{:10.4F}", 123.456)
#test("{:10.4F}", -123.456)
test("{:10.4G}", 123.456)
test("{:10.4G}", -123.456)
test("{:06e}", float("inf"))
test("{:06e}", float("-inf"))
test("{:06e}", float("nan"))
# The following fails right now
#test("{:10.1}", 0.0)
print("%.0f" % (1.750000 % 0.08333333333))
# Below isn't compatible with single-precision float
#print("%.1f" % (1.750000 % 0.08333333333))
#print("%.2f" % (1.750000 % 0.08333333333))
#print("%.12f" % (1.750000 % 0.08333333333))
# tests for errors in format string
try:
'{:10.1b}'.format(0.0)
except ValueError:
print('ValueError')
|
MrSurly/micropython-esp32
|
tests/float/string_format_fp30.py
|
Python
|
mit
| 1,002
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ***********************IMPORTANT NMAP LICENSE TERMS************************
# * *
# * The Nmap Security Scanner is (C) 1996-2013 Insecure.Com LLC. Nmap is *
# * also a registered trademark of Insecure.Com LLC. This program is free *
# * software; you may redistribute and/or modify it under the terms of the *
# * GNU General Public License as published by the Free Software *
# * Foundation; Version 2 ("GPL"), BUT ONLY WITH ALL OF THE CLARIFICATIONS *
# * AND EXCEPTIONS DESCRIBED HEREIN. This guarantees your right to use, *
# * modify, and redistribute this software under certain conditions. If *
# * you wish to embed Nmap technology into proprietary software, we sell *
# * alternative licenses (contact sales@insecure.com). Dozens of software *
# * vendors already license Nmap technology such as host discovery, port *
# * scanning, OS detection, version detection, and the Nmap Scripting *
# * Engine. *
# * *
# * Note that the GPL places important restrictions on "derivative works", *
# * yet it does not provide a detailed definition of that term. To avoid *
# * misunderstandings, we interpret that term as broadly as copyright law *
# * allows. For example, we consider an application to constitute a *
# * derivative work for the purpose of this license if it does any of the *
# * following with any software or content covered by this license *
# * ("Covered Software"): *
# * *
# * o Integrates source code from Covered Software. *
# * *
# * o Reads or includes copyrighted data files, such as Nmap's nmap-os-db *
# * or nmap-service-probes. *
# * *
# * o Is designed specifically to execute Covered Software and parse the *
# * results (as opposed to typical shell or execution-menu apps, which will *
# * execute anything you tell them to). *
# * *
# * o Includes Covered Software in a proprietary executable installer. The *
# * installers produced by InstallShield are an example of this. Including *
# * Nmap with other software in compressed or archival form does not *
# * trigger this provision, provided appropriate open source decompression *
# * or de-archiving software is widely available for no charge. For the *
# * purposes of this license, an installer is considered to include Covered *
# * Software even if it actually retrieves a copy of Covered Software from *
# * another source during runtime (such as by downloading it from the *
# * Internet). *
# * *
# * o Links (statically or dynamically) to a library which does any of the *
# * above. *
# * *
# * o Executes a helper program, module, or script to do any of the above. *
# * *
# * This list is not exclusive, but is meant to clarify our interpretation *
# * of derived works with some common examples. Other people may interpret *
# * the plain GPL differently, so we consider this a special exception to *
# * the GPL that we apply to Covered Software. Works which meet any of *
# * these conditions must conform to all of the terms of this license, *
# * particularly including the GPL Section 3 requirements of providing *
# * source code and allowing free redistribution of the work as a whole. *
# * *
# * As another special exception to the GPL terms, Insecure.Com LLC grants *
# * permission to link the code of this program with any version of the *
# * OpenSSL library which is distributed under a license identical to that *
# * listed in the included docs/licenses/OpenSSL.txt file, and distribute *
# * linked combinations including the two. *
# * *
# * Any redistribution of Covered Software, including any derived works, *
# * must obey and carry forward all of the terms of this license, including *
# * obeying all GPL rules and restrictions. For example, source code of *
# * the whole work must be provided and free redistribution must be *
# * allowed. All GPL references to "this License", are to be treated as *
# * including the special and conditions of the license text as well. *
# * *
# * Because this license imposes special exceptions to the GPL, Covered *
# * Work may not be combined (even as part of a larger work) with plain GPL *
# * software. The terms, conditions, and exceptions of this license must *
# * be included as well. This license is incompatible with some other open *
# * source licenses as well. In some cases we can relicense portions of *
# * Nmap or grant special permissions to use it in other open source *
# * software. Please contact fyodor@nmap.org with any such requests. *
# * Similarly, we don't incorporate incompatible open source software into *
# * Covered Software without special permission from the copyright holders. *
# * *
# * If you have any questions about the licensing restrictions on using *
# * Nmap in other works, are happy to help. As mentioned above, we also *
# * offer alternative license to integrate Nmap into proprietary *
# * applications and appliances. These contracts have been sold to dozens *
# * of software vendors, and generally include a perpetual license as well *
# * as providing for priority support and updates. They also fund the *
# * continued development of Nmap. Please email sales@insecure.com for *
# * further information. *
# * *
# * If you received these files with a written license agreement or *
# * contract stating terms other than the terms above, then that *
# * alternative license agreement takes precedence over these comments. *
# * *
# * Source is provided to this software because we believe users have a *
# * right to know exactly what a program is going to do before they run it. *
# * This also allows you to audit the software for security holes (none *
# * have been found so far). *
# * *
# * Source code also allows you to port Nmap to new platforms, fix bugs, *
# * and add new features. You are highly encouraged to send your changes *
# * to the dev@nmap.org mailing list for possible incorporation into the *
# * main distribution. By sending these changes to Fyodor or one of the *
# * Insecure.Org development mailing lists, or checking them into the Nmap *
# * source code repository, it is understood (unless you specify otherwise) *
# * that you are offering the Nmap Project (Insecure.Com LLC) the *
# * unlimited, non-exclusive right to reuse, modify, and relicense the *
# * code. Nmap will always be available Open Source, but this is important *
# * because the inability to relicense code has caused devastating problems *
# * for other Free Software projects (such as KDE and NASM). We also *
# * occasionally relicense the code to third parties as discussed above. *
# * If you wish to specify special license conditions of your *
# * contributions, just say so when you send them. *
# * *
# * This program is distributed in the hope that it will be useful, but *
# * WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Nmap *
# * license file for more details (it's in a COPYING file included with *
# * Nmap, and also available from https://svn.nmap.org/nmap/COPYING *
# * *
# ***************************************************************************/
"""
higwidgets/higspinner.py
a pygtk spinner, based on the epiphany/nautilus implementation
"""
__all__ = ['HIGSpinner']
import os
import gtk
import gobject
from gtkutils import gobject_register
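# This copy of the module references dprint() and several exception classes
# that are never defined in it; minimal stand-ins follow (their exact shapes
# are assumptions) so the module can be imported and exercised.
def dprint(msg):
    """No-op debug print hook; swap in a real print when debugging."""
    pass
class StaticPixbufNotFound(Exception):
    pass
class AnimatedIconNotFound(Exception):
    pass
class PixbufSmallerThanRequiredError(Exception):
    pass
class RestPixbufNotFound(Exception):
    pass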
class HIGSpinnerImages:
def __init__(self):
"""This class holds list of GDK Pixbuffers.
- static_pixbufs is used for multiple static pixbuffers
- self.animated_pixbufs is used for the pixbuffers that make up the animation
"""
dprint('HIGSpinnerImages::__init__')
# The Nautilus/Epiphany implementation uses a single "rest/quiescent"
# static pixbuffer. We'd rather allow the developer to choose from
# multiple static states, such as "done" or "failed".
# Index it by a name like that.
self.static_pixbufs = {}
# We should have a default rest pixbuf, set it with set_rest_pixbuf()
self.rest_pixbuf = None
# This is a list of pixbufs to be used on the animation
# For now, we're only implementing a single animation. Inconsistent!
self.animated_pixbufs = []
def add_static_pixbuf(self, name, pixbuf, default_on_rest=False):
"""Add a static pixbuf.
If this is the first one, make it the default pixbuffer on rest.
The user can make some other pixbuf the new default on rest, by setting
default_on_rest to True.
"""
dprint('HIGSpinnerImages::add_static_pixbuf')
self.static_pixbufs[name] = pixbuf
if (len(self.static_pixbufs) == 1) or default_on_rest:
self.set_rest_pixbuf(name)
def add_animated_pixbuf(self, pixbuf):
dprint('HIGSpinnerImages::add_animated_pixbuf')
self.animated_pixbufs.append(pixbuf)
def set_rest_pixbuf(self, name):
"""Sets the pixbuf that will be used on the default, 'rest' state. """
dprint('HIGSpinnerImages::set_rest_pixbuf')
        if name not in self.static_pixbufs:
raise StaticPixbufNotFound
        # self.rest_pixbuf holds the *real* pixbuf, not its name
self.rest_pixbuf = self.static_pixbufs[name]
def set_size(self, width, height):
"""Sets the size of eache pixbuf (static and animated)"""
new_animated = []
for p in self.animated_pixbufs:
new_animated.append(p.scale_simple(width, height,
gtk.gdk.INTERP_BILINEAR))
self.animated_pixbufs = new_animated
for k in self.static_pixbufs:
self.static_pixbufs[k] = self.static_pixbufs[k].\
scale_simple(width,
height,
gtk.gdk.INTERP_BILINEAR)
self.rest_pixbuf = self.rest_pixbuf.\
scale_simple(width,
height,
gtk.gdk.INTERP_BILINEAR)
self.images_width = width
self.images_height = height
class HIGSpinnerCache:
"""This hols a copy of the images used on the HIGSpinners instances."""
def __init__(self):
dprint('HIGSpinnerCache::__init__')
# Our own instance of a HIGSpinnerImages
self.spinner_images = HIGSpinnerImages()
        # These are private members in the C implementation
self.icon_theme = gtk.IconTheme()
self.originals = None
self.images = None
# We might have access to a "default" animated icon.
# For example, if we're on a GNOME desktop, and have the (default)
# "gnome-icon-theme" package installed, we might have access
# to "gnome-spinner". Check it before using, though
if (self.icon_theme.lookup_icon("gnome-spinner", -1, 0)):
self.default_animated_icon_name = "gnome-spinner"
else:
self.default_animated_icon_name = None
def load_animated_from_lookup(self, icon_name=None):
"""Loads an animated icon by doing a lookup on the icon theme."""
        # If the user does not choose an icon_name, use the default one
if icon_name == None:
icon_name = self.default_animated_icon_name
        # Even the default one (now in icon_name) might not be available
if icon_name == None:
raise AnimatedIconNotFound
# Try to lookup the icon
icon_info = self.icon_theme.lookup_icon(icon_name, -1, 0)
# Even if icon_name exists, it might not be found by lookup
if icon_info == None:
raise AnimatedIconNotFound
# Base size is, according to PyGTK docs:
# "a size for the icon that was specified by the icon theme creator,
# This may be different than the actual size of image."
# Ouch! We are acting on blind faith here...
size = icon_info.get_base_size()
# NOTE: If the icon is a builtin, it will not have a filename, see:
# http://www.pygtk.org/pygtk2reference/class-gtkicontheme.html
# But, we are not using the gtk.ICON_LOOKUP_USE_BUILTIN flag, nor does
        # GTK+ have a builtin animation, so we are safe ;-)
filename = icon_info.get_filename()
# Now that we have a filename, call load_animated_from_filename()
self.load_animated_from_filename(filename, size)
def load_animated_from_filename(self, filename, size):
        # grid_pixbuf is a pixbuf that holds the entire animation grid
grid_pixbuf = gtk.gdk.pixbuf_new_from_file(filename)
grid_width = grid_pixbuf.get_width()
grid_height = grid_pixbuf.get_height()
for x in range(0, grid_width, size):
for y in range(0, grid_height, size):
self.spinner_images.add_animated_pixbuf(\
self.__extract_frame(grid_pixbuf, x, y, size, size))
def load_static_from_lookup(self, icon_name="gnome-spinner-rest",
key_name=None):
icon_info = self.icon_theme.lookup_icon(icon_name, -1, 0)
size = icon_info.get_base_size()
filename = icon_info.get_filename()
# Now that we have a filename, call load_static_from_filename()
        self.load_static_from_filename(filename, key_name)  # pass key_name through instead of dropping it
def load_static_from_filename(self, filename, key_name=None):
icon_pixbuf = gtk.gdk.pixbuf_new_from_file(filename)
if key_name == None:
key_name = filename.split(".")[0]
self.spinner_images.add_static_pixbuf(key_name, icon_pixbuf)
def __extract_frame(self, pixbuf, x, y, w, h):
"""Cuts a sub pixbuffer, usually a frame of an animation.
- pixbuf is the complete pixbuf, from which a frame will be cut off
        - x/y are the position of the frame's top-left corner
        - w (width) is the number of pixels to take to the right
        - h (height) is the number of pixels to take down
"""
if (x + w > pixbuf.get_width()) or (y + h > pixbuf.get_height()):
raise PixbufSmallerThanRequiredError
return pixbuf.subpixbuf(x, y, w, h)
def _write_animated_pixbuf_to_files(self, path_format, image_format):
"""Writes image files from self.spinner_images.animated_pixbufs
- path_format should be a format string with one occurrence of a
string substitution, such as '/tmp/animation_%s.png'
- image_format can be either 'png' or 'jpeg'
"""
counter = 0
for i in self.spinner_images.animated_pixbufs:
i.save(path_format % counter, "png")
counter += 1
def _write_static_pixbuf_to_file(self, key_name, path_name, image_format):
self.spinner_images.static_pixbufs[key_name].save(path_name,
image_format)
class HIGSpinner(gtk.EventBox):
"""Simple spinner, such as the one found in webbrowsers and file managers.
You can construct it with the optional parameters:
* images, a list of images that will make up the animation
* width, the width that will be set for the images
* height, the height that will be set for the images
"""
__gsignals__ = { 'expose-event' : 'override',
'size-request' : 'override' }
def __init__(self):
gtk.EventBox.__init__(self)
#self.set_events(self.get_events())
# This holds a GDK Graphic Context
self.gc = None
# These are sane defaults, but should really come from the images
self.images_width = 32
self.images_height = 32
        # Timeout between frames, in milliseconds (the Nautilus/Epiphany
        # implementation uses 100)
        self.timeout = 120
        # Initialize a cache for ourselves
self.cache = HIGSpinnerCache()
self.cache.load_static_from_lookup()
self.cache.load_animated_from_lookup()
        # timer_task is the gobject.timeout_add identifier (when the animation
# is in progress, and __bump_frame is being continually called). If the
# spinner is static, timer_task is 0
self.timer_task = 0
        # animated_pixbuf_index is an index into the animated_pixbufs list
self.animated_pixbuf_index = 0
# current_pixbuf is initially the default rest_pixbuf
self.current_pixbuf = self.cache.spinner_images.rest_pixbuf
def __bump_frame(self):
"""This function moves the animated frame to the next one, or, if it's
currently the last one, back to the first one"""
animated_list = self.cache.spinner_images.animated_pixbufs
if self.animated_pixbuf_index == (len(animated_list) - 1):
# back to the first one
self.animated_pixbuf_index = 0
else:
            # go to the next one
self.animated_pixbuf_index += 1
self.queue_draw()
return True
def __select_pixbuf(self):
"""This selects either a rest pixbuf or a animation frame based on the
status of timer_task."""
if self.timer_task == 0:
self.current_pixbuf = self.cache.spinner_images.rest_pixbuf
else:
self.current_pixbuf = self.cache.spinner_images.animated_pixbufs\
[self.animated_pixbuf_index]
def start(self):
"""Starts the animation"""
if self.timer_task == 0:
self.timer_task = gobject.timeout_add(self.timeout,
self.__bump_frame)
def pause(self):
"""Pauses the animation"""
if self.timer_task != 0:
gobject.source_remove(self.timer_task)
self.timer_task = 0
self.queue_draw()
def stop(self):
"""Stops the animation
Do the same stuff as pause, but returns the animation to the beggining."""
self.pause()
self.animated_pixbuf_index = 0
    def set_speed(self, speed_in_milliseconds):
self.timeout = speed_in_milliseconds
self.pause()
self.start()
def do_expose_event(self, event):
#self.chain(event)
if self.cache.spinner_images.rest_pixbuf == None:
raise RestPixbufNotFound
self.__select_pixbuf()
width = self.current_pixbuf.get_width()
height = self.current_pixbuf.get_height()
x_offset = (self.allocation.width - width) / 2
y_offset = (self.allocation.height - height) / 2
pix_area = gtk.gdk.Rectangle(x_offset + self.allocation.x,
y_offset + self.allocation.y,
width, height)
dest = event.area.intersect(pix_area)
        # If a graphic context doesn't exist yet, create one
if self.gc == None:
self.gc = gtk.gdk.GC(self.window)
#gc = self.gc
self.window.draw_pixbuf(self.gc,
self.current_pixbuf,
dest.x - x_offset - self.allocation.x,
dest.y - y_offset - self.allocation.y,
dest.x, dest.y,
dest.width, dest.height)
def do_size_request(self, requisition):
# http://www.pygtk.org/pygtk2reference/class-gtkrequisition.html
# FIXME, this should really come from the pixbuf size + margins
requisition.width = self.cache.spinner_images.images_width
requisition.height = self.cache.spinner_images.images_height
gobject_register(HIGSpinner)
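# Minimal manual test: a sketch assuming a PyGTK environment whose icon theme
# provides "gnome-spinner" and "gnome-spinner-rest" (e.g. gnome-icon-theme).
if __name__ == '__main__':
    win = gtk.Window()
    win.connect('destroy', gtk.main_quit)
    spinner = HIGSpinner()
    win.add(spinner)
    win.show_all()
    spinner.start()  # cycle a frame every self.timeout milliseconds
    gtk.main()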
|
grongor/school_rfid
|
lib/nmap-6.40/zenmap/zenmapGUI/higwidgets/higspinner.py
|
Python
|
gpl-2.0
| 21,695
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('developers', '0005_auto_20150827_1230'),
]
operations = [
migrations.RemoveField(
model_name='preloadtestplan',
name='addon',
),
migrations.DeleteModel(
name='PreloadTestPlan',
),
]
|
ingenioustechie/zamboni
|
mkt/developers/migrations/0006_auto_20151110_1117.py
|
Python
|
bsd-3-clause
| 442
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_udld
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages UDLD global configuration params.
description:
- Manages UDLD global configuration params.
author:
- Jason Edelman (@jedelman8)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
    - When C(state=absent), it unconfigures the existing C(msg_time) setting and sets it
      to its default value of 15. It is cleaner to always use C(state=present).
- Module will fail if the udld feature has not been previously enabled.
options:
aggressive:
description:
- Toggles aggressive mode.
required: false
default: null
choices: ['enabled','disabled']
msg_time:
description:
- Message time in seconds for UDLD packets.
required: false
default: null
reset:
description:
- Ability to reset UDLD down interfaces.
required: false
default: null
choices: ['true','false']
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# ensure udld aggressive mode is globally disabled and set the global message interval to 20
- nxos_udld:
aggressive: disabled
msg_time: 20
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Ensure agg mode is globally enabled and msg time is 15
- nxos_udld:
aggressive: enabled
msg_time: 15
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"aggressive": "enabled", "msg_time": "40"}
existing:
description:
- k/v pairs of existing udld configuration
returned: always
type: dict
sample: {"aggressive": "disabled", "msg_time": "15"}
end_state:
description: k/v pairs of udld configuration after module execution
returned: always
type: dict
sample: {"aggressive": "enabled", "msg_time": "40"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["udld message-time 40", "udld aggressive"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
import re
def execute_show_command(command, module, command_type='cli_show'):
provider = module.params['provider']
if provider['transport'] == 'cli':
if 'show run' not in command:
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
elif provider['transport'] == 'nxapi':
cmds = [command]
body = run_commands(module, cmds)
return body
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = str(value)
else:
new_dict[new_key] = value
return new_dict
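# Illustrative example of apply_key_map (hypothetical key map): remapping a
# device table key to a module-friendly name, stringifying truthy values:
#   apply_key_map({'udld-global-mode': 'mode'},
#                 {'udld-global-mode': 'enabled-aggressive'})
#   -> {'mode': 'enabled-aggressive'}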
def get_commands_config_udld_global(delta, reset):
config_args = {
'enabled': 'udld aggressive',
'disabled': 'no udld aggressive',
'msg_time': 'udld message-time {msg_time}'
}
commands = []
for param, value in delta.items():
if param == 'aggressive':
if value == 'enabled':
command = 'udld aggressive'
elif value == 'disabled':
command = 'no udld aggressive'
else:
command = config_args.get(param, 'DNE').format(**delta)
if command and command != 'DNE':
commands.append(command)
command = None
if reset:
command = 'udld reset'
commands.append(command)
return commands
def get_commands_remove_udld_global(delta):
config_args = {
'aggressive': 'no udld aggressive',
'msg_time': 'no udld message-time {msg_time}',
}
commands = []
for param, value in delta.items():
command = config_args.get(param, 'DNE').format(**delta)
if command and command != 'DNE':
commands.append(command)
command = None
return commands
def get_udld_global(module):
command = 'show udld global'
udld_table = execute_show_command(command, module)[0]
status = str(udld_table.get('udld-global-mode', None))
if status == 'enabled-aggressive':
aggressive = 'enabled'
else:
aggressive = 'disabled'
interval = str(udld_table.get('message-interval', None))
udld = dict(msg_time=interval, aggressive=aggressive)
return udld
def main():
argument_spec = dict(
aggressive=dict(required=False, choices=['enabled', 'disabled']),
msg_time=dict(required=False, type='str'),
reset=dict(required=False, type='bool'),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=[['aggressive', 'msg_time', 'reset']],
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
aggressive = module.params['aggressive']
msg_time = module.params['msg_time']
reset = module.params['reset']
state = module.params['state']
if (aggressive or reset) and state == 'absent':
module.fail_json(msg="It's better to use state=present when "
"configuring or unconfiguring aggressive mode "
"or using reset flag. state=absent is just for "
"when using msg_time param.")
if msg_time:
try:
msg_time_int = int(msg_time)
if msg_time_int < 7 or msg_time_int > 90:
raise ValueError
except ValueError:
            module.fail_json(msg='msg_time must be an integer '
                                 'between 7 and 90')
args = dict(aggressive=aggressive, msg_time=msg_time, reset=reset)
proposed = dict((k, v) for k, v in args.items() if v is not None)
existing = get_udld_global(module)
end_state = existing
delta = set(proposed.items()).difference(existing.items())
changed = False
commands = []
if state == 'present':
if delta:
command = get_commands_config_udld_global(dict(delta), reset)
commands.append(command)
elif state == 'absent':
common = set(proposed.items()).intersection(existing.items())
if common:
command = get_commands_remove_udld_global(dict(common))
commands.append(command)
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
changed = True
load_config(module, cmds)
end_state = get_udld_global(module)
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
module.exit_json(**results)
if __name__ == '__main__':
main()
|
kbrebanov/ansible
|
lib/ansible/modules/network/nxos/nxos_udld.py
|
Python
|
gpl-3.0
| 8,829
|
from __future__ import absolute_import
import time
from celery import shared_task
@shared_task
def test_task():
time.sleep(20)
return 'Completed'
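# Illustrative dispatch, assuming a broker and a worker are running:
#   result = test_task.delay()
#   result.get(timeout=30)  # -> 'Completed' after the 20 second sleep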
|
baranbartu/djcelery-admin
|
sample_project/celeryapp/tasks.py
|
Python
|
mit
| 157
|
#!/usr/bin/env python2
import socket
TCP_IP = '127.0.0.1'
TCP_PORT = 5005
BUFFER_SIZE = 20 # Normally 1024, but we want fast response
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
conn, addr = s.accept()
print 'Connection address:', addr
data = conn.recv(BUFFER_SIZE)
print("received : %s"%data)
conn.send("TEST\n") # echo
conn.close()
s.close()
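# A matching client is symmetric (illustrative sketch, same host/port):
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect((TCP_IP, TCP_PORT))
#   c.send("hello")
#   print c.recv(BUFFER_SIZE)  # -> "TEST\n"
#   c.close()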
|
sbinet-staging/pyrame
|
acq_chain/acq_plugins/tcpclient/tcp_server.py
|
Python
|
lgpl-3.0
| 402
|
import os
import pytest
from topaz.modules import process
from ..base import BaseTopazTest
class TestProcess(BaseTopazTest):
def test_euid(self, space):
w_res = space.execute("return Process.euid")
assert space.int_w(w_res) == os.geteuid()
def test_pid(self, space):
w_res = space.execute("return Process.pid")
assert space.int_w(w_res) == os.getpid()
def test_exit(self, space):
with self.raises(space, "SystemExit"):
space.execute("Process.exit")
w_res = space.execute("""
begin
Process.exit
rescue SystemExit => e
return e.success?, e.status
end
""")
assert self.unwrap(space, w_res) == [True, 0]
w_res = space.execute("""
begin
Process.exit(1)
rescue SystemExit => e
return e.success?, e.status
end
""")
assert self.unwrap(space, w_res) == [False, 1]
def test_fork(self, space, monkeypatch, capfd):
monkeypatch.setattr(process, "fork", lambda: 0)
with self.raises(space, "SystemExit"):
space.execute("""
Process.fork do
puts "child"
end
""")
out, err = capfd.readouterr()
assert err == ""
assert out == "child\n"
monkeypatch.setattr(process, "fork", lambda: 200)
w_res = space.execute("""
return Process.fork do
puts "child"
end
""")
assert space.int_w(w_res) == 200
@pytest.mark.parametrize("code", [0, 1, 173])
def test_waitpid(self, space, code):
pid = os.fork()
if pid == 0:
os._exit(code)
else:
w_res = space.execute("return Process.waitpid %i" % pid)
assert space.int_w(w_res) == pid
w_res = space.execute("return $?")
status = space.send(w_res, "to_i", [])
assert space.int_w(status) == code
@pytest.mark.parametrize("code", [0, 1, 173])
def test_waitpid2(self, space, code):
pid = os.fork()
if pid == 0:
os._exit(code)
else:
w_res = space.execute("return Process.waitpid2 %i" % pid)
[returned_pid, returned_code] = space.listview(w_res)
assert space.int_w(returned_pid) == pid
code_to_i = space.send(returned_code, "to_i", [])
assert space.int_w(code_to_i) == code
|
kachick/topaz
|
tests/modules/test_process.py
|
Python
|
bsd-3-clause
| 2,459
|
from unittest import TestCase
from ..utils import Pieces
class TestPieces(TestCase):
def setUp(self):
self.torrent = {
b'info': {
b'piece length': 4,
b'pieces': '\00'*(20*20)
}
}
self.pieces = Pieces(self.torrent)
def test_get_complete_pieces(self):
self.assertEqual(self.pieces.get_complete_pieces(1, 15), (3, 3, ['\00'*(20)]*2))
|
jeanfrancoisdrapeau/autotorrent
|
autotorrent/tests/test_utils.py
|
Python
|
mit
| 442
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
JoinAttributes.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.core import QgsFeature
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterTable
from processing.core.parameters import ParameterTableField
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class JoinAttributes(GeoAlgorithm):
OUTPUT_LAYER = 'OUTPUT_LAYER'
INPUT_LAYER = 'INPUT_LAYER'
INPUT_LAYER_2 = 'INPUT_LAYER_2'
TABLE_FIELD = 'TABLE_FIELD'
TABLE_FIELD_2 = 'TABLE_FIELD_2'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Join attributes table')
self.group, self.i18n_group = self.trAlgorithm('Vector general tools')
self.addParameter(ParameterVector(self.INPUT_LAYER,
self.tr('Input layer')))
self.addParameter(ParameterTable(self.INPUT_LAYER_2,
self.tr('Input layer 2'), False))
self.addParameter(ParameterTableField(self.TABLE_FIELD,
self.tr('Table field'), self.INPUT_LAYER))
self.addParameter(ParameterTableField(self.TABLE_FIELD_2,
self.tr('Table field 2'), self.INPUT_LAYER_2))
self.addOutput(OutputVector(self.OUTPUT_LAYER,
self.tr('Joined layer')))
def processAlgorithm(self, feedback):
input = self.getParameterValue(self.INPUT_LAYER)
input2 = self.getParameterValue(self.INPUT_LAYER_2)
output = self.getOutputFromName(self.OUTPUT_LAYER)
field = self.getParameterValue(self.TABLE_FIELD)
field2 = self.getParameterValue(self.TABLE_FIELD_2)
layer = dataobjects.getObjectFromUri(input)
joinField1Index = layer.fields().lookupField(field)
layer2 = dataobjects.getObjectFromUri(input2)
joinField2Index = layer2.fields().lookupField(field2)
outFields = vector.combineVectorFields(layer, layer2)
writer = output.getVectorWriter(outFields, layer.wkbType(),
layer.crs())
# Cache attributes of Layer 2
cache = {}
features = vector.features(layer2)
total = 100.0 / len(features)
for current, feat in enumerate(features):
attrs = feat.attributes()
joinValue2 = str(attrs[joinField2Index])
if joinValue2 not in cache:
cache[joinValue2] = attrs
feedback.setProgress(int(current * total))
        # Create output vector layer with additional attributes
outFeat = QgsFeature()
features = vector.features(layer)
total = 100.0 / len(features)
for current, feat in enumerate(features):
outFeat.setGeometry(feat.geometry())
attrs = feat.attributes()
joinValue1 = str(attrs[joinField1Index])
attrs.extend(cache.get(joinValue1, []))
outFeat.setAttributes(attrs)
writer.addFeature(outFeat)
feedback.setProgress(int(current * total))
del writer
|
myarjunar/QGIS
|
python/plugins/processing/algs/qgis/JoinAttributes.py
|
Python
|
gpl-2.0
| 4,402
|
#! /usr/bin/env python
descr = """A set of Python modules for functional MRI..."""
import sys
import os
from setuptools import setup, find_packages
def load_version():
"""Executes nistats/version.py in a globals dictionary and return it.
Note: importing nistats is not an option because there may be
dependencies like nibabel which are not installed and
setup.py is supposed to install them.
"""
# load all vars into globals, otherwise
# the later function call using global vars doesn't work.
globals_dict = {}
with open(os.path.join('nistats', 'version.py')) as fp:
exec(fp.read(), globals_dict)
return globals_dict
def is_installing():
# Allow command-lines such as "python setup.py build install"
install_commands = set(['install', 'develop'])
return install_commands.intersection(set(sys.argv))
# Make sources available using relative paths from this file's directory.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
_VERSION_GLOBALS = load_version()
DISTNAME = 'nistats'
DESCRIPTION = 'Modeling and Statistical analysis of fMRI data in Python'
LONG_DESCRIPTION = open('README.rst').read()
MAINTAINER = 'Bertrand Thirion'
MAINTAINER_EMAIL = 'bertrand.thirion@inria.fr'
URL = 'http://nistats.github.io'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://nistats.github.io'
VERSION = _VERSION_GLOBALS['__version__']
if __name__ == "__main__":
if is_installing():
module_check_fn = _VERSION_GLOBALS['_check_module_dependencies']
module_check_fn(is_nistats_installing=True)
install_requires = \
['%s>=%s' % (meta.get('pypi_name', mod), meta['min_version'])
for mod, meta in _VERSION_GLOBALS['REQUIRED_MODULE_METADATA']]
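        # For illustration: an entry like ('nibabel', {'min_version': '2.0.2'})
        # in REQUIRED_MODULE_METADATA (hypothetical values) becomes the pin
        # 'nibabel>=2.0.2'; the real entries live in nistats/version.py.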
print(install_requires)
setup(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
          zip_safe=False, # the package cannot safely run from a zipped .egg file
classifiers=[
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
packages=find_packages(),
package_data={'nistats.tests': ['*.nii.gz', '*.npz'],
#'nistats.description': ['*.rst'],
},
install_requires=install_requires,)
|
bthirion/nistats
|
setup.py
|
Python
|
bsd-3-clause
| 3,134
|
# Program main_remote. Look up new phone numbers and maybe add to the blacklist.
# Copyright (C) 2014 David Brown
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# This differs from main.py in that this version is intended to also launch
# selenium and a browser so the user can see the results. Not very different
# in code yet.
import sys
from calleridlist import callerIdList
from blacklist import blacklist
from callercomplaints import callercomplaints  # assumed: class name mirrors the module, matching the imports above
from whocalledus import whocalled
from whocallsme import whocallsme
if __name__ == '__main__':
raise NotImplementedError # when you put in your password, take this out
wc = whocalled('YourUserName', 'YourPassword')
wcm = whocallsme() # requires selenium install and configuration
cc = callercomplaints()
it = callerIdList()
it.loadFromFile()
it.pickNewBlacklistEntries( [ wc, wcm, cc ])
#TODO compare to local address book on mac?
bl = blacklist()
bl.loadFromFile()
bl.merge( it.getNewBlacklistEntries() )
#print bl
bl.save()
sys.exit(0)
|
smurfless1/jcmanage
|
main_remote.py
|
Python
|
gpl-3.0
| 1,627
|
from .functional import *
from .unit import *
|
HumanExposure/factotum
|
feedback/tests/__init__.py
|
Python
|
gpl-3.0
| 46
|
# Get the difference between the greatest and smallest number in the given array
def checkio(*args):
    if not args:
        return 0
    return max(args) - min(args)
#These "asserts" using only for self-checking and not necessary for auto-testing
if __name__ == '__main__':
def almost_equal(checked, correct, significant_digits):
precision = 0.1 ** significant_digits
return correct - precision < checked < correct + precision
assert almost_equal(checkio(1, 2, 3), 2, 3), "3-1=2"
assert almost_equal(checkio(5, -5), 10, 3), "5-(-5)=10"
assert almost_equal(checkio(10.2, -2.2, 0, 1.1, 0.5), 12.4, 3), "10.2-(-2.2)=12.4"
assert almost_equal(checkio(), 0, 3), "Empty"
|
lisprolog/python
|
most_numbers.py
|
Python
|
bsd-3-clause
| 735
|
from atila import Atila
import confutil
import skitai
from rs4 import asyncore
import os
from rs4 import jwt as jwt_
import time
def test_route_root (app, dbpath):
@app.route ("/index")
@app.require ("URL", ints = ["t"])
def index (was, t = 0):
t = int (t)
if t == 0:
return was.API ("200 OK")
if t == 1:
return was.API ("205 No Content")
if t == 2:
return was.API ("201 Created", {"data": 1})
if t == 3:
return was.API ("201 Created", data = 1)
if t == 4:
return was.API (data = 1)
if t == 5:
return was.API ({"data": 1})
if t == 9:
return was.API ("201 Created", {"data": 1}, data = 2)
with app.test_client ("/", confutil.getroot ()) as cli:
api = cli.api ()
resp = api.index.get (t = 0)
assert resp.status_code == 200
resp = api.index.get (t = 1)
assert resp.status_code == 205
assert resp.data == {}
resp = api.index.get (t = 2)
assert resp.status_code == 201
assert resp.data == {"data": 1}
resp = api.index.get (t = 3)
assert resp.status_code == 201
assert resp.data == {"data": 1}
resp = api.index.get (t = 4)
assert resp.status_code == 200
assert resp.data == {"data": 1}
resp = api.index.get (t = 5)
assert resp.status_code == 200
assert resp.data == {"data": 1}
resp = api.index.get (t = 9)
assert resp.status_code == 201
assert resp.data == {"data": 2}
|
hansroh/skitai
|
tests/level3/test_api_fault.py
|
Python
|
mit
| 1,690
|
import time as real_time
import unittest
import jwt as jwt_lib
from mock import patch
from twilio.jwt import Jwt, JwtDecodeError
class DummyJwt(Jwt):
"""Jwt implementation that allows setting arbitrary payload and headers for testing."""
ALGORITHM = 'HS256'
def __init__(self, secret_key, issuer, subject=None, algorithm=None,
nbf=Jwt.GENERATE, ttl=3600, valid_until=None, headers=None,
payload=None):
super(DummyJwt, self).__init__(
secret_key=secret_key,
issuer=issuer,
subject=subject,
algorithm=algorithm or self.ALGORITHM,
nbf=nbf,
ttl=ttl,
valid_until=valid_until
)
self._payload = payload or {}
self._headers = headers or {}
def _generate_payload(self):
return self._payload
def _generate_headers(self):
return self._headers
class JwtTest(unittest.TestCase):
def assertIn(self, foo, bar, msg=None):
"""backport for 2.6"""
assert foo in bar, (msg or "%s not found in %s" % (foo, bar))
def now(self):
return int(real_time.time())
def assertJwtsEqual(self, jwt, key, expected_payload=None, expected_headers=None):
expected_headers = expected_headers or {}
expected_payload = expected_payload or {}
decoded_payload = jwt_lib.decode(jwt, key, algorithms=["HS256"], options={"verify_signature": False})
decoded_headers = jwt_lib.get_unverified_header(jwt)
self.assertEqual(expected_headers, decoded_headers)
self.assertEqual(expected_payload, decoded_payload)
@patch('time.time')
def test_basic_encode(self, time_mock):
time_mock.return_value = 0.0
jwt = DummyJwt('secret_key', 'issuer', headers={}, payload={})
self.assertJwtsEqual(
jwt.to_jwt(), 'secret_key',
expected_headers={'typ': 'JWT', 'alg': 'HS256'},
expected_payload={'iss': 'issuer', 'exp': 3600, 'nbf': 0},
)
@patch('time.time')
def test_encode_with_subject(self, time_mock):
time_mock.return_value = 0.0
jwt = DummyJwt('secret_key', 'issuer', subject='subject', headers={}, payload={})
self.assertJwtsEqual(
jwt.to_jwt(), 'secret_key',
expected_headers={'typ': 'JWT', 'alg': 'HS256'},
expected_payload={'iss': 'issuer', 'exp': 3600, 'nbf': 0, 'sub': 'subject'},
)
@patch('time.time')
def test_encode_without_nbf(self, time_mock):
time_mock.return_value = 0.0
jwt = DummyJwt('secret_key', 'issuer', subject='subject', headers={}, payload={}, nbf=None)
self.assertJwtsEqual(
jwt.to_jwt(), 'secret_key',
expected_headers={'typ': 'JWT', 'alg': 'HS256'},
expected_payload={'iss': 'issuer', 'exp': 3600, 'sub': 'subject'},
)
@patch('time.time')
def test_encode_custom_ttl(self, time_mock):
time_mock.return_value = 0.0
jwt = DummyJwt('secret_key', 'issuer', ttl=10, headers={}, payload={})
self.assertJwtsEqual(
jwt.to_jwt(), 'secret_key',
expected_headers={'typ': 'JWT', 'alg': 'HS256'},
expected_payload={'iss': 'issuer', 'exp': 10, 'nbf': 0},
)
@patch('time.time')
def test_encode_ttl_added_to_current_time(self, time_mock):
time_mock.return_value = 50.0
jwt = DummyJwt('secret_key', 'issuer', ttl=10, headers={}, payload={})
self.assertJwtsEqual(
jwt.to_jwt(), 'secret_key',
expected_headers={'typ': 'JWT', 'alg': 'HS256'},
expected_payload={'iss': 'issuer', 'exp': 60, 'nbf': 50},
)
@patch('time.time')
def test_encode_override_ttl(self, time_mock):
time_mock.return_value = 0.0
jwt = DummyJwt('secret_key', 'issuer', ttl=10, headers={}, payload={})
self.assertJwtsEqual(
jwt.to_jwt(ttl=20),
'secret_key',
expected_headers={'typ': 'JWT', 'alg': 'HS256'},
expected_payload={'iss': 'issuer', 'exp': 20, 'nbf': 0},
)
@patch('time.time')
def test_encode_valid_until_overrides_ttl(self, time_mock):
time_mock.return_value = 0.0
jwt = DummyJwt('secret_key', 'issuer', ttl=10, valid_until=70, headers={}, payload={})
self.assertJwtsEqual(
jwt.to_jwt(), 'secret_key',
expected_headers={'typ': 'JWT', 'alg': 'HS256'},
expected_payload={'iss': 'issuer', 'exp': 70, 'nbf': 0},
)
@patch('time.time')
def test_encode_custom_nbf(self, time_mock):
time_mock.return_value = 0.0
jwt = DummyJwt('secret_key', 'issuer', ttl=10, nbf=5, headers={}, payload={})
self.assertJwtsEqual(
jwt.to_jwt(), 'secret_key',
expected_headers={'typ': 'JWT', 'alg': 'HS256'},
expected_payload={'iss': 'issuer', 'exp': 10, 'nbf': 5},
)
@patch('time.time')
def test_encode_with_headers(self, time_mock):
time_mock.return_value = 0.0
jwt = DummyJwt('secret_key', 'issuer', headers={'sooper': 'secret'}, payload={})
self.assertJwtsEqual(
jwt.to_jwt(), 'secret_key',
expected_headers={'typ': 'JWT', 'alg': 'HS256', 'sooper': 'secret'},
expected_payload={'iss': 'issuer', 'exp': 3600, 'nbf': 0},
)
@patch('time.time')
def test_encode_with_payload(self, time_mock):
time_mock.return_value = 0.0
jwt = DummyJwt('secret_key', 'issuer', payload={'root': 'true'})
self.assertJwtsEqual(
jwt.to_jwt(), 'secret_key',
expected_headers={'typ': 'JWT', 'alg': 'HS256'},
expected_payload={'iss': 'issuer', 'exp': 3600, 'nbf': 0, 'root': 'true'},
)
@patch('time.time')
def test_encode_with_payload_and_headers(self, time_mock):
time_mock.return_value = 0.0
jwt = DummyJwt('secret_key', 'issuer', headers={'yes': 'oui'}, payload={'pay': 'me'})
self.assertJwtsEqual(
jwt.to_jwt(), 'secret_key',
expected_headers={'typ': 'JWT', 'alg': 'HS256', 'yes': 'oui'},
expected_payload={'iss': 'issuer', 'exp': 3600, 'nbf': 0, 'pay': 'me'},
)
def test_encode_no_key_fails(self):
jwt = DummyJwt(None, 'issuer')
self.assertRaises(ValueError, jwt.to_jwt)
def test_encode_decode(self):
test_start = self.now()
jwt = DummyJwt('secret_key', 'issuer', subject='hey', payload={'sick': 'sick'})
decoded_jwt = Jwt.from_jwt(jwt.to_jwt(), 'secret_key')
self.assertGreaterEqual(decoded_jwt.valid_until, self.now() + 3600)
self.assertGreaterEqual(decoded_jwt.nbf, test_start)
self.assertEqual(decoded_jwt.issuer, 'issuer')
self.assertEqual(decoded_jwt.secret_key, 'secret_key')
self.assertEqual(decoded_jwt.algorithm, 'HS256')
self.assertEqual(decoded_jwt.subject, 'hey')
self.assertEqual(decoded_jwt.headers, {'typ': 'JWT', 'alg': 'HS256'})
self.assertDictContainsSubset({
'iss': 'issuer',
'sub': 'hey',
'sick': 'sick',
}, decoded_jwt.payload)
def test_encode_decode_mismatched_algorithms(self):
jwt = DummyJwt('secret_key', 'issuer', algorithm='HS512', subject='hey', payload={'sick': 'sick'})
self.assertRaises(JwtDecodeError, Jwt.from_jwt, jwt.to_jwt())
def test_decode_bad_secret(self):
jwt = DummyJwt('secret_key', 'issuer')
self.assertRaises(JwtDecodeError, Jwt.from_jwt, jwt.to_jwt(), 'letmeinplz')
def test_decode_modified_jwt_fails(self):
jwt = DummyJwt('secret_key', 'issuer')
example_jwt = jwt.to_jwt()
example_jwt = 'ABC' + example_jwt[3:]
self.assertRaises(JwtDecodeError, Jwt.from_jwt, example_jwt, 'secret_key')
def test_decode_validates_expiration(self):
expired_jwt = DummyJwt('secret_key', 'issuer', valid_until=self.now())
real_time.sleep(1)
self.assertRaises(JwtDecodeError, Jwt.from_jwt, expired_jwt.to_jwt(), 'secret_key')
def test_decode_validates_nbf(self):
expired_jwt = DummyJwt('secret_key', 'issuer', nbf=self.now() + 3600) # valid 1hr from now
self.assertRaises(JwtDecodeError, Jwt.from_jwt, expired_jwt.to_jwt(), 'secret_key')
def test_decodes_valid_jwt(self):
expiry_time = self.now() + 1000
example_jwt = jwt_lib.encode(
{'hello': 'world', 'iss': 'me', 'sub': 'being awesome', 'exp': expiry_time},
'secret'
)
decoded_jwt = Jwt.from_jwt(example_jwt, 'secret')
self.assertEqual(decoded_jwt.issuer, 'me')
self.assertEqual(decoded_jwt.subject, 'being awesome')
self.assertEqual(decoded_jwt.valid_until, expiry_time)
self.assertIn('hello', decoded_jwt.payload)
self.assertEqual(decoded_jwt.payload['hello'], 'world')
def test_decode_allows_skip_verification(self):
jwt = DummyJwt('secret', 'issuer', payload={'get': 'rekt'})
decoded_jwt = Jwt.from_jwt(jwt.to_jwt(), key=None)
self.assertEqual(decoded_jwt.issuer, 'issuer')
self.assertEqual(decoded_jwt.payload['get'], 'rekt')
self.assertIsNone(decoded_jwt.secret_key)
|
twilio/twilio-python
|
tests/unit/jwt/test_jwt.py
|
Python
|
mit
| 9,331
|
# -*- coding: utf-8 -*-
#
# Copyright 2015-2019 Jun-ya HASEBA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mock import patch
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
from django.test.utils import override_settings
from django.utils.importlib import import_module
class ViewsTest(TestCase):
"""
    Test code for views.py.
"""
def setUp(self):
"""
        Run initial setup.
"""
        # Create the test client
self.client = Client()
        # Create the session cookie
engine = import_module(settings.SESSION_ENGINE)
session = engine.SessionStore()
session.save()
session_cookie = settings.SESSION_COOKIE_NAME
self.client.cookies[session_cookie] = session.session_key
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
self.client.cookies[session_cookie].update(cookie_data)
@patch('twingo.views.OAuthHandler')
def test_twitter_login_01(self, oauth_handler):
"""
        [Target] twitter_login() : No.01
        [Condition] Access without specifying a next page.
        [Result] The request token is stored in the session and the user is redirected to the authorization page.
"""
oauth_handler.return_value.get_authorization_url.return_value = '/redirect/'
oauth_handler.return_value.request_token = 'Request Token'
response = self.client.get(reverse('twingo_login'))
session = self.client.session
self.assertRedirects(response, '/redirect/')
self.assertEqual('Request Token', session['request_token'])
self.assertIsNone(session.get('next'))
@patch('twingo.views.OAuthHandler')
def test_twitter_login_02(self, oauth_handler):
"""
        [Target] twitter_login() : No.02
        [Condition] Access with a next page specified.
        [Result] The request token and the next page URL are stored in the session, and the user is redirected to the authorization page.
"""
oauth_handler.return_value.get_authorization_url.return_value = '/redirect/'
oauth_handler.return_value.request_token = 'Request Token'
response = self.client.get(reverse('twingo_login'), {'next': '/next_page/'})
session = self.client.session
self.assertRedirects(response, '/redirect/')
self.assertEqual('Request Token', session['request_token'])
self.assertEqual('/next_page/', session.get('next'))
@patch('twingo.views.login')
@patch('twingo.views.authenticate')
@patch('twingo.views.OAuthHandler')
def test_twitter_callback_01(self, oauth_handler, authenticate, login):
"""
        [Target] twitter_callback() : No.01
        [Condition] The post-login destination page is specified in the session.
        [Result] The user is redirected to the specified page.
"""
authenticate.return_value = 'user'
session = self.client.session
session['request_token'] = {'oauth_token': 'token'}
session['next'] = '/next/'
session.save()
response = self.client.get(reverse('twingo_callback'), {'oauth_token': 'token', 'oauth_verifier': 'verifier'})
self.assertRedirects(response, '/next/')
@override_settings(AFTER_LOGIN_URL='/after/')
@patch('twingo.views.login')
@patch('twingo.views.authenticate')
@patch('twingo.views.OAuthHandler')
def test_twitter_callback_02(self, oauth_handler, authenticate, login):
"""
        [Target] twitter_callback() : No.02
        [Condition] The post-login destination page is specified in settings.py.
        [Result] The user is redirected to the specified page.
"""
authenticate.return_value = 'user'
session = self.client.session
session['request_token'] = {'oauth_token': 'token'}
session.save()
response = self.client.get(reverse('twingo_callback'), {'oauth_token': 'token', 'oauth_verifier': 'verifier'})
self.assertRedirects(response, '/after/')
@patch('twingo.views.login')
@patch('twingo.views.authenticate')
@patch('twingo.views.OAuthHandler')
def test_twitter_callback_03(self, oauth_handler, authenticate, login):
"""
        [Target] twitter_callback() : No.03
        [Condition] No post-login destination page is specified.
        [Result] The user is redirected to the top page.
"""
authenticate.return_value = 'user'
session = self.client.session
session['request_token'] = {'oauth_token': 'token'}
session.save()
response = self.client.get(reverse('twingo_callback'), {'oauth_token': 'token', 'oauth_verifier': 'verifier'})
self.assertRedirects(response, '/')
def test_twitter_callback_04(self):
"""
        [Target] twitter_callback() : No.04
        [Condition] No request token is set in the session.
        [Result] A 401 error occurs.
"""
response = self.client.get(reverse('twingo_callback'), {'oauth_token': 'token', 'oauth_verifier': 'verifier'})
self.assertEqual(401, response.status_code)
def test_twitter_callback_05(self):
"""
        [Target] twitter_callback() : No.05
        [Condition] The request token stored in the session differs from the request token in the GET parameters.
        [Result] A 401 error occurs.
"""
session = self.client.session
session['request_token'] = {'oauth_token': 'error_token'}
session.save()
response = self.client.get(reverse('twingo_callback'), {'oauth_token': 'token', 'oauth_verifier': 'verifier'})
self.assertEqual(401, response.status_code)
@patch('twingo.views.authenticate')
@patch('twingo.views.OAuthHandler')
def test_twitter_callback_06(self, oauth_handler, authenticate):
"""
        [Target] twitter_callback() : No.06
        [Condition] The authentication process fails.
        [Result] A 401 error occurs.
"""
authenticate.return_value = None
session = self.client.session
session['request_token'] = {'oauth_token': 'token'}
session.save()
response = self.client.get(reverse('twingo_callback'), {'oauth_token': 'token', 'oauth_verifier': 'verifier'})
self.assertEqual(401, response.status_code)
@override_settings(AFTER_LOGOUT_URL='/after/')
@patch('twingo.views.logout')
def test_twitter_logout_01(self, logout):
"""
        [Target] twitter_logout() : No.01
        [Condition] The post-logout destination page is specified in settings.py.
        [Result] The user is redirected to the specified page.
"""
response = self.client.get(reverse('twingo_logout'))
self.assertRedirects(response, '/after/')
@patch('twingo.views.logout')
def test_twitter_logout_02(self, logout):
"""
        [Target] twitter_logout() : No.02
        [Condition] No post-logout destination page is specified in settings.py.
        [Result] The user is redirected to the top page.
"""
response = self.client.get(reverse('twingo_logout'))
self.assertRedirects(response, '/')
|
7pairs/twingo
|
tests/test_views.py
|
Python
|
apache-2.0
| 8,174
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Campaign.user'
db.alter_column(u'campaign_campaign', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True))
def backwards(self, orm):
# Changing field 'Campaign.user'
db.alter_column(u'campaign_campaign', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(default=0, to=orm['auth.User']))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'campaign.campaign': {
'Meta': {'object_name': 'Campaign'},
'goal': ('django.db.models.fields.DecimalField', [], {'max_digits': '15', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('cloudinary.models.CloudinaryField', [], {'max_length': '100'}),
'message': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['campaign']
|
fandrefh/AnjoMeu
|
anjo/campaign/migrations/0005_auto__chg_field_campaign_user.py
|
Python
|
gpl-2.0
| 4,461
|
import subprocess
import random, string, os, socket, json, time
from glob import glob
from urllib import request
import threading
import configparser
import yaml
import logging
import logging.config
import fcntl
import datetime
USBDEVFS_RESET = 21780
try:
logging.config.fileConfig("logging.ini")
except:
pass
def sizeof_fmt(num, suffix='B'):
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
default_config = """
[DEFAULT]
exposure = 0
enabled = on
resize = on
[camera]
name =
enabled = on
[ftp]
enabled = on
replace = on
resize = on
timestamped = on
server = sftp.traitcapture.org
directory = /
username = picam
password = DEFAULT_PASSWORD
[timelapse]
interval = 300
starttime = 00:00
stoptime = 23:59
[localfiles]
spooling_dir =
upload_dir =
"""
default_light_config = """
[light]
max_power = 1000
min_power = 0
wavelengths = "400nm,420nm,450nm,530nm,630nm,660nm,735nm"
csv_keys = "LED1,LED2,LED3,LED4,LED5,LED6,LED7"
file_path = "lights_byserial/{identifier}.scf"
[telnet]
telnet_host = "192.168.2.124"
telnet_port = 50630
set_all_command = setall {power}
set_wavelength_command = setwlrelpower {wavelength} {power}
set_all_wavelength_command = setwlsrelpower {} {} {} {} {} {} {}
get_wavelength_command = getwlrelpower {wavelength}
[url]
url_host = "192.168.2.124"
control_uri = /cgi-bin/userI.cgi
set_all_command = "setAllTo": {percent}, "setAllSub": "set"
set_all_wavelength_command = "wl1":{}, "wl2":{}, "wl3":{}, "wl4":{}, "wl5":{}, "wl6":{}, "wl7":{}
"""
class SysUtil(object):
"""
System utility class.
Helper class to cache various things like the hostname, machine-id, amount of space in the filesystem.
"""
_ip_address = "0.0.0.0", 0
_external_ip = "0.0.0.0", 0
_machine_id = "", 0
_hostname = "HOSTNAME", 0
_tor_host = ("unknown.onion", "not a real key", "not a real client"), 0
_version = "Unknown spc-eyepi version", 0
a_statvfs = os.statvfs("/")
_fs = (a_statvfs.f_frsize * a_statvfs.f_bavail, a_statvfs.f_frsize * a_statvfs.f_blocks), 0
_watches = list()
thread = None
stop = False
logger = logging.getLogger("SysUtil")
def __init__(self):
if SysUtil.thread is None:
SysUtil.thread = threading.Thread(target=self._thread)
SysUtil.thread.start()
pass
@staticmethod
def reset_usb_device(bus: int, dev: int) -> bool:
"""
resets a usb device.
:param bus: bus number
:type bus: int
:param dev: device number of the device on the bus above
:type dev: int
"""
try:
fn = "/dev/bus/usb/{bus:03d}/{dev:03d}".format(bus=bus, dev=dev)
            with open(fn, 'w') as f:  # 'w' is already write-only; open()'s third arg is buffering, not flags
fcntl.ioctl(f, USBDEVFS_RESET, 0)
return True
        except Exception as e:
            SysUtil.logger.error("Couldn't reset usb device (possibly file not found): {}".format(str(e)))
            return False
@staticmethod
def default_identifier(prefix=None):
"""
        returns an identifier. If no prefix is available, generates one.
        :param prefix:
        :return: string of the identifier.
:rtype: str
"""
if prefix:
return SysUtil.get_identifier_from_name(prefix)
else:
from hashlib import md5
serialnumber = ("AUTO_" + md5(bytes(prefix, 'utf-8')).hexdigest()[len("AUTO_"):])[:32]
SysUtil.logger.warning("using autogenerated serialnumber {}".format(serialnumber))
return serialnumber
@staticmethod
def _nested_lookup(key, document):
"""
nested document lookup,
works on dicts and lists
:param key: string of key to lookup
:param document: dict or list to lookup
:return: yields item
"""
if isinstance(document, list):
for d in document:
for result in SysUtil._nested_lookup(key, d):
yield result
if isinstance(document, dict):
for k, v in document.items():
if k == key:
yield v
elif isinstance(v, dict):
for result in SysUtil._nested_lookup(key, v):
yield result
elif isinstance(v, list):
for d in v:
for result in SysUtil._nested_lookup(key, d):
yield result
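    # Illustrative example:
    #   list(SysUtil._nested_lookup('a', {'x': {'a': 1}, 'y': [{'a': 2}]}))
    #   -> [1, 2]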
@staticmethod
def sizeof_fmt(num, suffix='B')->str:
"""
        formats a number of bytes into a human-readable string.
        Uses binary (IEC) units,
        e.g. sizeof_fmt(1234) returns '1.2KiB'
        :param num: number of bytes to format
        :param suffix: the suffix to use
        :return: human-formatted string.
:rtype: str
"""
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
@classmethod
def update_from_git(cls):
"""
updates spc-eyepi from git.
"""
os.system("git fetch --all;git reset --hard origin/master")
os.system("systemctl restart spc-eyepi_capture.service")
@classmethod
def get_hostname(cls)->str:
"""
gets the current hostname.
if there is no /etc/hostname file, sets the hostname randomly.
:return: the current hostname or the hostname it was set to
:rtype: str
"""
if abs(cls._hostname[-1] - time.time()) > 10:
if not os.path.isfile("/etc/hostname"):
hostname = "".join(random.choice(string.ascii_letters) for _ in range(8))
os.system("hostname {}".format(cls._hostname))
else:
with open("/etc/hostname", "r") as fn:
hostname = fn.read().strip()
cls._hostname = hostname, time.time()
return cls._hostname[0]
@classmethod
def set_hostname(cls, hostname: str):
"""
        sets the machine's hostname, in /etc/hosts and /etc/hostname
:param hostname: the string of which to set the hostname to.
"""
try:
with open(os.path.join("/etc/", "hostname"), 'w') as f:
f.write(hostname + "\n")
with open(os.path.join("/etc/", "hosts"), 'w') as hosts_file:
h_tmpl = "127.0.0.1\tlocalhost.localdomain localhost {hostname}\n"
h_tmpl += "::1\tlocalhost.localdomain localhost {hostname}\n"
hosts_file.write(h_tmpl.format(hostname=hostname))
except Exception as e:
cls.logger.error("Failed setting hostname for machine. {}".format(str(e)))
@classmethod
def get_machineid(cls)->str:
"""
        gets the machine id, or initialises the machine id if it doesn't exist.
:return: string of the machine-id
:rtype: str
"""
if abs(cls._machine_id[-1] - time.time()) > 10:
if not os.path.isfile("/etc/machine-id"):
os.system("systemd-machine-id-setup")
with open("/etc/machine-id") as f:
cls._machine_id = f.read().strip(), time.time()
return cls._machine_id[0]
@classmethod
def get_tor_host(cls)->tuple:
"""
gets a tuple of the current tor host.
:return: tuple of hostname(onion address), client key, client name
:rtype: tuple[str, str, str]
"""
if abs(cls._tor_host[-1] - time.time()) > 10:
try:
with open("/home/tor_private/hostname") as f:
onion_address = f.read().replace('\n', '')
cls._tor_host = onion_address.split(" ")[:3], time.time()
except:
cls._tor_host = ("unknown", 'unknown', "unknown"), time.time()
return cls._tor_host[0]
@classmethod
def get_fs_space(cls)->tuple:
"""
        returns free/total space of the root filesystem in bytes.
:return: tuple of free/total space
:rtype: tuple[int, int]
"""
if abs(cls._fs[-1] - time.time()) > 10:
try:
a_statvfs = os.statvfs("/")
cls._fs = (
a_statvfs.f_frsize * a_statvfs.f_bavail, a_statvfs.f_frsize * a_statvfs.f_blocks), time.time()
except:
cls._fs = (0, 0), time.time()
return cls._fs[0]
@classmethod
def get_fs_space_mb(cls)->tuple:
"""
returns the filesystems free space in mebibytes.
see :func:`get_fs_space`
:return: tuple of free/total space
:rtype:tuple[int, int]
"""
free_space, total_space = SysUtil.get_fs_space()
for x in range(0, 2):
free_space /= 1024.0
total_space /= 1024.0
return free_space, total_space
@classmethod
def get_version(cls)->str:
"""
gets the "describe" version of the current git repo as a string.
:return: the current version
:rtype: str
"""
if abs(cls._version[-1] - time.time()) > 10:
try:
cmd = "/usr/bin/git describe --always"
cls._version = subprocess.check_output([cmd], shell=True).decode().strip("\n"), time.time()
except:
cls._version = "unknown", time.time()
return cls._version[0]
@classmethod
def get_internal_ip(cls):
"""
        gets the internal ip, preferring the tun0 interface address and falling back to a UDP socket towards Google's DNS
:return: the current internal ip
:rtype: str
"""
if abs(cls._ip_address[-1] - time.time()) > 10:
try:
try:
import netifaces
ip = netifaces.ifaddresses("tun0")[netifaces.AF_INET][0]["addr"]
cls._ip_address = ip, time.time()
except:
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 0))
cls._ip_address = s.getsockname()[0], time.time()
except:
cls._ip_address = "0.0.0.0", time.time()
return cls._ip_address[0]
@classmethod
def get_log_files(cls) -> list:
"""
returns the spc-eyepi log files that have been rotated.
:return: list of filenames
:rtype: list(str)
"""
return list(glob("/home/spc-eyepi/spc-eyepi.log.*"))
@classmethod
def clear_files(cls, filenames: list):
"""
removes all files in the list provided, skipping and logging on an error removing
todo: Do different things based on whether is a directory.
:param filenames: list of directories or files
:type filenames: list or tuple
"""
for f in filenames:
try:
os.remove(f)
except FileNotFoundError as e:
cls.logger.debug(str(e))
except IsADirectoryError as e:
cls.logger.error(str(e))
except Exception as e:
cls.logger.error(str(e))
@classmethod
def get_isonow(cls):
"""
gets the current time as an iso8601 string
:return: the current time as iso8601
:rtype: str
"""
return datetime.datetime.now().isoformat()
@classmethod
def get_external_ip(cls):
"""
returns the external IP address of the raspberry pi through api.ipify.org
:return: the external ip address
:rtype: str
"""
if abs(cls._external_ip[-1] - time.time()) > 60:
try:
url = 'https://api.ipify.org/?format=json'
response = request.urlopen(url, timeout=10).read().decode('utf-8')
cls._external_ip = json.loads(response)['ip'], time.time()
except:
cls._external_ip = "0.0.0.0", time.time()
return cls._external_ip[0]
@classmethod
def get_identifier_from_name(cls, name):
"""
returns either the identifier (from name) or the name filled with the machine id
clamps to 32 characters.
:param name: name to fill
:type name: str
:return: filled name
:rtype: str
"""
identifier = "".join((x if idx > len(name) - 1 else name[idx] for idx, x in enumerate(cls.get_machineid())))
return identifier[:32]
@classmethod
def get_identifier_from_filename(cls, file_name):
"""
returns either the identifier (from the file name) or the name filled with the machine id
:param file_name: filename
:type file_name: str
:return: string identifier,
:rtype: str
"""
fsn = next(iter(os.path.splitext(os.path.basename(file_name))), "")
return cls.get_identifier_from_name(fsn)
@classmethod
def ensure_config(cls, identifier):
"""
ensures a configuration file exists for this identifier.
        if a config file doesn't exist then it will create a default one.
:param identifier: identifier to create or find a configuration file for.
:type identifier: str
:return: the configuration file dict or configparser object.
:rtype: dict or configparser.ConfigParser
"""
config = configparser.ConfigParser()
config.read_string(default_config)
path = cls.identifier_to_ini(identifier)
try:
if len(config.read(path)):
return config
except Exception as e:
print(str(e))
if not config['localfiles']['spooling_dir']:
config['localfiles']['spooling_dir'] = "/home/images/spool/{}".format(identifier)
if not config['localfiles']['upload_dir']:
config['localfiles']['upload_dir'] = "/home/images/upload/{}".format(identifier)
if not config['camera']['name']:
config['camera']['name'] = cls.get_hostname() + identifier[:6]
cls.write_config(config, identifier)
return config
@classmethod
def write_config(cls, config: configparser.ConfigParser, identifier: str):
"""
        writes a configuration file to the correct config file path.
        :param config: configuration file (configparser object)
        :param identifier: identifier to use as the target file name.
        :type identifier: str
        :return: configparser object
"""
path = SysUtil.identifier_to_ini(identifier)
with open(path, 'w+') as configfile:
config.write(configfile)
return config
@classmethod
def identifier_to_ini(cls, identifier: str)->str:
"""
gets a valid .ini path for an identifier.
:param identifier: identifier to find an ini for.
:return: file path for identifier
:rtype: str
"""
for fn in glob("configs_byserial/*.ini"):
if identifier == cls.get_identifier_from_filename(fn):
return fn
else:
return os.path.join("configs_byserial/", identifier) + ".ini"
@classmethod
def ensure_light_config(cls, identifier):
"""
ensures a configuration file exists for this identifier.
        if a config file doesn't exist then it will create a default one.
:param identifier: identifier of the light
:type identifier: str
:return: configuration for the light
:rtype: configparser.ConfigParser
"""
config = configparser.ConfigParser()
config.read_string(default_light_config)
path = cls.identifier_to_ini(identifier)
try:
if len(config.read(path)):
return config
except Exception as e:
print(str(e))
if "{identifier}" in config.get("light", "file_path"):
config.set("light", "file_path",
config.get('light', "file_path").format(identifier=identifier))
cls.write_light_config(config, identifier)
return config
@classmethod
def get_light_configs(cls):
"""
gets a dict of the light config files (.ini)
:return: dict of light configs
:rtype: dict(str: configparser.ConfigParser)
"""
def slc_csv_exists(fp):
return os.path.exists(os.path.splitext(fp)[0]+".csv") or os.path.exists(os.path.splitext(fp)[0]+".slc")
def get_id(fp):
n, ext = os.path.splitext(os.path.basename(fp))
return n
try:
files = [x for x in glob("light_configs_byip/*.ini") if slc_csv_exists(x)]
f_and_id = {get_id(x): x for x in files}
return f_and_id
except Exception as e:
cls.logger.error("Couldnt enumerate lights, no light functionality. {}".format(str(e)))
return dict()
@classmethod
def write_light_config(cls, config: configparser.ConfigParser, identifier: str):
"""
        writes a configuration file to the correct config file path.
:param config: configuration file (configparser object)
:param identifier: identifier of the light.
:type identifier: str
:return: configparser object
"""
path = SysUtil.light_identifier_to_ini(identifier)
with open(path, 'w+') as configfile:
config.write(configfile)
return config
@classmethod
def get_light_datafile(cls, identifier: str)->str:
"""
gets a light datafile
:param identifier: identifier to use to find the data file.
:type identifier: str
:return: file path for csv or slc.
:rtype: str
"""
csv = "lights_byip/{}.csv".format(identifier)
slc = "lights_byip/{}.slc".format(identifier)
if os.path.exists(slc) and os.path.isfile(slc):
return slc
elif os.path.exists(csv) and os.path.isfile(csv):
return csv
else:
return ""
@classmethod
def load_or_fix_solarcalc(cls, identifier: str)->list:
"""
function to either load an existing fixed up solarcalc file or to coerce one into the fixed format.
:param identifier: identifier of the light for which the solarcalc file exists.
:type identifier: str
:return: light timing data as a list of lists.
:rtype: list(list())
"""
lx = []
fp = cls.get_light_datafile(identifier)
path, ext = os.path.splitext(fp)
header10 = ['datetime', 'temp', 'relativehumidity', 'LED1', 'LED2', 'LED3', 'LED4', 'LED5', 'LED6', 'LED7',
'LED8', 'LED9', 'LED10', 'total_solar_watt', 'simulated_datetime']
header7 = ['datetime', 'temp', 'relativehumidity', 'LED1', 'LED2', 'LED3', 'LED4', 'LED5', 'LED6', 'LED7',
'total_solar_watt', 'simulated_datetime']
if not os.path.isfile(fp):
SysUtil.logger.error("no SolarCalc file.")
raise FileNotFoundError()
if ext == ".slc":
with open(fp) as f:
lx = [x.strip().split(",") for x in f.readlines()]
else:
with open(fp) as f:
l = [x.strip().split(",") for x in f.readlines()]
def get_lines(li):
print("Loading csv")
for idx, line in enumerate(li):
try:
yield [
datetime.datetime.strptime("{}_{}".format(line[0], line[1]), "%d/%m/%Y_%H:%M").isoformat(),
*line[2:-1],
datetime.datetime.strptime(line[-1], "%d %b %Y %H:%M").isoformat()
]
except Exception as e:
SysUtil.logger.error("Couldnt fix solarcalc file. {}".format(str(e)))
print(l)
lx.extend(get_lines(l))
if len(l[0]) == 15:
lx.insert(0, header10)
else:
lx.insert(0, header7)
with open(path+".slc", 'w') as f:
f.write("\n".join([",".join(x) for x in lx]))
for idx, x in enumerate(lx[1:]):
lx[idx+1][0] = datetime.datetime.strptime(x[0], "%Y-%m-%dT%H:%M:%S")
lx[idx+1][-1] = datetime.datetime.strptime(x[-1], "%Y-%m-%dT%H:%M:%S")
return lx[1:]
@classmethod
def light_identifier_to_ini(cls, identifier: str)->str:
"""
gets a valid .ini path for an identifier.
:param identifier: identifier for a light
:type identifier: str
:return: ini filename for a light
:rtype: str
"""
for fn in glob("lights_byip/*.ini"):
if identifier == cls.get_identifier_from_filename(fn):
return fn
else:
return os.path.join("lights_byip/", identifier) + ".ini"
@classmethod
def identifier_to_yml(cls, identifier: str)->str:
"""
the same as identifier_to_ini but for yml files
:param identifier: identifier for a matching yml file.
:type identifier: str
:return: string filepath for the yml file.
:rtype: str
"""
for fn in glob("configs_byserial/*.yml"):
if identifier == cls.get_identifier_from_filename(fn):
return fn
else:
return os.path.join("configs_byserial/", identifier) + ".yml"
@classmethod
def configs_from_identifiers(cls, identifiers: set) -> dict:
"""
given a set of identifiers, returns a dictionary of the data contained in those config files with the key
for each config file data being the identifier
:param identifiers:
:type identifiers: list(str)
:return: dictionary of configuration datas
:rtype: dict(str: dict)
"""
data = dict()
for ini in ["configs_byserial/{}.ini".format(x) for x in identifiers]:
cfg = configparser.ConfigParser()
cfg.read(ini)
d = dict()
d = {section: dict(cfg.items(section)) for section in cfg.sections()}
data[cls.get_identifier_from_filename(ini)] = d
return data
@classmethod
def add_watch(cls, path: str, callback):
"""
adds a watch that calls the callback on file change
:param path: path of the file to watch
:type path: str
:param callback: function signature to call when the file is changed
"""
cls._watches.append((path, os.stat(path).st_mtime, callback))
@classmethod
def open_yaml(cls, filename):
"""
        opens a yaml file using yaml.safe_load
:param filename: yaml file to load
:return: dictionary of values in yaml file
:rtype: dict
"""
try:
with open(filename) as e:
                q = yaml.safe_load(e.read())
return q
except Exception as e:
print(str(e))
return dict()
@classmethod
def _thread(cls):
"""
runs the watchers
"""
        while not cls.stop:
try:
for index, (path, mtime, callback) in enumerate(cls._watches):
tmt = os.stat(path).st_mtime
if tmt != mtime:
cls._watches[index] = (path, tmt, callback)
try:
print("calling {}".format(callback))
callback()
except Exception as e:
print(str(e))
time.sleep(1)
except Exception as e:
break
cls.thread = None
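# --- Added usage sketch (not part of the original module) ---
# A minimal, hedged demonstration of the pure helpers above; it only touches
# SysUtil.sizeof_fmt and SysUtil._nested_lookup, which have no side effects.
def _example_sysutil_helpers():
    assert SysUtil.sizeof_fmt(1234) == '1.2KiB'
    document = {"a": [{"key": 1}, {"b": {"key": 2}}]}
    # _nested_lookup yields every value stored under "key", at any depth
    assert list(SysUtil._nested_lookup("key", document)) == [1, 2]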
|
borevitzlab/Gigvaision-ControlSoftware
|
libs/SysUtil.py
|
Python
|
mit
| 24,107
|
from .reader_utils import regist_reader, get_reader
from .feature_reader import FeatureReader
from .kinetics_reader import KineticsReader
from .nonlocal_reader import NonlocalReader
regist_reader("ATTENTIONCLUSTER", FeatureReader)
regist_reader("NEXTVLAD", FeatureReader)
regist_reader("ATTENTIONLSTM", FeatureReader)
regist_reader("TSN", KineticsReader)
regist_reader("TSM", KineticsReader)
regist_reader("STNET", KineticsReader)
regist_reader("NONLOCAL", NonlocalReader)
|
kuke/models
|
fluid/PaddleCV/video/datareader/__init__.py
|
Python
|
apache-2.0
| 474
|
import json
from random import shuffle
class City:
def __init__(self, cityName, population):
self.cityName = cityName
self.population = population
self.listOfDistancesToOtherCities = []
self.distanceToInStraightLineWarsaw = 0
self.latitude = 0
self.longitude = 0
class GraphCity:
def __init__(self, name, neighbours, position):
self.name = name
self.neighbours = neighbours
self.position = position
class NeighbourCity:
def __init__(self, name, population, parentName, distanceToParent, distanceToWarsaw):
self.name = name
self.population = population
self.parentName = parentName
self.distanceToParent = distanceToParent
self.distanceToWarsaw = distanceToWarsaw
class TreeNode:
def __init__(self, name, parentNode, childNodes, position, population = 0, distance = 0, distanceWarsaw = 0):
self.name = name
self.parentNode = parentNode
self.childNodes = childNodes
self.population = population
self.distance = distance
self.position = position
self.distanceWarsaw = distanceWarsaw
def getDistanceBetweenCities(city1Name, city2Name, citiesJson):
for city in citiesJson:
if city["name"] == city1Name:
for neighbour in city["list"]:
if neighbour["name"] == city2Name:
return neighbour["distance"]
def getPopulation(cityName, jsonCity):
for c in jsonCity:
if cityName == c['name']:
return c["population"]
def getDistanceToWarsaw(cityName, citiesJson):
if cityName == "Warszawa":
return 0
for city in citiesJson:
if city["name"] == cityName:
return city["distToWarsaw"]
def generateGraph(citiesJson, maxNumbersOfNeighbours=5):
    # Rule: each city connects to its closest cities, plus any city that already has a link back to it
citiesGraph = []
for cityJson in citiesJson:
sortedNeighbours = sorted(cityJson["list"], key=lambda x: x["distance"])
# print(sortedNeighbours)
newGraphCityName = cityJson["name"]
newNeighbours = []
position = getPosition(cityJson["lat"], cityJson["lng"])
neighboursLeft = maxNumbersOfNeighbours - len(newNeighbours)
        for i in range(0, min(neighboursLeft, len(sortedNeighbours))):
neighbour = sortedNeighbours[i]
neighbourName = neighbour["name"]
population = getPopulation(neighbourName, citiesJson)
parentName = newGraphCityName
distanceToParent = getDistanceBetweenCities(parentName, neighbourName, citiesJson)
distanceToWarsaw = getDistanceToWarsaw(neighbourName, citiesJson)
newNeighbours.append(NeighbourCity(neighbourName, population, parentName, distanceToParent, distanceToWarsaw))
citiesGraph.append(GraphCity(newGraphCityName, newNeighbours, position))
    # Find duplicates among neighbours (the same route appearing from both ends)
citiesGraph2 = citiesGraph
for cityFromGraph in citiesGraph:
neighbours = []
for cityFromGraph2 in citiesGraph2:
if cityFromGraph.name == cityFromGraph2.name:
continue
for neighbourFromGraph in cityFromGraph2.neighbours:
if neighbourFromGraph.name == cityFromGraph.name:
neighbourName = cityFromGraph2.name
population = getPopulation(cityFromGraph2.name, citiesJson)
parentName = cityFromGraph.name
distanceToParent = getDistanceBetweenCities(parentName, neighbourName, citiesJson)
distanceToWarsaw = getDistanceToWarsaw(neighbourName, citiesJson)
neighbours.append(NeighbourCity(neighbourName, population, parentName, distanceToParent, distanceToWarsaw))
cityFromGraph.neighbours = cityFromGraph.neighbours + neighbours
    # Remove duplicated neighbours
for cityFromGraph in citiesGraph:
neighbours = []
for n in cityFromGraph.neighbours:
found = False
for n2 in neighbours:
if n.name == n2.name:
found = True
if found == False:
neighbours.append(n)
cityFromGraph.neighbours = neighbours
return citiesGraph
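# --- Added illustration (not in the original script): a toy two-city input in
# the JSON shape the accessors above expect; all keys and values here are
# assumptions for demonstration, not real data. ---
def _exampleGenerateGraph():
    citiesJson = [
        {"name": "A", "population": 100, "lat": 52.5, "lng": 18.0,
         "distToWarsaw": 50, "list": [{"name": "B", "distance": 30}]},
        {"name": "B", "population": 200, "lat": 51.0, "lng": 20.0,
         "distToWarsaw": 70, "list": [{"name": "A", "distance": 30}]},
    ]
    graph = generateGraph(citiesJson, maxNumbersOfNeighbours=1)
    # Each city ends up with the other as its single (deduplicated) neighbour
    assert [g.name for g in graph] == ["A", "B"]
    assert graph[0].neighbours[0].name == "B"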
def computeBFS(graphCities, citiesJson, startingCityName, numberOfCitiesWithparticularPopulation=0, minPopulation=0, maxDepth = 10):
vNodes = []
rootNode = TreeNode(startingCityName, None, [], getJsonPosition(startingCityName, citiesJson),
getPopulation(startingCityName, citiesJson), 0,
getDistanceToWarsaw(startingCityName, citiesJson))
rootNode.childNodes = getNodeNeighbours(rootNode, graphCities, citiesJson)
vNodes.append(rootNode)
nodeQueue = list()
nodeQueue += rootNode.childNodes
lastNode = ""
while True:
if len(nodeQueue) == 0:
break
        # Pop the first node off the queue
currentNode = nodeQueue.pop(0)
if len(getRouteToRootNode(currentNode)) > maxDepth:
continue
        # Check whether this is Warszawa; if so, stop and keep it as lastNode
if currentNode.name != "Warszawa":
vNodes.append(currentNode)
if checkEnd(currentNode, numberOfCitiesWithparticularPopulation, minPopulation):
lastNode = currentNode
break
        # Fetch neighbouring nodes (append the unvisited ones to the back of the queue)
currentNode.childNodes = getNodeNeighbours(currentNode, graphCities, citiesJson)
for child in currentNode.childNodes:
found = False
# visitedNumber = len(visitedNodes)
# if visitedNumber > 4:
# for i in range(visitedNumber - 4, visitedNumber-1):
# if visitedNodes[i].name == child.parentNode.name:
# found = True
# else:
visitedNodes = getRouteToRootNode(currentNode)
for visited in visitedNodes:
if visited.name == child.name:
found = True
if found == False:
nodeQueue.append(child)
    return printResult(lastNode, vNodes, "BFS")
def getJsonPosition (cityName, citiesJson):
for c in citiesJson:
if cityName == c['name']:
return getPosition(c["lat"], c["lng"])
def getPosition (lat, lng):
midLng = 19
midLat = 52
if lat >= midLat:
if lng <= midLng:
return "I"
else:
return "II"
else:
if lng <= midLng:
return "III"
else:
return "IV"
def computeDFS(graphCities, citiesJson, startingCityName, numberOfCitiesWithparticularPopulation=0, minPopulation=0, maxDepth = 10):
vNodes = []
rootNode = TreeNode(startingCityName, None, [], getJsonPosition(startingCityName, citiesJson),
getPopulation(startingCityName, citiesJson), 0,
getDistanceToWarsaw(startingCityName, citiesJson))
rootNode.childNodes = getNodeNeighbours(rootNode, graphCities, citiesJson)
vNodes.append(rootNode)
nodeQueue = list()
nodeQueue += rootNode.childNodes
lastNode = ""
while True:
if len(nodeQueue) == 0:
break
        # Pop the first node off the queue
currentNode = nodeQueue.pop(0)
if len(getRouteToRootNode(currentNode)) > maxDepth:
continue
        # Check whether this is Warszawa; if so, stop and keep it as lastNode
if currentNode.name != "Warszawa":
vNodes.append(currentNode)
if checkEnd(currentNode, numberOfCitiesWithparticularPopulation, minPopulation):
lastNode = currentNode
break
        # Fetch neighbouring nodes (prepend the unvisited ones to the front of the queue)
currentNode.childNodes = getNodeNeighbours(currentNode, graphCities, citiesJson)
for child in currentNode.childNodes:
found = False
visitedNodes = getRouteToRootNode(currentNode)
for visited in visitedNodes:
if visited.name == child.name:
found = True
if found == False:
nodeQueue = [child] + nodeQueue
    return printResult(lastNode, vNodes, "DFS")
def printResult(lastNode, visitedNodes, algorithmName):
# print ("----------------" + algorithmName + " Route-----------------")
route = getRouteToRootNode(lastNode)
distance = 0
citiesNames = []
for r in route:
if r != "":
# print (r.name + " " + str(r.position) + " pop. " + str(r.population))
citiesNames = [r.name] + citiesNames
distance += r.distance
# print (citiesNames)
# print ("Depth " + str(len(route)))
# print ("WHOLE DISTANCE " + str(distance) + "km")
# print ("Visited nodes " + str(len(visitedNodes)))
return distance, len(route), len(visitedNodes)
def getRouteToRootNode (lastNode):
route = []
node = lastNode
while True:
route.append(node)
if node == "" or node.parentNode == None:
break
node = node.parentNode
return route
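# --- Added sketch (not in the original script): walking a three-node chain
# back to its root; names, positions and distances are placeholder values. ---
def _exampleRouteToRoot():
    a = TreeNode("A", None, [], "I")
    b = TreeNode("B", a, [], "II", distance=10)
    c = TreeNode("C", b, [], "III", distance=20)
    route = getRouteToRootNode(c)
    assert [n.name for n in route] == ["C", "B", "A"]
    assert sum(n.distance for n in route) == 30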
def checkEnd(node, numberofCitiesWithParticularPopulation, minPopulation):
if node.name != "Warszawa":
return False
route = getRouteToRootNode(node)
visitedCitiesWithPopulation = 0
polandParts = set()
for r in route:
if r.name != "Warszawa":
polandParts.add(r.position)
if r.name != "Warszawa" and r.population >= minPopulation:
visitedCitiesWithPopulation += 1
if visitedCitiesWithPopulation >= numberofCitiesWithParticularPopulation and len(polandParts) > 1:
return True
return False
def checkPopulation(node, numberofCitiesWithParticularPopulation, minPopulation):
route = getRouteToRootNode(node)
visitedCitiesWithPopulation = 0
for r in route:
if r.name != "Warszawa" and r.population >= minPopulation:
visitedCitiesWithPopulation += 1
if visitedCitiesWithPopulation >= numberofCitiesWithParticularPopulation:
return True
return False
def computeGreedySearch(graphCities, citiesJson, startingCityName, numberOfCitiesWithparticularPopulation=0, minPopulation=0, maxDepth = 10):
vNodes = []
rootNode = TreeNode(startingCityName, None, [], getJsonPosition(startingCityName, citiesJson),getPopulation(startingCityName, citiesJson), 0, getDistanceToWarsaw(startingCityName, citiesJson))
rootNode.childNodes = getNodeNeighbours(rootNode, graphCities, citiesJson)
rootNode.childNodes = sorted(rootNode.childNodes, key=lambda x: x.distanceWarsaw)
vNodes.append(rootNode)
nodeQueue = []
nodeQueue = rootNode.childNodes + nodeQueue
lastNode = ""
while True:
if len(nodeQueue) == 0:
break
currentNode = nodeQueue.pop(0)
if len(getRouteToRootNode(currentNode)) > maxDepth:
continue
if checkEnd(currentNode, numberOfCitiesWithparticularPopulation, minPopulation):
lastNode = currentNode
break
currentNode.childNodes = getNodeNeighbours(currentNode, graphCities, citiesJson)
currentNode.childNodes = getSortedChilds(currentNode, numberOfCitiesWithparticularPopulation, minPopulation)
if currentNode.name != "Warszawa":
vNodes.append(currentNode)
visitedNodes = getRouteToRootNode(currentNode)
for child in currentNode.childNodes:
found = False
for visited in visitedNodes:
if visited.name == child.name:
found = True
if found == False:
nodeQueue.append(child)
return printResult(lastNode, vNodes, "Greedy")
def getSortedChilds (parent, numberOfCitiesWithparticularPopulation, minPopulation):
childs = []
route = getRouteToRootNode(parent)
#Check if we changed part of Poland
changedPart = checkChangedPart(route)
#Check population
changedPopulation = checkPopulation(parent, numberOfCitiesWithparticularPopulation, minPopulation)
for c in parent.childNodes:
if c.name == "Warszawa":
c.population = 0
    # Sort by population first, then by change of region of Poland
if changedPopulation == False:
parent.childNodes = sorted(parent.childNodes, key=lambda x: x.distanceWarsaw)
parent.childNodes = sorted(parent.childNodes, key=lambda x: x.population, reverse=True)
return parent.childNodes
if changedPart == False:
childs = getElementsWithOtherPosition(parent)
return childs
childs = sorted(parent.childNodes, key=lambda x: x.distanceWarsaw)
return childs
def checkChangedPart(route):
parts = set()
    # collect the regions (quadrants) visited so far
    for r in route:
        parts.add(r.position)
if len(parts) > 1:
return True
return False
def getElementsWithOtherPosition(parent):
childs = list()
for c in parent.childNodes:
if str(c.position) != str(parent.position):
childs.append(c)
for c in parent.childNodes:
found = False
for ch in childs:
if c.name == ch.name:
found = True
if found == False:
childs.append(c)
return childs
def computeAStarSearch(graphCities, citiesJson, startingCityName, numberOfCitiesWithparticularPopulation=0, minPopulation=0, maxDepth = 10):
vNodes = []
rootNode = TreeNode(startingCityName, None, [], getJsonPosition(startingCityName, citiesJson),
getPopulation(startingCityName, citiesJson), 0,
getDistanceToWarsaw(startingCityName, citiesJson))
rootNode.childNodes = getNodeNeighbours(rootNode, graphCities, citiesJson)
rootNode.childNodes = sorted(rootNode.childNodes, key=lambda x: x.distanceWarsaw)
vNodes.append(rootNode)
nodeQueue = []
nodeQueue = rootNode.childNodes + nodeQueue
lastNode = ""
while True:
if len(nodeQueue) == 0:
break
currentNode = nodeQueue.pop(0)
if len(getRouteToRootNode(currentNode)) > maxDepth:
continue
if checkEnd(currentNode, numberOfCitiesWithparticularPopulation, minPopulation):
lastNode = currentNode
break
currentNode.childNodes = getNodeNeighbours(currentNode, graphCities, citiesJson)
currentNode.childNodes = getSortedChilds(currentNode, numberOfCitiesWithparticularPopulation, minPopulation)
currentNode.childNodes = sortChildsByDistanceWhichIneedToTravel(currentNode)
if currentNode.name != "Warszawa":
vNodes.append(currentNode)
visitedNodes = getRouteToRootNode(currentNode)
for child in currentNode.childNodes:
found = False
for visited in visitedNodes:
if visited.name == child.name:
found = True
if found == False:
nodeQueue.append(child)
return printResult(lastNode, vNodes, "A*")
def sortChildsByDistanceWhichIneedToTravel(currentNode):
route = getRouteToRootNode(currentNode)
traveledDistance = 0
for r in route:
traveledDistance += r.distance
childs = []
for c in currentNode.childNodes:
childs.append({'name': c.name, 'val': (traveledDistance + c.distance + c.distanceWarsaw)})
childs = sorted(childs, key=lambda x: x['val'])
returnChilds = []
for c in childs:
for ch in currentNode.childNodes:
if str(c['name']) == str(ch.name):
returnChilds.append(ch)
return returnChilds
def getNodeNeighbours(parentNode, graphCities, citiesJson):
    # Find the parent in the graph
neighboursGraph = []
neighboursNodes = []
for parent in graphCities:
if parentNode.name == parent.name:
neighboursGraph = parent.neighbours
break
    # Do not add the parent itself as a neighbour
for n in neighboursGraph:
neighboursNodes.append(TreeNode(n.name, parentNode, [], getJsonPosition(n.name, citiesJson), getPopulation(n.name, citiesJson), getDistanceBetweenCities(parentNode.name, n.name, citiesJson), getDistanceToWarsaw(n.name, citiesJson)))
return neighboursNodes
def getNonVisitedNeighbours(neighbours, visitedCities):
citiesToVisit = []
for n in neighbours:
found = False
for v in visitedCities:
if n.name == v.name:
found = True
if found == False:
citiesToVisit.append(n)
return citiesToVisit
def main():
jsonCities = ""
with open("cities_merged.json") as cities_json:
jsonCities = json.load(cities_json)
minimumCities = 2
maxDepth = 10
minimalPopulation = 400000
minNumberOfNeighbours = 15
graph = generateGraph(jsonCities, minNumberOfNeighbours)
print("-+-+-+-+GRAPH+-+-+-+")
for g in graph:
shuffle(g.neighbours)
for n in g.neighbours:
if n.name == "Warszawa":
n.population = 0
# print("-+-+-+-+-+-+-+")
cities = []
cities.append(City("Warszawa", 1711000))
cities.append(City("Chełm", 63949))
cities.append(City("Sieradz", 44045))
cities.append(City("Garwolin", 16710))
cities.append(City("Ryki", 9716))
cities.append(City("Ostrów Wielkopolski", 72360))
cities.append(City("Kołobrzeg", 46830))
cities.append(City("Kielce", 201363))
cities.append(City("Zabrze", 179861))
cities.append(City("Gliwice", 186347))
cities.append(City("Gorzów Wielkopolski", 124470))
cities.append(City("Bytom", 175377))
cities.append(City("Dąbrowa Górnicza", 125063))
cities.append(City("Chorzów", 111314))
cities.append(City("Ruda Śląska", 142672))
cities.append(City("Płock", 124048))
cities.append(City("Rybnik", 140863))
cities.append(City("Legnica", 102708))
cities.append(City("Opole", 118938))
cities.append(City("Tarnów", 113188))
cities.append(City("Tychy", 129087))
cities.append(City("Wałbrzych", 119216))
cities.append(City("Elbląg", 123977))
cities.append(City("Koszalin", 109183))
cities.append(City("Suwałki", 69527))
cities.append(City("Zamość", 65149))
cities.append(City("Szczecin", 409211))
cities.append(City("Gdynia", 248574))
cities.append(City("Bydgoszcz", 362286))
cities.append(City("Gdańsk", 460354))
cities.append(City("Białystok", 294675))
cities.append(City("Olsztyn", 175482))
cities.append(City("Poznań", 552393))
cities.append(City("Włocławek", 115982))
cities.append(City("Częstochowa", 235156))
cities.append(City("Katowice", 308269))
cities.append(City("Kraków", 759131))
cities.append(City("Rzeszów", 180776))
cities.append(City("Przemyśl", 62485))
cities.append(City("Krosno", 46934))
cities.append(City("Nowy Sącz", 83903))
cities.append(City("Zakopane", 27486))
cities.append(City("Lublin", 324637))
cities.append(City("Łódź", 722022))
cities.append(City("Wrocław", 631377))
cities.append(City("Jelenia Góra", 84306))
cities.append(City("Zielona Góra", 119182))
cities.append(City("Bielsko Biała", 174291))
cities.append(City("Radom", 220062))
greedyDistance = 0
greedyDepth = 0
greedyVisited = 0
for c in cities:
startingCity = c.cityName
if c.cityName != "Warszawa":
# print (c.cityName+"+++++++++++++++++")
distance, depth, visited = computeGreedySearch(graph, jsonCities, startingCity, minimumCities, minimalPopulation, maxDepth)
greedyDistance += distance
greedyDepth += depth
greedyVisited += visited
print ("Greedy distance " + str(greedyDistance/(len(cities))))
print("Greedy depth " + str(greedyDepth / (len(cities))))
print("Greedy visited " + str(greedyVisited/ (len(cities))))
greedyDistance = 0
greedyDepth = 0
greedyVisited = 0
for c in cities:
startingCity = c.cityName
if c.cityName != "Warszawa":
# print (c.cityName+"+++++++++++++++++")
distance, depth, visited = computeAStarSearch(graph, jsonCities, startingCity, minimumCities, minimalPopulation, maxDepth)
greedyDistance += distance
greedyDepth += depth
greedyVisited += visited
print ("A* distance " + str(greedyDistance/(len(cities))))
print("A* depth " + str(greedyDepth / (len(cities))))
print("A* visited " + str(greedyVisited/ (len(cities))))
# print("-+-+-+-+-+-+-+")
# computeAStarSearch(graph, jsonCities, startingCity, minimumCities, minimalPopulation, maxDepth)
#TODO:
if __name__ == "__main__":
main()
|
DPP93/SearchGraphCities_PL
|
Searching/routing/routing.py
|
Python
|
gpl-3.0
| 20,838
|
"""
Basic MLP class methods for parameters initialization, saving, loading
plotting
"""
import os
from six.moves import cPickle as pickle
import yaml
import numpy as np
from copy import deepcopy
from lxmls.deep_learning.utils import Model
def load_parameters(parameter_file):
"""
Load model
"""
with open(parameter_file, 'rb') as fid:
parameters = pickle.load(fid)
return parameters
def load_config(config_path):
with open(config_path, 'r') as fid:
        config = yaml.safe_load(fid)
return config
def save_config(config_path, config):
with open(config_path, 'w') as fid:
yaml.dump(config, fid, default_flow_style=False)
def initialize_rnn_parameters(input_size, embedding_size, hidden_size,
output_size, random_seed=None,
loaded_parameters=None):
"""
Initialize parameters from geometry or existing weights
"""
# Initialize random seed if not given
if random_seed is None:
random_seed = np.random.RandomState(1234)
if loaded_parameters is not None:
# LOAD MODELS
assert len(loaded_parameters) == 4, \
"New geometry not matching model saved"
W_e, W_x, W_h, W_y = loaded_parameters
        # Note: Pytorch requires this shape order for nn.Embedding()
        assert W_e.shape == (input_size, embedding_size), \
            "Embedding layer size not matching saved model"
        assert W_x.shape == (hidden_size, embedding_size), \
            "Input layer size not matching saved model"
assert W_h.shape == (hidden_size, hidden_size), \
"Hidden layer not matching saved model"
assert W_y.shape == (output_size, hidden_size), \
"Output layer size not matching saved model"
else:
# INITIALIZE
# Input layer
W_e = 0.01*random_seed.uniform(size=(input_size, embedding_size))
# Input layer
W_x = random_seed.uniform(size=(hidden_size, embedding_size))
# Recurrent layer
W_h = random_seed.uniform(size=(hidden_size, hidden_size))
# Output layer
W_y = random_seed.uniform(size=(output_size, hidden_size))
return [W_e, W_x, W_h, W_y]
def get_rnn_parameter_handlers(layer_index=None, row=None, column=None):
def get_parameter(parameters):
# weight
return parameters[layer_index][row, column]
def set_parameter(parameters, parameter_value):
# weight
parameters[layer_index][row, column] = parameter_value
return parameters
return get_parameter, set_parameter
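# --- Added usage sketch (not part of the original module): the handler pair
# reads and writes a single scalar weight inside the parameter list produced
# by initialize_rnn_parameters; the toy geometry below is an assumption. ---
def _example_parameter_handlers():
    params = initialize_rnn_parameters(
        input_size=5, embedding_size=3, hidden_size=4, output_size=2
    )
    # layer_index=1 selects W_x, which has shape (hidden_size, embedding_size)
    get_w, set_w = get_rnn_parameter_handlers(layer_index=1, row=0, column=0)
    params = set_w(params, 0.5)
    assert get_w(params) == 0.5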
def get_rnn_loss_range(model, get_parameter, set_parameter, batch, span=10):
# perturbation of weight values
perturbations = np.linspace(-span, span, 200)
# Compute the loss when varying the study weight
parameters = deepcopy(model.parameters)
current_weight = float(get_parameter(parameters))
loss_range = []
old_parameters = list(model.parameters)
for perturbation in perturbations:
        # Change parameters
model.parameters = set_parameter(
parameters,
current_weight + perturbation
)
# Compute loss
perturbated_loss = model.cross_entropy_loss(
batch['input'],
batch['output']
)
loss_range.append(perturbated_loss)
# Return to old parameters
model.parameters = old_parameters
weight_range = current_weight + perturbations
return weight_range, loss_range
class RNN(Model):
def __init__(self, **config):
# CHECK THE PARAMETERS ARE VALID
self.sanity_checks(config)
# OPTIONAL MODEL LOADING
model_folder = config.get('model_folder', None)
if model_folder is not None:
saved_config, loaded_parameters = self.load(model_folder)
            # Note that if a config is given it is used instead of the saved
            # one (the two must be consistent). config comes from **kwargs, so
            # it is a dict; test for emptiness rather than None.
            if not config:
                config = saved_config
else:
loaded_parameters = None
# Class variables
self.config = config
self.parameters = initialize_rnn_parameters(
config['input_size'],
config['embedding_size'],
config['hidden_size'],
config['output_size'],
loaded_parameters=loaded_parameters
)
def sanity_checks(self, config):
model_folder = config.get('model_folder', None)
        assert config is not None or model_folder is not None, \
            "Need to specify config, model_folder or both"
if config is not None:
pass
if model_folder is not None:
model_file = "%s/config.yml" % model_folder
assert os.path.isfile(model_file), "Need to provide %s" % model_file
def load(self, model_folder):
"""
Load model
"""
        # Configuration in yaml format
config_file = "%s/config.yml" % model_folder
config = load_config(config_file)
# Computation graph parameters as pickle file
parameter_file = "%s/parameters.pkl" % model_folder
loaded_parameters = load_parameters(parameter_file)
return config, loaded_parameters
def save(self, model_folder):
"""
Save model
"""
# Create folder if it does not exist
if not os.path.isdir(model_folder):
os.mkdir(model_folder)
        # Configuration in yaml format
config_file = "%s/config.yml" % model_folder
save_config(config_file, self.config)
# Computation graph parameters as pickle file
parameter_file = "%s/parameters.pkl" % model_folder
with open(parameter_file, 'wb') as fid:
pickle.dump(self.parameters, fid, pickle.HIGHEST_PROTOCOL)
def plot_weights(self, show=True, aspect='auto'):
"""
        Plots the weights of the network
Use show = False to plot various models one after the other
"""
import matplotlib.pyplot as plt
plt.figure()
for n in range(self.n_layers):
# Get weights and bias
weight, bias = self.parameters[n]
# Plot them
plt.subplot(2, self.n_layers, n+1)
plt.imshow(weight, aspect=aspect, interpolation='nearest')
plt.title('Layer %d Weight' % n)
plt.colorbar()
plt.subplot(2, self.n_layers, self.n_layers+(n+1))
            plt.plot(bias)
            plt.title('Layer %d Bias' % n)
            # (no colorbar here: plt.colorbar() needs a mappable, which a
            # line plot does not provide)
if show:
plt.show()
|
LxMLS/lxmls-toolkit
|
lxmls/deep_learning/rnn.py
|
Python
|
mit
| 6,642
|
"""
Statistical tools for time series analysis
"""
from statsmodels.compat.numpy import lstsq
from statsmodels.compat.pandas import deprecate_kwarg
from statsmodels.compat.python import lzip
from statsmodels.compat.scipy import _next_regular
import warnings
import numpy as np
from numpy.linalg import LinAlgError
import pandas as pd
from scipy import stats
from statsmodels.regression.linear_model import OLS, yule_walker
from statsmodels.tools.sm_exceptions import (
CollinearityWarning,
InfeasibleTestError,
InterpolationWarning,
MissingDataError,
)
from statsmodels.tools.tools import Bunch, add_constant
from statsmodels.tools.validation import (
array_like,
bool_like,
dict_like,
float_like,
int_like,
string_like,
)
from statsmodels.tsa._bds import bds
from statsmodels.tsa._innovations import innovations_algo, innovations_filter
from statsmodels.tsa.adfvalues import mackinnoncrit, mackinnonp
from statsmodels.tsa.tsatools import add_trend, lagmat, lagmat2ds
__all__ = [
"acovf",
"acf",
"pacf",
"pacf_yw",
"pacf_ols",
"ccovf",
"ccf",
"q_stat",
"coint",
"arma_order_select_ic",
"adfuller",
"kpss",
"bds",
"pacf_burg",
"innovations_algo",
"innovations_filter",
"levinson_durbin_pacf",
"levinson_durbin",
"zivot_andrews",
]
SQRTEPS = np.sqrt(np.finfo(np.double).eps)
def _autolag(
mod,
endog,
exog,
startlag,
maxlag,
method,
modargs=(),
fitargs=(),
regresults=False,
):
"""
Returns the results for the lag length that maximizes the info criterion.
Parameters
----------
mod : Model class
Model estimator class
endog : array_like
nobs array containing endogenous variable
exog : array_like
nobs by (startlag + maxlag) array containing lags and possibly other
variables
startlag : int
The first zero-indexed column to hold a lag. See Notes.
maxlag : int
The highest lag order for lag length selection.
method : {"aic", "bic", "t-stat"}
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
modargs : tuple, optional
args to pass to model. See notes.
fitargs : tuple, optional
args to pass to fit. See notes.
regresults : bool, optional
Flag indicating to return optional return results
Returns
-------
icbest : float
Best information criteria.
bestlag : int
The lag length that maximizes the information criterion.
results : dict, optional
Dictionary containing all estimation results
Notes
-----
Does estimation like mod(endog, exog[:,:i], *modargs).fit(*fitargs)
where i goes from lagstart to lagstart+maxlag+1. Therefore, lags are
assumed to be in contiguous columns from low to high lag length with
the highest lag in the last column.
"""
# TODO: can tcol be replaced by maxlag + 2?
# TODO: This could be changed to laggedRHS and exog keyword arguments if
# this will be more general.
results = {}
method = method.lower()
for lag in range(startlag, startlag + maxlag + 1):
mod_instance = mod(endog, exog[:, :lag], *modargs)
        results[lag] = mod_instance.fit(*fitargs)
if method == "aic":
icbest, bestlag = min((v.aic, k) for k, v in results.items())
elif method == "bic":
icbest, bestlag = min((v.bic, k) for k, v in results.items())
elif method == "t-stat":
# stop = stats.norm.ppf(.95)
stop = 1.6448536269514722
# Default values to ensure that always set
bestlag = startlag + maxlag
icbest = 0.0
for lag in range(startlag + maxlag, startlag - 1, -1):
icbest = np.abs(results[lag].tvalues[-1])
bestlag = lag
if np.abs(icbest) >= stop:
# Break for first lag with a significant t-stat
break
else:
raise ValueError(f"Information Criterion {method} not understood.")
if not regresults:
return icbest, bestlag
else:
return icbest, bestlag, results
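# --- Added usage sketch (not part of the original module): lag-order selection
# by AIC on a synthetic AR(1)-like series; the exog layout (lags in contiguous
# columns) follows the Notes above, and all values are illustrative only. ---
def _example_autolag():
    rs = np.random.RandomState(0)
    y = rs.standard_normal(200)
    for i in range(1, len(y)):
        y[i] += 0.5 * y[i - 1]
    # column 0 is the level, columns 1..5 are lags 1..5
    xall = lagmat(y[:, None], maxlag=5, trim="both", original="in")
    endog, exog = xall[:, 0], xall[:, 1:]
    icbest, bestlag = _autolag(OLS, endog, exog, startlag=1, maxlag=4,
                               method="aic")
    return icbest, bestlag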
# this needs to be converted to a class like HetGoldfeldQuandt,
# 3 different returns are a mess
# See:
# Ng and Perron(2001), Lag length selection and the construction of unit root
# tests with good size and power, Econometrica, Vol 69 (6) pp 1519-1554
# TODO: include drift keyword, only valid with regression == "c"
# just changes the distribution of the test statistic to a t distribution
# TODO: autolag is untested
def adfuller(
x,
maxlag=None,
regression="c",
autolag="AIC",
store=False,
regresults=False,
):
"""
Augmented Dickey-Fuller unit root test.
The Augmented Dickey-Fuller test can be used to test for a unit root in a
univariate process in the presence of serial correlation.
Parameters
----------
x : array_like, 1d
The data series to test.
maxlag : int
Maximum lag which is included in test, default 12*(nobs/100)^{1/4}.
regression : {"c","ct","ctt","nc"}
Constant and trend order to include in regression.
* "c" : constant only (default).
* "ct" : constant and trend.
* "ctt" : constant, and linear and quadratic trend.
* "nc" : no constant, no trend.
autolag : {"AIC", "BIC", "t-stat", None}
Method to use when automatically determining the lag length among the
values 0, 1, ..., maxlag.
* If "AIC" (default) or "BIC", then the number of lags is chosen
to minimize the corresponding information criterion.
* "t-stat" based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test.
* If None, then the number of included lags is set to maxlag.
store : bool
If True, then a result instance is returned additionally to
the adf statistic. Default is False.
regresults : bool, optional
If True, the full regression results are returned. Default is False.
Returns
-------
adf : float
The test statistic.
pvalue : float
MacKinnon's approximate p-value based on MacKinnon (1994, 2010).
usedlag : int
The number of lags used.
nobs : int
The number of observations used for the ADF regression and calculation
of the critical values.
critical values : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels. Based on MacKinnon (2010).
icbest : float
The maximized information criterion if autolag is not None.
resstore : ResultStore, optional
A dummy class with results attached as attributes.
Notes
-----
The null hypothesis of the Augmented Dickey-Fuller is that there is a unit
root, with the alternative that there is no unit root. If the pvalue is
above a critical size, then we cannot reject that there is a unit root.
The p-values are obtained through regression surface approximation from
MacKinnon 1994, but using the updated 2010 tables. If the p-value is close
to significant, then the critical values should be used to judge whether
to reject the null.
The autolag option and maxlag for it are described in Greene.
References
----------
    .. [1] W. Greene. "Econometric Analysis," 5th ed., Pearson, 2003.
.. [2] Hamilton, J.D. "Time Series Analysis". Princeton, 1994.
.. [3] MacKinnon, J.G. 1994. "Approximate asymptotic distribution functions for
unit-root and cointegration tests. `Journal of Business and Economic
Statistics` 12, 167-76.
    .. [4] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests." Queen's
University, Dept of Economics, Working Papers. Available at
http://ideas.repec.org/p/qed/wpaper/1227.html
Examples
--------
See example notebook
"""
x = array_like(x, "x")
maxlag = int_like(maxlag, "maxlag", optional=True)
regression = string_like(
regression, "regression", options=("c", "ct", "ctt", "nc")
)
autolag = string_like(
autolag, "autolag", optional=True, options=("aic", "bic", "t-stat")
)
store = bool_like(store, "store")
regresults = bool_like(regresults, "regresults")
if regresults:
store = True
trenddict = {None: "nc", 0: "c", 1: "ct", 2: "ctt"}
if regression is None or isinstance(regression, int):
regression = trenddict[regression]
regression = regression.lower()
nobs = x.shape[0]
ntrend = len(regression) if regression != "nc" else 0
if maxlag is None:
# from Greene referencing Schwert 1989
maxlag = int(np.ceil(12.0 * np.power(nobs / 100.0, 1 / 4.0)))
# -1 for the diff
maxlag = min(nobs // 2 - ntrend - 1, maxlag)
if maxlag < 0:
raise ValueError(
"sample size is too short to use selected "
"regression component"
)
elif maxlag > nobs // 2 - ntrend - 1:
raise ValueError(
"maxlag must be less than (nobs/2 - 1 - ntrend) "
"where n trend is the number of included "
"deterministic regressors"
)
xdiff = np.diff(x)
xdall = lagmat(xdiff[:, None], maxlag, trim="both", original="in")
nobs = xdall.shape[0]
xdall[:, 0] = x[-nobs - 1 : -1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
if store:
from statsmodels.stats.diagnostic import ResultsStore
resstore = ResultsStore()
if autolag:
if regression != "nc":
fullRHS = add_trend(xdall, regression, prepend=True)
else:
fullRHS = xdall
startlag = fullRHS.shape[1] - xdall.shape[1] + 1
# 1 for level
# search for lag length with smallest information criteria
# Note: use the same number of observations to have comparable IC
# aic and bic: smaller is better
if not regresults:
icbest, bestlag = _autolag(
OLS, xdshort, fullRHS, startlag, maxlag, autolag
)
else:
icbest, bestlag, alres = _autolag(
OLS,
xdshort,
fullRHS,
startlag,
maxlag,
autolag,
regresults=regresults,
)
resstore.autolag_results = alres
bestlag -= startlag # convert to lag not column index
# rerun ols with best autolag
xdall = lagmat(xdiff[:, None], bestlag, trim="both", original="in")
nobs = xdall.shape[0]
xdall[:, 0] = x[-nobs - 1 : -1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
usedlag = bestlag
else:
usedlag = maxlag
icbest = None
if regression != "nc":
resols = OLS(
xdshort, add_trend(xdall[:, : usedlag + 1], regression)
).fit()
else:
resols = OLS(xdshort, xdall[:, : usedlag + 1]).fit()
adfstat = resols.tvalues[0]
# adfstat = (resols.params[0]-1.0)/resols.bse[0]
# the "asymptotically correct" z statistic is obtained as
# nobs/(1-np.sum(resols.params[1:-(trendorder+1)])) (resols.params[0] - 1)
# I think this is the statistic that is used for series that are integrated
# for orders higher than I(1), ie., not ADF but cointegration tests.
# Get approx p-value and critical values
pvalue = mackinnonp(adfstat, regression=regression, N=1)
critvalues = mackinnoncrit(N=1, regression=regression, nobs=nobs)
critvalues = {
"1%": critvalues[0],
"5%": critvalues[1],
"10%": critvalues[2],
}
if store:
resstore.resols = resols
resstore.maxlag = maxlag
resstore.usedlag = usedlag
resstore.adfstat = adfstat
resstore.critvalues = critvalues
resstore.nobs = nobs
resstore.H0 = (
"The coefficient on the lagged level equals 1 - " "unit root"
)
resstore.HA = "The coefficient on the lagged level < 1 - stationary"
resstore.icbest = icbest
resstore._str = "Augmented Dickey-Fuller Test Results"
return adfstat, pvalue, critvalues, resstore
else:
if not autolag:
return adfstat, pvalue, usedlag, nobs, critvalues
else:
return adfstat, pvalue, usedlag, nobs, critvalues, icbest
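# --- Added usage sketch (not part of the original module): the ADF test on a
# synthetic random walk, which has a unit root by construction; with the
# default autolag="AIC" six values are returned. Values are illustrative. ---
def _example_adfuller():
    rs = np.random.RandomState(0)
    y = rs.standard_normal(250).cumsum()
    adf, pvalue, usedlag, nobs, critvalues, icbest = adfuller(y)
    # For a random walk we expect a large p-value: fail to reject the null
    # hypothesis of a unit root.
    return adf, pvalue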
@deprecate_kwarg("unbiased", "adjusted")
def acovf(x, adjusted=False, demean=True, fft=None, missing="none", nlag=None):
"""
Estimate autocovariances.
Parameters
----------
x : array_like
Time series data. Must be 1d.
adjusted : bool, default False
If True, then denominators is n-k, otherwise n.
demean : bool, default True
If True, then subtract the mean x from each element of x.
fft : bool, default None
If True, use FFT convolution. This method should be preferred
for long time series.
missing : str, default "none"
A string in ["none", "raise", "conservative", "drop"] specifying how
the NaNs are to be treated. "none" performs no checks. "raise" raises
an exception if NaN values are found. "drop" removes the missing
observations and then estimates the autocovariances treating the
non-missing as contiguous. "conservative" computes the autocovariance
using nan-ops so that nans are removed when computing the mean
and cross-products that are used to estimate the autocovariance.
When using "conservative", n is set to the number of non-missing
observations.
nlag : {int, None}, default None
Limit the number of autocovariances returned. Size of returned
array is nlag + 1. Setting nlag when fft is False uses a simple,
direct estimator of the autocovariances that only computes the first
nlag + 1 values. This can be much faster when the time series is long
and only a small number of autocovariances are needed.
Returns
-------
ndarray
The estimated autocovariances.
References
-----------
.. [1] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
"""
adjusted = bool_like(adjusted, "adjusted")
demean = bool_like(demean, "demean")
fft = bool_like(fft, "fft", optional=True)
missing = string_like(
missing, "missing", options=("none", "raise", "conservative", "drop")
)
nlag = int_like(nlag, "nlag", optional=True)
if fft is None:
msg = (
"fft=True will become the default after the release of the 0.12 "
"release of statsmodels. To suppress this warning, explicitly "
"set fft=False."
)
warnings.warn(msg, FutureWarning)
fft = False
x = array_like(x, "x", ndim=1)
missing = missing.lower()
if missing == "none":
deal_with_masked = False
else:
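        # has_missing is assumed to be available in this module; a NaN check
        # such as bool(np.isnan(np.sum(x))) is equivalent.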
deal_with_masked = has_missing(x)
if deal_with_masked:
if missing == "raise":
raise MissingDataError("NaNs were encountered in the data")
notmask_bool = ~np.isnan(x) # bool
if missing == "conservative":
# Must copy for thread safety
x = x.copy()
x[~notmask_bool] = 0
else: # "drop"
x = x[notmask_bool] # copies non-missing
notmask_int = notmask_bool.astype(int) # int
if demean and deal_with_masked:
# whether "drop" or "conservative":
xo = x - x.sum() / notmask_int.sum()
if missing == "conservative":
xo[~notmask_bool] = 0
elif demean:
xo = x - x.mean()
else:
xo = x
n = len(x)
lag_len = nlag
if nlag is None:
lag_len = n - 1
elif nlag > n - 1:
raise ValueError("nlag must be smaller than nobs - 1")
if not fft and nlag is not None:
acov = np.empty(lag_len + 1)
acov[0] = xo.dot(xo)
for i in range(lag_len):
acov[i + 1] = xo[i + 1 :].dot(xo[: -(i + 1)])
if not deal_with_masked or missing == "drop":
if adjusted:
acov /= n - np.arange(lag_len + 1)
else:
acov /= n
else:
if adjusted:
divisor = np.empty(lag_len + 1, dtype=np.int64)
divisor[0] = notmask_int.sum()
for i in range(lag_len):
divisor[i + 1] = notmask_int[i + 1 :].dot(
notmask_int[: -(i + 1)]
)
divisor[divisor == 0] = 1
acov /= divisor
            else:  # biased, missing data but not "drop"
acov /= notmask_int.sum()
return acov
if adjusted and deal_with_masked and missing == "conservative":
d = np.correlate(notmask_int, notmask_int, "full")
d[d == 0] = 1
elif adjusted:
xi = np.arange(1, n + 1)
d = np.hstack((xi, xi[:-1][::-1]))
elif deal_with_masked:
# biased and NaNs given and ("drop" or "conservative")
d = notmask_int.sum() * np.ones(2 * n - 1)
else: # biased and no NaNs or missing=="none"
d = n * np.ones(2 * n - 1)
if fft:
nobs = len(xo)
n = _next_regular(2 * nobs + 1)
Frf = np.fft.fft(xo, n=n)
acov = np.fft.ifft(Frf * np.conjugate(Frf))[:nobs] / d[nobs - 1 :]
acov = acov.real
else:
acov = np.correlate(xo, xo, "full")[n - 1 :] / d[n - 1 :]
if nlag is not None:
# Copy to allow gc of full array rather than view
return acov[: lag_len + 1].copy()
return acov
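# --- Added usage sketch (not part of the original module): autocovariances of
# white noise; fft=False is passed explicitly to avoid the FutureWarning, and
# nlag limits the output to the first six values. ---
def _example_acovf():
    rs = np.random.RandomState(0)
    x = rs.standard_normal(500)
    gamma = acovf(x, fft=False, nlag=5)
    # gamma[0] estimates the variance (about 1.0 for standard normal data)
    return gamma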
def q_stat(x, nobs, type=None):
"""
Compute Ljung-Box Q Statistic.
Parameters
----------
x : array_like
Array of autocorrelation coefficients. Can be obtained from acf.
nobs : int, optional
        Number of observations in the entire sample (i.e., not just the length
        of the autocorrelation function results).
Returns
-------
q-stat : ndarray
Ljung-Box Q-statistic for autocorrelation parameters.
p-value : ndarray
P-value of the Q statistic.
Notes
-----
Designed to be used with acf.
"""
x = array_like(x, "x")
nobs = int_like(nobs, "nobs")
if type is not None:
warnings.warn(
"The `type` argument is deprecated and has no effect. This "
"argument will be removed after the 0.12 release.",
FutureWarning,
)
ret = (
nobs
* (nobs + 2)
* np.cumsum((1.0 / (nobs - np.arange(1, len(x) + 1))) * x ** 2)
)
chi2 = stats.chi2.sf(ret, np.arange(1, len(x) + 1))
return ret, chi2
# NOTE: Changed unbiased to False
# see for example
# http://www.itl.nist.gov/div898/handbook/eda/section3/autocopl.htm
@deprecate_kwarg("unbiased", "adjusted")
def acf(
x,
adjusted=False,
nlags=None,
qstat=False,
fft=None,
alpha=None,
bartlett_confint=True,
missing="none",
):
"""
Calculate the autocorrelation function.
Parameters
----------
x : array_like
The time series data.
adjusted : bool, default False
If True, then denominators for autocovariance are n-k, otherwise n.
nlags : int, default 40
Number of lags to return autocorrelation for.
qstat : bool, default False
If True, returns the Ljung-Box q statistic for each autocorrelation
coefficient. See q_stat for more information.
fft : bool, default None
If True, computes the ACF via FFT.
alpha : scalar, default None
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
Bartlett"s formula.
bartlett_confint : bool, default True
Confidence intervals for ACF values are generally placed at 2
standard errors around r_k. The formula used for standard error
depends upon the situation. If the autocorrelations are being used
to test for randomness of residuals as part of the ARIMA routine,
the standard errors are determined assuming the residuals are white
noise. The approximate formula for any lag is that standard error
of each r_k = 1/sqrt(N). See section 9.4 of [2] for more details on
the 1/sqrt(N) result. For more elementary discussion, see section 5.3.2
in [3].
For the ACF of raw data, the standard error at a lag k is
        found as if the right model were an MA(k-1). This allows the possible
interpretation that if all autocorrelations past a certain lag are
within the limits, the model might be an MA of order defined by the
last significant autocorrelation. In this case, a moving average
model is assumed for the data and the standard errors for the
confidence intervals should be generated using Bartlett's formula.
For more details on Bartlett formula result, see section 7.2 in [2].
missing : str, default "none"
A string in ["none", "raise", "conservative", "drop"] specifying how
the NaNs are to be treated. "none" performs no checks. "raise" raises
an exception if NaN values are found. "drop" removes the missing
observations and then estimates the autocovariances treating the
non-missing as contiguous. "conservative" computes the autocovariance
using nan-ops so that nans are removed when computing the mean
and cross-products that are used to estimate the autocovariance.
When using "conservative", n is set to the number of non-missing
observations.
Returns
-------
acf : ndarray
The autocorrelation function.
confint : ndarray, optional
Confidence intervals for the ACF. Returned if alpha is not None.
qstat : ndarray, optional
The Ljung-Box Q-Statistic. Returned if q_stat is True.
pvalues : ndarray, optional
The p-values associated with the Q-statistics. Returned if q_stat is
True.
Notes
-----
    The acf at lag 0 (i.e., 1) is returned.
For very long time series it is recommended to use fft convolution instead.
When fft is False uses a simple, direct estimator of the autocovariances
that only computes the first nlag + 1 values. This can be much faster when
the time series is long and only a small number of autocovariances are
needed.
If adjusted is true, the denominator for the autocovariance is adjusted
for the loss of data.
References
----------
.. [1] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
    .. [2] Brockwell and Davis, 1987. Time Series Theory and Methods
    .. [3] Brockwell and Davis, 2010. Introduction to Time Series and
Forecasting, 2nd edition.
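    Examples
    --------
    A minimal sketch on simulated white noise; the data and settings are
    illustrative, not from the upstream documentation:

    >>> import numpy as np
    >>> from statsmodels.tsa.stattools import acf
    >>> x = np.random.default_rng(0).standard_normal(200)
    >>> r, confint = acf(x, nlags=10, fft=True, alpha=0.05)
    >>> r.shape, confint.shape
    ((11,), (11, 2))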
"""
adjusted = bool_like(adjusted, "adjusted")
nlags = int_like(nlags, "nlags", optional=True)
qstat = bool_like(qstat, "qstat")
fft = bool_like(fft, "fft", optional=True)
alpha = float_like(alpha, "alpha", optional=True)
missing = string_like(
missing, "missing", options=("none", "raise", "conservative", "drop")
)
if nlags is None:
warnings.warn(
"The default number of lags is changing from 40 to"
"min(int(10 * np.log10(nobs)), nobs - 1) after 0.12"
"is released. Set the number of lags to an integer to "
" silence this warning.",
FutureWarning,
)
nlags = 40
if fft is None:
warnings.warn(
"fft=True will become the default after the release of the 0.12 "
"release of statsmodels. To suppress this warning, explicitly "
"set fft=False.",
FutureWarning,
)
fft = False
x = array_like(x, "x")
nobs = len(x) # TODO: should this shrink for missing="drop" and NaNs in x?
avf = acovf(x, adjusted=adjusted, demean=True, fft=fft, missing=missing)
acf = avf[: nlags + 1] / avf[0]
if not (qstat or alpha):
return acf
if alpha is not None:
if bartlett_confint:
varacf = np.ones_like(acf) / nobs
varacf[0] = 0
varacf[1] = 1.0 / nobs
varacf[2:] *= 1 + 2 * np.cumsum(acf[1:-1] ** 2)
else:
varacf = 1.0 / len(x)
interval = stats.norm.ppf(1 - alpha / 2.0) * np.sqrt(varacf)
confint = np.array(lzip(acf - interval, acf + interval))
if not qstat:
return acf, confint
if qstat:
qstat, pvalue = q_stat(acf[1:], nobs=nobs) # drop lag 0
if alpha is not None:
return acf, confint, qstat, pvalue
else:
return acf, qstat, pvalue
def pacf_yw(x, nlags=None, method="adjusted"):
"""
Partial autocorrelation estimated with non-recursive yule_walker.
Parameters
----------
x : array_like
The observations of time series for which pacf is calculated.
nlags : int, default 40
The largest lag for which pacf is returned.
method : {"adjusted", "mle"}, default "adjusted"
The method for the autocovariance calculations in yule walker.
Returns
-------
ndarray
The partial autocorrelations, maxlag+1 elements.
See Also
--------
statsmodels.tsa.stattools.pacf
Partial autocorrelation estimation.
statsmodels.tsa.stattools.pacf_ols
Partial autocorrelation estimation using OLS.
statsmodels.tsa.stattools.pacf_burg
Partial autocorrelation estimation using Burg"s method.
Notes
-----
This solves yule_walker for each desired lag and contains
currently duplicate calculations.
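    Examples
    --------
    A minimal sketch on simulated data (illustrative values):

    >>> import numpy as np
    >>> from statsmodels.tsa.stattools import pacf_yw
    >>> x = np.random.default_rng(0).standard_normal(500)
    >>> pacf_yw(x, nlags=5).shape  # lags 0 through 5
    (6,)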
"""
x = array_like(x, "x")
nlags = int_like(nlags, "nlags", optional=True)
if nlags is None:
warnings.warn(
"The default number of lags is changing from 40 to"
"min(int(10 * np.log10(nobs)), nobs - 1) after 0.12"
"is released. Set the number of lags to an integer to "
" silence this warning.",
FutureWarning,
)
nlags = 40
method = string_like(
method, "method", options=("adjusted", "unbiased", "mle")
)
if method == "unbiased":
warnings.warn(
"unbiased is deprecated in factor of adjusted to reflect that the "
"term is adjusting the sample size used in the autocovariance "
"calculation rather than estimating an unbiased autocovariance. "
"After release 0.13, using 'unbiased' will raise.",
FutureWarning,
)
method = "adjusted"
pacf = [1.0]
for k in range(1, nlags + 1):
pacf.append(yule_walker(x, k, method=method)[0][-1])
return np.array(pacf)
def pacf_burg(x, nlags=None, demean=True):
"""
Calculate Burg"s partial autocorrelation estimator.
Parameters
----------
x : array_like
Observations of time series for which pacf is calculated.
nlags : int, optional
Number of lags to compute the partial autocorrelations. If omitted,
uses the smaller of 10(log10(nobs)) or nobs - 1.
demean : bool, optional
        Flag indicating whether to demean the data. Set to False if x has been
previously demeaned.
Returns
-------
pacf : ndarray
Partial autocorrelations for lags 0, 1, ..., nlag.
sigma2 : ndarray
Residual variance estimates where the value in position m is the
residual variance in an AR model that includes m lags.
See Also
--------
statsmodels.tsa.stattools.pacf
Partial autocorrelation estimation.
statsmodels.tsa.stattools.pacf_yw
Partial autocorrelation estimation using Yule-Walker.
statsmodels.tsa.stattools.pacf_ols
Partial autocorrelation estimation using OLS.
References
----------
.. [1] Brockwell, P.J. and Davis, R.A., 2016. Introduction to time series
and forecasting. Springer.
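    Examples
    --------
    A minimal sketch on simulated data (illustrative values):

    >>> import numpy as np
    >>> from statsmodels.tsa.stattools import pacf_burg
    >>> x = np.random.default_rng(0).standard_normal(500)
    >>> p, sigma2 = pacf_burg(x, nlags=5)
    >>> p.shape, sigma2.shape
    ((6,), (6,))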
"""
x = array_like(x, "x")
if demean:
x = x - x.mean()
nobs = x.shape[0]
p = nlags if nlags is not None else min(int(10 * np.log10(nobs)), nobs - 1)
if p > nobs - 1:
raise ValueError("nlags must be smaller than nobs - 1")
d = np.zeros(p + 1)
d[0] = 2 * x.dot(x)
pacf = np.zeros(p + 1)
u = x[::-1].copy()
v = x[::-1].copy()
d[1] = u[:-1].dot(u[:-1]) + v[1:].dot(v[1:])
pacf[1] = 2 / d[1] * v[1:].dot(u[:-1])
last_u = np.empty_like(u)
last_v = np.empty_like(v)
for i in range(1, p):
last_u[:] = u
last_v[:] = v
u[1:] = last_u[:-1] - pacf[i] * last_v[1:]
v[1:] = last_v[1:] - pacf[i] * last_u[:-1]
d[i + 1] = (1 - pacf[i] ** 2) * d[i] - v[i] ** 2 - u[-1] ** 2
pacf[i + 1] = 2 / d[i + 1] * v[i + 1 :].dot(u[i:-1])
sigma2 = (1 - pacf ** 2) * d / (2.0 * (nobs - np.arange(0, p + 1)))
pacf[0] = 1 # Insert the 0 lag partial autocorrel
return pacf, sigma2
@deprecate_kwarg("unbiased", "adjusted")
def pacf_ols(x, nlags=None, efficient=True, adjusted=False):
"""
Calculate partial autocorrelations via OLS.
Parameters
----------
x : array_like
Observations of time series for which pacf is calculated.
nlags : int
Number of lags for which pacf is returned. Lag 0 is not returned.
efficient : bool, optional
If true, uses the maximum number of available observations to compute
each partial autocorrelation. If not, uses the same number of
observations to compute all pacf values.
adjusted : bool, optional
Adjust each partial autocorrelation by n / (n - lag).
Returns
-------
ndarray
The partial autocorrelations, (maxlag,) array corresponding to lags
0, 1, ..., maxlag.
See Also
--------
statsmodels.tsa.stattools.pacf
Partial autocorrelation estimation.
statsmodels.tsa.stattools.pacf_yw
Partial autocorrelation estimation using Yule-Walker.
statsmodels.tsa.stattools.pacf_burg
Partial autocorrelation estimation using Burg"s method.
Notes
-----
This solves a separate OLS estimation for each desired lag using method in
[1]_. Setting efficient to True has two effects. First, it uses
    `nobs - lag` observations to estimate each pacf. Second, it re-estimates
the mean in each regression. If efficient is False, then the data are first
demeaned, and then `nobs - maxlag` observations are used to estimate each
partial autocorrelation.
The inefficient estimator appears to have better finite sample properties.
This option should only be used in time series that are covariance
stationary.
OLS estimation of the pacf does not guarantee that all pacf values are
between -1 and 1.
References
----------
.. [1] Box, G. E., Jenkins, G. M., Reinsel, G. C., & Ljung, G. M. (2015).
Time series analysis: forecasting and control. John Wiley & Sons, p. 66
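    Examples
    --------
    A minimal sketch on simulated data (illustrative values):

    >>> import numpy as np
    >>> from statsmodels.tsa.stattools import pacf_ols
    >>> x = np.random.default_rng(0).standard_normal(500)
    >>> pacf_ols(x, nlags=5, efficient=False).shape
    (6,)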
"""
x = array_like(x, "x")
nlags = int_like(nlags, "nlags", optional=True)
efficient = bool_like(efficient, "efficient")
adjusted = bool_like(adjusted, "adjusted")
if nlags is None:
warnings.warn(
"The default number of lags is changing from 40 to"
"min(int(10 * np.log10(nobs)), nobs - 1) after 0.12"
"is released. Set the number of lags to an integer to "
" silence this warning.",
FutureWarning,
)
nlags = 40
pacf = np.empty(nlags + 1)
pacf[0] = 1.0
if efficient:
xlags, x0 = lagmat(x, nlags, original="sep")
xlags = add_constant(xlags)
for k in range(1, nlags + 1):
params = lstsq(xlags[k:, : k + 1], x0[k:], rcond=None)[0]
pacf[k] = params[-1]
else:
x = x - np.mean(x)
# Create a single set of lags for multivariate OLS
xlags, x0 = lagmat(x, nlags, original="sep", trim="both")
for k in range(1, nlags + 1):
params = lstsq(xlags[:, :k], x0, rcond=None)[0]
# Last coefficient corresponds to PACF value (see [1])
pacf[k] = params[-1]
if adjusted:
n = len(x)
pacf *= n / (n - np.arange(nlags + 1))
return pacf
def pacf(x, nlags=None, method="ywadjusted", alpha=None):
"""
Partial autocorrelation estimate.
Parameters
----------
x : array_like
Observations of time series for which pacf is calculated.
nlags : int
The largest lag for which the pacf is returned. The default
is currently 40, but will change to
        min(int(10 * np.log10(nobs)), nobs // 2 - 1) in the future.
    method : str, default "ywadjusted"
Specifies which method for the calculations to use.
- "yw" or "ywadjusted" : Yule-Walker with sample-size adjustment in
denominator for acovf. Default.
- "ywm" or "ywmle" : Yule-Walker without adjustment.
- "ols" : regression of time series on lags of it and on constant.
- "ols-inefficient" : regression of time series on lags using a single
common sample to estimate all pacf coefficients.
- "ols-adjusted" : regression of time series on lags with a bias
adjustment.
- "ld" or "ldadjusted" : Levinson-Durbin recursion with bias
correction.
- "ldb" or "ldbiased" : Levinson-Durbin recursion without bias
correction.
alpha : float, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
1/sqrt(len(x)).
Returns
-------
pacf : ndarray
Partial autocorrelations, nlags elements, including lag zero.
confint : ndarray, optional
Confidence intervals for the PACF. Returned if confint is not None.
See Also
--------
statsmodels.tsa.stattools.acf
Estimate the autocorrelation function.
statsmodels.tsa.stattools.pacf
Partial autocorrelation estimation.
statsmodels.tsa.stattools.pacf_yw
Partial autocorrelation estimation using Yule-Walker.
statsmodels.tsa.stattools.pacf_ols
Partial autocorrelation estimation using OLS.
statsmodels.tsa.stattools.pacf_burg
Partial autocorrelation estimation using Burg"s method.
Notes
-----
Based on simulation evidence across a range of low-order ARMA models,
    the best methods based on root MSE are Yule-Walker (MLE), Levinson-Durbin
    (MLE) and Burg, respectively. The estimators with the lowest bias
    included these three in addition to OLS and OLS-adjusted.
Yule-Walker (adjusted) and Levinson-Durbin (adjusted) performed
consistently worse than the other options.
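    Examples
    --------
    A minimal sketch on simulated data (illustrative values):

    >>> import numpy as np
    >>> from statsmodels.tsa.stattools import pacf
    >>> x = np.random.default_rng(0).standard_normal(500)
    >>> p, confint = pacf(x, nlags=10, method="ywmle", alpha=0.05)
    >>> p.shape, confint.shape
    ((11,), (11, 2))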
"""
nlags = int_like(nlags, "nlags", optional=True)
renames = {
"ydu": "yda",
"ywu": "ywa",
"ywunbiased": "ywadjusted",
"ldunbiased": "ldadjusted",
"ld_unbiased": "ld_adjusted",
"ldu": "lda",
"ols-unbiased": "ols-adjusted",
}
if method in renames:
warnings.warn(
f"{method} has been renamed {renames[method]}. After release 0.13, "
"using the old name will raise.",
FutureWarning,
)
method = renames[method]
methods = (
"ols",
"ols-inefficient",
"ols-adjusted",
"yw",
"ywa",
"ld",
"ywadjusted",
"yw_adjusted",
"ywm",
"ywmle",
"yw_mle",
"lda",
"ldadjusted",
"ld_adjusted",
"ldb",
"ldbiased",
"ld_biased",
)
x = array_like(x, "x", maxdim=2)
method = string_like(method, "method", options=methods)
alpha = float_like(alpha, "alpha", optional=True)
if nlags is None:
warnings.warn(
"The default number of lags is changing from 40 to"
"min(int(10 * np.log10(nobs)), nobs // 2 - 1) after 0.12"
"is released. Set the number of lags to an integer to "
" silence this warning.",
FutureWarning,
)
nlags = 40
if nlags >= x.shape[0] // 2:
raise ValueError(
"Can only compute partial correlations for lags up to 50% of the "
f"sample size. The requested nlags {nlags} must be < "
f"{x.shape[0] // 2}."
)
if method in ("ols", "ols-inefficient", "ols-adjusted"):
efficient = "inefficient" not in method
adjusted = "adjusted" in method
ret = pacf_ols(x, nlags=nlags, efficient=efficient, adjusted=adjusted)
elif method in ("yw", "ywa", "ywadjusted", "yw_adjusted"):
ret = pacf_yw(x, nlags=nlags, method="adjusted")
elif method in ("ywm", "ywmle", "yw_mle"):
ret = pacf_yw(x, nlags=nlags, method="mle")
elif method in ("ld", "lda", "ldadjusted", "ld_adjusted"):
acv = acovf(x, adjusted=True, fft=False)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
ret = ld_[2]
# inconsistent naming with ywmle
else: # method in ("ldb", "ldbiased", "ld_biased")
acv = acovf(x, adjusted=False, fft=False)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
ret = ld_[2]
if alpha is not None:
varacf = 1.0 / len(x) # for all lags >=1
interval = stats.norm.ppf(1.0 - alpha / 2.0) * np.sqrt(varacf)
confint = np.array(lzip(ret - interval, ret + interval))
confint[0] = ret[0] # fix confidence interval for lag 0 to varpacf=0
return ret, confint
else:
return ret
@deprecate_kwarg("unbiased", "adjusted")
def ccovf(x, y, adjusted=True, demean=True):
"""
Calculate the crosscovariance between two series.
Parameters
----------
x, y : array_like
The time series data to use in the calculation.
adjusted : bool, optional
        If True, then the denominator for the cross-covariance at lag k is
        n - k, otherwise n.
demean : bool, optional
Flag indicating whether to demean x and y.
Returns
-------
ndarray
The estimated crosscovariance function.
Notes
-----
This uses np.correlate which does full convolution. For very long time
series it is recommended to use fft convolution instead.
"""
x = array_like(x, "x")
y = array_like(y, "y")
adjusted = bool_like(adjusted, "adjusted")
demean = bool_like(demean, "demean")
n = len(x)
if demean:
xo = x - x.mean()
yo = y - y.mean()
else:
xo = x
yo = y
if adjusted:
xi = np.ones(n)
d = np.correlate(xi, xi, "full")
else:
d = n
return (np.correlate(xo, yo, "full") / d)[n - 1 :]
@deprecate_kwarg("unbiased", "adjusted")
def ccf(x, y, adjusted=True):
"""
The cross-correlation function.
Parameters
----------
x, y : array_like
The time series data to use in the calculation.
adjusted : bool
        If True, then the denominator for the cross-correlation at lag k is
        n - k, otherwise n.
Returns
-------
ndarray
The cross-correlation function of x and y.
Notes
-----
    This is based on np.correlate which does full convolution. For very long time
series it is recommended to use fft convolution instead.
    If adjusted is True, the denominator for the cross-covariance is adjusted.
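    Examples
    --------
    A minimal sketch on two simulated series where y is a noisy, two-period
    lagged copy of x (illustrative values):

    >>> import numpy as np
    >>> from statsmodels.tsa.stattools import ccf
    >>> rng = np.random.default_rng(0)
    >>> x = rng.standard_normal(200)
    >>> y = np.roll(x, 2) + 0.1 * rng.standard_normal(200)
    >>> ccf(x, y).shape  # one value per lag 0, 1, ..., n - 1
    (200,)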
"""
x = array_like(x, "x")
y = array_like(y, "y")
adjusted = bool_like(adjusted, "adjusted")
cvf = ccovf(x, y, adjusted=adjusted, demean=True)
return cvf / (np.std(x) * np.std(y))
# moved from sandbox.tsa.examples.try_ld_nitime, via nitime
# TODO: check what to return, for testing and trying out returns everything
def levinson_durbin(s, nlags=10, isacov=False):
"""
Levinson-Durbin recursion for autoregressive processes.
Parameters
----------
s : array_like
        If isacov is False, then this is the time series. If isacov is True,
then this is interpreted as autocovariance starting with lag 0.
nlags : int, optional
The largest lag to include in recursion or order of the autoregressive
process.
isacov : bool, optional
Flag indicating whether the first argument, s, contains the
autocovariances or the data series.
Returns
-------
sigma_v : float
The estimate of the error variance.
arcoefs : ndarray
The estimate of the autoregressive coefficients for a model including
nlags.
pacf : ndarray
The partial autocorrelation function.
sigma : ndarray
The entire sigma array from intermediate result, last value is sigma_v.
phi : ndarray
The entire phi array from intermediate result, last column contains
autoregressive coefficients for AR(nlags).
Notes
-----
    This function currently returns all results; sigma and phi may be
    dropped from the returns in the future.
If this function is called with the time series (isacov=False), then the
sample autocovariance function is calculated with the default options
(biased, no fft).
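    Examples
    --------
    A minimal sketch on a simulated AR(1) series with coefficient 0.6
    (illustrative values):

    >>> import numpy as np
    >>> from statsmodels.tsa.stattools import levinson_durbin
    >>> rng = np.random.default_rng(0)
    >>> y = np.zeros(1000)
    >>> for t in range(1, 1000):
    ...     y[t] = 0.6 * y[t - 1] + rng.standard_normal()
    >>> sigma_v, arcoefs, pacf_, sig, phi = levinson_durbin(y, nlags=4)
    >>> arcoefs.shape
    (4,)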
"""
s = array_like(s, "s")
nlags = int_like(nlags, "nlags")
isacov = bool_like(isacov, "isacov")
order = nlags
if isacov:
sxx_m = s
else:
sxx_m = acovf(s, fft=False)[: order + 1] # not tested
phi = np.zeros((order + 1, order + 1), "d")
sig = np.zeros(order + 1)
# initial points for the recursion
phi[1, 1] = sxx_m[1] / sxx_m[0]
sig[1] = sxx_m[0] - phi[1, 1] * sxx_m[1]
for k in range(2, order + 1):
phi[k, k] = (
sxx_m[k] - np.dot(phi[1:k, k - 1], sxx_m[1:k][::-1])
) / sig[k - 1]
for j in range(1, k):
phi[j, k] = phi[j, k - 1] - phi[k, k] * phi[k - j, k - 1]
sig[k] = sig[k - 1] * (1 - phi[k, k] ** 2)
sigma_v = sig[-1]
arcoefs = phi[1:, -1]
pacf_ = np.diag(phi).copy()
pacf_[0] = 1.0
return sigma_v, arcoefs, pacf_, sig, phi # return everything
def levinson_durbin_pacf(pacf, nlags=None):
"""
Levinson-Durbin algorithm that returns the acf and ar coefficients.
Parameters
----------
pacf : array_like
Partial autocorrelation array for lags 0, 1, ... p.
nlags : int, optional
Number of lags in the AR model. If omitted, returns coefficients from
an AR(p) and the first p autocorrelations.
Returns
-------
arcoefs : ndarray
AR coefficients computed from the partial autocorrelations.
acf : ndarray
The acf computed from the partial autocorrelations. Array returned
contains the autocorrelations corresponding to lags 0, 1, ..., p.
References
----------
.. [1] Brockwell, P.J. and Davis, R.A., 2016. Introduction to time series
and forecasting. Springer.
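    Examples
    --------
    A minimal round trip through pacf_yw on simulated data (illustrative
    values):

    >>> import numpy as np
    >>> from statsmodels.tsa.stattools import levinson_durbin_pacf, pacf_yw
    >>> x = np.random.default_rng(0).standard_normal(500)
    >>> p = pacf_yw(x, nlags=5)  # first entry is 1, as required
    >>> arcoefs, acf_ = levinson_durbin_pacf(p)
    >>> arcoefs.shape, acf_.shape
    ((5,), (6,))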
"""
pacf = array_like(pacf, "pacf")
nlags = int_like(nlags, "nlags", optional=True)
pacf = np.squeeze(np.asarray(pacf))
if pacf[0] != 1:
raise ValueError(
"The first entry of the pacf corresponds to lags 0 "
"and so must be 1."
)
pacf = pacf[1:]
n = pacf.shape[0]
if nlags is not None:
if nlags > n:
raise ValueError(
"Must provide at least as many values from the "
"pacf as the number of lags."
)
pacf = pacf[:nlags]
n = pacf.shape[0]
acf = np.zeros(n + 1)
acf[1] = pacf[0]
nu = np.cumprod(1 - pacf ** 2)
arcoefs = pacf.copy()
for i in range(1, n):
prev = arcoefs[: -(n - i)].copy()
arcoefs[: -(n - i)] = prev - arcoefs[i] * prev[::-1]
acf[i + 1] = arcoefs[i] * nu[i - 1] + prev.dot(acf[1 : -(n - i)][::-1])
acf[0] = 1
return arcoefs, acf
def breakvar_heteroskedasticity_test(
resid, subset_length=1 / 3, alternative="two-sided", use_f=True
):
r"""
Test for heteroskedasticity of residuals
Tests whether the sum-of-squares in the first subset of the sample is
significantly different than the sum-of-squares in the last subset
of the sample. Analogous to a Goldfeld-Quandt test. The null hypothesis
is of no heteroskedasticity.
Parameters
----------
resid : array_like
Residuals of a time series model.
The shape is 1d (nobs,) or 2d (nobs, nvars).
subset_length : {int, float}
Length of the subsets to test (h in Notes below).
If a float in 0 < subset_length < 1, it is interpreted as fraction.
Default is 1/3.
alternative : str, 'increasing', 'decreasing' or 'two-sided'
This specifies the alternative for the p-value calculation. Default
is two-sided.
use_f : bool, optional
Whether or not to compare against the asymptotic distribution
(chi-squared) or the approximate small-sample distribution (F).
Default is True (i.e. default is to compare against an F
distribution).
Returns
-------
test_statistic : {float, ndarray}
Test statistic(s) H(h).
p_value : {float, ndarray}
p-value(s) of test statistic(s).
Notes
-----
The null hypothesis is of no heteroskedasticity. That means different
things depending on which alternative is selected:
- Increasing: Null hypothesis is that the variance is not increasing
throughout the sample; that the sum-of-squares in the later
subsample is *not* greater than the sum-of-squares in the earlier
subsample.
- Decreasing: Null hypothesis is that the variance is not decreasing
throughout the sample; that the sum-of-squares in the earlier
subsample is *not* greater than the sum-of-squares in the later
subsample.
- Two-sided: Null hypothesis is that the variance is not changing
throughout the sample. Both that the sum-of-squares in the earlier
subsample is not greater than the sum-of-squares in the later
subsample *and* that the sum-of-squares in the later subsample is
not greater than the sum-of-squares in the earlier subsample.
For :math:`h = [T/3]`, the test statistic is:
.. math::
H(h) = \sum_{t=T-h+1}^T \tilde v_t^2
\Bigg / \sum_{t=1}^{h} \tilde v_t^2
This statistic can be tested against an :math:`F(h,h)` distribution.
Alternatively, :math:`h H(h)` is asymptotically distributed according
to :math:`\chi_h^2`; this second test can be applied by passing
`use_f=False` as an argument.
See section 5.4 of [1]_ for the above formula and discussion, as well
as additional details.
References
----------
.. [1] Harvey, Andrew C. 1990. *Forecasting, Structural Time Series*
*Models and the Kalman Filter.* Cambridge University Press.
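    Examples
    --------
    A minimal sketch on simulated residuals whose variance grows over the
    sample (illustrative values):

    >>> import numpy as np
    >>> from statsmodels.tsa.stattools import breakvar_heteroskedasticity_test
    >>> rng = np.random.default_rng(0)
    >>> resid = rng.standard_normal(300) * np.linspace(1, 3, 300)
    >>> stat, pval = breakvar_heteroskedasticity_test(
    ...     resid, alternative="increasing"
    ... )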
"""
squared_resid = np.asarray(resid, dtype=float) ** 2
if squared_resid.ndim == 1:
squared_resid = squared_resid.reshape(-1, 1)
nobs = len(resid)
if 0 < subset_length < 1:
h = int(np.round(nobs * subset_length))
    elif type(subset_length) is int and subset_length >= 1:
        h = subset_length
    else:
        raise ValueError(
            "subset_length must be an int >= 1 or a float in (0, 1)"
        )
numer_resid = squared_resid[-h:]
numer_dof = (~np.isnan(numer_resid)).sum(axis=0)
numer_squared_sum = np.nansum(numer_resid, axis=0)
for i, dof in enumerate(numer_dof):
if dof < 2:
warnings.warn(
"Early subset of data for variable %d"
" has too few non-missing observations to"
" calculate test statistic." % i
)
numer_squared_sum[i] = np.nan
denom_resid = squared_resid[:h]
denom_dof = (~np.isnan(denom_resid)).sum(axis=0)
denom_squared_sum = np.nansum(denom_resid, axis=0)
for i, dof in enumerate(denom_dof):
if dof < 2:
warnings.warn(
"Later subset of data for variable %d"
" has too few non-missing observations to"
" calculate test statistic." % i
)
denom_squared_sum[i] = np.nan
test_statistic = numer_squared_sum / denom_squared_sum
# Setup functions to calculate the p-values
if use_f:
from scipy.stats import f
pval_lower = lambda test_statistics: f.cdf( # noqa:E731
test_statistics, numer_dof, denom_dof
)
pval_upper = lambda test_statistics: f.sf( # noqa:E731
test_statistics, numer_dof, denom_dof
)
else:
from scipy.stats import chi2
pval_lower = lambda test_statistics: chi2.cdf( # noqa:E731
numer_dof * test_statistics, denom_dof
)
pval_upper = lambda test_statistics: chi2.sf( # noqa:E731
numer_dof * test_statistics, denom_dof
)
# Calculate the one- or two-sided p-values
alternative = alternative.lower()
if alternative in ["i", "inc", "increasing"]:
p_value = pval_upper(test_statistic)
elif alternative in ["d", "dec", "decreasing"]:
test_statistic = 1.0 / test_statistic
p_value = pval_upper(test_statistic)
elif alternative in ["2", "2-sided", "two-sided"]:
p_value = 2 * np.minimum(
pval_lower(test_statistic), pval_upper(test_statistic)
)
else:
raise ValueError("Invalid alternative.")
if len(test_statistic) == 1:
return test_statistic[0], p_value[0]
return test_statistic, p_value
def grangercausalitytests(x, maxlag, addconst=True, verbose=True):
"""
Four tests for granger non causality of 2 time series.
All four tests give similar results. `params_ftest` and `ssr_ftest` are
    equivalent, based on the F test, which is identical to lmtest:grangertest in R.
Parameters
----------
x : array_like
The data for test whether the time series in the second column Granger
causes the time series in the first column. Missing values are not
supported.
maxlag : {int, Iterable[int]}
If an integer, computes the test for all lags up to maxlag. If an
iterable, computes the tests only for the lags in maxlag.
addconst : bool
Include a constant in the model.
verbose : bool
Print results.
Returns
-------
dict
All test results, dictionary keys are the number of lags. For each
lag the values are a tuple, with the first element a dictionary with
test statistic, pvalues, degrees of freedom, the second element are
the OLS estimation results for the restricted model, the unrestricted
model and the restriction (contrast) matrix for the parameter f_test.
Notes
-----
TODO: convert to class and attach results properly
The Null hypothesis for grangercausalitytests is that the time series in
the second column, x2, does NOT Granger cause the time series in the first
    column, x1. Granger causality means that past values of x2 have a
statistically significant effect on the current value of x1, taking past
values of x1 into account as regressors. We reject the null hypothesis
that x2 does not Granger cause x1 if the pvalues are below a desired size
of the test.
The null hypothesis for all four test is that the coefficients
corresponding to past values of the second time series are zero.
`params_ftest`, `ssr_ftest` are based on F distribution
`ssr_chi2test`, `lrtest` are based on chi-square distribution
References
----------
.. [1] https://en.wikipedia.org/wiki/Granger_causality
.. [2] Greene: Econometric Analysis
Examples
--------
>>> import statsmodels.api as sm
>>> from statsmodels.tsa.stattools import grangercausalitytests
>>> import numpy as np
>>> data = sm.datasets.macrodata.load_pandas()
>>> data = data.data[["realgdp", "realcons"]].pct_change().dropna()
All lags up to 4
>>> gc_res = grangercausalitytests(data, 4)
Only lag 4
>>> gc_res = grangercausalitytests(data, [4])
"""
x = array_like(x, "x", ndim=2)
if not np.isfinite(x).all():
raise ValueError("x contains NaN or inf values.")
addconst = bool_like(addconst, "addconst")
verbose = bool_like(verbose, "verbose")
try:
maxlag = int_like(maxlag, "maxlag")
if maxlag <= 0:
raise ValueError("maxlag must a a positive integer")
lags = np.arange(1, maxlag + 1)
except TypeError:
lags = np.array([int(lag) for lag in maxlag])
maxlag = lags.max()
if lags.min() <= 0 or lags.size == 0:
raise ValueError(
"maxlag must be a non-empty list containing only "
"positive integers"
)
if x.shape[0] <= 3 * maxlag + int(addconst):
raise ValueError(
"Insufficient observations. Maximum allowable "
"lag is {0}".format(int((x.shape[0] - int(addconst)) / 3) - 1)
)
resli = {}
for mlg in lags:
result = {}
if verbose:
print("\nGranger Causality")
print("number of lags (no zero)", mlg)
mxlg = mlg
# create lagmat of both time series
dta = lagmat2ds(x, mxlg, trim="both", dropex=1)
# add constant
if addconst:
dtaown = add_constant(dta[:, 1 : (mxlg + 1)], prepend=False)
dtajoint = add_constant(dta[:, 1:], prepend=False)
if (
dtajoint.shape[1] == (dta.shape[1] - 1)
or (dtajoint.max(0) == dtajoint.min(0)).sum() != 1
):
raise InfeasibleTestError(
"The x values include a column with constant values and so"
" the test statistic cannot be computed."
)
else:
raise NotImplementedError("Not Implemented")
# dtaown = dta[:, 1:mxlg]
# dtajoint = dta[:, 1:]
# Run ols on both models without and with lags of second variable
res2down = OLS(dta[:, 0], dtaown).fit()
res2djoint = OLS(dta[:, 0], dtajoint).fit()
# print results
# for ssr based tests see:
# http://support.sas.com/rnd/app/examples/ets/granger/index.htm
# the other tests are made-up
# Granger Causality test using ssr (F statistic)
        if res2djoint.model.k_constant:
            tss = res2djoint.centered_tss
        else:
            tss = res2djoint.uncentered_tss
if (
tss == 0
or res2djoint.ssr == 0
or np.isnan(res2djoint.rsquared)
or (res2djoint.ssr / tss) < np.finfo(float).eps
or res2djoint.params.shape[0] != dtajoint.shape[1]
):
raise InfeasibleTestError(
"The Granger causality test statistic cannot be compute "
"because the VAR has a perfect fit of the data."
)
fgc1 = (
(res2down.ssr - res2djoint.ssr)
/ res2djoint.ssr
/ mxlg
* res2djoint.df_resid
)
if verbose:
print(
"ssr based F test: F=%-8.4f, p=%-8.4f, df_denom=%d,"
" df_num=%d"
% (
fgc1,
stats.f.sf(fgc1, mxlg, res2djoint.df_resid),
res2djoint.df_resid,
mxlg,
)
)
result["ssr_ftest"] = (
fgc1,
stats.f.sf(fgc1, mxlg, res2djoint.df_resid),
res2djoint.df_resid,
mxlg,
)
        # Granger Causality test using ssr (chi2 statistic)
fgc2 = res2down.nobs * (res2down.ssr - res2djoint.ssr) / res2djoint.ssr
if verbose:
print(
"ssr based chi2 test: chi2=%-8.4f, p=%-8.4f, "
"df=%d" % (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)
)
result["ssr_chi2test"] = (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)
# likelihood ratio test pvalue:
lr = -2 * (res2down.llf - res2djoint.llf)
if verbose:
print(
"likelihood ratio test: chi2=%-8.4f, p=%-8.4f, df=%d"
% (lr, stats.chi2.sf(lr, mxlg), mxlg)
)
result["lrtest"] = (lr, stats.chi2.sf(lr, mxlg), mxlg)
# F test that all lag coefficients of exog are zero
rconstr = np.column_stack(
(np.zeros((mxlg, mxlg)), np.eye(mxlg, mxlg), np.zeros((mxlg, 1)))
)
ftres = res2djoint.f_test(rconstr)
if verbose:
print(
"parameter F test: F=%-8.4f, p=%-8.4f, df_denom=%d,"
" df_num=%d"
% (ftres.fvalue, ftres.pvalue, ftres.df_denom, ftres.df_num)
)
result["params_ftest"] = (
np.squeeze(ftres.fvalue)[()],
np.squeeze(ftres.pvalue)[()],
ftres.df_denom,
ftres.df_num,
)
resli[mxlg] = (result, [res2down, res2djoint, rconstr])
return resli
def coint(
y0,
y1,
trend="c",
method="aeg",
maxlag=None,
autolag="aic",
return_results=None,
):
"""
Test for no-cointegration of a univariate equation.
The null hypothesis is no cointegration. Variables in y0 and y1 are
assumed to be integrated of order 1, I(1).
This uses the augmented Engle-Granger two-step cointegration test.
Constant or trend is included in 1st stage regression, i.e. in
cointegrating equation.
**Warning:** The autolag default has changed compared to statsmodels 0.8.
    In 0.8 autolag was always None, now the keyword is used and defaults to
"aic". Use `autolag=None` to avoid the lag search.
Parameters
----------
y0 : array_like
The first element in cointegrated system. Must be 1-d.
y1 : array_like
The remaining elements in cointegrated system.
trend : str {"c", "ct"}
The trend term included in regression for cointegrating equation.
* "c" : constant.
* "ct" : constant and linear trend.
        * Also available: quadratic trend "ctt", and no constant "nc".
method : {"aeg"}
Only "aeg" (augmented Engle-Granger) is available.
maxlag : None or int
Argument for `adfuller`, largest or given number of lags.
autolag : str
Argument for `adfuller`, lag selection criterion.
* If None, then maxlag lags are used without lag search.
* If "AIC" (default) or "BIC", then the number of lags is chosen
to minimize the corresponding information criterion.
* "t-stat" based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test.
return_results : bool
For future compatibility, currently only tuple available.
If True, then a results instance is returned. Otherwise, a tuple
with the test outcome is returned. Set `return_results=False` to
avoid future changes in return.
Returns
-------
coint_t : float
The t-statistic of unit-root test on residuals.
pvalue : float
MacKinnon"s approximate, asymptotic p-value based on MacKinnon (1994).
crit_value : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels based on regression curve. This depends on the number of
observations.
Notes
-----
The Null hypothesis is that there is no cointegration, the alternative
hypothesis is that there is cointegrating relationship. If the pvalue is
small, below a critical size, then we can reject the hypothesis that there
is no cointegrating relationship.
P-values and critical values are obtained through regression surface
approximation from MacKinnon 1994 and 2010.
If the two series are almost perfectly collinear, then computing the
test is numerically unstable. However, the two series will be cointegrated
under the maintained assumption that they are integrated. In this case
the t-statistic will be set to -inf and the pvalue to zero.
TODO: We could handle gaps in data by dropping rows with nans in the
Auxiliary regressions. Not implemented yet, currently assumes no nans
and no gaps in time series.
References
----------
.. [1] MacKinnon, J.G. 1994 "Approximate Asymptotic Distribution Functions
for Unit-Root and Cointegration Tests." Journal of Business & Economics
Statistics, 12.2, 167-76.
.. [2] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests."
Queen"s University, Dept of Economics Working Papers 1227.
http://ideas.repec.org/p/qed/wpaper/1227.html
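    Examples
    --------
    A minimal sketch on two simulated series sharing a common stochastic
    trend (illustrative values):

    >>> import numpy as np
    >>> from statsmodels.tsa.stattools import coint
    >>> rng = np.random.default_rng(0)
    >>> y1 = rng.standard_normal(500).cumsum()
    >>> y0 = 0.5 * y1 + rng.standard_normal(500)
    >>> coint_t, pvalue, crit_values = coint(y0, y1)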
"""
y0 = array_like(y0, "y0")
y1 = array_like(y1, "y1", ndim=2)
trend = string_like(trend, "trend", options=("c", "nc", "ct", "ctt"))
method = string_like(method, "method", options=("aeg",))
maxlag = int_like(maxlag, "maxlag", optional=True)
autolag = string_like(
autolag, "autolag", optional=True, options=("aic", "bic", "t-stat")
)
return_results = bool_like(return_results, "return_results", optional=True)
nobs, k_vars = y1.shape
k_vars += 1 # add 1 for y0
if trend == "nc":
xx = y1
else:
xx = add_trend(y1, trend=trend, prepend=False)
res_co = OLS(y0, xx).fit()
if res_co.rsquared < 1 - 100 * SQRTEPS:
res_adf = adfuller(
res_co.resid, maxlag=maxlag, autolag=autolag, regression="nc"
)
else:
warnings.warn(
"y0 and y1 are (almost) perfectly colinear."
"Cointegration test is not reliable in this case.",
CollinearityWarning,
)
# Edge case where series are too similar
res_adf = (-np.inf,)
# no constant or trend, see egranger in Stata and MacKinnon
if trend == "nc":
crit = [np.nan] * 3 # 2010 critical values not available
else:
crit = mackinnoncrit(N=k_vars, regression=trend, nobs=nobs - 1)
# nobs - 1, the -1 is to match egranger in Stata, I do not know why.
# TODO: check nobs or df = nobs - k
pval_asy = mackinnonp(res_adf[0], regression=trend, N=k_vars)
return res_adf[0], pval_asy, crit
def _safe_arma_fit(y, order, model_kw, trend, fit_kw, start_params=None):
from statsmodels.tsa.arima.model import ARIMA
try:
return ARIMA(y, order=order, **model_kw, trend=trend).fit(
start_params=start_params, **fit_kw
)
except LinAlgError:
# SVD convergence failure on badly misspecified models
return
except ValueError as error:
if start_params is not None: # do not recurse again
# user supplied start_params only get one chance
return
# try a little harder, should be handled in fit really
elif "initial" not in error.args[0] or "initial" in str(error):
start_params = [0.1] * sum(order)
if trend == "c":
start_params = [0.1] + start_params
return _safe_arma_fit(
y, order, model_kw, trend, fit_kw, start_params
)
else:
return
except: # no idea what happened
return
def arma_order_select_ic(
y, max_ar=4, max_ma=2, ic="bic", trend="c", model_kw=None, fit_kw=None
):
"""
Compute information criteria for many ARMA models.
Parameters
----------
y : array_like
Array of time-series data.
max_ar : int
Maximum number of AR lags to use. Default 4.
max_ma : int
Maximum number of MA lags to use. Default 2.
ic : str, list
Information criteria to report. Either a single string or a list
of different criteria is possible.
trend : str
The trend to use when fitting the ARMA models.
model_kw : dict
Keyword arguments to be passed to the ``ARMA`` model.
fit_kw : dict
Keyword arguments to be passed to ``ARMA.fit``.
Returns
-------
Bunch
Dict-like object with attribute access. Each ic is an attribute with a
DataFrame for the results. The AR order used is the row index. The ma
order used is the column index. The minimum orders are available as
``ic_min_order``.
Notes
-----
This method can be used to tentatively identify the order of an ARMA
process, provided that the time series is stationary and invertible. This
    function computes the full exact MLE estimate of each model and can,
    therefore, be a little slow. An implementation using approximate estimates
will be provided in the future. In the meantime, consider passing
{method : "css"} to fit_kw.
Examples
--------
>>> from statsmodels.tsa.arima_process import arma_generate_sample
>>> import statsmodels.api as sm
>>> import numpy as np
>>> arparams = np.array([.75, -.25])
>>> maparams = np.array([.65, .35])
>>> arparams = np.r_[1, -arparams]
    >>> maparams = np.r_[1, maparams]
>>> nobs = 250
>>> np.random.seed(2014)
>>> y = arma_generate_sample(arparams, maparams, nobs)
>>> res = sm.tsa.arma_order_select_ic(y, ic=["aic", "bic"], trend="n")
>>> res.aic_min_order
>>> res.bic_min_order
"""
max_ar = int_like(max_ar, "max_ar")
max_ma = int_like(max_ma, "max_ma")
trend = string_like(trend, "trend", options=("n", "c"))
model_kw = dict_like(model_kw, "model_kw", optional=True)
fit_kw = dict_like(fit_kw, "fit_kw", optional=True)
ar_range = [i for i in range(max_ar + 1)]
ma_range = [i for i in range(max_ma + 1)]
if isinstance(ic, str):
ic = [ic]
elif not isinstance(ic, (list, tuple)):
raise ValueError("Need a list or a tuple for ic if not a string.")
results = np.zeros((len(ic), max_ar + 1, max_ma + 1))
model_kw = {} if model_kw is None else model_kw
fit_kw = {} if fit_kw is None else fit_kw
y_arr = array_like(y, "y", contiguous=True)
for ar in ar_range:
for ma in ma_range:
mod = _safe_arma_fit(y_arr, (ar, 0, ma), model_kw, trend, fit_kw)
if mod is None:
results[:, ar, ma] = np.nan
continue
for i, criteria in enumerate(ic):
results[i, ar, ma] = getattr(mod, criteria)
dfs = [
pd.DataFrame(res, columns=ma_range, index=ar_range) for res in results
]
res = dict(zip(ic, dfs))
# add the minimums to the results dict
min_res = {}
for i, result in res.items():
mins = np.where(result.min().min() == result)
min_res.update({i + "_min_order": (mins[0][0], mins[1][0])})
res.update(min_res)
return Bunch(**res)
def has_missing(data):
"""
Returns True if "data" contains missing entries, otherwise False
"""
return np.isnan(np.sum(data))
@deprecate_kwarg("lags", "nlags")
def kpss(x, regression="c", nlags=None, store=False):
"""
Kwiatkowski-Phillips-Schmidt-Shin test for stationarity.
Computes the Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test for the null
hypothesis that x is level or trend stationary.
Parameters
----------
x : array_like, 1d
The data series to test.
regression : str{"c", "ct"}
The null hypothesis for the KPSS test.
* "c" : The data is stationary around a constant (default).
* "ct" : The data is stationary around a trend.
nlags : {None, str, int}, optional
Indicates the number of lags to be used. If None (default), lags is
calculated using the legacy method. If "auto", lags is calculated
using the data-dependent method of Hobijn et al. (1998). See also
Andrews (1991), Newey & West (1994), and Schwert (1989). If set to
"legacy", uses int(12 * (n / 100)**(1 / 4)) , as outlined in
Schwert (1989).
store : bool
If True, then a result instance is returned additionally to
the KPSS statistic (default is False).
Returns
-------
kpss_stat : float
The KPSS test statistic.
p_value : float
The p-value of the test. The p-value is interpolated from
Table 1 in Kwiatkowski et al. (1992), and a boundary point
is returned if the test statistic is outside the table of
critical values, that is, if the p-value is outside the
interval (0.01, 0.1).
lags : int
The truncation lag parameter.
crit : dict
The critical values at 10%, 5%, 2.5% and 1%. Based on
Kwiatkowski et al. (1992).
resstore : (optional) instance of ResultStore
An instance of a dummy class with results attached as attributes.
Notes
-----
To estimate sigma^2 the Newey-West estimator is used. If lags is None,
the truncation lag parameter is set to int(12 * (n / 100) ** (1 / 4)),
as outlined in Schwert (1989). The p-values are interpolated from
Table 1 of Kwiatkowski et al. (1992). If the computed statistic is
outside the table of critical values, then a warning message is
generated.
Missing values are not handled.
References
----------
.. [1] Andrews, D.W.K. (1991). Heteroskedasticity and autocorrelation
consistent covariance matrix estimation. Econometrica, 59: 817-858.
.. [2] Hobijn, B., Frances, B.H., & Ooms, M. (2004). Generalizations of the
KPSS-test for stationarity. Statistica Neerlandica, 52: 483-502.
.. [3] Kwiatkowski, D., Phillips, P.C.B., Schmidt, P., & Shin, Y. (1992).
Testing the null hypothesis of stationarity against the alternative of a
unit root. Journal of Econometrics, 54: 159-178.
.. [4] Newey, W.K., & West, K.D. (1994). Automatic lag selection in
covariance matrix estimation. Review of Economic Studies, 61: 631-653.
.. [5] Schwert, G. W. (1989). Tests for unit roots: A Monte Carlo
investigation. Journal of Business and Economic Statistics, 7 (2):
147-159.
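    Examples
    --------
    A minimal sketch on simulated level-stationary data (illustrative
    values):

    >>> import numpy as np
    >>> from statsmodels.tsa.stattools import kpss
    >>> x = np.random.default_rng(0).standard_normal(500)
    >>> stat, p_value, lags, crit = kpss(x, regression="c", nlags="auto")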
"""
x = array_like(x, "x")
regression = string_like(regression, "regression", options=("c", "ct"))
store = bool_like(store, "store")
nobs = x.shape[0]
hypo = regression
# if m is not one, n != m * n
if nobs != x.size:
raise ValueError("x of shape {0} not understood".format(x.shape))
if hypo == "ct":
# p. 162 Kwiatkowski et al. (1992): y_t = beta * t + r_t + e_t,
# where beta is the trend, r_t a random walk and e_t a stationary
# error term.
resids = OLS(x, add_constant(np.arange(1, nobs + 1))).fit().resid
crit = [0.119, 0.146, 0.176, 0.216]
elif hypo == "c":
# special case of the model above, where beta = 0 (so the null
# hypothesis is that the data is stationary around r_0).
resids = x - x.mean()
crit = [0.347, 0.463, 0.574, 0.739]
if nlags is None:
nlags = "legacy"
msg = (
"The behavior of using nlags=None will change in release 0.13."
"Currently nlags=None is the same as "
'nlags="legacy", and so a sample-size lag length is used. '
"After the next release, the default will change to be the "
'same as nlags="auto" which uses an automatic lag length '
"selection method. To silence this warning, either use "
'"auto" or "legacy"'
)
warnings.warn(msg, FutureWarning)
if nlags == "legacy":
nlags = int(np.ceil(12.0 * np.power(nobs / 100.0, 1 / 4.0)))
nlags = min(nlags, nobs - 1)
elif nlags == "auto":
# autolag method of Hobijn et al. (1998)
nlags = _kpss_autolag(resids, nobs)
nlags = min(nlags, nobs - 1)
else:
nlags = int(nlags)
if nlags >= nobs:
raise ValueError(
"lags ({}) must be < number of observations ({})".format(
nlags, nobs
)
)
pvals = [0.10, 0.05, 0.025, 0.01]
eta = np.sum(resids.cumsum() ** 2) / (nobs ** 2) # eq. 11, p. 165
s_hat = _sigma_est_kpss(resids, nobs, nlags)
kpss_stat = eta / s_hat
p_value = np.interp(kpss_stat, crit, pvals)
warn_msg = """\
The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is {direction} than the p-value returned.
"""
if p_value == pvals[-1]:
warnings.warn(
warn_msg.format(direction="smaller"), InterpolationWarning
)
elif p_value == pvals[0]:
warnings.warn(
warn_msg.format(direction="greater"), InterpolationWarning
)
crit_dict = {"10%": crit[0], "5%": crit[1], "2.5%": crit[2], "1%": crit[3]}
if store:
from statsmodels.stats.diagnostic import ResultsStore
rstore = ResultsStore()
rstore.lags = nlags
rstore.nobs = nobs
stationary_type = "level" if hypo == "c" else "trend"
rstore.H0 = "The series is {0} stationary".format(stationary_type)
rstore.HA = "The series is not {0} stationary".format(stationary_type)
return kpss_stat, p_value, crit_dict, rstore
else:
return kpss_stat, p_value, nlags, crit_dict
def _sigma_est_kpss(resids, nobs, lags):
"""
Computes equation 10, p. 164 of Kwiatkowski et al. (1992). This is the
consistent estimator for the variance.
"""
s_hat = np.sum(resids ** 2)
for i in range(1, lags + 1):
resids_prod = np.dot(resids[i:], resids[: nobs - i])
s_hat += 2 * resids_prod * (1.0 - (i / (lags + 1.0)))
return s_hat / nobs
def _kpss_autolag(resids, nobs):
"""
Computes the number of lags for covariance matrix estimation in KPSS test
using method of Hobijn et al (1998). See also Andrews (1991), Newey & West
(1994), and Schwert (1989). Assumes Bartlett / Newey-West kernel.
"""
covlags = int(np.power(nobs, 2.0 / 9.0))
s0 = np.sum(resids ** 2) / nobs
s1 = 0
for i in range(1, covlags + 1):
resids_prod = np.dot(resids[i:], resids[: nobs - i])
resids_prod /= nobs / 2.0
s0 += resids_prod
s1 += i * resids_prod
s_hat = s1 / s0
pwr = 1.0 / 3.0
gamma_hat = 1.1447 * np.power(s_hat * s_hat, pwr)
autolags = int(gamma_hat * np.power(nobs, pwr))
return autolags
class ZivotAndrewsUnitRoot(object):
"""
Class wrapper for Zivot-Andrews structural-break unit-root test
"""
def __init__(self):
"""
Critical values for the three different models specified for the
Zivot-Andrews unit-root test.
Notes
-----
The p-values are generated through Monte Carlo simulation using
100,000 replications and 2000 data points.
"""
self._za_critical_values = {}
# constant-only model
self._c = (
(0.001, -6.78442),
(0.100, -5.83192),
(0.200, -5.68139),
(0.300, -5.58461),
(0.400, -5.51308),
(0.500, -5.45043),
(0.600, -5.39924),
(0.700, -5.36023),
(0.800, -5.33219),
(0.900, -5.30294),
(1.000, -5.27644),
(2.500, -5.03340),
(5.000, -4.81067),
(7.500, -4.67636),
(10.000, -4.56618),
(12.500, -4.48130),
(15.000, -4.40507),
(17.500, -4.33947),
(20.000, -4.28155),
(22.500, -4.22683),
(25.000, -4.17830),
(27.500, -4.13101),
(30.000, -4.08586),
(32.500, -4.04455),
(35.000, -4.00380),
(37.500, -3.96144),
(40.000, -3.92078),
(42.500, -3.88178),
(45.000, -3.84503),
(47.500, -3.80549),
(50.000, -3.77031),
(52.500, -3.73209),
(55.000, -3.69600),
(57.500, -3.65985),
(60.000, -3.62126),
(65.000, -3.54580),
(70.000, -3.46848),
(75.000, -3.38533),
(80.000, -3.29112),
(85.000, -3.17832),
(90.000, -3.04165),
(92.500, -2.95146),
(95.000, -2.83179),
(96.000, -2.76465),
(97.000, -2.68624),
(98.000, -2.57884),
(99.000, -2.40044),
(99.900, -1.88932),
)
self._za_critical_values["c"] = np.asarray(self._c)
# trend-only model
self._t = (
(0.001, -83.9094),
(0.100, -13.8837),
(0.200, -9.13205),
(0.300, -6.32564),
(0.400, -5.60803),
(0.500, -5.38794),
(0.600, -5.26585),
(0.700, -5.18734),
(0.800, -5.12756),
(0.900, -5.07984),
(1.000, -5.03421),
(2.500, -4.65634),
(5.000, -4.40580),
(7.500, -4.25214),
(10.000, -4.13678),
(12.500, -4.03765),
(15.000, -3.95185),
(17.500, -3.87945),
(20.000, -3.81295),
(22.500, -3.75273),
(25.000, -3.69836),
(27.500, -3.64785),
(30.000, -3.59819),
(32.500, -3.55146),
(35.000, -3.50522),
(37.500, -3.45987),
(40.000, -3.41672),
(42.500, -3.37465),
(45.000, -3.33394),
(47.500, -3.29393),
(50.000, -3.25316),
(52.500, -3.21244),
(55.000, -3.17124),
(57.500, -3.13211),
(60.000, -3.09204),
(65.000, -3.01135),
(70.000, -2.92897),
(75.000, -2.83614),
(80.000, -2.73893),
(85.000, -2.62840),
(90.000, -2.49611),
(92.500, -2.41337),
(95.000, -2.30820),
(96.000, -2.25797),
(97.000, -2.19648),
(98.000, -2.11320),
(99.000, -1.99138),
(99.900, -1.67466),
)
self._za_critical_values["t"] = np.asarray(self._t)
# constant + trend model
self._ct = (
(0.001, -38.17800),
(0.100, -6.43107),
(0.200, -6.07279),
(0.300, -5.95496),
(0.400, -5.86254),
(0.500, -5.77081),
(0.600, -5.72541),
(0.700, -5.68406),
(0.800, -5.65163),
(0.900, -5.60419),
(1.000, -5.57556),
(2.500, -5.29704),
(5.000, -5.07332),
(7.500, -4.93003),
(10.000, -4.82668),
(12.500, -4.73711),
(15.000, -4.66020),
(17.500, -4.58970),
(20.000, -4.52855),
(22.500, -4.47100),
(25.000, -4.42011),
(27.500, -4.37387),
(30.000, -4.32705),
(32.500, -4.28126),
(35.000, -4.23793),
(37.500, -4.19822),
(40.000, -4.15800),
(42.500, -4.11946),
(45.000, -4.08064),
(47.500, -4.04286),
(50.000, -4.00489),
(52.500, -3.96837),
(55.000, -3.93200),
(57.500, -3.89496),
(60.000, -3.85577),
(65.000, -3.77795),
(70.000, -3.69794),
(75.000, -3.61852),
(80.000, -3.52485),
(85.000, -3.41665),
(90.000, -3.28527),
(92.500, -3.19724),
(95.000, -3.08769),
(96.000, -3.03088),
(97.000, -2.96091),
(98.000, -2.85581),
(99.000, -2.71015),
(99.900, -2.28767),
)
self._za_critical_values["ct"] = np.asarray(self._ct)
def _za_crit(self, stat, model="c"):
"""
Linear interpolation for Zivot-Andrews p-values and critical values
Parameters
----------
stat : float
The ZA test statistic
model : {"c","t","ct"}
The model used when computing the ZA statistic. "c" is default.
Returns
-------
pvalue : float
The interpolated p-value
cvdict : dict
Critical values for the test statistic at the 1%, 5%, and 10%
levels
Notes
-----
The p-values are linear interpolated from the quantiles of the
simulated ZA test statistic distribution
"""
table = self._za_critical_values[model]
pcnts = table[:, 0]
stats = table[:, 1]
# ZA cv table contains quantiles multiplied by 100
pvalue = np.interp(stat, stats, pcnts) / 100.0
cv = [1.0, 5.0, 10.0]
crit_value = np.interp(cv, pcnts, stats)
cvdict = {
"1%": crit_value[0],
"5%": crit_value[1],
"10%": crit_value[2],
}
return pvalue, cvdict
def _quick_ols(self, endog, exog):
"""
Minimal implementation of LS estimator for internal use
"""
xpxi = np.linalg.inv(exog.T.dot(exog))
xpy = exog.T.dot(endog)
nobs, k_exog = exog.shape
b = xpxi.dot(xpy)
e = endog - exog.dot(b)
sigma2 = e.T.dot(e) / (nobs - k_exog)
return b / np.sqrt(np.diag(sigma2 * xpxi))
def _format_regression_data(self, series, nobs, const, trend, cols, lags):
"""
Create the endog/exog data for the auxiliary regressions
from the original (standardized) series under test.
"""
# first-diff y and standardize for numerical stability
endog = np.diff(series, axis=0)
endog /= np.sqrt(endog.T.dot(endog))
series /= np.sqrt(series.T.dot(series))
# reserve exog space
exog = np.zeros((endog[lags:].shape[0], cols + lags))
exog[:, 0] = const
# lagged y and dy
exog[:, cols - 1] = series[lags : (nobs - 1)]
exog[:, cols:] = lagmat(endog, lags, trim="none")[
lags : exog.shape[0] + lags
]
return endog, exog
def _update_regression_exog(
self, exog, regression, period, nobs, const, trend, cols, lags
):
"""
Update the exog array for the next regression.
"""
cutoff = period - (lags + 1)
if regression != "t":
exog[:cutoff, 1] = 0
exog[cutoff:, 1] = const
exog[:, 2] = trend[(lags + 2) : (nobs + 1)]
if regression == "ct":
exog[:cutoff, 3] = 0
exog[cutoff:, 3] = trend[1 : (nobs - period + 1)]
else:
exog[:, 1] = trend[(lags + 2) : (nobs + 1)]
exog[: (cutoff - 1), 2] = 0
exog[(cutoff - 1) :, 2] = trend[0 : (nobs - period + 1)]
return exog
def run(self, x, trim=0.15, maxlag=None, regression="c", autolag="AIC"):
"""
Zivot-Andrews structural-break unit-root test.
The Zivot-Andrews test tests for a unit root in a univariate process
in the presence of serial correlation and a single structural break.
Parameters
----------
x : array_like
The data series to test.
trim : float
The percentage of series at begin/end to exclude from break-period
calculation in range [0, 0.333] (default=0.15).
maxlag : int
The maximum lag which is included in test, default is
12*(nobs/100)^{1/4} (Schwert, 1989).
regression : {"c","t","ct"}
Constant and trend order to include in regression.
* "c" : constant only (default).
* "t" : trend only.
* "ct" : constant and trend.
autolag : {"AIC", "BIC", "t-stat", None}
The method to select the lag length when using automatic selection.
* if None, then maxlag lags are used,
* if "AIC" (default) or "BIC", then the number of lags is chosen
to minimize the corresponding information criterion,
* "t-stat" based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test.
Returns
-------
zastat : float
The test statistic.
pvalue : float
The pvalue based on MC-derived critical values.
cvdict : dict
The critical values for the test statistic at the 1%, 5%, and 10%
levels.
        baselag : int
            The number of lags used for period regressions.
        bpidx : int
            The index of x corresponding to the endogenously calculated
            break period, with values in the range [0..nobs-1].
Notes
-----
H0 = unit root with a single structural break
Algorithm follows Baum (2004/2015) approximation to original
Zivot-Andrews method. Rather than performing an autolag regression at
each candidate break period (as per the original paper), a single
autolag regression is run up-front on the base model (constant + trend
with no dummies) to determine the best lag length. This lag length is
then used for all subsequent break-period regressions. This results in
significant run time reduction but also slightly more pessimistic test
statistics than the original Zivot-Andrews method, although no attempt
has been made to characterize the size/power trade-off.
References
----------
.. [1] Baum, C.F. (2004). ZANDREWS: Stata module to calculate
Zivot-Andrews unit root test in presence of structural break,"
Statistical Software Components S437301, Boston College Department
of Economics, revised 2015.
.. [2] Schwert, G.W. (1989). Tests for unit roots: A Monte Carlo
investigation. Journal of Business & Economic Statistics, 7:
147-159.
.. [3] Zivot, E., and Andrews, D.W.K. (1992). Further evidence on the
great crash, the oil-price shock, and the unit-root hypothesis.
Journal of Business & Economic Studies, 10: 251-270.
"""
x = array_like(x, "x")
trim = float_like(trim, "trim")
maxlag = int_like(maxlag, "maxlag", optional=True)
regression = string_like(
regression, "regression", options=("c", "t", "ct")
)
autolag = string_like(
autolag, "autolag", options=("aic", "bic", "t-stat"), optional=True
)
if trim < 0 or trim > (1.0 / 3.0):
raise ValueError("trim value must be a float in range [0, 1/3)")
nobs = x.shape[0]
if autolag:
adf_res = adfuller(
x, maxlag=maxlag, regression="ct", autolag=autolag
)
baselags = adf_res[2]
elif maxlag:
baselags = maxlag
else:
baselags = int(12.0 * np.power(nobs / 100.0, 1 / 4.0))
trimcnt = int(nobs * trim)
start_period = trimcnt
end_period = nobs - trimcnt
if regression == "ct":
basecols = 5
else:
basecols = 4
# normalize constant and trend terms for stability
c_const = 1 / np.sqrt(nobs)
t_const = np.arange(1.0, nobs + 2)
t_const *= np.sqrt(3) / nobs ** (3 / 2)
# format the auxiliary regression data
endog, exog = self._format_regression_data(
x, nobs, c_const, t_const, basecols, baselags
)
# iterate through the time periods
stats = np.full(end_period + 1, np.inf)
for bp in range(start_period + 1, end_period + 1):
# update intercept dummy / trend / trend dummy
exog = self._update_regression_exog(
exog,
regression,
bp,
nobs,
c_const,
t_const,
basecols,
baselags,
)
# check exog rank on first iteration
if bp == start_period + 1:
o = OLS(endog[baselags:], exog, hasconst=1).fit()
if o.df_model < exog.shape[1] - 1:
raise ValueError(
"ZA: auxiliary exog matrix is not full rank.\n"
" cols (exc intercept) = {} rank = {}".format(
exog.shape[1] - 1, o.df_model
)
)
stats[bp] = o.tvalues[basecols - 1]
else:
stats[bp] = self._quick_ols(endog[baselags:], exog)[
basecols - 1
]
# return best seen
zastat = np.min(stats)
bpidx = np.argmin(stats) - 1
crit = self._za_crit(zastat, regression)
pval = crit[0]
cvdict = crit[1]
return zastat, pval, cvdict, baselags, bpidx
def __call__(
self, x, trim=0.15, maxlag=None, regression="c", autolag="AIC"
):
return self.run(
x, trim=trim, maxlag=maxlag, regression=regression, autolag=autolag
)
zivot_andrews = ZivotAndrewsUnitRoot()
zivot_andrews.__doc__ = zivot_andrews.run.__doc__
|
jseabold/statsmodels
|
statsmodels/tsa/stattools.py
|
Python
|
bsd-3-clause
| 89,818
|
import urllib2
import base64
import simplejson as json
from pprint import pprint
import os
import time
import inspect
def exc_dec(f):
def wrapper(*args, **kwargs):
try:
f(*args, **kwargs)
except Exception:
print -1
return wrapper
class Rabbitmq_Helper:
def __init__(self, params):
self.ip = params['ip']
self.port = params['port']
self.usr = params['usr']
self.pwd = params['pwd']
self.cache_file_path = os.path.dirname(os.path.abspath(__file__))
self.cache_period_sec = 60
def make_url(self, api):
return "http://" + self.ip + ":" + self.port + '/' + api
def make_cache_file_name_from_url(self, url):
return self.cache_file_path + '/' + url.replace('/', '_') + ".tmp"
def is_old_cache_file(self, file_name):
one_minute_ago = time.time() - self.cache_period_sec
if not os.path.isfile(file_name) or (os.stat(file_name).st_mtime < one_minute_ago):
return True
else:
return False
def make_cache_file(self, file_name, data):
f = open(file_name, 'w')
try:
json.dump(data, f)
finally:
f.close()
def get_data_from_file(self, file_name):
stream = open(file_name, 'r')
data = json.load(stream)
return data
def get_data_from_http(self, url):
try:
request = urllib2.Request(self.make_url(url))
base64string = base64.encodestring('%s:%s' % (self.usr, self.pwd)).replace('\n', '')
request.add_header("Authorization", "Basic %s" % base64string)
result = urllib2.urlopen(request)
return json.load(result)
except Exception, e:
print ("URL" + self.make_url(url))
print (e)
return None
def get_data_from_api(self, url):
if self.is_old_cache_file(self.make_cache_file_name_from_url(url)):
new_data = self.get_data_from_http(url)
self.make_cache_file(self.make_cache_file_name_from_url(url), new_data)
return new_data
else:
return self.get_data_from_file(self.make_cache_file_name_from_url(url))
def get_api_queues(self):
return self.get_data_from_api("api/queues")
def get_api_exchanges(self):
return self.get_data_from_api("api/exchanges")
def get_api_overview(self):
return self.get_data_from_api("api/overview")
def get_api_resources(self):
return self.get_data_from_api("api/nodes")
def get_queue_data(self, queue_name):
return [item for item in self.get_api_queues()
if item["name"] == queue_name][0]
def get_exchange_data(self, exchange_name):
return [item for item in self.get_api_exchanges()
if item["name"] == exchange_name][0]
def get_messages(self, queue_name, msg_type):
return self.get_queue_data(queue_name)[msg_type]
def get_rates(self, queue_name, rate_type):
return self.get_queue_data(queue_name)["message_stats"][rate_type]["rate"]
def get_msg_sum(self, msg_type):
return self.get_api_overview()["queue_totals"][msg_type]
def get_rates_sum(self, rate_type):
return self.get_api_overview()["message_stats"][rate_type]["rate"]
def get_resources_info(self, res_type):
return self.get_api_resources()[0][res_type]
def get_totals(self, total_type):
return self.get_api_overview()["object_totals"][total_type]
def get_api_tests(self):
print('------------------------- GET API QUEUES ---------------------------------------')
pprint(self.get_api_queues())
print('------------------------- GET API EXCHANGES ------------------------------------')
pprint(self.get_api_exchanges())
print('------------------------- GET API OVERVIEW -------------------------------------')
pprint(self.get_api_overview())
print('------------------------- GET API RESOURCES ------------------------------------')
pprint(self.get_api_resources())
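# Usage sketch (connection parameters are placeholders, not real credentials;
# 15672 is the usual RabbitMQ management-plugin port):
#
#     if __name__ == '__main__':
#         helper = Rabbitmq_Helper({'ip': '127.0.0.1', 'port': '15672',
#                                   'usr': 'guest', 'pwd': 'guest'})
#         print(helper.get_totals('queues'))
#
# Each endpoint is fetched over HTTP at most once per cache_period_sec seconds;
# calls inside that window are served from the JSON cache file next to this module.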
|
ngelik/python
|
RabbitmQ_mon/rabbitmq_helper.py
|
Python
|
mit
| 4,124
|
#-------------------------------------------------------------------------------
# Name: levels
# Purpose:
#
# Author: novirael
#
# Created: 17-04-2012
# Copyright: (c) novirael 2012
# Licence: <your licence>
#-------------------------------------------------------------------------------
#!/usr/bin/env python
# Import
import pygame
from sprites import Kafel
# Blocks
kafelek = [ "img/blue.png", "img/green.png", "img/red.png", "img/yellow.png",
"img/grey.png", "img/purple.png" ]
# Colors
black = (0,0,0)
white = (255,255,255)
blue = (0,100,200)
green = (0,200,0)
red = (255,0,0)
yellow = (235,235,0)
purple = (113,0,185)
# Variables
SW, SH = 900, 600
k_width, k_height = 45, 20
def draw_level(n):
    # Sprite groups holding the blocks and everything that gets drawn;
    # these were referenced below but never created
    blocks = pygame.sprite.Group()
    allsprites = pygame.sprite.Group()
    if n == 1:
        # The top of the block (y position)
        top = 80
for i in range(15):
block = Kafel(blue, kafelek[0], i*(k_width+2), top)
blocks.add(block)
allsprites.add(block)
return allsprites, blocks
# --- Create blocks
"""
# Five rows of blocks
for row in range(2):
for column in range(0,20):
block = Kafel(blue, kafelek[0], column*(k_width+2), top)
blocks.add(block)
allsprites.add(block)
# Move the top of the next row down
top += k_height + 2
"""
|
novirael/arkanoid-pygame
|
levels.py
|
Python
|
gpl-2.0
| 1,317
|
# -*- coding:utf-8 -*-
import unittest
from boto.exception import SWFResponseError
from boto.swf.layer1 import Layer1
from mock import patch
import swf.settings
from swf.exceptions import DoesNotExistError, ResponseError
from swf.models.activity import ActivityType
from swf.models.domain import Domain
from swf.querysets.activity import ActivityTypeQuerySet
from ..mocks.activity import mock_describe_activity_type, mock_list_activity_types
swf.settings.set(aws_access_key_id="fakeaccesskey", aws_secret_access_key="fakesecret")
class TestActivityTypeQuerySet(unittest.TestCase):
def setUp(self):
self.domain = Domain("TestDomain")
self.atq = ActivityTypeQuerySet(self.domain)
def tearDown(self):
pass
def test_get_domain_property_instantiates_private_attribute(self):
"""Assert .__init__() instantiate _domain private attr"""
bw = ActivityTypeQuerySet(self.domain)
delattr(bw, "_domain")
dummy = bw.domain
self.assertTrue(hasattr(bw, "_domain"))
def test_get_or_create_existing_activity_type(self):
with patch.object(
Layer1, "describe_activity_type", mock_describe_activity_type
):
activity_type = self.atq.get_or_create("TestActivityType", "testversion")
self.assertIsInstance(activity_type, ActivityType)
def test_get_or_create_non_existent_activity_type(self):
with patch.object(Layer1, "describe_activity_type") as mock:
mock.side_effect = DoesNotExistError("Mocked exception")
with patch.object(
Layer1, "register_activity_type", mock_describe_activity_type
):
activity_type = self.atq.get_or_create("TestDomain", "testversion")
self.assertIsInstance(activity_type, ActivityType)
def test_instantiation_with_valid_domain(self):
"""Assert instantiation with valid domain object"""
bw = ActivityTypeQuerySet(self.domain)
self.assertIsInstance(bw.domain, Domain)
self.assertEqual(bw._domain, bw.domain)
def test_instantiation_with_invalid_domain(self):
"""Assert instantiation with invalid domain raises"""
with self.assertRaises(TypeError):
# noinspection PyTypeChecker
ActivityTypeQuerySet("WrongType")
def test_all(self):
"""Asserts .all() method returns a list of valid Activity instances"""
with patch.object(
self.atq.connection, "list_activity_types", mock_list_activity_types,
):
activities = self.atq.all()
self.assertIsNotNone(activities)
self.assertIsInstance(activities, list)
for activity in activities:
self.assertIsInstance(activity, ActivityType)
def test_get_existent_activity_type(self):
"""Assert .get() method with valid params returns the asked ActivityType model"""
with patch.object(
self.atq.connection, "describe_activity_type", mock_describe_activity_type
):
activity = self.atq.get("mocked-activity-type", "0.1")
self.assertIsNotNone(activity)
self.assertIsInstance(activity, ActivityType)
def test_get_with_failing_activity_type(self):
"""Asserts get method over a failing activity type raises"""
with patch.object(self.atq.connection, "describe_activity_type") as mock:
with self.assertRaises(ResponseError):
mock.side_effect = SWFResponseError(
400, "mocking exception", {"__type": "UnrecognizedClientException"}
)
self.atq.get("mocked-failing-activity-type", "0.1")
def test_get_with_non_existent_name(self):
"""Asserts get method with non existent activity type name provided raises"""
with patch.object(self.atq.connection, "describe_activity_type") as mock:
with self.assertRaises(DoesNotExistError):
mock.side_effect = SWFResponseError(
400, "mocking exception", {"__type": "UnknownResourceFault"}
)
self.atq.get("mocked-non-existent-activity-type-name", "0.1")
def test_get_with_non_existent_version(self):
"""Asserts get method with non existent activity type version provided raises"""
with patch.object(self.atq.connection, "describe_activity_type") as mock:
with self.assertRaises(DoesNotExistError):
mock.side_effect = SWFResponseError(
400, "mocking exception", {"__type": "UnknownResourceFault"}
)
self.atq.get("mocked-non-existent-activity-type-name", "na")
def test_create(self):
with patch.object(Layer1, "register_activity_type"):
new_activity_type = ActivityType(self.domain, "TestActivityType", "0.test")
self.assertIsNotNone(new_activity_type)
self.assertIsInstance(new_activity_type, ActivityType)
|
botify-labs/simpleflow
|
tests/test_swf/querysets/test_activity.py
|
Python
|
mit
| 5,019
|
# https://zenpack-sdk.zenoss.com/en/2.0.0/changes.html
from ZenPacks.zenoss.ZenPackLib import zenpacklib
CFG = zenpacklib.load_yaml()
schema = CFG.zenpack_module.schema
|
daviswr/ZenPacks.daviswr.OSX.Server.Caching
|
ZenPacks/daviswr/OSX/Server/Caching/__init__.py
|
Python
|
mit
| 169
|
import matplotlib.pyplot as plt
import models.model as model
import earthquake.catalog as catalog
from collections import OrderedDict
def histogramEarthquakes(catalog_, region):
"""
Creates the histogram of earthquake events by a given region.
Saves the histogram to the follwing path ./code/Zona2/histograms/Kanto/earthquake in ' + region + '.png'
Where region is given by the application
From 2000 to 2013
"""
definition = model.loadModelDefinition('../params/' + region + '.txt')
catalog_ = catalog.filter(catalog_, definition)
year = 2000
data = dict()
while(year <= 2013):
for i in range(len(catalog_)):
if catalog_[i]['year'] == year and catalog_[i]['lat'] > 34.8 and catalog_[i][
'lat'] < 37.05 and catalog_[i]['lon'] > 138.8 and catalog_[i]['lon'] < 141.05:
data[year] = data.get(year, 0) + 1
year += 1
    plt.title('Histogram of earthquake in ' + region)
    plt.bar(range(len(data)), list(data.values()), align='center')
    plt.xticks(range(len(data)), list(data.keys()), rotation=25)
    plt.savefig('../Zona2/histograms/Kanto/earthquake in ' + region + '.png')
    plt.clf()  # reset the figure so successive regions do not draw on top of each other
    del data
def main():
"""
    Plots a histogram of earthquake events for each region, based on the JMA catalog.
"""
catalog_ = catalog.readFromFile('../data/jmacat_2000_2013.dat')
region = "Kanto"
histogramEarthquakes(catalog_, region)
region = "Kansai"
histogramEarthquakes(catalog_, region)
region = "Tohoku"
histogramEarthquakes(catalog_, region)
region = "EastJapan"
histogramEarthquakes(catalog_, region)
if __name__ == "__main__":
main()
|
PyQuake/earthquakemodels
|
code/runExperiments/histogramEarthquakes.py
|
Python
|
bsd-3-clause
| 1,708
|
#===============================================================================
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#===============================================================================
import sys
import datetime
import time
from urllib2 import URLError
from PySide import QtGui, QtCore
import threading
import reddit
from MainWindow import Ui_MainWindow
from RDMainWindow import RDMainWindow
from submissionItem import Ui_submissionItem
from commentItem import Ui_commentItem
from threads import *
def unescape(string):
if(string is not None):
string = string.replace("<", "<")
string = string.replace(">", ">")
# this has to be last:
string = string.replace("&", "&")
return string
def formatDate(num):
    # `num` is a UTC epoch timestamp in seconds; report its age in whole hours
    return str(int((time.time() - num) / 3600)) + " hours ago"
class Reddit_Desk():
defaultSubreddits = ["pics","gaming","worldnews","videos","todayilearned", \
"IAmA","funny","atheism", "politics","science","AskReddit","technology", \
"WTF","blog","announcements","bestof","AdviceAnimals","Music","aww", \
"askscience","movies"]
app = QtGui.QApplication(sys.argv)
form = QtGui.QMainWindow()
UI = Ui_MainWindow()
reddit = reddit.Reddit(user_agent="reddit-desk")
currentPostList = []
subredditList = []
comments = []
loggedIn = False
username =""
password =""
pasoLogin=0
requestThread = QtCore.QThread()
def enterLogin(self):
if(self.pasoLogin==0):
self.username = str(self.UI.lineLogin.text())
self.UI.lineLogin.setText("")
self.UI.lineLogin.setEchoMode(QtGui.QLineEdit.EchoMode(2)) #password
self.pasoLogin=1
elif(self.pasoLogin==1):
self.password = str(self.UI.lineLogin.text())
try:
#self.reddit.login(self.username, self.password)
                self.requestThread = loginThread(self.reddit,self.username,self.password)
                self.requestThread.finished.connect(self.refreshLogin)
                self.requestThread.start()
except reddit.api_exceptions.InvalidUserPass:
[self.UI.comboSubreddit.addItem(i.display_name) for i in self.subredditList]
self.password=""
self.UI.lineLogin.setText("")
self.pasoLogin=0
self.UI.lineLogin.setEchoMode(QtGui.QLineEdit.EchoMode(0))#login
else:
self.UI.lineLogin.setEchoMode(QtGui.QLineEdit.EchoMode(0))#login
self.UI.lineLogin.setText("Logged in as "+self.reddit.user.user_name)
self.UI.lineLogin.setEnabled(False)
self.loadSubreddits()
def refreshLogin(self):
pass
def loadComment(self,comment, item):
"""
Loads a comment widget with the comment's data and puts it onto the tree item
"""
commentItem = Ui_commentItem()
widget = QtGui.QWidget(self.UI.treeComments)
commentItem.setupUi(widget)
commentItem.labelScore.setText(str(comment.ups - comment.downs)+" points")
commentItem.labelComentario.setText(unescape(comment.body_html))
commentItem.labelTime.setText(formatDate(comment.created))
commentItem.labelRedditor.setText(comment.author.user_name)
#commentItem.buttonUpvote.clicked.connect(comment.upvote)
#commentItem.buttonDownvote.clicked.connect(comment.downvote)
self.UI.treeComments.setItemWidget(item, 0 , widget)
def loadReplies(self, comment, item):
"""
Adds a comment's replies as children to that comment's tree item
"""
for j in comment.replies:
if type(j)!=type({}):
child = QtGui.QTreeWidgetItem(item)
item.addChild(child)
self.loadComment(j, child)
self.loadReplies(j,child)
def requestPostList(self):
"""
Starts a thread to request the post list from either a subreddit or the user's
front page
"""
if(self.UI.comboSubreddit.currentIndex()>=0 and not self.requestThread.isRunning()):
self.UI.labelTitle.setText("Loading Post List...")
self.UI.listPosts.clear()
if(self.UI.comboSubreddit.currentIndex() == 0):
self.requestThread = requestPostListThread(\
self.currentPostList,\
reddit=self.reddit)
elif(self.UI.comboSubreddit.currentIndex()>0):
currentSubreddit=self.subredditList[self.UI.comboSubreddit.currentIndex()-1]
self.requestThread = requestPostListThread(\
self.currentPostList,\
subreddit=currentSubreddit)
self.requestThread.finished.connect(self.loadPostList)
self.requestThread.start()
def loadPostList(self):
"""
Fills the GUI post list with the current post list
"""
self.UI.listPosts.clear()
for i in self.currentPostList:
item = QtGui.QListWidgetItem(self.UI.listPosts)
subredditItem = Ui_submissionItem()
widget = QtGui.QWidget()
subredditItem.setupUi(widget)
subredditItem.labelScore.setText(str(i.score))
subredditItem.labelTitle.setText(i.title)
subredditItem.labelSubreddit.setText("in "+i.subreddit.display_name)
subredditItem.labelRedditor.setText("by "+i.author.user_name)
self.UI.listPosts.addItem(item)
self.UI.listPosts.setItemWidget( item, widget)
item.setSizeHint(QtCore.QSize(0,105))
def requestSubmission(self):
"""
Starts the thread to request a submission's data.
"""
if(not self.requestThread.isRunning()):
self.UI.labelTitle.setText("Loading Submission...")
self.currentSubmission = self.currentPostList[self.UI.listPosts.currentRow()]
self.requestThread = requestSubmissionThread(self.currentSubmission)
self.requestThread.finished.connect(self.loadSubmission)
self.requestThread.start()
def loadSubmission(self):
"""
Fills the title, web view and comment tree with the current submission.
"""
#webview
if(self.currentSubmission.is_self):
self.UI.webView.setHtml(unescape(self.currentSubmission.selftext_html))
else:
self.UI.webView.load(QtCore.QUrl(self.currentSubmission.url))
#comments
self.UI.treeComments.clear()
for i in self.currentSubmission.comments:
if type(i)!=type({}): #The list has a dictionary at the end for some reason
item = QtGui.QTreeWidgetItem(self.UI.treeComments)
self.UI.treeComments.addTopLevelItem(item)
self.loadComment(i,item)
self.loadReplies(i,item)
self.UI.labelTitle.setText(self.currentSubmission.title)
self.UI.treeComments.expandAll()
def __init__(self):
#Set up the user interface
self.UI.setupUi(self.form)
self.form.addDockWidget(QtCore.Qt.LeftDockWidgetArea,\
self.UI.dockPostList)
self.form.addDockWidget(QtCore.Qt.LeftDockWidgetArea,\
self.UI.dockWebView,\
QtCore.Qt.Horizontal)
self.form.addDockWidget(QtCore.Qt.LeftDockWidgetArea,\
self.UI.dockComments,\
QtCore.Qt.Horizontal)
#BEGIN Setting default layout
self.form.setCentralWidget(None) #Effectively makes the whole window a Dock Area
self.form.tabifyDockWidget(self.UI.dockWebView,self.UI.dockComments)
#END default layout
self.UI.dockWebView.raise_()
self.UI.toolBar.addWidget(self.UI.labelTitle)
#setup signals & slots:
self.UI.listPosts.itemSelectionChanged.connect(self.requestSubmission)
self.UI.comboSubreddit.currentIndexChanged.connect(self.requestPostList)
self.UI.lineLogin.returnPressed.connect(self.enterLogin)
self.form.show()
try:
self.loadSubreddits()
except URLError:
self.UI.labelTitle.setText("No Internet connection")
self.app.exec_()
sys.exit()
def loadSubreddits(self):
"""
Fills the Subreddit combobox and the post list with the user's front page
"""
self.UI.comboSubreddit.clear()
if( self.reddit.user is not None ):
self.subredditList = list(self.reddit.user.get_my_reddits())
else:
self.subredditList = [self.reddit.get_subreddit(i) for i in self.defaultSubreddits]
self.UI.comboSubreddit.addItem("Front Page")
[self.UI.comboSubreddit.addItem(i.display_name) for i in self.subredditList]
self.loadPostList() #load the frontpage
#loadPostListThread(self).start()
if __name__ == "__main__":
r = Reddit_Desk()
|
cmotc/reddit-desk
|
Reddit_Desk.py
|
Python
|
gpl-3.0
| 10,219
|
import json
from urllib.parse import urlencode
from django import template
from django.conf import settings
from django.apps import apps
from vault.models import GroupProjects
from identity.keystone import KeystoneNoRequest
register = template.Library()
@register.simple_tag(takes_context=True)
def get_vault_env(context):
request = context.get("request")
envs = ["local", "dev", "qa", "qa1", "qa2", "prod", "beta", "docker"]
if settings.ENVIRON in envs:
return settings.ENVIRON
if settings.ENVIRON is None and "localhost" in request.get_host():
return "local"
return ""
@register.inclusion_tag('vault/set_project.html', takes_context=True)
def set_project(context):
user = context.get('user')
groups = user.groups.all()
request = context.get('request')
keystone = KeystoneNoRequest()
group_projects = []
for group in groups:
gps = GroupProjects.objects.filter(group=group.id)
        gps_ks = [p for p in keystone.project_list() if p.enabled]
gp_projs = []
for gp in gps:
for gp_ks in gps_ks:
if gp.project == gp_ks.id:
gp_projs.append(gp_ks)
break
gp_projs.sort(key=lambda x: x.name.lower())
group_projects.append({
'team': group.name,
'projects': gp_projs
})
current_project = {'id': context.get('project_id'),
'name': context.get('project_name')}
if current_project.get('id') is None:
req = context.get('request')
current_project['id'] = req.session.get('project_id')
current_project['name'] = req.session.get('project_name')
return {
'current_project': current_project,
'group_projects': group_projects,
'has_group': user.groups.count() > 0
}
@register.simple_tag(takes_context=True)
def get_logout_url(context):
request = context.get('request')
logout_url = settings.LOGOUT_URL.format(
request.META['HTTP_HOST']
)
return logout_url
@register.simple_tag()
def url_replace(get_parameters, **kwargs):
query = get_parameters
query.update(kwargs)
for x in query:
if type(query[x]) is not list:
query[x] = [query[x]]
query_pairs = [(k, v) for k, vlist in query.items() for v in vlist]
return urlencode(query_pairs)
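# Worked example for url_replace (illustrative values): calling
# url_replace({'page': ['2'], 'q': ['vault']}, page=3) returns a query string
# like 'page=3&q=vault' -- keyword arguments overwrite existing keys,
# everything else is preserved, and multi-valued keys expand to repeated
# key=value pairs.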
|
globocom/vault
|
vault/templatetags/vault_tags.py
|
Python
|
apache-2.0
| 2,418
|
# -*- coding: utf-8 -*-
from tornado import ioloop
class StatusJob(ioloop.PeriodicCallback):
CALLBACK_TIME = 10000
def __init__(self, sample_service, ws_service):
super().__init__(self.callback, self.CALLBACK_TIME)
self.sample_service = sample_service
self.ws_service = ws_service
def callback(self):
status = self.sample_service.get_status()
self.ws_service.broadcast(status)
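# Usage sketch (sample_service and ws_service stand in for the app's real
# services; any objects exposing get_status() / broadcast() will do):
#
#     job = StatusJob(sample_service, ws_service)
#     job.start()
#     ioloop.IOLoop.current().start()
#
# PeriodicCallback then invokes callback() every CALLBACK_TIME milliseconds
# for as long as the IOLoop is running.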
|
maveron58/indiana
|
web/jobs/status_job.py
|
Python
|
mit
| 434
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a Container VM with the provided Container manifest."""
from container_helper import GenerateManifest
def GenerateConfig(context):
"""Generates configuration."""
image = ''.join(['https://www.googleapis.com/compute/v1/',
'projects/cos-cloud/global/images/',
context.properties['containerImage']])
default_network = ''.join(['https://www.googleapis.com/compute/v1/projects/',
context.env['project'],
'/global/networks/default'])
instance_template = {
'name': context.env['name'] + '-it',
'type': 'compute.v1.instanceTemplate',
'properties': {
'properties': {
'metadata': {
'items': [{
'key': 'gce-container-declaration',
'value': GenerateManifest(context)
}]
},
'machineType': 'f1-micro',
'disks': [{
'deviceName': 'boot',
'boot': True,
'autoDelete': True,
'mode': 'READ_WRITE',
'type': 'PERSISTENT',
'initializeParams': {'sourceImage': image}
}],
'networkInterfaces': [{
'accessConfigs': [{
'name': 'external-nat',
'type': 'ONE_TO_ONE_NAT'
}],
'network': default_network
}],
'serviceAccounts': [{
'email': 'default',
'scopes': ['https://www.googleapis.com/auth/logging.write']
}]
}
}
}
outputs = [{'name': 'instanceTemplateSelfLink',
'value': '$(ref.' + instance_template['name'] + '.selfLink)'}]
return {'resources': [instance_template], 'outputs': outputs}
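# Usage sketch (hypothetical deployment config; any properties beyond
# 'containerImage' that GenerateManifest reads live in container_helper and
# are not shown here):
#
#   imports:
#   - path: container_instance_template.py
#   resources:
#   - name: my-container-it
#     type: container_instance_template.py
#     properties:
#       containerImage: cos-stable-63-10032-71-0
#
# The template emits a single compute.v1.instanceTemplate resource and exposes
# its selfLink through the 'instanceTemplateSelfLink' output.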
|
aljim/deploymentmanager-samples
|
examples/v2/common/python/container_instance_template.py
|
Python
|
apache-2.0
| 2,534
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re, time, random
from openerp.osv import fields, osv
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
"""
account.invoice object:
- Add support for Belgian structured communication
- Rename 'reference' field labels to 'Communication'
"""
class account_invoice(osv.osv):
_inherit = 'account.invoice'
def _get_reference_type(self, cursor, user, context=None):
"""Add BBA Structured Communication Type and change labels from 'reference' into 'communication' """
res = super(account_invoice, self)._get_reference_type(cursor, user,
context=context)
res[[i for i,x in enumerate(res) if x[0] == 'none'][0]] = ('none', 'Free Communication')
res.append(('bba', 'BBA Structured Communication'))
#l_logger.warning('reference_type = %s' %res )
return res
def check_bbacomm(self, val):
supported_chars = '0-9+*/ '
pattern = re.compile('[^' + supported_chars + ']')
if pattern.findall(val or ''):
return False
bbacomm = re.sub('\D', '', val or '')
if len(bbacomm) == 12:
base = int(bbacomm[:10])
mod = base % 97 or 97
if mod == int(bbacomm[-2:]):
return True
return False
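    # Worked example of the mod-97 check above (illustrative value, not from
    # the source): for '+++123/4567/89002+++' the digits are '123456789002',
    # the 10-digit base is 1234567890, and 1234567890 % 97 == 2, which matches
    # the final two digits, so check_bbacomm() returns True.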
def _check_communication(self, cr, uid, ids):
for inv in self.browse(cr, uid, ids):
if inv.reference_type == 'bba':
return self.check_bbacomm(inv.reference)
return True
def onchange_partner_id(self, cr, uid, ids, type, partner_id,
date_invoice=False, payment_term=False, partner_bank_id=False, company_id=False):
result = super(account_invoice, self).onchange_partner_id(cr, uid, ids, type, partner_id,
date_invoice, payment_term, partner_bank_id, company_id)
# reference_type = self.default_get(cr, uid, ['reference_type'])['reference_type']
# _logger.warning('partner_id %s' % partner_id)
reference = False
reference_type = 'none'
if partner_id:
if (type == 'out_invoice'):
reference_type = self.pool.get('res.partner').browse(cr, uid, partner_id).out_inv_comm_type
if reference_type:
reference = self.generate_bbacomm(cr, uid, ids, type, reference_type, partner_id, '', context={})['value']['reference']
res_update = {
'reference_type': reference_type or 'none',
'reference': reference,
}
result['value'].update(res_update)
return result
def generate_bbacomm(self, cr, uid, ids, type, reference_type, partner_id, reference, context=None):
partner_obj = self.pool.get('res.partner')
reference = reference or ''
algorithm = False
if partner_id:
algorithm = partner_obj.browse(cr, uid, partner_id, context=context).out_inv_comm_algorithm
algorithm = algorithm or 'random'
if (type == 'out_invoice'):
if reference_type == 'bba':
if algorithm == 'date':
if not self.check_bbacomm(reference):
doy = time.strftime('%j')
year = time.strftime('%Y')
seq = '001'
seq_ids = self.search(cr, uid,
[('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
('reference', 'like', '+++%s/%s/%%' % (doy, year))], order='reference')
if seq_ids:
prev_seq = int(self.browse(cr, uid, seq_ids[-1]).reference[12:15])
if prev_seq < 999:
seq = '%03d' % (prev_seq + 1)
else:
raise osv.except_osv(_('Warning!'),
_('The daily maximum of outgoing invoices with an automatically generated BBA Structured Communications has been exceeded!' \
'\nPlease create manually a unique BBA Structured Communication.'))
bbacomm = doy + year + seq
base = int(bbacomm)
mod = base % 97 or 97
reference = '+++%s/%s/%s%02d+++' % (doy, year, seq, mod)
elif algorithm == 'partner_ref':
if not self.check_bbacomm(reference):
partner_ref = self.pool.get('res.partner').browse(cr, uid, partner_id).ref
partner_ref_nr = re.sub('\D', '', partner_ref or '')
if (len(partner_ref_nr) < 3) or (len(partner_ref_nr) > 7):
raise osv.except_osv(_('Warning!'),
_('The Partner should have a 3-7 digit Reference Number for the generation of BBA Structured Communications!' \
'\nPlease correct the Partner record.'))
else:
partner_ref_nr = partner_ref_nr.ljust(7, '0')
seq = '001'
seq_ids = self.search(cr, uid,
[('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
('reference', 'like', '+++%s/%s/%%' % (partner_ref_nr[:3], partner_ref_nr[3:]))], order='reference')
if seq_ids:
prev_seq = int(self.browse(cr, uid, seq_ids[-1]).reference[12:15])
if prev_seq < 999:
seq = '%03d' % (prev_seq + 1)
else:
raise osv.except_osv(_('Warning!'),
_('The daily maximum of outgoing invoices with an automatically generated BBA Structured Communications has been exceeded!' \
'\nPlease create manually a unique BBA Structured Communication.'))
bbacomm = partner_ref_nr + seq
base = int(bbacomm)
mod = base % 97 or 97
reference = '+++%s/%s/%s%02d+++' % (partner_ref_nr[:3], partner_ref_nr[3:], seq, mod)
elif algorithm == 'random':
if not self.check_bbacomm(reference):
base = random.randint(1, 9999999999)
                        bbacomm = str(base).rjust(10, '0')  # the BBA base number is always 10 digits
base = int(bbacomm)
mod = base % 97 or 97
mod = str(mod).rjust(2, '0')
reference = '+++%s/%s/%s%s+++' % (bbacomm[:3], bbacomm[3:7], bbacomm[7:], mod)
else:
raise osv.except_osv(_('Error!'),
_("Unsupported Structured Communication Type Algorithm '%s' !" \
"\nPlease contact your OpenERP support channel.") % algorithm)
return {'value': {'reference': reference}}
def create(self, cr, uid, vals, context=None):
reference = vals.get('reference', False)
reference_type = vals.get('reference_type', False)
if vals.get('type') == 'out_invoice' and not reference_type:
# fallback on default communication type for partner
reference_type = self.pool.get('res.partner').browse(cr, uid, vals['partner_id']).out_inv_comm_type
if reference_type == 'bba':
reference = self.generate_bbacomm(cr, uid, [], vals['type'], reference_type, vals['partner_id'], '', context={})['value']['reference']
vals.update({
'reference_type': reference_type or 'none',
'reference': reference,
})
if reference_type == 'bba':
if not reference:
raise osv.except_osv(_('Warning!'),
_('Empty BBA Structured Communication!' \
'\nPlease fill in a unique BBA Structured Communication.'))
if self.check_bbacomm(reference):
reference = re.sub('\D', '', reference)
vals['reference'] = '+++' + reference[0:3] + '/' + reference[3:7] + '/' + reference[7:] + '+++'
same_ids = self.search(cr, uid,
[('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
('reference', '=', vals['reference'])])
if same_ids:
raise osv.except_osv(_('Warning!'),
_('The BBA Structured Communication has already been used!' \
'\nPlease create manually a unique BBA Structured Communication.'))
return super(account_invoice, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
for inv in self.browse(cr, uid, ids, context):
if vals.has_key('reference_type'):
reference_type = vals['reference_type']
else:
reference_type = inv.reference_type or ''
if reference_type == 'bba':
if vals.has_key('reference'):
bbacomm = vals['reference']
else:
bbacomm = inv.reference or ''
if self.check_bbacomm(bbacomm):
reference = re.sub('\D', '', bbacomm)
vals['reference'] = '+++' + reference[0:3] + '/' + reference[3:7] + '/' + reference[7:] + '+++'
same_ids = self.search(cr, uid,
[('id', '!=', inv.id), ('type', '=', 'out_invoice'),
('reference_type', '=', 'bba'), ('reference', '=', vals['reference'])])
if same_ids:
raise osv.except_osv(_('Warning!'),
_('The BBA Structured Communication has already been used!' \
'\nPlease create manually a unique BBA Structured Communication.'))
return super(account_invoice, self).write(cr, uid, ids, vals, context)
_columns = {
'reference': fields.char('Communication', size=64, help="The partner reference of this invoice."),
'reference_type': fields.selection(_get_reference_type, 'Communication Type',
required=True),
}
_constraints = [
(_check_communication, 'Invalid BBA Structured Communication !', ['Communication']),
]
account_invoice()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
aricchen/openHR
|
openerp/addons/l10n_be_invoice_bba/invoice.py
|
Python
|
agpl-3.0
| 11,922
|
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
from keras.datasets import mnist, cifar10, cifar100
from sklearn.preprocessing import LabelBinarizer
from nets import LeNet, LeNetVarDropout, VGG, VGGVarDropout
sess = tf.Session()
def main():
dataset = 'cifar10' # mnist, cifar10, cifar100
# Load the data
# It will be downloaded first if necessary
if dataset == 'mnist':
(X_train, y_train), (X_test, y_test) = mnist.load_data()
img_size = 28
num_classes = 10
num_channels = 1
elif dataset == 'cifar10':
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
img_size = 32
num_classes = 10
num_channels = 3
elif dataset == 'cifar100':
(X_train, y_train), (X_test, y_test) = cifar100.load_data()
img_size = 32
num_classes = 100
num_channels = 3
lb = LabelBinarizer()
lb.fit(y_train)
y_train_one_hot = lb.transform(y_train)
y_test_one_hot = lb.transform(y_test)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train = np.reshape(X_train,[-1,img_size,img_size,num_channels])
X_test = np.reshape(X_test,[-1,img_size,img_size,num_channels])
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
m = VGGVarDropout(img_size,num_channels,num_classes)
sess.run(tf.global_variables_initializer())
m.fit(X_train,y_train_one_hot,sess)
pred = m.predict(X_test,sess)
y_test = np.squeeze(y_test)
acc = np.mean(np.equal(y_test,pred))
print("\nTest accuracy: %.3f" % acc)
if __name__ == "__main__":
main()
|
cjratcliff/variational-dropout
|
main.py
|
Python
|
gpl-3.0
| 1,649
|
from pytest_regressions.data_regression import DataRegressionFixture
import gdsfactory as gf
from gdsfactory.component import Component
def test_get_bundle_optical(
data_regression: DataRegressionFixture, check: bool = True
) -> Component:
lengths = {}
c = gf.Component("test_get_bundle_optical")
w = c << gf.components.straight_array(n=4, spacing=200)
d = c << gf.components.nxn(west=4, east=0)
d.y = w.y
d.xmin = w.xmax + 200
ports1 = [
w.ports["o7"],
w.ports["o8"],
]
ports2 = [
d.ports["o2"],
d.ports["o1"],
]
routes = gf.routing.get_bundle(ports1, ports2, sort_ports=True, radius=10)
for i, route in enumerate(routes):
c.add(route.references)
lengths[i] = route.length
if check:
data_regression.check(lengths)
return c
def test_get_bundle_optical2(
data_regression: DataRegressionFixture, check: bool = True
) -> Component:
lengths = {}
c = gf.Component("test_get_bundle_optical2")
w = c << gf.components.straight_array(n=4, spacing=200)
d = c << gf.components.nxn(west=4, east=1)
d.y = w.y
d.xmin = w.xmax + 200
ports1 = w.get_ports_list(orientation=0)
ports2 = d.get_ports_list(orientation=180)
routes = gf.routing.get_bundle(ports1, ports2, sort_ports=True)
for i, route in enumerate(routes):
c.add(route.references)
lengths[i] = route.length
if check:
data_regression.check(lengths)
return c
if __name__ == "__main__":
c = test_get_bundle_optical(None, check=False)
# c = test_get_bundle_optical2(None, check=False)
c.show()
|
gdsfactory/gdsfactory
|
gdsfactory/tests/test_get_bundle_optical.py
|
Python
|
mit
| 1,657
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-07 18:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AAPL',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(default=django.utils.timezone.now)),
('price', models.DecimalField(decimal_places=4, default=0, max_digits=12)),
('market_cap', models.DecimalField(decimal_places=8, default=0, max_digits=18)),
],
options={
'ordering': ('date',),
'abstract': False,
},
),
migrations.CreateModel(
name='FANG',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(default=django.utils.timezone.now)),
('price', models.DecimalField(decimal_places=4, default=0, max_digits=12)),
('market_cap', models.DecimalField(decimal_places=8, default=0, max_digits=18)),
],
options={
'ordering': ('date',),
'abstract': False,
},
),
migrations.CreateModel(
name='FB',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(default=django.utils.timezone.now)),
('price', models.DecimalField(decimal_places=4, default=0, max_digits=12)),
('market_cap', models.DecimalField(decimal_places=8, default=0, max_digits=18)),
],
options={
'ordering': ('date',),
'abstract': False,
},
),
migrations.CreateModel(
name='GOOG',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(default=django.utils.timezone.now)),
('price', models.DecimalField(decimal_places=4, default=0, max_digits=12)),
('market_cap', models.DecimalField(decimal_places=8, default=0, max_digits=18)),
],
options={
'ordering': ('date',),
'abstract': False,
},
),
migrations.CreateModel(
name='NFLX',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(default=django.utils.timezone.now)),
('price', models.DecimalField(decimal_places=4, default=0, max_digits=12)),
('market_cap', models.DecimalField(decimal_places=8, default=0, max_digits=18)),
],
options={
'ordering': ('date',),
'abstract': False,
},
),
]
|
Hawk94/coin_tracker
|
main/stocks/migrations/0001_initial.py
|
Python
|
mit
| 3,287
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et
import unittest
import pycurl
from . import util
from . import appmanager
setup_module, teardown_module = appmanager.setup(('app', 8380))
class XferinfoCbTest(unittest.TestCase):
def setUp(self):
self.curl = pycurl.Curl()
self.curl.setopt(self.curl.URL, 'http://localhost:8380/long_pause')
def tearDown(self):
self.curl.close()
@util.min_libcurl(7, 32, 0)
def test_xferinfo_cb(self):
all_args = []
def xferinfofunction(*args):
all_args.append(args)
self.curl.setopt(pycurl.XFERINFOFUNCTION, xferinfofunction)
self.curl.setopt(pycurl.NOPROGRESS, False)
self.curl.perform()
assert len(all_args) > 0
for args in all_args:
assert len(args) == 4
for arg in args:
assert isinstance(arg, util.long_int)
@util.min_libcurl(7, 32, 0)
def test_sockoptfunction_fail(self):
called = {}
def xferinfofunction(*args):
called['called'] = True
return -1
self.curl.setopt(pycurl.XFERINFOFUNCTION, xferinfofunction)
self.curl.setopt(pycurl.NOPROGRESS, False)
try:
self.curl.perform()
self.fail('should have raised')
except pycurl.error as e:
assert e.args[0] in [pycurl.E_ABORTED_BY_CALLBACK], \
'Unexpected pycurl error code %s' % e.args[0]
assert called['called']
@util.min_libcurl(7, 32, 0)
def test_sockoptfunction_exception(self):
called = {}
def xferinfofunction(*args):
called['called'] = True
raise ValueError
self.curl.setopt(pycurl.XFERINFOFUNCTION, xferinfofunction)
self.curl.setopt(pycurl.NOPROGRESS, False)
try:
self.curl.perform()
self.fail('should have raised')
except pycurl.error as e:
assert e.args[0] in [pycurl.E_ABORTED_BY_CALLBACK], \
'Unexpected pycurl error code %s' % e.args[0]
assert called['called']
|
buaabyl/pycurl-win32
|
tests/xferinfo_cb_test.py
|
Python
|
lgpl-2.1
| 2,113
|
from HTMLParser import HTMLParser
import urllib2
visited = set()  # remember crawled URLs to avoid loops and repeats
class myParser(HTMLParser):
    def handle_starttag(self, tag, attrs):
        if (tag == "a"):
            for a in attrs:
                if (a[0] == 'href'):
                    link = a[1]
                    if (link.find('http') >= 0 and link not in visited):
                        print(link)
                        visited.add(link)
                        try:
                            # fetch the linked page and crawl its anchors in turn
                            page = urllib2.urlopen(link).read()
                            newParse = myParser()
                            newParse.feed(page)
                        except Exception:
                            pass  # skip links that fail to download or parse
url = "http://www.semo.edu"
request = urllib2.Request(url)
handle = urllib2.urlopen(request)
parser = myParser()
parser.feed(handle.read())
|
dreweggers/Vectrons_Klaw
|
net/spider.py
|
Python
|
mit
| 445
|
"""Kombu transport using SQLAlchemy as the message store."""
from Queue import Empty
from anyjson import loads, dumps
from sqlalchemy import create_engine
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import sessionmaker
from .. import virtual
from .models import Queue, Message, metadata
VERSION = (1, 1, 0)
__version__ = ".".join(map(str, VERSION))
class Channel(virtual.Channel):
_session = None
_engines = {} # engine cache
def _engine_from_config(self):
conninfo = self.connection.client
configuration = dict(conninfo.transport_options)
url = conninfo.hostname
return create_engine(url, **configuration)
def _open(self):
conninfo = self.connection.client
if conninfo.hostname not in self._engines:
engine = self._engine_from_config()
Session = sessionmaker(bind=engine)
metadata.create_all(engine)
self._engines[conninfo.hostname] = engine, Session
return self._engines[conninfo.hostname]
@property
def session(self):
if self._session is None:
_, Session = self._open()
self._session = Session()
return self._session
def _get_or_create(self, queue):
obj = self.session.query(Queue).filter(Queue.name == queue) \
.first()
if not obj:
obj = Queue(queue)
self.session.add(obj)
try:
self.session.commit()
except OperationalError:
self.session.rollback()
return obj
def _new_queue(self, queue, **kwargs):
self._get_or_create(queue)
def _put(self, queue, payload, **kwargs):
obj = self._get_or_create(queue)
message = Message(dumps(payload), obj)
self.session.add(message)
try:
self.session.commit()
except OperationalError:
self.session.rollback()
def _get(self, queue):
obj = self._get_or_create(queue)
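        # SQLite cannot upgrade a read lock to a write lock mid-transaction and
        # ignores SELECT ... FOR UPDATE, so take the write lock up front before
        # selecting the message to mark invisible.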
if self.session.bind.name == 'sqlite':
self.session.execute('BEGIN IMMEDIATE TRANSACTION')
try:
msg = self.session.query(Message) \
.with_lockmode('update') \
.filter(Message.queue_id == obj.id) \
.filter(Message.visible != False) \
.order_by(Message.sent_at) \
.order_by(Message.id) \
.limit(1) \
.first()
if msg:
msg.visible = False
return loads(msg.payload)
raise Empty()
finally:
self.session.commit()
def _query_all(self, queue):
obj = self._get_or_create(queue)
return self.session.query(Message) \
.filter(Message.queue_id == obj.id)
def _purge(self, queue):
count = self._query_all(queue).delete(synchronize_session=False)
try:
self.session.commit()
except OperationalError:
self.session.rollback()
return count
def _size(self, queue):
return self._query_all(queue).count()
class Transport(virtual.Transport):
Channel = Channel
default_port = 0
connection_errors = ()
channel_errors = ()
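# Usage sketch (engine URL is illustrative): the channel passes
# connection.client.hostname straight to SQLAlchemy's create_engine(), with
# transport_options forwarded as engine keyword arguments. A broker connection
# configured with this transport and hostname 'sqlite:///kombu.db' would
# therefore persist queues and messages in a local SQLite file.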
|
kumar303/rockit
|
vendor-local/kombu/transport/sqlalchemy/__init__.py
|
Python
|
bsd-3-clause
| 3,337
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP - Account renumber wizard
# Copyright (C) 2009 Pexego Sistemas Informáticos. All Rights Reserved
# Copyright (c) 2013 Servicios Tecnológicos Avanzados
# (http://www.serviciosbaeza.com)
# Pedro Manuel Baeza <pedro.baeza@serviciosbaeza.com>
# Copyright (c) 2013 Joaquin Gutierrez (http://www.gutierrezweb.es)
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "Account renumber wizard",
'version': "1.0",
'author': "Pexego",
'website': "http://www.pexego.es",
'category': "Accounting & Finance",
'contributors': ['Pedro M. Baeza', 'Jordi Llinares', 'Joaquín Gutierrez'],
'description': """
This module adds a wizard to renumber account moves by date only for admin.
===========================================================================
    The wizard, which is accessible from the "End of Period" menu item,
lets you select journals, periods, and a starting number. When
launched, it renumbers all posted moves that match selected criteria
(after ordering them by date).
It will recreate the sequence number for each account move
using its journal sequence, which means that:
- Sequences per journal are supported.
- Sequences with prefixes and suffixes based on the move
date are also supported.
""",
"license": "AGPL-3",
"depends": [
'account',
],
"demo": [],
"data": [
'wizard/wizard_renumber_view.xml',
],
'installable': True,
'auto_install': False,
}
|
ClearCorp-dev/account-financial-tools
|
account_renumber/__openerp__.py
|
Python
|
agpl-3.0
| 2,370
|
# -*- coding: utf-8 -*-
# OpenERP, Open Source Management Solution
# Copyright (c) 2015 Rooms For (Hong Kong) Limited T/A OSCG
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import main
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
rfhk/rfhk-addons
|
website_crm_notify/controllers/__init__.py
|
Python
|
agpl-3.0
| 900
|
# -*- coding: UTF-8 -*-
from django.shortcuts import render
from django.template import loader, Context
from django.http import HttpResponse, HttpResponseRedirect
from travels.models import *
from photo.models import *
from favourite.models import *
from accounts.models import *
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.http import Http404
import time
from django.core.files.base import ContentFile
from django.views.decorators.csrf import csrf_exempt
from django.contrib import auth
from django.core.paginator import Paginator, InvalidPage, EmptyPage, PageNotAnInteger
from django.core.mail import send_mail
from django.utils import timezone
import datetime
# Create your views here.
def archive(request):
travels = Travels.objects.all()
photos = photoList.objects.all()
users = MyProfile.objects.all()
user = request.user
travels_list = Travels.objects.order_by('-pub_date')
    # Full list of travel posts for pagination
paginator = Paginator(travels_list, 555)
page = request.GET.get('page')
try:
travels = paginator.page(page)
except PageNotAnInteger:
travels = paginator.page(1)
except EmptyPage:
travels = paginator.page(paginator.num_pages)
return render_to_response('travels.html', {"travels": travels, "photos":photos, "user": user, "users": users}, context_instance=RequestContext(request))
def detail(request, travels_id):
users = MyProfile.objects.all()
travelsall = Travels.objects.all()
try:
travels = Travels.objects.get(id=travels_id)
travels.count_hit += 1
travels.save()
favlistall = Favourite.objects.filter(travels=travels)
favis = Favourite.objects.filter(user__username=request.user, travels=travels)
except Travels.DoesNotExist:
raise Http404
return render_to_response('travelsdetails.html', {"travels": travels, "users": users, "travelsall": travelsall, "favlistall": favlistall, "favis": favis}, context_instance=RequestContext(request))
# Like a travel post
def like_tarvels(request,travels_id):
p_id = None
LIKED = '谢谢鼓励,但你已经赞过该游记啦!'
liked_post = request.session.get('liked')
p_id = travels_id
if p_id == liked_post:
return HttpResponse(LIKED)
travels = get_object_or_404(Travels, id=travels_id)
travels.praise_num += 1
    likes = travels.praise_num
travels.save()
request.session['liked'] = p_id
return HttpResponse('点赞成功!点赞数+%s' %likes)
def uploadimage(request):
if request.method == 'POST':
callback = request.GET.get('CKEditorFuncNum')
try:
path = "media/upload/pic/" + time.strftime("%Y%m%d%H%M%S", time.localtime())
f = request.FILES["upload"]
file_name = path + "_" + f.name
des_origin_f = open(file_name, "wb+")
for chunk in f:
des_origin_f.write(chunk)
des_origin_f.close()
except Exception, e:
print e
res = r"<script>window.parent.CKEDITOR.tools.callFunction("+callback+",'/"+file_name+"', '');</script>"
return HttpResponse(res)
else:
raise Http404()
# Login view:
# @csrf_exempt
def login(request):
errors = []
register_flag = False
redirect_to = request.REQUEST.get('next', '')
if request.method == "POST":
if not request.POST.get('username', ''):
errors.append(u"请输入用户名")
if not request.POST.get('password', ''):
errors.append(u"请输入密码")
else:
name = request.POST.get('username', '')
password = request.POST.get('password', '')
user = auth.authenticate(username=name, password=password)
if user is not None and user.is_active:
auth.login(request, user)
return HttpResponseRedirect(redirect_to)
else:
errors.append(u'登录失败,请重试')
response = render_to_response('login.html', {'errors': errors, },
context_instance=RequestContext(request))
return render(request, 'login.html', locals())
def logout(request):
redirect_to = request.REQUEST.get('next', '')
if request.user.is_authenticated():
auth.logout(request)
return render(request, "logout.html", {'redirect_to':redirect_to})
else:
return HttpResponseRedirect(redirect_to)
# Registration view
from django.contrib.auth.models import User
@csrf_exempt
def register(request):
errors=[]
if request.method == 'POST':
name = request.POST.get('username', '')
password1 = request.POST.get('password1', '')
password2 = request.POST.get('password2', '')
if len(name) < 3:
errors.append(u'用户名长度必须大于4个字符')
elif len(password1) < 6:
errors.append(u'密码长度必须大于6个字符')
elif password1 != password2 :
errors.append(u'两次输入密码必须相同')
else:
try:
user = User.objects.get(username=name)
errors.append(u'用户名已被注册')
return render(request,'register.html',{'errors':errors})
except User.DoesNotExist:
user = User.objects.create_user(
username = name,
password = password1,
)
register_flag = True
user.save()
user = auth.authenticate(username=name,
password=password1)
auth.login(request, user)
return render(request,'login.html',{'register_flag':register_flag})
return render(request,'register.html',{'errors':errors})
else:
return render(request,'register.html')
# Search
def search(request):
errors = []
if 'q' in request.GET:
q = request.GET['q']
if not q:
errors.append(u'请填写查询内容!')
elif len(q) > 50:
errors.append(u'填写内容必须小于50字!')
else:
travels = Travels.objects.filter(title__icontains=q)
return render_to_response('search.html',
{'travels': travels, 'q': q}, context_instance=RequestContext(request))
return render_to_response('search.html', {'errors': errors},
context_instance=RequestContext(request))
# Group posts by city
def search_city(request, city):
users = MyProfile.objects.all()
try:
travels_list = Travels.objects.filter(city__iexact=city)
except Travels.DoesNotExist:
raise Http404
return render_to_response('city.html', {'travels_list' : travels_list, 'users':users}, context_instance=RequestContext(request))
from travels.forms import *
from django.core.urlresolvers import reverse
def list(request, author):
users = MyProfile.objects.all()
#favlistall = Favourite.objects.filter(travels=travels)
try:
list = Travels.objects.filter(author__iexact=author)
except Travels.DoesNotExist:
raise Http404
return render_to_response('../templates/userena/tra_list.html', {'list' : list, 'users':users}, context_instance=RequestContext(request))
# Create a new post:
@csrf_exempt
def add(request):
user = request.user
if request.method == "POST":
uf = UserForm(request.POST, request.FILES)
if uf.is_valid():
title = uf.cleaned_data['title']
content = uf.cleaned_data['content']
contextinfo = uf.cleaned_data['contextinfo']
image = uf.cleaned_data['image']
bigimage = uf.cleaned_data['bigimage']
city = uf.cleaned_data['city']
author = request.user.username
author_id = request.user.id
travels = Travels()
travels.title = title
travels.content = content
travels.contextinfo = contextinfo
travels.image = image
travels.bigimage = bigimage
travels.city = city
travels.author = author
travels.author_id = author_id
travels.save()
return HttpResponseRedirect('/list/'+author)
else:
uf = UserForm()
return render_to_response('../templates/userena/tra_add.html',{'uf':uf,'user':user})
# Update a post
from django.shortcuts import render_to_response,get_object_or_404
@csrf_exempt
def updatetra(request, travels_id):
travels = get_object_or_404(Travels, pk=int(travels_id))
if request.method == 'POST':
uf = UserForm(request.POST, instance=travels)
if uf.is_valid():
title = uf.cleaned_data['title']
content = uf.cleaned_data['content']
contextinfo = uf.cleaned_data['contextinfo']
image = uf.cleaned_data['image']
bigimage = uf.cleaned_data['bigimage']
city = uf.cleaned_data['city']
author = request.user.username
author_id = request.user.id
travels.title = title
travels.content = content
travels.contextinfo = contextinfo
travels.image = image
travels.bigimage = bigimage
travels.city = city
travels.author = author
travels.author_id = author_id
travels.save()
return HttpResponseRedirect('/list/'+author)
return render_to_response('add.html',{'uf': UserForm(instance=travels)})
# Delete a post
@csrf_exempt
def deletetra(request, travels_id):
travels = get_object_or_404(Travels, pk=int(travels_id))
travels.delete()
author = request.user.username
return HttpResponseRedirect('/list/'+author)
|
liuasliy/rdstourcms
|
travels/views.py
|
Python
|
mit
| 9,812
|
from pylab import *
import pyasf
gamma = 1
bg = 1e-12
det = pyasf.AreaDetector((22, 0, 0.2)) # delta, nu, dist
energy = [28000] # eV
alpha = 15.7 # angle of incidence
psi = 0 # azimuth
cs = pyasf.unit_cell("7101739") # from crystallography open database
s = pyasf.Geometry.ThreeCircleVertical(cs, (1,1,1))
fig, ax = subplots(1,3)
ax = ax.ravel()
Q,im = s.Diffraction2D(energy, alpha, det, psi, divergence=(1,1), verbose=True, dEvsE=1e-4)
im = im**gamma
ax[0].imshow(flipud(log10(im+bg)))#, vmax=1000.**gamma)
ax[0].set_title("$I_\\mathrm{max}=%g$"%(im.max()))
ax[1].pcolormesh(Q[0],Q[1],log10(im+bg))
ax[1].set_xlabel("h")
ax[1].set_ylabel("k")
ax[2].pcolormesh(Q[0],Q[2],log10(im+bg))
ax[2].set_xlabel("h")
ax[2].set_ylabel("l")
show()
|
carichte/pyasf
|
pyasf/examples/Diffraction2d.py
|
Python
|
gpl-3.0
| 744
|
import os
from django.shortcuts import render
from django.http import HttpResponseNotFound
from django.contrib.staticfiles import finders
def view_presentation(request, filename):
# Find the file in staticfiles.
full_path = finders.find(os.path.join("presentation", filename + ".htm"))
if not full_path or not os.path.exists(full_path):
return HttpResponseNotFound("Unknown presentation name: " + filename)
with open(full_path) as presentation_file:
presentation_html = presentation_file.read()
return render(request, "presentation/view.dj.htm", {
"presentation_html": presentation_html,
})
|
w0rp/w0rpzone
|
presentation/views.py
|
Python
|
bsd-2-clause
| 645
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
import webnotes
def execute():
webnotes.reload_doc("utilities", "doctype", "address")
webnotes.conn.auto_commit_on_many_writes = True
for lead in webnotes.conn.sql("""select name as lead, lead_name, address_line1, address_line2, city, country,
state, pincode, status, company_name from `tabLead` where not exists
(select name from `tabAddress` where `tabAddress`.lead=`tabLead`.name) and
(ifnull(address_line1, '')!='' or ifnull(city, '')!='' or ifnull(country, '')!='' or ifnull(pincode, '')!='')""", as_dict=True):
if set_in_customer(lead):
continue
create_address_for(lead)
webnotes.conn.auto_commit_on_many_writes = False
def set_in_customer(lead):
customer = webnotes.conn.get_value("Customer", {"lead_name": lead.lead})
if customer:
customer_address = webnotes.conn.sql("""select name from `tabAddress`
where customer=%s and (address_line1=%s or address_line2=%s or pincode=%s)""",
(customer, lead.address_line1, lead.address_line2, lead.pincode))
if customer_address:
webnotes.conn.sql("""update `tabAddress` set lead=%s, lead_name=%s
where name=%s""", (lead.lead, lead.company_name or lead.lead_name, customer_address[0][0]))
return True
return False
def create_address_for(lead):
address_title = lead.company_name or lead.lead_name or lead.lead
for c in ['%', "'", '"', '#', '*', '?', '`']:
address_title = address_title.replace(c, "")
if webnotes.conn.get_value("Address", address_title.strip() + "-" + "Billing"):
address_title += " " + lead.lead
lead.update({
"doctype": "Address",
"address_type": "Billing",
"address_title": address_title
})
del lead["company_name"]
del lead["status"]
lead_bean = webnotes.bean(lead)
lead_bean.ignore_mandatory = True
lead_bean.insert()
|
Yellowen/Owrang
|
patches/june_2013/p10_lead_address.py
|
Python
|
agpl-3.0
| 1,899
|
"""
Run gdb with:
gdb -ex 'source factory-test.py' -ex 'target remote localhost:3333' -ex 'break orchardShellRestart' -ex 'continue' -ex 'testpane' build/orchard.elf
"""
import gdb
import sys
import gtk
def run_test(test_name, test_type):
tests = list_tests()
test_index = tests.index(test_name)
testidx = "first_test[" + str(test_index) + "]"
cmd = "call "+testidx+".test_function("+testidx+", " + str(test_type) + ")"
result = gdb.execute(cmd, False, True)
for line in result.split("\n"):
if line.startswith("$"):
# print("Evaluating line: " + line)
# return gdb.parse_and_eval(line.split(' ')[0])
return line.split(' ')[2].replace("orchardResult", "")
return None
def list_tests():
tests = []
i = 0
while True:
val = gdb.parse_and_eval("first_test[" + str(i) + "].test_name")
if int(val.referenced_value()) == 0:
break
else:
tests.append(val.string())
i = i + 1
return tests
class OrchardCall(gdb.Command):
def __init__(self):
super(OrchardCall, self).__init__(
name = "orchardcall",
command_class = gdb.COMMAND_DATA,
completer_class = gdb.COMPLETE_SYMBOL
)
def invoke(self, argument, from_tty):
print("Greetings again from Python")
list_tests()
OrchardCall()
from threading import Thread
class TestGtkThread(Thread):
def destroy(self, *args):
self.window.hide()
def runtest(self, button, arg):
testname = button.get_label()
self.result_label.set_label("Testing " + testname + "...")
# Force a redraw
gtk.main_iteration()
result = run_test(testname, arg)
self.result_label.set_label("Testing " + testname + ": " + result)
def run(self):
gtk.gdk.threads_init()
tests = list_tests()
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.connect("destroy", self.destroy)
self.window.set_border_width(10)
self.window.set_title("Orchard Tests")
table = gtk.Table(4, len(tests) + 2, False)
row = 0
self.window.add(table)
table.attach(gtk.Label("Poweron"), 0, 1, row, row + 1)
table.attach(gtk.Label("Trivial"), 1, 2, row, row + 1)
table.attach(gtk.Label("Comprehensive"), 2, 3, row, row + 1)
table.attach(gtk.Label("Interactive"), 3, 4, row, row + 1)
row = row + 1
for test in tests:
button = gtk.Button(test)
button.connect("clicked", self.runtest, 0)
table.attach(button, 0, 1, row, row + 1)
button = gtk.Button(test)
button.connect("clicked", self.runtest, 1)
table.attach(button, 1, 2, row, row + 1)
button = gtk.Button(test)
button.connect("clicked", self.runtest, 2)
table.attach(button, 2, 3, row, row + 1)
button = gtk.Button(test)
button.connect("clicked", self.runtest, 3)
table.attach(button, 3, 4, row, row + 1)
row = row + 1
self.result_label = gtk.Label("")
table.attach(self.result_label, 0, 4, row, row + 1)
row = row + 1
self.window.show_all()
gtk.main()
class TestPane(gdb.Command):
def __init__ (self):
super(TestPane, self).__init__ ("testpane",
gdb.COMMAND_NONE,
gdb.COMPLETE_NONE)
self.init = False
def invoke(self, arg, from_tty):
self.dont_repeat()
gdb.execute("set pagination off")
# gdb.execute("mon reset halt")
# gdb.execute("continue")
if not self.init:
self.init = True
v = TestGtkThread()
v.setDaemon(True)
v.start()
TestPane()
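# Typical interactive use from within gdb, once sourced as shown in the module
# docstring (a sketch; the elf and remote target are assumed already configured):
#   (gdb) source factory-test.py
#   (gdb) testpane          # opens the GTK grid with one button per test type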
|
bunnie/chibios-orchard
|
orchard/factory-test.py
|
Python
|
gpl-3.0
| 3,923
|
from django.conf import settings  # settings.HOME_SLUG is referenced below
from django.conf.urls import include, url
from django.views.generic.base import RedirectView
from cms.models import *
from cms.views import show
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = [
    url(r'^$', show, {'slug': "/%s" % settings.HOME_SLUG}, name='cms.home'),  # content defined by the home slug
    url(r'^%s/$' % settings.HOME_SLUG, RedirectView.as_view(url='/', permanent=False)),  # redirect the home slug back to /
    url(r'(?P<slug>.*)/$', show, name='cms.show'),
]
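# Resolution examples (a sketch assuming settings.HOME_SLUG == 'home'):
#   /        -> show(slug='/home')   via 'cms.home'
#   /home/   -> temporary redirect back to /
#   /about/  -> show(slug='about')   via 'cms.show'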
|
eliasfernandez/django-simplecms
|
cms/urls.py
|
Python
|
bsd-2-clause
| 537
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 7, transform = "Logit", sigma = 0.0, exog_count = 20, ar_order = 0);
|
antoinecarme/pyaf
|
tests/artificial/transf_Logit/trend_MovingAverage/cycle_7/ar_/test_artificial_128_Logit_MovingAverage_7__20.py
|
Python
|
bsd-3-clause
| 264
|
#!/usr/bin/env python
# Author Version Date
# -----------------------------------------------
# Keith Smith (Nottingham) 0.8 23 Nov 2007
# Module for finding SALT guide star
# - full changelog is in /doc/changelog.txt
# - manual is (will be?) in /doc/manual.txt
__version__ = "0.8"
__author__ = "Keith Smith"
__doc__="\nSALT guide star finder, version "+__version__ +"""
Finds guide stars suitable for use with the Southern African Large Telescope
Uses the HST Guide Star Catalogue 2.3.2 and the VizieR catalogue service
Usage: python guide_stars.py [OPTIONS] [TARGET]
TARGET should be the coordinates of the target or a SIMBAD-resolvable name
Acceptable formats are:
colon-separated sexagesimal eg. 15:43:17.2 -18:56:12.9
space-separated sexagesimal eg. '15 43 17.2' '-18 56 12.9'
decimal eg. 15.7837 -18.9543
target name eg. m31 or 'RV Cen'
All coordinates should be J2000 right ascension and declination
OPTIONS are as follows, arguments are compulsory for both long and short forms:
--help Prints this help
-v --verbose Prints useful information during execution
-d --debug Prints debugging information, implies -v
-f --filter=FILTER Uses the FILTER filter,
defaults to V
-i --instrument=INS Uses the INS instrument,
defaults to RSS
-r --radius=RADIUS Excludes RADIUS arcseconds around target,
defaults to 2 arcseconds"""
# import required modules
import urllib2 # reading URLs
from xml.dom import minidom # XML parsing
import csv, StringIO # CSV parsing
from numpy import * # array
import sys, getopt # command line switches
import warnings
from salttime import dec2sex
# define exceptions
class GuideStarError(Exception): pass # misc errors in this module
class VizError(Exception): pass # errors thrown by VizieR
#class URLError(Exception): pass # errors accessing the URL
class BadInput(Exception): pass # incorrect input
# define global variables, later read from the command line
# define helper functions
def usage():
print __doc__
    raise SystemExit(2) # 2 is the conventional UNIX exit code for bad command line input
def isDecimal(string):
"returns true if the input string converts to a valid decimal"
try:
float(string)
return True
except ValueError:
return False
def checkdms(dms):
"""Verify a sexagesimal string; returns True if valid, False if not
"""
assert isinstance(dms, str)
try:
d=dms.split(':')
for i in range(3): float(d[i])
return True
except:
return False
def checkInput(targetRA, targetDec, imfilter, instrument, targetRadius, maxRadius=300):
"validates input to the main function"
if isDecimal(targetDec):
# dec is decimal
if isDecimal(targetRA)==False: # check RA in same format
raise BadInput, 'RA and dec appear to be in different formats'
        if float(targetDec) > 0:  # compare numerically; a raw str/int comparison is always True in Python 2
            targetDec = '+' + dec2sex(float(targetDec))
        else:
            targetDec = dec2sex(float(targetDec))
targetRA=dec2sex(float(targetRA)/15.0)
elif isDecimal(targetRA):
# RA is in decimal, but dec wasn't
# pretty sure there are no valid target names which are decimals...
# need to check this as decimals pass as valid sexa!
raise BadInput, 'RA and dec appear to be in different formats'
    elif checkdms(targetDec) == True:
        # target already a colon-separated value
        # couldn't check this first as this function returns True
        # for decimals (for some reason), but False if space separated sexa
        if checkdms(targetRA) == False: # check RA in same format
            raise BadInput, 'RA and dec appear to be in different formats'
        pass # both already in correct format
    elif checkdms(targetDec.replace(' ',':')) == True:
        # is a valid space separated sexa, convert to colon separated
        if (checkdms(targetRA.replace(' ',':')) == False or
            (targetRA.replace(' ',':')==targetRA)):
            # check RA works colon separated and wasn't to start with
            raise BadInput, 'RA and dec appear to be in different formats'
        targetDec = targetDec.replace(' ',':')
        targetRA = targetRA.replace(' ',':')
else:
# dec isn't decimal, or space/colon seperated sexa, or blank
raise BadInput, 'Format of declination not recognised, was given: '+str(targetDec)
# convert filters to those in the GSC
if imfilter == "":
warnings.warn('No filter specified, defaulting to V')
imfilter = 'V'
elif imfilter in ('j', 'F', 'V', 'N'):
pass # these are the native filters in the GSC
elif imfilter in ('U', "u'", 'u'):
warnings.warn('Specified '+ imfilter + ' filter, but GSC has very little data this far blue. Falling back to photographic Bj')
imfilter = 'j' # there IS a U field in GSC, but hardly any entries
elif imfilter in ('B', "g'", 'b', 'v'):
warnings.warn('Specified ' + imfilter + ' filter, closest GSC band is photographic Bj')
imfilter = 'j' # there is also a Bmag field in GSC, but less entries
    elif imfilter == 'y':
warnings.warn('Specified ' + imfilter + ' filter, closest GSC band is photographic V')
imfilter = 'V'
elif imfilter in ('R', "r'"):
warnings.warn('Specified ' + imfilter + ' filter, closest GSC band is photographic F')
imfilter = 'F'
elif imfilter in ('I', "i'", "z'"):
warnings.warn('Specified ' + imfilter + ' filter, closest GSC band is photographic N')
imfilter = 'N'
else:
raise BadInput, "Filter '%s' not recognised" % str(imfilter)
# check instrument input
instrument = instrument.lower()
if instrument == "":
warnings.warn('No instrument specified, defaulting to RSS')
instrument = 'rss'
elif instrument in ['rss','pfis']:
instrument = 'rss'
elif instrument in ["salticam","scam"]:
warnings.warn('Selected SALTICAM; SALTICAM values not yet validated')
instrument = 'scam'
elif instrument == "hrs":
warnings.warn('Selected HRS; HRS values not yet validated')
instrument = 'hrs'
else:
raise BadInput, 'Instrument "' + str(instrument) + '" not recognised'
# check radius
if targetRadius == '':
warnings.warn('No target radius specified, defaulting to 2 arcsec')
targetRadius = 2.
elif 0 < targetRadius < maxRadius:
pass
elif targetRadius > maxRadius:
raise BadInput, 'Target radius '+str(targetRadius)+' arcsec is larger than the science FoV'
else:
raise BadInput, 'Target radius of ' + str(targetRadius) + 'arcsec is invalid'
return (targetRA, targetDec, imfilter, instrument, targetRadius)
def queryUrl(url):
"accesses the input url and returns the response"
request = urllib2.Request(url)
opener = urllib2.build_opener()
request.add_header('User-Agent', 'SALT guide star finder/'+ __version__ +' www.salt.ac.za')
try:
data = opener.open(request).read()
except:
raise GuideStarError, "Could not connect to VizieR. Please check your internet connection"
return data
def constructVizUrl(targetRA, targetDec, min_r, max_r, imfilter, min_mag, max_mag):
"constructs the url for the VizieR guide star query"
url = "http://vizier.u-strasbg.fr/cgi-bin/asu-xml?" # base URL for VizieR XML queries
url += "-source=I/305/out" # use HST GSC 2.3.2
url += "&-c=" + targetRA
url += targetDec + "&-c.eq=J2000" # target section
url += "&-c.rm=%s,%s" % (min_r, max_r) # anulus range (arcmin)
url += "&-out=%smag&%smag=%s..%s" % (imfilter, imfilter, min_mag, max_mag)
url += "&-out.max=1000" # max entries
url += "&-out=_r,_RA*-c.eq,_DE*-c.eq" # calculate 2000 RA, dec and distance
url += ",Class&Class=0" # only objects flagged as stars
url += "&-oc.form=sexa" # output coords in sexagesimal
url += "&-sort=-_r" # sort by decreasing r
url += "&-mime=CSV" # data in CSV format
return url
def parseVizResponse(response):
"parses the response from VizieR into into a list of data for each star"
# extract CSV table from XML
## if "****" in response:
## # actually puts this in
## #<INFO ID="Errors" value="(**** indicates an Error,++++ indicates a Warning)">
## #blah blah blah
## #</INFO>
## # could handle this better with a little parsing
## errordata=""
## for line in response:
## if "****" in response:
## errordata += line
## raise VizError, "Vizier generated an error. The data returned was:\n" + errordata
xml = minidom.parseString(response)
table = xml.getElementsByTagName('CSV')
if len(table)==0:
info = xml.getElementsByTagName('INFO')
for element in info:
id = element.getAttribute('ID')
if id=='Errors':
#found an error
raise VizError, 'Vizier generated an error. The error was:' + element.firstChild.data
# no CSV table was found ie. no data
# needs some error checking - might be a bad url or target
return [],0,[]
    colsep = table[0].getAttribute('colsep') # CSV column separator
headlines = table[0].getAttribute('headlines') # CSV header lines
table = table[0].firstChild.data # the CSV table itself
    # convert from unicode
colsep = colsep.encode('ASCII')
headlines = int(headlines)
# extract data from CSV table
csvreader=csv.reader(StringIO.StringIO(table), delimiter=colsep)
# this next bit is very kludgy, weird csvreader object and array manipulations
for row in csvreader: # can't index csvreader!
if len(row)>0: # blank rows in the reader!
if 'csvtable' in locals(): # checks to see if csvtable exists
# python has no exist() function!
csvtable=vstack((csvtable,array([row])))
# fragile, dimension mismatch possible
else:
csvtable=array([row])
# can't append to blank arrays!
# old way of using dictionaries:
#if len(row)>=4:
#rowdict = {0:row[0],1:row[1],2:row[2],3:row[3],4:row[4]}
#csvtable.append(rowdict) # last column Class = 0 for all
#csvtable = csvtable[int(headlines):] # strip headers
# header = csvtable[:headlines]
# data = csvtable[headlines:]
n_stars=len(csvtable)-headlines
# split off the Class column, as an added bonus won't break if there isn't one
index = (csvtable[0,:]!='Class') # boolean index of first row != Class
csvtable = csvtable[:,index] # just retain columns in the index
return csvtable, n_stars, headlines
def sortResults(table, n_stars, headlines, min_r, max_r, min_mag, max_mag):
"Sorts the results to select the best guide stars"
# split the header off the table
header=table[:headlines]
data=table[headlines:]
top=header[0,:]
r_index = top=='_r'
if not True in r_index:
raise GuideStarError, ('Could not find a radius column in search results\n' +
'Returned columns are: ' + str(top))
r=data[:,r_index]
mag_index = (top=='Vmag') + (top=='Nmag') + (top=='Bmag') +\
(top=='Fmag') + (top=='Umag') + (top=='jmag')
# hard coded brute force
if not True in mag_index:
raise GuideStarError, ('Could not find a magnitude column in search results\n' +
'Returned columns are: ' + str(top))
mag=data[:,mag_index]
# convert to floats
r=r.astype(float64)
mag=mag.astype(float64)
r_pref=select([r<min_r, (min_r<=r) & (r<=max_r), r>max_r],
[min_r-r, 0., r-max_r])
    mag_pref=select([mag<min_mag, (min_mag<=mag) & (mag<=max_mag), mag>max_mag],
                    [min_mag-mag, 0., mag-max_mag])
pref = 2*r_pref + mag_pref # weighted such that 1 mag = 0.5 arcmin
sort_index = lexsort(keys=(mag.T,pref.T),axis=1) # decide the order from the pref list
# in order, so LOWEST first
# note the index is crazy - lists the indexes in order, not the order of indices!
sorted = data[sort_index]
pref_sort = pref[sort_index]
n_stars = min(n_stars,6) # return at most 6 stars
# replace some of the column headings
top[top.tolist().index('_r')]='Offset'
top[top.tolist().index('_RAJ2000')]='RA (J2000)'
top[top.tolist().index('_DEJ2000')]='Dec (J2000)'
header[0,:]=top
# put the header back on
data = vstack((header,sorted[:,:n_stars][0]))
return data, n_stars
def QueryCatalog(targetRA, targetDec, abs_min_r=0, abs_max_r=10, imfilter='V',
abs_min_mag=0, abs_max_mag=30, header=True):
'''Finds guide stars based upon the HST GSC 2.3.2.
Pass target RA and Dec (J2000), and optionally
filter, instrument, and target radius in arcsec.
Parameters
----------
targetRA: string
string specifying the target RA. The format should either
be the decimal position or a colon or space separated values
    targetDec: string
        string specifying the target Dec. The format should either
        be the decimal position or a colon or space separated values
abs_min_r: float
Minimum radius in arcminutes for search annulus
abs_max_r: float
Maximum radius in arcminutes for search annulus
imfilter: string
Input filter for the observations
abs_min_mag: float
Minimum magnitude for stars (bright limit)
    abs_max_mag: float
        Maximum magnitude for stars (faint limit)
header: boolean
Include header in array
Returns
-------
data: array
Array containing the sorted results from the query
n_stars: int
Number of stars returned
headlines: int [optional]
Number of lines in the header. Only returned if header=True
'''
# set up query
url = constructVizUrl(targetRA, targetDec, abs_min_r, abs_max_r, imfilter, abs_min_mag, abs_max_mag)
# retrieve data
response = queryUrl(url)
#sort the data
data, n_stars, headlines = parseVizResponse(response)
if not header:
return data[headlines:], n_stars
return data, n_stars, headlines
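# Example call (an illustrative sketch: the coordinates are arbitrary and a
# live connection to VizieR is assumed):
#   data, n_stars, headlines = QueryCatalog('15:43:17.2', '-18:56:12.9',
#                                           abs_min_r=1., abs_max_r=5.,
#                                           imfilter='V')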
# main function
def findGuideStars(targetRA, targetDec="", imfilter='', instrument='', targetRadius=2.):
'''Finds SALT-suitable guide stars based upon the HST GSC 2.3.2.
Pass target RA and Dec (J2000), and optionally
filter, instrument, and target radius in arcsec.
Parameters
----------
targetRA: string
string specifying the target RA. The format should either
be the decimal position or a colon or space separated values
    targetDec: string
        string specifying the target Dec. The format should either
        be the decimal position or a colon or space separated values
imfilter: string
Input filter for the observations
instrument: string
Instrument to be used for guiding: either rss or scam
targetRadius: float
Radius in arcsec to search for targets. Maximum value should be 300"
'''
(targetRA, targetDec, imfilter, instrument, targetRadius)= \
checkInput(targetRA, targetDec, imfilter, instrument, targetRadius)
# set up instrument-specific settings
# this should go into the check input function, but too much passing
# for the moment - will change later
# alternatively use classes?
instrument = instrument.lower()
if instrument == 'rss':
        pref_max_r = 5. # preferred outer edge of annulus in arcmin
        pref_min_r = 4. # preferred inner edge of annulus in arcmin
        abs_max_r = 5. # absolute limit on outer edge
        abs_min_r = 1. # absolute limit on inner edge
        pref_max_mag = 15. # preferred magnitude limits
        pref_min_mag = 12.
        abs_max_mag = 19. # absolute magnitude limits
        abs_min_mag = 10.
elif instrument == 'scam':
# salticam guider has not yet been installed - use values from rss
# if you fill this in, please change warning message in input check above
pref_max_r = 5.
pref_min_r = 4.
abs_max_r = 5.
abs_min_r = 1.
pref_max_mag = 15.
pref_min_mag = 12.
abs_max_mag = 19.
abs_min_mag = 10.
elif instrument == "hrs":
# hrs has not yet been built - use values from rss
# if you fill this in, please change warning message in input check above
pref_max_r = 5.
pref_min_r = 4.
abs_max_r = 5.
abs_min_r = 1.
pref_max_mag = 15.
pref_min_mag = 12.
abs_max_mag = 19.
abs_min_mag = 10.
else:
raise GuideStarError, "Instrument settings not found, but passed input checking. This shouldn't happen"
# modify radii if the source is very large
targetRadius = (targetRadius / 60.) # convert from arcsec to arcmin
if targetRadius > pref_max_r:
        if targetRadius > abs_max_r:
            # keep the (status, data) return shape used everywhere else
            return 'Failed, target is larger than the guide star field of view', []
else:
pref_max_r = abs_max_r
if targetRadius > abs_min_r:
abs_min_r = targetRadius # + a bit to avoid vignetting?
if abs_min_r > pref_min_r:
pref_min_r = abs_min_r
gsdata = []
#query the catalog and return the results as an array
data,n_stars, headlines=QueryCatalog(targetRA, targetDec, abs_min_r, abs_max_r, imfilter, abs_min_mag, abs_max_mag)
#sort the stars
if n_stars>0:
data, n_stars = sortResults(data, n_stars, headlines, \
pref_min_r, pref_max_r, pref_min_mag, pref_max_mag)
if n_stars>1:
print('Finished, selected ' + str(n_stars) + ' guide stars')
status = 'Found ' + str(n_stars) + ' stars'
elif n_stars==1:
print('Finished, found one guide star')
status = 'Found 1 star'
else:
print('Failed, found no guide stars')
if imfilter != 'V':
print('Falling back to V filter...')
status, data = findGuideStars(targetRA, targetDec, 'V', instrument, targetRadius*60.)
status = status + ' after falling back to V'
else:
print('Nothing else to fall back to. Try modifying your query')
status = 'Failed, no suitable stars found'
    return status, data # both values are expected by the callers below
if __name__ == "__main__":
# executes if module is run from the command line
# read command line options
try:
opts,args = getopt.getopt(sys.argv[1:],"vdf:i:r:",
["verbose","debug","filter=","instrument=","radius=","help"])
except getopt.GetoptError, inst:
print inst
print 'Use --help to get a list of options'
sys.exit(2)
ra, dec, imfilter, ins, radius = "","","","",""
# parse them to the relevant variables
for opt, arg in opts:
        if opt in ('--help',):
usage()
elif opt in ('-v','--verbose'):
verbose=True
elif opt in ('-d','--debug'):
verbose=True # implied
debug=True
elif opt in ('-f','--filter'):
imfilter = arg
elif opt in ('-i','--instrument'):
ins = arg
elif opt in ('-r','--radius'):
radius = float(arg)
else:
print 'Unknown option: ' + opt
usage()
for argument in args:
if ra=="":
ra = argument
elif dec=="":
dec = argument
else:
#too many arguments
raise BadInput, 'Too many arguments, takes one or two input arguments'
if ra=="": # no target was specified
raise BadInput, 'No target specified'
    status, data = findGuideStars(ra, dec, imfilter, ins, radius)
    print status
    print data
sys.exit(0)
### Testing stuff
# print "Debugging mode: using default values"
## verbose=True
## debug=True
## import simbad
## name="hudf"
### (ra, dec) = simbad.simbad(name)
## (ra, dec) = (name, "")
### (ra, dec) = ('80.9','80.9')
## radius=2.
## filter="B"
## ins = 'rss'
## status, data = findGuideStars(ra, dec, filter=filter, instrument=ins, targetRadius=radius)
## print data
### print status
### for line in data:
# print line
|
crawfordsm/pysalt
|
plugins/guide_stars.py
|
Python
|
bsd-3-clause
| 20,834
|
#!/usr/bin/env python
from __future__ import print_function
import rospy
import sys
import mavros
import argparse
import threading
from std_msgs.msg import Float64
from mavros.utils import *
#latitude = hold2[0].latitude
#longitude = hold2[0].longitude
#altitude = hold2[0].altitude
# placeholder coordinates so the module-level waypoint write below has defined
# values; replace with live data once the subscriber feeds real waypoints
latitude = 7
longitude = 6
altitude = 2
# read from /mavros/global_postition/rel_alt
# info is stored in data.data (is a std_msgs/Float64)
# set up the class and subscriber
#############################################################
#defining the class
class Float64Data(object):
def __init__(self):
        self.myfloat64 = None # unpacked value
self.myfloat64_ros = Float64() # raw format
self.lock = threading.Lock()
def callback(self, data):
self.lock.acquire()
try: # this looks different depending on the datatype
self.myfloat64 = data.data # data is expected to be in Float64 format
self.myfloat64_ros = data
finally:
self.lock.release()
def received_data(self):
        thedata = None  # local default; no need to store this on the instance
self.lock.acquire()
try:
thedata = self.myfloat64
self.myfloat64 = None
finally:
self.lock.release()
return thedata
def received_rawdata(self):
"""
input: (none)
output: thedata (std_msgs.msg.String)
returns raw data that was received,
in its original ROS format
"""
self.lock.acquire()
try:
thedata = self.myfloat64_ros
self.myfloat64_ros = Float64()
finally:
self.lock.release()
return thedata
# defining waypoint_node_new
def altitude_node_new():
rospy.init_node('altitude_node_new', anonymous=True)
float64_in = Float64Data()
rospy.Subscriber("/mavros/global_position/rel_alt", Float64, float64_in.callback)
#rospy.Subscriber("/UAV1/waypoint_list", soi_waypoint_work/LatLongWayptList, waypoints_in.callback)
print("Waiting for incoming ROS topic data...")
    # NOTE: calling rospy.spin() here would block forever and make the polling
    # loop below unreachable, so poll until the node is shut down instead
    while not rospy.is_shutdown():
#hold = string_in.received_data()
#if hold is None:
# pass
#else: # we have new data
# print("String received: %r" % hold)
hold_alt = float64_in.received_data()
if hold_alt is None:
pass
else: # we have new data
print("Data received: %r" % hold_alt)
# spin() simply keeps python from exiting until this node is stopped
#rospy.spin()
##############################################################
#hold_alt = None
#while hold_alt is None: # loop until you get data from mavros
# hold_alt = ???.received_data() # to be filled in
# placeholder so the waypoint logic below runs; treat as "not yet taken off"
hold_alt = 0.0
##############################################################
fh = open("filename.txt", 'w')
fh.write("QGC WPL 110\n")
# this presumes that we're starting at this landing strip and location (lat=149.165085, long=-35.362938)
if hold_alt < 0.3: # then we haven't takeoff yet, so give "takeoff"-type commands
#takeoff 00 position
fh.write("0\t1\t0\t16\t0.000000\t0.000000\t0.000000\t0.000000\t-35.362938\t149.165085\t584.409973\t1\n")
    #takeoff 01 position
    fh.write("1\t0\t3\t22\t15.000000\t0.000000\t0.000000\t0.000000\t-35.361164\t149.163986\t28.110001\t1\n")
else: # we're already in the air, just get the next waypoint
pass
firstwaypt = 0; counter = 2
#for pt in hold2:
## [longitude, latitude, altitude] = pt
# latitude = pt.latitude
# longitude = pt.longitude
# altitude = pt.altitude
#if firstwaypt == 0: # with constant unchanging orientation
# opened_file.write("%d 1 0 22 15.000000 0.000000 0.000000 0.000000 %d %d %d 1\n" % (counter, longitude, latitude, altitude))
#firstwaypt = 1
#else: # with constant unchanging orientation
fh.write("%d 0 3 22 15.000000 0.000000 0.000000 0.000000 %f %f %f 1\n" % (counter, longitude, latitude, altitude))
#counter += 1
fh.close()
rospy.spin()
|
medhijk/soi_waypoint_work
|
scripts/deprecated/altitude_node_new.py
|
Python
|
bsd-3-clause
| 4,179
|
from dream.plugins import plugin
from pprint import pformat
from copy import copy, deepcopy
import json
import time
import random
import operator
import xmlrpclib
import signal
from multiprocessing import Pool
# # run an ant in a subprocess. Can be parallelized.
# def runAntInSubProcess(ant):
# ant['result'] = plugin.ExecutionPlugin.runOneScenario(ant['input'])['result']
# return ant
# Execution plugin that implements enumeration of different solutions.
# This is an abstract class; only its run() is to be used. Sub-classes should
# implement the other methods depending on the problem.
class Enumeration(plugin.ExecutionPlugin):
def calculateScenarioScore(self, scenario):
raise NotImplementedError("Subclass must define 'calculateScenarioScore' method")
# creates the collated scenarios, i.e. the list
# of options collated into a dictionary for ease of referencing in ManPy
def createScenarioList(self,data):
raise NotImplementedError("Subclass must define 'createScenarioList' method")
# create the scenario
def createScenarioData(self,data,scenario):
raise NotImplementedError("Subclass must define 'createScenarioData' method")
# checks if the algorithm should terminate. Default is set to False so that the algorithm
# terminates only when all scenarios are considered
def checkIfShouldTerminate(self,data,scenarioList):
return False
def run(self, data):
# distributor_url = data['general'].get('distributorURL',None)
# distributor = None
# if distributor_url:
# distributor = xmlrpclib.Server(distributor_url)
# multiprocessorCount = data['general'].get('multiprocessorCount',None)
start = time.time() # start counting execution time
numberOfSolutions = int(data['general'].get('numberOfSolutions',15))
assert numberOfSolutions >= 1
scenarioList=self.createScenarioList(data)
# run the scenario. Only synchronous for now
i=0
for scenario in scenarioList:
scenario['input']=self.createScenarioData(data, scenario)
scenario['result'] = self.runOneScenario(scenario['input'])['result']
scenario['score'] = self.calculateScenarioScore(scenario)
            # if we should terminate, remove the scenarios that are not scored yet
if self.checkIfShouldTerminate(data, scenarioList):
scenarioList = scenarioList[:i+1]
break
i+=1
        # remove ants that output the same schedules
        # XXX we in fact remove ants that produce the same output json
scenarioListWithoutDuplicates = []
resultList=[]
for scenario in scenarioList:
scenarioResult = copy(scenario['result']['result_list'][0]['elementList'])
#scenarioResult['general'].pop('totalExecutionTime', None)
if scenarioResult not in resultList:
resultList.append(scenarioResult)
scenarioListWithoutDuplicates.append(scenario)
# rank the scenarios based on their score and take only the numberOfSolutions best
scenariosToReturn = sorted(scenarioListWithoutDuplicates,key=operator.itemgetter('score'))[:numberOfSolutions]
        # build the result list with the scenarios that need to be returned
data['result']['result_list'] = result_list = []
for scenario in scenariosToReturn:
result, = scenario['result']['result_list']
result['score'] = scenario['score']
result['key'] = scenario['key']
result_list.append(result)
self.logger.info("Enumeration finished, execution time %0.2fs" % (time.time() - start))
return data
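# A minimal concrete subclass might look like the following (a hypothetical
# sketch, not part of the original plugin; the scoring key is an assumption):
#
# class ExhaustiveSearch(Enumeration):
#     def createScenarioList(self, data):
#         return [{'key': i} for i in range(10)]
#     def createScenarioData(self, data, scenario):
#         scenario_data = deepcopy(data)
#         # ...apply the option identified by scenario['key'] here...
#         return scenario_data
#     def calculateScenarioScore(self, scenario):
#         return scenario['result']['result_list'][0].get('score', 0)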
|
nexedi/dream
|
dream/plugins/Enumeration.py
|
Python
|
gpl-3.0
| 3,711
|
#!/usr/bin/env python
'''
Mulliken population analysis with NAO
'''
import numpy
from pyscf import gto, scf, lo
from functools import reduce
x = .63
mol = gto.M(atom=[['C', (0, 0, 0)],
['H', (x , x, x)],
['H', (-x, -x, x)],
['H', (-x, x, -x)],
['H', ( x, -x, -x)]],
basis='ccpvtz')
mf = scf.RHF(mol).run()
# C matrix stores the AO to localized orbital coefficients
C = lo.orth_ao(mf, 'nao')
# C is orthogonal with respect to the AO overlap matrix: C^T S C is an identity matrix.
print(abs(reduce(numpy.dot, (C.T, mf.get_ovlp(), C)) -
numpy.eye(mol.nao_nr())).max()) # should be close to 0
# The following linear equation can also be solved with the matrix
# multiplication reduce(numpy.dot, (C.T, mf.get_ovlp(), mf.mo_coeff))
mo = numpy.linalg.solve(C, mf.mo_coeff)
#
# Mulliken population analysis based on NAOs
#
dm = mf.make_rdm1(mo, mf.mo_occ)
mf.mulliken_pop(mol, dm, numpy.eye(mol.nao_nr()))
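# The identity matrix is passed as the overlap because dm above is expressed
# in the orthonormal NAO basis, so this prints the NAO-based Mulliken
# populations and per-atom charges.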
|
gkc1000/pyscf
|
examples/local_orb/01-pop_with_nao.py
|
Python
|
apache-2.0
| 997
|
"""
The various HTTP responses for use in returning proper HTTP codes.
"""
from django.http import HttpResponse
class HttpCreated(HttpResponse):
status_code = 201
def __init__(self, *args, **kwargs):
location = kwargs.pop('location', '')
super(HttpCreated, self).__init__(*args, **kwargs)
self['Location'] = location
class HttpAccepted(HttpResponse):
status_code = 202
class HttpNoContent(HttpResponse):
status_code = 204
class HttpMultipleChoices(HttpResponse):
status_code = 300
class HttpSeeOther(HttpResponse):
status_code = 303
class HttpNotModified(HttpResponse):
status_code = 304
class HttpBadRequest(HttpResponse):
status_code = 400
class HttpUnauthorized(HttpResponse):
status_code = 401
class HttpForbidden(HttpResponse):
status_code = 403
class HttpNotFound(HttpResponse):
status_code = 404
class HttpMethodNotAllowed(HttpResponse):
status_code = 405
class HttpConflict(HttpResponse):
status_code = 409
class HttpGone(HttpResponse):
status_code = 410
class HttpTooManyRequests(HttpResponse):
status_code = 429
class HttpApplicationError(HttpResponse):
status_code = 500
class HttpNotImplemented(HttpResponse):
status_code = 501
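# Example usage in a view (an illustrative sketch; the Location URL is made up):
#
# def create_note(request):
#     ...  # persist the new resource
#     return HttpCreated(location="/api/v1/note/1/")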
|
hzlf/openbroadcast
|
website/apps/tastypie__/http.py
|
Python
|
gpl-3.0
| 1,267
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set fileencodings=utf-8
import os
import re
import sys
import csv
import ast
import json
import argparse
import calendar
from types import *
from datetime import datetime
class NotSupportedError(NotImplementedError):
pass
class InputConverter(object):
def __init__(self):
self.impl_map = {
'short' : int,
'int' : int,
'integer' : int,
'long' : long,
'float' : float,
'double' : float,
'string' : self.convert_string,
'timestamp' : long,
'array' : self.convert_array,
'boolean' : bool
}
def get_impl(self, name):
impl = self.impl_map.get(name)
if not impl:
raise NotSupportedError('"{}" is not a supported type'.format(name))
return impl
def convert_string(self, value):
return value.decode('utf-8')
def convert_array(self, value):
return self.convert_string(value).split(u',')
def convert(infile, outfile, columns, delimiter=',', quotechar='"'):
"""
Convert infile (stdin) formatted as csv (with column headers)
to outfile (stdout) formatted as json
"""
converter = InputConverter()
cols = [col.split(':') for col in columns]
function_seq = [(c[0], converter.get_impl(c[1])) for c in cols]
reader = csv.DictReader(infile, [c[0] for c in cols],
delimiter=delimiter, quotechar=quotechar)
for row in reader:
for k, impl in function_seq:
if row[k] == '\\N':
row[k] = None # NULL -> None
else:
row[k] = impl(row[k])
outfile.write(json.dumps(row))
outfile.write('\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert MySQL csv to Crate JSON')
parser.add_argument('infile', nargs='?',
type=argparse.FileType('r'),
help='path to csv file',
default=sys.stdin)
parser.add_argument('outfile', nargs='?',
type=argparse.FileType('w'),
help='path to json file',
default=sys.stdout)
parser.add_argument('--columns', nargs='*',
help="""column definition formatted as col_name:col_type [...]
column types are: short, int, integer, long, float, double, string, timestamp, array""")
args = parser.parse_args()
convert(args.infile, args.outfile, args.columns)
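# Example invocation (hypothetical file and column names):
#   ./csv2json.py users.csv users.json --columns id:int name:string signup:timestamp
# Each CSV row becomes one JSON object per line, with MySQL's \N rendered as null.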
|
crate/crate-utils
|
migrations/mysql/csv2json.py
|
Python
|
apache-2.0
| 2,610
|
import unittest
from queue import Queue
import os
from bears.general.IndentationBear import IndentationBear
from bears.general.AnnotationBear import AnnotationBear
from coala_utils.string_processing.Core import escape
from coalib.settings.Section import Section
from coalib.settings.Setting import Setting
class IndentationBearTest(unittest.TestCase):
def setUp(self):
self.section = Section("")
self.section.append(Setting('language', 'test'))
self.section.append(Setting('use_spaces', False))
self.section.append(Setting('coalang_dir', escape(os.path.join(
os.path.dirname(__file__), "test_files"), '\\')))
self.dep_uut = AnnotationBear(self.section, Queue())
def get_results(self, file, section=None):
if section is None:
section = self.section
dep_results_valid = self.dep_uut.execute("file", file)
uut = IndentationBear(section, Queue())
arg_dict = {'dependency_results':
{AnnotationBear.__name__:
list(dep_results_valid)},
'file': file}
return list(uut.run_bear_from_section(["file"], arg_dict))
def verify_bear(self,
valid_file=None,
invalid_file=None,
section=None):
if valid_file:
valid_results = self.get_results(valid_file, section)
self.assertEqual(valid_results, [])
if invalid_file:
invalid_results = self.get_results(invalid_file, section)
self.assertNotEqual(invalid_results, [])
def test_basic_indent(self):
valid_file =\
("{\n",
"\tright indent\n",
"}\n")
invalid_file =\
("{\n",
"wrong indent\n",
"}\n")
self.verify_bear(valid_file, invalid_file)
valid_file2 =\
("a {\n",
"\tindent1\n",
"\tindent2\n",
"}\n")
invalid_file2 =\
("a {\n",
"\tindentlevel1;\n",
"\t\tsecondlinehere;\n",
"}\n")
self.verify_bear(valid_file2, invalid_file2)
def test_within_strings(self):
valid_file1 =\
('"indent specifier within string{"\n',
'does not indent\n')
self.verify_bear(valid_file1)
valid_file2 =\
('R("strings can span\n',
'multiple lines as well{")\n',
'but the bear works correctly\n')
self.verify_bear(valid_file2)
valid_file3 =\
('"this should indent"{ "hopefully"\n',
'\tand it does\n',
'}\n')
self.verify_bear(valid_file3)
def test_within_comments(self):
valid_file1 =\
('//indent specifier within comments{\n',
'remains unindented\n')
self.verify_bear(valid_file1)
valid_file2 =\
('/*Indent specifier within\n',
'lines of multiline comment {\n',
'doesnt have any effect{ */\n',
'no affect on regular lines as well\n')
self.verify_bear(valid_file2)
valid_file3 =\
('/*this should indent*/{ /*hopefully*/\n',
'\tand it does\n',
'}\n')
self.verify_bear(valid_file3)
def test_branch_indents(self):
valid_file =\
('branch indents{\n',
'\tsecond branch{\n',
'\t\twithin second branch\n',
'\t}\n',
'}\n',)
self.verify_bear(valid_file)
def test_bracket_matching(self):
valid_file = ("{{{}{}}",
"\tone_indent",
"}")
invalid_file = ("{{{}{}}",
"did not give indent",
"}")
self.verify_bear(valid_file, invalid_file)
invalid_file = ('}}}{{{\n',)
self.verify_bear(invalid_file=invalid_file)
def test_blank_lines(self):
valid_file = ("{ trying indent",
"\n",
"\tIndents even after blank line}")
invalid_file = ("{ trying indent",
"\n",
"should have Indented after blank line}")
self.verify_bear(valid_file, invalid_file)
valid_file = ('def func(x):\n',
'\tlevel1\n',
'\n',
'level0\n')
self.verify_bear(valid_file)
def test_settings(self):
section = Section("")
section.append(Setting('language', 'c'))
section.append(Setting('use_spaces', True))
section.append(Setting('tab_width', 6))
valid_file = ('{\n',
# Start ignoring SpaceConsistencyBear
' 6 spaces of indentation\n'
# Stop ignoring
'}\n')
invalid_file = ('{\n',
                        # Start ignoring SpaceConsistencyBear
' 4 spaces of indentation\n'
# Stop ignoring
'}\n')
self.verify_bear(valid_file, invalid_file, section)
def test_unmatched_indents(self):
valid_file = ('{}\n',)
invalid_file = ('{\n',)
self.verify_bear(valid_file, invalid_file)
invalid_file2 = ('{}}\n',)
self.verify_bear(valid_file=None, invalid_file=invalid_file2)
def test_multiple_indent_specifiers(self):
valid_file = ('{<\n',
'\t\tdouble indents\n',
'\t>\n',
'\tother specifier closes\n',
'}\n')
invalid_file = ('{\n',
'\t<\n',
'\t not giving indentation>}\n')
self.verify_bear(valid_file, invalid_file)
def test_unspecified_unindents(self):
valid_file = ('switch(expression) {\n',
'\tcase constant-expression :\n',
'\t\tstatement(s);\n',
'\t\tbreak;\n',
'\tcase constant-expression :\n',
'\t\tstatement(s);\n',
'\t\tbreak;\n',
'\tdefault :\n',
'\t\tstatement(s);\n',
'}\n')
invalid_file = ('switch(expression){\n',
'\tcase expr:\n',
'\tstatement(s);\n',
'}')
self.verify_bear(valid_file, invalid_file)
valid_file = ('def func(x,\n',
' y,\n',
' z):\n',
'\tsome line\n',
'\tsome line 2\n')
invalid_file = ('def func(x):\n',
'\t\tsome line\n',
'\tsome line\n')
self.verify_bear(valid_file, invalid_file)
invalid_file = ('def func(x):\n',
'\tline 1\n',
'# A comment')
self.verify_bear(invalid_file=invalid_file)
invalid_file = ('def func(x):\n',
'\ta = [1, 2,\n',
'3, 4]\n')
self.verify_bear(invalid_file=invalid_file)
invalid_file = ('def func(x):\n',
'\t/* multiline comment\n',
'unindent*/')
self.verify_bear(invalid_file=invalid_file)
def test_absolute_indentation(self):
valid_file =\
("some_function(param1,\n",
" second_param,\n",
" third_one)\n",
"indent back to normal\n")
invalid_file =\
("some_function(param1,\n",
" param2)\n",
" wrong_indent\n")
self.verify_bear(valid_file=valid_file, invalid_file=invalid_file)
valid_file = \
("branched_function(param1,\n",
" param2_func(param3,\n",
" param4)\n",
" param5)\n",
"indent back to original\n")
invalid_file = \
("some_function(param1\n",
" param2(param3,\n",
" param4))\n",
" wrong indent\n")
self.verify_bear(valid_file=valid_file, invalid_file=invalid_file)
valid_file =\
("some_function(param1{\n",
" \tshould be here\n",
" }\n",
" param2)\n")
invalid_file =\
("some_function(param1{\n",
" \tis this right?\n",
" }\n",
" probably not)\n")
self.verify_bear(valid_file=valid_file, invalid_file=invalid_file)
valid_file =\
("some_function(\n",
" does hanging indents\n"
" so can indent like this)\n")
self.verify_bear(valid_file)
def test_invalid_specifiers(self):
valid_file = ("not a valid : indent specifier\n",
"does not indent\n")
invalid_file = ("not a valid : indent specifier\n",
"\tindents\n")
self.verify_bear(valid_file, invalid_file)
valid_file = ("[a specifier :\n",
" inside an encapsulator]\n",
"is not valid")
self.verify_bear(valid_file)
valid_file = ("This is a valid specifier: # A comment\n",
"\tand so it indents\n")
self.verify_bear(valid_file)
|
mr-karan/coala-bears
|
tests/general/IndentationBearTest.py
|
Python
|
agpl-3.0
| 9,764
|
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The actuator interface and implementations.
"""
import json
import os
class Actuator(object):
def push(self, core, image, args):
"""Push an image to a target."""
raise NotImplementedError()
class ActuatorExtension(Actuator):
"""Actuator extension consisting of hook programs."""
def __init__(self, actuator_root_dir):
self.root = actuator_root_dir
self.name = os.path.basename(actuator_root_dir)
def push(self, core, image, args):
result = core.get_utils().run_hook(
self.root, 'push', core.get_source_directory(), *args,
input=json.dumps(image))
return result
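# On-disk layout assumed for an extension, inferred from push() above (sketch):
#   <actuator_root_dir>/
#       push   -- executable hook; receives the image as JSON on stdin, plus
#                 the source directory and any extra args on its command line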
|
GoogleCloudPlatform/SourceXCloud
|
lib/sxc/actuator.py
|
Python
|
apache-2.0
| 1,235
|
import json
import uuid
import functions
import flask
import httplib2
import requests
from flask import Flask
from apiclient import discovery
from oauth2client import client
app = Flask(__name__)
# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
#CLIENT_SECRETS = os.path.join(os.path.dirname(__file__), '..', 'client_secrets.json')
#FLOW = flow_from_clientsecrets(
# CLIENT_SECRETS,
# scope='https://www.googleapis.com/auth/plus.me',
# redirect_uri='http://localhost:8000/oauth2callback')
@app.route('/')
def index():
#if 'credentials' not in flask.session:
#return flask.redirect(flask.url_for('oauth2callback'))
#credentials = client.OAuth2Credentials.from_json(flask.session['credentials'])
#if credentials.access_token_expired:
#return flask.redirect(flask.url_for('oauth2callback'))
#else:
#http_auth = credentials.authorize(httplib2.Http())
#yt_service = discovery.build('youtube', 'v3', http_auth)
#functions.feed_playlist(yt_service)
response = requests.get("http://www.rmf.fm/au/?a=poplista")
return response.text
@app.route('/oauth2callback')
def oauth2callback():
flow = client.flow_from_clientsecrets('youtube/client_secret.json',scope='https://www.googleapis.com/auth/youtube',redirect_uri=flask.url_for('oauth2callback', _external=True))
if 'code' not in flask.request.args:
auth_uri = flow.step1_get_authorize_url()
return flask.redirect(auth_uri)
else:
auth_code = flask.request.args.get('code')
credentials = flow.step2_exchange(auth_code)
flask.session['credentials'] = credentials.to_json()
return flask.redirect(flask.url_for('index'))
app.secret_key = str(uuid.uuid4())
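# Minimal way to serve this locally (a sketch; the port follows the redirect
# URI hinted at in the comments above and is otherwise an assumption):
if __name__ == '__main__':
    app.run(host='localhost', port=8000, debug=True)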
|
Kraxi/YTplaylist
|
playlist.py
|
Python
|
gpl-2.0
| 1,868
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Cédric Krier
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup
import re
import os
import ConfigParser
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
config = ConfigParser.ConfigParser()
config.readfp(open('tryton.cfg'))
info = dict(config.items('tryton'))
for key in ('depends', 'extras_depend', 'xml'):
if key in info:
info[key] = info[key].strip().splitlines()
major_version, minor_version = 3, 8
requires = []
for dep in info.get('depends', []):
if dep.startswith('health'):
requires.append('trytond_%s == %s' %
(dep, info.get('version')))
elif not re.match(r'(ir|res|webdav)(\W|$)', dep):
requires.append('trytond_%s >= %s.%s, < %s.%s' %
(dep, major_version, minor_version, major_version,
minor_version + 1))
requires.append('trytond >= %s.%s, < %s.%s' %
(major_version, minor_version, major_version, minor_version + 1))
setup(name='trytond_health_nursing',
version=info.get('version', '0.0.1'),
description=info.get('description', 'GNU Health Nursing Module'),
author=info.get('author', 'GNU Solidario'),
author_email=info.get('email', 'health@gnusolidario.org'),
url=info.get('website', 'http://health.gnu.org/'),
download_url='http://ftp.gnu.org/gnu/health/',
package_dir={'trytond.modules.health_nursing': '.'},
packages=[
'trytond.modules.health_nursing',
'trytond.modules.health_nursing.tests',
],
package_data={
'trytond.modules.health_nursing': info.get('xml', []) \
+ info.get('translation', []) \
+ ['tryton.cfg', 'view/*.xml', 'doc/*.rst', 'locale/*.po',
'report/*.odt', 'icons/*.svg'],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Plugins',
'Framework :: Tryton',
'Intended Audience :: Developers',
'Intended Audience :: Healthcare Industry',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Natural Language :: Spanish',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Medical Science Apps.',
],
license='GPL-3',
install_requires=requires,
zip_safe=False,
entry_points="""
[trytond.modules]
health_nursing = trytond.modules.health_nursing
""",
test_suite='tests',
test_loader='trytond.test_loader:Loader',
)
|
kret0s/gnuhealth-live
|
tryton/server/trytond-3.8.3/trytond/modules/health_nursing/setup.py
|
Python
|
gpl-3.0
| 3,333
|
# Django settings for Django Generic Counter project.
import os
from tempfile import gettempdir
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(gettempdir(), 'django-generic-counter.tests.db'),
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.4/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '0yt52*upn&&_(jkkkd&-=r-x1b$2w(b2umv2_(+ak)gqzx#e($'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'django_generic_counter.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'django_generic_counter',
'django_nose'
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
|
0x07Ltd/django-generic-counter
|
tests/settings14.py
|
Python
|
unlicense
| 5,470
|