code
stringlengths 3
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 3
1.05M
|
|---|---|---|---|---|---|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generate keyboard layout and hotkey data for the keyboard overlay.
This script fetches data from the keyboard layout and hotkey data spreadsheet,
and output the data depending on the option.
--cc: Rewrites a part of C++ code in
chrome/browser/chromeos/webui/keyboard_overlay_ui.cc
--grd: Rewrites a part of grd messages in
chrome/app/generated_resources.grd
--js: Rewrites the entire JavaScript code in
chrome/browser/resources/keyboard_overlay/keyboard_overlay_data.js
These options can be specified at the same time.
e.g.
python gen_keyboard_overlay_data.py --cc --grd --js
The output directory of the generated files can be changed with --outdir.
e.g. (This will generate tmp/keyboard_overlay.js)
python gen_keyboard_overlay_data.py --outdir=tmp --js
"""
import cStringIO
import datetime
import gdata.spreadsheet.service
import getpass
import json
import optparse
import os
import re
import sys
# Bit flags for modifier keys.  NOTE(review): these appear unused within this
# file; the spreadsheet data encodes modifiers as strings — confirm before
# removing.
MODIFIER_SHIFT = 1 << 0
MODIFIER_CTRL = 1 << 1
MODIFIER_ALT = 1 << 2
# Keys of the Google spreadsheets the layout/hotkey data is fetched from.
KEYBOARD_GLYPH_SPREADSHEET_KEY = '0Ao3KldW9piwEdExLbGR6TmZ2RU9aUjFCMmVxWkVqVmc'
HOTKEY_SPREADSHEET_KEY = '0AqzoqbAMLyEPdE1RQXdodk1qVkFyTWtQbUxROVM1cXc'
# Default output locations (relative to src/) for each generated artifact.
CC_OUTDIR = 'chrome/browser/ui/webui/chromeos'
CC_FILENAME = 'keyboard_overlay_ui.cc'
GRD_OUTDIR = 'chrome/app'
GRD_FILENAME = 'chromeos_strings.grdp'
JS_OUTDIR = 'chrome/browser/resources/chromeos'
JS_FILENAME = 'keyboard_overlay_data.js'
# Marker lines delimiting the generated region inside the rewritten files.
CC_START = r'IDS_KEYBOARD_OVERLAY_INSTRUCTIONS_HIDE },'
CC_END = r'};'
GRD_START = r' <!-- BEGIN GENERATED KEYBOARD OVERLAY STRINGS -->'
GRD_END = r' <!-- END GENERATED KEYBOARD OVERLAY STRINGS -->'
# Maps a glyph label from the spreadsheet to the text shown on the overlay.
LABEL_MAP = {
  'glyph_arrow_down': 'down',
  'glyph_arrow_left': 'left',
  'glyph_arrow_right': 'right',
  'glyph_arrow_up': 'up',
  'glyph_back': 'back',
  'glyph_backspace': 'backspace',
  'glyph_brightness_down': 'bright down',
  'glyph_brightness_up': 'bright up',
  'glyph_enter': 'enter',
  'glyph_forward': 'forward',
  'glyph_fullscreen': 'maximize',
  # Kana/Eisu key on Japanese keyboard
  'glyph_ime': u'\u304b\u306a\u0020\u002f\u0020\u82f1\u6570',
  'glyph_lock': 'lock',
  'glyph_overview': 'switch window',
  'glyph_power': 'power',
  'glyph_right': 'right',
  'glyph_reload': 'reload',
  'glyph_search': 'search',
  'glyph_shift': 'shift',
  'glyph_tab': 'tab',
  'glyph_tools': 'tools',
  'glyph_volume_down': 'vol. down',
  'glyph_volume_mute': 'mute',
  'glyph_volume_up': 'vol. up',
};
# Maps an input method ID to the keyboard overlay layout ID; the layout IDs
# double as sheet names in the keyboard glyph spreadsheet.
INPUT_METHOD_ID_TO_OVERLAY_ID = {
  'm17n:ar:kbd': 'ar',
  'm17n:fa:isiri': 'ar',
  'm17n:hi:itrans': 'hi',
  'm17n:th:kesmanee': 'th',
  'm17n:th:pattachote': 'th',
  'm17n:th:tis820': 'th',
  'm17n:vi:tcvn': 'vi',
  'm17n:vi:telex': 'vi',
  'm17n:vi:viqr': 'vi',
  'm17n:vi:vni': 'vi',
  'm17n:zh:cangjie': 'zh_TW',
  'm17n:zh:quick': 'zh_TW',
  'mozc': 'en_US',
  'mozc-chewing': 'zh_TW',
  'mozc-dv': 'en_US_dvorak',
  'mozc-hangul': 'ko',
  'mozc-jp': 'ja',
  'pinyin': 'zh_CN',
  'pinyin-dv': 'en_US_dvorak',
  'xkb:be::fra': 'fr',
  'xkb:be::ger': 'de',
  'xkb:be::nld': 'nl',
  'xkb:bg::bul': 'bg',
  'xkb:bg:phonetic:bul': 'bg',
  'xkb:br::por': 'pt_BR',
  'xkb:ca::fra': 'fr_CA',
  'xkb:ca:eng:eng': 'ca',
  'xkb:ch::ger': 'de',
  'xkb:ch:fr:fra': 'fr',
  'xkb:cz::cze': 'cs',
  'xkb:de::ger': 'de',
  'xkb:de:neo:ger': 'de_neo',
  'xkb:dk::dan': 'da',
  'xkb:ee::est': 'et',
  'xkb:es::spa': 'es',
  'xkb:es:cat:cat': 'ca',
  'xkb:fi::fin': 'fi',
  'xkb:fr::fra': 'fr',
  'xkb:gb:dvorak:eng': 'en_GB_dvorak',
  'xkb:gb:extd:eng': 'en_GB',
  'xkb:gr::gre': 'el',
  'xkb:hr::scr': 'hr',
  'xkb:hu::hun': 'hu',
  'xkb:il::heb': 'iw',
  'xkb:it::ita': 'it',
  'xkb:jp::jpn': 'ja',
  'xkb:kr:kr104:kor': 'ko',
  'xkb:latam::spa': 'es_419',
  'xkb:lt::lit': 'lt',
  'xkb:lv:apostrophe:lav': 'lv',
  'xkb:no::nob': 'no',
  'xkb:pl::pol': 'pl',
  'xkb:pt::por': 'pt_PT',
  'xkb:ro::rum': 'ro',
  'xkb:rs::srp': 'sr',
  'xkb:ru::rus': 'ru',
  'xkb:ru:phonetic:rus': 'ru',
  'xkb:se::swe': 'sv',
  'xkb:si::slv': 'sl',
  'xkb:sk::slo': 'sk',
  'xkb:tr::tur': 'tr',
  'xkb:ua::ukr': 'uk',
  'xkb:us::eng': 'en_US',
  'xkb:us:altgr-intl:eng': 'en_US_altgr_intl',
  'xkb:us:colemak:eng': 'en_US_colemak',
  'xkb:us:dvorak:eng': 'en_US_dvorak',
  'xkb:us:intl:eng': 'en_US_intl',
  'zinnia-japanese': 'ja',
}
# The file was first generated in 2012 and we have a policy of not updating
# copyright dates.
COPYRIGHT_HEADER=\
"""// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
"""
# A snippet for grd file
GRD_SNIPPET_TEMPLATE=""" <message name="%s" desc="%s">
%s
</message>
"""
# A snippet for C++ file
CC_SNIPPET_TEMPLATE=""" { "%s", %s },
"""
def SplitBehavior(behavior):
  """Tokenizes a behavior string for message/i18n-content generation.

  The string is split on spaces and a small set of punctuation characters,
  and empty tokens are discarded.

  Examples:
    'Activate last tab' => ['Activate', 'last', 'tab']
    'Close tab' => ['Close', 'tab']
  """
  tokens = re.split('[ ()"-.,]', behavior)
  return [token for token in tokens if token]
def ToMessageName(behavior):
  """Composes a message name for grd file.

  Examples:
    'Activate last tab' => IDS_KEYBOARD_OVERLAY_ACTIVATE_LAST_TAB
    'Close tab' => IDS_KEYBOARD_OVERLAY_CLOSE_TAB
  """
  suffix = '_'.join(word.upper() for word in SplitBehavior(behavior))
  return 'IDS_KEYBOARD_OVERLAY_' + suffix
def ToMessageDesc(description):
  """Composes a message description for grd file.

  The optional |description| from the spreadsheet is appended in
  parentheses when present.
  """
  base = 'The text in the keyboard overlay to explain the shortcut'
  if not description:
    return base + '.'
  return '%s (%s).' % (base, description)
def Toi18nContent(behavior):
  """Composes a i18n-content value for HTML/JavaScript files.

  Examples:
    'Activate last tab' => keyboardOverlayActivateLastTab
    'Close tab' => keyboardOverlayCloseTab
  """
  words = [word.lower() for word in SplitBehavior(behavior)]
  # CamelCase each token onto the fixed prefix.
  return 'keyboardOverlay' + ''.join(w[:1].upper() + w[1:] for w in words)
def ToKeys(hotkey):
  """Converts the action value to shortcut keys used from JavaScript.

  Returns None for actions that have no modifier and whose key is not one
  of the always-highlighted base keys.

  Examples:
    'Ctrl - 9' => '9<>CTRL'
    'Ctrl - Shift - Tab' => 'tab<>CTRL<>SHIFT'
  """
  modifier_names = ('Shift', 'Ctrl', 'Alt', 'Search')
  parts = hotkey.split(' - ')
  modifiers = sorted(part.upper() for part in parts if part in modifier_names)
  keycode = [part.lower() for part in parts if part not in modifier_names]
  # The keys which are highlighted even without modifier keys.
  base_keys = ('backspace', 'power')
  if not modifiers and keycode and keycode[0] not in base_keys:
    return None
  return '<>'.join(keycode + modifiers)
def ParseOptions():
  """Parses the input arguments and returns options.

  Exits the process with -1 if the user name is not a google.com account or
  if none of --js/--grd/--cc was given.  Prompts for an application-specific
  password when --password was not supplied.
  """
  # default_username = os.getusername() + '@google.com';
  default_username = '%s@google.com' % os.environ.get('USER')
  parser = optparse.OptionParser()
  parser.add_option('--key', dest='key',
                    help='The key of the spreadsheet (required).')
  parser.add_option('--username', dest='username',
                    default=default_username,
                    help='Your user name (default: %s).' % default_username)
  parser.add_option('--password', dest='password',
                    help='Your password.')
  parser.add_option('--account_type', default='GOOGLE', dest='account_type',
                    help='Account type used for gdata login (default: GOOGLE)')
  parser.add_option('--js', dest='js', default=False, action='store_true',
                    help='Output js file.')
  parser.add_option('--grd', dest='grd', default=False, action='store_true',
                    help='Output resource file.')
  parser.add_option('--cc', dest='cc', default=False, action='store_true',
                    help='Output cc file.')
  parser.add_option('--outdir', dest='outdir', default=None,
                    help='Specify the directory files are generated.')
  (options, unused_args) = parser.parse_args()
  if not options.username.endswith('google.com'):
    print 'google.com account is necessary to use this script.'
    sys.exit(-1)
  if (not (options.js or options.grd or options.cc)):
    print 'Either --js, --grd, or --cc needs to be specified.'
    sys.exit(-1)
  # Get the password from the terminal, if needed.
  if not options.password:
    options.password = getpass.getpass(
        'Application specific password for %s: ' % options.username)
  return options
def InitClient(options):
  """Initializes the spreadsheet client.

  Args:
    options: Parsed options carrying username, password and account_type.

  Returns:
    A logged-in gdata SpreadsheetsService client.
  """
  client = gdata.spreadsheet.service.SpreadsheetsService()
  client.email = options.username
  client.password = options.password
  client.source = 'Spread Sheet'
  client.account_type = options.account_type
  print 'Logging in as %s (%s)' % (client.email, client.account_type)
  # Raises on authentication failure; no retry is attempted here.
  client.ProgrammaticLogin()
  return client
def PrintDiffs(message, lhs, rhs):
  """Prints |message| followed by the items of |lhs| missing from |rhs|.

  Prints nothing when |lhs| is a subset of |rhs|.
  """
  dif = set(lhs).difference(rhs)
  if dif:
    print message, ', '.join(dif)
def FetchSpreadsheetFeeds(client, key, sheets, cols):
  """Fetch feeds from the spreadsheet.

  Args:
    client: A spreadsheet client to be used for fetching data.
    key: A key string of the spreadsheet to be fetched.
    sheets: A list of the sheet names to read data from.
    cols: A list of columns to read data from.

  Returns:
    A dict mapping each sheet title to a list of per-row dicts, each
    restricted to the columns in |cols| that had non-empty text.
  """
  worksheets_feed = client.GetWorksheetsFeed(key)
  print 'Fetching data from the worksheet: %s' % worksheets_feed.title.text
  worksheets_data = {}
  titles = []
  for entry in worksheets_feed.entry:
    # The worksheet id is the last path segment of the entry's id URL.
    worksheet_id = entry.id.text.split('/')[-1]
    list_feed = client.GetListFeed(key, worksheet_id)
    list_data = []
    # Hack to deal with sheet names like 'sv (Copy of fl)'
    title = list_feed.title.text.split('(')[0].strip()
    titles.append(title)
    if title not in sheets:
      continue
    print 'Reading data from the sheet: %s' % list_feed.title.text
    for i, entry in enumerate(list_feed.entry):
      line_data = {}
      for k in entry.custom:
        # Skip unknown columns and cells with no text.
        if (k not in cols) or (not entry.custom[k].text):
          continue
        line_data[k] = entry.custom[k].text
      list_data.append(line_data)
    worksheets_data[title] = list_data
  # Report mismatches between requested sheets and what the spreadsheet has.
  PrintDiffs('Exist only on the spreadsheet: ', titles, sheets)
  PrintDiffs('Specified but do not exist on the spreadsheet: ', sheets, titles)
  return worksheets_data
def FetchKeyboardGlyphData(client):
  """Fetches the keyboard glyph data from the spreadsheet.

  Returns a dict keyed by overlay layout id (e.g. 'en_US'); each value has a
  'layoutName' entry (when present on the sheet) and a 'keys' dict mapping
  scancode strings to glyph attribute dicts.
  """
  glyph_cols = ['scancode', 'p0', 'p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7',
                'p8', 'p9', 'label', 'format', 'notes']
  keyboard_glyph_data = FetchSpreadsheetFeeds(
      client, KEYBOARD_GLYPH_SPREADSHEET_KEY,
      INPUT_METHOD_ID_TO_OVERLAY_ID.values(), glyph_cols)
  ret = {}
  for lang, rows in keyboard_glyph_data.items():
    lang_data = {}
    ret[lang] = lang_data
    keys = {}
    for row in rows:
      scancode = row.get('scancode')
      # A row without a scancode but with a 'notes' cell carries the
      # human-readable layout name.
      if (not scancode) and row.get('notes'):
        lang_data['layoutName'] = row['notes']
        continue
      row.pop('scancode')
      row.pop('notes', None)
      if 'label' in row:
        row['label'] = LABEL_MAP.get(row['label'], row['label'])
      keys[scancode] = row
    # Add a label to space key
    keys.setdefault('39', {'label': 'space'})
    lang_data['keys'] = keys
  return ret
def FetchLayoutsData(client):
  """Fetches the physical keyboard layout geometry from the spreadsheet.

  Returns a dict keyed by the layout's single-letter id ('U', 'J', 'E', 'B')
  whose values are lists of [scancode, x, y, w, h] rows; missing cells become
  '' and numeric cells are converted to float.
  """
  layout_names = ['U_layout', 'J_layout', 'E_layout', 'B_layout']
  cols = ['scancode', 'x', 'y', 'w', 'h']
  layouts = FetchSpreadsheetFeeds(client, KEYBOARD_GLYPH_SPREADSHEET_KEY,
                                  layout_names, cols)
  ret = {}
  for layout_name, layout in layouts.items():
    rows = []
    for row in layout:
      line = []
      for col in cols:
        value = row.get(col)
        if not value:
          line.append('')
        else:
          line.append(value if col == 'scancode' else float(value))
      rows.append(line)
    # Key by the first letter of the sheet name ('U_layout' -> 'U').
    ret[layout_name[0]] = rows
  return ret
def FetchHotkeyData(client):
  """Fetches the hotkey data from the spreadsheet.

  Args:
    client: A spreadsheet client used for fetching data.

  Returns:
    A list of (behavior, action, description) tuples, where |action| is the
    '<>'-joined key/modifier string produced by ToKeys() and |description|
    may be None.
  """
  hotkey_sheet = ['Cross Platform Behaviors']
  hotkey_cols = ['behavior', 'context', 'kind', 'actionctrlctrlcmdonmac',
                 'chromeos', 'descriptionfortranslation']
  hotkey_data = FetchSpreadsheetFeeds(client, HOTKEY_SPREADSHEET_KEY,
                                      hotkey_sheet, hotkey_cols)
  # (behavior, action, description)
  result = []
  for line in hotkey_data['Cross Platform Behaviors']:
    # Only rows flagged for Chrome OS whose kind is 'Key' describe shortcuts.
    if (not line.get('chromeos')) or (line.get('kind') != 'Key'):
      continue
    action = ToKeys(line['actionctrlctrlcmdonmac'])
    if not action:
      continue
    behavior = line['behavior'].strip()
    description = line.get('descriptionfortranslation')
    result.append((behavior, action, description))
  return result
def UniqueBehaviors(hotkey_data):
  """Retrieves a sorted list of unique behaviors from |hotkey_data|.

  The (behavior, description) pairs are ordered by the generated message
  name of the behavior.
  """
  unique = set((behavior, description)
               for (behavior, _, description) in hotkey_data)
  return sorted(unique, key=lambda pair: ToMessageName(pair[0]))
def GetPath(path_from_src):
  """Returns the absolute path of the specified path.

  Args:
    path_from_src: A path relative to the Chromium src/ directory.

  Warns (but still returns the path) when the file does not exist.
  NOTE(review): the result is the joined path, not normalized with
  os.path.abspath — callers appear to rely on the joined form.
  """
  path = os.path.join(os.path.dirname(__file__), '../..', path_from_src)
  if not os.path.isfile(path):
    print 'WARNING: %s does not exist. Maybe moved or renamed?' % path
  return path
def OutputFile(outpath, snippet):
  """Writes the copyright header followed by |snippet| to |outpath|.

  Args:
    outpath: Destination file path; overwritten if it exists.
    snippet: Generated content to write after COPYRIGHT_HEADER.
  """
  # Use a context manager so the handle is closed (and data flushed) even on
  # error; the original leaked the open file object.
  with open(outpath, 'w') as out:
    out.write(COPYRIGHT_HEADER + '\n')
    out.write(snippet)
  print('Output ' + os.path.normpath(outpath))
def RewriteFile(start, end, original_dir, original_filename, snippet,
                outdir=None):
  """Replaces a part of the specified file with snippet and outputs it.

  The region of the original file between the line equal to |start| and the
  line equal to |end| (markers included) is replaced by
  '<start>\n<snippet><end>\n'.

  Args:
    start: Literal line marking the beginning of the generated region.
    end: Literal line marking the end of the generated region.
    original_dir: Directory (relative to src/) containing the original file.
    original_filename: Name of the file to rewrite.
    snippet: Replacement text for the generated region.
    outdir: Optional output directory; when None the file is rewritten
        in place.
  """
  original_path = GetPath(os.path.join(original_dir, original_filename))
  with open(original_path, 'r') as original:
    original_content = original.read()
  outpath = os.path.join(outdir, original_filename) if outdir else original_path
  rx = re.compile(r'%s\n.*?%s\n' % (re.escape(start), re.escape(end)),
                  re.DOTALL)
  new_content = rx.sub('%s\n%s%s\n' % (start, snippet, end), original_content)
  # Open the output only after the new content is ready so an error during
  # substitution cannot leave a truncated file behind (the original opened
  # the output for writing first).
  with open(outpath, 'w') as out:
    out.write(new_content)
  print('Output ' + os.path.normpath(outpath))
def OutputJson(keyboard_glyph_data, hotkey_data, layouts, var_name, outdir):
  """Outputs the keyboard overlay data as a JSON file.

  Args:
    keyboard_glyph_data: Result of FetchKeyboardGlyphData().
    hotkey_data: List of (behavior, action, description) tuples.
    layouts: Result of FetchLayoutsData().
    var_name: JavaScript variable name the JSON literal is assigned to.
    outdir: Output directory, or a false value to use JS_OUTDIR.
  """
  action_to_id = dict((action, Toi18nContent(behavior))
                      for (behavior, action, _) in hotkey_data)
  data = {'keyboardGlyph': keyboard_glyph_data,
          'shortcut': action_to_id,
          'layouts': layouts,
          'inputMethodIdToOverlayId': INPUT_METHOD_ID_TO_OVERLAY_ID}
  outpath = GetPath(os.path.join(outdir or JS_OUTDIR, JS_FILENAME))
  json_data = json.dumps(data, sort_keys=True, indent=2)
  # Remove redundant spaces after ','
  json_data = json_data.replace(', \n', ',\n')
  # Replace double quotes with single quotes to avoid lint warnings.
  json_data = json_data.replace('"', "'")
  OutputFile(outpath, 'var %s = %s;\n' % (var_name, json_data))
def OutputGrd(hotkey_data, outdir):
  """Outputs a part of messages in the grd file.

  Args:
    hotkey_data: List of (behavior, action, description) tuples.
    outdir: Optional output directory passed through to RewriteFile().
  """
  snippet = cStringIO.StringIO()
  for (behavior, description) in UniqueBehaviors(hotkey_data):
    # Do not generate message for 'Show wrench menu'. It is handled manually
    # based on branding.
    if behavior == 'Show wrench menu':
      continue
    snippet.write(GRD_SNIPPET_TEMPLATE %
                  (ToMessageName(behavior), ToMessageDesc(description),
                   behavior))
  RewriteFile(GRD_START, GRD_END, GRD_OUTDIR, GRD_FILENAME, snippet.getvalue(),
              outdir)
def OutputCC(hotkey_data, outdir):
  """Outputs a part of code in the C++ file.

  Args:
    hotkey_data: List of (behavior, action, description) tuples.
    outdir: Optional output directory passed through to RewriteFile().
  """
  snippet = cStringIO.StringIO()
  for (behavior, _) in UniqueBehaviors(hotkey_data):
    message_name = ToMessageName(behavior)
    output = CC_SNIPPET_TEMPLATE % (Toi18nContent(behavior), message_name)
    # Break the line if the line is longer than 80 characters
    if len(output) > 80:
      output = output.replace(' ' + message_name, '\n    %s' % message_name)
    snippet.write(output)
  RewriteFile(CC_START, CC_END, CC_OUTDIR, CC_FILENAME, snippet.getvalue(),
              outdir)
def main():
  """Entry point: fetches spreadsheet data and writes the requested files."""
  options = ParseOptions()
  client = InitClient(options)
  hotkey_data = FetchHotkeyData(client)
  # The original checked options.js twice in a row; the two fetches belong to
  # the same condition.
  if options.js:
    keyboard_glyph_data = FetchKeyboardGlyphData(client)
    layouts = FetchLayoutsData(client)
    OutputJson(keyboard_glyph_data, hotkey_data, layouts, 'keyboardOverlayData',
               options.outdir)
  if options.grd:
    OutputGrd(hotkey_data, options.outdir)
  if options.cc:
    OutputCC(hotkey_data, options.outdir)

if __name__ == '__main__':
  main()
|
codenote/chromium-test
|
tools/gen_keyboard_overlay_data/gen_keyboard_overlay_data.py
|
Python
|
bsd-3-clause
| 17,272
|
from PyQt5 import QtCore, QtWidgets
import os
class Welcome(QtWidgets.QWidget):
    """
    This class contains content of dock area part of initial esim Window.
    It creates Welcome page of eSim.
    """

    def __init__(self):
        """Builds the welcome page: a text browser showing welcome.html."""
        QtWidgets.QWidget.__init__(self)
        self.vlayout = QtWidgets.QVBoxLayout()
        self.browser = QtWidgets.QTextBrowser()
        # NOTE(review): assumes the process working directory is two levels
        # below the install root on non-Windows, and the root itself on
        # Windows — confirm against the launcher scripts.
        init_path = '../../'
        if os.name == 'nt':
            init_path = ''
        self.browser.setSource(QtCore.QUrl(
            init_path + "library/browser/welcome.html")
        )
        # Open http(s) links in the system browser instead of inline.
        self.browser.setOpenExternalLinks(True)
        self.browser.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.vlayout.addWidget(self.browser)
        self.setLayout(self.vlayout)
        self.show()
|
FOSSEE/eSim
|
src/browser/Welcome.py
|
Python
|
gpl-3.0
| 795
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-04-25 20:19
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Renames ProgramsConfig.execuction_mode (a typo) to execution_mode."""

    dependencies = [
        ('programs', '0016_add_allowed_language'),
    ]

    operations = [
        migrations.RenameField(
            model_name='programsconfig',
            old_name='execuction_mode',
            new_name='execution_mode',
        ),
    ]
|
sio2project/oioioi
|
oioioi/programs/migrations/0017_auto_20210425_2019.py
|
Python
|
gpl-3.0
| 457
|
"""Implement Prometheus statistics."""
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2018 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from prometheus_client import Gauge as PromGauge
from prometheus_client import Counter, Histogram
from faucet.prom_client import PromClient
class FaucetMetrics(PromClient):
    """Container class for objects that can be exported to Prometheus."""

    # Registries of DPID-labelled metrics so reset_dpid() can zero them all.
    _dpid_counters = None  # type: dict
    _dpid_gauges = None  # type: dict

    def __init__(self, reg=None):
        super(FaucetMetrics, self).__init__(reg=reg)
        self.PORT_REQUIRED_LABELS = self.REQUIRED_LABELS + ['port']
        self._dpid_counters = {}
        self._dpid_gauges = {}
        self.faucet_config_reload_requests = self._counter(
            'faucet_config_reload_requests',
            'number of config reload requests', [])
        self.faucet_event_id = self._gauge(
            'faucet_event_id',
            'highest/most recent event ID to be sent', [])
        self.faucet_config_reload_warm = self._dpid_counter(
            'faucet_config_reload_warm',
            'number of warm, differences only config reloads executed')
        self.faucet_config_reload_cold = self._dpid_counter(
            'faucet_config_reload_cold',
            'number of cold, complete reprovision config reloads executed')
        self.of_ignored_packet_ins = self._dpid_counter(
            'of_ignored_packet_ins',
            'number of OF packet_ins received but ignored from DP')
        self.of_packet_ins = self._dpid_counter(
            'of_packet_ins',
            'number of OF packet_ins received from DP')
        self.of_non_vlan_packet_ins = self._dpid_counter(
            'of_non_vlan_packet_ins',
            'number of OF packet_ins received from DP, not associated with a FAUCET VLAN')
        self.of_vlan_packet_ins = self._dpid_counter(
            'of_vlan_packet_ins',
            'number of OF packet_ins received from DP, associated with a FAUCET VLAN')
        self.of_flowmsgs_sent = self._dpid_counter(
            'of_flowmsgs_sent',
            'number of OF flow messages (and packet outs) sent to DP')
        self.of_errors = self._dpid_counter(
            'of_errors',
            'number of OF errors received from DP')
        self.of_dp_connections = self._dpid_counter(
            'of_dp_connections',
            'number of OF connections from a DP')
        # Fixed copy-paste defect: the help text previously duplicated the
        # of_dp_connections description.
        self.of_dp_disconnections = self._dpid_counter(
            'of_dp_disconnections',
            'number of OF disconnections from a DP')
        self.vlan_hosts_learned = self._gauge(
            'vlan_hosts_learned',
            'number of hosts learned on a VLAN',
            self.REQUIRED_LABELS + ['vlan'])
        self.port_vlan_hosts_learned = self._gauge(
            'port_vlan_hosts_learned',
            'number of hosts learned on a port and VLAN',
            self.PORT_REQUIRED_LABELS + ['vlan'])
        self.vlan_neighbors = self._gauge(
            'vlan_neighbors',
            'number of L3 neighbors on a VLAN (whether resolved to L2 addresses, or not)',
            self.REQUIRED_LABELS + ['vlan', 'ipv'])
        self.vlan_learn_bans = self._gauge(
            'vlan_learn_bans',
            'number of times learning was banned on a VLAN',
            self.REQUIRED_LABELS + ['vlan'])
        self.faucet_config_table_names = self._gauge(
            'faucet_config_table_names',
            'number to names map of FAUCET pipeline tables',
            self.REQUIRED_LABELS + ['table_name'])
        self.faucet_packet_in_secs = self._histogram(
            'faucet_packet_in_secs',
            'FAUCET packet in processing time',
            self.REQUIRED_LABELS,
            (0.0001, 0.001, 0.01, 0.1, 1))
        self.faucet_valve_service_secs = self._histogram(
            'faucet_valve_service_secs',
            'FAUCET valve service processing time',
            self.REQUIRED_LABELS + ['valve_service'],
            (0.0001, 0.001, 0.01, 0.1, 1))
        # NOTE(review): attribute name has a _seconds suffix but the exported
        # metric name does not; left unchanged because renaming the metric
        # would break existing dashboards.
        self.bgp_neighbor_uptime_seconds = self._gauge(
            'bgp_neighbor_uptime',
            'BGP neighbor uptime in seconds',
            self.REQUIRED_LABELS + ['vlan', 'neighbor'])
        self.bgp_neighbor_routes = self._gauge(
            'bgp_neighbor_routes',
            'BGP neighbor route count',
            self.REQUIRED_LABELS + ['vlan', 'neighbor', 'ipv'])
        self.learned_macs = self._gauge(
            'learned_macs',
            ('MAC address stored as 64bit number to DP ID, port, VLAN, '
             'and n (discrete index)'),
            self.PORT_REQUIRED_LABELS + ['vlan', 'n'])
        self.port_status = self._gauge(
            'port_status',
            'status of switch ports',
            self.PORT_REQUIRED_LABELS)
        self.port_stack_state = self._gauge(
            'port_stack_state',
            'state of stacking on a port',
            self.PORT_REQUIRED_LABELS)
        self.port_learn_bans = self._gauge(
            'port_learn_bans',
            'number of times learning was banned on a port',
            self.PORT_REQUIRED_LABELS)
        self.port_lacp_status = self._gauge(
            'port_lacp_status',
            'status of LACP on port',
            self.PORT_REQUIRED_LABELS)
        self.dp_status = self._dpid_gauge(
            'dp_status',
            'status of datapaths')
        self.of_dp_desc_stats = self._gauge(
            'of_dp_desc_stats',
            'DP description (OFPDescStatsReply)',
            self.REQUIRED_LABELS + ['mfr_desc', 'hw_desc', 'sw_desc', 'serial_num', 'dp_desc'])
        self.stack_cabling_errors = self._dpid_counter(
            'stack_cabling_errors',
            'number of cabling errors detected in all FAUCET stacks')
        self.stack_probes_received = self._dpid_counter(
            'stack_probes_received',
            'number of stacking messages received')
        self.dp_dot1x_success = self._dpid_counter(
            'dp_dot1x_success',
            'number of successful authentications on dp')
        self.dp_dot1x_failure = self._dpid_counter(
            'dp_dot1x_failure',
            'number of authentications attempts failed on dp')
        self.dp_dot1x_logoff = self._dpid_counter(
            'dp_dot1x_logoff',
            'number of eap-logoff events on dp')
        self.port_dot1x_success = self._counter(
            'port_dot1x_success',
            'number of successful authentications on port',
            self.PORT_REQUIRED_LABELS)
        self.port_dot1x_failure = self._counter(
            'port_dot1x_failure',
            'number of authentications attempts failed on port',
            self.PORT_REQUIRED_LABELS)
        self.port_dot1x_logoff = self._counter(
            'port_dot1x_logoff',
            'number of eap-logoff events on port',
            self.PORT_REQUIRED_LABELS)

    def _counter(self, var, var_help, labels):
        """Creates a Counter registered against this client's registry."""
        return Counter(var, var_help, labels, registry=self._reg)  # pylint: disable=unexpected-keyword-arg

    def _gauge(self, var, var_help, labels):
        """Creates a Gauge registered against this client's registry."""
        return PromGauge(var, var_help, labels, registry=self._reg)  # pylint: disable=unexpected-keyword-arg

    def _histogram(self, var, var_help, labels, buckets):
        """Creates a Histogram with the given buckets."""
        return Histogram(var, var_help, labels, buckets=buckets, registry=self._reg)  # pylint: disable=unexpected-keyword-arg

    def _dpid_counter(self, var, var_help):
        """Creates a DPID-labelled Counter and records it for reset_dpid()."""
        counter = self._counter(var, var_help, self.REQUIRED_LABELS)
        self._dpid_counters[var] = counter
        return counter

    def _dpid_gauge(self, var, var_help):
        """Creates a DPID-labelled Gauge and records it for reset_dpid()."""
        gauge = self._gauge(var, var_help, self.REQUIRED_LABELS)
        self._dpid_gauges[var] = gauge
        return gauge

    def reset_dpid(self, dp_labels):
        """Set all DPID-only counter/gauges to 0."""
        for counter in list(self._dpid_counters.values()):
            counter.labels(**dp_labels).inc(0)
        for gauge in list(self._dpid_gauges.values()):
            gauge.labels(**dp_labels).set(0)

    def inc_var(self, var, labels, val=1):
        """Increments the metric named |var| for the given label set."""
        assert labels is not None
        metrics_var = getattr(self, var)
        metrics_var.labels(**labels).inc(val)
|
trentindav/faucet
|
faucet/faucet_metrics.py
|
Python
|
apache-2.0
| 8,839
|
# Spawn Area file created with PSWG Planetary Spawn Tool
import sys
def addSpawnArea(core):
    """Registers the 'mixed_lair_group_1' lair spawn area on Dantooine.

    Args:
        core: Game core service container; only its spawnService is used.
    """
    # (x, z) = (-4014, -1966), radius 1024 — values come from the PSWG
    # Planetary Spawn Tool that generated this file.
    core.spawnService.addLairSpawnArea('mixed_lair_group_1', -4014, -1966, 1024, 'dantooine')
    return
|
agry/NGECore2
|
scripts/mobiles/spawnareas/dantooine_imperial_op_lair.py
|
Python
|
lgpl-3.0
| 192
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from decimal import Decimal
from oscar_docdata import appsettings
class Migration(migrations.Migration):
    """Initial schema for oscar_docdata: DocdataOrder, DocdataPayment and
    the polymorphic DocdataDirectDebitPayment subtype.

    NOTE(review): 'Derect Debit Payments' below is a typo, but applied
    migrations are historical records and must not be edited; fix it in the
    model and a new migration instead.
    """

    dependencies = [
        ('contenttypes', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='DocdataOrder',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('merchant_name', models.CharField(default=appsettings.DOCDATA_MERCHANT_NAME, max_length=100, verbose_name='Docdata account')),
                ('merchant_order_id', models.CharField(default='', max_length=100, verbose_name='Order ID')),
                ('order_key', models.CharField(default='', unique=True, max_length=200, verbose_name='Payment cluster ID')),
                ('status', models.CharField(default='new', max_length=50, verbose_name='Status', choices=[('new', 'New'), ('in_progress', 'In Progress'), ('pending', 'Pending'), ('paid', 'Paid'), ('paid_refunded', 'Paid, part refunded'), ('cancelled', 'Cancelled'), ('charged_back', 'Charged back'), ('refunded', 'Refunded'), ('expired', 'Expired'), ('unknown', 'Unknown')])),
                ('language', models.CharField(default='en', max_length=5, verbose_name='Language', blank=True)),
                ('total_gross_amount', models.DecimalField(verbose_name='Total gross amount', max_digits=15, decimal_places=2)),
                ('currency', models.CharField(max_length=10, verbose_name='Currency')),
                ('country', models.CharField(max_length=2, null=True, verbose_name='Country_code', blank=True)),
                ('total_registered', models.DecimalField(default=Decimal('0.00'), verbose_name='Total registered', max_digits=15, decimal_places=2)),
                ('total_shopper_pending', models.DecimalField(default=Decimal('0.00'), verbose_name='Total shopper pending', max_digits=15, decimal_places=2)),
                ('total_acquirer_pending', models.DecimalField(default=Decimal('0.00'), verbose_name='Total acquirer pending', max_digits=15, decimal_places=2)),
                ('total_acquirer_approved', models.DecimalField(default=Decimal('0.00'), verbose_name='Total acquirer approved', max_digits=15, decimal_places=2)),
                ('total_captured', models.DecimalField(default=Decimal('0.00'), verbose_name='Total captured', max_digits=15, decimal_places=2)),
                ('total_refunded', models.DecimalField(default=Decimal('0.00'), verbose_name='Total refunded', max_digits=15, decimal_places=2)),
                ('total_charged_back', models.DecimalField(default=Decimal('0.00'), verbose_name='Total changed back', max_digits=15, decimal_places=2)),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='created')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='updated')),
            ],
            options={
                'ordering': ('-created', '-updated'),
                'verbose_name': 'Docdata Order',
                'verbose_name_plural': 'Docdata Orders',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='DocdataPayment',
            fields=[
                ('payment_id', models.CharField(primary_key=True, default='', serialize=False, max_length=100, blank=True, verbose_name='Payment id')),
                ('status', models.CharField(default='NEW', max_length=30, verbose_name='status')),
                ('payment_method', models.CharField(default='', max_length=60, blank=True)),
                ('confidence_level', models.CharField(default='', verbose_name='Confidence level', max_length=30, editable=False)),
                ('amount_allocated', models.DecimalField(default=Decimal('0.00'), verbose_name='Amount Allocated', editable=False, max_digits=12, decimal_places=2)),
                ('amount_debited', models.DecimalField(default=Decimal('0.00'), verbose_name='Amount Debited', editable=False, max_digits=12, decimal_places=2)),
                ('amount_refunded', models.DecimalField(default=Decimal('0.00'), verbose_name='Amount Refunded', editable=False, max_digits=12, decimal_places=2)),
                ('amount_chargeback', models.DecimalField(default=Decimal('0.00'), verbose_name='Amount Changed back', editable=False, max_digits=12, decimal_places=2)),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='created')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='updated')),
            ],
            options={
                'ordering': ('payment_id',),
                'verbose_name': 'Payment',
                'verbose_name_plural': 'Payments',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='DocdataDirectDebitPayment',
            fields=[
                ('docdatapayment_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, on_delete=models.CASCADE, to='oscar_docdata.DocdataPayment')),
                ('holder_name', models.CharField(max_length=35)),
                ('holder_city', models.CharField(max_length=35)),
                ('holder_country_code', models.CharField(max_length=2, null=True, verbose_name='Country_code', blank=True)),
                ('iban', models.CharField(max_length=34)),
                ('bic', models.CharField(max_length=11)),
            ],
            options={
                'ordering': ('-created', '-updated'),
                'verbose_name': 'Direct Debit Payment',
                'verbose_name_plural': 'Derect Debit Payments',
            },
            bases=('oscar_docdata.docdatapayment',),
        ),
        migrations.AddField(
            model_name='docdatapayment',
            name='docdata_order',
            field=models.ForeignKey(related_name='payments', on_delete=models.deletion.PROTECT, to='oscar_docdata.DocdataOrder'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='docdatapayment',
            name='polymorphic_ctype',
            field=models.ForeignKey(related_name='polymorphic_oscar_docdata.docdatapayment_set+', on_delete=models.CASCADE, editable=False, to='contenttypes.ContentType', null=True),
            preserve_default=True,
        ),
    ]
|
edoburu/django-oscar-docdata
|
oscar_docdata/migrations/0001_initial.py
|
Python
|
apache-2.0
| 6,455
|
"""This module extends the generic mainline code to add
periodic query for availability of spiders and caches
the resutls of the query for use both other services.
"""
import datetime
import logging
import tornado.ioloop
from . import __api_version__
from async_actions import AsyncGetAndCacheSpiderMetadataForAllRepos
from ..tor_async_google_container_registry import AsyncGenerateContainerRegistryAccessToken
from config import Config
from .. import main
import request_handlers
_logger = logging.getLogger(__name__)
class Main(main.Main):
    """Service mainline: generates a container-registry access token, then
    periodically discovers spiders and refreshes the metadata cache.
    """

    @classmethod
    def description(cls):
        """Returns a human readable, one-paragraph service description."""
        return (
            'This service periodically queries for availability '
            'of spiders and updates the spider metadata cache '
            'as required.'
        )

    @classmethod
    def config_class(cls):
        """Returns the configuration class for this service."""
        return Config

    @classmethod
    def api_version(cls):
        """Returns the service's API version string."""
        return __api_version__

    @classmethod
    def handler_classes(cls):
        """Returns the tornado request handlers exposed by this service."""
        return [
            request_handlers.HealthRequestHandler,
        ]

    def listen(self, *args, **kwargs):
        """Kicks off token generation, then enters the tornado IO loop."""
        self._generate_google_container_registry_access_token()
        # this call to 'listen()' doesn't return
        main.Main.listen(self, *args, **kwargs)

    def _generate_google_container_registry_access_token(self):
        """Starts async generation of a container registry access token."""
        filename = self.config.google_container_registry_service_account_credentials_filename
        async_action = AsyncGenerateContainerRegistryAccessToken(filename)
        async_action.generate(self._on_access_token_gen_done)

    def _on_access_token_gen_done(self, google_container_registry_access_token, async_action):
        """Stores the generated token and starts the first discovery pass."""
        # this is only called if an access token has been generated
        assert google_container_registry_access_token is not None
        assert self.config.google_container_registry_access_token is None
        self.config.google_container_registry_access_token = google_container_registry_access_token
        self._discovery()

    def _discovery(self):
        """Runs one discover-and-cache pass unless shutdown is pending."""
        if self.process_targetted_for_termination:
            # A SIGTERM has been sent to the process by Kubernetes indicating
            # that the process is going to be terminated so let's not start
            # another sync in case the termination happens right in the middle
            # of the sync.
            _logger.info('process is targeted for termination - not starting and not rescheduling sync')
            return
        _logger.info('starting discovery and caching process')
        async_action = AsyncGetAndCacheSpiderMetadataForAllRepos(datetime.datetime.now())
        async_action.discover_and_cache(self._on_discover_and_cache_done)

    def _on_discover_and_cache_done(self, is_ok, async_action):
        """Logs the pass outcome and schedules the next discovery run."""
        # async_state carries the pass start time; see _discovery().
        duration = datetime.datetime.now() - async_action.async_state
        duration_in_seconds = duration.total_seconds()
        if is_ok:
            _logger.info(
                'successfully finished discovering and caching spiders - took %.0f seconds to complete',
                duration_in_seconds)
        else:
            _logger.error(
                'error discovering and caching spiders - failed after %.0f seconds',
                duration_in_seconds)
        self._schedule_discovery()

    def _schedule_discovery(self):
        """Schedules the next discovery pass on the tornado IO loop."""
        if not self.config.discovery_frequency_in_seconds:
            return
        if self.process_targetted_for_termination:
            # A SIGTERM has been sent to the process by Kubernetes indicating
            # that the process is going to be terminated so let's not start
            # another sync in case the termination happens right in the middle
            # of the sync.
            _logger.info('process is targeted for termination - not rescheduling sync')
            return
        tornado.ioloop.IOLoop.current().add_timeout(
            datetime.timedelta(0, self.config.discovery_frequency_in_seconds, 0),
            self._discovery)
        _logger.info(
            'spider discovery process scheduled to run in %d seconds',
            self.config.discovery_frequency_in_seconds)
|
simonsdave/cloudfeaster_infrastructure
|
cloudfeaster_services/discovery/main.py
|
Python
|
mit
| 4,090
|
import SASModels.sas_models as sas_models
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import sys, lmfit
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Simulation parameters for the 2-D spindle scattering model below.
sas_models.math.n_integration_cuts = 1  # coarse orientation integration for speed
I0 = 0.00002  # overall intensity scale factor applied to the model image
RL = 3000  # spindle size parameter - presumably Angstrom; confirm in sas_models
R0 = 600  # second spindle size parameter - presumably Angstrom; confirm
phi = 90  # orientation angle - presumably degrees; confirm sas_models convention
SLDspindle = 42.209e-6 #a-Fe2O3 at 1.3414 A
SLDmatrix = 9.459e-6 #H2O at 1.3414 A
sigRL = 0.0  # polydispersity width of RL - zero disables it; confirm meaning
B = 0.1 # T
Ms = 1642 # A/m
V = 4.6e-22  # particle volume (units must pair with Ms and k - confirm)
k = 1.3807e-23  # Boltzmann constant, J/K
T = 300  # temperature, K
MsVperKT = Ms*V/k/T  # multiplied by B to form the Langevin argument (see langevin_prob call)
def get_custom_cmap():
    """Build the custom colormap used for the 2-D intensity plot.

    Returns a ``LinearSegmentedColormap`` running black -> dark blue ->
    blue -> cyan -> green -> yellow -> red -> grey -> near-white, with
    bad (masked/NaN) values drawn black — suited to a log color scale.
    """
    def make_colormap(seq):
        """Return a LinearSegmentedColormap.

        seq: a sequence of (position, r, g, b) tuples.  Positions should
        be increasing and in the interval (0, 1).
        """
        cdict = {'red': [], 'green': [], 'blue': []}
        # Each channel entry is [pos, value_below, value_above]; using the
        # same value twice gives continuous (non-stepped) segments.
        for pos, r, g, b in seq:
            cdict['red'].append([pos, r, r])
            cdict['green'].append([pos, g, g])
            cdict['blue'].append([pos, b, b])
        return mcolors.LinearSegmentedColormap('CustomMap', cdict)

    # (The original also created an unused ColorConverter().to_rgb local;
    # removed.)
    custom_colors = [(0, 0, 0, 0),
                     (0.18, 0.05, 0.05, 0.2),
                     (0.28, 0, 0, 1),
                     (0.4, 0.7, 0.85, 0.9),
                     (0.45, 0, 0.75, 0),
                     (0.6, 1, 1, 0),
                     (0.75, 1, 0, 0),
                     (0.92, 0.6, 0.6, 0.6),
                     (1, 0.95, 0.95, 0.95)]
    custom_cmap = make_colormap(custom_colors)
    custom_cmap.set_bad(color='black')
    return custom_cmap
# Disabled diagnostic: plot the Langevin orientation probability over
# psi in [0, pi/2] for the chosen field strength B.  Flip the guard to
# True to inspect the distribution.
if False:
    psi_range = np.linspace(0, np.pi/2, 90)
    prob=[]
    for psi in psi_range:
        # third argument (0.) - presumably a reference angle; confirm
        # against sas_models.math.langevin_prob
        prob_val = sas_models.math.langevin_prob(psi, B*MsVperKT, 0.)
        prob.append(prob_val)
    fig, ax = plt.subplots()
    ax.plot(psi_range, prob)
    plt.show()
# Detector grid: pix_size x pix_size q-points spanning +/-0.005
# (1/Angstrom, matching the axis labels below).
pix_size = 100
qy = np.linspace(-0.005, 0.005, pix_size)
qz = np.linspace(-0.005, 0.005, pix_size)
# 2-D scattering image of the spindle model with a Langevin-weighted
# orientation distribution, scaled by the overall intensity factor I0.
Imodel = sas_models.spindle.get_2dimage_langevin(qy, qz, RL, R0, phi,\
                SLDspindle, SLDmatrix,\
                sigRL, B, MsVperKT)
Imodel = I0*Imodel
fig, ax = plt.subplots()
cmap = get_custom_cmap()
# Transposed so qy runs along x and qz along y; logarithmic color scale.
pcm = ax.pcolormesh(qy, qz, Imodel.T,\
                norm=mcolors.LogNorm(), cmap=cmap)
ax.set_xlabel('$q_y \, / \, \AA^{-1}$')
ax.set_ylabel('$q_z \, / \, \AA^{-1}$')
ax.set_xlim(min(qy), max(qy))
ax.set_ylim(min(qz), max(qz))
# Put the colorbar in its own axes so it doesn't shrink the map.
divider3 = make_axes_locatable(ax)
cax = divider3.append_axes('right', size="5%", pad=0.05)
cbar = fig.colorbar(pcm, cax=cax)
fig.tight_layout()
fig.savefig('Spindle.png')
plt.show()
|
DomiDre/SASModels
|
tests/sabrina_simulations.py
|
Python
|
gpl-3.0
| 2,566
|
import os
from gevent import spawn
from logging import INFO
from tuntap import Tun
from utils import create_logger
from net import VPNServerConnection
from config import VPNClientConfig
import traceback
client_logger = create_logger(name="PyVPN Client Logger", file=os.path.join(".", "client.log"), level=INFO)
class VPNClient(object):
args_config = ["tun_name"]
def __init__(self, **kwargs):
self.logger = client_logger
self.config = VPNClientConfig(path_to_config="./client.conf")
overrides = ( (k,v) for k,v in kwargs.iteritems() if k in VPNClient.args_config )
overrides = dict(overrides)
self.config.update(overrides)
def _forward_data_from_net(self):
self.logger.info("start forwarder from net")
while True:
self.tt.write_packet(self.net.read_packet())
def _forward_data_from_tun(self):
self.logger.info("start forwarder from tun")
while True:
self.net.write_packet(self.tt.read_packet())
def _connect_and_configure(self):
self.net = VPNServerConnection(host=self.config.server["host"], port=self.config.server["port"], app=self)
self.tt = Tun(name=self.config.tun_name)
self.tt.configure(ip=self.config.ip, mask=self.config.mask)
def start(self):
self._connect_and_configure()
self.logger.info("connect and configure ok")
g1 = spawn(self._forward_data_from_net)
g2 = spawn(self._forward_data_from_tun)
self.logger.info("client ok")
print "client ok"
g1.join()
g2.join()
if __name__ == "__main__":
client = None
try:
client = VPNClient(config="./client.conf", tun_name="tun0")
client.start()
except Exception as e:
client_logger.error("client failed: %s" % e)
client_logger.error(traceback.format_exc())
traceback.print_exc()
exit(-1)
|
mitin123/PyVPN
|
src/client.py
|
Python
|
apache-2.0
| 1,920
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the Vote, Activity, Campaign and
    Participation models for the event app.

    ``models`` below is South's frozen snapshot of the ORM at the time the
    migration was written; it includes models defined by other apps or
    earlier migrations (auth, contenttypes, event.event, upload) that are
    referenced here but are not created or deleted by this migration.
    """
    def forwards(self, orm):
        # Adding model 'Vote'
        db.create_table('event_vote', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('note', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True)),
            ('voter', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['auth.User'])),
            ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='votes', to=orm['contenttypes.ContentType'])),
            ('entity_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('status', self.gf('django.db.models.fields.CharField')(default='neutral', max_length=20)),
        ))
        db.send_create_signal('event', ['Vote'])
        # Adding model 'Activity'
        db.create_table('event_activity', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('description', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True)),
            ('create_user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='hostedActivities', to=orm['auth.User'])),
            ('create_time', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('start_time', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('end_time', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('vote_time', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ))
        db.send_create_signal('event', ['Activity'])
        # Adding model 'Campaign' (multi-table child of Activity)
        db.create_table('event_campaign', (
            ('activity_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['event.Activity'], unique=True, primary_key=True)),
            ('status', self.gf('django.db.models.fields.CharField')(default='preparing', max_length=20)),
        ))
        db.send_create_signal('event', ['Campaign'])
        # Adding model 'Participation' (through table for Activity.participants)
        db.create_table('event_participation', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('note', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True)),
            ('participant', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['auth.User'])),
            ('activity', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['event.Activity'])),
            ('status', self.gf('django.db.models.fields.CharField')(default='invited', max_length=20)),
        ))
        db.send_create_signal('event', ['Participation'])
    def backwards(self, orm):
        # Deleting model 'Vote'
        db.delete_table('event_vote')
        # Deleting model 'Activity'
        db.delete_table('event_activity')
        # Deleting model 'Campaign'
        db.delete_table('event_campaign')
        # Deleting model 'Participation'
        db.delete_table('event_participation')
    # Frozen ORM state used by South to build the `orm` object above.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'event.activity': {
            'Meta': {'object_name': 'Activity'},
            'create_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'create_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hostedActivities'", 'to': "orm['auth.User']"}),
            'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
            'end_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'participants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'activities'", 'symmetrical': 'False', 'through': "orm['event.Participation']", 'to': "orm['auth.User']"}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'vote_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
        },
        'event.campaign': {
            'Meta': {'object_name': 'Campaign', '_ormbases': ['event.Activity']},
            'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['event.Activity']", 'unique': 'True', 'primary_key': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'preparing'", 'max_length': '20'})
        },
        'event.event': {
            'Meta': {'object_name': 'Event'},
            'create_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'create_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'events'", 'to': "orm['auth.User']"}),
            'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
            'end_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'num_of_ppl': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'normal'", 'max_length': '20'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'event.participation': {
            'Meta': {'object_name': 'Participation'},
            'activity': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['event.Activity']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'note': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
            'participant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'invited'", 'max_length': '20'})
        },
        'event.vote': {
            'Meta': {'object_name': 'Vote'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['contenttypes.ContentType']"}),
            'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'note': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'neutral'", 'max_length': '20'}),
            'voter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"})
        },
        'upload.imageupload': {
            'Meta': {'object_name': 'ImageUpload'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['contenttypes.ContentType']"}),
            'create_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'create_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image_large': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'db_index': 'True'}),
            'image_large_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'image_large_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'image_medium': ('django.db.models.fields.files.ImageField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'image_medium_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'image_medium_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'image_small': ('django.db.models.fields.files.ImageField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'image_small_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'image_small_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'normal'", 'max_length': '20'}),
            'usage': ('django.db.models.fields.CharField', [], {'default': "'original'", 'max_length': '20'})
        }
    }
    complete_apps = ['event']
|
elin-moco/ffclub
|
ffclub/event/migrations/0002_added_activity_campaign_participation_vote_models.py
|
Python
|
bsd-3-clause
| 13,005
|
"Interactions with the Juju environment"
# Copyright 2013 Canonical Ltd.
#
# Authors:
# Charm Helpers Developers <juju@lists.ubuntu.com>
import os
import json
import yaml
import subprocess
import sys
import UserDict
from subprocess import CalledProcessError
CRITICAL = "CRITICAL"
ERROR = "ERROR"
WARNING = "WARNING"
INFO = "INFO"
DEBUG = "DEBUG"
MARKER = object()
# Module-level memoization store shared by every @cached function;
# flush() removes entries by substring match on the stringified key.
cache = {}


def cached(func):
    """Cache return values for multiple executions of func + args

    For example::

        @cached
        def unit_get(attribute):
            pass

        unit_get('test')

    will cache the result of unit_get + 'test' for future calls.
    """
    # Local import keeps the module's top-level import block unchanged.
    import functools

    @functools.wraps(func)  # preserve __name__/__doc__ of the wrapped func
    def wrapper(*args, **kwargs):
        global cache
        # str() of the (func, args, kwargs) triple is hashable even when
        # the arguments themselves are not.
        key = str((func, args, kwargs))
        try:
            return cache[key]
        except KeyError:
            res = func(*args, **kwargs)
            cache[key] = res
            return res
    return wrapper


def flush(key):
    """Flushes any entries from function cache where the
    key is found in the function+args """
    # Collect first, then delete - can't mutate a dict while iterating it.
    flush_list = []
    for item in cache:
        if key in item:
            flush_list.append(item)
    for item in flush_list:
        del cache[item]
def log(message, level=None):
    """Write a message to the juju log, optionally at a given severity."""
    command = ['juju-log']
    if level:
        command.extend(['-l', level])
    command.append(message)
    subprocess.call(command)
class Serializable(UserDict.IterableUserDict):
    """Wrapper, an object that can be serialized to yaml or json"""
    def __init__(self, obj):
        # wrap the object
        UserDict.IterableUserDict.__init__(self)
        self.data = obj
    def __getattr__(self, attr):
        # Only invoked for attributes not found via normal lookup.
        # See if this object has attribute.
        if attr in ("json", "yaml", "data"):
            return self.__dict__[attr]
        # Check for attribute in wrapped object; MARKER distinguishes
        # "attribute missing" from a legitimate None value.
        got = getattr(self.data, attr, MARKER)
        if got is not MARKER:
            return got
        # Proxy to the wrapped object via dict interface.
        try:
            return self.data[attr]
        except KeyError:
            raise AttributeError(attr)
    def __getstate__(self):
        # Pickle as a standard dictionary.
        return self.data
    def __setstate__(self, state):
        # Unpickle into our wrapper.
        self.data = state
    def json(self):
        """Serialize the object to json"""
        return json.dumps(self.data)
    def yaml(self):
        """Serialize the object to yaml"""
        return yaml.dump(self.data)
def execution_environment():
    """A convenient bundling of the current execution context"""
    context = {'conf': config()}
    # Relation details are only available inside a relation hook.
    if relation_id():
        context.update({
            'reltype': relation_type(),
            'relid': relation_id(),
            'rel': relation_get(),
        })
    context['unit'] = local_unit()
    context['rels'] = relations()
    context['env'] = os.environ
    return context
def in_relation_hook():
    """Determine whether we're running in a relation hook"""
    # juju exports JUJU_RELATION only for relation hooks.
    return os.environ.get('JUJU_RELATION') is not None
def relation_type():
    """The scope for the current relation hook (None outside one)."""
    environ = os.environ
    if 'JUJU_RELATION' in environ:
        return environ['JUJU_RELATION']
    return None
def relation_id():
    """The relation ID for the current relation hook (None outside one)."""
    environ = os.environ
    if 'JUJU_RELATION_ID' in environ:
        return environ['JUJU_RELATION_ID']
    return None
def local_unit():
    """Local unit ID (KeyError when not running under a juju hook)."""
    unit_name = os.environ['JUJU_UNIT_NAME']
    return unit_name


def remote_unit():
    """The remote unit for the current relation hook"""
    unit_name = os.environ['JUJU_REMOTE_UNIT']
    return unit_name


def service_name():
    """The name service group this unit belongs to"""
    # A unit name has the form '<service>/<number>'.
    service, _, _ = local_unit().partition('/')
    return service
def hook_name():
    """The name of the currently executing hook"""
    # Hooks are dispatched by executable name, so argv[0] is the hook.
    script_path = sys.argv[0]
    return os.path.basename(script_path)
class Config(dict):
    """A dictionary representation of the charm's config.yaml, with some
    extra features:
    - See which values in the dictionary have changed since the previous hook.
    - For values that have changed, see what the previous value was.
    - Store arbitrary data for use in a later hook.
    NOTE: Do not instantiate this object directly - instead call
    ``hookenv.config()``, which will return an instance of :class:`Config`.
    Example usage::
        >>> # inside a hook
        >>> from charmhelpers.core import hookenv
        >>> config = hookenv.config()
        >>> config['foo']
        'bar'
        >>> # store a new key/value for later use
        >>> config['mykey'] = 'myval'
        >>> # user runs `juju set mycharm foo=baz`
        >>> # now we're inside subsequent config-changed hook
        >>> config = hookenv.config()
        >>> config['foo']
        'baz'
        >>> # test to see if this val has changed since last hook
        >>> config.changed('foo')
        True
        >>> # what was the previous value?
        >>> config.previous('foo')
        'bar'
        >>> # keys/values that we add are preserved across hooks
        >>> config['mykey']
        'myval'
    """
    # File (relative to charm_dir()) where the config is persisted
    # between hook invocations.
    CONFIG_FILE_NAME = '.juju-persistent-config'
    def __init__(self, *args, **kw):
        super(Config, self).__init__(*args, **kw)
        # Hooks.execute() checks this flag and calls save() automatically
        # after a successful hook when it is True.
        self.implicit_save = True
        self._prev_dict = None
        self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
        if os.path.exists(self.path):
            self.load_previous()
    def __getitem__(self, key):
        """For regular dict lookups, check the current juju config first,
        then the previous (saved) copy. This ensures that user-saved values
        will be returned by a dict lookup.
        """
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            return (self._prev_dict or {})[key]
    def load_previous(self, path=None):
        """Load previous copy of config from disk.
        In normal usage you don't need to call this method directly - it
        is called automatically at object initialization.
        :param path:
            File path from which to load the previous config. If `None`,
            config is loaded from the default location. If `path` is
            specified, subsequent `save()` calls will write to the same
            path.
        """
        self.path = path or self.path
        with open(self.path) as f:
            self._prev_dict = json.load(f)
    def changed(self, key):
        """Return True if the current value for this key is different from
        the previous value.
        """
        # With no saved copy, every key counts as changed.
        if self._prev_dict is None:
            return True
        return self.previous(key) != self.get(key)
    def previous(self, key):
        """Return previous value for this key, or None if there
        is no previous value.
        """
        if self._prev_dict:
            return self._prev_dict.get(key)
        return None
    def save(self):
        """Save this config to disk.
        If the charm is using the :mod:`Services Framework <services.base>`
        or :meth:'@hook <Hooks.hook>' decorator, this
        is called automatically at the end of successful hook execution.
        Otherwise, it should be called directly by user code.
        To disable automatic saves, set ``implicit_save=False`` on this
        instance.
        """
        # Carry forward previously-saved keys not present in the current
        # config.  NOTE: iteritems() is Python 2 only, consistent with the
        # rest of this module (UserDict import, "except X, e" syntax).
        if self._prev_dict:
            for k, v in self._prev_dict.iteritems():
                if k not in self:
                    self[k] = v
        with open(self.path, 'w') as f:
            json.dump(self, f)
@cached
def config(scope=None):
    """Juju charm configuration: the whole config as a Config when scope
    is None, otherwise the raw value of the single requested option."""
    cmd = ['config-get']
    if scope is not None:
        cmd.append(scope)
    cmd.append('--format=json')
    try:
        config_data = json.loads(subprocess.check_output(cmd))
        if scope is None:
            return Config(config_data)
        return config_data
    except ValueError:
        # config-get produced no parseable JSON
        return None
@cached
def relation_get(attribute=None, unit=None, rid=None):
    """Get relation information.

    :param attribute: specific setting to read; all settings when None.
    :param unit: remote unit to read from (defaults to the current one).
    :param rid: relation ID (defaults to the current relation hook's).
    :returns: the JSON-decoded value, or None when relation-get emits no
        JSON or exits with status 2.
    """
    _args = ['relation-get', '--format=json']
    if rid:
        _args.append('-r')
        _args.append(rid)
    _args.append(attribute or '-')
    if unit:
        _args.append(unit)
    try:
        return json.loads(subprocess.check_output(_args))
    except ValueError:
        return None
    # Portable "except ... as e" form (the original used the Python-2-only
    # "except CalledProcessError, e" syntax; "as" works on py2.6+ and py3).
    except CalledProcessError as e:
        if e.returncode == 2:
            return None
        raise
def relation_set(relation_id=None, relation_settings=None, **kwargs):
    """Set relation information for the current unit.

    Settings with a None value are unset (``key=``); others are passed as
    ``key=value``.  kwargs are merged with relation_settings.
    """
    relation_settings = relation_settings if relation_settings else {}
    relation_cmd_line = ['relation-set']
    if relation_id is not None:
        relation_cmd_line.extend(('-r', relation_id))
    # list() both views so the concatenation also works on Python 3,
    # where dict.items() returns a view that does not support '+'.
    settings = list(relation_settings.items()) + list(kwargs.items())
    for k, v in settings:
        if v is None:
            relation_cmd_line.append('{}='.format(k))
        else:
            relation_cmd_line.append('{}={}'.format(k, v))
    subprocess.check_call(relation_cmd_line)
    # Flush cache of any relation-gets for local unit
    flush(local_unit())
@cached
def relation_ids(reltype=None):
    """A list of relation_ids for the given (or current) relation type;
    an empty list when no relation type can be determined."""
    reltype = reltype or relation_type()
    relid_cmd_line = ['relation-ids', '--format=json']
    if reltype is not None:
        relid_cmd_line.append(reltype)
        return json.loads(subprocess.check_output(relid_cmd_line)) or []
    # No relation type known (outside a relation hook, no arg): nothing to
    # query.  The original's trailing `return []` was unreachable dead code
    # after an unconditional return; guarding the subprocess call on
    # reltype restores the intended no-type fallback.
    return []
@cached
def related_units(relid=None):
    """A list of related units"""
    # Default to the relation ID of the current relation hook.
    relid = relid or relation_id()
    units_cmd_line = ['relation-list', '--format=json']
    if relid is not None:
        units_cmd_line.extend(('-r', relid))
    # relation-list prints JSON `null` when there are no units.
    return json.loads(subprocess.check_output(units_cmd_line)) or []
@cached
def relation_for_unit(unit=None, rid=None):
    """Get the json representation of a unit's relation"""
    unit = unit or remote_unit()
    relation = relation_get(unit=unit, rid=rid)
    # By convention, settings whose key ends in '-list' hold
    # space-separated values; split them into real lists.
    for key in relation:
        if key.endswith('-list'):
            relation[key] = relation[key].split()
    # Tag the data with the unit it came from.
    relation['__unit__'] = unit
    return relation
@cached
def relations_for_id(relid=None):
    """Get relations of a specific relation ID"""
    relation_data = []
    # NOTE(review): this fallback calls relation_ids(), which returns a
    # *list*, and then passes it to related_units()/relation_for_unit()
    # as if it were a single relation ID - that looks wrong (probably
    # meant relation_id()).  Confirm callers always pass an explicit relid.
    relid = relid or relation_ids()
    for unit in related_units(relid):
        unit_data = relation_for_unit(unit, relid)
        # Tag each entry with the relation ID it belongs to.
        unit_data['__relid__'] = relid
        relation_data.append(unit_data)
    return relation_data
@cached
def relations_of_type(reltype=None):
    """Get relations of a specific type"""
    relation_data = []
    reltype = reltype or relation_type()
    # Flatten every relation of the (current or given) type, tagging each
    # entry with the relation ID it came from.
    for relid in relation_ids(reltype):
        for relation in relations_for_id(relid):
            relation['__relid__'] = relid
            relation_data.append(relation)
    return relation_data
@cached
def relation_types():
    """Get a list of relation types supported by this charm.

    Reads the charm's metadata.yaml and collects the relation names
    declared under 'provides', 'requires' and 'peers'.
    """
    charmdir = os.environ.get('CHARM_DIR', '')
    # Context manager guarantees the file handle is closed even if
    # yaml parsing raises (the original leaked the handle on error).
    with open(os.path.join(charmdir, 'metadata.yaml')) as mdf:
        md = yaml.safe_load(mdf)
    rel_types = []
    for key in ('provides', 'requires', 'peers'):
        section = md.get(key)
        if section:
            rel_types.extend(section.keys())
    return rel_types
@cached
def relations():
    """Get a nested dictionary of relation data for all related units"""
    # Shape: {reltype: {relid: {unit_name: settings_dict}}}.  The local
    # unit's own settings are included alongside the remote units'.
    rels = {}
    for reltype in relation_types():
        relids = {}
        for relid in relation_ids(reltype):
            units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
            for unit in related_units(relid):
                reldata = relation_get(unit=unit, rid=relid)
                units[unit] = reldata
            relids[relid] = units
        rels[reltype] = relids
    return rels
@cached
def is_relation_made(relation, keys='private-address'):
    '''
    Determine whether a relation is established by checking for
    presence of key(s).  If a list of keys is provided, they
    must all be present for the relation to be identified as made
    '''
    # Accept a single key as a plain string for convenience.
    if isinstance(keys, str):
        keys = [keys]
    for r_id in relation_ids(relation):
        for unit in related_units(r_id):
            context = {}
            for k in keys:
                context[k] = relation_get(k, rid=r_id,
                                          unit=unit)
            # relation_get returns None for an absent setting; the
            # relation counts as made when one related unit has every
            # requested key set.
            if None not in context.values():
                return True
    return False
def open_port(port, protocol="TCP"):
"""Open a service network port"""
_args = ['open-port']
_args.append('{}/{}'.format(port, protocol))
subprocess.check_call(_args)
def close_port(port, protocol="TCP"):
"""Close a service network port"""
_args = ['close-port']
_args.append('{}/{}'.format(port, protocol))
subprocess.check_call(_args)
@cached
def unit_get(attribute):
    """Return the given attribute of the *local* unit via ``unit-get``.

    (The original docstring said "unit ID for the remote unit", but
    ``unit-get`` queries local-unit attributes such as 'private-address';
    see unit_private_ip below.)
    """
    _args = ['unit-get', '--format=json', attribute]
    try:
        return json.loads(subprocess.check_output(_args))
    except ValueError:
        # unit-get produced no parseable JSON
        return None
def unit_private_ip():
    """Get this unit's private IP address"""
    # Thin convenience wrapper over unit_get.
    return unit_get('private-address')
# Raised by Hooks.execute() when args[0] names a hook that was never
# registered.
class UnregisteredHookError(Exception):
    """Raised when an undefined hook is called"""
    pass
class Hooks(object):
    """A convenient handler for hook functions.
    Example::
        hooks = Hooks()
        # register a hook, taking its name from the function name
        @hooks.hook()
        def install():
            pass # your code here
        # register a hook, providing a custom hook name
        @hooks.hook("config-changed")
        def config_changed():
            pass # your code here
        if __name__ == "__main__":
            # execute a hook based on the name the program is called by
            hooks.execute(sys.argv)
    """
    def __init__(self):
        super(Hooks, self).__init__()
        # Maps hook name -> callable.
        self._hooks = {}
    def register(self, name, function):
        """Register a hook"""
        self._hooks[name] = function
    def execute(self, args):
        """Execute a registered hook based on args[0]"""
        hook_name = os.path.basename(args[0])
        if hook_name in self._hooks:
            self._hooks[hook_name]()
            # Persist the charm config after a successful hook run unless
            # the charm opted out via Config.implicit_save.
            cfg = config()
            if cfg.implicit_save:
                cfg.save()
        else:
            raise UnregisteredHookError(hook_name)
    def hook(self, *hook_names):
        """Decorator, registering them as hooks"""
        def wrapper(decorated):
            for hook_name in hook_names:
                self.register(hook_name, decorated)
            # for/else: the else clause runs when the loop finishes without
            # `break` - i.e. always here - so the function is additionally
            # registered under its own name (and its dashed variant).
            else:
                self.register(decorated.__name__, decorated)
                if '_' in decorated.__name__:
                    self.register(
                        decorated.__name__.replace('_', '-'), decorated)
            return decorated
        return wrapper
def charm_dir():
    """Return the root directory of the current charm (None when the
    CHARM_DIR environment variable is unset)."""
    return os.environ.get('CHARM_DIR', None)
|
chuckbutler/shoutcast-charm
|
lib/charmhelpers/core/hookenv.py
|
Python
|
mit
| 14,883
|
# pylint: disable=redefined-outer-name,protected-access
import pickle
from collections import OrderedDict
import pytest
import matplotlib.pyplot as plt
from dtocean_core.core import OrderedSim, Project
from dtocean_core.data import CoreMetaData
from dtocean_core.extensions import StrategyManager
from dtocean_core.strategies import Strategy
class MockStrategy(Strategy):
    # Minimal concrete Strategy: fills in the abstract interface with
    # no-ops so StrategyManager helpers can be exercised in isolation.
    @classmethod
    def get_name(cls):
        '''A class method for the common name of the strategy.
        Returns:
            str: A unique string
        '''
        return "Mock"
    def configure(self):
        '''The configure method is collect information required for executing
        the strategy.
        '''
        return
    def get_variables(self):
        '''The get_variables method returns the list of any variables that
        will be set by the strategy
        '''
        return None
    def execute(self, core, project):
        '''The execute method is used to execute the strategy. It should always
        take a Core and a Project class as the only inputs.
        '''
        return
@pytest.fixture()
def manager():
    # Fresh StrategyManager per test; construction takes no arguments.
    return StrategyManager()
def test_get_available(manager):
    # At least one strategy plugin must be discoverable.
    result = manager.get_available()
    assert len(result) > 0
def test_get_strategy(manager):
    # Every advertised strategy must be retrievable without raising; the
    # trailing `assert True` only marks that the loop completed.
    strategies = manager.get_available()
    for strategy_name in strategies:
        manager.get_strategy(strategy_name)
    assert True
def test_get_level_values_project(mocker, manager):
    """get_level_values with no strategy should return the per-simulation
    level values reported by the core.

    The original asserted ``level_values == level_values`` after rebinding
    the name to the call's result - a tautology that could never fail.
    The result is now compared against the data the mocked core was
    primed with.
    """
    expected = OrderedDict([
        ('Default',
         OrderedDict([('hydrodynamics global output',
                       20000000.0),
                      ('electrical sub-systems global output',
                       23065408.054377057),
                      ('mooring and foundations global output',
                       28263096.852039404),
                      ('installation global output',
                       31673780.41058045),
                      ('operations and maintenance global output',
                       31673780.41058045)])),
        ('Default Clone 1',
         OrderedDict([('hydrodynamics global output',
                       20000000.0),
                      ('electrical sub-systems global output',
                       23065408.054377057),
                      ('mooring and foundations global output',
                       119195335.02247664),
                      ('installation global output',
                       123875364.39132734),
                      ('operations and maintenance global output',
                       123875364.39132734)]))])
    core = mocker.Mock()
    core.get_level_values.return_value = expected
    project = Project("mock")
    project.add_simulation(OrderedSim("Default"))
    project.add_simulation(OrderedSim("Default Clone 1"))
    active_levels = ['Hydrodynamics',
                     'Electrical Sub-Systems',
                     'Mooring and Foundations',
                     'Installation',
                     'Operations and Maintenance']
    mocker.patch.object(manager._module_menu,
                        'get_active',
                        return_value=active_levels,
                        autospec=True)
    result = manager.get_level_values(core, project, None)
    # NOTE(review): assumes get_level_values passes the core data through
    # unmodified when every simulation is included - confirm.
    assert result == expected
def test_get_level_values_strategy(mocker, manager):
    """With a strategy given, get_level_values is restricted to the
    simulations recorded by that strategy."""
    # Expected values: only the two simulations recorded by the strategy,
    # not 'Default Clone 2'.
    expected = OrderedDict([
        ('Default',
         OrderedDict([('hydrodynamics global output',
                       20000000.0),
                      ('electrical sub-systems global output',
                       23065408.054377057),
                      ('mooring and foundations global output',
                       28263096.852039404),
                      ('installation global output',
                       31673780.41058045),
                      ('operations and maintenance global output',
                       31673780.41058045)])),
        ('Default Clone 1',
         OrderedDict([('hydrodynamics global output',
                       20000000.0),
                      ('electrical sub-systems global output',
                       23065408.054377057),
                      ('mooring and foundations global output',
                       119195335.02247664),
                      ('installation global output',
                       123875364.39132734),
                      ('operations and maintenance global output',
                       123875364.39132734)]))])
    core = mocker.Mock()
    core.get_level_values.return_value = expected
    project = Project("mock")
    project.add_simulation(OrderedSim("Default"))
    project.add_simulation(OrderedSim("Default Clone 1"))
    project.add_simulation(OrderedSim("Default Clone 2"))
    strategy = MockStrategy()
    strategy.add_simulation_title('Default')
    strategy.add_simulation_title('Default Clone 1')
    active_levels = ['Hydrodynamics',
                     'Electrical Sub-Systems',
                     'Mooring and Foundations',
                     'Installation',
                     'Operations and Maintenance']
    mocker.patch.object(manager._module_menu,
                        'get_active',
                        return_value=active_levels,
                        autospec=True)
    # BUG FIX: the original rebound `level_values` to the result and then
    # asserted `level_values == level_values`, which is always true.
    result = manager.get_level_values(core,
                                      project,
                                      None,
                                      strategy=strategy)
    assert result == expected
def test_get_level_values_sim_titles(mocker, manager):
    """With sim_titles given, get_level_values is restricted to the named
    simulations."""
    # Expected values: only the two requested simulation titles.
    expected = OrderedDict([
        ('Default',
         OrderedDict([('hydrodynamics global output',
                       20000000.0),
                      ('electrical sub-systems global output',
                       23065408.054377057),
                      ('mooring and foundations global output',
                       28263096.852039404),
                      ('installation global output',
                       31673780.41058045),
                      ('operations and maintenance global output',
                       31673780.41058045)])),
        ('Default Clone 1',
         OrderedDict([('hydrodynamics global output',
                       20000000.0),
                      ('electrical sub-systems global output',
                       23065408.054377057),
                      ('mooring and foundations global output',
                       119195335.02247664),
                      ('installation global output',
                       123875364.39132734),
                      ('operations and maintenance global output',
                       123875364.39132734)]))])
    core = mocker.Mock()
    core.get_level_values.return_value = expected
    project = Project("mock")
    project.add_simulation(OrderedSim("Default"))
    project.add_simulation(OrderedSim("Default Clone 1"))
    project.add_simulation(OrderedSim("Default Clone 2"))
    sim_titles = ["Default", "Default Clone 1"]
    active_levels = ['Hydrodynamics',
                     'Electrical Sub-Systems',
                     'Mooring and Foundations',
                     'Installation',
                     'Operations and Maintenance']
    mocker.patch.object(manager._module_menu,
                        'get_active',
                        return_value=active_levels,
                        autospec=True)
    # BUG FIX: the original rebound `level_values` to the result and then
    # asserted `level_values == level_values`, which is always true.
    result = manager.get_level_values(core,
                                      project,
                                      None,
                                      sim_titles=sim_titles)
    assert result == expected
def test_get_level_values_df(mocker, manager):
    # get_level_values itself is patched out, so this exercises only the
    # DataFrame conversion performed by get_level_values_df.
    level_values = OrderedDict([
        ('Default',
         OrderedDict([('hydrodynamics global output',
                       20000000.0),
                      ('electrical sub-systems global output',
                       23065408.054377057),
                      ('mooring and foundations global output',
                       28263096.852039404),
                      ('installation global output',
                       31673780.41058045),
                      ('operations and maintenance global output',
                       31673780.41058045)])),
        ('Default Clone 1',
         OrderedDict([('hydrodynamics global output',
                       20000000.0),
                      ('electrical sub-systems global output',
                       23065408.054377057),
                      ('mooring and foundations global output',
                       119195335.02247664),
                      ('installation global output',
                       123875364.39132734),
                      ('operations and maintenance global output',
                       123875364.39132734)]))])
    completed_levels = ['Hydrodynamics',
                        'Electrical Sub-Systems',
                        'Mooring and Foundations',
                        'Installation',
                        'Operations and Maintenance']
    meta = CoreMetaData({"identifier": "test",
                         "structure": "test",
                         "title": "test"})
    mocker.patch.object(manager,
                        'get_level_values',
                        return_value=level_values,
                        autospec=True)
    mocker.patch.object(manager._module_menu,
                        'get_completed',
                        return_value=completed_levels,
                        autospec=True)
    core = mocker.Mock()
    core.get_metadata.return_value = meta
    df = manager.get_level_values_df(core,
                                     None,
                                     None)
    # Rows for both simulations must appear in the resulting frame.
    assert set(df['Simulation Name'].values) == \
        set(['Default', 'Default Clone 1'])
def test_get_level_values_plot(mocker, manager):
    # get_level_values is patched out, so this exercises only the plotting
    # path: one figure with one legend entry per simulation.
    level_values = OrderedDict([
        ('Default',
         OrderedDict([('hydrodynamics global output',
                       20000000.0),
                      ('electrical sub-systems global output',
                       23065408.054377057),
                      ('mooring and foundations global output',
                       28263096.852039404),
                      ('installation global output',
                       31673780.41058045),
                      ('operations and maintenance global output',
                       31673780.41058045)])),
        ('Default Clone 1',
         OrderedDict([('hydrodynamics global output',
                       20000000.0),
                      ('electrical sub-systems global output',
                       23065408.054377057),
                      ('mooring and foundations global output',
                       119195335.02247664),
                      ('installation global output',
                       123875364.39132734),
                      ('operations and maintenance global output',
                       123875364.39132734)]))])
    completed_levels = ['Hydrodynamics',
                        'Electrical Sub-Systems',
                        'Mooring and Foundations',
                        'Installation',
                        'Operations and Maintenance']
    meta = CoreMetaData({"identifier": "test",
                         "structure": "test",
                         "title": "test"})
    mocker.patch.object(manager,
                        'get_level_values',
                        return_value=level_values,
                        autospec=True)
    mocker.patch.object(manager._module_menu,
                        'get_completed',
                        return_value=completed_levels,
                        autospec=True)
    core = mocker.Mock()
    core.get_metadata.return_value = meta
    manager.get_level_values_plot(core,
                                  None,
                                  None)
    assert len(plt.get_fignums()) == 1
    ax = plt.gca()
    _, labels = ax.get_legend_handles_labels()
    # Two simulations -> two plotted lines in the legend.
    assert len(labels) == 2
    plt.close("all")
def test_get_level_values_plot_max_lines(mocker, manager):
    # Same setup as test_get_level_values_plot, but max_lines=1 should cap
    # the number of plotted series at one.
    level_values = OrderedDict([
        ('Default',
         OrderedDict([('hydrodynamics global output',
                       20000000.0),
                      ('electrical sub-systems global output',
                       23065408.054377057),
                      ('mooring and foundations global output',
                       28263096.852039404),
                      ('installation global output',
                       31673780.41058045),
                      ('operations and maintenance global output',
                       31673780.41058045)])),
        ('Default Clone 1',
         OrderedDict([('hydrodynamics global output',
                       20000000.0),
                      ('electrical sub-systems global output',
                       23065408.054377057),
                      ('mooring and foundations global output',
                       119195335.02247664),
                      ('installation global output',
                       123875364.39132734),
                      ('operations and maintenance global output',
                       123875364.39132734)]))])
    completed_levels = ['Hydrodynamics',
                        'Electrical Sub-Systems',
                        'Mooring and Foundations',
                        'Installation',
                        'Operations and Maintenance']
    meta = CoreMetaData({"identifier": "test",
                         "structure": "test",
                         "title": "test"})
    mocker.patch.object(manager,
                        'get_level_values',
                        return_value=level_values,
                        autospec=True)
    mocker.patch.object(manager._module_menu,
                        'get_completed',
                        return_value=completed_levels,
                        autospec=True)
    core = mocker.Mock()
    core.get_metadata.return_value = meta
    manager.get_level_values_plot(core,
                                  None,
                                  None,
                                  max_lines=1)
    assert len(plt.get_fignums()) == 1
    ax = plt.gca()
    _, labels = ax.get_legend_handles_labels()
    # max_lines=1 caps the legend at a single entry.
    assert len(labels) == 1
    plt.close("all")
def test_load_strategy(mocker, manager):
    # The pickled dump is produced by _get_dump_dict, so it contains
    # simulation titles and no project argument is required.
    strategy = MockStrategy()
    mocker.patch.object(manager,
                        'get_strategy',
                        return_value=strategy,
                        autospec=True)
    mocker.patch('dtocean_core.extensions.os.path.isfile',
                 return_value=True,
                 autospec=True)
    test_strategy = MockStrategy()
    test_strategy.add_simulation_title('Default')
    test_strategy.add_simulation_title('Default Clone 1')
    mock_stg_dict = manager._get_dump_dict(test_strategy)
    mocker.patch('dtocean_core.extensions.open',
                 mocker.mock_open(read_data=pickle.dumps(mock_stg_dict, -1)))
    result = manager.load_strategy("mock.pkl")
    assert isinstance(result, MockStrategy)
    # load_strategy populates the strategy instance that get_strategy
    # returned, so inspect `strategy` directly.
    assert set(strategy._sim_record) == set(['Default', 'Default Clone 1'])
def test_load_strategy_no_version(mocker, manager):
    # A dump without version information requires a project argument;
    # loading without one must raise ValueError.
    strategy = MockStrategy()
    mocker.patch.object(manager,
                        'get_strategy',
                        return_value=strategy,
                        autospec=True)
    mocker.patch('dtocean_core.extensions.os.path.isfile',
                 return_value=True,
                 autospec=True)
    mock_stg_dict = {"name": "basic",
                     "sim_record": ['Default', 'Default Clone 1'],
                     "config": None,
                     "sim_details": None}
    mocker.patch('dtocean_core.extensions.open',
                 mocker.mock_open(read_data=pickle.dumps(mock_stg_dict, -1)))
    with pytest.raises(ValueError) as excinfo:
        manager.load_strategy("mock.pkl")
    assert "project object is required" in str(excinfo.value)
def test_load_strategy_old(mocker, manager):
    # Old-style dumps store simulation indexes; with a project supplied,
    # the indexes are mapped back to simulation titles.
    strategy = MockStrategy()
    mocker.patch.object(manager,
                        'get_strategy',
                        return_value=strategy,
                        autospec=True)
    mocker.patch('dtocean_core.extensions.os.path.isfile',
                 return_value=True,
                 autospec=True)
    mock_stg_dict = {"name": "basic",
                     "sim_record": [0, 1],
                     "config": None,
                     "sim_details": None}
    mocker.patch('dtocean_core.extensions.open',
                 mocker.mock_open(read_data=pickle.dumps(mock_stg_dict, -1)))
    project = Project("mock")
    project.add_simulation(OrderedSim("Default"))
    project.add_simulation(OrderedSim("Default Clone 1"))
    result = manager.load_strategy("mock.pkl", project)
    assert isinstance(result, MockStrategy)
    assert set(strategy._sim_record) == set(['Default', 'Default Clone 1'])
|
DTOcean/dtocean-core
|
tests/test_extensions_strategy.py
|
Python
|
gpl-3.0
| 18,979
|
import re
import os
from lxml import etree
from .utils import *
from .namespaces import NAMESPACES
# Document-tree levels an XSL command may target, and the supported
# run-level meta commands.
CONTEXTS = ('root', 'body', 'p0', 'p', 'r', 't', 'tbl', 'tr', 'tc')
META_COMMANDS = ('up', 'prev', 'next', 'cloneprev', 'clonenext', 'delete')
# Default (namespace, context) each command attaches to when no explicit
# "context:" prefix is given in the template text.
DEFAULT_CMD_TO_CONTEXT_MAPPING = {
    'for-each': ('w', 'p'),  # xsl::for-each select=...
    'choose': ('w', 'p'),  # xsl::choose
    'when': ('xsl', 'choose'),  # xsl::when test=...
    'otherwise': ('xsl', 'choose'),  # xsl::otherwise
    'if': ('w', 'p'),  # xsl::if test=...
    'sort': ('w', 'p'),  # xsl::sort select=...
    'value-of': ('w', 't'),  # xsl::value-of select=...
    'text': ('w', 't'),  # xsl::text ...
    'meta': ('w', 'r'),  # xsl::meta up|prev|next|cloneprev|clonenext|delete
}
class XslError(Exception):
    """Base class for all errors raised by this module."""
    pass
class ParseError(XslError):
    """Raised when embedded XSL command text cannot be parsed."""
    pass
class ElementNotFound(XslError):
    """Raised when a required document element cannot be located."""
    pass
class XslCommand(object):
    """A single XSL command parsed from embedded template text.

    Attributes:
        context: explicit target context (a member of CONTEXTS) or None
        cmd: command name, a key of DEFAULT_CMD_TO_CONTEXT_MAPPING
        text: literal payload for text-like commands, or None
        meta_commands: meta commands, stored in stack (reversed) order
        options: single {option_name: expression} mapping, or {}
    """

    def __init__(self, xsl):
        self.parse(xsl)

    def __str__(self):
        return "xsl:%s:%s %s" % (self.context or '', self.cmd, self.meta_commands or self.text or self.options)

    def parse(self, xsl='text'):
        """Parse *xsl* into context, command, options and payload.

        Raises ParseError on unknown commands, contexts or meta commands.

        TODO: add double-quoted string literals that allows for escaped
        double quotes
        https://gist.github.com/prathe/2439752 or
        http://www.metaltoad.com/blog/regex-quoted-string-escapable-quotes
        """
        # Separate the command word from the option/payload text.
        try:
            cmd_text, option_text = xsl.split(None, 1)
        except ValueError:
            cmd_text = xsl
            option_text = ''
        # An optional "context:" prefix selects the target element level.
        try:
            context, cmd = cmd_text.strip().lower().split(':', 1)
        except ValueError:
            cmd = cmd_text.lower()
            context = None
        if cmd not in DEFAULT_CMD_TO_CONTEXT_MAPPING:
            raise ParseError("unknown command %s" % cmd)
        if context and context not in CONTEXTS:
            raise ParseError("unknown context %s" % context)
        self.context = context
        self.cmd = cmd
        self.text = None
        self.meta_commands = []
        self.options = {}
        try:
            if cmd in ('choose', 'text', 'meta'):
                # These commands never carry name=value options; force the
                # fall-through to the text/meta handling below.
                raise ValueError()
            option_name, expr = option_text.split('=', 1)
            option_name = option_name.strip().lower()
            expr = unescape(expr).strip("'").strip('"').strip()
            self.options = {option_name: expr}
        except ValueError:
            text = unescape(option_text)
            if cmd == 'meta':
                for mc in filter(lambda c: c, map(lambda c: c.strip(), text.lower().split(';'))):
                    if mc in META_COMMANDS:
                        # store in stack order
                        self.meta_commands = [mc] + self.meta_commands
                    else:
                        # BUG FIX: the original interpolated self.text,
                        # which is always None at this point; report the
                        # offending meta command instead.
                        raise ParseError("unknown meta command %s" % mc)
            else:
                self.text = text
class XslElement(LoggerMixin):
    """The set of XSL commands embedded in a single w:r run.

    Commands are parsed out of the run's first w:t text node and can
    later be rendered into the surrounding document tree as real XSL
    elements via :meth:`render`.
    """
    namespaces = NAMESPACES
    w_ns = 'w'
    xsl_ns = 'xsl'
    def __init__(self, r, logger=None):
        self.commands = []
        self.run = r
        self.parse()
        self.logger = logger
    def parse(self):
        """Extract XslCommand objects from the run's first w:t node and
        blank out that node's text."""
        def remove_junk(xsl):
            # Normalise to ASCII, strip whitespace/newlines and
            # canonicalise the "xsl:" prefix spelling.
            # BUG FIX: the original passed the flag bitmask as re.sub's
            # positional *count* argument, so the flags were silently
            # ignored; they are now passed via flags= as intended.
            xsl = unidecode(xsl)
            xsl = re.sub(r"^[\t\s]*", "", xsl,
                         flags=re.DOTALL | re.MULTILINE | re.UNICODE)
            xsl = os.linesep.join([s for s in xsl.splitlines() if s])
            xsl = re.sub(r"\n", "", xsl,
                         flags=re.DOTALL | re.MULTILINE | re.UNICODE)
            xsl = re.sub(r"\r", "", xsl,
                         flags=re.DOTALL | re.MULTILINE | re.UNICODE)
            xsl = re.sub(r"xsl\s*:", "xsl:", xsl,
                         flags=re.DOTALL | re.MULTILINE | re.UNICODE | re.IGNORECASE)
            xsl = re.sub(r"xsl:\s*", "xsl:", xsl,
                         flags=re.DOTALL | re.MULTILINE | re.UNICODE | re.IGNORECASE)
            return xsl
        t = self.run.xpath('.//w:t', namespaces=self.namespaces)[0]
        text = remove_junk(unicode(t.text))
        for xsl in filter(lambda xsl: xsl.strip(), text.split('xsl:')):
            try:
                cmd = XslCommand(xsl)
            except ParseError as e:
                self.error("%s ignore invalid XSL %s: %s" % (self.__class__.__name__, xsl, e))
            else:
                # store commands in stack order
                self.commands = [cmd] + self.commands
        # clean w:t
        t.text = ''
    @property
    def xsl(self):
        """Returns content as XSL text"""
        return ' '.join([str(cmd) for cmd in self.commands])
    def __str__(self):
        return self.xsl
    def __repr__(self):
        return "<%s %s>" % (self.__class__.__name__, self.xsl)
    def hascontext(self, e, ns, context):
        """Return True if *e* has the tag {ns}context."""
        return str(e.tag) == '{%s}%s' % (self.namespaces[ns], context)
    def getcontext(self, e, ns, context, raise_if_not_found=True):
        """Walk up from *e* (inclusive) to the first ns:context ancestor."""
        f = e
        while not self.hascontext(f, ns, context):
            f = f.getparent()
            if f is None:
                break
        if f is not None:
            return f
        elif raise_if_not_found:
            raise ElementNotFound("could not find %s:%s element in %s" % (ns, context, e))
    def root(self, e):
        """Returns root node"""
        while e.getparent() is not None:
            e = e.getparent()
        return e
    def body(self, e):
        """Returns top level w:body node"""
        return self.getcontext(e, self.w_ns, 'body')
    def p0(self, e):
        """Returns top level w:p node"""
        body = self.body(e)
        p = self.p(e)
        if body == p.getparent():
            return p
        else:
            raise ElementNotFound("could not find top level w:p element in %s" % e)
    def _w_x(self, e, x):
        """Return the nearest w:<x> node: an ancestor if one exists,
        otherwise the first descendant."""
        d = self.getcontext(e, self.w_ns, x, raise_if_not_found=False)
        f = e.xpath('.//w:%s' % x, namespaces=self.namespaces)
        if d is not None:
            return d
        elif f:
            return f[0]
        else:
            raise ElementNotFound("could not find w:%s element in %s" % (x, e))
    def t(self, e):
        """Returns w:t node"""
        return self._w_x(e, 't')
    def r(self, e):
        """Returns w:r node"""
        return self._w_x(e, 'r')
    def p(self, e):
        """Returns w:p node"""
        return self._w_x(e, 'p')
    def tc(self, e):
        """Returns w:tc node"""
        return self._w_x(e, 'tc')
    def tr(self, e):
        """Returns w:tr node"""
        return self._w_x(e, 'tr')
    def tbl(self, e):
        """Returns w:tbl node"""
        return self._w_x(e, 'tbl')
    def render(self, current):
        """Replay the parsed commands, mutating the tree around *current*."""
        def append(e, qn, options={}):
            # Append a new element as the last child of e.
            return etree.SubElement(e, qn, **options)
        def wrap(e, qn, options={}):
            # Wrap e in a new element, preserving its position in its parent.
            f = e.makeelement(qn, options)
            p = e.getparent()
            i = p.index(e)
            p.remove(e)
            f.append(e)
            p.insert(i, f)
            return f
        def clone(e):
            return etree.fromstring(etree.tostring(e))
        def cloneprev(e):
            # Insert a copy of e before it (one position earlier if possible).
            c = clone(e)
            p = e.getparent()
            i = p.index(e)
            if i > 0:
                i -= 1
            p.insert(i, c)
            return c
        def clonenext(e):
            # Insert a copy of e immediately after it.
            c = clone(e)
            p = e.getparent()
            i = p.index(e)
            p.insert(i+1, c)
            return c
        ns, context = self.w_ns, None
        self.debug("render %s %s with %s" % (id(current), current.xpath('name()'), self))
        while self.commands:
            cmd = self.commands.pop()
            # initialize namespace and context
            if context is None and ns == self.w_ns:
                ns, context = DEFAULT_CMD_TO_CONTEXT_MAPPING.get(cmd.cmd, current.xpath('name()').split(':'))
                current = getattr(self, context)(current)
                self.debug("initialize context to %s %s" % (id(current), current.xpath('name()')))
            # switch to required context
            if cmd.context and cmd.context != context:
                current = getattr(self, cmd.context)(current)
                ns, context = current.xpath('name()').split(':')
                self.debug("changed context to %s %s" % (id(current), current.xpath('name()')))
            if cmd.cmd == 'meta':
                while cmd.meta_commands:
                    mc = cmd.meta_commands.pop()
                    # BUG FIX: lxml elements with no children are falsy, so
                    # truth-testing getparent() could skip a valid childless
                    # parent; compare against None instead.
                    if mc == 'up' and current.getparent() is not None:
                        current = current.getparent()
                        self.debug("meta up to %s %s" % (id(current), current.xpath('name()')))
                    elif mc == 'prev':
                        parent = current.getparent()
                        i = parent.index(current)
                        if i > 0:
                            current = parent[i-1]
                        self.debug("meta prev to %s %s" % (id(current), current.xpath('name()')))
                    elif mc == 'next':
                        parent = current.getparent()
                        i = parent.index(current)
                        if len(parent) > i+1:
                            current = parent[i+1]
                        self.debug("meta next to %s %s" % (id(current), current.xpath('name()')))
                    elif mc == 'cloneprev':
                        cloneprev(current)
                        self.debug("meta cloneprev from %s %s" % (id(current), current.xpath('name()')))
                    elif mc == 'clonenext':
                        clonenext(current)
                        self.debug("meta clonenext from %s %s" % (id(current), current.xpath('name()')))
                    elif mc == 'delete':
                        parent = current.getparent()
                        parent.remove(current)
                        current = parent
                        self.debug("meta delete and goto parent %s %s" % (id(current), current.xpath('name()')))
            elif cmd.cmd in ('text', 'value-of'):
                current = append(current, etree.QName(self.namespaces[self.xsl_ns], cmd.cmd), cmd.options)
                current.text = cmd.text
                self.debug("append %s %s %s" % (id(current), current.xpath('name()'), cmd))
            else:
                wrap(current, etree.QName(self.namespaces[self.xsl_ns], cmd.cmd), cmd.options)
                self.debug("wrap %s %s %s" % (id(current), current.xpath('name()'), cmd))
            # fix current namespace and context
            ns, context = current.xpath('name()').split(':')
            # clear last cmd
            cmd = None
|
backbohne/docx-xslt
|
docxxslt/xsl.py
|
Python
|
mit
| 10,619
|
#!/usr/bin/python
"""
Make single page versions of the documentation for release and
conversion into man pages etc.
"""
import os
import re
from datetime import datetime
# Source directory of the per-topic docs and the combined output file.
docpath = "docs/content"
outfile = "MANUAL.md"
# Order to add docs segments to make outfile
docs = [
    "about.md",
    "install.md",
    "docs.md",
    "drive.md",
    "s3.md",
    "swift.md",
    "dropbox.md",
    "googlecloudstorage.md",
    "local.md",
    "changelog.md",
    "bugs.md",
    "faq.md",
    "licence.md",
    "authors.md",
    "contact.md",
]
# Docs which aren't made into outfile
ignore_docs = [
    "downloads.md",
    "privacy.md",
]
def read_doc(doc):
    """Return the body of *doc* (under docpath) with front matter
    stripped, icons removed and site-relative links made absolute."""
    with open(os.path.join(docpath, doc)) as handle:
        raw = handle.read()
    pieces = raw.split("---\n", 2)
    if len(pieces) != 3:
        raise ValueError("Couldn't find --- markers: found %d parts" % len(pieces))
    body = pieces[2].strip() + "\n\n"
    # Strip font-awesome icon markup.
    body = re.sub(r'<i class="fa.*?</i>\s*', "", body)
    # Turn site-relative links (/...) into absolute rclone.org links.
    body = re.sub(r'\((\/.*?\/)\)', r"(http://rclone.org\1)", body)
    return body
def check_docs(docpath):
    """Check all the docs are in docpath"""
    # Compare the .md files on disk (minus the ignored ones) against the
    # ordered docs list; any mismatch in either direction is fatal.
    files = set(f for f in os.listdir(docpath) if f.endswith(".md"))
    files -= set(ignore_docs)
    docs_set = set(docs)
    if files == docs_set:
        return
    print "Files on disk but not in docs variable: %s" % ", ".join(files - docs_set)
    print "Files in docs variable but not on disk: %s" % ", ".join(docs_set - files)
    raise ValueError("Missing files")
def main():
    """Concatenate all docs into the output file, prefixed with a
    pandoc-style man-page title header."""
    check_docs(docpath)
    with open(outfile, "w") as out:
        # %% is a literal % in the output; %s receives today's date.
        out.write("""\
%% rclone(1) User Manual
%% Nick Craig-Wood
%% %s
""" % datetime.now().strftime("%b %d, %Y"))
        for doc in docs:
            out.write(read_doc(doc))
    print "Written '%s'" % outfile
if __name__ == "__main__":
    main()
|
X1011/rclone
|
make_manual.py
|
Python
|
mit
| 1,945
|
"""
The structure of Expression Tree is a binary tree to evaluate certain expressions. All leaves of the Expression Tree
have an number string value. All non-leaves of the Expression Tree have an operator string value.
Now, given an expression array, build the expression tree of this expression, return the root of this expression tree.
Example
For the expression (2*6-(23+7)/(1+2)) (which can be represented by ["2" "*" "6" "-" "(" "23" "+" "7" ")" "/" "(" "1" "+"
"2" ")"]). The expression tree will be like
[ - ]
/ \
[ * ] [ / ]
/ \ / \
[ 2 ] [ 6 ] [ + ] [ + ]
/ \ / \
[ 23 ][ 7 ] [ 1 ] [ 2 ] .
After building the tree, you just need to return root node [-].
"""
__author__ = 'Daniel'
class ExpressionTreeNode:
    """Binary-tree node holding an operator or operand symbol."""
    def __init__(self, symbol):
        self.symbol = symbol
        # Children start unset; they are wired up while folding postfix
        # tokens into a tree.
        self.left = None
        self.right = None
class Solution:
    def build(self, expression):
        """
        Build an expression tree from an infix token list.

        :param expression: A string list
        :return: The root of expression tree
        """
        # Convert infix tokens to postfix, then fold postfix into a tree.
        post = self.infix2postfix(expression)
        tree_node = self.postfix2tree(post)
        return tree_node
    def infix2postfix(self, expression):
        """
        Shunting-yard conversion from infix to postfix (RPN) order.

        :param expression: list of operand/operator/parenthesis tokens
        :return: token list in postfix order
        """
        post = []
        op_stk = []
        for elt in expression:
            if elt.isdigit():
                post.append(elt)
            elif elt == "(":
                op_stk.append(elt)
            elif elt == ")":
                # Pop operators back to the matching "(", then discard it.
                while op_stk and op_stk[-1] != "(":
                    post.append(op_stk.pop())
                op_stk.pop()
            else:
                while op_stk and self.precedence(op_stk[-1]) >= self.precedence(
                        elt):  # notice equal for the order of operators of same precedence.
                    post.append(op_stk.pop())
                op_stk.append(elt)
        while op_stk:
            post.append(op_stk.pop())
        return post
    def postfix2tree(self, post):
        # Fold postfix tokens into a tree: operands push leaves; each
        # operator pops its right then left child.
        tree_stk = []
        for elt in post:
            if elt.isdigit():
                tree_stk.append(ExpressionTreeNode(elt))
            else:
                pi = ExpressionTreeNode(elt)
                pi.right = tree_stk.pop()
                pi.left = tree_stk.pop()
                tree_stk.append(pi)
        try:
            return tree_stk.pop()
        except IndexError:
            # Empty expression -> no tree.
            return None
    def precedence(self, elt):
        # Parentheses rank lowest so they never get popped by operators.
        if elt in ("(", ")"):
            return 0
        if elt in ("+", "-"):
            return 1
        if elt in ("*", "/"):
            return 2
        return 3
if __name__ == "__main__":
    # Smoke test: (2*6-(23+7)/(1+2)) should put "-" at the root.
    tree_ndoe = Solution().build(["2", "*", "6", "-", "(", "23", "+", "7", ")", "/", "(", "1", "+", "2", ")"])
    assert tree_ndoe.symbol == "-"
|
algorhythms/LintCode
|
Expression Tree Build.py
|
Python
|
apache-2.0
| 2,906
|
from __future__ import print_function
import os
import yaml
from flask.ext.debugtoolbar import DebugToolbarExtension
from flask.ext.script import Manager
from flask.ext.shellplus import Shell
from flask_migrate import MigrateCommand, Migrate
from sigh.apps import create_app
from sigh.models import db
from sigh.models import User, Sigh, Tag, tag_identifier, Comment
# Application setup: load config.py next to this file, create the app and
# wire up database migrations plus the script manager.
base_dir = os.path.dirname(os.path.realpath(__file__))
config_file = os.path.join(base_dir, 'config.py')
application = create_app(default_config=config_file)
Migrate(application, db)
manager = Manager(application)
def debug_app(app):
    """Attach the Flask debug toolbar to *app*."""
    DebugToolbarExtension().init_app(app)
def manage_app(app, app_manager):
    # Bind the given script manager to the app. Not invoked elsewhere in
    # this script's visible code.
    app_manager.init_app(app)
@manager.command
def runserver(port=5000, host='0.0.0.0', init=False):
    """Run the development server with the debug toolbar attached.

    NOTE(review): the ``init`` flag is accepted but never used in this
    function — confirm whether it was meant to trigger initialisation.
    """
    port = int(port)
    debug_app(application)
    application.run(host=host, port=port)
@manager.command
def load(fxtdir='fixtures/', app_name='sigh'):
    """Load test fixtures from <app_name>/<fxtdir>/testdata.yaml into the
    database, including the tag_identifier association rows."""
    fixture_dir = os.path.join(base_dir, app_name, fxtdir)
    fixture_file = os.path.join(fixture_dir, 'testdata.yaml')
    with open(fixture_file) as f:
        # SECURITY: yaml.load without an explicit Loader can construct
        # arbitrary Python objects; safe_load only builds plain data.
        fixture_data = yaml.safe_load(f)
    data_pairs = ((User, fixture_data['users']),
                  (Sigh, fixture_data['sighs']),
                  (Tag, fixture_data['tags']),
                  (Comment, fixture_data['comments']))
    with application.app_context():
        for model, datas in data_pairs:
            for data in datas:
                model(**data).save()
        # The association table has no model class, so insert directly.
        for data in fixture_data['tag_identifiers']:
            db.engine.execute(tag_identifier.insert().values(**data))
# Register the interactive shell (with app and db preloaded) and the
# database migration commands, then dispatch CLI arguments.
manager.add_command('shell', Shell(context=dict(app=application, db=db)))
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
    manager.run()
|
kxxoling/Programmer-Sign
|
manage.py
|
Python
|
mit
| 1,772
|
#!/usr/bin/python
# -*- coding: latin-1 -*-
# Copyright 2013 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE LiveDemo App
#
# FI-WARE LiveDemo App is free software: you can redistribute it and/or modify it under the terms
# of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# FI-WARE LiveDemo App is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License
# for more details.
#
# You should have received a copy of the GNU Affero General Public License along with FI-WARE LiveDemo App. If not,
# see http://www.gnu.org/licenses/.
#
# For those usages not covered by the GNU Affero General Public License please contact with fermin at tid dot es
__author__ = 'fermin'
import subprocess
from lxml import etree
# Run the companion query script and parse its XML output.
p = subprocess.Popen(['./query-issue.sh'], shell=False, stdout=subprocess.PIPE)
output = p.stdout.read()
doc = etree.fromstring(output)
# Print each context element id followed by its attribute name/value pairs.
for ce in doc.findall('.//contextElement'):
    id = ce.find('.//id').text
    print id + ':'
    for ca in ce.findall('.//contextAttribute'):
        name = ca.find('name').text
        value = ca.find('contextValue').text
        print '  ', repr(name).ljust(30), ': ', str(value)
|
telefonicaid/fiware-livedemoapp
|
scripts/get-issues.py
|
Python
|
agpl-3.0
| 1,429
|
'''
Simulation Based on Hippocampus Recordings
Copyright Nate Sutton 2015
References:
Data from CRCNS.org hc3 .
Izhikevich neuron parameters from:
http://f1000research.com/articles/3-104/v1
'''
import pylab
import nest
'''
Create objects to run experiment with
'''
# Recording devices: one multimeter bank for the EC2 layer, a second for
# the DG layer, and a spike detector.
multimeter = nest.Create("multimeter",10)
nest.SetStatus(multimeter, {"withtime":True, "record_from":["V_m"]})
multimeter2 = nest.Create("multimeter")
nest.SetStatus(multimeter2, {"withtime":True, "record_from":["V_m"]})
spikedetector = nest.Create("spike_detector",
                params={"withgid": True, "withtime": True})
'''noise = nest.Create("poisson_generator", 2)
nest.SetStatus(noise, [{"rate": 80000.0}, {"rate": 15000.0}])'''
# Izhikevich neuron populations, one per hippocampal layer; parameters
# from http://f1000research.com/articles/3-104/v1 (see file header).
e_c_2_layer = nest.Create("izhikevich",50,{'V_m':-70.0,'I_e':-15.0,'a':0.0012,'b':3.0,'c':-68.5,'d':10.0})
e_c_3_layer = nest.Create("izhikevich",90,{'V_m':-70.0,'I_e':-15.0,'a':0.0012,'b':3.0,'c':-68.5,'d':10.0})
e_c_5_layer = nest.Create("izhikevich",80,{'V_m':-70.0,'I_e':-15.0,'a':0.0012,'b':3.0,'c':-68.5,'d':10.0})
c_a_1_layer = nest.Create("izhikevich",340,{'V_m':-70.0,'I_e':-15.0,'a':0.0012,'b':3.0,'c':-68.5,'d':10.0})
c_a_3_layer = nest.Create("izhikevich",100,{'V_m':-70.0,'I_e':-15.0,'a':0.0012,'b':3.0,'c':-68.5,'d':10.0})
d_g_layer = nest.Create("izhikevich",12,{'V_m':-70.0,'I_e':-15.0,'a':0.0012,'b':3.0,'c':-68.5,'d':10.0})
'''
Form connections between neurons and run sim
NOTE: I may need to split the neurons into Ex and In
groups in layers for connections
With a number of neuron mismatch between layers
how is that processed in connections?
'''
# Feed-forward excitatory chain EC2 -> EC3 -> EC5 -> CA1 -> CA3 -> DG.
syn_dict_ex = {"weight": 1.2}
syn_dict_in = {"weight": -2.0}
nest.Connect(e_c_2_layer, e_c_3_layer, "all_to_all", syn_spec=syn_dict_ex)
nest.Connect(e_c_3_layer, e_c_5_layer, "all_to_all", syn_spec=syn_dict_ex)
nest.Connect(e_c_5_layer, c_a_1_layer, "all_to_all", syn_spec=syn_dict_ex)
nest.Connect(c_a_1_layer, c_a_3_layer, "all_to_all", syn_spec=syn_dict_ex)
nest.Connect(c_a_3_layer, d_g_layer, "all_to_all", syn_spec=syn_dict_ex)
# Drive the input layer with a constant current and attach the recorders.
nest.SetStatus(e_c_2_layer, {"I_e": 10.0})
nest.Connect(multimeter, e_c_2_layer)
nest.Connect(multimeter2, d_g_layer)
nest.Connect(e_c_2_layer, spikedetector)
#nest.Simulate(350.0)
nest.Simulate(1000.0)
'''
Record activity
'''
# Pull membrane-potential traces from both multimeters.
dmm = nest.GetStatus(multimeter)[0]
Vms = dmm["events"]["V_m"]
ts = dmm["events"]["times"]
dmm2 = nest.GetStatus(multimeter2)[0]
Vms2 = dmm2["events"]["V_m"]
ts2 = dmm2["events"]["times"]
'''
Plot results
'''
# Figures 1/2: V_m traces; figure 3: spike raster of the input layer.
pylab.figure(1)
pylab.plot(ts, Vms)
pylab.figure(2)
pylab.plot(ts2, Vms2)
dSD = nest.GetStatus(spikedetector,keys='events')[0]
evs = dSD["senders"]
ts = dSD["times"]
pylab.figure(3)
pylab.plot(ts, evs, ".")
pylab.show()
|
nmsutton/MemoryModule
|
python_version/archive/memory_module_all_types.py
|
Python
|
mit
| 2,677
|
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
import MySQLdb
import pandas as pd
from scraper import utility
class kOptionRelatedTesting:
    """Builds CE/PE open-interest (OI) series from a CSV file and the
    scraper's MySQL database for plotting/inspection."""
    def __init__(self, csvFileName):
        #parser = lambda date: pd.datetime.strptime(date, '%Y%m%d')
        self.df = pd.read_csv(csvFileName)#, parse_dates = [0], date_parser = parser, index_col = "Date")
        try:
            self.db = MySQLdb.connect(utility.databaseHost, utility.databaseUsername,
                                        utility.databasePassword, utility.databaseName, charset="utf8", use_unicode=True)
        except Exception as e:
            print "Error in kOptionRelatedTesting connecting MYSQLdb.py"
    def __call__(self):
        #self.plotOIGraphs()
        self.plotOIGraph2()
    def plotOIGraph2(self):
        """Query summed OI per date for CE and PE options, join with the
        Nifty close, and print the combined series.

        NOTE(review): the SQL is built with %-interpolation; the values
        come from the local utility config module here, but parameterised
        queries would be safer if any of them can be external input.
        """
        ceOISQL = """ select date ,sum(OpenInterest) from %s where optiontype="%s" group by date
                """ %(utility.dbTableNameByOptionExpiry, "CE")
        peOISQL = """ select date ,sum(OpenInterest) from %s where optiontype="%s" group by date
                """ %(utility.dbTableNameByOptionExpiry, "PE")
        cursor = self.db.cursor()
        cursor.execute(ceOISQL)
        ceRows = cursor.fetchall()
        cursor.execute(peOISQL)
        peRows = cursor.fetchall()
        #print rows[0][0]
        oiSeries = [["Date", "CE OI", "PE OI", "Nifty"]]
        dates = []
        # CE and PE results must cover the same dates, row for row.
        if len(ceRows) != len(peRows):
            print "Error CE and PE Rows out of sync"
            return
        for i in range(len(ceRows)):
            dates.append(ceRows[i][0])
            #oiSeries.append([utility.dateDecoding(ceRows[i][0]), int(ceRows[i][1]), int(peRows[i][1])])
        spotValueSQL = """select date, close from %s where date >= %s and date <= %s
                """ %(utility.dbTableName, min(dates), max(dates))
        cursor.execute(spotValueSQL)
        spotValue = cursor.fetchall()
        #print spotValue
        for i in range(len(ceRows)):
            #dates.append(ceRows[i][0])
            oiSeries.append([utility.dateDecoding(ceRows[i][0]), int(ceRows[i][1]), int(peRows[i][1]), spotValue[i][1]])
        print oiSeries
        cursor.close()
    def plotOIGraphs(self):
        """Sum OI per (date, option type) from the in-memory DataFrame and
        print the CE and PE series."""
        groups = self.df.groupby(["Date", "OptionType"]).groups
        ceOISeries = []
        peOISeries = []
        for i in groups.iteritems():
            oiSum = 0
            for j in i[1]:
                #print groups[i][j]
                oiSum += self.df.iloc[j]["OpenInterest"]
            if i[0][1] == "CE":
                #print str(i[0][0]) + " ce " + str(oiSum)
                ceOISeries.append(oiSum)
            elif i[0][1] == "PE":
                #print str(i[0][0]) + " pe " + str(oiSum)
                peOISeries.append(oiSum)
        print ceOISeries
        print peOISeries
|
puchchi/stock_scraper_latest
|
MainDriver/OptionRelatedTesting.py
|
Python
|
mit
| 2,849
|
import os
import xlrd
import urllib
import libxml2
from BeautifulSoup import BeautifulSoup
def initialize():
    """Create the ./html output directory and return its path; abort the
    script if the directory already exists."""
    print "Initializing"
    htmlDirectory = os.path.join(os.getcwd(), 'html')
    if not os.path.exists(htmlDirectory):
        os.makedirs(htmlDirectory)
        return htmlDirectory
    else:
        print "Directory %s already exists, please remove and try again" % (htmlDirectory)
        exit()
def openSpreadsheetFile():
    """Open course_template.xlsx from the current directory and return
    the xlrd workbook."""
    spreadsheetLocation = os.path.join(os.getcwd(), "course_template.xlsx")
    print "Opening spreadsheet located at %s " % (spreadsheetLocation)
    spreadsheet = xlrd.open_workbook(spreadsheetLocation)
    print "Spreadsheet opened successfully"
    return spreadsheet
def getStyleConfiguration(firstSheet):
    """Read the style settings from column 1 of the first worksheet and
    return them as a dict, including a derived CSS gradient string."""
    headerLogoLeft = firstSheet.cell(0,1).value
    print "Header Logo Left %s " % (headerLogoLeft)
    headerLogoRight = firstSheet.cell(1,1).value
    print "Header Logo Right %s " % (headerLogoRight)
    ColourDark = firstSheet.cell(2,1).value
    print "Colour Dark %s " % (ColourDark)
    ColourLight = firstSheet.cell(3,1).value
    print "Colour Light %s " % (ColourLight)
    CourseCode = firstSheet.cell(4,1).value
    print "Course Code %s " % (CourseCode)
    CourseName = firstSheet.cell(5,1).value
    print "Course Name %s " % (CourseName)
    MicroCode = firstSheet.cell(6,1).value
    print "Micro Code %s " % (MicroCode)
    MicroName = firstSheet.cell(7,1).value
    print "Micro Name %s " % (MicroName)
    # CSS linear-gradient argument list, dark to light.
    GradientFill = "( " + ColourDark + ", " + ColourLight + " )"
    styleConfiguration = {'headerLogoLeft' : headerLogoLeft, 'headerLogoRight' : headerLogoRight, 'ColourDark' : ColourDark, 'ColourLight' : ColourLight, 'CourseCode' : CourseCode, 'CourseName' : CourseName, 'MicroCode' : MicroCode, 'MicroName' : MicroName, 'GradientFill' : GradientFill}
    return styleConfiguration
def getContent(secondSheet):
    """Walk column 1 of the worksheet, collecting values in groups of
    three; each complete group is stored in a dict keyed by its third
    value. Returns the dict."""
    print "Creating list"
    l = list()
    print "Creating dictionary"
    d = dict()
    print "Setting num to zero"
    num = 0
    print "Iterating through worksheet"
    for row in secondSheet.col(1):
        num = num + 1
        print "num = %d" % (num)
        l.append(row.value)
        if num == 3:
            # Every third row closes a group: key it by that row's value.
            print "num equals 3"
            print "Adding list to dictionary"
            d[row.value] = l
            print "Re-setting num to zero"
            num = 0
            print "Emptying the list"
            l = list()
    return d
def createCSS(htmlDirectory, styleConfiguration):
print "HTML Directory %s " % (htmlDirectory)
file2 = open(os.path.join(htmlDirectory, 'custom_oer.css'), 'w')
ContainerString = """.container{
background-color:#efe9e5;
background: -webkit-linear-gradient""" + styleConfiguration.get('GradientFill') + """!important;
background: -o-linear-gradient""" + styleConfiguration.get('GradientFill') + """!important;
background: -moz-linear-gradient""" + styleConfiguration.get('GradientFill') + """!important;
background: linear-gradient""" + styleConfiguration.get('GradientFill') + """!important;
border:1px solid #5b5b5b;
border-top-width: 0px;
-ms-box-shadow: 1px 4px 4px 0px #1E1E1E !important;
-moz-box-shadow: 1px 4px 4px 0px #1E1E1E !important;
-webkit-box-shadow: 1px 4px 4px 0px #1E1E1E !important;
box-shadow: 1px 4px 4px 0px #1E1E1E !important;
}"""
HeaderString = """.header-oer{
background: #FFF!important;
text-align: center!important;
border:1px solid #5b5b5b!important;
border-top-width: 0px!important;
-ms-box-shadow: 3px 4px 6px 0px #1E1E1E !important;
-moz-box-shadow: 3px 4px 6px 0px #1E1E1E !important;
-webkit-box-shadow: 3px 4px 6px 0px #1E1E1E !important;
box-shadow: 3px 4px 6px 0px #1E1E1E !important;
}"""
FooterString = """.footer-wrap {
background-color: #000;
min-height: 70px;
color: #fff;
padding-top: 20px;
font-size: 12px;
padding: 20px 10px;
text-align: center;
border-top-width: 2px!important;
-ms-box-shadow: 3px 4px 6px 0px #1E1E1E !important;
-moz-box-shadow: 3px 4px 6px 0px #1E1E1E !important;
-webkit-box-shadow: 3px 4px 6px 0px #1E1E1E !important;
box-shadow: 3px 4px 6px 0px #1E1E1E !important;
}
.footer-wrap p {
line-height: 18px;
text-align: center;
padding-bottom: 10px;
}
.footer-wrap .right {
text-align: center;
padding-bottom: 20px;
}
.footer-wrap a {
color: white;
}
@media (min-width: 766px) {
.footer-wrap {
text-align: center;
padding-bottom: 0px;
}
.footer-wrap .right {
float: right;
text-align: center;
padding-right: 10px;
padding-left: 20px;
}
.footer-wrap p {
line-height: 15px;
text-align: center;
padding-bottom: 0px;
margin-bottom: 2px;
}
}"""
OerImageString = """.image-oer {
position: relative;
display: block;
max-width: 100%;
left: 0;
right: 0;
bottom: 0;
margin: auto;
}"""
OerGreen = """.green-oer > li > a {
color : green!important;
}"""
file2.write(OerGreen)
file2.write(OerImageString)
file2.write(ContainerString)
file2.write(HeaderString)
file2.write(FooterString)
print OerGreen
print OerImageString
print ContainerString
print HeaderString
print FooterString
def fetchWikicontent(wikiUrl):
    """Fetch *wikiUrl* and return its '#bodyContent' paragraph nodes.

    The raw HTML is tidied with BeautifulSoup's prettify() before being
    parsed by libxml2, and the result of the XPath query
    //*[@id='bodyContent']/p is returned (a list of libxml2 nodes).
    """
    response = urllib.urlopen(wikiUrl)
    html = response.read()
    # prettify() yields well-formed markup that libxml2 can parse reliably
    soup = BeautifulSoup(html)
    cleanHtml = soup.prettify()
    doc = libxml2.parseDoc(cleanHtml)
    ctxt = doc.xpathNewContext()
    # NOTE(review): doc/ctxt are never freed (libxml2 expects explicit
    # freeDoc()/xpathFreeContext()) -- looks like a leak; confirm before fixing,
    # since the returned nodes stay owned by 'doc'.
    res = ctxt.xpathEval("//*[@id='bodyContent']/p")
    return res
def createHtml(htmlDirectory, styleConfiguration, wikiContent, mediaUrl, pageName):
    """Render a Bootstrap page named '<pageName>.html' into *htmlDirectory*.

    The page embeds the header logos and course code/name from
    *styleConfiguration*, the scraped *wikiContent* (stringified as-is)
    as the main column, and *mediaUrl* as the sidebar image.
    """
    print "HTML Directory %s " % (htmlDirectory)
    # NOTE(review): this local shadows the Python 2 builtin 'file'; harmless
    # inside the function but worth renaming eventually.
    file = open(os.path.join(htmlDirectory, pageName + '.html'), 'w')
    #print type(wikiContent)
    #print wikiContent
    HtmlString = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="description" content="">
<meta name="author" content="">
<link rel="icon" href="../../favicon.ico">
<title>""" + pageName + """</title>
<!-- Bootstrap core CSS -->
<!-- Latest compiled and minified CSS -->
<link rel="stylesheet" href="custom_oer.css">
<link rel="stylesheet" href="http://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css">
<!-- Optional theme -->
<link rel="stylesheet" href="http://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap-theme.min.css">
<!-- Latest compiled and minified JavaScript -->
<script src="http://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/js/bootstrap.min.js"></script>
<!-- Custom styles for this template -->
<link href="http://getbootstrap.com/examples/justified-nav/justified-nav.css" rel="stylesheet">
<!-- Just for debugging purposes. Don't actually copy these 2 lines! -->
<!--[if lt IE 9]><script src="http://getbootstrap.com/assets/js/ie-emulation-modes-warning.js"></script><![endif]-->
<script src="http://getbootstrap.com/assets/js/ie-emulation-modes-warning.js"></script>
<!-- IE10 viewport hack for Surface/desktop Windows 8 bug -->
<script src="http://getbootstrap.com/assets/js/ie10-viewport-bug-workaround.js"></script>
<!-- HTML5 shim and Respond.js IE8 support of HTML5 elements and media queries -->
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/html5shiv/3.7.2/html5shiv.min.js"></script>
<script src="https://oss.maxcdn.com/respond/1.4.2/respond.min.js"></script>
<![endif]-->
</head>
<body>
<div class="row header-oer">
<div class="col-md-4"><img src=""" + styleConfiguration.get('headerLogoLeft') + """ alt="OERu"/></div>
<div class="col-md-4"><h2>""" + styleConfiguration.get('CourseCode') + """ </h2> <br /><b> """ + styleConfiguration.get('CourseName') + """</b></div>
<div class="col-md-4"><img src='""" + styleConfiguration.get('headerLogoRight') + """' alt="OERu"/></div>
</div>
<div class="container">
<div class="masthead">
<ul class="nav nav-justified">
<li class="active"><a href="#"> """ + pageName + """</a></li>
<li><a href="#">Projects</a></li>
<li><a href="#">Services</a></li>
<li><a href="#">Downloads</a></li>
<li><a href="#">About</a></li>
<li><a href="#">Contact</a></li>
</ul>
</div>
<br />
<!-- Example row of columns -->
<div class="row">
<div class="col-md-8">
<h2>""" + pageName + """</h2>
<p> """ + str(wikiContent) + """</p>
</div>
<div class="col-md-4">
<img class='image-oer' src=' """ + mediaUrl + """ '</div>
</div>
<br />
</div>
<br />
</div> <!-- /container -->
<!-- Site footer -->
<div class="row">
<div class="col-md-1"></div>
<div class="col-md-10">
<div class="footer-wrap" style="text-align: center;">
<p>#TODO ABN: 40 234 732 081 | CRICOS: QLD 00244B<span class="line-break">, </span>
NSW 02225M | TEQSA: PRV12081<span class="line-break"> | </span>
<a href="#">Disclaimer</a><span class="line-break"> | <a href="#">Contact us</a></p>
<p>University of Southern Queensland<span class="line-break"></span> #TODO</p>
</div>
</div>
<div class="col-md-1"></div>
</div>
<!-- Bootstrap core JavaScript
================================================== -->
<!-- Placed at the end of the document so the pages load faster -->
</body>
</html>"""
    file.write(HtmlString)
    file.close()
|
tpmccallum/web-site-creator
|
oer_modules.py
|
Python
|
mit
| 8,998
|
import urllib.parse
import requests
import time
import json
import os
from bs4 import BeautifulSoup
def search_momo(query):
    """Search momo's mobile site for *query*.

    Returns a list of dicts with keys 'name', 'price', 'url' and
    'img_url'; returns [] when the HTTP response is not OK.
    """
    url = 'https://m.momoshop.com.tw/mosearch/%s.html' % urllib.parse.quote(query)
    mobile_ua = ('mozilla/5.0 (Linux; Android 6.0.1; '
                 'Nexus 5x build/mtc19t applewebkit/537.36 (KHTML, like Gecko) '
                 'Chrome/51.0.2702.81 Mobile Safari/537.36')
    resp = requests.get(url, headers={'User-Agent': mobile_ua})
    # requests.Response is falsy for 4xx/5xx status codes
    if not resp:
        return []
    resp.encoding = 'utf-8'
    soup = BeautifulSoup(resp.text, 'html.parser')
    results = []
    for goods in soup.find_all("li", "goodsItemLi"):
        name = goods.find("h3", "prdName").text.strip()
        price = goods.find("b", {"class": "price"}).text.strip()
        # entries without a price (e.g. placeholders) are skipped
        if not price:
            continue
        results.append({
            'name': name,
            'price': price,
            'url': 'http://m.momoshop.com.tw' + goods.find('a')['href'],
            'img_url': goods.a.div.find_all("img")[-1]['src'],
        })
    return results
if __name__ == '__main__':
    # Demo run: search a fixed query and archive today's results as JSON.
    query = 'iphone 13 pro'
    items = search_momo(query)
    today = time.strftime('%Y-%m-%d')
    print('%s 搜尋 %s 共 %d 筆資料' % (today, query, len(items)))
    for i in items:
        print(i)
    data = {
        'date': today,
        'store': 'momo',
        'items': items
    }
    # NOTE(review): assumes a 'json' directory already exists next to the
    # script -- open() will raise otherwise; confirm before relying on it.
    with open(os.path.join('json', today + '-momo.json'), 'w', encoding='utf-8') as f:
        json.dump(data, f, indent=2, ensure_ascii=False)
|
jwlin/web-crawler-tutorial
|
ch7/search_momo.py
|
Python
|
mit
| 1,677
|
from __future__ import absolute_import
from .Errors import error, message
from . import ExprNodes
from . import Nodes
from . import Builtin
from . import PyrexTypes
from .. import Utils
from .PyrexTypes import py_object_type, unspecified_type
from .Visitor import CythonTransform, EnvTransform
class TypedExprNode(ExprNodes.ExprNode):
    # Used for declaring assignments of a specified type without a known entry.
    def __init__(self, type):
        # Intentionally skips ExprNode.__init__: only the result type is
        # needed by the type-inference bookkeeping below.
        self.type = type

# Shared stand-in expression meaning "some Python object".
object_expr = TypedExprNode(py_object_type)
class MarkParallelAssignments(EnvTransform):
    """Record, for every parallel block (prange() / 'with parallel:'), which
    variables are assigned inside it, so later stages can treat them as
    thread-private / reduction variables."""
    # Collects assignments inside parallel blocks prange, with parallel.
    # Perhaps it's better to move it to ControlFlowAnalysis.

    # tells us whether we're in a normal loop
    in_loop = False

    parallel_errors = False

    def __init__(self, context):
        # Track the parallel block scopes (with parallel, for i in prange())
        self.parallel_block_stack = []
        super(MarkParallelAssignments, self).__init__(context)

    def mark_assignment(self, lhs, rhs, inplace_op=None):
        """Register an assignment to *lhs* with the innermost parallel block.

        For in-place assignments *inplace_op* is the reduction operator;
        mixing different reduction operators on one variable is an error.
        Sequence targets recurse element-wise.
        """
        if isinstance(lhs, (ExprNodes.NameNode, Nodes.PyArgDeclNode)):
            if lhs.entry is None:
                # TODO: This shouldn't happen...
                return

            if self.parallel_block_stack:
                parallel_node = self.parallel_block_stack[-1]
                previous_assignment = parallel_node.assignments.get(lhs.entry)

                # If there was a previous assignment to the variable, keep the
                # previous assignment position
                if previous_assignment:
                    pos, previous_inplace_op = previous_assignment

                    if (inplace_op and previous_inplace_op and
                            inplace_op != previous_inplace_op):
                        # x += y; x *= y
                        t = (inplace_op, previous_inplace_op)
                        error(lhs.pos,
                              "Reduction operator '%s' is inconsistent "
                              "with previous reduction operator '%s'" % t)
                else:
                    pos = lhs.pos

                parallel_node.assignments[lhs.entry] = (pos, inplace_op)
                parallel_node.assigned_nodes.append(lhs)

        elif isinstance(lhs, ExprNodes.SequenceNode):
            for arg in lhs.args:
                self.mark_assignment(arg, object_expr)
        else:
            # Could use this info to infer cdef class attributes...
            pass

    def visit_WithTargetAssignmentStatNode(self, node):
        self.mark_assignment(node.lhs, node.rhs)
        self.visitchildren(node)
        return node

    def visit_SingleAssignmentNode(self, node):
        self.mark_assignment(node.lhs, node.rhs)
        self.visitchildren(node)
        return node

    def visit_CascadedAssignmentNode(self, node):
        for lhs in node.lhs_list:
            self.mark_assignment(lhs, node.rhs)
        self.visitchildren(node)
        return node

    def visit_InPlaceAssignmentNode(self, node):
        self.mark_assignment(node.lhs, node.create_binop_node(), node.operator)
        self.visitchildren(node)
        return node

    def visit_ForInStatNode(self, node):
        # Model what each loop target gets assigned, special-casing
        # reversed()/enumerate()/range() calls on builtins.
        # TODO: Remove redundancy with range optimization...
        is_special = False
        sequence = node.iterator.sequence
        target = node.target
        if isinstance(sequence, ExprNodes.SimpleCallNode):
            function = sequence.function
            if sequence.self is None and function.is_name:
                entry = self.current_env().lookup(function.name)
                if not entry or entry.is_builtin:
                    if function.name == 'reversed' and len(sequence.args) == 1:
                        sequence = sequence.args[0]
                    elif function.name == 'enumerate' and len(sequence.args) == 1:
                        if target.is_sequence_constructor and len(target.args) == 2:
                            iterator = sequence.args[0]
                            if iterator.is_name:
                                iterator_type = iterator.infer_type(self.current_env())
                                if iterator_type.is_builtin_type:
                                    # assume that builtin types have a length within Py_ssize_t
                                    self.mark_assignment(
                                        target.args[0],
                                        ExprNodes.IntNode(target.pos, value='PY_SSIZE_T_MAX',
                                                          type=PyrexTypes.c_py_ssize_t_type))
                                    target = target.args[1]
                                    sequence = sequence.args[0]
        if isinstance(sequence, ExprNodes.SimpleCallNode):
            function = sequence.function
            if sequence.self is None and function.is_name:
                entry = self.current_env().lookup(function.name)
                if not entry or entry.is_builtin:
                    if function.name in ('range', 'xrange'):
                        is_special = True
                        for arg in sequence.args[:2]:
                            self.mark_assignment(target, arg)
                        if len(sequence.args) > 2:
                            self.mark_assignment(
                                target,
                                ExprNodes.binop_node(node.pos,
                                                     '+',
                                                     sequence.args[0],
                                                     sequence.args[2]))

        if not is_special:
            # A for-loop basically translates to subsequent calls to
            # __getitem__(), so using an IndexNode here allows us to
            # naturally infer the base type of pointers, C arrays,
            # Python strings, etc., while correctly falling back to an
            # object type when the base type cannot be handled.
            self.mark_assignment(target, ExprNodes.IndexNode(
                node.pos,
                base=sequence,
                index=ExprNodes.IntNode(target.pos, value='PY_SSIZE_T_MAX',
                                        type=PyrexTypes.c_py_ssize_t_type)))

        self.visitchildren(node)
        return node

    def visit_ForFromStatNode(self, node):
        self.mark_assignment(node.target, node.bound1)
        if node.step is not None:
            self.mark_assignment(node.target,
                                 ExprNodes.binop_node(node.pos,
                                                      '+',
                                                      node.bound1,
                                                      node.step))
        self.visitchildren(node)
        return node

    def visit_WhileStatNode(self, node):
        self.visitchildren(node)
        return node

    def visit_ExceptClauseNode(self, node):
        if node.target is not None:
            self.mark_assignment(node.target, object_expr)
        self.visitchildren(node)
        return node

    def visit_FromCImportStatNode(self, node):
        pass # Can't be assigned to...

    def visit_FromImportStatNode(self, node):
        for name, target in node.items:
            if name != "*":
                self.mark_assignment(target, object_expr)
        self.visitchildren(node)
        return node

    def visit_DefNode(self, node):
        # use fake expressions with the right result type
        if node.star_arg:
            self.mark_assignment(
                node.star_arg, TypedExprNode(Builtin.tuple_type))
        if node.starstar_arg:
            self.mark_assignment(
                node.starstar_arg, TypedExprNode(Builtin.dict_type))
        EnvTransform.visit_FuncDefNode(self, node)
        return node

    def visit_DelStatNode(self, node):
        for arg in node.args:
            self.mark_assignment(arg, arg)
        self.visitchildren(node)
        return node

    def visit_ParallelStatNode(self, node):
        """Maintain the parallel-block stack and validate nesting: only
        prange() may be nested inside another parallel construct."""
        if self.parallel_block_stack:
            node.parent = self.parallel_block_stack[-1]
        else:
            node.parent = None

        nested = False
        if node.is_prange:
            if not node.parent:
                node.is_parallel = True
            else:
                node.is_parallel = (node.parent.is_prange or not
                                    node.parent.is_parallel)
                nested = node.parent.is_prange
        else:
            node.is_parallel = True
            # Note: nested with parallel() blocks are handled by
            # ParallelRangeTransform!
            # nested = node.parent
            nested = node.parent and node.parent.is_prange

        self.parallel_block_stack.append(node)

        nested = nested or len(self.parallel_block_stack) > 2
        if not self.parallel_errors and nested and not node.is_prange:
            error(node.pos, "Only prange() may be nested")
            self.parallel_errors = True

        if node.is_prange:
            # Temporarily restrict traversal so only the loop's own parts
            # are visited while this block is on the stack.
            child_attrs = node.child_attrs
            node.child_attrs = ['body', 'target', 'args']
            self.visitchildren(node)
            node.child_attrs = child_attrs

            self.parallel_block_stack.pop()
            if node.else_clause:
                node.else_clause = self.visit(node.else_clause)
        else:
            self.visitchildren(node)
            self.parallel_block_stack.pop()

        self.parallel_errors = False
        return node

    def visit_YieldExprNode(self, node):
        if self.parallel_block_stack:
            error(node.pos, "Yield not allowed in parallel sections")
        return node

    def visit_ReturnStatNode(self, node):
        node.in_parallel = bool(self.parallel_block_stack)
        return node
class MarkOverflowingArithmetic(CythonTransform):
    """Set entry.might_overflow on variables involved in arithmetic that
    could overflow, by propagating a might_overflow flag down the tree."""
    # It may be possible to integrate this with the above for
    # performance improvements (though likely not worth it).

    might_overflow = False

    def __call__(self, root):
        self.env_stack = []
        self.env = root.scope
        return super(MarkOverflowingArithmetic, self).__call__(root)

    def visit_safe_node(self, node):
        # A safe node shields its children: the flag is cleared below it.
        self.might_overflow, saved = False, self.might_overflow
        self.visitchildren(node)
        self.might_overflow = saved
        return node

    def visit_neutral_node(self, node):
        # Neutral nodes pass the current flag through unchanged.
        self.visitchildren(node)
        return node

    def visit_dangerous_node(self, node):
        # Everything below a dangerous node is considered at risk.
        self.might_overflow, saved = True, self.might_overflow
        self.visitchildren(node)
        self.might_overflow = saved
        return node

    def visit_FuncDefNode(self, node):
        self.env_stack.append(self.env)
        self.env = node.local_scope
        self.visit_safe_node(node)
        self.env = self.env_stack.pop()
        return node

    def visit_NameNode(self, node):
        if self.might_overflow:
            entry = node.entry or self.env.lookup(node.name)
            if entry:
                entry.might_overflow = True
        return node

    def visit_BinopNode(self, node):
        # Bitwise ops are treated as non-overflowing; other binops are not.
        if node.operator in '&|^':
            return self.visit_neutral_node(node)
        else:
            return self.visit_dangerous_node(node)

    visit_UnopNode = visit_neutral_node

    visit_UnaryMinusNode = visit_dangerous_node

    visit_InPlaceAssignmentNode = visit_dangerous_node

    visit_Node = visit_safe_node

    def visit_assignment(self, lhs, rhs):
        # Assigning a long integer literal marks the target as overflow-prone.
        if (isinstance(rhs, ExprNodes.IntNode)
                and isinstance(lhs, ExprNodes.NameNode)
                and Utils.long_literal(rhs.value)):
            entry = lhs.entry or self.env.lookup(lhs.name)
            if entry:
                entry.might_overflow = True

    def visit_SingleAssignmentNode(self, node):
        self.visit_assignment(node.lhs, node.rhs)
        self.visitchildren(node)
        return node

    def visit_CascadedAssignmentNode(self, node):
        for lhs in node.lhs_list:
            self.visit_assignment(lhs, node.rhs)
        self.visitchildren(node)
        return node
class PyObjectTypeInferer(object):
    """
    If it's not declared, it's a PyObject.
    """
    def infer_types(self, scope):
        """Assign py_object_type to every entry in *scope* whose type is
        still unspecified."""
        undecided = [entry for entry in scope.entries.values()
                     if entry.type is unspecified_type]
        for entry in undecided:
            entry.type = py_object_type
class SimpleAssignmentTypeInferer(object):
    """
    Very basic type inference.

    Note: in order to support cross-closure type inference, this must be
    applies to nested scopes in top-down order.
    """
    def set_entry_type(self, entry, entry_type):
        # Propagate the type to all entries sharing this name (closures).
        entry.type = entry_type
        for e in entry.all_entries():
            e.type = entry_type

    def infer_types(self, scope):
        """Infer a type for every unspecified entry in *scope*.

        Works as a fixed-point computation over the control-flow
        assignments: an assignment is resolved once all assignments it
        depends on are resolved; circular dependency groups are broken by
        partial inference, and a final reinfer loop propagates changes.
        """
        enabled = scope.directives['infer_types']
        verbose = scope.directives['infer_types.verbose']

        if enabled == True:
            spanning_type = aggressive_spanning_type
        elif enabled is None:  # safe mode
            spanning_type = safe_spanning_type
        else:
            # Inference disabled: everything unspecified becomes object.
            for entry in scope.entries.values():
                if entry.type is unspecified_type:
                    self.set_entry_type(entry, py_object_type)
            return

        # Set of assignments
        assignments = set()
        assmts_resolved = set()
        dependencies = {}
        assmt_to_names = {}

        for name, entry in scope.entries.items():
            for assmt in entry.cf_assignments:
                names = assmt.type_dependencies()
                assmt_to_names[assmt] = names
                assmts = set()
                for node in names:
                    assmts.update(node.cf_state)
                dependencies[assmt] = assmts
            if entry.type is unspecified_type:
                assignments.update(entry.cf_assignments)
            else:
                assmts_resolved.update(entry.cf_assignments)

        def infer_name_node_type(node):
            types = [assmt.inferred_type for assmt in node.cf_state]
            if not types:
                node_type = py_object_type
            else:
                entry = node.entry
                node_type = spanning_type(
                    types, entry.might_overflow, entry.pos)
            node.inferred_type = node_type

        def infer_name_node_type_partial(node):
            types = [assmt.inferred_type for assmt in node.cf_state
                     if assmt.inferred_type is not None]
            if not types:
                return
            entry = node.entry
            return spanning_type(types, entry.might_overflow, entry.pos)

        def resolve_assignments(assignments):
            resolved = set()
            for assmt in assignments:
                deps = dependencies[assmt]
                # All assignments are resolved
                if assmts_resolved.issuperset(deps):
                    for node in assmt_to_names[assmt]:
                        infer_name_node_type(node)
                    # Resolve assmt
                    inferred_type = assmt.infer_type()
                    assmts_resolved.add(assmt)
                    resolved.add(assmt)
            assignments.difference_update(resolved)
            return resolved

        def partial_infer(assmt):
            partial_types = []
            for node in assmt_to_names[assmt]:
                partial_type = infer_name_node_type_partial(node)
                if partial_type is None:
                    return False
                partial_types.append((node, partial_type))
            for node, partial_type in partial_types:
                node.inferred_type = partial_type
            assmt.infer_type()
            return True

        partial_assmts = set()
        def resolve_partial(assignments):
            # try to handle circular references
            partials = set()
            for assmt in assignments:
                if assmt in partial_assmts:
                    continue
                if partial_infer(assmt):
                    partials.add(assmt)
                    assmts_resolved.add(assmt)
            partial_assmts.update(partials)
            return partials

        # Infer assignments
        while True:
            if not resolve_assignments(assignments):
                if not resolve_partial(assignments):
                    break
        inferred = set()
        # First pass
        for entry in scope.entries.values():
            if entry.type is not unspecified_type:
                continue
            entry_type = py_object_type
            if assmts_resolved.issuperset(entry.cf_assignments):
                types = [assmt.inferred_type for assmt in entry.cf_assignments]
                if types and all(types):
                    entry_type = spanning_type(
                        types, entry.might_overflow, entry.pos)
                    inferred.add(entry)
            self.set_entry_type(entry, entry_type)

        def reinfer():
            # Re-run inference until nothing changes any more.
            dirty = False
            for entry in inferred:
                types = [assmt.infer_type()
                         for assmt in entry.cf_assignments]
                new_type = spanning_type(types, entry.might_overflow, entry.pos)
                if new_type != entry.type:
                    self.set_entry_type(entry, new_type)
                    dirty = True
            return dirty

        # types propagation
        while reinfer():
            pass

        if verbose:
            for entry in inferred:
                message(entry.pos, "inferred '%s' to be of type '%s'" % (
                    entry.name, entry.type))
def find_spanning_type(type1, type2):
    """Return a single type wide enough to hold values of both operand
    types, collapsing any float-like span to plain C double."""
    if type1 is type2:
        spanned = type1
    elif type1 is PyrexTypes.c_bint_type or type2 is PyrexTypes.c_bint_type:
        # type inference can break the coercion back to a Python bool
        # if it returns an arbitrary int type here
        return py_object_type
    else:
        spanned = PyrexTypes.spanning_type(type1, type2)
    if spanned in (PyrexTypes.c_double_type, PyrexTypes.c_float_type,
                   Builtin.float_type):
        # Python's float type is just a C double, so it's safe to
        # use the C type instead
        return PyrexTypes.c_double_type
    return spanned
def aggressive_spanning_type(types, might_overflow, pos):
    """Reduce *types* to one spanning type, stripping reference/const
    qualifiers so the result is directly usable for an inferred variable.

    Used when 'infer_types' is explicitly enabled; unlike
    safe_spanning_type() it never falls back to object for risky spans.
    """
    # 'reduce' is only a builtin on Python 2 and this module never imports
    # it; import explicitly so the compiler also runs on Python 3.
    from functools import reduce
    result_type = reduce(find_spanning_type, types)
    if result_type.is_reference:
        result_type = result_type.ref_base_type
    if result_type.is_const:
        result_type = result_type.const_base_type
    if result_type.is_cpp_class:
        # An inferred C++ local must be default-constructible.
        result_type.check_nullary_constructor(pos)
    return result_type
def safe_spanning_type(types, might_overflow, pos):
    """Reduce *types* to one spanning type, but only return a non-object
    type when using it cannot change Python semantics; otherwise fall
    back to py_object_type.  Used for the default 'safe' inference mode.
    """
    # 'reduce' is only a builtin on Python 2 and this module never imports
    # it; import explicitly so the compiler also runs on Python 3.
    from functools import reduce
    result_type = reduce(find_spanning_type, types)
    if result_type.is_const:
        result_type = result_type.const_base_type
    if result_type.is_reference:
        result_type = result_type.ref_base_type
    if result_type.is_cpp_class:
        # An inferred C++ local must be default-constructible.
        result_type.check_nullary_constructor(pos)
    if result_type.is_pyobject:
        # In theory, any specific Python type is always safe to
        # infer. However, inferring str can cause some existing code
        # to break, since we are also now much more strict about
        # coercion from str to char *. See trac #553.
        if result_type.name == 'str':
            return py_object_type
        else:
            return result_type
    elif result_type is PyrexTypes.c_double_type:
        # Python's float type is just a C double, so it's safe to use
        # the C type instead
        return result_type
    elif result_type is PyrexTypes.c_bint_type:
        # find_spanning_type() only returns 'bint' for clean boolean
        # operations without other int types, so this is safe, too
        return result_type
    elif result_type.is_ptr:
        # Any pointer except (signed|unsigned|) char* can't implicitly
        # become a PyObject, and inferring char* is now accepted, too.
        return result_type
    elif result_type.is_cpp_class:
        # These can't implicitly become Python objects either.
        return result_type
    elif result_type.is_struct:
        # Though we have struct -> object for some structs, this is uncommonly
        # used, won't arise in pure Python, and there shouldn't be side
        # effects, so I'm declaring this safe.
        return result_type
    # TODO: double complex should be OK as well, but we need
    # to make sure everything is supported.
    elif (result_type.is_int or result_type.is_enum) and not might_overflow:
        return result_type
    return py_object_type
def get_type_inferer():
    """Return the type inferer used by the compilation pipeline."""
    return SimpleAssignmentTypeInferer()
|
thedrow/cython
|
Cython/Compiler/TypeInference.py
|
Python
|
apache-2.0
| 20,789
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################
# GXMAIL - COMMAND LINE SMTP USER AGENT
###############################################################
#
# gxmail.py
#
# Copyright 2014 GaboXandre <gabo.xandre@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import smtplib
import sys
import os
import argparse
import simplejson as json
def settings_mode():
    """Interactive settings menu: view, add or delete sending profiles.

    Loops until the user picks '0'.  Relies on the module-level AppInfo
    and FileLocations dicts and on create_profile() -- all defined
    elsewhere in this file.
    """
    print 'gxmail v%s Settings' %(AppInfo['Version'])
    ######################################################
    # 0. Menu - will repeat until loop is false
    ######################################################
    loop = True
    while loop is True:
        print '-'*80
        print ' 1 - View profiles'
        print ' 2 - Add new profile'
        print ' 3 - Delete existing profile'
        print ' 0 - Exit'
        print '-'*80
        MenuOption = raw_input('Enter a selection -> ')
        if MenuOption == '1':
            print 'Installed Profiles:'
            f = []
            # only the top level of the profile directory is listed
            for (dirpath, dirnames, filenames) in os.walk(FileLocations['ProfileDir']):
                f.extend(filenames)
                break
            cntr = 0
            length = len(f)
            while cntr < length:
                print str(cntr)+' - '+f[cntr]
                cntr += 1
            raw_input('Press ENTER to continue')
        elif MenuOption == '2':
            print "Let's create a new profile: "
            prof_name = raw_input('Profile Name -> ')
            server = raw_input('Server -> ')
            port = raw_input('Port -> ')
            email = raw_input('Your email -> ')
            password = raw_input('Your password -> ')
            # order matters: send_mail() indexes [name, server, port, email, password]
            defprofile = [prof_name]
            defprofile.append(server)
            defprofile.append(port)
            defprofile.append(email)
            defprofile.append(password)
            create_profile(defprofile)
        elif MenuOption == '3':
            print ' 3 - Delete existing profile'
            delete = raw_input('Enter profile name to delete -> ')
            fullpath = FileLocations['ProfileDir']+delete
            try:
                os.remove(fullpath)
                print 'Done, the profile at "%s" has been deleted.' %(fullpath)
                raw_input('Press ENTER to continue')
            except Exception:
                print 'Opps, the profile at "%s" could not be deleted. Sorry' %(fullpath)
                raw_input('Press ENTER to continue')
        elif MenuOption == '0':
            loop = False
        else:
            print 'Sorry, %s is not a valid option.' %(str(MenuOption))
def initialize_smtp_server(arguments):
    """Connect, STARTTLS-upgrade and authenticate an SMTP session.

    arguments[0] is the profile list [name, server, port, email, password];
    returns the logged-in smtplib.SMTP connection.
    """
    profile = arguments[0]
    host = profile[1]
    port = profile[2]
    account = profile[3]
    secret = profile[4]
    connection = smtplib.SMTP(host, port)
    connection.ehlo()
    connection.starttls()
    # re-identify over the now-encrypted channel before logging in
    connection.ehlo()
    connection.login(account, secret)
    return connection
def send_mail(arguments):
    """Compose a message from the parsed arguments and send it via SMTP.

    arguments layout: [0]=profile list (name, server, port, email,
    password), [1]=to, [2]=subject, [3]=path of the file holding the body,
    [4]=MIME type string, [5]=attachment path or the string 'None'.
    Headers and the multipart boundary are built by hand rather than with
    the email package.
    """
    print 'Sending email ... '
    ######################################################
    # 1. Common Variables
    ######################################################
    to_email = arguments[1]
    from_email = arguments[0][3]
    subject = arguments[2]
    mime_type = arguments[4]
    # advertised in the X-Mailer header, e.g. "gxmail-v1.0"
    xmailer = AppInfo['AppName']+'-v'+AppInfo['Version']
    attachment = str(arguments[5])
    ######################################################
    # 2. Extract message from file
    ######################################################
    file_name = arguments[3] #args.message
    the_file = open(file_name)
    msg = the_file.read()
    the_file.close()
    ######################################################
    # 3a. Prepare email without attachment
    ######################################################
    if attachment == 'None':
        # parse message from text file
        header = "To:%s\nFrom:%s\nSubject:%s\nX-Mailer: %s\nContent-type: %s\n " % (to_email, from_email, subject, xmailer, mime_type)
        content = header + "\n" + msg
    ######################################################
    # 3b. Prepare email with attachment
    ######################################################
    else:
        # fixed multipart boundary marker
        marker = '2325769521'
        # prepare attachment
        attachment_name = arguments[5]
        extract = open(attachment_name, 'r')
        encoded_attachment = extract.read()
        #Main Header
        header = "To:%s\nFrom:%s\nMIME-Version: 1.0\nX-Mailer: %s\nSubject:%s\nContent-Type: multipart/mixed; boundary=%s\n--%s\nContent-type: %s\nContent-Transfer-Encoding:8bit\n" % (to_email, from_email, xmailer, subject, marker, marker, mime_type)
        after_body = '--%s' %(marker)
        # attachment header
        attachment_header = "Content-Type: text/plain; name=\"%s\"\nContent-Disposition: attachment; filename=%s\n%s\n--%s--" %(attachment_name, attachment_name, encoded_attachment, marker)
        content = header + "\n" + msg +"\n"+after_body+'\n'+attachment_header
    ######################################################
    # 4. Send email
    ######################################################
    try:
        smtpserver = initialize_smtp_server(arguments)
        smtpserver.sendmail(from_email, to_email, content)
        smtpserver.close()
        print 'e-mail sent successfully to '+str(to_email)
        print '='*80
    except Exception:
        print 'Opps, the email could not be sent.'
        print '='*80
def batch_mode(arguments):
    """Send the same message to every address listed (one per line) in the
    batch file whose path is arguments[7]."""
    print 'you are now in batch mode...' ##########DEBUG
    ############################################
    # 0. Prepare arguments
    ############################################
    # arguments[4] arrives as the boolean --html flag; replace it with the
    # actual MIME type string expected by send_mail()
    mime_type = arguments[4]
    if mime_type is True:
        arguments[4] = 'text/html'
    else:
        arguments[4] = 'text/plain'
    ############################################
    # 1. Get the file with emails
    ############################################
    batch_file_path = str(arguments[7])
    batch_file = open(os.path.expanduser(batch_file_path))
    ############################################
    # 2. Iterate file to make the list
    ############################################
    mail_list = []
    with batch_file as f:
        for line in f:
            x = line.rstrip( )
            mail_list.append(x)
    # NOTE(review): the 'with' block above already closed the file; this
    # extra close() is redundant but harmless.
    batch_file.close()
    ############################################
    # 3. Iterate list and send emails
    ############################################
    length = len(mail_list)
    cntr = 0
    while cntr < length:
        # reuse the argument list, swapping in each recipient
        arguments[1] = mail_list[cntr]
        send_mail(arguments)
        #print 'email sent to: '+str(mail_list[cntr])
        #print '-'*80
        cntr += 1
def interactive_mode():
    """Prompt for every message field on the console, compose the body in
    an external editor (vi or nano), then send via the chosen profile.

    Builds the same positional argument list used by send_mail():
    [0-profile, 1-to, 2-subject, 3-message, 4-mime, 5-attachment, ...].
    """
    print '-'*80
    print 'Interactive Mode'
    print '-'*80
    ############################################
    # 0. Start the list with no values
    ############################################
    global arguments
    arguments = []
    print arguments
    ############################################
    # 1. Get main input
    ############################################
    p = raw_input('Profile (default): ')
    if p == '':
        arguments.append('None')
    else:
        arguments.append(str(p))
    h = raw_input('MIME (text or html): ')
    if h == 'html':
        h = 'text/html'
    else:
        h = 'text/plain'
    t = raw_input('To: ')
    s = raw_input('Subject: ')
    # write the body with text editor
    print 'Choose your editor:'
    print '(v) for Vi'
    print '(n) for Nano'
    e = raw_input('editor -> ')
    if e == 'v':
        text = os.system('vi ~/.email-body')
    else:
        text = os.system('nano ~/.email-body')
    m = os.path.expanduser('~/.email-body')
    ############################################
    # 2. Start the list
    # [0-profile, 1-to, 2-subject, 3-message, 4-text/html, 5-attachment, 6-interactive, 7-batch, 8-version ]
    ############################################
    arguments.append(str(t))
    arguments.append(str(s))
    arguments.append(os.path.expanduser(m))
    arguments.append(h)
    ############################################
    # 3. Attachment?
    ############################################
    question = raw_input('Include attachment?(y/n)-> ')
    if question == 'y':
        a = raw_input('Attachment: ')
        a = os.path.expanduser(a)
        arguments.append(os.path.expanduser(a))
    else:
        a = 'None'
        arguments.append(str(a))
    ############################################
    # 3. Get profile info
    # Note this section is copy paste from select_profile
    # Shame on me !!!!!
    ############################################
    profile = str(arguments[0])
    if profile == 'None':
        profile_name = 'default'
    else:
        profile_name = profile
    profile_location = FileLocations['ProfileDir']+profile_name
    # load profile info
    myfile = open(profile_location)
    myfile2 = myfile.read()
    profile = json.loads(myfile2)
    arguments[0] = profile
    ############################################
    # 3. Send email
    ############################################
    # pad with the flags send_mail() expects but does not use here
    arguments.append(False)
    arguments.append('None')
    arguments.append(False)
    send_mail(arguments)
    # Remove the temporary file
    os.system('rm ~/.email-body')
def test_options(arguments):
    """Dispatch on the parsed command-line arguments.

    arguments layout (see main()):
    [0-profile, 1-to, 2-subject, 3-message, 4-text/html, 5-attachment,
     6-interactive, 7-batch, 8-version, 9-settings]

    Side effects: replaces arguments[0] with the loaded profile data and
    arguments[4] with the resolved MIME type, then either enters one of
    the special modes (version/batch/interactive/settings, each of which
    exits the program) or sends the email via send_mail().
    """
    ######################################################
    # Select Profile
    ######################################################
    # str() normalizes argparse's None into the string 'None'.
    profile = str(arguments[0])
    if profile == 'None':
        profile_name = 'default'
    else:
        profile_name = profile
    # ProfileDir already ends with '/', so plain concatenation works here.
    profile_location = FileLocations['ProfileDir']+profile_name
    # load profile info (the profile file is a JSON-encoded list)
    myfile = open(profile_location)
    myfile2 = myfile.read()
    profile = json.loads(myfile2)
    arguments[0] = profile
    ############################################
    # 1. Check general options
    #    a. version
    #    b. interactive
    #    c. batch
    ############################################
    version = arguments[8]
    interactive = arguments[6]
    # str(None) == 'None', which is the "no batch file" sentinel below.
    batch = str(arguments[7])
    settings = arguments[9]
    if version is True:
        version_info = AppInfo['AppName']+'-v'+AppInfo['Version']
        print version_info
        quit()
    # once version is ruled out comes the welcome to the program.
    print '='*80
    print 'gxmail - version %s' %(AppInfo['Version'])
    print '='*80
    # Each special mode terminates the program when it is done.
    if batch != 'None':
        batch_mode(arguments)
        quit()
    if interactive is True:
        interactive_mode()
        quit()
    if settings is True:
        settings_mode()
        quit()
    ######################################################
    # 2. Check email option and pass them to send_email()
    #    1. to
    #    2. subject
    #    3. message
    #    4. type: text or html
    #    5. attachment   #### should t be here?
    ######################################################
    to = str(arguments[1])
    subject = str(arguments[2])
    message = str(arguments[3])
    mime_type = arguments[4]
    attachment = str(arguments[5])
    # switch tracks whether all mandatory fields were supplied.
    switch = 'ON'
    # test that all mandatory arguments are included
    if to == 'None':
        switch = 'OFF'
    if subject == 'None':
        switch = 'OFF'
    if message == 'None':
        switch = 'OFF'
    if switch == 'OFF':
        print 'Sorry, information is missing...\nUse flag -h or --help \nAlso, you may use interactive mode with flag -i or --interactive.'
        quit()
    #switch == 'ON', so we continue testing...
    # Resolve the --html boolean flag into a concrete MIME type string.
    if mime_type is True:
        arguments[4] = 'text/html'
    else:
        arguments[4] = 'text/plain'
    ######################################################
    # 3. READY TO SEND EMAIL
    ######################################################
    send_mail(arguments)
def create_profile(defprofile):
try:
default_file = FileLocations['ProfileDir']+'/'+str(defprofile[0])
default = open(default_file, 'w')
default.write(json.dumps(defprofile))
default.close()
res = 'You are ready to send emails with your new profile!'
print res
quit()
except Exception:
print 'Error: Default profile could not be created. Sorry.'
quit()
def test_profiles():
f = []
for (dirpath, dirnames, filenames) in os.walk(FileLocations['ProfileDir']):
f.extend(filenames)
break
length = len(f)
if length == 0:
os.mkdir(FileLocations['ProfileDir'])
print '-'*80
print "You don't have a profile set up yet. Let's do it now!"
print '-'*80
server = raw_input('Server -> ')
port = raw_input('Port -> ')
email = raw_input('Your email -> ')
password = raw_input('Your password -> ')
defprofile = ['default']
defprofile.append(server)
defprofile.append(port)
defprofile.append(email)
defprofile.append(password)
setup = create_profile(defprofile)
def main():
    """Read the command line and populate the global argument list.

    Side effects: defines the module-level AppInfo, FileLocations and
    arguments globals used by the rest of the program.  Returns the
    arguments list as well.
    """
    global AppInfo
    global FileLocations
    global arguments
    AppInfo = {
        'AppName': 'gxmail',
        'Version': '1.2.1',
        'Author': 'GaboXandre',
        'License': 'GPL3',
        'copyright': '2014 GaboXandre',
    }
    FileLocations = {'ProfileDir': os.path.expanduser('~/.gxmail/')}
    # Command-line interface definition.
    cli = argparse.ArgumentParser(description='%s is a simple text smpt client to send email from the command line. Very useful for scripts. If called without parameters, it starts in interactive mode.' % (AppInfo['AppName']))
    cli.add_argument('-p', '--profile', help='Select profile to be used.', required=False)
    cli.add_argument('-to', help='Receipient. You may include several email addresses separating them with a comma. DO NOT use spaces', required=False)
    cli.add_argument('-s', '--subject', help='subject line.', required=False)
    cli.add_argument('-m', '--message', help='Import email body from text file.', required=False)
    cli.add_argument('-b', '--batch', help='Batch mode: get recepients from a text file.', required=False)
    cli.add_argument('-html', '--html', help='HTML mode: send html formated content.', required=False, action="store_true")
    cli.add_argument('-v', '--version', help='Prints version and exits program.', required=False, action="store_true")
    cli.add_argument('-i', '--interactive', help='Runs in interactive mode.', required=False, action="store_true")
    cli.add_argument('-set', '--settings', help='Access your profile settings.', required=False, action="store_true")
    cli.add_argument('-a', '--attachment', help='Send file attachment.', required=False)
    parsed = cli.parse_args()
    # Positional meaning of each slot:
    # [0-profile, 1-to, 2-subject, 3-message, 4-text/html, 5-attachment,
    #  6-interactive, 7-batch, 8-version, 9-settings]
    arguments = [
        parsed.profile,
        parsed.to,
        parsed.subject,
        parsed.message,
        parsed.html,
        parsed.attachment,
        parsed.interactive,
        parsed.batch,
        parsed.version,
        parsed.settings,
    ]
    return arguments
if __name__ == '__main__':
    # Parse the command line (sets the global 'arguments'), make sure a
    # profile exists, then dispatch on the options.
    main()
    test_profiles()
    test_options(arguments)
|
GaboXandre/gxmail
|
gxmail.py
|
Python
|
gpl-3.0
| 14,299
|
from concert.devices.samplechangers.dummy import SampleChanger
from concert.tests import TestCase
class TestSampleChanger(TestCase):
    """Unit tests for the dummy SampleChanger device."""

    def setUp(self):
        """Create a fresh dummy sample changer before each test."""
        super(TestSampleChanger, self).setUp()
        self.samplechanger = SampleChanger()

    def test_set_sample(self):
        """Setting the sample to None should read back as None."""
        self.samplechanger.sample = None
        # assertIsNone is the idiomatic identity check for None
        # (clearer failure message than assertEqual(None, ...)).
        self.assertIsNone(self.samplechanger.sample)
|
ufo-kit/concert
|
concert/tests/unit/devices/test_samplechanger.py
|
Python
|
lgpl-3.0
| 380
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
r_mapcalc.py
------------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
def checkParameterValuesBeforeExecuting(alg, parameters, context):
    """Check that 'expression' and 'file' are not both provided.

    Returns a (valid, message) tuple: (True, None) when the parameters
    are acceptable, otherwise (False, <translated error message>).
    """
    expression = alg.parameterAsString(parameters, 'expression', context)
    rules_file = alg.parameterAsString(parameters, 'file', context)
    if expression and rules_file:
        # Only one of the two input modes may be used at a time.
        return False, alg.tr("You need to set either inline expression or a rules file!")
    return True, None
def processInputs(alg, parameters, context, feedback):
    """Import every input raster into the GRASS session.

    Each QGIS layer is imported with r.in.gdal under its source-file
    base name, so the mapcalc expression can refer to rasters by their
    familiar names.  The 'maps' parameter is then removed because it has
    been handled manually here.
    """
    # We will use the same raster names than in QGIS to name the rasters in GRASS
    rasters = alg.parameterAsLayerList(parameters, 'maps', context)
    # Plain iteration: the original enumerate() index was never used.
    for raster in rasters:
        # GRASS raster name = source file name without its extension.
        rasterName = os.path.splitext(
            os.path.basename(raster.source()))[0]
        alg.inputLayers.append(raster)
        alg.setSessionProjectionFromLayer(raster)
        command = 'r.in.gdal input="{0}" output="{1}" --overwrite -o'.format(
            os.path.normpath(raster.source()),
            rasterName)
        alg.commands.append(command)
    alg.removeParameter('maps')
    alg.postInputs()
def processCommand(alg, parameters, context, feedback):
    # Delegate to the algorithm's generic processCommand.  The trailing
    # True presumably tells it to skip normal output handling (outputs
    # are exported wholesale in processOutputs) -- TODO confirm against
    # the Grass7Algorithm.processCommand signature.
    alg.processCommand(parameters, context, feedback, True)
def processOutputs(alg, parameters, context, feedback):
    # We need to export every raster from the GRASSDB:
    # r.mapcalc can create any number of rasters, so the whole GRASS
    # database is exported into 'output_dir' rather than named outputs.
    alg.exportRasterLayersIntoDirectory('output_dir',
                                        parameters, context,
                                        wholeDB=True)
|
dwadler/QGIS
|
python/plugins/processing/algs/grass7/ext/r_mapcalc.py
|
Python
|
gpl-2.0
| 2,550
|
from . import func
from .Var import var
class NodeBranchPart(object):
    """Model information for one data partition of a branch."""

    def __init__(self):
        # -1 signals that no rate matrix / gdasrv has been assigned yet.
        self.rMatrixNum = self.gdasrvNum = -1
class NodeBranch(object):
    """A branch (edge) leading to a Node in a tree."""

    def __init__(self):
        self.len = 0.1            # branch length, small non-zero default
        self.rawSplitKey = None   # odd or even split key
        self.splitKey = None      # only even
        self.parts = []           # one NodeBranchPart per data partition
        self.lenChanged = False   # set when self.len is modified
class NodePart(object):
    """Per-data-partition information attached to a Node."""

    def __init__(self):
        # -1 means no composition has been assigned yet.
        self.compNum = -1
class Node(object):
    """A Node is a vertex in a Tree. All but the root have a branch.
    A Node has pointers to its parent, leftChild, and sibling, any of which may be None.
    """
    def __init__(self):
        """Initialize an unattached node with sentinel values."""
        self.name = None
        self.nodeNum = -1          # -1 means not yet numbered
        self.parent = None
        self.leftChild = None
        self.sibling = None
        self.isLeaf = 0
        self.cNode = None  # Pointer to a c-struct
        self.seqNum = -1  # Zero-based seq numbering of course, so -1 means no sequence.
        self.br = NodeBranch()
        #self.rootCount = None # For cons trees, where the input trees do not
        #                      # have bifurcating roots.  This is the number of
        #                      # compatible input trees that were rooted on this node.
        self.parts = []  # NodePart objects
        self.doDataPart = 0
        self.flag = 0
    ##Ignore
    def wipe(self):
        """Set the pointers parent, leftChild, and sibling to None"""
        self.parent = None
        self.leftChild = None
        self.sibling = None
    def rightmostChild(self):
        """Find and return the rightmostChild of self.
        If self has no children, return None.
        """
        n = self.leftChild
        if not n:
            return None
        # Children form a singly-linked list via .sibling; walk to its end.
        while n.sibling:
            n = n.sibling
        return n
    def leftSibling(self):
        """Find and return the sibling on the left.
        A node has a pointer to its sibling, but that is the sibling
        on the right.  It is a bit awkward to find the sibling on the
        left, as you need to go via the parent and the leftChild of
        the parent.
        If there is no parent, return None.  If there is no
        leftSibling, return None.
        """
        if not self.parent:
            #print 'leftSibling(%i). No parent. returning None.' % self.nodeNum
            return None
        lsib = self.parent.leftChild
        if lsib == self:
            #print 'leftSibling(%i). self is self.parent.leftChild. returning None.' % self.nodeNum
            return None
        # Walk the sibling chain until the node just before self is found.
        while lsib:
            if lsib.sibling == self:
                #print 'leftSibling(%i): returning node %i' % (self.nodeNum, lsib.nodeNum)
                return lsib
            lsib = lsib.sibling
    # These next 3 were suggestions from Rick Ree.  Thanks, Rick!
    # Then I added a couple more.  Note that all of these use
    # recursion, and so could bump into the recursion limit, and might
    # fail on large trees.  However, I tried iterPreOrder() on a
    # random tree of 10,000 taxa, and it was fine.
    # You can temporarily set a different recursion limit with the sys module.
    # oldlimit = sys.getrecursionlimit()
    # sys.setrecursionlimit(newLimit)
    # See also Tree.iterNodesNoRoot()
    def iterChildren(self):
        """Yield each direct child of self, left to right."""
        n = self.leftChild
        while n:
            yield n
            n = n.sibling
    def iterPostOrder(self):
        """Yield self and all its descendants, children before parents."""
        for c in self.iterChildren():
            for n in c.iterPostOrder():
                yield n
        yield self
    def iterPreOrder(self):
        """Yield self and all its descendants, parents before children."""
        yield self
        for c in self.iterChildren():
            for n in c.iterPreOrder():
                yield n
    def iterLeaves(self):
        """Yield every leaf node at or below self."""
        for n in self.iterPreOrder():
            if n.isLeaf:
                yield n
    def iterInternals(self):
        """Yield every internal (non-leaf) node at or below self."""
        for n in self.iterPreOrder():
            if not n.isLeaf:
                yield n
    def iterDown(self, showDown=False):
        """Iterates over all the nodes below self (including self)
        Starts by returning self.  And then iterates over all nodes below self.
        It does so by a combination of Node.iterPreOrder() and
        Node.iterDown() (ie recursively).  Now sometimes we want to
        know if the nodes that are returned come from iterDown()
        (strictly) or not (ie from iterPreOrder()).  If that bit of
        info is needed, then you can turn on the arg ``showDown``.
        (The following is probably bad Python practice!) When that is done, whenever
        iterDown() is called the first node that is returned will have
        the attribute ``down`` set to True.  But after it is returned,
        that ``down`` attribute is zapped (to try to keep the bloat
        down ...).  So you need to test ``if hasattr(yourNode,
        'down'):`` before you actually use it.
        """
        if showDown:
            self.down = True
        yield self
        if showDown:
            # Remove the marker immediately after yielding, as documented.
            del(self.down)
        if self.parent:
            for c in self.parent.iterChildren():
                if c == self:
                    # Continue downward (rootward) through the parent.
                    for n in c.parent.iterDown(showDown):
                        yield n
                else:
                    # Other subtrees hanging off the parent: normal pre-order.
                    for n in c.iterPreOrder():
                        yield n
    # ###############################
    def getNChildren(self):
        """Returns the number of children that the node has."""
        if not self.leftChild:
            return 0
        c = self.leftChild
        counter = 0
        while c:
            c = c.sibling
            counter += 1
        return counter
    def isAncestorOf(self, otherNode):
        """Asks whether self is an an ancestor of otherNode."""
        n = otherNode
        # Climb from otherNode toward the root; hitting self means yes.
        while 1:
            n = n.parent
            if not n:
                return False
            elif n == self:
                return True
    def _ladderize(self, biggerGroupsOnBottom):
        """This is only used by Tree.ladderize()."""
        #print '====Node %i' % self.nodeNum
        if not self.leftChild:
            pass
        else:
            # Count the leaves under each child so children can be sorted.
            nLeaves = []
            children = []
            ch = self.leftChild
            while ch:
                nL = len([n2 for n2 in ch.iterLeaves()])
                nLeaves.append(nL)
                ch.nLeaves = nL
                children.append(ch)
                ch = ch.sibling
            #print '    nLeaves = %s' % nLeaves
            # If every child subtree has exactly one leaf, order is moot.
            allOnes = True
            for ch in children:
                if ch.nLeaves > 1:
                    allOnes = False
                    break
            if not allOnes:
                children = func.sortListOfObjectsOnAttribute(children, 'nLeaves')
                if not biggerGroupsOnBottom:
                    children.reverse()
                #print '\n    Children\n    ------------'
                #for ch in children:
                #    print '    node=%i, nLeaves=%i' % (ch.nodeNum, ch.nLeaves)
                # Re-thread the child linked list in the sorted order.
                self.leftChild = children[0]
                theLeftChild = self.leftChild
                theLeftChild.sibling = None
                for ch in children[1:]:
                    theLeftChild.sibling = ch
                    theLeftChild = ch
                theLeftChild.sibling = None
            # Remove the temporary per-child leaf counts.
            for ch in children:
                del(ch.nLeaves)
            # Recurse into the (possibly reordered) children.
            for ch in self.iterChildren():
                ch._ladderize(biggerGroupsOnBottom)
if var.usePfAndNumpy:
    import sys
    import pf

    # Attach a __del__ to Node only when the C extension is in use.  The
    # freeing functions and sys are bound as default arguments so they
    # remain reachable during interpreter shutdown, when module globals
    # may already have been cleared.
    def __del__(self, freeNode=pf.p4_freeNode, dp_freeNode=pf.dp_freeNode,
                mysys=sys):
        """Warn about, and free, a cNode that survived until node deletion."""
        if self.cNode:
            # Generally, cNodes are deleted before the cTree is freed.
            # freeNode requires the cTree!
            mysys.stdout.write('Node.__del__() node %i (%s) has a cNode (%s). How?!?\n' % (
                self.nodeNum, self, self.cNode))
            if self.doDataPart:
                # Bug fix: dp_freeNode was referenced here but never bound,
                # raising a NameError whenever doDataPart was set.  It is
                # now captured as a default argument (pf.dp_freeNode),
                # matching the commented-out original signature.
                dp_freeNode(self.cNode)
            else:
                freeNode(self.cNode)
            self.cNode = None
    Node.__del__ = __del__
    del(__del__)
|
Anaphory/p4-phylogeny
|
p4/Node.py
|
Python
|
gpl-2.0
| 9,304
|
import os
import django

# Directory holding this suite's fixtures (templates, media).
TEST_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'tests')
# In-memory cache so compressed assets never touch disk during tests.
COMPRESS_CACHE_BACKEND = 'locmem://'
# Throwaway in-memory SQLite database for the test run.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    }
}
INSTALLED_APPS = (
    'compressor',
    'jingo',
)
# jingo.Loader first so Jinja2 templates take precedence over Django's.
TEMPLATE_LOADERS = (
    'jingo.Loader',
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
MEDIA_URL = '/media/'
STATIC_URL = MEDIA_URL
MEDIA_ROOT = os.path.join(TEST_DIR, 'media')
TEMPLATE_DIRS = (
    # Specifically choose a name that will not be considered
    # by app_directories loader, to make sure each test uses
    # a specific template without considering the others.
    os.path.join(TEST_DIR, 'test_templates'),
)
TEST_RUNNER = 'discover_runner.DiscoverRunner'
# Dummy key; this settings module is for tests only, never production.
SECRET_KEY = "iufoj=mibkpdz*%bob952x(%49rqgv8gg45k36kjcg76&-y5=!"
|
rtucker-mozilla/WhistlePig
|
vendor-local/lib/python/jingo_offline_compressor/test_settings.py
|
Python
|
bsd-3-clause
| 912
|
import datetime

from App.model import UserAgents
async def agent_save(request, user):
    """Record the request's User-Agent string for *user*.

    If the agent string has been seen before for this user, its
    last-seen time is updated and its counter incremented; otherwise a
    new UserAgents row is inserted.

    Bug fixes: the original referenced the undefined names ``contents``,
    ``content`` and ``ip_in`` (NameError on every call); they are
    resolved here from the fetched records and the current UA string.
    Requires ``import datetime`` at module level.
    """
    ua = request.headers['User-Agent']
    now = datetime.datetime.now()
    # NOTE(review): the original iterates user.ips here; presumably this
    # should be the user's stored user-agent records -- confirm against
    # the App.model relations.
    uas = [await i for i in user.ips]
    contents = [i.content for i in uas]
    if ua in contents:
        # Known agent: bump its last-seen time and hit count.
        ua_in = [i for i in uas if i.content == ua][0]
        ua_in.utime = now
        ua_in.count += 1
        await ua_in.save()
    else:
        # First time this agent is seen for the user: create a record.
        await UserAgents.insert_many([{
            "content": ua,
            'ctime': now,
            'utime': now,
            'user': user
        }])
|
Basic-Components/auth-center
|
auth-center/App/auth/login_agents.py
|
Python
|
mit
| 533
|
"""
Registration page.
"""
import os
from edxapp_acceptance.pages.lms.login_and_register import CombinedLoginAndRegisterPage
from edxapp_acceptance.pages.common.utils import disable_animations
from regression.pages.whitelabel import LMS_URL_WITH_AUTH, ORG
from regression.tests.helpers.utils import click_checkbox, fill_input_fields, select_drop_down_values
class RegisterPageExtended(CombinedLoginAndRegisterPage):
    """
    This class is an extended class of Register Page,
    where we add methods that are different or not used in Register Page
    """
    # Registration endpoint of the whitelabel LMS under test.
    url = os.path.join(LMS_URL_WITH_AUTH, "register")
    def register_white_label_user(self, registration_fields, submit=True):
        """
        Registers a whitelabel users for whitelabel tests.
        Arguments:
            registration_fields(dict): A dictionary of all fields to be filled.
            submit(bool): If True then registration form will be submitted.
        """
        disable_animations(self)
        self.wait_for_element_visibility(
            '.form-toggle[data-type="login"]', 'Registration form is visible.'
        )
        # Map CSS selectors to the values they should be filled with.
        elements_and_values = {
            '#register-email': registration_fields['email'],
            '#register-name': registration_fields['name'],
            '#register-username': registration_fields['username'],
            '#register-password': registration_fields['password'],
            '#register-first_name': registration_fields['first_name'],
            '#register-last_name': registration_fields['last_name'],
            '#register-state': registration_fields['state']
        }
        drop_down_names_and_values = {
            "country": registration_fields['country'],
        }
        select_drop_down_values(self, drop_down_names_and_values)
        fill_input_fields(self, elements_and_values)
        # Some tests still don't display the new registration page when running
        # on Jenkins. Once registration page is updated, remove this condition.
        if self.q(css='#register-honor_code').visible:
            click_checkbox(self, '#register-honor_code')
        click_checkbox(self, '#register-terms_of_service')
        # MITxPRO registrations require two extra company-related fields.
        if ORG == 'MITxPRO':
            fill_input_fields(
                self,
                {
                    '#register-company': registration_fields['company'],
                    '#register-title': registration_fields['title']
                }
            )
        # HarvardMedGlobalAcademy uses profession/specialty dropdowns in
        # place of the demographic dropdowns used by every other org.
        if ORG != 'HarvardMedGlobalAcademy':
            select_drop_down_values(
                self,
                {
                    "year_of_birth": registration_fields['year_of_birth'],
                    "gender": registration_fields['gender'],
                    "level_of_education": registration_fields[
                        'level_of_education'
                    ]
                }
            )
        else:
            select_drop_down_values(
                self,
                {
                    "profession": registration_fields['profession'],
                    "specialty": registration_fields['specialty']
                }
            )
        if submit:
            self.q(css='.register-button').click()
    def toggle_to_login_page(self):
        """
        Toggle to login page
        """
        self.q(css='.form-toggle[data-type="login"]').click()
|
edx/edx-e2e-tests
|
regression/pages/whitelabel/registration_page.py
|
Python
|
agpl-3.0
| 3,344
|
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""This module contains a generic class for testing data connectors.
Note that this class DOES NOT inherit from unittest.TestCase, being
an abstract test. If you create a new data connector and would like
to test it, simply create a __init__.py and test.py file in a sub-
package called like your data connector. In the test.py file,
create a class inherited from AbstractDCTest.
Your test.py file should contains something like:
# Don't forget the license and at least a line of documentation
from unittest import TestCase
from tests.dc.test import AbstractDCTest
    from dc.{your_connector} import {YourConnector}
class DCTest(AbstractDCTest, TestCase):
name = "dc_name"
connector = {YourConnector}
"""
import os
from datetime import datetime
import yaml
from model import exceptions as mod_exceptions
from model.functions import *
from model import Model
from repository import Repository
from tests.model import *
# Give every test model a repository with no data connector yet; the
# connector is attached per-test in AbstractDCTest.setup_data_connector().
for model in models:
    model._repository = Repository(None, model)
class AbstractDCTest:
    """Abstract class for testing data connectors.
    This class is abstract.  It shouldn't be considered a regular
    test case and doesn't have enough informations to perform a test.
    It's a base test for a data connector (each data connector should
    have a class inherited from it).  This allows to test different
    data connector to check that each one has the same basic abilities
    as any other.
    Testing methods (some could be added, NOT MODIFIED):
        test_create -- try to create an object from a model
        test_update -- try to update an object
        test_save -- try to save and retrieve stored datas
        test_delete -- try to delete an object
        test_primary_keys -- test that the primary keys are unique
        test_auto_increment -- test the behavior of an autoincrement field
        test_auto_increment_delete -- check that old keys are not re-used
        test_default -- test the default value of a field
        test_find -- try to a retrieve a single object
        test_get_all -- try to retrieve all the created objects
    Other methods:
        setUp -- set up the test case
        tearDown -- tear down the test case
    """
    def setUp(self):
        """Set up the data connector."""
        self.setup_data_connector()
    def tearDown(self):
        """Destroy the data connector and tear it down."""
        self.teardown_data_connector(destroy=True)
    def setup_data_connector(self):
        """Setup the data_connector.
        If available, read the configuration file found in
        test/config/dc/{data_connector_name}.yml.  Otherwise, the
        file is created with the default configuration found in
        dc/{data_connector_name}/parameters.yml.
        """
        # type(self).connector is the concrete connector class supplied
        # by the subclass (see the module docstring).
        self.dc = type(self).connector()
        self.dc.setup_test()
        self.dc.repository_manager.record_models(models)
        for model in models:
            model._repository.data_connector = self.dc
            type(model).extend(model)
            self.dc.repository_manager.add_model(model)
    def teardown_data_connector(self, destroy=False):
        """Tear down the data connector.

        With destroy=True the underlying storage is wiped; otherwise the
        driver is merely closed so the data survives for a later setup.
        """
        self.dc.repository_manager.save()
        if destroy:
            self.dc.driver.destroy()
        else:
            self.dc.driver.close()
        self.dc = None
    def test_create(self):
        """Create a simple user."""
        repository = User._repository
        user = repository.create(username="Kredh", password="fore123")
        self.assertEqual(user.username, "Kredh")
        self.assertEqual(user.password, "fore123")
    def test_update(self):
        """Create and update a simple user."""
        repository = User._repository
        user = repository.create(username="Nitrate")
        user.username = "Erwyn"
        self.assertEqual(user.username, "Erwyn")
    def test_save(self):
        """Create a simple user and start a new data connector.
        This tests that the created datas are stored and can be retrieved
        exactly as they were stored.
        """
        repository = User._repository
        user = repository.create(username="Percyst")
        uid = user.id
        username = user.username
        users = repository.get_all()
        # Cycle the connector (without destroying the storage) to force a
        # fresh read of the persisted data.
        self.teardown_data_connector()
        self.setup_data_connector()
        retrieved = repository.find(uid)
        self.assertEqual(retrieved.id, uid)
        self.assertEqual(retrieved.username, username)
        # Same data, but a distinct in-memory object.
        self.assertIsNot(retrieved, user)
        self.assertEqual(len(repository.get_all()), len(users))
    def test_delete(self):
        """Create and delete an user.
        After the user was deleted, try to update it (which shouldn't
        work).
        """
        repository = User._repository
        user = repository.create(username="Noway")
        repository.delete(user)
        self.assertRaises(mod_exceptions.UpdateDeletedObject, setattr,
                user, "username", "no")
    def test_primary_keys(self):
        """Test that no created user has the same ID as another."""
        repository = User._repository
        users = repository.get_all()
        uids = set()
        for user in users:
            uids.add(user.id)
        # A set collapses duplicates, so equal sizes means all IDs unique.
        self.assertEqual(len(uids), len(users))
    def test_update_primary_key(self):
        """Create and update the primary key of a product."""
        repository = Product._repository
        product = repository.create(name="a jug of milk", price=2, quantity=1)
        product.name = "a croissant"
        self.assertIs(product, repository.find("a croissant"))
        self.assertRaises(mod_exceptions.ObjectNotFound, repository.find, \
                "a jug of milk")
    def test_auto_increment(self):
        """Test the good behavior of a autoincrement field.
        First, we get the user with the highest id.  When we create
        another user, its id should be greater than the previous one.
        This should still be true when we start a new data connection.
        """
        # Create at least one user (otherwise max will go crazy)
        repository = User._repository
        at_least_one_user = repository.create(username="Atlist")
        # Get the user with the biggest id
        max_user = max(repository.get_all(), key=lambda user: user.id)
        new_user = repository.create(username="Ideafix")
        self.assertTrue(max_user.id < new_user.id)
        # Reset the data connection
        self.teardown_data_connector()
        self.setup_data_connector()
        still_new_user = repository.create(username="Overall")
        # (A duplicated copy of this assertion was removed.)
        self.assertTrue(new_user.id < still_new_user.id)
    def test_auto_increment_delete(self):
        """Check that old IDs are not re-used after deletion.
        If we create a new user, an ID is allocated to it.  If we delete
        it and create a different user, its ID should be different (the old
        ID should not be used).
        """
        # Create the first user
        repository = User._repository
        first_user = repository.create(username="Uone")
        uid = first_user.id
        repository.delete(first_user)
        # Reset the data connection
        self.teardown_data_connector()
        self.setup_data_connector()
        second_user = repository.create(username="Utwo")
        self.assertTrue(second_user.id > uid)
        self.assertRaises(mod_exceptions.ObjectNotFound, repository.find, uid)
    def test_default(self):
        """Create a user to test the default value of 'password'."""
        repository = User._repository
        user = repository.create(username="Delfin")
        self.assertEqual(user.password, "unknown")
    def test_callable_default(self):
        """Create a product to test the default with callable."""
        repository = Product._repository
        product = repository.create(name="a huge car", price=100000, quantity=3)
        self.assertEqual(product.total_price, 300000)
    def test_find(self):
        """Create and try to find the created user."""
        repository = User._repository
        user = repository.create(username="Martha")
        # Test the find method with a positional argument
        found_1 = repository.find(user.id)
        self.assertIs(user, found_1)
        # Test the find method with keyword arguments
        found_2 = repository.find(id=user.id)
        self.assertIs(found_1, found_2)
    def test_get_all(self):
        """Create an user and look for it in the User.get_all()."""
        repository = User._repository
        user = repository.create(username="Crowd")
        users = repository.get_all()
        self.assertIn(user, users)
    def test_datetime(self):
        """Test that a datetime field is well stored."""
        repository = Post._repository
        post = repository.create(title="Something", content="No wait",
                published_at=datetime.now())
        published_at = post.published_at
        self.teardown_data_connector()
        self.setup_data_connector()
        post = repository.find(post.id)
        # Compare at whole-second precision; sub-second precision is not
        # guaranteed by every storage backend.
        stored = int(published_at.timestamp())
        published_at = int(post.published_at.timestamp())
        self.assertEqual(stored, published_at)
    def test_one2many(self):
        """Test the one2many relation between posts and comments.
        This test is primarily used to see if, when a comment is defined
        in a blog post, the blog post can retrieve this comment
        accurately.
        """
        post_repository = Post._repository
        comment_repository = Comment._repository
        # Create two blog posts (without comments)
        post_1 = post_repository.create(title="my trip to California",
                content="excellent!")
        post_2 = post_repository.create(title="one day in Paris",
                content="great!")
        # Create two comments in the first blog post
        comment_1 = comment_repository.create(post=post_1, content="always")
        comment_2 = comment_repository.create(post=post_1,
                content="definitely")
        # Check that the two comments are in the first blog post
        self.assertIs(comment_1.post, post_1)
        self.assertIs(comment_2.post, post_1)
        self.assertIn(comment_1, post_1.comments)
        self.assertIn(comment_2, post_1.comments)
        # Try to update the second comment
        comment_2.post = post_2
        self.assertIs(comment_2.post, post_2)
        self.assertIn(comment_2, post_2.comments)
    def test_many2one(self):
        """Test the many2one relation between posts and comments."""
        post_repository = Post._repository
        comment_repository = Comment._repository
        # Create two blog posts (without comments)
        post_1 = post_repository.create(title="a snowy day in New York City",
                content="sometimes it happens.")
        post_2 = post_repository.create(title="the car I didn't buy",
                content="Shame")
        # Create two comments in the first blog post
        comment_1 = comment_repository.create(post=post_1, content="cold")
        comment_2 = comment_repository.create(post=post_1,
                content="no fear")
        # Update the comments (delete)
        del post_1.comments[1]
        self.assertNotIn(comment_2, post_1.comments)
        self.assertIsNot(comment_2.post, post_1)
        # Update the comments (add)
        post_2.comments.append(comment_2)
        self.assertIn(comment_2, post_2.comments)
        self.assertIs(comment_2.post, post_2)
    def test_uni_many2one(self):
        """Test the unidirectional many2one relation."""
        command_repository = Command._repository
        product_repository = Product._repository
        cmd = command_repository.create(opportunity="birthday")
        product_1 = product_repository.create(name="an atlas", price=250,
                quantity=1)
        product_2 = product_repository.create(name="a map", price=2,
                quantity=1)
        cmd.products.append(product_1)
        cmd.products.append(product_2)
        self.assertIn(product_1, cmd.products)
        self.assertIn(product_2, cmd.products)
        # Unidirectional: the product must NOT gain a back-reference.
        self.assertRaises(TypeError, getattr, product_1, "command")
|
v-legoff/pa-poc3
|
src/tests/dc/test.py
|
Python
|
bsd-3-clause
| 13,883
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for two_sat.dnf_circuit_lib."""
import math
from absl.testing import absltest
from absl.testing import parameterized
import cirq
from cirq.circuits import insert_strategy
import numpy as np
from bangbang_qaoa import circuit_lib
from bangbang_qaoa.two_sat import dnf_circuit_lib
from bangbang_qaoa.two_sat import dnf_lib
class HamiltonianGeneratorTest(parameterized.TestCase):
  """Tests for the clause/DNF Hamiltonian exponential generators."""

  @parameterized.parameters(
      (True, True, 1),
      (True, False, -1),
      (False, True, -1),
      (False, False, 1),
  )
  def test_get_sign(self, is_negative, other_is_negative, expected):
    # Sign is +1 when both negation flags agree, -1 when they differ.
    self.assertEqual(dnf_circuit_lib._get_sign(is_negative, other_is_negative),
                     expected)

  @parameterized.parameters(
      (
          dnf_lib.Clause(4, 2, True, False),
          1,
          [
              (
                  (cirq.LineQubit(2),),
                  cirq.ZPowGate(exponent=-2/math.pi, global_shift=-0.5)
              ),
              (
                  (cirq.LineQubit(4),),
                  cirq.ZPowGate(exponent=2/math.pi, global_shift=-0.5)
              ),
              (
                  (cirq.LineQubit(2), cirq.LineQubit(4)),
                  cirq.ZZPowGate(exponent=2/math.pi, global_shift=-0.5)
              ),
          ],
      ),
      (
          dnf_lib.Clause(1, 3, False, True),
          0,
          [
              (
                  (cirq.LineQubit(1),),
                  cirq.ZPowGate(exponent=0, global_shift=-0.5)
              ),
              (
                  (cirq.LineQubit(3),),
                  cirq.ZPowGate(exponent=0, global_shift=-0.5)
              ),
              (
                  (cirq.LineQubit(1), cirq.LineQubit(3)),
                  cirq.ZZPowGate(exponent=0, global_shift=-0.5)
              ),
          ],
      )
  )
  def test_generate_clause_hamiltonian_exponential(self, clause, time,
                                                   expected_answers):
    circuit = cirq.Circuit()
    circuit.append(
        dnf_circuit_lib.generate_clause_hamiltonian_exponential(clause, time),
        strategy=insert_strategy.InsertStrategy.NEW_THEN_INLINE)
    generator = circuit.all_operations()
    answer_generator = iter(expected_answers)
    # Compare the three emitted operations (two single-qubit Z rotations
    # and one two-qubit ZZ rotation) against expected (qubits, gate) pairs.
    operation = next(generator)
    expected_qubits, expected_gate = next(answer_generator)
    self.assertTupleEqual(operation.qubits, expected_qubits)
    self.assertEqual(operation.gate, expected_gate)
    operation = next(generator)
    expected_qubits, expected_gate = next(answer_generator)
    self.assertTupleEqual(operation.qubits, expected_qubits)
    self.assertEqual(operation.gate, expected_gate)
    operation = next(generator)
    expected_qubits, expected_gate = next(answer_generator)
    self.assertTupleEqual(operation.qubits, expected_qubits)
    self.assertEqual(operation.gate, expected_gate)
    # Both streams must now be exhausted: no extra operations, no
    # unconsumed expectations.
    with self.assertRaises(StopIteration):
      next(generator)
    with self.assertRaises(StopIteration):
      next(answer_generator)

  def test_generate_dnf_hamiltonian_order(self):
    # Operations of a single clause must appear in a fixed order:
    # Z on the first qubit, Z on the second, then the ZZ coupling.
    circuit = cirq.Circuit()
    dnf = dnf_lib.DNF(10, [dnf_lib.Clause(5, 7, False, False)])
    circuit.append(
        dnf_circuit_lib.generate_dnf_hamiltonian_exponential(dnf, 0.5),
        strategy=insert_strategy.InsertStrategy.NEW_THEN_INLINE)
    generator = circuit.all_operations()
    operation = next(generator)
    self.assertEqual(
        operation,
        cirq.ZPowGate(exponent=-1.0 / math.pi,
                      global_shift=-0.5).on(cirq.LineQubit(5)))
    operation = next(generator)
    self.assertEqual(
        operation,
        cirq.ZPowGate(exponent=-1.0 / math.pi,
                      global_shift=-0.5).on(cirq.LineQubit(7)))
    operation = next(generator)
    self.assertEqual(
        operation,
        cirq.ZZPowGate(exponent=-1.0 / math.pi,
                       global_shift=-0.5).on(cirq.LineQubit(5),
                                             cirq.LineQubit(7)))
    with self.assertRaises(StopIteration):
      next(generator)

  def test_generate_dnf_hamiltonian_content(self):
    clauses = [
        dnf_lib.Clause(2, 4, True, False),
        dnf_lib.Clause(5, 4, True, True)
    ]
    dnf = dnf_lib.DNF(10, clauses)
    circuit = cirq.Circuit()
    circuit.append(
        dnf_circuit_lib.generate_dnf_hamiltonian_exponential(dnf, 0.25),
        strategy=insert_strategy.InsertStrategy.NEW_THEN_INLINE)
    operations = list(circuit.all_operations())
    # Order-insensitive check: only the multiset of operations matters here.
    self.assertCountEqual(
        operations,
        [
            cirq.ZPowGate(exponent=-0.5 / math.pi,
                          global_shift=-0.5).on(cirq.LineQubit(4)),
            cirq.ZPowGate(exponent=0.5 / math.pi,
                          global_shift=-0.5).on(cirq.LineQubit(2)),
            cirq.ZZPowGate(exponent=0.5 / math.pi,
                           global_shift=-0.5).on(cirq.LineQubit(2),
                                                 cirq.LineQubit(4)),
            cirq.ZPowGate(exponent=0.5 / math.pi,
                          global_shift=-0.5).on(cirq.LineQubit(4)),
            cirq.ZPowGate(exponent=0.5 / math.pi,
                          global_shift=-0.5).on(cirq.LineQubit(5)),
            cirq.ZZPowGate(exponent=-0.5 / math.pi,
                           global_shift=-0.5).on(cirq.LineQubit(4),
                                                 cirq.LineQubit(5)),
        ]
    )
class BangBangProtocolCircuitTest(parameterized.TestCase):
  """Tests for dnf_circuit_lib.BangBangProtocolCircuit."""

  def test_bangbang_protocol_circuit_init(self):
    dnf = dnf_lib.DNF(4, [dnf_lib.Clause(0, 3, True, True)])
    circuit = dnf_circuit_lib.BangBangProtocolCircuit(5.3, dnf)
    self.assertEqual(circuit.dnf, dnf)
    self.assertEqual(circuit.chunk_time, 5.3)
    self.assertEqual(circuit.num_qubits, 4)

  def test_get_hamiltonian_diagonal(self):
    dnf = dnf_lib.DNF(4, [dnf_lib.Clause(0, 3, True, True)])
    circuit = dnf_circuit_lib.BangBangProtocolCircuit(5.3, dnf)
    np.testing.assert_allclose(
        circuit.get_hamiltonian_diagonal(),
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0])

  def test_bangbang_protocol_circuit_init_neg_chunk_time(self):
    # Construction must reject non-positive chunk times.
    with self.assertRaisesRegex(
        ValueError,
        'chunk_time must be positive, not -1.2'
    ):
      dnf_circuit_lib.BangBangProtocolCircuit(-1.2, dnf_lib.DNF(22, []))

  def test_generate_qaoa_circuit(self):
    dnf = dnf_lib.DNF(5, [dnf_lib.Clause(1, 2, False, False)])
    circuit = dnf_circuit_lib.BangBangProtocolCircuit(0.2, dnf)
    qaoa_circuit = circuit.qaoa_circuit([
        circuit_lib.HamiltonianType.CONSTRAINT,
        circuit_lib.HamiltonianType.CONSTRAINT,
        circuit_lib.HamiltonianType.CONSTRAINT,
        circuit_lib.HamiltonianType.X,
        circuit_lib.HamiltonianType.CONSTRAINT,
        circuit_lib.HamiltonianType.X,
        circuit_lib.HamiltonianType.X,
    ])
    # Should contain 21 gates
    # 5 gates from Hadamard Layer
    # 2 layers of X Hamiltonian, which has 5 gates each
    # 2 layers of DNF Hamiltonian, which has 3 gates each
    # 5 + 2*5 + 2*3 = 21
    self.assertLen(list(qaoa_circuit.all_operations()), 21)

  def test_get_wavefunction(self):
    dnf = dnf_lib.DNF(2, [dnf_lib.Clause(0, 1, True, False)])
    # time is chosen so that ZPowGate = sqrt(Z) and Rx = sqrt(X) up to phase.
    circuit = dnf_circuit_lib.BangBangProtocolCircuit(math.pi / 4, dnf)
    bangbang_protocol = [
        circuit_lib.HamiltonianType.CONSTRAINT,
        circuit_lib.HamiltonianType.X,
    ]
    cirq.testing.assert_allclose_up_to_global_phase(
        circuit.get_wavefunction(bangbang_protocol),
        np.array([0, (1 + 1j) / 2, (-1 + 1j) / 2, 0]),
        atol=0.000001)

  def test_get_probabilities_wrong_shape(self):
    dnf = dnf_lib.DNF(2, [dnf_lib.Clause(0, 1, True, False)])
    # time is chosen so that ZPowGate = sqrt(Z) and Rx = sqrt(X) up to phase.
    circuit = dnf_circuit_lib.BangBangProtocolCircuit(math.pi / 4, dnf)
    with self.assertRaisesRegex(
        ValueError,
        r'The shape of wavefunction should be \(4\,\) but got \(3\,\)'):
      circuit.get_probabilities(wavefunction=np.array([1., 0., 0.]))

  def test_get_probabilities(self):
    dnf = dnf_lib.DNF(2, [dnf_lib.Clause(0, 1, True, False)])
    # time is chosen so that ZPowGate = sqrt(Z) and Rx = sqrt(X) up to phase.
    circuit = dnf_circuit_lib.BangBangProtocolCircuit(math.pi / 4, dnf)
    bangbang_protocol = [
        circuit_lib.HamiltonianType.CONSTRAINT,
        circuit_lib.HamiltonianType.X,
    ]
    probabilities = circuit.get_probabilities(
        circuit.get_wavefunction(bangbang_protocol))
    np.testing.assert_allclose(
        probabilities,
        [0, 0.5, 0.5, 0],
        atol=0.00001)

  # Each row is (measurement bits, fraction of the optimum satisfied).
  @parameterized.parameters(
      ([False, False, False, False], 3 / 4),
      ([False, False, False, True], 1),
      ([False, False, True, False], 1),
      ([False, False, True, True], 1),
      ([False, True, False, False], 3 /4),
      ([False, True, False, True], 1),
      ([False, True, True, False], 1),
      ([False, True, True, True], 1),
      ([True, False, False, False], 3 / 4),
      ([True, False, False, True], 1),
      ([True, False, True, False], 1),
      ([True, False, True, True], 1),
      ([True, True, False, False], 3 / 4),
      ([True, True, False, True], 1),
      ([True, True, True, False], 1),
      ([True, True, True, True], 1),
  )
  def test_constraint_evaluation(self, measurement, expected_value):
    dnf = dnf_lib.DNF(4, [dnf_lib.Clause(0, 1, False, False),
                          dnf_lib.Clause(0, 1, True, True),
                          dnf_lib.Clause(0, 1, False, True),
                          dnf_lib.Clause(0, 1, True, False),
                          dnf_lib.Clause(2, 3, False, False)])
    self.assertEqual(dnf.optimal_num_satisfied, 4)
    circuit = dnf_circuit_lib.BangBangProtocolCircuit(1, dnf)
    self.assertEqual(circuit.constraint_evaluation(measurement),
                     expected_value)

  def test_get_constraint_expectation(self):
    dnf = dnf_lib.DNF(2, [dnf_lib.Clause(0, 1, True, False)])
    # time is chosen so that ZPowGate = sqrt(Z) and Rx = sqrt(X) up to phase.
    circuit = dnf_circuit_lib.BangBangProtocolCircuit(math.pi / 4, dnf)
    bangbang_protocol = [
        circuit_lib.HamiltonianType.CONSTRAINT,
        circuit_lib.HamiltonianType.X,
    ]
    self.assertAlmostEqual(
        circuit.get_constraint_expectation(
            circuit.get_wavefunction(bangbang_protocol)),
        0.5,
        places=5)
# Run the absl test runner when executed directly.
if __name__ == '__main__':
  absltest.main()
|
google-research/google-research
|
bangbang_qaoa/two_sat/dnf_circuit_lib_test.py
|
Python
|
apache-2.0
| 11,116
|
import unittest.mock
from labonneboite.common.models import Office
from labonneboite.common import pdf
from labonneboite.tests.test_base import DatabaseTest
from labonneboite.common.load_data import load_groupements_employeurs
class DownloadTest(DatabaseTest):
    """Tests for the office details page and its PDF download endpoint."""

    def setUp(self):
        super().setUp()
        # Create an office.
        self.office = Office(
            departement='75',
            siret='78548035101646',
            company_name='NICOLAS',
            headcount='03',
            city_code='75110',
            zipcode='75010',
            naf='4646Z',
            tel='0100000000',
            score=80,
            x=2.3488,
            y=48.8534,
        )
        self.office.save()
        # Remove pdf file if it already exists
        pdf.delete_file(self.office)

    def test_office_fields_and_properties_are_str(self):
        """
        Check if office fields are str
        """
        self.assertEqual(type(self.office.company_name), str)
        self.assertEqual(type(self.office.address_as_text), str)
        self.assertEqual(type(self.office.phone), str)
        self.assertEqual(type(self.office.google_url), str)

    def test_office_details_page(self):
        """
        Test the office details page of a regular office.
        """
        rv = self.app.get(self.url_for('office.details', siret=self.office.siret))
        self.assertEqual(rv.status_code, 200)

    def test_office_details_page_of_non_existing_office(self):
        """
        Test the office details page of a non existing office.
        """
        # The details page of an nonexistent office should raise a 404.
        rv = self.app.get(self.url_for('office.details', siret='7x5x8x3x1x1x46'))
        self.assertEqual(rv.status_code, 404)

    def test_office_details_page_of_office_having_buggy_naf(self):
        """
        Test the office details page of an office having NAF 9900Z.
        """
        self.office.naf = '9900Z'
        self.office.save()
        rv = self.app.get(self.url_for('office.details', siret=self.office.siret))
        self.assertEqual(rv.status_code, 200)

    def test_office_details_page_of_office_which_is_a_groupement_employeurs(self):
        """
        Test the office details page of an office being a groupement d'employeurs.
        """
        self.office.siret = '30651644400024'  # first siret in groupements_employeurs.csv
        self.office.save()
        self.assertIn(self.office.siret, load_groupements_employeurs())
        rv = self.app.get(self.url_for('office.details', siret=self.office.siret))
        self.assertEqual(rv.status_code, 200)
        # The page must mention the groupement d'employeurs status.
        self.assertIn(
            u"Cette entreprise est un groupement d'employeurs.",
            rv.data.decode(),
        )

    def test_office_details_page_of_office_which_is_not_a_groupement_employeurs(self):
        """
        Test the office details page of an office not being a groupement d'employeurs.
        """
        self.assertFalse(self.office.siret in load_groupements_employeurs())
        rv = self.app.get(self.url_for('office.details', siret=self.office.siret))
        self.assertEqual(rv.status_code, 200)
        self.assertNotIn(
            u"Cette entreprise est un groupement d'employeurs.",
            rv.data.decode(),
        )

    def test_download_regular_office(self):
        """
        Test the office PDF download
        """
        # normal behavior
        rv = self.app.get(self.url_for('office.download', siret=self.office.siret))
        self.assertEqual(rv.status_code, 200)
        self.assertEqual('application/pdf', rv.mimetype)
        # A real PDF should weigh well over 1000 bytes.
        self.assertLess(1000, rv.content_length)

    def test_download_triggers_activity_log(self):
        with unittest.mock.patch('labonneboite.common.activity.log') as activity_log:
            self.app.get(self.url_for('office.download', siret=self.office.siret))
        activity_log.assert_called_with('telecharger-pdf', siret=self.office.siret)

    def test_download_missing_siret(self):
        """
        Test the office PDF download of a non existing office
        """
        # siret does not exist
        rv = self.app.get(self.url_for('office.download', siret='1234567890'))
        self.assertEqual(rv.status_code, 404)

    def test_download_of_office_having_buggy_naf(self):
        """
        Test the office PDF download of an office having NAF 9900Z.
        """
        self.office.naf = '9900Z'
        self.office.save()
        rv = self.app.get(self.url_for('office.download', siret=self.office.siret))
        self.assertEqual(rv.status_code, 200)

    def test_toggle_details_event(self):
        with unittest.mock.patch('labonneboite.common.activity.log') as activity_log:
            rv = self.app.post(self.url_for('office.toggle_details_event', siret=self.office.siret))
        activity_log.assert_called_with('afficher-details', siret=self.office.siret)
        self.assertEqual(rv.status_code, 200)
|
StartupsPoleEmploi/labonneboite
|
labonneboite/tests/web/front/test_companies.py
|
Python
|
agpl-3.0
| 4,969
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.volume.v3 import types_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestTypesClient(base.BaseServiceTest):
    """Unit tests for the Cinder v3 volume types client.

    Each FAKE_* constant is a canned REST payload; each _test_* helper
    mocks the corresponding RestClient verb and checks the client method
    returns that payload, optionally with a bytes body.
    """

    FAKE_CREATE_VOLUME_TYPE = {
        'volume_type': {
            'id': '6685584b-1eac-4da6-b5c3-555430cf68ff',
            'name': 'vol-type-001',
            'description': 'volume type 0001',
            'is_public': True,
            'os-volume-type-access:is_public': True,
            'extra_specs': {
                'volume_backend_name': 'rbd'
            }
        }
    }

    FAKE_DEFAULT_VOLUME_TYPE_INFO = {
        'volume_type': {
            'id': '6685584b-1eac-4da6-b5c3-555430cf68ff',
            'qos_specs_id': None,
            'name': 'volume-type-test',
            'description': 'default volume type',
            'is_public': True,
            'os-volume-type-access:is_public': True,
            'extra_specs': {
                'volume_backend_name': 'rbd'
            }
        }
    }

    FAKE_UPDATE_VOLUME_TYPE = {
        'volume_type': {
            'id': '6685584b-1eac-4da6-b5c3-555430cf68ff',
            'qos_specs_id': None,
            'name': 'volume-type-test',
            'description': 'default volume type',
            'os-volume-type-access:is_public': True,
            'is_public': True,
            'extra_specs': {
                'volume_backend_name': 'rbd'
            }
        }
    }

    FAKE_VOLUME_TYPES = {
        'volume_types': [
            {
                'name': 'volume_type01',
                'qos_specs_id': None,
                'extra_specs': {
                    'volume_backend_name': 'lvmdriver-1'
                },
                'os-volume-type-access:is_public': True,
                'is_public': True,
                'id': '6685584b-1eac-4da6-b5c3-555430cf68ff',
                'description': None
            },
            {
                'name': 'volume_type02',
                'qos_specs_id': None,
                'extra_specs': {
                    'volume_backend_name': 'lvmdriver-1'
                },
                'os-volume-type-access:is_public': True,
                'is_public': True,
                'id': '8eb69a46-df97-4e41-9586-9a40a7533803',
                'description': None
            }
        ]
    }

    FAKE_VOLUME_TYPE_EXTRA_SPECS = {
        'extra_specs': {
            'capabilities': 'gpu'
        }
    }

    FAKE_SHOW_VOLUME_TYPE_EXTRA_SPECS = {
        'capabilities': 'gpu'
    }

    FAKE_VOLUME_TYPE_ACCESS = {
        'volume_type_access': [{
            'volume_type_id': '3c67e124-39ad-4ace-a507-8bb7bf510c26',
            'project_id': 'f270b245cb11498ca4031deb7e141cfa'
        }]
    }

    def setUp(self):
        super(TestTypesClient, self).setUp()
        # Client under test, wired to a fake auth provider (no real HTTP).
        fake_auth = fake_auth_provider.FakeAuthProvider()
        self.client = types_client.TypesClient(fake_auth,
                                               'volume',
                                               'regionOne')

    # --- helpers: one per client call, parameterized on str/bytes body ---

    def _test_list_volume_types(self, bytes_body=False):
        self.check_service_client_function(
            self.client.list_volume_types,
            'tempest.lib.common.rest_client.RestClient.get',
            self.FAKE_VOLUME_TYPES,
            bytes_body)

    def _test_show_volume_type(self, bytes_body=False):
        self.check_service_client_function(
            self.client.show_volume_type,
            'tempest.lib.common.rest_client.RestClient.get',
            self.FAKE_DEFAULT_VOLUME_TYPE_INFO,
            to_utf=bytes_body,
            volume_type_id="6685584b-1eac-4da6-b5c3-555430cf68ff")

    def _test_create_volume_type(self, bytes_body=False):
        self.check_service_client_function(
            self.client.create_volume_type,
            'tempest.lib.common.rest_client.RestClient.post',
            self.FAKE_CREATE_VOLUME_TYPE,
            to_utf=bytes_body,
            name='volume-type-test')

    def _test_delete_volume_type(self):
        self.check_service_client_function(
            self.client.delete_volume_type,
            'tempest.lib.common.rest_client.RestClient.delete',
            {}, status=202,
            volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff')

    def _test_list_volume_types_extra_specs(self, bytes_body=False):
        self.check_service_client_function(
            self.client.list_volume_types_extra_specs,
            'tempest.lib.common.rest_client.RestClient.get',
            self.FAKE_VOLUME_TYPE_EXTRA_SPECS,
            to_utf=bytes_body,
            volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff')

    def _test_show_volume_type_extra_specs(self, bytes_body=False):
        self.check_service_client_function(
            self.client.show_volume_type_extra_specs,
            'tempest.lib.common.rest_client.RestClient.get',
            self.FAKE_SHOW_VOLUME_TYPE_EXTRA_SPECS,
            volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff',
            extra_specs_name='capabilities',
            to_utf=bytes_body)

    def _test_create_volume_type_extra_specs(self, bytes_body=False):
        self.check_service_client_function(
            self.client.create_volume_type_extra_specs,
            'tempest.lib.common.rest_client.RestClient.post',
            self.FAKE_VOLUME_TYPE_EXTRA_SPECS,
            volume_type_id="6685584b-1eac-4da6-b5c3-555430cf68ff",
            extra_specs=self.FAKE_VOLUME_TYPE_EXTRA_SPECS,
            to_utf=bytes_body)

    def _test_delete_volume_type_extra_specs(self):
        self.check_service_client_function(
            self.client.delete_volume_type_extra_specs,
            'tempest.lib.common.rest_client.RestClient.delete',
            {}, status=202,
            volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff',
            extra_spec_name='volume_backend_name')

    def _test_update_volume_type(self, bytes_body=False):
        self.check_service_client_function(
            self.client.update_volume_type,
            'tempest.lib.common.rest_client.RestClient.put',
            self.FAKE_UPDATE_VOLUME_TYPE,
            volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff',
            to_utf=bytes_body,
            name='update-volume-type-test',
            description='test update volume type description')

    def _test_update_volume_type_extra_specs(self, bytes_body=False):
        self.check_service_client_function(
            self.client.update_volume_type_extra_specs,
            'tempest.lib.common.rest_client.RestClient.put',
            self.FAKE_SHOW_VOLUME_TYPE_EXTRA_SPECS,
            extra_spec_name='capabilities',
            volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff',
            extra_specs=self.FAKE_SHOW_VOLUME_TYPE_EXTRA_SPECS,
            to_utf=bytes_body)

    def _test_add_type_access(self):
        self.check_service_client_function(
            self.client.add_type_access,
            'tempest.lib.common.rest_client.RestClient.post',
            {}, status=202,
            volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff')

    def _test_remove_type_access(self):
        self.check_service_client_function(
            self.client.remove_type_access,
            'tempest.lib.common.rest_client.RestClient.post',
            {}, status=202,
            volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff')

    def _test_list_type_access(self, bytes_body=False):
        self.check_service_client_function(
            self.client.list_type_access,
            'tempest.lib.common.rest_client.RestClient.get',
            self.FAKE_VOLUME_TYPE_ACCESS,
            volume_type_id='3c67e124-39ad-4ace-a507-8bb7bf510c26',
            to_utf=bytes_body)

    # --- actual test cases: each exercises a helper in str and bytes mode ---

    def test_list_volume_types_with_str_body(self):
        self._test_list_volume_types()

    def test_list_volume_types_with_bytes_body(self):
        self._test_list_volume_types(bytes_body=True)

    def test_show_volume_type_with_str_body(self):
        self._test_show_volume_type()

    def test_show_volume_type_with_bytes_body(self):
        self._test_show_volume_type(bytes_body=True)

    def test_create_volume_type_str_body(self):
        self._test_create_volume_type()

    def test_create_volume_type_with_bytes_body(self):
        self._test_create_volume_type(bytes_body=True)

    def test_list_volume_types_extra_specs_with_str_body(self):
        self._test_list_volume_types_extra_specs()

    def test_list_volume_types_extra_specs_with_bytes_body(self):
        self._test_list_volume_types_extra_specs(bytes_body=True)

    def test_show_volume_type_extra_specs_with_str_body(self):
        self._test_show_volume_type_extra_specs()

    def test_show_volume_type_extra_specs_with_bytes_body(self):
        self._test_show_volume_type_extra_specs(bytes_body=True)

    def test_create_volume_type_extra_specs_with_str_body(self):
        self._test_create_volume_type_extra_specs()

    def test_create_volume_type_extra_specs_with_bytes_body(self):
        self._test_create_volume_type_extra_specs(bytes_body=True)

    def test_delete_volume_type_extra_specs(self):
        self._test_delete_volume_type_extra_specs()

    def test_update_volume_type_with_str_body(self):
        self._test_update_volume_type()

    def test_update_volume_type_with_bytes_body(self):
        self._test_update_volume_type(bytes_body=True)

    def test_delete_volume_type(self):
        self._test_delete_volume_type()

    def test_update_volume_type_extra_specs_with_str_body(self):
        self._test_update_volume_type_extra_specs()

    def test_update_volume_type_extra_specs_with_bytes_body(self):
        self._test_update_volume_type_extra_specs(bytes_body=True)

    def test_add_type_access(self):
        self._test_add_type_access()

    def test_remove_type_access(self):
        self._test_remove_type_access()

    def test_list_type_access_with_str_body(self):
        self._test_list_type_access()

    def test_list_type_access_with_bytes_body(self):
        self._test_list_type_access(bytes_body=True)
|
masayukig/tempest
|
tempest/tests/lib/services/volume/v3/test_types_client.py
|
Python
|
apache-2.0
| 10,619
|
# Copyright 2012-2021 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import mlog, mparser
import pickle, os, uuid
import sys
from itertools import chain
from pathlib import PurePath
from collections import OrderedDict
from .mesonlib import (
MesonException, EnvironmentException, MachineChoice, PerMachine,
default_libdir, default_libexecdir, default_prefix, split_args,
OptionKey, OptionType,
)
from .wrap import WrapMode
import ast
import argparse
import configparser
import enum
import shlex
import typing as T
if T.TYPE_CHECKING:
from . import dependencies
from .compilers.compilers import Compiler, CompileResult # noqa: F401
from .environment import Environment
from .mesonlib import OptionOverrideProxy
OptionDictType = T.Union[T.Dict[str, 'UserOption[T.Any]'], OptionOverrideProxy]
KeyedOptionDictType = T.Union[T.Dict['OptionKey', 'UserOption[T.Any]'], OptionOverrideProxy]
CompilerCheckCacheKey = T.Tuple[T.Tuple[str, ...], str, str, T.Tuple[str, ...], str]
version = '0.58.999'
backendlist = ['ninja', 'vs', 'vs2010', 'vs2015', 'vs2017', 'vs2019', 'xcode']
default_yielding = False
# Can't bind this near the class method it seems, sadly.
_T = T.TypeVar('_T')
class MesonVersionMismatchException(MesonException):
    '''Build directory generated with Meson version is incompatible with current version'''

    def __init__(self, old_version: str, current_version: str) -> None:
        # Build the message first, then hand it to the base exception.
        message = ('Build directory has been generated with Meson version {}, '
                   'which is incompatible with the current version {}.'
                   .format(old_version, current_version))
        super().__init__(message)
        self.old_version = old_version
        self.current_version = current_version
class UserOption(T.Generic[_T]):
    """Base class for user-settable build options.

    Subclasses implement validate_value(); the current value is stored in
    self.value via set_value().
    """

    def __init__(self, description: str, choices: T.Optional[T.Union[str, T.List[_T]]], yielding: T.Optional[bool]):
        super().__init__()
        self.choices = choices
        self.description = description
        # None means "use the module-wide default" rather than False.
        if yielding is None:
            yielding = default_yielding
        if not isinstance(yielding, bool):
            raise MesonException('Value of "yielding" must be a boolean.')
        self.yielding = yielding

    def printable_value(self) -> T.Union[str, int, bool, T.List[T.Union[str, int, bool]]]:
        # self.value is set by subclasses through set_value().
        assert isinstance(self.value, (str, int, bool, list))
        return self.value

    # Check that the input is a valid value and return the
    # "cleaned" or "native" version. For example the Boolean
    # option could take the string "true" and return True.
    def validate_value(self, value: T.Any) -> _T:
        raise RuntimeError('Derived option class did not override validate_value.')

    def set_value(self, newvalue: T.Any) -> None:
        self.value = self.validate_value(newvalue)
class UserStringOption(UserOption[str]):
    """A user option holding a free-form string value."""

    def __init__(self, description: str, value: T.Any, yielding: T.Optional[bool] = None):
        super().__init__(description, None, yielding)
        self.set_value(value)

    def validate_value(self, value: T.Any) -> str:
        # Strings pass through untouched; anything else is rejected.
        if isinstance(value, str):
            return value
        raise MesonException('Value "%s" for string option is not a string.' % str(value))
class UserBooleanOption(UserOption[bool]):
    """A boolean option; also accepts the strings 'true' and 'false'."""

    def __init__(self, description: str, value, yielding: T.Optional[bool] = None) -> None:
        super().__init__(description, [True, False], yielding)
        self.set_value(value)

    def __bool__(self) -> bool:
        return self.value

    def validate_value(self, value: T.Any) -> bool:
        # Real booleans pass through; strings are matched case-insensitively.
        if isinstance(value, bool):
            return value
        if not isinstance(value, str):
            raise MesonException(f'Value {value} cannot be converted to a boolean')
        lowered = value.lower()
        if lowered == 'true':
            return True
        if lowered == 'false':
            return False
        raise MesonException('Value %s is not boolean (true or false).' % value)
class UserIntegerOption(UserOption[int]):
    """An integer option with optional inclusive min/max bounds.

    `value` is a (min, max, default) triple; either bound may be None.
    """

    def __init__(self, description: str, value: T.Any, yielding: T.Optional[bool] = None):
        min_value, max_value, default_value = value
        self.min_value = min_value
        self.max_value = max_value
        # Render the bounds as a human-readable choices string, e.g. '>=0, <=511'.
        bounds = []
        if min_value is not None:
            bounds.append('>=' + str(min_value))
        if max_value is not None:
            bounds.append('<=' + str(max_value))
        super().__init__(description, ', '.join(bounds), yielding)
        self.set_value(default_value)

    def validate_value(self, value: T.Any) -> int:
        # Strings are converted first, then bounds-checked like native ints.
        if isinstance(value, str):
            value = self.toint(value)
        if not isinstance(value, int):
            raise MesonException('New value for integer option is not an integer.')
        if self.min_value is not None and value < self.min_value:
            raise MesonException('New value %d is less than minimum value %d.' % (value, self.min_value))
        if self.max_value is not None and value > self.max_value:
            raise MesonException('New value %d is more than maximum value %d.' % (value, self.max_value))
        return value

    def toint(self, valuestring: str) -> int:
        """Convert a decimal string to int, wrapping failure in MesonException."""
        try:
            return int(valuestring)
        except ValueError:
            raise MesonException('Value string "%s" is not convertible to an integer.' % valuestring)
class OctalInt(int):
    # NinjaBackend.get_user_option_args uses str() to converts it to a command line option
    # UserUmaskOption.toint() uses int(str, 8) to convert it to an integer
    # So we need to use oct instead of dec here if we do not want values to be misinterpreted.
    def __str__(self) -> str:
        # '#o' produces the same '0oNNN' form as oct().
        return format(int(self), '#o')
class UserUmaskOption(UserIntegerOption, UserOption[T.Union[str, OctalInt]]):
    """Umask option: either the string 'preserve' or an octal int in 0..0o777."""

    def __init__(self, description: str, value: T.Any, yielding: T.Optional[bool] = None):
        # Bounds are fixed for umasks; only the default comes from the caller.
        super().__init__(description, (0, 0o777, value), yielding)
        self.choices = ['preserve', '0000-0777']

    def printable_value(self) -> str:
        if self.value == 'preserve':
            return self.value
        # Zero-padded four-digit octal, e.g. 0o22 -> '0022'.
        return format(self.value, '04o')

    def validate_value(self, value: T.Any) -> T.Union[str, OctalInt]:
        # None and 'preserve' both normalize to the sentinel string.
        if value is None or value == 'preserve':
            return 'preserve'
        return OctalInt(super().validate_value(value))

    def toint(self, valuestring: T.Union[str, OctalInt]) -> int:
        # Umask strings are base-8 (e.g. '022'), unlike the decimal parent.
        try:
            return int(valuestring, 8)
        except ValueError as e:
            raise MesonException(f'Invalid mode: {e}')
class UserComboOption(UserOption[str]):
    """A string option restricted to a fixed list of choices."""

    def __init__(self, description: str, choices: T.List[str], value: T.Any, yielding: T.Optional[bool] = None):
        super().__init__(description, choices, yielding)
        if not isinstance(self.choices, list):
            raise MesonException('Combo choices must be an array.')
        for choice in self.choices:
            if not isinstance(choice, str):
                raise MesonException('Combo choice elements must be strings.')
        self.set_value(value)

    def validate_value(self, value: T.Any) -> str:
        # Accept immediately on a valid choice; otherwise build a
        # descriptive error naming the (stringified) type of the input.
        if value in self.choices:
            return value
        if isinstance(value, bool):
            _type = 'boolean'
        elif isinstance(value, (int, float)):
            _type = 'number'
        else:
            _type = 'string'
        optionsstring = ', '.join([f'"{item}"' for item in self.choices])
        raise MesonException('Value "{}" (of type "{}") for combo option "{}" is not one of the choices.'
                             ' Possible choices are (as string): {}.'.format(
                                 value, _type, self.description, optionsstring))
class UserArrayOption(UserOption[T.List[str]]):
    """A user option holding a list of strings.

    Accepts native lists, bracketed literal strings ('[...]') and -- for
    command-line input -- comma- or shell-separated strings.
    """

    def __init__(self, description: str, value: T.Union[str, T.List[str]], split_args: bool = False, user_input: bool = False, allow_dups: bool = False, **kwargs: T.Any) -> None:
        super().__init__(description, kwargs.get('choices', []), yielding=kwargs.get('yielding', None))
        self.split_args = split_args
        self.allow_dups = allow_dups
        self.value = self.validate_value(value, user_input=user_input)

    def validate_value(self, value: T.Union[str, T.List[str]], user_input: bool = True) -> T.List[str]:
        """Normalize `value` to a list of strings, raising MesonException on bad input."""
        # User input is for options defined on the command line (via -D
        # options). Users can put their input in as a comma separated
        # string, but for defining options in meson_options.txt the format
        # should match that of a combo
        if not user_input and isinstance(value, str) and not value.startswith('['):
            raise MesonException('Value does not define an array: ' + value)

        if isinstance(value, str):
            if value.startswith('['):
                # NOTE(review): ast.literal_eval can also raise SyntaxError for
                # malformed literals; that currently escapes unconverted.
                try:
                    newvalue = ast.literal_eval(value)
                except ValueError:
                    raise MesonException(f'malformed option {value}')
            elif value == '':
                newvalue = []
            else:
                if self.split_args:
                    newvalue = split_args(value)
                else:
                    newvalue = [v.strip() for v in value.split(',')]
        elif isinstance(value, list):
            newvalue = value
        else:
            # Bug fix: this message previously interpolated `newvalue`, which
            # is unbound on this branch, so unexpected input types raised
            # NameError instead of the intended MesonException.
            raise MesonException(f'"{value}" should be a string array, but it is not')

        if not self.allow_dups and len(set(newvalue)) != len(newvalue):
            msg = 'Duplicated values in array option is deprecated. ' \
                  'This will become a hard error in the future.'
            mlog.deprecation(msg)
        for i in newvalue:
            if not isinstance(i, str):
                raise MesonException('String array element "{}" is not a string.'.format(str(newvalue)))
        if self.choices:
            bad = [x for x in newvalue if x not in self.choices]
            if bad:
                raise MesonException('Options "{}" are not in allowed choices: "{}"'.format(
                    ', '.join(bad), ', '.join(self.choices)))
        return newvalue

    def extend_value(self, value: T.Union[str, T.List[str]]) -> None:
        """Extend the value with an additional value."""
        new = self.validate_value(value)
        self.set_value(self.value + new)
class UserFeatureOption(UserComboOption):
    """Tri-state option used for feature flags: enabled / disabled / auto."""

    static_choices = ['enabled', 'disabled', 'auto']

    def __init__(self, description: str, value: T.Any, yielding: T.Optional[bool] = None):
        super().__init__(description, self.static_choices, value, yielding)

    def _has_state(self, state: str) -> bool:
        # Shared comparison used by the three public predicates.
        return self.value == state

    def is_enabled(self) -> bool:
        return self._has_state('enabled')

    def is_disabled(self) -> bool:
        return self._has_state('disabled')

    def is_auto(self) -> bool:
        return self._has_state('auto')
if T.TYPE_CHECKING:
    # Type-only aliases for the dependency-cache keys; never evaluated at runtime.
    CacheKeyType = T.Tuple[T.Tuple[T.Any, ...], ...]
    SubCacheKeyType = T.Tuple[T.Any, ...]
class DependencyCacheType(enum.Enum):
    """Categorizes a dependency by which search mechanism found it."""

    OTHER = 0
    PKG_CONFIG = 1
    CMAKE = 2

    @classmethod
    def from_type(cls, dep: 'dependencies.Dependency') -> 'DependencyCacheType':
        # Local import to avoid a circular dependency at module load time.
        from . import dependencies
        # As more types gain search overrides they'll need to be added here
        if isinstance(dep, dependencies.PkgConfigDependency):
            return cls.PKG_CONFIG
        if isinstance(dep, dependencies.CMakeDependency):
            return cls.CMAKE
        return cls.OTHER
class DependencySubCache:
    """Stores dependency lookups for one cache key, indexed by sub-key tuples."""

    def __init__(self, type_: DependencyCacheType):
        self.types = [type_]
        self.__cache = {}  # type: T.Dict[SubCacheKeyType, dependencies.Dependency]

    def __getitem__(self, subkey: 'SubCacheKeyType') -> 'dependencies.Dependency':
        return self.__cache[subkey]

    def __setitem__(self, subkey: 'SubCacheKeyType', dep: 'dependencies.Dependency') -> None:
        self.__cache[subkey] = dep

    def __contains__(self, subkey: 'SubCacheKeyType') -> bool:
        return subkey in self.__cache

    def values(self) -> T.Iterable['dependencies.Dependency']:
        return self.__cache.values()
class DependencyCache:
    """Class that stores a cache of dependencies.

    This class is meant to encapsulate the fact that we need multiple keys to
    successfully lookup by providing a simple get/put interface.
    """

    def __init__(self, builtins: 'KeyedOptionDictType', for_machine: MachineChoice):
        self.__cache = OrderedDict()  # type: T.MutableMapping[CacheKeyType, DependencySubCache]
        self.__builtins = builtins
        # These two options change where pkg-config/CMake search, so their
        # current values participate in the per-type subkey below.
        self.__pkg_conf_key = OptionKey('pkg_config_path', machine=for_machine)
        self.__cmake_key = OptionKey('cmake_prefix_path', machine=for_machine)

    def __calculate_subkey(self, type_: DependencyCacheType) -> T.Tuple[T.Any, ...]:
        # Subkey is the current search-path option value for the given type;
        # OTHER dependencies have no such option and use the empty tuple.
        if type_ is DependencyCacheType.PKG_CONFIG:
            return tuple(self.__builtins[self.__pkg_conf_key].value)
        elif type_ is DependencyCacheType.CMAKE:
            return tuple(self.__builtins[self.__cmake_key].value)
        assert type_ is DependencyCacheType.OTHER, 'Someone forgot to update subkey calculations for a new type'
        return tuple()

    def __iter__(self) -> T.Iterator['CacheKeyType']:
        return self.keys()

    def put(self, key: 'CacheKeyType', dep: 'dependencies.Dependency') -> None:
        t = DependencyCacheType.from_type(dep)
        if key not in self.__cache:
            self.__cache[key] = DependencySubCache(t)
        subkey = self.__calculate_subkey(t)
        self.__cache[key][subkey] = dep

    def get(self, key: 'CacheKeyType') -> T.Optional['dependencies.Dependency']:
        """Get a value from the cache.

        If there is no cache entry then None will be returned.
        """
        try:
            val = self.__cache[key]
        except KeyError:
            return None

        # Try each recorded type's current subkey until one matches; a stale
        # entry (changed search path) yields no hit.
        for t in val.types:
            subkey = self.__calculate_subkey(t)
            try:
                return val[subkey]
            except KeyError:
                pass
        return None

    def values(self) -> T.Iterator['dependencies.Dependency']:
        for c in self.__cache.values():
            yield from c.values()

    def keys(self) -> T.Iterator['CacheKeyType']:
        return iter(self.__cache.keys())

    def items(self) -> T.Iterator[T.Tuple['CacheKeyType', T.List['dependencies.Dependency']]]:
        for k, v in self.__cache.items():
            vs = []
            for t in v.types:
                subkey = self.__calculate_subkey(t)
                if subkey in v:
                    vs.append(v[subkey])
            yield k, vs

    def clear(self) -> None:
        self.__cache.clear()
# Can't bind this near the class method it seems, sadly.
_V = T.TypeVar('_V')
# This class contains all data that must persist over multiple
# invocations of Meson. It is roughly the same thing as
# cmakecache.
class CoreData:
    """All state that must persist across Meson invocations for one build dir.
    Instances are pickled to meson-private/coredata.dat by the module-level
    load()/save() helpers defined further down in this file.
    """
    def __init__(self, options: argparse.Namespace, scratch_dir: str, meson_command: T.List[str]):
        # NOTE(review): these look like Visual Studio project-type GUIDs used
        # by the vs backends -- confirm against the backend code.
        self.lang_guids = {
            'default': '8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942',
            'c': '8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942',
            'cpp': '8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942',
            'test': '3AC096D0-A1C2-E12C-1390-A8335801FDAB',
            'directory': '2150E333-8FDC-42A3-9474-1A3956D46DE8',
        }
        # Random GUIDs regenerated for every fresh configuration.
        self.test_guid = str(uuid.uuid4()).upper()
        self.regen_guid = str(uuid.uuid4()).upper()
        self.install_guid = str(uuid.uuid4()).upper()
        self.meson_command = meson_command
        self.target_guids = {}
        self.version = version
        # Every option (builtin, per-machine, project, ...) keyed by OptionKey.
        self.options: 'KeyedOptionDictType' = {}
        self.cross_files = self.__load_config_files(options, scratch_dir, 'cross')
        self.compilers = PerMachine(OrderedDict(), OrderedDict())  # type: PerMachine[T.Dict[str, Compiler]]
        # Set of subprojects that have already been initialized once, this is
        # required to be stored and reloaded with the coredata, as we don't
        # want to overwrite options for such subprojects.
        self.initialized_subprojects: T.Set[str] = set()
        # The dependency caches share self.options, so changes to the
        # search-path options invalidate cached lookups (see DependencyCache).
        build_cache = DependencyCache(self.options, MachineChoice.BUILD)
        host_cache = DependencyCache(self.options, MachineChoice.HOST)
        self.deps = PerMachine(build_cache, host_cache)  # type: PerMachine[DependencyCache]
        self.compiler_check_cache = OrderedDict()  # type: T.Dict[CompilerCheckCacheKey, compiler.CompileResult]
        # Only to print a warning if it changes between Meson invocations.
        self.config_files = self.__load_config_files(options, scratch_dir, 'native')
        self.builtin_options_libdir_cross_fixup()
        self.init_builtins('')
    @staticmethod
    def __load_config_files(options: argparse.Namespace, scratch_dir: str, ftype: str) -> T.List[str]:
        """Resolve cross/native files from the command line to absolute paths.
        Pipes are copied into the scratch dir so they can be re-read on
        reconfigure; bare names are also searched in the XDG data dirs on
        non-Windows platforms. Raises MesonException when a file is missing.
        """
        # Need to try and make the passed filenames absolute because when the
        # files are parsed later we'll have chdir()d.
        if ftype == 'cross':
            filenames = options.cross_file
        else:
            filenames = options.native_file
        if not filenames:
            return []
        found_invalid = []  # type: T.List[str]
        missing = []  # type: T.List[str]
        real = []  # type: T.List[str]
        for i, f in enumerate(filenames):
            f = os.path.expanduser(os.path.expandvars(f))
            if os.path.exists(f):
                if os.path.isfile(f):
                    real.append(os.path.abspath(f))
                    continue
                elif os.path.isdir(f):
                    found_invalid.append(os.path.abspath(f))
                else:
                    # in this case we've been passed some kind of pipe, copy
                    # the contents of that file into the meson private (scratch)
                    # directory so that it can be re-read when wiping/reconfiguring
                    copy = os.path.join(scratch_dir, f'{uuid.uuid4()}.{ftype}.ini')
                    with open(f) as rf:
                        with open(copy, 'w') as wf:
                            wf.write(rf.read())
                    real.append(copy)
                    # Also replace the command line argument, as the pipe
                    # probably won't exist on reconfigure
                    filenames[i] = copy
                continue
            if sys.platform != 'win32':
                paths = [
                    os.environ.get('XDG_DATA_HOME', os.path.expanduser('~/.local/share')),
                ] + os.environ.get('XDG_DATA_DIRS', '/usr/local/share:/usr/share').split(':')
                for path in paths:
                    path_to_try = os.path.join(path, 'meson', ftype, f)
                    if os.path.isfile(path_to_try):
                        real.append(path_to_try)
                        break
                else:
                    missing.append(f)
            else:
                missing.append(f)
        if missing:
            if found_invalid:
                mlog.log('Found invalid candidates for', ftype, 'file:', *found_invalid)
            mlog.log('Could not find any valid candidate for', ftype, 'files:', *missing)
            # NOTE(review): f is whatever filename was iterated last, not
            # necessarily one of the missing ones -- confirm intended.
            raise MesonException(f'Cannot find specified {ftype} file: {f}')
        return real
    def builtin_options_libdir_cross_fixup(self):
        # By default set libdir to "lib" when cross compiling since
        # getting the "system default" is always wrong on multiarch
        # platforms as it gets a value like lib/x86_64-linux-gnu.
        if self.cross_files:
            BUILTIN_OPTIONS[OptionKey('libdir')].default = 'lib'
    def sanitize_prefix(self, prefix):
        """Expand ~, require an absolute path, and trim a trailing slash
        (except for roots such as '/' and 'C:\\\\')."""
        prefix = os.path.expanduser(prefix)
        if not os.path.isabs(prefix):
            raise MesonException('prefix value {!r} must be an absolute path'
                                 ''.format(prefix))
        if prefix.endswith('/') or prefix.endswith('\\'):
            # On Windows we need to preserve the trailing slash if the
            # string is of type 'C:\' because 'C:' is not an absolute path.
            if len(prefix) == 3 and prefix[1] == ':':
                pass
            # If prefix is a single character, preserve it since it is
            # the root directory.
            elif len(prefix) == 1:
                pass
            else:
                prefix = prefix[:-1]
        return prefix
    def sanitize_dir_option_value(self, prefix: str, option: OptionKey, value: T.Any) -> T.Any:
        '''
        If the option is an installation directory option and the value is an
        absolute path, check that it resides within prefix and return the value
        as a path relative to the prefix.
        This way everyone can do f.ex, get_option('libdir') and be sure to get
        the library directory relative to prefix.
        .as_posix() keeps the posix-like file separators Meson uses.
        '''
        try:
            value = PurePath(value)
        except TypeError:
            return value
        if option.name.endswith('dir') and value.is_absolute() and \
                option not in BULITIN_DIR_NOPREFIX_OPTIONS:
            # Value must be a subdir of the prefix
            # commonpath will always return a path in the native format, so we
            # must use pathlib.PurePath to do the same conversion before
            # comparing.
            msg = ('The value of the \'{!s}\' option is \'{!s}\' which must be a '
                   'subdir of the prefix {!r}.\nNote that if you pass a '
                   'relative path, it is assumed to be a subdir of prefix.')
            # os.path.commonpath doesn't understand case-insensitive filesystems,
            # but PurePath().relative_to() does.
            try:
                value = value.relative_to(prefix)
            except ValueError:
                raise MesonException(msg.format(option, value, prefix))
            if '..' in str(value):
                raise MesonException(msg.format(option, value, prefix))
        return value.as_posix()
    def init_builtins(self, subproject: str) -> None:
        """Create all builtin options (and per-machine variants) with defaults."""
        # Create builtin options with default values
        for key, opt in BUILTIN_OPTIONS.items():
            self.add_builtin_option(self.options, key.evolve(subproject=subproject), opt)
        for for_machine in iter(MachineChoice):
            for key, opt in BUILTIN_OPTIONS_PER_MACHINE.items():
                self.add_builtin_option(self.options, key.evolve(subproject=subproject, machine=for_machine), opt)
    @staticmethod
    def add_builtin_option(opts_map: 'KeyedOptionDictType', key: OptionKey,
                           opt: 'BuiltinOption') -> None:
        # Subproject options inherit the root option's value unless yielding.
        if key.subproject:
            if opt.yielding:
                # This option is global and not per-subproject
                return
            value = opts_map[key.as_root()].value
        else:
            value = None
        opts_map[key] = opt.init_option(key, value, default_prefix())
    def init_backend_options(self, backend_name: str) -> None:
        """Register the options that only exist for the chosen backend."""
        if backend_name == 'ninja':
            self.options[OptionKey('backend_max_links')] = UserIntegerOption(
                'Maximum number of linker processes to run or 0 for no '
                'limit',
                (0, None, 0))
        elif backend_name.startswith('vs'):
            self.options[OptionKey('backend_startup_project')] = UserStringOption(
                'Default project to execute in Visual Studio',
                '')
    def get_option(self, key: OptionKey) -> T.Union[str, int, bool, WrapMode]:
        """Look up an option value, falling back to a yielding root option."""
        try:
            v = self.options[key].value
            if key.name == 'wrap_mode':
                return WrapMode[v]
            return v
        except KeyError:
            pass
        try:
            v = self.options[key.as_root()]
            if v.yielding:
                if key.name == 'wrap_mode':
                    return WrapMode[v.value]
                return v.value
        except KeyError:
            pass
        raise MesonException(f'Tried to get unknown builtin option {str(key)}')
    def set_option(self, key: OptionKey, value) -> None:
        """Sanitize and set one option; buildtype also updates implied options."""
        if key.is_builtin():
            if key.name == 'prefix':
                value = self.sanitize_prefix(value)
            else:
                prefix = self.options[OptionKey('prefix')].value
                value = self.sanitize_dir_option_value(prefix, key, value)
        try:
            self.options[key].set_value(value)
        except KeyError:
            raise MesonException(f'Tried to set unknown builtin option {str(key)}')
        if key.name == 'buildtype':
            self._set_others_from_buildtype(value)
    def get_nondefault_buildtype_args(self):
        """Return (name, actual, expected) tuples for optimization/debug values
        that diverge from what the current buildtype implies."""
        result= []
        value = self.options[OptionKey('buildtype')].value
        if value == 'plain':
            opt = '0'
            debug = False
        elif value == 'debug':
            opt = '0'
            debug = True
        elif value == 'debugoptimized':
            opt = '2'
            debug = True
        elif value == 'release':
            opt = '3'
            debug = False
        elif value == 'minsize':
            opt = 's'
            debug = True
        else:
            assert(value == 'custom')
            return []
        actual_opt = self.options[OptionKey('optimization')].value
        actual_debug = self.options[OptionKey('debug')].value
        if actual_opt != opt:
            result.append(('optimization', actual_opt, opt))
        if actual_debug != debug:
            result.append(('debug', actual_debug, debug))
        return result
    def _set_others_from_buildtype(self, value: str) -> None:
        # Keep this buildtype -> (optimization, debug) mapping in sync with
        # get_nondefault_buildtype_args above.
        if value == 'plain':
            opt = '0'
            debug = False
        elif value == 'debug':
            opt = '0'
            debug = True
        elif value == 'debugoptimized':
            opt = '2'
            debug = True
        elif value == 'release':
            opt = '3'
            debug = False
        elif value == 'minsize':
            opt = 's'
            debug = True
        else:
            assert(value == 'custom')
            return
        self.options[OptionKey('optimization')].set_value(opt)
        self.options[OptionKey('debug')].set_value(debug)
    @staticmethod
    def is_per_machine_option(optname: OptionKey) -> bool:
        # NOTE(review): BUILTIN_OPTIONS_PER_MACHINE is keyed by OptionKey
        # instances while optname.name is a str -- confirm this membership
        # test matches as intended.
        if optname.name in BUILTIN_OPTIONS_PER_MACHINE:
            return True
        return optname.lang is not None
    def validate_option_value(self, option_name: OptionKey, override_value):
        """Validate *override_value* against the named option without setting it."""
        try:
            opt = self.options[option_name]
        except KeyError:
            raise MesonException(f'Tried to validate unknown option {str(option_name)}')
        try:
            return opt.validate_value(override_value)
        except MesonException as e:
            raise type(e)(('Validation failed for option %s: ' % option_name) + str(e)) \
                .with_traceback(sys.exc_info()[2])
    def get_external_args(self, for_machine: MachineChoice, lang: str) -> T.Union[str, T.List[str]]:
        # Per-language compile args from the environment/machine files.
        return self.options[OptionKey('args', machine=for_machine, lang=lang)].value
    def get_external_link_args(self, for_machine: MachineChoice, lang: str) -> T.Union[str, T.List[str]]:
        # Per-language link args from the environment/machine files.
        return self.options[OptionKey('link_args', machine=for_machine, lang=lang)].value
    def update_project_options(self, options: 'KeyedOptionDictType') -> None:
        """Merge project option definitions, keeping user-set values if valid."""
        for key, value in options.items():
            if not key.is_project():
                continue
            if key not in self.options:
                self.options[key] = value
                continue
            oldval = self.options[key]
            if type(oldval) != type(value):
                self.options[key] = value
            elif oldval.choices != value.choices:
                # If the choices have changed, use the new value, but attempt
                # to keep the old options. If they are not valid keep the new
                # defaults but warn.
                self.options[key] = value
                try:
                    value.set_value(oldval.value)
                except MesonException as e:
                    mlog.warning(f'Old value(s) of {key} are no longer valid, resetting to default ({value.value}).')
    def is_cross_build(self, when_building_for: MachineChoice = MachineChoice.HOST) -> bool:
        # Building *for* the build machine is by definition a native build.
        if when_building_for == MachineChoice.BUILD:
            return False
        return len(self.cross_files) > 0
    def copy_build_options_from_regular_ones(self) -> None:
        """In a native build, mirror host-machine options onto the build machine."""
        assert not self.is_cross_build()
        for k in BUILTIN_OPTIONS_PER_MACHINE:
            o = self.options[k]
            self.options[k.as_build()].set_value(o.value)
        for bk, bv in self.options.items():
            if bk.machine is MachineChoice.BUILD:
                hk = bk.as_host()
                try:
                    hv = self.options[hk]
                    bv.set_value(hv.value)
                except KeyError:
                    continue
    def set_options(self, options: T.Dict[OptionKey, T.Any], subproject: str = '', warn_unknown: bool = True) -> None:
        """Set several options at once, warning about unknown ones."""
        if not self.is_cross_build():
            options = {k: v for k, v in options.items() if k.machine is not MachineChoice.BUILD}
        # Set prefix first because it's needed to sanitize other options
        pfk = OptionKey('prefix')
        if pfk in options:
            prefix = self.sanitize_prefix(options[pfk])
            self.options[OptionKey('prefix')].set_value(prefix)
            for key in BULITIN_DIR_NOPREFIX_OPTIONS:
                if key not in options:
                    self.options[key].set_value(BUILTIN_OPTIONS[key].prefixed_default(key, prefix))
        unknown_options: T.List[OptionKey] = []
        for k, v in options.items():
            if k == pfk:
                continue
            elif k not in self.options:
                unknown_options.append(k)
            else:
                self.set_option(k, v)
        if unknown_options and warn_unknown:
            unknown_options_str = ', '.join(sorted(str(s) for s in unknown_options))
            sub = f'In subproject {subproject}: ' if subproject else ''
            mlog.warning(f'{sub}Unknown options: "{unknown_options_str}"')
            mlog.log('The value of new options can be set with:')
            mlog.log(mlog.bold('meson setup <builddir> --reconfigure -Dnew_option=new_value ...'))
        if not self.is_cross_build():
            self.copy_build_options_from_regular_ones()
    def set_default_options(self, default_options: T.MutableMapping[OptionKey, str], subproject: str, env: 'Environment') -> None:
        """Apply a project's default_options, filtered for this subproject."""
        # Preserve order: if env.options has 'buildtype' it must come after
        # 'optimization' if it is in default_options.
        options: T.MutableMapping[OptionKey, T.Any]
        if not subproject:
            options = OrderedDict(default_options)
            options.update(env.options)
            env.options = options
        # Create a subset of options, keeping only project and builtin
        # options for this subproject.
        # Language and backend specific options will be set later when adding
        # languages and setting the backend (builtin options must be set first
        # to know which backend we'll use).
        options = OrderedDict()
        for k, v in chain(default_options.items(), env.options.items()):
            # If this is a subproject, don't use other subproject options
            if k.subproject and k.subproject != subproject:
                continue
            # If the option is a builtin and is yielding then it's not allowed per subproject.
            #
            # Always test this using the HOST machine, as many builtin options
            # are not valid for the BUILD machine, but the yielding value does
            # not differ between them even when they are valid for both.
            if subproject and k.is_builtin() and self.options[k.evolve(subproject='', machine=MachineChoice.HOST)].yielding:
                continue
            # Skip base, compiler, and backend options, they are handled when
            # adding languages and setting backend.
            if k.type in {OptionType.COMPILER, OptionType.BACKEND, OptionType.BASE}:
                continue
            options[k] = v
        self.set_options(options, subproject=subproject)
    def add_compiler_options(self, options: 'KeyedOptionDictType', lang: str, for_machine: MachineChoice,
                             env: 'Environment') -> None:
        """Register compiler options, seeding values from the environment."""
        for k, o in options.items():
            value = env.options.get(k)
            if value is not None:
                o.set_value(value)
            # Never clobber an option that already exists.
            self.options.setdefault(k, o)
    def add_lang_args(self, lang: str, comp: T.Type['Compiler'],
                      for_machine: MachineChoice, env: 'Environment') -> None:
        """Add global language arguments that are needed before compiler/linker detection."""
        from .compilers import compilers
        # These options are all new at this point, because the compiler is
        # responsible for adding its own options, thus calling
        # `self.options.update()` is perfectly safe.
        self.options.update(compilers.get_global_options(lang, comp, for_machine, env))
    def process_new_compiler(self, lang: str, comp: 'Compiler', env: 'Environment') -> None:
        """Register a detected compiler and enable its applicable base options."""
        from . import compilers
        self.compilers[comp.for_machine][lang] = comp
        self.add_compiler_options(comp.get_options(), lang, comp.for_machine, env)
        enabled_opts: T.List[OptionKey] = []
        for key in comp.base_options:
            if key in self.options:
                continue
            oobj = compilers.base_options[key]
            if key in env.options:
                oobj.set_value(env.options[key])
                enabled_opts.append(key)
            self.options[key] = oobj
        self.emit_base_options_warnings(enabled_opts)
    def emit_base_options_warnings(self, enabled_opts: T.List[OptionKey]) -> None:
        # b_bitcode disables conflicting linker options behind the user's
        # back, so be loud about it.
        if OptionKey('b_bitcode') in enabled_opts:
            mlog.warning('Base option \'b_bitcode\' is enabled, which is incompatible with many linker options. Incompatible options such as \'b_asneeded\' have been disabled.', fatal=False)
            mlog.warning('Please see https://mesonbuild.com/Builtin-options.html#Notes_about_Apple_Bitcode_support for more details.', fatal=False)
class CmdLineFileParser(configparser.ConfigParser):

    """ConfigParser variant used for meson's cmd_line.txt and machine files.

    Uses '=' as the only key/value delimiter and disables interpolation and
    key lowercasing, so keys like "subproject:option" survive round-trips.
    """

    def __init__(self) -> None:
        # ':' must remain usable inside keys such as "subproject:option=value",
        # hence '=' is the sole delimiter and no interpolation is performed.
        super().__init__(delimiters=('=',), interpolation=None)

    def optionxform(self, option: str) -> str:
        # Keys are case-sensitive: skip the default str.lower() transform.
        return option
class MachineFileParser():
    """Parses cross/native machine files into a dict of sections.
    Values are meson-syntax expressions evaluated with a tiny interpreter:
    only string/bool/number/array literals, identifiers from [constants] or
    earlier entries, '+' on strings/lists and '/' on strings are supported.
    """
    def __init__(self, filenames: T.List[str]) -> None:
        self.parser = CmdLineFileParser()
        self.constants = {'True': True, 'False': False}
        # Section name -> {entry name -> evaluated value}.
        self.sections = {}
        self.parser.read(filenames)
        # Parse [constants] first so they can be used in other sections
        if self.parser.has_section('constants'):
            self.constants.update(self._parse_section('constants'))
        for s in self.parser.sections():
            if s == 'constants':
                continue
            self.sections[s] = self._parse_section(s)
    def _parse_section(self, s):
        # Each section gets a fresh scope seeded with the constants.
        self.scope = self.constants.copy()
        section = {}
        for entry, value in self.parser.items(s):
            if ' ' in entry or '\t' in entry or "'" in entry or '"' in entry:
                raise EnvironmentException(f'Malformed variable name {entry!r} in machine file.')
            # Windows paths...
            value = value.replace('\\', '\\\\')
            try:
                ast = mparser.Parser(value, 'machinefile').parse()
                res = self._evaluate_statement(ast.lines[0])
            except MesonException:
                raise EnvironmentException(f'Malformed value in machine file variable {entry!r}.')
            except KeyError as e:
                raise EnvironmentException('Undefined constant {!r} in machine file variable {!r}.'.format(e.args[0], entry))
            section[entry] = res
            # Later entries in the same section may refer to earlier ones.
            self.scope[entry] = res
        return section
    def _evaluate_statement(self, node):
        # Minimal AST walker; anything outside the supported subset raises.
        if isinstance(node, (mparser.StringNode)):
            return node.value
        elif isinstance(node, mparser.BooleanNode):
            return node.value
        elif isinstance(node, mparser.NumberNode):
            return node.value
        elif isinstance(node, mparser.ArrayNode):
            return [self._evaluate_statement(arg) for arg in node.args.arguments]
        elif isinstance(node, mparser.IdNode):
            # KeyError here surfaces as "Undefined constant" in _parse_section.
            return self.scope[node.value]
        elif isinstance(node, mparser.ArithmeticNode):
            l = self._evaluate_statement(node.left)
            r = self._evaluate_statement(node.right)
            if node.operation == 'add':
                if (isinstance(l, str) and isinstance(r, str)) or \
                        (isinstance(l, list) and isinstance(r, list)):
                    return l + r
            elif node.operation == 'div':
                if isinstance(l, str) and isinstance(r, str):
                    return os.path.join(l, r)
        raise EnvironmentException('Unsupported node type')
def parse_machine_files(filenames):
    """Parse the given machine (cross/native) files and return their sections."""
    return MachineFileParser(filenames).sections
def get_cmd_line_file(build_dir: str) -> str:
    """Return the path of the persisted command-line file for *build_dir*."""
    private_dir = os.path.join(build_dir, 'meson-private')
    return os.path.join(private_dir, 'cmd_line.txt')
def read_cmd_line_file(build_dir: str, options: argparse.Namespace) -> None:
    """Merge previously saved command-line options into *options*.

    Values already present in ``options.cmd_line_options`` win over saved
    ones; cross/native file lists are only restored when the caller passed
    none. No-op when the build dir has no saved cmd_line.txt.
    """
    filename = get_cmd_line_file(build_dir)
    if not os.path.isfile(filename):
        return
    config = CmdLineFileParser()
    config.read(filename)
    # Build a real dict (config sections aren't), letting options given on
    # the current command line override the stored values.
    merged = {}
    for name, val in config['options'].items():
        merged[OptionKey.from_string(name)] = val
    merged.update(options.cmd_line_options)
    options.cmd_line_options = merged
    props = config['properties']
    # The stored file lists are reprs like "['a', 'b']"; literal_eval them.
    if not options.cross_file:
        options.cross_file = ast.literal_eval(props.get('cross_file', '[]'))
    if not options.native_file:
        options.native_file = ast.literal_eval(props.get('native_file', '[]'))
def write_cmd_line_file(build_dir: str, options: argparse.Namespace) -> None:
    """Persist the current command-line options and machine files to *build_dir*."""
    config = CmdLineFileParser()
    properties = OrderedDict()
    # Only record machine files that were actually given.
    for prop in ('cross_file', 'native_file'):
        value = getattr(options, prop)
        if value:
            properties[prop] = value
    config['options'] = {str(k): str(v) for k, v in options.cmd_line_options.items()}
    config['properties'] = properties
    with open(get_cmd_line_file(build_dir), 'w') as f:
        config.write(f)
def update_cmd_line_file(build_dir: str, options: argparse.Namespace):
    """Merge the given command-line options into the saved cmd_line.txt."""
    filename = get_cmd_line_file(build_dir)
    config = CmdLineFileParser()
    config.read(filename)
    new_values = {str(k): str(v) for k, v in options.cmd_line_options.items()}
    config['options'].update(new_values)
    with open(filename, 'w') as f:
        config.write(f)
def get_cmd_line_options(build_dir: str, options: argparse.Namespace) -> str:
    """Reconstruct a shell-quoted meson argument string for the configuration."""
    # Work on a copy so the caller's namespace is left untouched.
    copy = argparse.Namespace(**vars(options))
    read_cmd_line_file(build_dir, copy)
    cmdline = [f'-D{k}={v}' for k, v in copy.cmd_line_options.items()]
    if options.cross_file:
        cmdline.extend(f'--cross-file {f}' for f in options.cross_file)
    if options.native_file:
        cmdline.extend(f'--native-file {f}' for f in options.native_file)
    return ' '.join(shlex.quote(arg) for arg in cmdline)
def major_versions_differ(v1: str, v2: str) -> bool:
    """Return True when the 'major.minor' components of the versions differ."""
    return v1.split('.')[:2] != v2.split('.')[:2]
def load(build_dir: str) -> CoreData:
    """Unpickle and validate the CoreData stored under *build_dir*.

    Raises MesonException for corrupt/unreadable files and
    MesonVersionMismatchException when the major.minor version changed.
    """
    filename = os.path.join(build_dir, 'meson-private', 'coredata.dat')
    load_fail_msg = f'Coredata file {filename!r} is corrupted. Try with a fresh build tree.'
    try:
        with open(filename, 'rb') as f:
            obj = pickle.load(f)
    except (pickle.UnpicklingError, EOFError):
        raise MesonException(load_fail_msg)
    except (ModuleNotFoundError, AttributeError):
        # Classes were renamed/removed since the pickle was written.
        raise MesonException(
            f"Coredata file {filename!r} references functions or classes that don't "
            "exist. This probably means that it was generated with an old "
            "version of meson.")
    if not isinstance(obj, CoreData):
        raise MesonException(load_fail_msg)
    if major_versions_differ(obj.version, version):
        raise MesonVersionMismatchException(obj.version, version)
    return obj
def save(obj: CoreData, build_dir: str) -> str:
    """Atomically pickle *obj* into the build dir, keeping a '.prev' backup.

    Returns the path of the written coredata file.
    """
    if major_versions_differ(obj.version, version):
        raise MesonException('Fatal version mismatch corruption.')
    filename = os.path.join(build_dir, 'meson-private', 'coredata.dat')
    prev_filename = filename + '.prev'
    tempfilename = filename + '~'
    if os.path.exists(filename):
        import shutil
        shutil.copyfile(filename, prev_filename)
    # Write to a temp file and os.replace() it so readers never observe a
    # half-written coredata file.
    with open(tempfilename, 'wb') as f:
        pickle.dump(obj, f)
        f.flush()
        os.fsync(f.fileno())
    os.replace(tempfilename, filename)
    return filename
def register_builtin_arguments(parser: argparse.ArgumentParser) -> None:
    """Add every builtin meson option (and -D) to an argparse parser."""
    for key, opt in BUILTIN_OPTIONS.items():
        opt.add_to_argparse(str(key), parser, '')
    # Per-machine options exist once for the host and once for the build machine.
    for key, opt in BUILTIN_OPTIONS_PER_MACHINE.items():
        opt.add_to_argparse(str(key), parser, ' (just for host machine)')
        opt.add_to_argparse(str(key.as_build()), parser, ' (just for build machine)')
    parser.add_argument('-D', action='append', dest='projectoptions', default=[], metavar="option",
                        help='Set the value of an option, can be used several times to set multiple options.')
def create_options_dict(options: T.List[str], subproject: str = '') -> T.Dict[OptionKey, str]:
    """Turn a list of 'key=value' strings into an OptionKey -> value mapping.

    Raises MesonException for entries lacking an '=' separator.
    """
    result: T.OrderedDict[OptionKey, str] = OrderedDict()
    for o in options:
        key, sep, value = o.partition('=')
        if not sep:
            raise MesonException(f'Option {o!r} must have a value separated by equals sign.')
        k = OptionKey.from_string(key)
        if subproject:
            k = k.evolve(subproject=subproject)
        result[k] = value
    return result
def parse_cmd_line_options(args: argparse.Namespace) -> None:
    """Fold builtin options given as dedicated --foo flags into args.cmd_line_options."""
    args.cmd_line_options = create_options_dict(args.projectoptions)
    # Merge builtin options set with --option into the dict.
    builtin_keys = chain(
        BUILTIN_OPTIONS.keys(),
        (k.as_build() for k in BUILTIN_OPTIONS_PER_MACHINE.keys()),
        BUILTIN_OPTIONS_PER_MACHINE.keys(),
    )
    for key in builtin_keys:
        name = str(key)
        value = getattr(args, name, None)
        if value is None:
            continue
        if key in args.cmd_line_options:
            # The same option was given both via -D and the dedicated flag.
            cmdline_name = BuiltinOption.argparse_name_to_arg(name)
            raise MesonException(
                'Got argument {0} as both -D{0} and {1}. Pick one.'.format(name, cmdline_name))
        args.cmd_line_options[key] = value
        delattr(args, name)
# TypeVar for the concrete UserOption subclass a BuiltinOption instantiates.
_U = T.TypeVar('_U', bound=UserOption[_T])
class BuiltinOption(T.Generic[_T, _U]):
    """Class for a builtin option type.
    Describes a builtin option: its UserOption subclass, description, default
    and choices, plus helpers to materialize it and expose it on argparse.
    There are some cases that are not fully supported yet.
    """
    def __init__(self, opt_type: T.Type[_U], description: str, default: T.Any, yielding: bool = True, *,
                 choices: T.Any = None):
        self.opt_type = opt_type
        self.description = description
        self.default = default
        self.choices = choices
        self.yielding = yielding
    def init_option(self, name: 'OptionKey', value: T.Optional[T.Any], prefix: str) -> _U:
        """Create an instance of opt_type and return it."""
        if value is None:
            value = self.prefixed_default(name, prefix)
        keywords = {'yielding': self.yielding, 'value': value}
        if self.choices:
            keywords['choices'] = self.choices
        return self.opt_type(self.description, **keywords)
    def _argparse_action(self) -> T.Optional[str]:
        # If the type is a boolean, the presence of the argument in --foo form
        # is to enable it. Disabling happens by using -Dfoo=false, which is
        # parsed under `args.projectoptions` and does not hit this codepath.
        if isinstance(self.default, bool):
            return 'store_true'
        return None
    def _argparse_choices(self) -> T.Any:
        # Booleans and features have fixed choice sets regardless of self.choices.
        if self.opt_type is UserBooleanOption:
            return [True, False]
        elif self.opt_type is UserFeatureOption:
            return UserFeatureOption.static_choices
        return self.choices
    @staticmethod
    def argparse_name_to_arg(name: str) -> str:
        # NOTE(review): --warnlevel appears to be a historical alias for
        # warning_level -- confirm before changing.
        if name == 'warning_level':
            return '--warnlevel'
        else:
            return '--' + name.replace('_', '-')
    def prefixed_default(self, name: 'OptionKey', prefix: str = '') -> T.Any:
        """Return the default, adjusted for prefix-dependent directory options."""
        if self.opt_type in [UserComboOption, UserIntegerOption]:
            return self.default
        try:
            return BULITIN_DIR_NOPREFIX_OPTIONS[name][prefix]
        except KeyError:
            pass
        return self.default
    def add_to_argparse(self, name: str, parser: argparse.ArgumentParser, help_suffix: str) -> None:
        """Register this option as a command-line argument on *parser*."""
        kwargs = OrderedDict()
        c = self._argparse_choices()
        b = self._argparse_action()
        h = self.description
        if not b:
            # Non-boolean options show their default in the help text.
            h = '{} (default: {}).'.format(h.rstrip('.'), self.prefixed_default(name))
        else:
            kwargs['action'] = b
        if c and not b:
            kwargs['choices'] = c
        kwargs['default'] = argparse.SUPPRESS
        kwargs['dest'] = name
        cmdline_name = self.argparse_name_to_arg(name)
        parser.add_argument(cmdline_name, help=h + help_suffix, **kwargs)
# Update `docs/markdown/Builtin-options.md` after changing the options below
# Also update mesonlib._BUILTIN_NAMES. See the comment there for why this is required.
# Installation directory options; defaults are interpreted relative to prefix.
BUILTIN_DIR_OPTIONS: 'KeyedOptionDictType' = OrderedDict([
    (OptionKey('prefix'), BuiltinOption(UserStringOption, 'Installation prefix', default_prefix())),
    (OptionKey('bindir'), BuiltinOption(UserStringOption, 'Executable directory', 'bin')),
    (OptionKey('datadir'), BuiltinOption(UserStringOption, 'Data file directory', 'share')),
    (OptionKey('includedir'), BuiltinOption(UserStringOption, 'Header file directory', 'include')),
    (OptionKey('infodir'), BuiltinOption(UserStringOption, 'Info page directory', 'share/info')),
    (OptionKey('libdir'), BuiltinOption(UserStringOption, 'Library directory', default_libdir())),
    (OptionKey('libexecdir'), BuiltinOption(UserStringOption, 'Library executable directory', default_libexecdir())),
    (OptionKey('localedir'), BuiltinOption(UserStringOption, 'Locale data directory', 'share/locale')),
    (OptionKey('localstatedir'), BuiltinOption(UserStringOption, 'Localstate data directory', 'var')),
    (OptionKey('mandir'), BuiltinOption(UserStringOption, 'Manual page directory', 'share/man')),
    (OptionKey('sbindir'), BuiltinOption(UserStringOption, 'System executable directory', 'sbin')),
    (OptionKey('sharedstatedir'), BuiltinOption(UserStringOption, 'Architecture-independent data directory', 'com')),
    (OptionKey('sysconfdir'), BuiltinOption(UserStringOption, 'Sysconf data directory', 'etc')),
])
# Non-directory core options.
BUILTIN_CORE_OPTIONS: 'KeyedOptionDictType' = OrderedDict([
    (OptionKey('auto_features'), BuiltinOption(UserFeatureOption, "Override value of all 'auto' features", 'auto')),
    (OptionKey('backend'), BuiltinOption(UserComboOption, 'Backend to use', 'ninja', choices=backendlist)),
    (OptionKey('buildtype'), BuiltinOption(UserComboOption, 'Build type to use', 'debug',
                                           choices=['plain', 'debug', 'debugoptimized', 'release', 'minsize', 'custom'])),
    (OptionKey('debug'), BuiltinOption(UserBooleanOption, 'Debug', True)),
    (OptionKey('default_library'), BuiltinOption(UserComboOption, 'Default library type', 'shared', choices=['shared', 'static', 'both'],
                                                 yielding=False)),
    (OptionKey('errorlogs'), BuiltinOption(UserBooleanOption, "Whether to print the logs from failing tests", True)),
    (OptionKey('install_umask'), BuiltinOption(UserUmaskOption, 'Default umask to apply on permissions of installed files', '022')),
    (OptionKey('layout'), BuiltinOption(UserComboOption, 'Build directory layout', 'mirror', choices=['mirror', 'flat'])),
    (OptionKey('optimization'), BuiltinOption(UserComboOption, 'Optimization level', '0', choices=['0', 'g', '1', '2', '3', 's'])),
    (OptionKey('stdsplit'), BuiltinOption(UserBooleanOption, 'Split stdout and stderr in test logs', True)),
    (OptionKey('strip'), BuiltinOption(UserBooleanOption, 'Strip targets on install', False)),
    (OptionKey('unity'), BuiltinOption(UserComboOption, 'Unity build', 'off', choices=['on', 'off', 'subprojects'])),
    (OptionKey('unity_size'), BuiltinOption(UserIntegerOption, 'Unity block size', (2, None, 4))),
    (OptionKey('warning_level'), BuiltinOption(UserComboOption, 'Compiler warning level to use', '1', choices=['0', '1', '2', '3'], yielding=False)),
    (OptionKey('werror'), BuiltinOption(UserBooleanOption, 'Treat warnings as errors', False, yielding=False)),
    (OptionKey('wrap_mode'), BuiltinOption(UserComboOption, 'Wrap mode', 'default', choices=['default', 'nofallback', 'nodownload', 'forcefallback', 'nopromote'])),
    (OptionKey('force_fallback_for'), BuiltinOption(UserArrayOption, 'Force fallback for those subprojects', [])),
])
BUILTIN_OPTIONS = OrderedDict(chain(BUILTIN_DIR_OPTIONS.items(), BUILTIN_CORE_OPTIONS.items()))
# Options that exist twice: once for the build and once for the host machine.
BUILTIN_OPTIONS_PER_MACHINE: 'KeyedOptionDictType' = OrderedDict([
    (OptionKey('pkg_config_path'), BuiltinOption(UserArrayOption, 'List of additional paths for pkg-config to search', [])),
    (OptionKey('cmake_prefix_path'), BuiltinOption(UserArrayOption, 'List of additional prefixes for cmake to search', [])),
])
# Special prefix-dependent defaults for installation directories that reside in
# a path outside of the prefix in FHS and common usage.
# NOTE(review): the 'BULITIN' typo is long-standing and referenced throughout
# this file -- renaming it would break those call sites, so it is kept.
BULITIN_DIR_NOPREFIX_OPTIONS: T.Dict[OptionKey, T.Dict[str, str]] = {
    OptionKey('sysconfdir'): {'/usr': '/etc'},
    OptionKey('localstatedir'): {'/usr': '/var', '/usr/local': '/var/local'},
    OptionKey('sharedstatedir'): {'/usr': '/var/lib', '/usr/local': '/var/local/lib'},
}
# Target names reserved by the backends; a dict used as a set (all values are
# None) for O(1) membership tests.
FORBIDDEN_TARGET_NAMES = {'clean': None,
                          'clean-ctlist': None,
                          'clean-gcno': None,
                          'clean-gcda': None,
                          'coverage': None,
                          'coverage-text': None,
                          'coverage-xml': None,
                          'coverage-html': None,
                          'phony': None,
                          'PHONY': None,
                          'all': None,
                          'test': None,
                          'benchmark': None,
                          'install': None,
                          'uninstall': None,
                          'build.ninja': None,
                          'scan-build': None,
                          'reconfigure': None,
                          'dist': None,
                          'distcheck': None,
                          }
|
QuLogic/meson
|
mesonbuild/coredata.py
|
Python
|
apache-2.0
| 52,406
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
#
# Easy AVR USB Keyboard Firmware Keymapper
# Copyright (C) 2018 David Howland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""The easykeymap package holds the EasyAVR keymapper application."""
from easykeymap.version import version_string
__version__ = version_string
|
dhowland/EasyAVR
|
keymapper/easykeymap/__init__.py
|
Python
|
gpl-2.0
| 911
|
import numpy as np
import sys
import imp
try:
imp.find_module('pycuda')
found_pycuda = True
except ImportError:
found_pycuda = False
try:
import mobility_cpp
found_cpp = True
except ImportError:
try:
from .mobility import mobility_cpp
found_cpp = True
except ImportError:
pass
sys.path.append('../')
from . import mobility as mob
from general_application_utils import timer
if __name__ == '__main__':
  # Benchmark / consistency script: computes mobility-vector products with
  # several backends (dense python loops, vectorized python, numba, pycuda,
  # C++) and prints timings plus relative differences between backends.
  # Nothing is asserted programmatically -- results are inspected by eye.
  print('# Start')
  # Create blobs
  N = 1000
  eta = 7.0
  a = 0.13
  r_vectors = 5 * a * np.random.rand(N, 3)
  # Periodic length; all zeros disables the pseudo-periodic code paths below.
  L = np.array([0., 0., 0.])
  # Generate random forces
  force = np.random.randn(len(r_vectors), 3)
  # ================================================================
  # NO WALL TESTS
  # ================================================================
  timer('zz_no_wall_loops_full_matrix')
  mobility_no_wall_loops = mob.rotne_prager_tensor_loops(r_vectors, eta, a)
  u_no_wall_loops_full = np.dot(mobility_no_wall_loops, force.flatten())
  timer('zz_no_wall_loops_full_matrix')
  timer('zz_no_wall_full_matrix')
  mobility_no_wall = mob.rotne_prager_tensor(r_vectors, eta, a)
  u_no_wall_full = np.dot(mobility_no_wall, force.flatten())
  timer('zz_no_wall_full_matrix')
  # The first call is an untimed warm-up (presumably to absorb numba JIT
  # compilation cost -- confirm); only the second, identical call is timed.
  u_no_wall_numba = mob.no_wall_mobility_trans_times_force_numba(r_vectors, force, eta, a)
  timer('zz_no_wall_numba')
  u_no_wall_numba = mob.no_wall_mobility_trans_times_force_numba(r_vectors, force, eta, a)
  timer('zz_no_wall_numba')
  if found_pycuda:
    # Same warm-up-then-time pattern for the GPU path.
    u_no_wall_pycuda = mob.no_wall_mobility_trans_times_force_pycuda(r_vectors, force, eta, a)
    timer('zz_no_wall_pycuda')
    u_no_wall_pycuda = mob.no_wall_mobility_trans_times_force_pycuda(r_vectors, force, eta, a)
    timer('zz_no_wall_pycuda')
  # ================================================================
  # WALL TESTS
  # ================================================================
  timer('python_loops')
  mobility_loops = mob.single_wall_fluid_mobility_loops(r_vectors, eta, a)
  u_loops = np.dot(mobility_loops, force.flatten())
  timer('python_loops')
  timer('python')
  mobility = mob.single_wall_fluid_mobility(r_vectors, eta, a)
  u = np.dot(mobility, force.flatten())
  timer('python')
  u_numba = mob.single_wall_mobility_trans_times_force_numba(r_vectors, force, eta, a)
  timer('numba')
  u_numba = mob.single_wall_mobility_trans_times_force_numba(r_vectors, force, eta, a, periodic_length = L)
  timer('numba')
  if found_pycuda:
    u_gpu = mob.single_wall_mobility_trans_times_force_pycuda(r_vectors, force, eta, a)
    timer('pycuda')
    u_gpu = mob.single_wall_mobility_trans_times_force_pycuda(r_vectors, force, eta, a, periodic_length = L)
    timer('pycuda')
  if found_cpp:
    timer('cpp')
    u_cpp = mob.single_wall_mobility_trans_times_force_cpp(r_vectors, force, eta, a)
    timer('cpp')
  # Debug dump, disabled by default.
  # NOTE(review): if enabled while pycuda is unavailable, ``u_gpu`` is
  # undefined and this branch would raise NameError.
  if False:
    np.set_printoptions(precision=6)
    print('no_wall_numba ', u_no_wall_numba)
    print('numba ', u_numba)
    print('pycuda ', u_gpu)
    print('diff ', u_numba - u_gpu)
  #print 'mobility_no_wall_loops \n', mobility_no_wall_loops
  #print 'mobility_no_wall \n', mobility_no_wall
  print('=================== No wall tests ===================')
  print('|u_no_wall_full - u_no_wall_loops_full| / |u_no_wall_loops_full| = ', np.linalg.norm(u_no_wall_full - u_no_wall_loops_full) / np.linalg.norm(u_no_wall_loops_full))
  if found_pycuda:
    print('|u_no_wall_pycuda - u_no_wall_loops_full| / |u_no_wall_loops_full| = ', np.linalg.norm(u_no_wall_pycuda - u_no_wall_loops_full) / np.linalg.norm(u_no_wall_loops_full))
  print('|u_no_wall_numba - u_no_wall_loops_full| / |u_no_wall_loops_full| = ', np.linalg.norm(u_no_wall_numba - u_no_wall_loops_full) / np.linalg.norm(u_no_wall_loops_full))
  print('=================== Wall tests ===================')
  print('|u - u_loops| / |u_loops| = ', np.linalg.norm(u - u_loops) / np.linalg.norm(u_loops))
  print('|u_numba - u_loops| / |u_loops| = ', np.linalg.norm(u_numba - u_loops) / np.linalg.norm(u_loops))
  if found_pycuda:
    print('|u_gpu - u_loops| / |u_loops| = ', np.linalg.norm(u_gpu - u_loops) / np.linalg.norm(u_loops))
  if found_cpp:
    print('|u_cpp - u_loops| / |u_loops| = ', np.linalg.norm(u_cpp - u_loops) / np.linalg.norm(u_loops))
  if L[0] > 0. or L[1] > 0.:
    print('===================== Pseudo-periodic tests =============================')
    if found_pycuda:
      print('|u_numba - u_gpu| / |u_gpu| = ', np.linalg.norm(u_numba - u_gpu) / np.linalg.norm(u_gpu))
  timer('', print_all=True, clean_all=True)
  # ==========================================================
  # Rot tests
  # ==========================================================
  print('\n\n\n\n')
  print('==========================================================')
  if found_pycuda:
    # NOTE(review): the random ``force`` array is also passed as the *torque*
    # argument to the _times_torque variants below -- presumably just reusing
    # the same random vector for the cross-backend check; confirm intended.
    timer('u_no_wall_trans_times_torque_gpu')
    u_no_wall_trans_times_torque_gpu = mob.no_wall_mobility_trans_times_torque_pycuda(r_vectors, force, eta, a, periodic_length = L)
    timer('u_no_wall_trans_times_torque_gpu')
    u_no_wall_trans_times_torque_numba = mob.no_wall_mobility_trans_times_torque_numba(r_vectors, force, eta, a, periodic_length = L)
    timer('u_no_wall_trans_times_torque_numba')
    u_no_wall_trans_times_torque_numba = mob.no_wall_mobility_trans_times_torque_numba(r_vectors, force, eta, a, periodic_length = L)
    timer('u_no_wall_trans_times_torque_numba')
    print('|u_no_wall_trans_times_torque_numba - u_no_wall_trans_times_torque_gpu| / |u_no_wall_trans_times_torque_gpu| = ', \
          np.linalg.norm(u_no_wall_trans_times_torque_numba - u_no_wall_trans_times_torque_gpu) / np.linalg.norm(u_no_wall_trans_times_torque_gpu))
    u_wall_trans_times_torque_gpu = mob.single_wall_mobility_trans_times_torque_pycuda(r_vectors, force, eta, a, periodic_length = L)
    timer('u_wall_trans_times_torque_gpu')
    u_wall_trans_times_torque_gpu = mob.single_wall_mobility_trans_times_torque_pycuda(r_vectors, force, eta, a, periodic_length = L)
    timer('u_wall_trans_times_torque_gpu')
    u_wall_trans_times_torque_numba = mob.single_wall_mobility_trans_times_torque_numba(r_vectors, force, eta, a, periodic_length = L)
    timer('u_wall_trans_times_torque_numba')
    u_wall_trans_times_torque_numba = mob.single_wall_mobility_trans_times_torque_numba(r_vectors, force, eta, a, periodic_length = L)
    timer('u_wall_trans_times_torque_numba')
    print('|u_wall_trans_times_torque_numba - u_wall_trans_times_torque_gpu| / |u_wall_trans_times_torque_gpu| = ', \
          np.linalg.norm(u_wall_trans_times_torque_numba - u_wall_trans_times_torque_gpu) / np.linalg.norm(u_wall_trans_times_torque_gpu))
    timer('u_no_wall_rot_times_force_gpu')
    u_no_wall_rot_times_force_gpu = mob.no_wall_mobility_rot_times_force_pycuda(r_vectors, force, eta, a, periodic_length = L)
    timer('u_no_wall_rot_times_force_gpu')
    u_no_wall_rot_times_force_numba = mob.no_wall_mobility_rot_times_force_numba(r_vectors, force, eta, a, periodic_length = L)
    timer('u_no_wall_rot_times_force_numba')
    u_no_wall_rot_times_force_numba = mob.no_wall_mobility_rot_times_force_numba(r_vectors, force, eta, a, periodic_length = L)
    timer('u_no_wall_rot_times_force_numba')
    print('|u_no_wall_rot_times_force_numba - u_no_wall_rot_times_force_gpu| / |u_no_wall_rot_times_force_gpu| = ',
           np.linalg.norm(u_no_wall_rot_times_force_numba - u_no_wall_rot_times_force_gpu) / np.linalg.norm(u_no_wall_rot_times_force_gpu))
    timer('u_single_wall_rot_times_force_gpu')
    u_single_wall_rot_times_force_gpu = mob.single_wall_mobility_rot_times_force_pycuda(r_vectors, force, eta, a, periodic_length = L)
    timer('u_single_wall_rot_times_force_gpu')
    u_single_wall_rot_times_force_numba = mob.single_wall_mobility_rot_times_force_numba(r_vectors, force, eta, a, periodic_length = L)
    timer('u_single_wall_rot_times_force_numba')
    u_single_wall_rot_times_force_numba = mob.single_wall_mobility_rot_times_force_numba(r_vectors, force, eta, a, periodic_length = L)
    timer('u_single_wall_rot_times_force_numba')
    print('|u_single_wall_rot_times_force_numba - u_single_wall_rot_times_force_gpu| / |u_single_wall_rot_times_force_gpu| = ',
           np.linalg.norm(u_single_wall_rot_times_force_numba - u_single_wall_rot_times_force_gpu) / np.linalg.norm(u_single_wall_rot_times_force_gpu))
    timer('u_no_wall_rot_times_torque_gpu')
    u_no_wall_rot_times_torque_gpu = mob.no_wall_mobility_rot_times_torque_pycuda(r_vectors, force, eta, a, periodic_length = L)
    timer('u_no_wall_rot_times_torque_gpu')
    u_no_wall_rot_times_torque_numba = mob.no_wall_mobility_rot_times_torque_numba(r_vectors, force, eta, a, periodic_length = L)
    timer('u_no_wall_rot_times_torque_numba')
    u_no_wall_rot_times_torque_numba = mob.no_wall_mobility_rot_times_torque_numba(r_vectors, force, eta, a, periodic_length = L)
    timer('u_no_wall_rot_times_torque_numba')
    print('|u_no_wall_rot_times_torque_numba - u_no_wall_rot_times_torque_gpu| / |u_no_wall_rot_times_torque_gpu| = ',
           np.linalg.norm(u_no_wall_rot_times_torque_numba - u_no_wall_rot_times_torque_gpu) / np.linalg.norm(u_no_wall_rot_times_torque_gpu))
    # NOTE(review): the timer label says "rot_times_force" but this section
    # measures rot_times_torque -- looks like a copy-paste slip in the label.
    timer('u_single_wall_rot_times_force_gpu')
    u_single_wall_rot_times_torque_gpu = mob.single_wall_mobility_rot_times_torque_pycuda(r_vectors, force, eta, a, periodic_length = L)
    timer('u_single_wall_rot_times_force_gpu')
    u_single_wall_rot_times_torque_numba = mob.single_wall_mobility_rot_times_torque_numba(r_vectors, force, eta, a, periodic_length = L)
    timer('u_single_wall_rot_times_torque_numba')
    u_single_wall_rot_times_torque_numba = mob.single_wall_mobility_rot_times_torque_numba(r_vectors, force, eta, a, periodic_length = L)
    timer('u_single_wall_rot_times_torque_numba')
    print('|u_single_wall_rot_times_torque_numba - u_single_wall_rot_times_torque_gpu| / |u_single_wall_rot_times_torque_gpu| = ',
           np.linalg.norm(u_single_wall_rot_times_torque_numba - u_single_wall_rot_times_torque_gpu) / np.linalg.norm(u_single_wall_rot_times_torque_gpu))
    # Debug dump, disabled by default.
    if False:
      np.set_printoptions(precision=6)
      print('no_wall = ', u_no_wall_trans_times_torque_gpu)
      print('gpu = ', u_wall_trans_times_torque_gpu)
      print('numba = ', u_wall_trans_times_torque_numba)
  timer('', print_all=True)
  print('# End')
|
stochasticHydroTools/RigidMultiblobsWall
|
mobility/test_blobs.py
|
Python
|
gpl-3.0
| 11,090
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Core classes for markup processing."""
try:
reduce # builtin in Python < 3
except NameError:
from functools import reduce
import sys
from itertools import chain
import operator
from genshi.util import plaintext, stripentities, striptags, stringrepr
__all__ = ['Stream', 'Markup', 'escape', 'unescape', 'Attrs', 'Namespace',
'QName']
__docformat__ = 'restructuredtext en'
class StreamEventKind(str):
    """Interned string subclass naming the kind of a markup stream event.

    Equal values always yield the very same object, so event kinds can be
    compared with ``is`` as well as ``==``.
    """
    __slots__ = []
    # Cache of canonical instances, keyed by their string value.
    _instances = {}
    def __new__(cls, val):
        try:
            return cls._instances[val]
        except KeyError:
            # setdefault guards against a concurrent insertion returning a
            # different canonical object.
            return cls._instances.setdefault(val, str.__new__(cls, val))
class Stream(object):
    """Represents a stream of markup events.
    This class is basically an iterator over the events.
    Stream events are tuples of the form::
      (kind, data, position)
    where ``kind`` is the event kind (such as `START`, `END`, `TEXT`, etc),
    ``data`` depends on the kind of event, and ``position`` is a
    ``(filename, line, offset)`` tuple that contains the location of the
    original element or text in the input. If the original location is unknown,
    ``position`` is ``(None, -1, -1)``.
    Also provided are ways to serialize the stream to text. The `serialize()`
    method will return an iterator over generated strings, while `render()`
    returns the complete generated text at once. Both accept various parameters
    that impact the way the stream is serialized.
    """
    # Keep instances lightweight: no per-instance __dict__.
    __slots__ = ['events', 'serializer']
    # Event-kind singletons; StreamEventKind interns them, so kind checks can
    # use identity comparison (``kind is TEXT``).
    START = StreamEventKind('START') #: a start tag
    END = StreamEventKind('END') #: an end tag
    TEXT = StreamEventKind('TEXT') #: literal text
    XML_DECL = StreamEventKind('XML_DECL') #: XML declaration
    DOCTYPE = StreamEventKind('DOCTYPE') #: doctype declaration
    START_NS = StreamEventKind('START_NS') #: start namespace mapping
    END_NS = StreamEventKind('END_NS') #: end namespace mapping
    START_CDATA = StreamEventKind('START_CDATA') #: start CDATA section
    END_CDATA = StreamEventKind('END_CDATA') #: end CDATA section
    PI = StreamEventKind('PI') #: processing instruction
    COMMENT = StreamEventKind('COMMENT') #: comment
    def __init__(self, events, serializer=None):
        """Initialize the stream with a sequence of markup events.
        :param events: a sequence or iterable providing the events
        :param serializer: the default serialization method to use for this
                           stream
        :note: Changed in 0.5: added the `serializer` argument
        """
        self.events = events #: The underlying iterable producing the events
        self.serializer = serializer #: The default serialization method
    def __iter__(self):
        # Delegate iteration to the underlying event iterable.
        return iter(self.events)
    def __or__(self, function):
        """Override the "bitwise or" operator to apply filters or serializers
        to the stream, providing a syntax similar to pipes on Unix shells.
        Assume the following stream produced by the `HTML` function:
        >>> from genshi.input import HTML
        >>> html = HTML('''<p onclick="alert('Whoa')">Hello, world!</p>''', encoding='utf-8')
        >>> print(html)
        <p onclick="alert('Whoa')">Hello, world!</p>
        A filter such as the HTML sanitizer can be applied to that stream using
        the pipe notation as follows:
        >>> from genshi.filters import HTMLSanitizer
        >>> sanitizer = HTMLSanitizer()
        >>> print(html | sanitizer)
        <p>Hello, world!</p>
        Filters can be any function that accepts and produces a stream (where
        a stream is anything that iterates over events):
        >>> def uppercase(stream):
        ...     for kind, data, pos in stream:
        ...         if kind is TEXT:
        ...             data = data.upper()
        ...         yield kind, data, pos
        >>> print(html | sanitizer | uppercase)
        <p>HELLO, WORLD!</p>
        Serializers can also be used with this notation:
        >>> from genshi.output import TextSerializer
        >>> output = TextSerializer()
        >>> print(html | sanitizer | uppercase | output)
        HELLO, WORLD!
        Commonly, serializers should be used at the end of the "pipeline";
        using them somewhere in the middle may produce unexpected results.
        :param function: the callable object that should be applied as a filter
        :return: the filtered stream
        :rtype: `Stream`
        """
        # _ensure normalizes whatever the filter yields back into event tuples.
        return Stream(_ensure(function(self)), serializer=self.serializer)
    def filter(self, *filters):
        """Apply filters to the stream.
        This method returns a new stream with the given filters applied. The
        filters must be callables that accept the stream object as parameter,
        and return the filtered stream.
        The call::
          stream.filter(filter1, filter2)
        is equivalent to::
          stream | filter1 | filter2
        :param filters: one or more callable objects that should be applied as
                        filters
        :return: the filtered stream
        :rtype: `Stream`
        """
        # Fold the filters over self with the | operator defined above.
        return reduce(operator.or_, (self,) + filters)
    def render(self, method=None, encoding=None, out=None, **kwargs):
        """Return a string representation of the stream.
        Any additional keyword arguments are passed to the serializer, and thus
        depend on the `method` parameter value.
        :param method: determines how the stream is serialized; can be either
                       "xml", "xhtml", "html", "text", or a custom serializer
                       class; if `None`, the default serialization method of
                       the stream is used
        :param encoding: how the output string should be encoded; if set to
                         `None`, this method returns a `unicode` object
        :param out: a file-like object that the output should be written to
                    instead of being returned as one big string; note that if
                    this is a file or socket (or similar), the `encoding` must
                    not be `None` (that is, the output must be encoded)
        :return: a `str` or `unicode` object (depending on the `encoding`
                 parameter), or `None` if the `out` parameter is provided
        :rtype: `basestring`
        :see: XMLSerializer, XHTMLSerializer, HTMLSerializer, TextSerializer
        :note: Changed in 0.5: added the `out` parameter
        """
        # Imported lazily to avoid a circular import with genshi.output.
        from genshi.output import encode
        if method is None:
            method = self.serializer or 'xml'
        generator = self.serialize(method=method, **kwargs)
        return encode(generator, method=method, encoding=encoding, out=out)
    def select(self, path, namespaces=None, variables=None):
        """Return a new stream that contains the events matching the given
        XPath expression.
        >>> from genshi import HTML
        >>> stream = HTML('<doc><elem>foo</elem><elem>bar</elem></doc>', encoding='utf-8')
        >>> print(stream.select('elem'))
        <elem>foo</elem><elem>bar</elem>
        >>> print(stream.select('elem/text()'))
        foobar
        Note that the outermost element of the stream becomes the *context
        node* for the XPath test. That means that the expression "doc" would
        not match anything in the example above, because it only tests against
        child elements of the outermost element:
        >>> print(stream.select('doc'))
        <BLANKLINE>
        You can use the "." expression to match the context node itself
        (although that usually makes little sense):
        >>> print(stream.select('.'))
        <doc><elem>foo</elem><elem>bar</elem></doc>
        :param path: a string containing the XPath expression
        :param namespaces: mapping of namespace prefixes used in the path
        :param variables: mapping of variable names to values
        :return: the selected substream
        :rtype: `Stream`
        :raises PathSyntaxError: if the given path expression is invalid or not
                                 supported
        """
        # Imported lazily to avoid a circular import with genshi.path.
        from genshi.path import Path
        return Path(path).select(self, namespaces, variables)
    def serialize(self, method='xml', **kwargs):
        """Generate strings corresponding to a specific serialization of the
        stream.
        Unlike the `render()` method, this method is a generator that returns
        the serialized output incrementally, as opposed to returning a single
        string.
        Any additional keyword arguments are passed to the serializer, and thus
        depend on the `method` parameter value.
        :param method: determines how the stream is serialized; can be either
                       "xml", "xhtml", "html", "text", or a custom serializer
                       class; if `None`, the default serialization method of
                       the stream is used
        :return: an iterator over the serialization results (`Markup` or
                 `unicode` objects, depending on the serialization method)
        :rtype: ``iterator``
        :see: XMLSerializer, XHTMLSerializer, HTMLSerializer, TextSerializer
        """
        from genshi.output import get_serializer
        if method is None:
            method = self.serializer or 'xml'
        return get_serializer(method, **kwargs)(_ensure(self))
    def __str__(self):
        # Render with the stream's default serialization method.
        return self.render()
    def __unicode__(self):
        # Python 2 unicode conversion: render without byte encoding.
        return self.render(encoding=None)
    def __html__(self):
        # Markup protocol hook: a Stream is already "safe" markup, so it is
        # returned unchanged rather than being escaped.
        return self
# Module-level aliases for the event-kind singletons, so callers can refer to
# e.g. ``genshi.core.TEXT`` without going through the Stream class.
START = Stream.START
END = Stream.END
TEXT = Stream.TEXT
XML_DECL = Stream.XML_DECL
DOCTYPE = Stream.DOCTYPE
START_NS = Stream.START_NS
END_NS = Stream.END_NS
START_CDATA = Stream.START_CDATA
END_CDATA = Stream.END_CDATA
PI = Stream.PI
COMMENT = Stream.COMMENT
def _ensure(stream):
    """Ensure that every item on the stream is actually a markup event.

    Items that are not ``(kind, data, position)`` tuples are converted:
    objects providing a ``totuple()`` method are converted via that method,
    and anything else becomes a `TEXT` event.

    :param stream: an iterable of events or event-like objects
    :return: a generator yielding markup event tuples
    """
    stream = iter(stream)
    try:
        # Peek at the first item to decide whether conversion is needed.
        # The builtin next() works on Python 2.6+ and 3.x, unlike the old
        # Python-2-only ``stream.next()`` call.
        event = next(stream)
    except StopIteration:
        # Empty input: yield nothing. The explicit return is required under
        # PEP 479 (Python 3.7+), where a StopIteration escaping a generator
        # body is turned into a RuntimeError.
        return
    # Check whether the iterable is a real markup event stream by examining the
    # first item it yields; if it's not we'll need to do some conversion
    if type(event) is not tuple or len(event) != 3:
        for event in chain([event], stream):
            if hasattr(event, 'totuple'):
                event = event.totuple()
            else:
                event = TEXT, unicode(event), (None, -1, -1)
            yield event
        return
    # This looks like a markup event stream, so we'll just pass it through
    # unchanged
    yield event
    for event in stream:
        yield event
class Attrs(tuple):
    """Immutable sequence type that stores the attributes of an element.
    Ordering of the attributes is preserved, while access by name is also
    supported.
    >>> attrs = Attrs([('href', '#'), ('title', 'Foo')])
    >>> attrs
    Attrs([('href', '#'), ('title', 'Foo')])
    >>> 'href' in attrs
    True
    >>> 'tabindex' in attrs
    False
    >>> attrs.get('title')
    'Foo'
    Instances may not be manipulated directly. Instead, the operators ``|`` and
    ``-`` can be used to produce new instances that have specific attributes
    added, replaced or removed.
    To remove an attribute, use the ``-`` operator. The right hand side can be
    either a string or a set/sequence of strings, identifying the name(s) of
    the attribute(s) to remove:
    >>> attrs - 'title'
    Attrs([('href', '#')])
    >>> attrs - ('title', 'href')
    Attrs()
    The original instance is not modified, but the operator can of course be
    used with an assignment:
    >>> attrs
    Attrs([('href', '#'), ('title', 'Foo')])
    >>> attrs -= 'title'
    >>> attrs
    Attrs([('href', '#')])
    To add a new attribute, use the ``|`` operator, where the right hand value
    is a sequence of ``(name, value)`` tuples (which includes `Attrs`
    instances):
    >>> attrs | [('title', 'Bar')]
    Attrs([('href', '#'), ('title', 'Bar')])
    If the attributes already contain an attribute with a given name, the value
    of that attribute is replaced:
    >>> attrs | [('href', 'http://example.org/')]
    Attrs([('href', 'http://example.org/')])
    """
    __slots__ = []
    def __contains__(self, name):
        """Return whether the list includes an attribute with the specified
        name.
        :return: `True` if the list includes the attribute
        :rtype: `bool`
        """
        # NOTE(review): falls off the end and returns None (falsy) rather than
        # an explicit False when the attribute is absent; callers that only
        # test truthiness are unaffected.
        for attr, _ in self:
            if attr == name:
                return True
    def __getitem__(self, i):
        """Return an item or slice of the attributes list.
        >>> attrs = Attrs([('href', '#'), ('title', 'Foo')])
        >>> attrs[1]
        ('title', 'Foo')
        >>> attrs[1:]
        Attrs([('title', 'Foo')])
        """
        items = tuple.__getitem__(self, i)
        # Slices stay Attrs; single items are plain (name, value) tuples.
        if type(i) is slice:
            return Attrs(items)
        return items
    def __getslice__(self, i, j):
        """Return a slice of the attributes list.
        >>> attrs = Attrs([('href', '#'), ('title', 'Foo')])
        >>> attrs[1:]
        Attrs([('title', 'Foo')])
        """
        # Python 2 only; Python 3 routes slicing through __getitem__ above.
        return Attrs(tuple.__getslice__(self, i, j))
    def __or__(self, attrs):
        """Return a new instance that contains the attributes in `attrs` in
        addition to any already existing attributes. Any attributes in the new
        set that have a value of `None` are removed.
        :return: a new instance with the merged attributes
        :rtype: `Attrs`
        """
        # None-valued entries mark removals; existing names are replaced in
        # place (preserving order), genuinely new names are appended.
        remove = set([an for an, av in attrs if av is None])
        replace = dict([(an, av) for an, av in attrs
                        if an in self and av is not None])
        return Attrs([(sn, replace.get(sn, sv)) for sn, sv in self
                      if sn not in remove] +
                     [(an, av) for an, av in attrs
                      if an not in self and an not in remove])
    def __repr__(self):
        if not self:
            return 'Attrs()'
        return 'Attrs([%s])' % ', '.join([repr(item) for item in self])
    def __sub__(self, names):
        """Return a new instance with all attributes with a name in `names` are
        removed.
        :param names: the names of the attributes to remove
        :return: a new instance with the attribute removed
        :rtype: `Attrs`
        """
        # NOTE(review): ``basestring`` is Python-2-only; this module targets
        # Python 2 (with only partial 3.x shims elsewhere in the file).
        if isinstance(names, basestring):
            names = (names,)
        return Attrs([(name, val) for name, val in self if name not in names])
    def get(self, name, default=None):
        """Return the value of the attribute with the specified name, or the
        value of the `default` parameter if no such attribute is found.
        :param name: the name of the attribute
        :param default: the value to return when the attribute does not exist
        :return: the attribute value, or the `default` value if that attribute
                 does not exist
        :rtype: `object`
        """
        # Linear scan -- attribute lists are expected to be short.
        for attr, value in self:
            if attr == name:
                return value
        return default
    def totuple(self):
        """Return the attributes as a markup event.
        The returned event is a `TEXT` event, the data is the value of all
        attributes joined together.
        >>> Attrs([('href', '#'), ('title', 'Foo')]).totuple()
        ('TEXT', '#Foo', (None, -1, -1))
        :return: a `TEXT` event
        :rtype: `tuple`
        """
        return TEXT, ''.join([x[1] for x in self]), (None, -1, -1)
class Markup(unicode):
    """Marks a string as being safe for inclusion in HTML/XML output without
    needing to be escaped.

    All operations that combine a `Markup` with plain text (concatenation,
    ``%`` formatting, `join`) escape the plain-text operand first, so the
    result remains safe markup. (Subclasses the Python 2 ``unicode`` type.)
    """
    __slots__ = []
    def __add__(self, other):
        # Escape the right-hand operand before concatenating.
        return Markup(unicode.__add__(self, escape(other)))
    def __radd__(self, other):
        # Escape the left-hand operand before concatenating.
        return Markup(unicode.__add__(escape(other), self))
    def __mod__(self, args):
        # ``%`` formatting: escape every substituted value, whatever the
        # argument container type.
        if isinstance(args, dict):
            args = dict(zip(args.keys(), map(escape, args.values())))
        elif isinstance(args, (list, tuple)):
            args = tuple(map(escape, args))
        else:
            args = escape(args)
        return Markup(unicode.__mod__(self, args))
    def __mul__(self, num):
        # Repetition of safe markup is still safe.
        return Markup(unicode.__mul__(self, num))
    __rmul__ = __mul__
    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, unicode.__repr__(self))
    def join(self, seq, escape_quotes=True):
        """Return a `Markup` object which is the concatenation of the strings
        in the given sequence, where this `Markup` object is the separator
        between the joined elements.

        Any element in the sequence that is not a `Markup` instance is
        automatically escaped.

        :param seq: the sequence of strings to join
        :param escape_quotes: whether double quote characters in the elements
                              should be escaped
        :return: the joined `Markup` object
        :rtype: `Markup`
        :see: `escape`
        """
        return Markup(unicode.join(self, [escape(item, quotes=escape_quotes)
                                          for item in seq]))
    @classmethod
    def escape(cls, text, quotes=True):
        """Create a Markup instance from a string and escape special characters
        it may contain (<, >, & and \").

        >>> escape('"1 < 2"')
        <Markup u'&#34;1 &lt; 2&#34;'>

        If the `quotes` parameter is set to `False`, the \" character is left
        as is. Escaping quotes is generally only required for strings that are
        to be used in attribute values.

        >>> escape('"1 < 2"', quotes=False)
        <Markup u'"1 &lt; 2"'>

        :param text: the text to escape
        :param quotes: if ``True``, double quote characters are escaped in
                      addition to the other special characters
        :return: the escaped `Markup` string
        :rtype: `Markup`
        """
        if not text:
            return cls()
        if type(text) is cls:
            return text
        if hasattr(text, '__html__'):
            # Objects implementing the markup protocol escape themselves.
            return cls(text.__html__())
        # Replace the XML special characters with their entity references.
        # (The replacement targets had been mangled into no-ops such as
        # ``.replace('&', '&')``; restored to the standard entities, matching
        # what unescape() below reverses.)
        text = text.replace('&', '&amp;') \
                   .replace('<', '&lt;') \
                   .replace('>', '&gt;')
        if quotes:
            text = text.replace('"', '&#34;')
        return cls(text)
    def unescape(self):
        """Reverse-escapes &, <, > and \" and returns a `unicode` object.

        >>> Markup('1 &lt; 2').unescape()
        u'1 < 2'

        :return: the unescaped string
        :rtype: `unicode`
        :see: `genshi.core.unescape`
        """
        if not self:
            return ''
        # Reverse of escape(): replace entity references with their
        # characters; ``&amp;`` must come last so it is not double-expanded.
        return unicode(self).replace('&#34;', '"') \
                            .replace('&gt;', '>') \
                            .replace('&lt;', '<') \
                            .replace('&amp;', '&')
    def stripentities(self, keepxmlentities=False):
        """Return a copy of the text with any character or numeric entities
        replaced by the equivalent UTF-8 characters.

        If the `keepxmlentities` parameter is provided and evaluates to `True`,
        the core XML entities (``&amp;``, ``&apos;``, ``&gt;``, ``&lt;`` and
        ``&quot;``) are not stripped.

        :return: a `Markup` instance with entities removed
        :rtype: `Markup`
        :see: `genshi.util.stripentities`
        """
        return Markup(stripentities(self, keepxmlentities=keepxmlentities))
    def striptags(self):
        """Return a copy of the text with all XML/HTML tags removed.

        :return: a `Markup` instance with all tags removed
        :rtype: `Markup`
        :see: `genshi.util.striptags`
        """
        return Markup(striptags(self))
try:
    # Prefer the C implementation of Markup when the optional speedups
    # extension is built; it is a drop-in replacement for the class above.
    from genshi._speedups import Markup
except ImportError:
    pass # just use the Python implementation
# Convenience alias: callers can use ``escape(...)`` as a plain function.
escape = Markup.escape
def unescape(text):
    """Reverse-escape a `Markup` instance back to a plain `unicode` string.

    If the provided `text` object is not a `Markup` instance, it is returned
    unchanged.

    :param text: the text to unescape
    :return: the unescaped string
    :rtype: `unicode`
    """
    if isinstance(text, Markup):
        return text.unescape()
    # Plain strings carry no entity-escaped content to undo.
    return text
class Namespace(object):
    """Utility class creating and testing elements with a namespace.
    Internally, namespace URIs are encoded in the `QName` of any element or
    attribute, the namespace URI being enclosed in curly braces. This class
    helps create and test these strings.
    A `Namespace` object is instantiated with the namespace URI.
    >>> html = Namespace('http://www.w3.org/1999/xhtml')
    >>> html
    Namespace('http://www.w3.org/1999/xhtml')
    >>> html.uri
    u'http://www.w3.org/1999/xhtml'
    The `Namespace` object can than be used to generate `QName` objects with
    that namespace:
    >>> html.body
    QName('http://www.w3.org/1999/xhtml}body')
    >>> html.body.localname
    u'body'
    >>> html.body.namespace
    u'http://www.w3.org/1999/xhtml'
    The same works using item access notation, which is useful for element or
    attribute names that are not valid Python identifiers:
    >>> html['body']
    QName('http://www.w3.org/1999/xhtml}body')
    A `Namespace` object can also be used to test whether a specific `QName`
    belongs to that namespace using the ``in`` operator:
    >>> qname = html.body
    >>> qname in html
    True
    >>> qname in Namespace('http://www.w3.org/2002/06/xhtml2')
    False
    """
    def __new__(cls, uri):
        # Idempotent constructor: passing an existing Namespace returns it.
        if type(uri) is cls:
            return uri
        return object.__new__(cls)
    def __getnewargs__(self):
        # Pickle support: arguments passed to __new__ on unpickling.
        return (self.uri,)
    def __getstate__(self):
        return self.uri
    def __setstate__(self, uri):
        self.uri = uri
    def __init__(self, uri):
        # ``unicode`` is the Python 2 text type; this module targets Python 2.
        self.uri = unicode(uri)
    def __contains__(self, qname):
        # A QName belongs to this namespace iff its namespace URI matches.
        return qname.namespace == self.uri
    def __ne__(self, other):
        return not self == other
    def __eq__(self, other):
        # Compares equal both to other Namespace objects and to plain URIs.
        if isinstance(other, Namespace):
            return self.uri == other.uri
        return self.uri == other
    def __getitem__(self, name):
        # Build a QName in Clark notation ("{uri}local"); the QName
        # constructor accepts the form without the leading brace.
        return QName(self.uri + '}' + name)
    __getattr__ = __getitem__
    def __hash__(self):
        return hash(self.uri)
    if sys.version_info[0] == 2:
        # Only use stringrepr in python 2
        def __repr__(self):
            return '%s(%s)' % (type(self).__name__, stringrepr(self.uri))
    else:
        def __repr__(self):
            return '%s(%r)' % (type(self).__name__, self.uri)
    def __str__(self):
        # Python 2 byte-string conversion of the URI.
        return self.uri.encode('utf-8')
    def __unicode__(self):
        return self.uri
# The namespace used by attributes such as xml:lang and xml:space
# (the ``xml`` prefix is predefined by the XML specification).
XML_NAMESPACE = Namespace('http://www.w3.org/XML/1998/namespace')
class QName(unicode):
    """A qualified element or attribute name.
    The unicode value of instances of this class contains the qualified name of
    the element or attribute, in the form ``{namespace-uri}local-name``. The
    namespace URI can be obtained through the additional `namespace` attribute,
    while the local name can be accessed through the `localname` attribute.
    >>> qname = QName('foo')
    >>> qname
    QName('foo')
    >>> qname.localname
    u'foo'
    >>> qname.namespace
    >>> qname = QName('http://www.w3.org/1999/xhtml}body')
    >>> qname
    QName('http://www.w3.org/1999/xhtml}body')
    >>> qname.localname
    u'body'
    >>> qname.namespace
    u'http://www.w3.org/1999/xhtml'
    """
    # Extra attributes on a unicode subclass require explicit __slots__.
    __slots__ = ['namespace', 'localname']
    def __new__(cls, qname):
        """Create the `QName` instance.
        :param qname: the qualified name as a string of the form
                      ``{namespace-uri}local-name``, where the leading curly
                      brace is optional
        """
        # Idempotent: passing an existing QName returns it unchanged.
        if type(qname) is cls:
            return qname
        qname = qname.lstrip('{')
        # Split on the first '}' only; the local name may not contain one.
        parts = qname.split('}', 1)
        if len(parts) > 1:
            # Store in Clark-like form '{uri}local' (the leading brace was
            # stripped above, so it is re-added here).
            self = unicode.__new__(cls, '{%s' % qname)
            self.namespace, self.localname = map(unicode, parts)
        else:
            # No namespace: the value is just the local name.
            self = unicode.__new__(cls, qname)
            self.namespace, self.localname = None, unicode(qname)
        return self
    def __getnewargs__(self):
        # Pickle support: rebuild from the brace-less qualified name.
        return (self.lstrip('{'),)
    if sys.version_info[0] == 2:
        # Only use stringrepr in python 2
        def __repr__(self):
            return '%s(%s)' % (type(self).__name__, stringrepr(self.lstrip('{')))
    else:
        def __repr__(self):
            return '%s(%r)' % (type(self).__name__, self.lstrip('{'))
|
dag/genshi
|
genshi/core.py
|
Python
|
bsd-3-clause
| 25,672
|
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
# Canonical project identifier (dashed form).
PROJECT_NAME = 'releng-treestatus'
# Importable Python application name (underscored form of the project name).
APP_NAME = 'releng_treestatus'
|
srfraser/services
|
src/releng_treestatus/releng_treestatus/config.py
|
Python
|
mpl-2.0
| 331
|
import sys
import getopt
from src.main import Main
def main(argv):
    """Parse command-line options and dispatch to the requested action.

    Supported options:
      -a KEYWORD   keyword for a new shortcut (used together with -p)
      -p PATH      path the new shortcut points to
      --list       print the stored shortcuts via Main.list()

    :param argv: argument list excluding the program name
                 (i.e. ``sys.argv[1:]``)
    Exits the process with status 2 on invalid options.
    """
    __keyword, __path = '', ''
    try:
        opts, args = getopt.getopt(argv, 'a:p:', ['list'])
    except getopt.GetoptError as err:
        # Report the bad usage and exit. The original code raised a bare
        # Exception here, which made the sys.exit(2) after it unreachable;
        # exiting with status 2 is the conventional getopt error path.
        print('Something went wrong: {0}'.format(err), file=sys.stderr)
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-a':
            __keyword = arg
        elif opt == '-p':
            __path = arg
        elif opt == '--list':
            Main.list()
    # Check the function's own argument list rather than the global
    # sys.argv, so main() behaves the same however it is invoked.
    if '-a' in argv:
        Main.add_shortcut(__keyword, __path)
        print('{0} is added with a pass {1}'.format(__keyword, __path))
def execute(__keyword):
    """Delegate to Main.goto() for the given shortcut keyword."""
    Main.goto(__keyword)
if __name__ == '__main__':
    # With exactly one argument that is not --list, treat it as a shortcut
    # keyword to jump to; otherwise fall through to full option parsing.
    args_len = len(sys.argv)
    if args_len == 2 and '--list' not in sys.argv:
        execute(sys.argv[1])
    else:
        main(sys.argv[1:])
|
Artie18/goto
|
app.py
|
Python
|
apache-2.0
| 823
|
from rest_framework.response import Response
from rest_framework.views import APIView
from mymoney.transactions.models import Transaction
from .utils import get_currencies
class ConfigAPIView(APIView):
    """Read-only endpoint exposing the static configuration choices
    (currencies, payment methods, transaction statuses) to clients."""

    def get(self, request, *args, **kwargs):
        """Return the configuration dictionaries as a single response."""
        payload = {
            'currencies': dict(get_currencies()),
            'payment_methods': dict(Transaction.PAYMENT_METHODS),
            'statuses': dict(Transaction.STATUSES),
        }
        return Response(payload)
|
ychab/mymoney-server
|
mymoney/core/views.py
|
Python
|
bsd-3-clause
| 457
|
# encoding: utf-8
import datetime
import django
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
# Django 1.5+ compatibility
if django.VERSION >= (1, 5):
    # Django >= 1.5: use the official accessor, which honors custom user
    # models (AUTH_USER_MODEL).
    from django.contrib.auth import get_user_model
else:
    # Older Django: shim get_user_model() around the builtin User model so
    # callers below can use one code path.
    from django.contrib.auth.models import User
    def get_user_model():
        return User
class Migration(SchemaMigration):
    """Initial South migration for django-waffle: creates the Flag and
    Switch tables plus Flag's groups/users M2M join tables."""
    def forwards(self, orm):
        """Apply: create waffle_flag, its two M2M tables, and waffle_switch."""
        # Adding model 'Flag'
        db.create_table('waffle_flag', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100)),
            ('everyone', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
            ('percent', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=3, decimal_places=1, blank=True)),
            ('superusers', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('staff', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('authenticated', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('rollout', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('waffle', ['Flag'])
        # Adding M2M table for field groups on 'Flag'
        db.create_table('waffle_flag_groups', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('flag', models.ForeignKey(orm['waffle.flag'], null=False)),
            ('group', models.ForeignKey(orm['auth.group'], null=False))
        ))
        db.create_unique('waffle_flag_groups', ['flag_id', 'group_id'])
        # Adding M2M table for field users on 'Flag'
        # (uses get_user_model() so custom user models work on Django 1.5+)
        db.create_table('waffle_flag_users', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('flag', models.ForeignKey(orm['waffle.flag'], null=False)),
            ('user', models.ForeignKey(get_user_model(), null=False))
        ))
        db.create_unique('waffle_flag_users', ['flag_id', 'user_id'])
        # Adding model 'Switch'
        db.create_table('waffle_switch', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100)),
            ('active', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('waffle', ['Switch'])
    def backwards(self, orm):
        """Revert: drop every table created in forwards()."""
        # Deleting model 'Flag'
        db.delete_table('waffle_flag')
        # Removing M2M table for field groups on 'Flag'
        db.delete_table('waffle_flag_groups')
        # Removing M2M table for field users on 'Flag'
        db.delete_table('waffle_flag_users')
        # Deleting model 'Switch'
        db.delete_table('waffle_switch')
    # Frozen ORM state used by South to reconstruct the models as they were
    # at this point in history; generated — do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'waffle.flag': {
            'Meta': {'object_name': 'Flag'},
            'authenticated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'everyone': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '1', 'blank': 'True'}),
            'rollout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'superusers': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'waffle.switch': {
            'Meta': {'object_name': 'Switch'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        }
    }
    complete_apps = ['waffle']
|
mark-adams/django-waffle
|
waffle/south_migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 7,674
|
##############################################################################
#
# Copyright (C) 2004-2014 Pexego Sistemas Informáticos All Rights Reserved
# $Marta Vázquez Rodríguez$ <marta@pexego.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import models
from . import report
|
Comunitea/CMNT_004_15
|
project-addons/sale_display_stock/__init__.py
|
Python
|
agpl-3.0
| 1,022
|
from app.oauth import OAuthSignIn
from flask import render_template, flash, redirect, url_for, g
from flask.ext.login import login_user, logout_user, current_user, login_required
from app import app, db, lm
from app.forms import SettingsForm, EventForm
from app.models import User, Settings, Event
@lm.user_loader
def load_user(id):
    """Flask-Login callback: fetch the session user by primary key."""
    user_pk = int(id)
    return User.query.get(user_pk)
@app.before_request
def before_request():
    # Expose the logged-in user on flask.g for handlers and templates.
    g.user = current_user
@app.errorhandler(404)
def not_found_error(error):
    """Render the custom 404 page."""
    return render_template('404.html'), 404
@app.errorhandler(500)
def internal_error(error):
    """Render the custom 500 page after rolling back the failed session."""
    # Roll back so an aborted transaction cannot poison later requests.
    db.session.rollback()
    return render_template('500.html'), 500
@app.route('/')
@app.route('/index')
def index():
    """Render the landing page for the current user."""
    return render_template('index.html', title='Home', user=g.user)
@app.route('/authorize/<provider>')
def oauth_authorize(provider):
    """Start the OAuth sign-in flow for the named provider."""
    # Already signed in -- nothing to authorize.
    if not current_user.is_anonymous():
        return redirect(url_for('index'))
    oauth = OAuthSignIn.get_provider(provider)
    return oauth.authorize()
@app.route('/callback/<provider>')
def oauth_callback(provider):
    """Handle the provider's OAuth redirect: sign the user in, creating a
    local User record on first login."""
    # Already authenticated -- skip callback handling.
    if not current_user.is_anonymous():
        return redirect(url_for('index'))
    oauth = OAuthSignIn.get_provider(provider)
    social_id, username, email = oauth.callback()
    if social_id is None:
        flash('Authentication failed.')
        return redirect(url_for('index'))
    user = User.query.filter_by(social_id=social_id).first()
    if not user:
        # First login via this provider: provision a local account.
        user = User(social_id=social_id, nickname=username, email=email)
        db.session.add(user)
        db.session.commit()
    # Second positional arg is presumably flask-login's `remember` flag —
    # verify against the flask.ext.login API.
    login_user(user, True)
    return redirect(url_for('index'))
@app.route('/logout')
def logout():
    """Log the current user out and return to the landing page."""
    logout_user()
    return redirect(url_for('index'))
@app.route('/settings', methods=['GET', 'POST'])
@login_required
def settings():
    """Show and persist the current user's settings.

    The route previously accepted only GET, so the validate_on_submit()
    branch below could never run; POST is now accepted so submitted
    settings are actually saved.
    """
    form = SettingsForm()
    settings_instance = g.user.settings.first()
    if form.validate_on_submit():
        # Create the settings row lazily on first save.
        if settings_instance is None:
            settings_instance = Settings()
        settings_instance.set_from_dict_form(form.__dict__)
        settings_instance.User = g.user
        db.session.add(settings_instance)
        db.session.commit()
        flash('Settings saved')
        return redirect(url_for('index'))
    # GET (or invalid POST): pre-fill the form from stored settings.
    if settings_instance is not None:
        form.set_from_dict_model(settings_instance.__dict__)
    return render_template('settings.html', form=form)
@app.route('/event')
def events():
    """Alias: /event redirects to the event list."""
    return redirect(url_for('event_list'))
@app.route('/event/list', methods=['GET', 'POST'])
def event_list():
    """Render the list of all events."""
    # Local renamed so it no longer shadows the events() view above.
    all_events = Event.query.all()
    return render_template('event/list.html', events=all_events)
@app.route('/event/show/<id>')
def event_show(id):
    """Display a single event in a read-only form."""
    if id is not None:
        form = EventForm()
        event_instance = Event.query.filter_by(id=id).first()
        # Guard against unknown ids: .first() returns None and the original
        # code would crash on event_instance.__dict__.
        if event_instance is None:
            flash('Unable to find event')
            return redirect(url_for('event_list'))
        form.set_from_dict_model(event_instance.__dict__)
        return render_template('event/show.html', form=form)
    else:
        flash('Unable to find event')
        return redirect(url_for('event_list'))
@app.route('/event/create')
def event_create():
    """Create a new event by delegating to the edit view with no id."""
    return event_edit(None)
@app.route('/event/edit/<id>')
def event_edit(id):
    """Create (id is None) or edit an existing event."""
    form = EventForm()
    event_instance = Event()
    if id is not None:
        event_instance = Event.query.filter_by(id=id).first()
        # Unknown id: bail out instead of failing on None below.
        if event_instance is None:
            flash('Unable to find event')
            return redirect(url_for('event_list'))
    if form.validate_on_submit():
        event_instance.set_from_dict_form(form.__dict__)
        if id is None:
            # Only stamp the creator on newly created events.
            event_instance.creator = g.user
        db.session.add(event_instance)
        db.session.commit()
        flash('Event saved')
        return redirect(url_for('event_list'))
    form.set_from_dict_model(event_instance.__dict__)
    # The original fell off the end (returning None, a server error in
    # Flask); the rendered page must be returned.
    return render_template('event/edit.html', form=form)
@app.route('/event/delete/<id>')
def event_delete(id):
    """Soft-delete an event by marking its status as 'deleted'."""
    if id is not None:
        event = Event.query.filter_by(id=id).first()
        # Guard against unknown ids (.first() returns None).
        if event is None:
            flash('Unable to find event')
            return redirect(url_for('event_list'))
        event.status = 'deleted'
        # The original never committed, so the status change was lost at
        # the end of the request; persist it explicitly.
        db.session.commit()
        flash('Event deleted')
        return redirect(url_for('event_list'))
    else:
        flash('Unable to find event')
        return redirect(url_for('event_list'))
|
kozak127/gherkins
|
app/views.py
|
Python
|
mit
| 4,115
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.contrib import admin
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from weblate.trans.models import (
Project, SubProject, Translation, Advertisement,
Unit, Suggestion, Comment, Check, Dictionary, Change,
Source, WhiteboardMessage
)
class ProjectAdmin(admin.ModelAdmin):
    """Admin for translation projects, with per-row repo/string/word/language
    counts and bulk VCS/check/commit actions."""
    list_display = (
        'name', 'slug', 'web', 'owner', 'enable_acl', 'enable_hooks',
        'num_vcs', 'num_strings', 'num_words', 'num_langs',
    )
    prepopulated_fields = {'slug': ('name',)}
    search_fields = ['name', 'slug', 'web']
    actions = ['update_from_git', 'update_checks', 'force_commit']
    def num_vcs(self, obj):
        # Count real repositories only; 'weblate:/' repos are internal links.
        return obj.subproject_set.exclude(repo__startswith='weblate:/').count()
    num_vcs.short_description = _('VCS repositories')
    def num_strings(self, obj):
        return obj.get_total()
    num_strings.short_description = _('Source strings')
    def num_words(self, obj):
        return obj.get_total_words()
    num_words.short_description = _('Source words')
    def num_langs(self, obj):
        return obj.get_language_count()
    num_langs.short_description = _('Languages')
    def update_from_git(self, request, queryset):
        """
        Updates selected components from git.
        """
        for project in queryset:
            project.do_update(request)
        self.message_user(request, "Updated %d git repos." % queryset.count())
    update_from_git.short_description = _('Update VCS repository')
    def update_checks(self, request, queryset):
        """
        Recalculates checks for selected components.
        """
        cnt = 0
        units = Unit.objects.filter(
            translation__subproject__project__in=queryset
        )
        for unit in units.iterator():
            unit.run_checks()
            cnt += 1
        self.message_user(request, "Updated checks for %d units." % cnt)
    update_checks.short_description = _('Update quality checks')
    def force_commit(self, request, queryset):
        """
        Commits pending changes for selected components.
        """
        for project in queryset:
            project.commit_pending(request)
        self.message_user(
            request,
            "Flushed changes in %d git repos." % queryset.count()
        )
    force_commit.short_description = _('Commit pending changes')
class SubProjectAdmin(admin.ModelAdmin):
    """Admin for project components (subprojects).  The bulk actions mirror
    ProjectAdmin's, scoped to the selected components."""
    list_display = [
        'name', 'slug', 'project', 'repo', 'branch', 'vcs', 'file_format'
    ]
    prepopulated_fields = {'slug': ('name',)}
    search_fields = ['name', 'slug', 'repo', 'branch']
    list_filter = ['project', 'vcs', 'file_format']
    actions = ['update_from_git', 'update_checks', 'force_commit']
    def update_from_git(self, request, queryset):
        """
        Updates selected components from git.
        """
        for project in queryset:
            project.do_update(request)
        self.message_user(request, "Updated %d git repos." % queryset.count())
    update_from_git.short_description = _('Update VCS repository')
    def update_checks(self, request, queryset):
        """
        Recalculates checks for selected components.
        """
        cnt = 0
        units = Unit.objects.filter(
            translation__subproject__in=queryset
        )
        for unit in units.iterator():
            unit.run_checks()
            cnt += 1
        self.message_user(
            request,
            "Updated checks for %d units." % cnt
        )
    update_checks.short_description = _('Update quality checks')
    def force_commit(self, request, queryset):
        """
        Commits pending changes for selected components.
        """
        for project in queryset:
            project.commit_pending(request)
        self.message_user(
            request,
            "Flushed changes in %d git repos." % queryset.count()
        )
    force_commit.short_description = _('Commit pending changes')
class TranslationAdmin(admin.ModelAdmin):
    """Admin for individual translations with bulk enable/disable actions."""
    list_display = [
        'subproject', 'language', 'translated', 'total',
        'fuzzy', 'revision', 'filename', 'enabled'
    ]
    search_fields = [
        'subproject__slug', 'language__code', 'revision', 'filename'
    ]
    list_filter = ['enabled', 'subproject__project', 'subproject', 'language']
    actions = ['enable_translation', 'disable_translation']
    def enable_translation(self, request, queryset):
        """
        Mass enabling of translations.
        """
        queryset.update(enabled=True)
        self.message_user(
            request,
            "Enabled %d translations." % queryset.count()
        )
    def disable_translation(self, request, queryset):
        """
        Mass disabling of translations.
        """
        queryset.update(enabled=False)
        self.message_user(
            request,
            "Disabled %d translations." % queryset.count()
        )
class UnitAdmin(admin.ModelAdmin):
    """Admin for translation units (individual source/target strings)."""
    list_display = ['source', 'target', 'position', 'fuzzy', 'translated']
    search_fields = ['source', 'target', 'checksum']
    list_filter = [
        'translation__subproject',
        'translation__language',
        'fuzzy',
        'translated'
    ]
class SuggestionAdmin(admin.ModelAdmin):
    """Admin for translation suggestions."""
    list_display = ['contentsum', 'target', 'project', 'language', 'user']
    list_filter = ['project', 'language']
    search_fields = ['contentsum', 'target']
class CommentAdmin(admin.ModelAdmin):
    """Admin for translation comments."""
    # The original listed 'user' twice in list_display, producing a
    # duplicated column in the changelist; the duplicate is removed.
    list_display = [
        'contentsum', 'comment', 'user', 'project', 'language'
    ]
    list_filter = ['project', 'language']
    search_fields = ['contentsum', 'comment']
class CheckAdmin(admin.ModelAdmin):
    """Admin for quality-check results."""
    list_display = ['contentsum', 'check', 'project', 'language', 'ignore']
    search_fields = ['contentsum', 'check']
    list_filter = ['check', 'project', 'ignore']
class DictionaryAdmin(admin.ModelAdmin):
    """Admin for glossary/dictionary entries."""
    list_display = ['source', 'target', 'project', 'language']
    search_fields = ['source', 'target']
    list_filter = ['project', 'language']
class ChangeAdmin(admin.ModelAdmin):
    """Admin for the change history."""
    list_display = ['unit', 'user', 'timestamp']
    date_hierarchy = 'timestamp'
    list_filter = [
        'unit__translation__subproject',
        'unit__translation__subproject__project',
        'unit__translation__language'
    ]
    # raw_id_fields avoids rendering a huge unit dropdown in the form.
    raw_id_fields = ('unit',)
class WhiteboardAdmin(admin.ModelAdmin):
    """Admin for whiteboard messages."""
    list_display = ['message']
    # Explicitly empty: no slug-style prepopulation for this model.
    prepopulated_fields = {}
    search_fields = ['message']
class AdvertisementAdmin(admin.ModelAdmin):
    """Admin for advertisements, browsable by end date."""
    list_display = ['placement', 'date_start', 'date_end', 'text']
    search_fields = ['text', 'note']
    date_hierarchy = 'date_end'
class SourceAdmin(admin.ModelAdmin):
    """Admin for source-string metadata."""
    list_display = ['checksum', 'priority', 'timestamp']
    date_hierarchy = 'timestamp'
# Register in admin interface
admin.site.register(Project, ProjectAdmin)
admin.site.register(SubProject, SubProjectAdmin)
admin.site.register(Advertisement, AdvertisementAdmin)
admin.site.register(WhiteboardMessage, WhiteboardAdmin)
# Show some controls only in debug mode
# (presumably these per-unit admins are too heavy/dangerous for
# production use — confirm with upstream policy)
if settings.DEBUG:
    admin.site.register(Translation, TranslationAdmin)
    admin.site.register(Unit, UnitAdmin)
    admin.site.register(Suggestion, SuggestionAdmin)
    admin.site.register(Comment, CommentAdmin)
    admin.site.register(Check, CheckAdmin)
    admin.site.register(Dictionary, DictionaryAdmin)
    admin.site.register(Change, ChangeAdmin)
    admin.site.register(Source, SourceAdmin)
|
renatofb/weblate
|
weblate/trans/admin.py
|
Python
|
gpl-3.0
| 8,277
|
# Copyright (C) 2013, Daniel Narvaez
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
from gi.repository import Gtk
from dbus.mainloop.glib import DBusGMainLoop
DBusGMainLoop(set_as_default=True)
from jarabe.desktop.activitieslist import ActivityListPalette
# Resolve test directories relative to the current working directory
# (assumes the suite is launched from the tests/ directory — TODO confirm).
tests_dir = os.getcwd()
base_dir = os.path.dirname(tests_dir)
data_dir = os.path.join(tests_dir, "data")
class MockActivityInfo:
    """Minimal stand-in for an activity-info object, providing only the
    accessors the palette under test calls."""
    def get_bundle_id(self):
        return "mock"
    def get_activity_version(self):
        return 1
    def get_is_favorite(self):
        return False
    def get_icon(self):
        # Real icon file from the test data directory.
        return os.path.join(data_dir, "activity.svg")
    def get_name(self):
        return "mock"
    def get_path(self):
        return "mock"
    def is_user_activity(self):
        return True
# Point Sugar at the test mime defaults instead of the system-wide ones.
os.environ["SUGAR_MIME_DEFAULTS"] = \
    os.path.join(base_dir, "data", "mime.defaults")
# Build the palette from the mock, show it, and hand control to GTK.
palette = ActivityListPalette(MockActivityInfo())
palette.popup()
Gtk.main()
|
erilyth/sugar
|
tests/views/activitieslist.py
|
Python
|
gpl-2.0
| 1,613
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Integration tests for loading and saving netcdf files."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests # isort:skip
from os.path import dirname
from os.path import join as path_join
from os.path import sep as os_sep
import shutil
from subprocess import check_call
import tempfile
import iris
from iris.tests import stock
class TestClimatology(iris.tests.IrisTest):
    """Round-trip tests for climatology data through netCDF save/load,
    checked against a stored reference CDL file (requires `ncgen`)."""
    # Stored reference CDL, located relative to the iris.tests package.
    reference_cdl_path = os_sep.join(
        [
            dirname(tests.__file__),
            (
                "results/integration/climatology/TestClimatology/"
                "reference_simpledata.cdl"
            ),
        ]
    )
    @classmethod
    def _simple_cdl_string(cls):
        """Return the reference CDL text with its header line restored."""
        with open(cls.reference_cdl_path, "r") as f:
            cdl_content = f.read()
        # Add the expected CDL first line since this is removed from the
        # stored results file.
        cdl_content = "netcdf {\n" + cdl_content
        return cdl_content
    @staticmethod
    def _load_sanitised_cube(filepath):
        """Load a cube and strip incidental metadata so cubes compare equal."""
        cube = iris.load_cube(filepath)
        # Remove attributes convention, if any.
        cube.attributes.pop("Conventions", None)
        # Remove any var-names.
        for coord in cube.coords():
            coord.var_name = None
        cube.var_name = None
        return cube
    @classmethod
    def setUpClass(cls):
        """Materialise the reference CDL/netCDF files and reference cube."""
        # Create a temp directory for temp files.
        cls.temp_dir = tempfile.mkdtemp()
        cls.path_ref_cdl = path_join(cls.temp_dir, "standard.cdl")
        cls.path_ref_nc = path_join(cls.temp_dir, "standard.nc")
        # Create reference CDL file.
        with open(cls.path_ref_cdl, "w") as f_out:
            f_out.write(cls._simple_cdl_string())
        # Create reference netCDF file from reference CDL.
        command = "ncgen -o {} {}".format(cls.path_ref_nc, cls.path_ref_cdl)
        check_call(command, shell=True)
        cls.path_temp_nc = path_join(cls.temp_dir, "tmp.nc")
        # Create reference cube.
        cls.cube_ref = stock.climatology_3d()
    @classmethod
    def tearDownClass(cls):
        # Destroy a temp directory for temp files.
        shutil.rmtree(cls.temp_dir)
    ###############################################################################
    # Round-trip tests
    def test_cube_to_cube(self):
        # Save reference cube to file, load cube from same file, test against
        # reference cube.
        iris.save(self.cube_ref, self.path_temp_nc)
        cube = self._load_sanitised_cube(self.path_temp_nc)
        self.assertEqual(cube, self.cube_ref)
    def test_file_to_file(self):
        # Load cube from reference file, save same cube to file, test against
        # reference CDL.
        cube = iris.load_cube(self.path_ref_nc)
        iris.save(cube, self.path_temp_nc)
        self.assertCDL(
            self.path_temp_nc,
            reference_filename=self.reference_cdl_path,
            flags="",
        )
    # NOTE:
    # The saving half of the round-trip tests is tested in the
    # appropriate dedicated test class:
    # unit.fileformats.netcdf.test_Saver.Test_write.test_with_climatology .
    # The loading half has no equivalent dedicated location, so is tested
    # here as test_load_from_file.
    def test_load_from_file(self):
        # Create cube from file, test against reference cube.
        cube = self._load_sanitised_cube(self.path_ref_nc)
        self.assertEqual(cube, self.cube_ref)
if __name__ == "__main__":
    # Allow running this test module directly.
    tests.main()
|
SciTools/iris
|
lib/iris/tests/integration/test_climatology.py
|
Python
|
lgpl-3.0
| 3,735
|
# Auto-generated fixture for IDE completion tests ("heavy star propagation");
# the names themselves are the data under test — do not rename.
name0_1_1_0_1_0_0 = None
name0_1_1_0_1_0_1 = None
name0_1_1_0_1_0_2 = None
name0_1_1_0_1_0_3 = None
name0_1_1_0_1_0_4 = None
|
siosio/intellij-community
|
python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_1/_pkg0_1_1/_pkg0_1_1_0/_pkg0_1_1_0_1/_mod0_1_1_0_1_0.py
|
Python
|
apache-2.0
| 128
|
#!/usr/bin/python3
#
# pkgvalidator.py
#
# Copyright (C) 2015 Endless Mobile, Inc.
# Authors:
# Mario Sanchez Prada <mario@endlessm.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import config
import gnupg
import hashlib
import os
import re
import shutil
import utils
from gi.repository import GLib
from debug import *
class PackageValidator:
    """
    Class that allows validating a debian package based on its full URL and the
    fingerprint of the GPG public key used to sign the source APT repository.
    """
    def __init__(self, uri, fingerprint, temporary_dir=config.TEMPORARY_DIR):
        """Prepare repository URIs and the GPG keyring for validation.

        uri          -- full URL of the .deb package inside the repository
        fingerprint  -- fingerprint of the trusted signing key
        temporary_dir -- where downloads are stored during validation
        """
        self._uri = uri
        self._fingerprint = fingerprint
        self._temporary_dir = temporary_dir
        # Build needed URIs based on the package's full URI.
        self._release_file_uri = self._getReleaseFileURI()
        self._release_gpg_uri = self._release_file_uri + '.gpg'
        self._packages_file_uri = os.path.join(os.path.dirname(self._uri), 'Packages')
        # We need to make sure the directory for the trusted.gpg
        # file exists before starting to use GPG.
        keyring_basedir = os.path.dirname(config.TRUSTED_KEYRING_FILE)
        os.makedirs(keyring_basedir, exist_ok=True)
        self._gpg = gnupg.GPG(keyring=config.TRUSTED_KEYRING_FILE)
        self._gpg.encoding = 'utf-8'
    def run(self, localfile=None):
        """
        Run the checks required to validate the debian package, downloading the
        package from the URI specified in the constructor, unless an absolute path
        to an already present local file is provided via the localfile parameter.
        Return True if the debian package could be validated, or False otherwise.
        """
        # If no local file has been specified, we download it from
        # the URI provided and check if it's valid from there.
        with_localfile = localfile is not None
        if not with_localfile:
            localfile = utils.downloadToTemporaryFile(self._uri)
        release_file_path = utils.downloadToTemporaryFile(self._release_file_uri, self._temporary_dir)
        release_gpg_path = utils.downloadToTemporaryFile(self._release_gpg_uri, self._temporary_dir)
        packages_file_path = utils.downloadToTemporaryFile(self._packages_file_uri, self._temporary_dir)
        try:
            self._importKeyIfNeeded(self._fingerprint)
            # Chain of trust: the signed Release file must vouch for the
            # Packages index, which in turn must vouch for the package.
            verified = self._verifySignature(release_gpg_path, release_file_path)
            result = verified and \
                self._findHashForFile(release_file_path, packages_file_path, hashlib.sha256) and \
                self._findHashForFile(packages_file_path, localfile, hashlib.sha1)
        finally:
            # Always clean up the temporary downloads, even if a check raised
            # (the original leaked them on exceptions).
            os.remove(packages_file_path)
            os.remove(release_gpg_path)
            os.remove(release_file_path)
            if not with_localfile:
                os.remove(localfile)
        return result
    def _getReleaseFileURI(self, suffix=None):
        """Derive the distribution's Release file URI from the package URI.

        Returns '' when the URI does not follow the expected
        "$archive_root/dists/$distribution/..." layout.
        """
        try:
            archive_root_index = self._uri.index('dists')
        except ValueError:
            debugprint("Could not find repository root for %s" %self._uri)
            return ''
        # URI is something like "$archive_root/dists/$distribution/",
        # so we need to look for the first '/' after (root_index + 6)
        # to be able to find the base URL of the distribution.
        try:
            # str.index() raises ValueError when not found.  The original
            # used str.find(), which returns -1 instead of raising, so the
            # except clause below was dead code and a missing '/' silently
            # produced a bogus URI via self._uri[:-1].
            dist_index = self._uri.index('/', archive_root_index + 6)
        except ValueError:
            debugprint("Could not find distribution base URL for %s" %self._uri)
            return ''
        return os.path.join(self._uri[:dist_index], 'Release')
    def _importKeyIfNeeded(self, key):
        """Fetch `key` from the trusted key server unless already present."""
        keys = self._gpg.list_keys()
        for fp in keys.fingerprints:
            if fp == key:
                debugprint("Key %s found in local keyring" % fp)
                return
        self._gpg.recv_keys(config.TRUSTED_KEY_SERVER, key)
    def _verifySignature(self, signature_path, signed_path):
        """Return True if signature_path is a valid signature of signed_path."""
        # `with` guarantees the handle is closed even if verification raises.
        with open(signature_path, 'rb') as signature_bfile:
            verified = self._gpg.verify_file(signature_bfile, signed_path)
        if verified.trust_level is not None:
            debugprint("%s verified with signature %s" %
                       (os.path.basename(signed_path),
                        os.path.basename(signature_path)))
            return True
        debugprint("%s could NOT be verified with signature %s" %
                   (os.path.basename(signed_path),
                    os.path.basename(signature_path)))
        return False
    def _findHashForFile(self, haystack_path, needle_path, hash_func):
        """Return True if the hash_func digest of needle_path occurs in
        haystack_path (a text index such as Release or Packages)."""
        with open(needle_path, 'rb') as needle_bfile:
            needle_hash = hash_func(needle_bfile.read())
        needle_hash_str = needle_hash.hexdigest()
        debugprint("Hash %s for %s: %s" % (needle_hash.name, needle_path, needle_hash_str))
        found = False
        with open(haystack_path, 'r') as haystack_file:
            for line in haystack_file:
                if re.search(needle_hash_str, line):
                    found = True
                    break
        debugprint("%s hash %sfound in %s" % (os.path.basename(needle_path),
                                              ("" if found else "NOT "),
                                              os.path.basename(haystack_path)))
        return found
# Manual smoke test: validates a known package once via download and once
# via a pre-downloaded local file.  Network access required.
if __name__== "__main__":
    # Values meant just for debugging purposes.
    TEST_URL='http://www.openprinting.org/download/printdriver/debian/dists/lsb3.2/main/binary-amd64/openprinting-ppds-postscript-brother_20130226-1lsb3.2_all.deb'
    TEST_KEY='F8897B6F00075648E248B7EC24CBF5474CFD1E2F'
    set_debugging(True)
    # By default every temporary file will be downloaded to config.TEMPORARY_DIR.
    shutil.rmtree(config.TEMPORARY_DIR, ignore_errors=True)
    os.makedirs(config.TEMPORARY_DIR, exist_ok=True)
    validator = PackageValidator(TEST_URL, TEST_KEY)
    try:
        debugprint("Validating package without specifying a local file...")
        if validator.run():
            debugprint("OK")
        else:
            debugprint("FAIL")
    except GLib.GError as e:
        debugprint("EXCEPTION: %s)" % repr(e))
    try:
        debpkg_path = utils.downloadToTemporaryFile(TEST_URL)
        debugprint("Validating package specifying a local file in %s..." % debpkg_path)
        if validator.run(localfile=debpkg_path):
            debugprint("OK")
        else:
            debugprint("FAIL")
        os.remove(debpkg_path)
    except GLib.GError as e:
        debugprint("EXCEPTION: %s)" % repr(e))
    # Leave no temporary files behind.
    shutil.rmtree(config.TEMPORARY_DIR, ignore_errors=True)
|
endlessm/eos-config-printer
|
pkgvalidator.py
|
Python
|
gpl-2.0
| 7,289
|
# Copyright (C) 2013-2014 Computer Sciences Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gevent import monkey
monkey.patch_all()
import sys
import os
import gevent
import logging
import ezdiscovery
from modules import ezRPNginx
from modules import ezRPParser
from modules import ezRPService
from modules import ezRPConfig as gConfig
from ezconfiguration.EzConfiguration import EzConfiguration
from ezconfiguration.helpers import ZookeeperConfiguration, SystemConfiguration
from ezconfiguration.loaders.PropertiesConfigurationLoader import PropertiesConfigurationLoader
from ezconfiguration.loaders.DirectoryConfigurationLoader import DirectoryConfigurationLoader
from ezconfiguration.constants.EzBakePropertyConstants import EzBakePropertyConstants
from ezconfiguration.security.CryptoImplementations import SharedSecretTextCryptoImplementation
# Module-level logger for the OFE control tool; a file handler using this
# formatter is attached later, in the __main__ block.
logger = logging.getLogger('ofe_control')
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
def log(arg):
    # Simple stdout logging helper (distinct from the `logger` above).
    print(arg)
def getEzSecurityServers():
    """Look up all registered EzbakeSecurityService endpoints via discovery.

    Returns a list of (host, port) tuples; both elements are left as the
    raw strings advertised in the endpoint record.
    """
    ezdiscovery.connect(gConfig.zk)
    endpoints = ezdiscovery.get_common_endpoints('EzbakeSecurityService')
    ezdiscovery.disconnect()
    # Each endpoint is advertised as "host:port"; split on the first colon only.
    return [(host, port) for host, port in
            (endpoint.split(':', 1) for endpoint in endpoints)]
def getEzProperties():
    """Build the effective EzBake property set.

    Loads the stock ezconfiguration defaults, layers directory-based
    overrides on top, then resolves encrypted values through the system
    text-crypto implementation before returning the merged properties.
    """
    # Start from the defaults shipped with ezconfiguration.
    base = EzConfiguration()
    logger.info("loaded default ezconfiguration properties")
    # Re-wrap the defaults and apply any override files found in the
    # configured override directory.
    override_loader = DirectoryConfigurationLoader(gConfig.ezconfig_dir)
    merged = EzConfiguration(
        PropertiesConfigurationLoader(base.getProperties()), override_loader)
    logger.info("loaded property overrides")
    # Obtain the crypto implementation used to decrypt property values;
    # warn when the shared-secret variant is unavailable.
    crypto = SystemConfiguration(merged.getProperties()).getTextCryptoImplementer()
    if not isinstance(crypto, SharedSecretTextCryptoImplementation):
        logger.warn("Couldn't get a SharedSecretTextCryptoImplementation. Is the EZB shared secret set properly?")
    return merged.getProperties(crypto)
if __name__ == '__main__':
    # Parse the control script's command-line options.
    parser = ezRPParser.setupParser()
    args = parser.parse_args()
    # we're going to run everything from within this packaged application
    # so we need to find our own path
    print os.getpid()
    import logging.handlers
    # Log to a WatchedFileHandler so external log rotation is picked up.
    wfh = logging.handlers.WatchedFileHandler(os.path.join(gConfig.logDirectory,'ofe_control.log'))
    wfh.setLevel(logging.INFO)
    wfh.setFormatter(formatter)
    logger.addHandler(wfh)
    # Resolve EzBake configuration (defaults + overrides + decryption).
    gConfig.ezproperties = getEzProperties()
    # Command-line arguments take precedence over configured properties.
    if args.external_hostname is not None:
        gConfig.external_hostname = args.external_hostname
    else:
        gConfig.external_hostname = gConfig.ezproperties['external_hostname']
    if args.internal_hostname is not None:
        gConfig.internal_hostname = args.internal_hostname
    else:
        gConfig.internal_hostname = gConfig.ezproperties['internal_hostname']
    if args.zookeepers is not None:
        gConfig.zk = args.zookeepers
    else:
        gConfig.zk = ZookeeperConfiguration(gConfig.ezproperties).getZookeeperConnectionString()
    if args.port is not None:
        gConfig.thriftPort = args.port
    else:
        gConfig.thriftPort = gConfig.ezproperties['ofe.port']
    # The remaining settings come straight from the property bag.
    gConfig.nginx_worker_username = gConfig.ezproperties['ofe.nginx_worker_username']
    gConfig.https_port = gConfig.ezproperties['ofe.https_port']
    gConfig.http_port = gConfig.ezproperties['ofe.http_port']
    gConfig.max_ca_depth = gConfig.ezproperties['ofe.max_ca_depth']
    # The CRL file is optional; only set it when explicitly configured.
    if 'ofe.crl_file' in gConfig.ezproperties and gConfig.ezproperties['ofe.crl_file'] is not None:
        gConfig.ssl_crl_file = gConfig.ezproperties['ofe.crl_file']
    gConfig.trustedLoadBalancers = gConfig.ezproperties.get('trusted.elastic.load.balancers');
    # Drop the parsed argument dictionary into the global config object
    gConfig.args = args
    if args.no_clean_on_start:
        # simply clean up the config and log directories for this instance
        ezRPNginx.nginx_cleanup_self()
    else:
        # shut down all instances of nginx on this box and clean the
        # config and log directories for this instance
        ezRPNginx.nginx_cleanup()
    # do the basic setup
    ezRPNginx.nginx_basesetup(logger)
    # Run the reverse-proxy service; blocks until shutdown, then clean up
    # this instance's nginx state.
    ezs = ezRPService.EzReverseProxyService(logger)
    ezs.run()
    ezRPNginx.nginx_cleanup_self(masterPID=ezRPNginx.get_nginx_master_pid())
|
ezbake/ezbake-frontend
|
ezReverseProxy/ezReverseProxy.py
|
Python
|
apache-2.0
| 4,998
|
# ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from eos import Drone
from eos import ModuleHigh
from eos import Restriction
from eos import State
from eos.const.eve import AttrId
from tests.integration.restriction.testcase import RestrictionTestCase
class TestMaxGroupActive(RestrictionTestCase):
    """Check functionality of max group active restriction."""

    def setUp(self):
        RestrictionTestCase.setUp(self)
        self.mkattr(attr_id=AttrId.max_group_active)

    def test_fail_all(self):
        # When every fitted item exceeds the group cap, each of them must
        # carry its own error
        shared_type = self.mktype(
            group_id=6,
            attrs={AttrId.max_group_active: 1})
        first = ModuleHigh(shared_type.id, state=State.active)
        self.fit.modules.high.append(first)
        second = ModuleHigh(shared_type.id, state=State.active)
        self.fit.modules.high.append(second)
        # Action
        first_error = self.get_error(first, Restriction.max_group_active)
        # Verification
        self.assertIsNotNone(first_error)
        self.assertEqual(first_error.group_id, 6)
        self.assertEqual(first_error.quantity, 2)
        self.assertEqual(first_error.max_allowed_quantity, 1)
        # Action
        second_error = self.get_error(second, Restriction.max_group_active)
        # Verification
        self.assertIsNotNone(second_error)
        self.assertEqual(second_error.group_id, 6)
        self.assertEqual(second_error.quantity, 2)
        self.assertEqual(second_error.max_allowed_quantity, 1)
        # Cleanup
        self.assert_solsys_buffers_empty(self.fit.solar_system)
        self.assert_log_entries(0)

    def test_mix_one(self):
        # Only the item whose own cap is exceeded errors out, even when
        # both items belong to the same group
        strict = ModuleHigh(
            self.mktype(group_id=92, attrs={AttrId.max_group_active: 1}).id,
            state=State.active)
        self.fit.modules.high.append(strict)
        lenient = ModuleHigh(
            self.mktype(group_id=92, attrs={AttrId.max_group_active: 2}).id,
            state=State.active)
        self.fit.modules.high.append(lenient)
        # Action
        strict_error = self.get_error(strict, Restriction.max_group_active)
        # Verification
        self.assertIsNotNone(strict_error)
        self.assertEqual(strict_error.group_id, 92)
        self.assertEqual(strict_error.quantity, 2)
        self.assertEqual(strict_error.max_allowed_quantity, 1)
        # Action
        lenient_error = self.get_error(lenient, Restriction.max_group_active)
        # Verification
        self.assertIsNone(lenient_error)
        # Cleanup
        self.assert_solsys_buffers_empty(self.fit.solar_system)
        self.assert_log_entries(0)

    def test_pass(self):
        # No errors while the fitted quantity stays within the group cap
        shared_type = self.mktype(
            group_id=860,
            attrs={AttrId.max_group_active: 2})
        first = ModuleHigh(shared_type.id, state=State.active)
        self.fit.modules.high.append(first)
        second = ModuleHigh(shared_type.id, state=State.active)
        self.fit.modules.high.append(second)
        # Action
        first_error = self.get_error(first, Restriction.max_group_active)
        # Verification
        self.assertIsNone(first_error)
        # Action
        second_error = self.get_error(second, Restriction.max_group_active)
        # Verification
        self.assertIsNone(second_error)
        # Cleanup
        self.assert_solsys_buffers_empty(self.fit.solar_system)
        self.assert_log_entries(0)

    def test_pass_item_group_none(self):
        # Items without a group are exempt from the restriction
        shared_type = self.mktype(
            group_id=None,
            attrs={AttrId.max_group_active: 1})
        first = ModuleHigh(shared_type.id, state=State.active)
        self.fit.modules.high.append(first)
        second = ModuleHigh(shared_type.id, state=State.active)
        self.fit.modules.high.append(second)
        # Action
        first_error = self.get_error(first, Restriction.max_group_active)
        # Verification
        self.assertIsNone(first_error)
        # Action
        second_error = self.get_error(second, Restriction.max_group_active)
        # Verification
        self.assertIsNone(second_error)
        # Cleanup
        self.assert_solsys_buffers_empty(self.fit.solar_system)
        self.assert_log_entries(0)

    def test_pass_item_state(self):
        # The restriction only applies to items in at least active state
        shared_type = self.mktype(
            group_id=886,
            attrs={AttrId.max_group_active: 1})
        first = ModuleHigh(shared_type.id, state=State.online)
        self.fit.modules.high.append(first)
        second = ModuleHigh(shared_type.id, state=State.online)
        self.fit.modules.high.append(second)
        # Action
        first_error = self.get_error(first, Restriction.max_group_active)
        # Verification
        self.assertIsNone(first_error)
        # Action
        second_error = self.get_error(second, Restriction.max_group_active)
        # Verification
        self.assertIsNone(second_error)
        # Cleanup
        self.assert_solsys_buffers_empty(self.fit.solar_system)
        self.assert_log_entries(0)

    def test_pass_item_class_other(self):
        # Drones are outside the scope of this restriction
        shared_type = self.mktype(
            group_id=12,
            attrs={AttrId.max_group_active: 1})
        first = Drone(shared_type.id, state=State.active)
        self.fit.drones.add(first)
        second = Drone(shared_type.id, state=State.active)
        self.fit.drones.add(second)
        # Action
        first_error = self.get_error(first, Restriction.max_group_active)
        # Verification
        self.assertIsNone(first_error)
        # Action
        second_error = self.get_error(second, Restriction.max_group_active)
        # Verification
        self.assertIsNone(second_error)
        # Cleanup
        self.assert_solsys_buffers_empty(self.fit.solar_system)
        self.assert_log_entries(0)

    def test_pass_item_not_loaded(self):
        # Items whose type is not loaded cannot be restricted
        module = ModuleHigh(self.allocate_type_id(), state=State.active)
        self.fit.modules.high.append(module)
        # Action
        error = self.get_error(module, Restriction.max_group_active)
        # Verification
        self.assertIsNone(error)
        # Cleanup
        self.assert_solsys_buffers_empty(self.fit.solar_system)
        self.assert_log_entries(0)
|
pyfa-org/eos
|
tests/integration/restriction/restriction/test_max_group_active.py
|
Python
|
lgpl-3.0
| 7,182
|
# vim:tw=50
"""String Escaping
There is no difference between |'| and |"| - they both form
equivalent strings. People usually pick one based on preference,
changing only to include quotes inside, like this:
"Don't touch my quoting."
'I need to "work", now.'
Occasionally, you need to include both kinds of
quotes inside of a string. In these cases, you can
**escape** quotes using a backslash:
"This string contains the \\" delimiter."
Strings accept other escape sequences, like |'\n'|, which inserts
a line feed character, making a new line. More
info can be found here:
http://docs.python.org/2/reference/lexical_analysis.html#string-literals
Exercises
- Try creating a string that contains a backslash:
it will need to be escaped.
"""
# Rebind the module docstring; the replacement text itself demonstrates
# escaping a quote inside a triple-quoted string.
__doc__ = """A demonstration of escape sequences.
This multi-line string is delimited with triple
""\", and tells you that by escaping at least one
of them (otherwise the string would end early).
"""
# Each statement below shows one escape sequence in action (Python 2
# print-statement syntax).
print "This has a double quote \" inside."
print 'This has a single quote \' inside.'
print "This has a second line:\n And this is it."
print
print __doc__ # Where is the backslash?
|
shiblon/pytour
|
tutorials/string_escaping.py
|
Python
|
apache-2.0
| 1,145
|
import zmq
import json
from . import redis
import logging
log = logging.getLogger(__name__)
def utf(val):
    """Encode *val* to UTF-8 bytes when it is a str; pass it through otherwise."""
    return val.encode('utf-8') if isinstance(val, str) else val
def blob(val):
    """Turn *val* into bytes by either UTF-8 encoding or json.dumping it.

    Dicts and lists are JSON-serialized first, then encoded; plain strings
    are encoded directly; anything else is returned untouched.

    In production this can be a bad practice because a pre-serialized JSON
    string cannot be sent as-is (it would just be encoded) — but for this
    example it is convenient, and you always send json objects anyway,
    don't you?
    """
    if isinstance(val, (dict, list)):
        val = json.dumps(val)
    if isinstance(val, str):
        val = val.encode('utf-8')
    return val
def cid(val):
    """Extract a connection id: an object's ``cid`` attribute or raw bytes."""
    try:
        return val.cid
    except AttributeError:
        pass
    assert isinstance(val, (bytes, bytearray)), ("Connection must be bytes "
                                                 "or object having cid property")
    return val
class Loop(object):
    """Central event loop wiring zerogw sockets, named outputs and redises."""

    def __init__(self):
        self._ctx = zmq.Context(1)
        self._poller = zmq.Poller()
        self._handlers = {}
        self._redises = {}
        self._outputs = {}

    def add_service(self, name, obj, **settings):
        """Register *obj* as the handler for messages on a new PULL socket."""
        pull_sock = self._make_socket(zmq.PULL, settings)
        self._poller.register(pull_sock, zmq.POLLIN)
        self._handlers[pull_sock] = obj
        # Let the service grab outputs/redises it needs from this loop.
        obj.configure(self)

    def add_output(self, name, **settings):
        """Create a named Output backed by a new PUB socket."""
        pub_sock = self._make_socket(zmq.PUB, settings)
        self._outputs[name] = Output(pub_sock)

    def add_redis(self, name, *, socket):
        """Register a named redis client talking over the unix *socket* path."""
        self._redises[name] = redis.Redis(socket_path=socket)

    def get(self, name):
        """Return the output or redis registered under *name* (outputs win)."""
        for registry in (self._outputs, self._redises):
            if name in registry:
                return registry[name]
        raise KeyError(name)

    def _make_socket(self, kind, settings):
        """Create a socket of *kind* and apply hwm/bind/connect settings."""
        sock = self._ctx.socket(kind)
        sock.setsockopt(zmq.HWM, settings.get('hwm', 100))
        # TODO(tailhook) implement socket options
        for addr in settings.get('bind', ()):
            sock.bind(addr)
        for addr in settings.get('connect', ()):
            sock.connect(addr)
        return sock

    def run(self):
        """Poll forever, dispatching each multipart message to its handler."""
        while True:
            for ready_sock, _ in self._poller.poll():
                incoming = ready_sock.recv_multipart()
                log.debug("Received from zerogw: %r", incoming)
                self._handlers[ready_sock](incoming)
class Output(object):
    """Command writer over a zerogw PUB socket.

    Each public method frames one zerogw control command as a multipart
    message; the actual wire write happens in ``_do_send``.
    """

    def __init__(self, sock):
        self._sock = sock

    def subscribe(self, conn, topic):
        frames = (b'subscribe', cid(conn), utf(topic))
        self._do_send(frames)

    def unsubscribe(self, conn, topic):
        frames = (b'unsubscribe', cid(conn), utf(topic))
        self._do_send(frames)

    def drop(self, topic):
        frames = (b'drop', utf(topic))
        self._do_send(frames)

    def send(self, conn, data):
        frames = (b'send', cid(conn), blob(data))
        self._do_send(frames)

    def publish(self, topic, data):
        frames = (b'publish', utf(topic), blob(data))
        self._do_send(frames)

    def set_cookie(self, conn, cookie):
        frames = (b'set_cookie', cid(conn), utf(cookie))
        self._do_send(frames)

    def add_output(self, conn, prefix, name):
        frames = (b'add_output', cid(conn), utf(prefix), utf(name))
        self._do_send(frames)

    def del_output(self, conn, prefix, name):
        frames = (b'del_output', cid(conn), utf(prefix), utf(name))
        self._do_send(frames)

    def disconnect(self, conn):
        frames = (b'disconnect', cid(conn))
        self._do_send(frames)

    def _do_send(self, data):
        log.debug("Sending to zerogw: %r", data)
        # TODO(tailhook) handle errors
        self._sock.send_multipart(data)
|
tailhook/debian-zerogw
|
examples/tabbedchat/tabbedchat/loop.py
|
Python
|
mit
| 3,628
|
FILES = [
'../../vhdl_utils/io_utils.vhd',
'../../vhdl_utils/txt_util.vhd',
'../../../luz_uc_rtl/peripherals/memory/sim_memory_onchip_wb.vhd',
'../../../luz_uc_rtl/cpu/defs.vhd',
'../../../luz_uc_rtl/cpu/utils.vhd',
'../../../luz_uc_rtl/cpu/alu.vhd',
'../../../luz_uc_rtl/cpu/controller.vhd',
'../../../luz_uc_rtl/cpu/registers.vhd',
'../../../luz_uc_rtl/cpu/program_counter.vhd',
'../../../luz_uc_rtl/cpu/cpu_top.vhd',
'cpu_top_tb.vhd',
]
##################################################################
def do_compile():
    """Compile the FILES list with VHDLCompiler into the 'work' library.

    Legacy Python 2 code (``except E, err`` / print statements); compile
    errors are reported on stdout rather than re-raised.
    """
    import sys
    sys.path.append('../../python_utils')
    from VHDLCompiler import VHDLCompiler, VHDLCompileError
    # Compiler flags: strict VHDL-2002 into the 'work' library.
    params = '-work work -quiet -pedanticerrors -2002 -explicit'
    # Force recompilation of every file, not just out-of-date ones.
    all = True
    vc = VHDLCompiler()
    try:
        vc.compile_files(FILES, params=params, lib='work', force_compile_all=all)
    except VHDLCompileError, err:
        print '!!!! Compile error !!!!'
        print err
if __name__ == '__main__':
do_compile()
|
eliben/luz-cpu
|
experimental/luz_uc/luz_uc_testbench/cpu/cpu_top/compile.py
|
Python
|
unlicense
| 1,075
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import hashlib
import os
import pkgutil
from pants.backend.python.interpreter_cache import PythonInterpreterCache
from pants.backend.python.subsystems.python_setup import PythonSetup
from pants.backend.python.targets.python_binary import PythonBinary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.targets.python_target import PythonTarget
from pants.backend.python.tasks2.pex_build_util import (dump_requirements, dump_sources,
has_python_requirements, has_python_sources)
from pants.backend.python.tasks2.python_execution_task_base import WrappedPEX
from pants.backend.python.tasks2.resolve_requirements_task_base import ResolveRequirementsTaskBase
from pants.base.exceptions import TaskError
from pants.base.generator import Generator, TemplateData
from pants.base.workunit import WorkUnit, WorkUnitLabel
from pants.python.python_repos import PythonRepos
from pants.util.dirutil import safe_concurrent_creation, safe_mkdir
from pants.util.memo import memoized_method
from pex.pex import PEX
from pex.pex_builder import PEXBuilder
from pex.pex_info import PexInfo
class PythonEval(ResolveRequirementsTaskBase):
    """Checks that python targets' sources (or entry point) import cleanly.

    Each invalid versioned target is built into a PEX whose generated main
    file imports the target's modules; a non-zero exit from that PEX marks
    the target as failed, revealing missing BUILD dependencies.
    """

    class Error(TaskError):
        """A richer failure exception type useful for tests."""

        def __init__(self, *args, **kwargs):
            # 'compiled'/'failed' are consumed here so TaskError does not
            # receive unexpected keyword arguments.
            compiled = kwargs.pop('compiled')
            failed = kwargs.pop('failed')
            super(PythonEval.Error, self).__init__(*args, **kwargs)
            self.compiled = compiled
            self.failed = failed

    _EXEC_NAME = '__pants_executable__'
    _EVAL_TEMPLATE_PATH = os.path.join('templates', 'python_eval', 'eval.py.mustache')

    @classmethod
    def prepare(cls, options, round_manager):
        # We don't need an interpreter selected for all targets in play, so prevent one being selected.
        pass

    @staticmethod
    def _is_evalable(target):
        # Only python libraries and binaries have sources/entry points to eval.
        return isinstance(target, (PythonLibrary, PythonBinary))

    deprecated_options_scope = 'compile.python-eval'
    deprecated_options_scope_removal_version = '1.5.0.dev0'

    @classmethod
    def register_options(cls, register):
        super(PythonEval, cls).register_options(register)
        register('--skip', type=bool,
                 help='If enabled, skip eval of python targets.')
        register('--fail-slow', type=bool,
                 help='Compile all targets and present the full list of errors.')
        register('--closure', type=bool,
                 help='Eval all targets in the closure individually instead of just the targets '
                      'specified on the command line.')

    def execute(self):
        if self.get_options().skip:
            return
        # --closure evals the full transitive closure; default is just roots.
        targets = self.context.targets() if self.get_options().closure else self.context.target_roots
        with self.invalidated(filter(self._is_evalable, targets),
                              invalidate_dependents=True,
                              topological_order=True) as invalidation_check:
            compiled = self._compile_targets(invalidation_check.invalid_vts)
            return compiled  # Collected and returned for tests

    @memoized_method
    def _interpreter_cache(self):
        """Set up (once) and return the python interpreter cache."""
        interpreter_cache = PythonInterpreterCache(PythonSetup.global_instance(),
                                                   PythonRepos.global_instance(),
                                                   logger=self.context.log.debug)
        # Cache setup's requirement fetching can hang if run concurrently by another pants proc.
        self.context.acquire_lock()
        try:
            interpreter_cache.setup()
        finally:
            self.context.release_lock()
        return interpreter_cache

    def _compile_targets(self, invalid_vts):
        """Eval each invalid versioned target, honoring --fail-slow."""
        with self.context.new_workunit(name='eval-targets', labels=[WorkUnitLabel.MULTITOOL]):
            compiled = []
            failed = []
            for vt in invalid_vts:
                target = vt.target
                return_code = self._compile_target(vt)
                if return_code == 0:
                    vt.update()  # Ensure partial progress is marked valid.
                    compiled.append(target)
                else:
                    if self.get_options().fail_slow:
                        failed.append(target)
                    else:
                        # Fail fast: surface the first failing target immediately.
                        raise self.Error('Failed to eval {}'.format(target.address.spec),
                                         compiled=compiled,
                                         failed=[target])
            if failed:
                msg = 'Failed to evaluate {} targets:\n {}'.format(
                    len(failed),
                    '\n '.join(t.address.spec for t in failed))
                raise self.Error(msg, compiled=compiled, failed=failed)
            return compiled

    def _compile_target(self, vt):
        """'Compiles' a python target.

        'Compiling' means forming an isolated chroot of its sources and
        transitive deps and then attempting to import each of the target's
        sources in the case of a python library or else the entry point in
        the case of a python binary.

        For a library with sources lib/core.py and lib/util.py a "compiler"
        main file would look like:

          if __name__ == '__main__':
            import lib.core
            import lib.util

        For a binary with entry point lib.bin:main the "compiler" main file
        would look like:

          if __name__ == '__main__':
            from lib.bin import main

        In either case the main file is executed within the target chroot to
        reveal missing BUILD dependencies.
        """
        target = vt.target
        with self.context.new_workunit(name=target.address.spec):
            modules = self._get_modules(target)
            if not modules:
                # Nothing to eval, so a trivial compile success.
                return 0
            interpreter = self._get_interpreter_for_target_closure(target)
            reqs_pex = self._resolve_requirements_for_versioned_target_closure(interpreter, vt)
            srcs_pex = self._source_pex_for_versioned_target_closure(interpreter, vt)
            # Create the executable pex.
            exec_pex_parent = os.path.join(self.workdir, 'executable_pex')
            executable_file_content = self._get_executable_file_content(exec_pex_parent, modules)
            # The generated main file's hash keys the executable PEX so it can
            # be reused across runs.
            hasher = hashlib.sha1()
            hasher.update(executable_file_content)
            exec_file_hash = hasher.hexdigest()
            exec_pex_path = os.path.realpath(os.path.join(exec_pex_parent, exec_file_hash))
            if not os.path.isdir(exec_pex_path):
                with safe_concurrent_creation(exec_pex_path) as safe_path:
                    # Write the entry point.
                    safe_mkdir(safe_path)
                    with open(os.path.join(safe_path, '{}.py'.format(self._EXEC_NAME)), 'w') as outfile:
                        outfile.write(executable_file_content)
                    pex_info = (target.pexinfo if isinstance(target, PythonBinary) else None) or PexInfo()
                    # Override any user-specified entry point, under the assumption that the
                    # executable_file_content does what the user intends (including, probably, calling that
                    # underlying entry point).
                    pex_info.entry_point = self._EXEC_NAME
                    builder = PEXBuilder(safe_path, interpreter, pex_info=pex_info)
                    builder.freeze()
            exec_pex = PEX(exec_pex_path, interpreter)
            extra_pex_paths = [pex.path() for pex in filter(None, [reqs_pex, srcs_pex])]
            pex = WrappedPEX(exec_pex, extra_pex_paths, interpreter)
            with self.context.new_workunit(name='eval',
                                           labels=[WorkUnitLabel.COMPILER, WorkUnitLabel.RUN,
                                                   WorkUnitLabel.TOOL],
                                           cmd=' '.join(exec_pex.cmdline())) as workunit:
                returncode = pex.run(stdout=workunit.output('stdout'), stderr=workunit.output('stderr'))
                workunit.set_outcome(WorkUnit.SUCCESS if returncode == 0 else WorkUnit.FAILURE)
                if returncode != 0:
                    self.context.log.error('Failed to eval {}'.format(target.address.spec))
                return returncode

    @staticmethod
    def _get_modules(target):
        """Derive the import statements the generated main file must contain."""
        modules = []
        if isinstance(target, PythonBinary):
            source = 'entry_point {}'.format(target.entry_point)
            components = target.entry_point.rsplit(':', 1)
            if not all([x.strip() for x in components]):
                raise TaskError('Invalid entry point {} for target {}'.format(
                    target.entry_point, target.address.spec))
            module = components[0]
            if len(components) == 2:
                # Entry point of the form 'module:function'.
                function = components[1]
                data = TemplateData(source=source,
                                    import_statement='from {} import {}'.format(module, function))
            else:
                data = TemplateData(source=source, import_statement='import {}'.format(module))
            modules.append(data)
        else:
            for path in target.sources_relative_to_source_root():
                if path.endswith('.py'):
                    if os.path.basename(path) == '__init__.py':
                        # Packages are imported via their directory name.
                        module_path = os.path.dirname(path)
                    else:
                        module_path, _ = os.path.splitext(path)
                    source = 'file {}'.format(os.path.join(target.target_base, path))
                    module = module_path.replace(os.path.sep, '.')
                    if module:
                        data = TemplateData(source=source, import_statement='import {}'.format(module))
                        modules.append(data)
        return modules

    def _get_executable_file_content(self, exec_pex_parent, modules):
        # Render the mustache template into the generated main file's source.
        generator = Generator(pkgutil.get_data(__name__, self._EVAL_TEMPLATE_PATH),
                              chroot_parent=exec_pex_parent, modules=modules)
        return generator.render()

    def _get_interpreter_for_target_closure(self, target):
        targets = [t for t in target.closure() if isinstance(t, PythonTarget)]
        return self._interpreter_cache().select_interpreter_for_targets(targets)

    def _resolve_requirements_for_versioned_target_closure(self, interpreter, vt):
        """Build (or reuse) a PEX holding the requirements of vt's closure."""
        reqs_pex_path = os.path.realpath(os.path.join(self.workdir, str(interpreter.identity),
                                                      vt.cache_key.hash))
        if not os.path.isdir(reqs_pex_path):
            req_libs = filter(has_python_requirements, vt.target.closure())
            with safe_concurrent_creation(reqs_pex_path) as safe_path:
                builder = PEXBuilder(safe_path, interpreter=interpreter, copy=True)
                dump_requirements(builder, interpreter, req_libs, self.context.log)
                builder.freeze()
        return PEX(reqs_pex_path, interpreter=interpreter)

    def _source_pex_for_versioned_target_closure(self, interpreter, vt):
        """Build (or reuse) a PEX holding the sources of vt's closure."""
        source_pex_path = os.path.realpath(os.path.join(self.workdir, vt.cache_key.hash))
        if not os.path.isdir(source_pex_path):
            with safe_concurrent_creation(source_pex_path) as safe_path:
                self._build_source_pex(interpreter, safe_path, vt.target.closure())
        return PEX(source_pex_path, interpreter=interpreter)

    def _build_source_pex(self, interpreter, path, targets):
        builder = PEXBuilder(path=path, interpreter=interpreter, copy=True)
        for target in targets:
            if has_python_sources(target):
                dump_sources(builder, target, self.context.log)
        builder.freeze()
|
pombredanne/pants
|
contrib/python/src/python/pants/contrib/python/checks/tasks2/python_eval.py
|
Python
|
apache-2.0
| 11,131
|
from anchore_engine.subsys import logger
from anchore_engine.subsys.auth.realms import CaseSensitivePermission
logger.enable_test_logging()
def test_anchore_permissions():
    """
    Test permission comparisons with mixed-case, wild-cards, etc
    :return:
    """
    logger.info("Testing permission wildcard matches and mixed-case comparisions")

    def perm(spec, **kwargs):
        # Small factory to keep the comparisons below readable.
        return CaseSensitivePermission(wildcard_string=spec, **kwargs)

    # Default, case-sensitive, exact match
    assert perm("Account1:listImages:*").implies(perm("Account1:listImages:*"))

    # Ignore case
    assert perm("account1:listImages:*", case_sensitive=False).implies(
        perm("Account1:listImages:*", case_sensitive=False)
    )

    # Mixed case, mismatch
    assert not perm("account1:listImages:*").implies(perm("Account1:listImages:*"))
|
anchore/anchore-engine
|
tests/unit/anchore_engine/subsys/auth/test_permissions.py
|
Python
|
apache-2.0
| 1,020
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StorageAccountKeys(Model):
    """The access keys for the storage account.

    :param key1: Gets the value of key 1.
    :type key1: str
    :param key2: Gets the value of key 2.
    :type key2: str
    """

    # msrest serialization map: attribute name -> wire key and type.
    _attribute_map = {
        'key1': {'key': 'key1', 'type': 'str'},
        'key2': {'key': 'key2', 'type': 'str'},
    }

    def __init__(self, key1=None, key2=None):
        # NOTE: AutoRest-generated code; manual edits are lost on regeneration.
        self.key1 = key1
        self.key2 = key2
|
sergey-shandar/autorest
|
Samples/2a-validation/Python/storage/models/storage_account_keys.py
|
Python
|
mit
| 800
|
from libcloud.container.types import Provider
from libcloud.container.providers import get_driver

cls = get_driver(Provider.KUBERNETES)

# Three alternative ways to authenticate; each rebinds `conn`, so only the
# last driver instance (basic auth) is used by the calls below.

# 1. Client side cert auth
conn = cls(host='192.168.99.103',
           port=8443,
           secure=True,
           key_file='/home/user/.minikube/client.key',
           cert_file='/home/user/.minikube/client.crt',
           ca_cert='/home/user/.minikube/ca.crt')

# 2. Bearer bootstrap token auth
conn = cls(key='my_token',
           host='126.32.21.4',
           ex_token_bearer_auth=True)

# 3. Basic auth
conn = cls(key='my_username',
           secret='THIS_IS)+_MY_SECRET_KEY+I6TVkv68o4H',
           host='126.32.21.4')

# List the containers and clusters visible to the authenticated driver.
for container in conn.list_containers():
    print(container.name)

for cluster in conn.list_clusters():
    print(cluster.name)
|
Kami/libcloud
|
docs/examples/container/kubernetes/instantiate_driver.py
|
Python
|
apache-2.0
| 801
|
from backend import db
from . import models
from cruds.crud_courses.models import Courses
from cruds.crud_course_section_students.models import CourseSectionStudents
from cruds.crud_course_sections.models import CourseSections
from cruds.crud_users.models import Users
from sqlalchemy import or_, func
class Manager:
    """Query helpers joining students, course sections, courses and programs.

    All methods issue SQLAlchemy queries over the CRUD models and return
    raw query results (rows or model instances).
    """

    def course_times_by_student(self, course, student):
        """Count how many section enrollments *student* has for *course*.

        Only enrollments with status 2 or 3 are counted.
        NOTE(review): the meaning of status codes 2/3 is not visible here —
        confirm against the CourseSectionStudents model.
        Returns a single row with the count, or None when no rows match.
        """
        return (db.session.query(func.count(CourseSectionStudents.id)).
                filter(CourseSectionStudents.course_section_id == CourseSections.id).
                filter(CourseSections.course_id == Courses.id).
                filter(Courses.program_id == models.Program.id).
                filter(models.Program.id == student.program_id).
                filter(CourseSectionStudents.user_id == student.id).
                filter(Courses.id == course.id).
                filter(or_ (CourseSectionStudents.status == 2,
                            CourseSectionStudents.status == 3)).
                group_by(Courses.code,Courses.program_section).order_by(Courses.program_section).first())

    def hours_and_credits_completed(self, student):
        """Sum hours and credits over *student*'s status-2 enrollments.

        Returns a (sum_hours, sum_credits) row, or None when nothing matches.
        """
        return (db.session.query(func.sum(Courses.hours), func.sum(Courses.credits)).
                filter(CourseSectionStudents.course_section_id == CourseSections.id).
                filter(CourseSections.course_id == Courses.id).
                filter(Courses.program_id == models.Program.id).
                filter(models.Program.id == student.program_id).
                filter(CourseSectionStudents.user_id == student.id).
                filter(CourseSectionStudents.status == 2).
                group_by(models.Program.id).first())

    def last_course_section_student(self, course, student):
        """Return *student*'s most recent enrollment record for *course*.

        Ordered by course_section_period descending; None when none exists.
        """
        return (db.session.query(CourseSectionStudents).
                filter(CourseSectionStudents.course_section_id == CourseSections.id).
                filter(CourseSections.course_id == Courses.id).
                filter(Courses.program_id == models.Program.id).
                filter(models.Program.id == student.program_id).
                filter(CourseSectionStudents.user_id == student.id).
                filter(Courses.id == course.id).
                order_by(CourseSections.course_section_period.desc()).first())

    def programs_users(self, program_id, user_type):
        """List all users of *user_type* enrolled in program *program_id*."""
        return (db.session.query(Users).
                filter(models.Program.id==Users.program_id).
                filter(models.Program.id==program_id).
                filter(Users.type==user_type).all())
|
sandroandrade/emile-server
|
cruds/crud_program/manager.py
|
Python
|
gpl-3.0
| 3,163
|
from __future__ import unicode_literals
import argparse
import os
import io
import itertools
import csv
import shutil
from django.db import connection
from django.core.management.base import BaseCommand
from django.core.management import settings
from django.core import management
from odm2admin.models import Dataloggerfiles
from templatesAndSettings.settings import exportdb
#os.environ.setdefault("DJANGO_SETTINGS_MODULE", "templatesAndSettings.settings.exportdb")
os.environ['DJANGO_SETTINGS_MODULE'] = "templatesAndSettings.settings.exportdb"
__author__ = 'leonmi'
parser = argparse.ArgumentParser(description='this command will create an sqlite database dump of the provided JSON files.')
# just passing database='export' to loaddata doesn't work because we need to tell models.py to use the correct table names.
class Command(BaseCommand):
    """Load two JSON fixture dumps into an sqlite export database file."""

    def add_arguments(self, parser):
        # Positional arguments: two fixture files and the target sqlite path.
        parser.add_argument('jsonfile1', nargs=1, type=str)
        parser.add_argument('jsonfile2', nargs=1, type=str)
        parser.add_argument('dbfile', nargs=1, type=str)

    def handle(self, *args, **options):  # (f,fileid, databeginson,columnheaderson, cmd):
        # cmdline = bool(options['cmdline'][0])
        try:
            # nargs=1 yields one-element lists; unwrap each argument.
            jsonfile1 = str(options['jsonfile1'][0])
            jsonfile2 = str(options['jsonfile2'][0])
            dbfile = str(options['dbfile'][0])
            self.stdout.write('start data load')
            # print('start data load')
            #f = open('/home/azureadmin/webapps/logs/logfile.txt', 'rw')
            #f.write('start load data')
            #f.write(jsonfile1,jsonfile2)
            # Point the export settings at the requested sqlite file before
            # loading the fixtures.
            exportdb.DATABASES['default']['NAME'] = dbfile
            # print(connection.settings_dict['NAME'])
            #f.close()
            management.call_command('loaddata',jsonfile1)  # ,database='export'
            management.call_command('loaddata',jsonfile2)
            # print('end data load')
        except Exception as e:
            # NOTE(review): returning the exception instead of raising hides
            # failures from Django's error handling — confirm this is intended.
            return e
|
miguelcleon/ODM2-Admin
|
odm2admin/management/commands/create_sqlite_export.py
|
Python
|
mit
| 1,980
|
# -*- coding: utf-8 -*-
#
# QuBricks documentation build configuration file, created by
# sphinx-quickstart2 on Thu Mar 12 13:10:30 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Make the package under documentation and any local extensions importable.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    # 'numfig' is not a sphinx.ext.* builtin; it must be importable from
    # one of the sys.path entries inserted above.
    'numfig'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'QuBricks'
copyright = u'2015, Matthew Wardrop'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# The documented package itself is the single source of truth for the
# version string (requires qubricks to be importable at doc-build time).
import qubricks
version = qubricks.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'QuBricksdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'QuBricks.tex', u'QuBricks Documentation',
   u'Matthew Wardrop', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "_static/qubricks_logo.pdf"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'qubricks', u'QuBricks Documentation',
     [u'Matthew Wardrop'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'QuBricks', u'QuBricks Documentation',
   u'Matthew Wardrop', 'QuBricks', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
matthewwardrop/python-qubricks
|
docs/conf.py
|
Python
|
mit
| 8,595
|
# coding=utf-8
# NOTE(review): non-PEP8 class name kept as-is — renaming would break importers.
class bbs_property():
    """Shared connection constants for the BBS requests demo."""
    # Default request headers; empty here, filled in by callers as needed.
    headers= {
    }
    # host:port of the BBS server on the internal network.
    host = '172.31.3.73:6020'
|
NJ-zero/Android
|
requests_demo/bbs/bbs_property.py
|
Python
|
mit
| 89
|
r'''
Fold the logo of Engineered Folding Research Center
---------------------------------------------------
This example shows one possible folding simulation of this crease pattern.
The target face is defined as horizontal plane at the height 8
and nodes [0,1,2] are involved in the minimum distance criterion.
'''
from oricreate.gu import GuConstantLength, GuDofConstraints, fix
from oricreate.simulation_step import \
SimulationStep, SimulationConfig
def create_cp_factory():
    """Build a CustomCPFactory for a 2x1 quad crease pattern.

    Each quad facet gets a midpoint node dropped 1.0 below the facet plane,
    and the quads are triangulated so the facets are rigid; node 5 is nudged
    slightly out of plane (u[5, 2] = 0.01) to seed the fold direction.
    """
    # begin
    import numpy as np
    from oricreate.api import CreasePatternState, CustomCPFactory
    # Six nodes forming a flat 2x1 grid of unit quads.
    x = np.array([[0, 0, 0],
                  [1, 0, 0],
                  [2, 0, 0],
                  [0, 1, 0],
                  [1, 1, 0],
                  [2, 1, 0],
                  ], dtype='float_')
    # Boundary and interior crease lines of the two quads.
    L = np.array([[0, 1], [1, 2],
                  [3, 4], [4, 5],
                  [0, 3], [1, 4], [2, 5]],
                 dtype='int_')
    F = np.array([[0, 1, 4, 3],
                  [1, 2, 5, 4],
                  ], dtype='int_')
    # One midpoint per facet (diagonal average), offset below the plane.
    x_mid = (x[F[:, 1]] + x[F[:, 3]]) / 2.0
    x_mid[:, 2] -= 1.0
    n_F = len(F)
    n_x = len(x)
    # Indices of the new midpoint nodes, appended after the original nodes.
    x_mid_i = np.arange(n_x, n_x + n_F)
    # Connect each quad corner to its facet midpoint.
    L_mid = np.array([[F[:, 0], x_mid_i[:]],
                      [F[:, 1], x_mid_i[:]],
                      [F[:, 2], x_mid_i[:]],
                      [F[:, 3], x_mid_i[:]]])
    L_mid = np.vstack([L_mid[0].T, L_mid[1].T, L_mid[2].T, L_mid[3].T])
    x_derived = np.vstack([x, x_mid])
    # Add the quad diagonals and midpoint links; split quads into triangles.
    L_derived = np.vstack([L, F[:, (1, 3)], L_mid])
    F_derived = np.vstack([F[:, (0, 1, 2)], F[:, (0, 2, 3)]])
    cp = CreasePatternState(X=x_derived,
                            L=L_derived,
                            F=F_derived
                            )
    # Small out-of-plane perturbation of node 5 to break the flat symmetry.
    cp.u[5, 2] = 0.01
    cp_factory = CustomCPFactory(formed_object=cp)
    # end
    return cp_factory
if __name__ == '__main__':
    import mayavi.mlab as m
    cp_factory = create_cp_factory()
    cp = cp_factory.formed_object
    # Link the crease factory it with the constraint client
    gu_constant_length = GuConstantLength()
    # Pin node 0 fully, node 1 in y/z, node 3 in z; drive node 5's z to 0.5
    # over pseudo-time t to fold the pattern.
    dof_constraints = fix([0], [0, 1, 2]) + fix([1], [1, 2]) + fix([3], [2]) + \
        fix([5], 2, lambda t: 0.5)
    gu_dof_constraints = GuDofConstraints(dof_constraints=dof_constraints)
    sim_config = SimulationConfig(gu={'cl': gu_constant_length,
                                      'dofs': gu_dof_constraints},
                                  acc=1e-5, MAX_ITER=10)
    sim_step = SimulationStep(forming_task=cp_factory,
                              config=sim_config)
    # Solve one Newton-Raphson step for the configured constraints.
    sim_step._solve_nr()
    # 3D view of the folded state (Mayavi), then a 2D facet plot (matplotlib).
    m.figure(bgcolor=(1.0, 1.0, 1.0), fgcolor=(0.6, 0.6, 0.6))
    cp.plot_mlab(m, nodes=True, lines=True)
    m.show()
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    cp.plot_mpl(ax, facets=True)
    plt.tight_layout()
    plt.show()
|
simvisage/oricreate
|
docs/howtos/ex08_rigid_facets/sim02_single_fold_quad.py
|
Python
|
gpl-3.0
| 2,817
|
from context import nflinterface
import pytest
import tempfile
from flask import json
@pytest.fixture
def client(request):
    """Flask test client for the nflinterface app, with TESTING enabled."""
    app = nflinterface.app
    app.config['TESTING'] = True
    return app.test_client()
def test_app():
    """The Flask application object must exist on import."""
    # Use an identity check against None (PEP 8), not `!= None`.
    assert nflinterface.app is not None
def test_home(client):
    """GET / should serve the nflstats home page."""
    resp = client.get('/', follow_redirects=True)
    assert b'nflstats' in resp.data
def test_get_player(client):
    """POST /get_player returns Drew Brees' player profile."""
    body = json.dumps({
        'last_name': 'Brees',
        'first_name': 'Drew',
        'team': 'NO'})
    json_data = json.loads(client.post('/get_player', data=body).data)
    assert json_data['position'] == 'QB'
    assert json_data['status'] == 'Active'
    assert json_data['full_name'] == 'Drew Brees'
def test_get_player_all_time_stats(client):
    """Career-total stats lookup by player id."""
    body = json.dumps({'id': '00-0022803'})
    stats = json.loads(client.post('/get_player_all_time_stats', data=body).data)
    assert stats[0]['passing_tds'] == 205
def test_get_player_stats_for_year(client):
    """Single-season stats lookup by name, team and year."""
    body = json.dumps({
        'last_name': 'Manning',
        'first_name': 'Eli',
        'team': 'NYG',
        'year': 2015})
    stats = json.loads(client.post('/get_player_stats_for_year', data=body).data)
    assert stats[0]['passing_tds'] == 35
def test_get_player_all_time_stats_by_year(client):
    """Year-by-year stats lookup by player id; index 6 is the 2015 season."""
    payload = {
        'id':'00-0022803'
    }
    response = client.post('/get_player_all_time_stats_by_year', data=json.dumps(payload))
    json_data = json.loads(response.data)
    # BUG FIX: `print len(json_data)` was Python 2 statement syntax and is a
    # SyntaxError under Python 3, which made this whole module uncollectable.
    print(len(json_data))
    assert json_data[6]['passing_tds'] == 35
    assert json_data[6]['year'] == 2015
def test_get_team_roster(client):
    """Roster lookup for 'NO' returns active New Orleans players."""
    body = json.dumps({
        'team': 'NO'
    })
    roster = json.loads(client.post('/get_team_roster', data=body).data)
    assert roster[0]['team'] == 'NO'
    assert roster[0]['status'] == 'Active'
def test_fuzzy_player_search(client):
    """A misspelled, mis-cased name should still match Peyton Manning."""
    body = json.dumps({
        'name': 'peyton Mannz'
    })
    players = json.loads(client.post('/fuzzy_player_search', data=body).data)
    assert players[0]['full_name'] == 'Peyton Manning'
def test_get_all_names(client):
    """The full player-name list has the expected fixture size."""
    names = json.loads(client.post('/get_all_names').data)
    assert len(names) == 6778
def test_get_player_from_id(client):
    """Player profile lookup by id."""
    body = json.dumps({
        'id': '00-0027685'
    })
    player = json.loads(client.post('/get_player_from_id', data=body).data)
    assert player['first_name'] == 'Emmanuel'
|
strandx/nflstats
|
tests/test_nflinterface.py
|
Python
|
mit
| 2,644
|
# Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This project incorporates work covered by the following copyright and permission notice:
# Copyright (c) 2009, Julien Fache
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the author nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
"""Views for Gstudio nodetypes"""
from django.shortcuts import redirect
from django.shortcuts import get_object_or_404
from django.views.generic.list_detail import object_list
from django.views.generic.date_based import archive_year
from django.views.generic.date_based import archive_month
from django.views.generic.date_based import archive_day
from django.views.generic.date_based import object_detail
from gstudio.models import Nodetype
from gstudio.views.decorators import protect_nodetype
from gstudio.views.decorators import update_queryset
# Generic list/date-based views wrapped by update_queryset so the queryset
# callable (Nodetype.objects.all / Nodetype.published.all) is re-evaluated
# on each request; the detail view is access-protected.
nodetype_index = update_queryset(object_list, Nodetype.objects.all)
nodetype_year = update_queryset(archive_year, Nodetype.published.all)
nodetype_month = update_queryset(archive_month, Nodetype.published.all)
nodetype_day = update_queryset(archive_day, Nodetype.published.all)
nodetype_detail = protect_nodetype(object_detail)
def nodetype_shortlink(request, object_id):
    """Permanently redirect to the absolute URL of the Nodetype whose
    primary key is ``object_id``, or raise Http404 if it does not exist."""
    target = get_object_or_404(Nodetype, pk=object_id)
    return redirect(target, permanent=True)
|
gnowledge/ncert_nroer
|
gstudio/views/nodetypes1.py
|
Python
|
agpl-3.0
| 3,600
|
"""
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup

# py2app bundling configuration for the Burner desktop app.
APP = ['Burner.py']
DATA_FILES = []
# NOTE(review): py2app's 'packages' option expects *import* names —
# 'pyserial' imports as 'serial' and 'wxPython' as 'wx'; confirm these
# entries against the build environment.
PACKAGES = ['argparse', 'pyserial', 'wxPython']
# BUG FIX: PACKAGES was defined but never handed to py2app, so the listed
# packages were not bundled; pass it through the py2app OPTIONS dict.
OPTIONS = {'argv_emulation': False, 'iconfile': 'Burner.icns',
           'packages': PACKAGES}
setup(
    app=APP,
    data_files=DATA_FILES,
    options={'py2app': OPTIONS},
    setup_requires=['py2app'],
)
|
atom3dp/Burner
|
setup_mac.py
|
Python
|
gpl-3.0
| 383
|
"""
Main routine of GSEA.
"""
import sys
import config
from analysis import Analysis
def main(args=None):
    """Run the GSEA analysis.

    Args:
        args: [expression_file, gene_set_file]; defaults to sys.argv[1:].

    Exits with a usage message when fewer than two arguments are given
    (previously this crashed with an opaque IndexError).
    """
    # Determine filenames.
    if args is None:
        args = sys.argv[1:]
    if len(args) < 2:
        sys.exit("usage: gsea <expression_file> <gene_set_file>")
    out_name = 'results.csv'
    # Analyze the data and write the results.
    A = Analysis(args[0], args[1], config.path['input'], config.analysis)
    A.analyzefiles(out_name, config.path['output'])

if __name__ == "__main__":
    main()
|
tristanbrown/gsea
|
gsea/__main__.py
|
Python
|
mit
| 446
|
# Copyright 2020 Alfredo de la fuente - AvanzOSC
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
# Odoo addon manifest for "Stock Production Lot With Partner".
{
    "name": "Stock Production Lot With Partner",
    # Version scheme: Odoo series (12.0) + module version (1.0.0).
    "version": "12.0.1.0.0",
    "category": "Customized modules",
    "license": "AGPL-3",
    "author": "AvanzOSC",
    "website": "http://www.avanzosc.es",
    # Requires both sale and purchase stock flows.
    "depends": [
        "sale_stock",
        "purchase_stock",
    ],
    # View definitions loaded on install/upgrade.
    "data": [
        "views/stock_production_lot_views.xml",
    ],
    "installable": True,
}
|
oihane/odoo-addons
|
stock_production_lot_with_partner/__manifest__.py
|
Python
|
agpl-3.0
| 497
|
###############################################################################
# Copyright (C) 2008 Johann Haarhoff <johann.haarhoff@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of Version 2 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
###############################################################################
#
# Originally written:
# 2008 Johann Haarhoff, <johann.haarhoff@gmail.com>
# Modifications:
#
###############################################################################
import sys, string
from xml.sax import saxutils, handler, make_parser
#
# define the errors we use
#
class Error(Exception):
    """Base class for all xmlwriter exceptions."""
class OutOfOrderError(Error):
    """Raised when an XML tag is closed before all of its children are closed."""
class TagNotOpenError(Error):
    """Raised when attempting to close an XML tag that was never opened."""
class BasicXMLWriter(handler.ContentHandler):
    """
    SAX ContentHandler that writes indented XML to a stream.

    Elements are opened on a new line at the current indent level; text
    content suppresses the newline before the matching closing tag so that
    `<a>text</a>` stays on one line.
    """
    def __init__(self, out = sys.stdout,indentstr = '\t'):
        """Write to `out` (default stdout), indenting with `indentstr`."""
        handler.ContentHandler.__init__(self)
        self._out = out
        self._indentlevel = 0
        # Truthy when the next closing tag should start on its own line.
        self._printnl = 1
        self._indentstr = indentstr
    # ContentHandler methods
    def startDocument(self):
        self._out.write('<?xml version="1.0" encoding="iso-8859-1"?>\n')
    def startElement(self, name, attrs):
        self._out.write('\n'+self._indentstr*self._indentlevel + '<' + name)
        # BUG FIX: the loop variable used to shadow the `name` parameter, and
        # saxutils.escape() alone does not escape '"', producing malformed XML
        # for attribute values containing double quotes.
        for (attrname, value) in attrs.items():
            self._out.write(' %s="%s"' % (attrname,
                                          saxutils.escape(value, {'"': '&quot;'})))
        self._out.write('>')
        self._indentlevel = self._indentlevel + 1
    def endElement(self, name):
        self._indentlevel = self._indentlevel - 1
        if self._printnl:
            self._out.write('\n' + self._indentstr*self._indentlevel + '</%s>' % name)
        else:
            self._out.write('</%s>' % name)
        self._printnl = 1
    def characters(self, content):
        self._out.write(saxutils.escape(content))
        self._printnl = 0
    def ignorableWhitespace(self, content):
        self._out.write(content)
    def processingInstruction(self, target, data):
        self._out.write('<?%s %s?>' % (target, data))
class BetterXMLWriter(BasicXMLWriter):
    """
    Writer that tracks open tags to keep the XML relatively intact: it
    raises on closing tags out of order or closing tags that were never
    opened, and endDocument() closes any remaining open tags in the
    correct order.
    """
    def __init__(self, out = sys.stdout,indentstr = '\t'):
        BasicXMLWriter.__init__(self,out,indentstr)
        # Stack of currently open element names, innermost last.
        self._openElements = []
    def openElement(self, name, attrs=None):
        """Open `name` with optional `attrs` and push it on the tag stack.

        BUG FIX: the default was a mutable dict literal (`attrs={}`); a
        fresh empty dict is now used on each call instead.
        """
        self.startElement(name, {} if attrs is None else attrs)
        self._openElements.append(name)
    def closeLast(self):
        """
        Closes the last open tag
        """
        self.endElement(self._openElements.pop())
    def closeElement(self, name):
        """Close `name`; raise TagNotOpenError if it is not open, or
        OutOfOrderError if it is not the innermost open tag."""
        if name not in self._openElements:
            raise TagNotOpenError()
        if name != self._openElements[-1]:
            raise OutOfOrderError()
        self.endElement(name)
        self._openElements.pop()
    def addData(self, content):
        """
        this is truly just here because i think addData()
        is more descriptive than characters
        """
        self.characters(content)
    def addCData(self, content):
        """
        allows us to add CDATA sections
        """
        self._out.write('<![CDATA[')
        self._out.write(content)
        self._out.write(']]>')
    def endDocument(self):
        # Close every still-open tag, innermost first.
        while self._openElements:
            self.endElement(self._openElements.pop())
|
djhenderson/shape2ge
|
src/xmlwriter.py
|
Python
|
gpl-2.0
| 4,246
|
import sys
import base64
import os
import pytest
import shutil
import subprocess
import yaml
# Prefer PyYAML's C-accelerated safe loader/dumper when the library was
# built against libyaml; accessing the missing CSafe* attributes raises
# AttributeError, in which case the pure-Python versions are kept.
yaml_loader = yaml.SafeLoader
yaml_dumper = yaml.SafeDumper
try:
    yaml_loader = yaml.CSafeLoader
    yaml_dumper = yaml.CSafeDumper
except AttributeError:
    pass
from typing import Any, ClassVar, Dict, List, Optional, Sequence
from typing import cast as typecast
from kat.harness import abstract_test, sanitize, Name, Node, Test, Query, load_manifest
from kat.utils import ShellCommand
RBAC_CLUSTER_SCOPE = load_manifest("rbac_cluster_scope")
RBAC_NAMESPACE_SCOPE = load_manifest("rbac_namespace_scope")
AMBASSADOR = load_manifest("ambassador")
BACKEND = load_manifest("backend")
GRPC_ECHO_BACKEND = load_manifest("grpc_echo_backend")
AUTH_BACKEND = load_manifest("auth_backend")
GRPC_AUTH_BACKEND = load_manifest("grpc_auth_backend")
GRPC_RLS_BACKEND = load_manifest("grpc_rls_backend")
AMBASSADOR_LOCAL = """
---
apiVersion: v1
kind: Secret
metadata:
name: {self.path.k8s}
annotations:
kubernetes.io/service-account.name: {self.path.k8s}
type: kubernetes.io/service-account-token
"""
def assert_default_errors(errors, include_ingress_errors=True):
    """Assert that `errors` begins with the standard startup error table and
    that every remaining entry is a 'found invalid port' error."""
    expected = [
        ["",
         "Ambassador could not find core CRD definitions. Please visit https://www.getambassador.io/docs/edge-stack/latest/topics/install/upgrade-to-edge-stack/#5-update-and-restart for more information. You can continue using Ambassador via Kubernetes annotations, any configuration via CRDs will be ignored..."],
        ["",
         "Ambassador could not find Resolver type CRD definitions. Please visit https://www.getambassador.io/docs/edge-stack/latest/topics/install/upgrade-to-edge-stack/#5-update-and-restart for more information. You can continue using Ambassador via Kubernetes annotations, any configuration via CRDs will be ignored..."],
        ["",
         "Ambassador could not find the Host CRD definition. Please visit https://www.getambassador.io/docs/edge-stack/latest/topics/install/upgrade-to-edge-stack/#5-update-and-restart for more information. You can continue using Ambassador via Kubernetes annotations, any configuration via CRDs will be ignored..."],
        ["",
         "Ambassador could not find the LogService CRD definition. Please visit https://www.getambassador.io/docs/edge-stack/latest/topics/install/upgrade-to-edge-stack/#5-update-and-restart for more information. You can continue using Ambassador via Kubernetes annotations, any configuration via CRDs will be ignored..."]
    ]
    if include_ingress_errors:
        expected.append(
            ["",
             "Ambassador is not permitted to read Ingress resources. Please visit https://www.getambassador.io/docs/edge-stack/latest/topics/running/ingress-controller/#ambassador-as-an-ingress-controller for more information. You can continue using Ambassador, but Ingress resources will be ignored..."
            ]
        )
    n_expected = len(expected)
    assert errors[:n_expected] == expected, f"default error table mismatch: got\n{errors}"
    for extra in errors[n_expected:]:
        assert 'found invalid port' in extra[1], "Could not find 'found invalid port' in the error {}".format(extra[1])
DEV = os.environ.get("AMBASSADOR_DEV", "0").lower() in ("1", "yes", "true")
@abstract_test
class AmbassadorTest(Test):
    """
    AmbassadorTest is a top level ambassador test.

    It renders the Ambassador deployment manifests (cluster- or
    namespace-scoped RBAC plus the Ambassador pod), optionally runs the
    image locally in Docker when DEV is set, and yields the readiness /
    liveness URL checks every Ambassador instance must satisfy.
    """
    OFFSET: ClassVar[int] = 0
    IMAGE_BUILT: ClassVar[bool] = False
    _index: Optional[int] = None
    _ambassador_id: Optional[str] = None
    single_namespace: bool = False
    disable_endpoints: bool = False
    name: Name
    path: Name
    extra_ports: Optional[List[int]] = None
    debug_diagd: bool = True
    debug_envoy: bool = False
    manifest_envs = ""
    is_ambassador = True
    allow_edge_stack_redirect = False
    edge_stack_cleartext_host = True
    envoy_api_version = None
    # NOTE(review): mutable class-level default shared by all subclasses that
    # do not override it; it is only read here (envs.extend(self.env)).
    env = []
    def manifests(self) -> str:
        """Render the Kubernetes manifests for this Ambassador instance,
        accumulating env-var snippets into self.manifest_envs."""
        rbac = RBAC_CLUSTER_SCOPE
        self.manifest_envs += """
    - name: POLL_EVERY_SECS
      value: "0"
    - name: CONSUL_WATCHER_PORT
      value: "8500"
"""
        if os.environ.get('AMBASSADOR_LEGACY_MODE', 'false').lower() == 'true':
            self.manifest_envs += """
    - name: AMBASSADOR_LEGACY_MODE
      value: "true"
"""
        if os.environ.get('AMBASSADOR_FAST_RECONFIGURE', 'false').lower() == 'true':
            self.manifest_envs += """
    - name: AMBASSADOR_FAST_RECONFIGURE
      value: "true"
"""
        amb_debug = []
        if self.debug_diagd:
            amb_debug.append("diagd")
        if self.debug_envoy:
            amb_debug.append("envoy")
        if amb_debug:
            self.manifest_envs += """
    - name: AMBASSADOR_DEBUG
      value: "%s"
""" % ":".join(amb_debug)
        if self.ambassador_id:
            self.manifest_envs += f"""
    - name: AMBASSADOR_LABEL_SELECTOR
      value: "kat-ambassador-id={self.ambassador_id}"
"""
        if self.single_namespace:
            self.manifest_envs += """
    - name: AMBASSADOR_SINGLE_NAMESPACE
      value: "yes"
"""
            # Single-namespace mode only needs namespace-scoped RBAC.
            rbac = RBAC_NAMESPACE_SCOPE
        if self.disable_endpoints:
            self.manifest_envs += """
    - name: AMBASSADOR_DISABLE_ENDPOINTS
      value: "yes"
"""
        if not self.allow_edge_stack_redirect:
            self.manifest_envs += """
    - name: AMBASSADOR_NO_TLS_REDIRECT
      value: "yes"
"""
        if self.envoy_api_version is not None:
            self.manifest_envs += f"""
    - name: AMBASSADOR_ENVOY_API_VERSION
      value: "{self.envoy_api_version}"
"""
        elif os.environ.get('KAT_USE_ENVOY_V3', '') != '':
            self.manifest_envs += """
    - name: AMBASSADOR_ENVOY_API_VERSION
      value: "V3"
"""
        # Extra Service ports requested by the concrete test.
        eports = ""
        if self.extra_ports:
            for port in self.extra_ports:
                eports += f"""
  - name: extra-{port}
    protocol: TCP
    port: {port}
    targetPort: {port}
"""
        if DEV:
            return self.format(rbac + AMBASSADOR_LOCAL, extra_ports=eports)
        else:
            return self.format(rbac + AMBASSADOR,
                               image=os.environ["AMBASSADOR_DOCKER_IMAGE"], envs=self.manifest_envs, extra_ports=eports, capabilities_block = "")
    # Will tear this out of the harness shortly
    @property
    def ambassador_id(self) -> str:
        """The AMBASSADOR_ID for this instance (defaults to the k8s name)."""
        if self._ambassador_id is None:
            return self.name.k8s
        else:
            return typecast(str, self._ambassador_id)
    @ambassador_id.setter
    def ambassador_id(self, val: str) -> None:
        self._ambassador_id = val
    @property
    def index(self) -> int:
        """Per-instance ordinal used to offset local port numbers in DEV mode."""
        if self._index is None:
            # lock here?
            self._index = AmbassadorTest.OFFSET
            AmbassadorTest.OFFSET += 1
        return typecast(int, self._index)
    def post_manifest(self):
        """In DEV mode: build the image once, fetch/cache the service-account
        secret, materialize it on disk, and launch the container locally."""
        if not DEV:
            return
        if os.environ.get('KAT_SKIP_DOCKER'):
            return
        image = os.environ["AMBASSADOR_DOCKER_IMAGE"]
        cached_image = os.environ["BASE_PY_IMAGE"]
        ambassador_base_image = os.environ["BASE_GO_IMAGE"]
        if not AmbassadorTest.IMAGE_BUILT:
            AmbassadorTest.IMAGE_BUILT = True
            # Clean up containers left behind by previous runs.
            cmd = ShellCommand('docker', 'ps', '-a', '-f', 'label=kat-family=ambassador', '--format', '{{.ID}}')
            if cmd.check('find old docker container IDs'):
                ids = cmd.stdout.split('\n')
                # Drop trailing empty strings from the split output.
                while ids:
                    if ids[-1]:
                        break
                    ids.pop()
                if ids:
                    print("Killing old containers...")
                    ShellCommand.run('kill old containers', 'docker', 'kill', *ids, verbose=True)
                    ShellCommand.run('rm old containers', 'docker', 'rm', *ids, verbose=True)
            context = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
            print("Starting docker build...", end="")
            sys.stdout.flush()
            cmd = ShellCommand("docker", "build", "--build-arg", "BASE_PY_IMAGE={}".format(cached_image), "--build-arg", "BASE_GO_IMAGE={}".format(ambassador_base_image), context, "-t", image)
            if cmd.check("docker build Ambassador image"):
                print("done.")
            else:
                pytest.exit("container failed to build")
        # Cache the service-account secret YAML on disk so repeated runs
        # don't re-hit the cluster.
        fname = "/tmp/k8s-%s.yaml" % self.path.k8s
        if os.path.exists(fname):
            with open(fname) as fd:
                content = fd.read()
        else:
            nsp = getattr(self, 'namespace', None) or 'default'
            cmd = ShellCommand("kubectl", "get", "-n", nsp, "-o", "yaml", "secret", self.path.k8s)
            if not cmd.check(f'fetch secret for {self.path.k8s}'):
                pytest.exit(f'could not fetch secret for {self.path.k8s}')
            content = cmd.stdout
            with open(fname, "wb") as fd:
                fd.write(content.encode('utf-8'))
        try:
            secret = yaml.load(content, Loader=yaml_loader)
        except Exception as e:
            print("could not parse YAML:\n%s" % content)
            raise e
        data = secret['data']
        # secret_dir = tempfile.mkdtemp(prefix=self.path.k8s, suffix="secret")
        secret_dir = "/tmp/%s-ambassadormixin-%s" % (self.path.k8s, 'secret')
        shutil.rmtree(secret_dir, ignore_errors=True)
        os.mkdir(secret_dir, 0o777)
        # Secret values are base64-encoded; write each decoded key to a file.
        for k, v in data.items():
            with open(os.path.join(secret_dir, k), "wb") as f:
                f.write(base64.decodebytes(bytes(v, "utf8")))
        print("Launching %s container." % self.path.k8s)
        command = ["docker", "run", "-d", "-l", "kat-family=ambassador", "--name", self.path.k8s]
        envs = [ "KUBERNETES_SERVICE_HOST=kubernetes",
                 "KUBERNETES_SERVICE_PORT=443",
                 "AMBASSADOR_SNAPSHOT_COUNT=1",
                 "AMBASSADOR_CONFIG_BASE_DIR=/tmp/ambassador",
                 "POLL_EVERY_SECS=0",
                 "CONSUL_WATCHER_PORT=8500",
                 "AMBASSADOR_UPDATE_MAPPING_STATUS=false",
                 "AMBASSADOR_ID=%s" % self.ambassador_id]
        # NOTE(review): plain attribute access here vs. getattr(...) above —
        # presumably Node always defines `namespace`; confirm.
        if self.namespace:
            envs.append("AMBASSADOR_NAMESPACE=%s" % self.namespace)
        if self.single_namespace:
            envs.append("AMBASSADOR_SINGLE_NAMESPACE=yes")
        if self.disable_endpoints:
            envs.append("AMBASSADOR_DISABLE_ENDPOINTS=yes")
        amb_debug = []
        if self.debug_diagd:
            amb_debug.append("diagd")
        if self.debug_envoy:
            amb_debug.append("envoy")
        if amb_debug:
            envs.append("AMBASSADOR_DEBUG=%s" % ":".join(amb_debug))
        envs.extend(self.env)
        [command.extend(["-e", env]) for env in envs]
        # Host ports are offset by self.index so parallel instances don't clash.
        ports = ["%s:8877" % (8877 + self.index), "%s:8001" % (8001 + self.index), "%s:8080" % (8080 + self.index), "%s:8443" % (8443 + self.index)]
        if self.extra_ports:
            for port in self.extra_ports:
                ports.append(f'{port}:{port}')
        [command.extend(["-p", port]) for port in ports]
        volumes = ["%s:/var/run/secrets/kubernetes.io/serviceaccount" % secret_dir]
        [command.extend(["-v", volume]) for volume in volumes]
        command.append(image)
        if os.environ.get('KAT_SHOW_DOCKER'):
            print(" ".join(command))
        cmd = ShellCommand(*command)
        if not cmd.check(f'start container for {self.path.k8s}'):
            pytest.exit(f'could not start container for {self.path.k8s}')
    def queries(self):
        """In DEV mode, verify the local container is still running before
        yielding queries; this base class yields none itself."""
        if DEV:
            cmd = ShellCommand("docker", "ps", "-qf", "name=%s" % self.path.k8s)
            if not cmd.check(f'docker check for {self.path.k8s}'):
                if not cmd.stdout.strip():
                    log_cmd = ShellCommand("docker", "logs", self.path.k8s, stderr=subprocess.STDOUT)
                    if log_cmd.check(f'docker logs for {self.path.k8s}'):
                        # BUG FIX: previously printed cmd.stdout, which is the
                        # (empty) docker-ps output, not the container logs.
                        print(log_cmd.stdout)
                    pytest.exit(f'container failed to start for {self.path.k8s}')
        return ()
    def scheme(self) -> str:
        """Default URL scheme for this test's queries."""
        return "http"
    def url(self, prefix, scheme=None, port=None) -> str:
        """Build a URL for `prefix` against this instance: localhost with
        index-offset ports in DEV mode, the k8s FQDN otherwise."""
        if scheme is None:
            scheme = self.scheme()
        if DEV:
            if not port:
                port = 8443 if scheme == 'https' else 8080
                port += self.index
            return "%s://%s/%s" % (scheme, "localhost:%s" % port, prefix)
        else:
            host_and_port = self.path.fqdn
            if port:
                host_and_port += f':{port}'
            return "%s://%s/%s" % (scheme, host_and_port, prefix)
    def requirements(self):
        """Readiness and liveness checks every Ambassador must pass."""
        yield ("url", Query(self.url("ambassador/v0/check_ready")))
        yield ("url", Query(self.url("ambassador/v0/check_alive")))
@abstract_test
class ServiceType(Node):
    """Base node for backend services targeted by Ambassador mappings.

    Without explicit manifests the service is served by the shared
    "superpod" (use_superpod stays True and manifests() returns None);
    passing service_manifests opts the service out of the superpod.
    """
    path: Name
    _manifests: Optional[str]
    use_superpod: bool = True
    def __init__(self, service_manifests: Optional[str]=None, namespace: Optional[str]=None, *args, **kwargs) -> None:
        super().__init__(namespace=namespace, *args, **kwargs)
        self._manifests = service_manifests
        if self._manifests:
            self.use_superpod = False
    def config(self):
        # Generator yielding no configuration; subclasses override.
        yield from ()
    def manifests(self):
        if self.use_superpod:
            return None
        return self.format(self._manifests)
    def requirements(self):
        # NOTE(review): when use_superpod is set, `yield from ()` is a no-op
        # and control still falls through to the URL checks below — confirm
        # whether an early return was intended here.
        if self.use_superpod:
            yield from ()
        yield ("url", Query("http://%s" % self.path.fqdn))
        yield ("url", Query("https://%s" % self.path.fqdn))
@abstract_test
class ServiceTypeGrpc(Node):
    """A gRPC backend service; always provides its own manifests (BACKEND by default)."""

    path: Name

    def __init__(self, service_manifests: str=None, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

        # Unlike ServiceType, there is no superpod path: fall back to BACKEND.
        self._manifests = service_manifests or BACKEND

    def config(self):
        # This node contributes no Ambassador configuration of its own.
        yield from ()

    def manifests(self):
        return self.format(self._manifests)

    def requirements(self):
        yield ("url", Query("http://%s" % self.path.fqdn))
        yield ("url", Query("https://%s" % self.path.fqdn))
class HTTP(ServiceType):
    """Plain HTTP backend service (superpod-backed by default)."""
    pass
class GRPC(ServiceType):
    """Plain gRPC backend service (superpod-backed by default)."""
    pass
class EGRPC(ServiceType):
    """gRPC echo backend; readiness is checked with a real gRPC Echo call."""

    skip_variant: ClassVar[bool] = True

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, service_manifests=GRPC_ECHO_BACKEND, **kwargs)

    def requirements(self):
        # Probe the echo service over gRPC; expect grpc-status 0 / HTTP 200.
        yield ("url", Query("http://%s/echo.EchoService/Echo" % self.path.fqdn,
                            headers={ "content-type": "application/grpc",
                                      "requested-status": "0" },
                            expected=200,
                            grpc_type="real"))
class AHTTP(ServiceType):
    """HTTP auth backend service (uses AUTH_BACKEND manifests)."""

    skip_variant: ClassVar[bool] = True

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, service_manifests=AUTH_BACKEND, **kwargs)
class AGRPC(ServiceType):
    """gRPC auth backend; readiness is pod-based rather than URL-based."""

    skip_variant: ClassVar[bool] = True

    def __init__(self, protocol_version: str="v2", *args, **kwargs) -> None:
        # gRPC protocol version for the auth backend (presumably consumed by
        # the manifests/config — TODO confirm where it is read).
        self.protocol_version = protocol_version
        super().__init__(*args, service_manifests=GRPC_AUTH_BACKEND, **kwargs)

    def requirements(self):
        yield ("pod", self.path.k8s)
class RLSGRPC(ServiceType):
    """gRPC rate-limit backend; readiness is pod-based rather than URL-based."""

    skip_variant: ClassVar[bool] = True

    def __init__(self, protocol_version: str="v2", *args, **kwargs) -> None:
        # gRPC protocol version for the RLS backend (see AGRPC note).
        self.protocol_version = protocol_version
        super().__init__(*args, service_manifests=GRPC_RLS_BACKEND, **kwargs)

    def requirements(self):
        yield ("pod", self.path.k8s)
@abstract_test
class MappingTest(Test):
    """Base class for tests that map a route to a target service."""

    target: ServiceType              # backend service the mapping points at
    options: Sequence['OptionTest']  # per-mapping option sub-tests
    parent: AmbassadorTest

    no_local_mode = True
    skip_local_instead_of_xfail = "Plain (MappingTest)"

    def init(self, target: ServiceType, options=()) -> None:
        self.target = target
        self.options = list(options)
        self.is_ambassador = True
@abstract_test
class OptionTest(Test):
    """Base class for mapping option sub-tests.

    Subclasses may set VALUES to generate one variant per candidate value.
    """

    VALUES: ClassVar[Any] = None
    value: Any
    parent: Test

    no_local_mode = True
    skip_local_instead_of_xfail = "Plain (OptionTests)"

    @classmethod
    def variants(cls):
        # Without candidate VALUES there is a single, value-less variant.
        if cls.VALUES is None:
            yield cls()
            return

        for candidate in cls.VALUES:
            yield cls(candidate, name=sanitize(candidate))

    def init(self, value=None):
        self.value = value
|
datawire/ambassador
|
python/tests/kat/abstract_tests.py
|
Python
|
apache-2.0
| 16,480
|
"""
WSGI config for roomfinder project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "roomfinder.settings")
application = get_wsgi_application()
|
thyagostall/roomfinder
|
server/roomfinder/wsgi.py
|
Python
|
gpl-2.0
| 397
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-03 13:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make FrontendDeployment.deployed_at refresh automatically on save."""

    dependencies = [
        ('frontend', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='frontenddeployment',
            name='deployed_at',
            # auto_now=True: timestamp is updated on every save.
            field=models.DateTimeField(auto_now=True),
        ),
    ]
|
sussexstudent/falmer
|
falmer/frontend/migrations/0002_auto_20170703_1345.py
|
Python
|
mit
| 462
|
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.base.build_environment import pants_version
from pants.version import VERSION as _VERSION
from pants.testutil.test_base import TestBase
class PantsPluginPantsRequirementTest(TestBase):
    """Sanity-check that pants_version() reports the packaged VERSION."""

    def test_version(self):
        self.assertEqual(pants_version(), _VERSION)
|
tdyas/pants
|
testprojects/pants-plugins/tests/python/test_pants_plugin/test_pants_plugin_pants_requirement.py
|
Python
|
apache-2.0
| 404
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (c) 2014 CarlLee
# Copyright (c) 2015 Troels Kofoed Jacobsen
# Thanks to CarlLee for providing this file under the MIT license
# https://github.com/CarlLee/ENML_PY
import os
from bs4 import BeautifulSoup
# Maps resource MIME types to file extensions used when saving media to disk.
# (The name keeps its historical "EXTESION" typo; it is referenced below.)
MIME_TO_EXTESION_MAPPING = {
    'image/png': '.png',
    'image/jpg': '.jpg',
    'image/jpeg': '.jpg',
    'image/gif': '.gif'
}
def enmltohtml(content, pretty=True, header=True, **kwargs):
    """
    converts ENML string into HTML string

    :param header: If True, note is wrapped in a <HTML><BODY> block.
    :type header: bool
    """
    soup = BeautifulSoup(content, 'html.parser')

    # Replace every <en-todo> with a disabled checkbox, carrying over state.
    for todo_tag in soup.find_all('en-todo'):
        box = soup.new_tag('input')
        box['type'] = 'checkbox'
        box['disabled'] = 'true'
        if todo_tag.has_attr('checked'):
            box['checked'] = todo_tag['checked']
        todo_tag.replace_with(box)

    # With a media store, swap <en-media> references for saved resources.
    if 'media_store' in kwargs:
        store = kwargs['media_store']
        for media_tag in soup.find_all('en-media'):
            resource_url = store.save(media_tag['hash'], media_tag['type'])
            # TODO: use different tags for different mime-types
            replacement = soup.new_tag('img')
            replacement['src'] = resource_url
            media_tag.replace_with(replacement)

    note = soup.find('en-note')
    if not note:
        # No (non-empty) <en-note> found: hand back the input unchanged.
        return content

    if header:
        html = soup.new_tag('html')
        html.append(note)
        note.name = 'body'
    else:
        html = note
        note.name = 'div'

    return html.prettify().encode('utf-8') if pretty else str(html)
class MediaStore(object):
    """Abstract store resolving note resources (by hex hash string) to URLs."""

    def __init__(self, note_store, note_guid):
        """
        note_store: NoteStore object from EvernoteSDK
        note_guid: Guid of the note in which the resouces exist
        """
        self.note_store = note_store
        self.note_guid = note_guid

    def _get_resource_by_hash(self, hash_str):
        """
        get resource by its hash

        Returns the raw resource body.
        """
        # NOTE(review): str.decode('hex') is Python 2 only.
        hash_bin = hash_str.decode('hex')
        resource = self.note_store.getResourceByHash(self.note_guid, hash_bin, True, False, False);
        return resource.data.body

    def save(self, hash_str, mime_type):
        """Persist the resource and return its URL; subclasses implement this."""
        pass
class FileMediaStore(MediaStore):
    """MediaStore that saves resources as files under a local directory."""

    def __init__(self, note_store, note_guid, path):
        """
        note_store: NoteStore object from EvernoteSDK
        note_guid: Guid of the note in which the resouces exist
        path: The path to store media file
        """
        super(FileMediaStore, self).__init__(note_store, note_guid)
        self.path = os.path.abspath(path)

    def save(self, hash_str, mime_type):
        """
        save the specified hash and return the saved file's URL

        Raises KeyError for MIME types missing from MIME_TO_EXTESION_MAPPING.
        """
        if not os.path.exists(self.path):
            os.makedirs(self.path)
        data = self._get_resource_by_hash(hash_str)
        file_path = self.path + '/' + hash_str + MIME_TO_EXTESION_MAPPING[mime_type]
        # BUG FIX: resource bodies are raw bytes — write in binary mode
        # ("wb", not "w") and use a context manager so the handle is closed
        # even if the write fails.
        with open(file_path, "wb") as f:
            f.write(data)
        return "file://" + file_path
|
tkjacobsen/enote
|
enote/enmltohtml.py
|
Python
|
mit
| 3,150
|
# -*- coding: UTF-8 -*-
#
# Copyright 2012 Michinobu Maeda.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
from google.appengine.ext import blobstore
from jpzipcode.model import Status
class ZipProvider(webapp2.RequestHandler):
    """Outputs generated artifacts (zip archives) from the blobstore."""

    def get(self):
        # Request path looks like /download/<file>.zip; the filename encodes a
        # task code, category and date at fixed offsets (assumed separators at
        # index 2 and 4 — TODO confirm the exact filename format).
        filename = self.request.path.replace('/download/', '')
        task = filename[0:2]
        cat = filename[3:4]
        ts = "%(y)s-%(m)s-%(d)s" % {
            'y':filename[5:9],
            'm':filename[9:11],
            'd':filename[11:13],
        }
        stts = Status()
        # 404 unless the stored archive timestamp for this category matches
        # the date embedded in the requested filename.
        if stts.get("ts_ar_%(cat)s" % {'cat':cat})[0:10] != ts:
            self.error(404)
            return
        # Look up the blob key for this task/category and stream the zip.
        key = stts.get("key_%(task)s_%(cat)s" % {'cat':cat, 'task':task})
        blob_info = blobstore.BlobInfo(blobstore.BlobKey(key))
        zr = blob_info.open()
        self.response.headers['Content-Type'] = 'application/zip'
        self.response.out.write(zr.read())
        zr.close()

# Route every /download/*.zip request to the provider.
app = webapp2.WSGIApplication([('/download/.*\.zip', ZipProvider)])
|
MichinobuMaeda/jpzipcode
|
src/gae/jpzipcode/controller/zipprovider.py
|
Python
|
apache-2.0
| 1,556
|
# Demo of Python's full try/except/else/finally flow (messages in Portuguese).
try:
    # 1/1 succeeds, so the except block is skipped and else runs.
    1/1
    print('Depois da excecao de divisão por 0')
except ZeroDivisionError :
    # Would run only if the division above raised ZeroDivisionError.
    print('Tratando qualquer excecao')
else:
    # Runs only when the try block raised nothing.
    print('Executado se não houver erro')
finally:
    # Always runs, error or not.
    print('Sempre é executado')
|
renzon/poo-python
|
execao/tratamento.py
|
Python
|
mit
| 218
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2013,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq status`."""
from aquilon.aqdb.db_factory import db_prompt
from aquilon.worker.broker import BrokerCommand
class CommandStatus(BrokerCommand):
    """Render the broker status summary for `aq status`."""

    requires_readonly = True

    def render(self, session, dbuser, **_):
        get = self.config.get
        lines = [
            "Aquilon Broker %s" % get("broker", "version"),
            "Server: %s" % get("broker", "servername"),
            "Database: %s" % db_prompt(session),
            "Sandboxes: %s" % get("broker", "templatesdir"),
        ]
        # Only authenticated requests can report the connected user/role.
        if dbuser:
            lines.append("Connected as: %s [%s]" % (dbuser, dbuser.role.name))
        return lines
|
guillaume-philippon/aquilon
|
lib/aquilon/worker/commands/status.py
|
Python
|
apache-2.0
| 1,392
|
"""Implementation of various utilitaries"""
import bisect
import itertools
from collections import deque
def sliding(sequence:iter, size:int) -> iter:
    """Yield tuple of `size` elements"""
    iterator = iter(sequence)
    window = deque(itertools.islice(iterator, 0, size), maxlen=size)
    # First window (shorter than `size` when the input runs out early).
    yield tuple(window)
    for item in iterator:
        window.append(item)
        yield tuple(window)
# found at https://docs.python.org/3.5/library/itertools.html
def grouper(iterable, size, fillvalue=None):
    "Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx"
    # The same iterator repeated `size` times makes zip_longest pull
    # consecutive elements into each chunk.
    source = iter(iterable)
    return itertools.zip_longest(*([source] * size), fillvalue=fillvalue)
def group_level(value, levels:tuple, groups:tuple) -> 'group':
    """Return the group to which given value belongs,
    according to given levels.

    See also https://docs.python.org/3.5/library/bisect.html#other-examples
    """
    # One more group than there are boundary levels.
    assert len(levels) == len(groups) - 1
    index = bisect.bisect(levels, value)
    return groups[index]
|
Aluriak/MusicGenerator
|
generator/utils.py
|
Python
|
gpl-2.0
| 1,028
|
# This file is part of Copernicus
# http://www.copernicus-computing.org/
#
# Copyright (C) 2011, Sander Pronk, Iman Pouya, Erik Lindahl, and others.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'''Very simple functions used by the command line tools'''
import os
import shutil
from cpc.util.conf import conf_base, server_conf
from cpc.util.conf.connection_bundle import ConnectionBundle
import cpc.util.openssl
import sys
import textwrap
from cpc.util.conf.conf_base import Conf
from cpc.util.conf.conf_base import NoConfError, ConfError
import cpc.util.conf.server_conf
from cpc.util.exception import ClientError
from cpc.util.conf.client_conf import ClientConf, NoServerError
def printSortedConfigListDescriptions(configs):
for key in sorted(configs.keys()):
value = configs[key]
spaces = ''
for i in range(20 - len(value.name)):
spaces = spaces + " "
print value.name + spaces + value.description #+ '\n'
def printSortedConfigListValues(configs):
for key in sorted(configs.keys()):
value = configs[key]
spaces = ''
for i in range(20 - len(value.name)):
spaces = spaces + " "
print value.name + spaces + str(value.get()) #+ '\n'
def initiateConnectionBundle(conffile):
    """Load and return the ConnectionBundle from conffile.

    Prints a hint and exits with status 1 when no bundle can be found.
    NOTE(review): the error message contains a duplicated "with with".
    """
    cf = None
    try:
        cf = ConnectionBundle(conffile)
        return cf
    except NoConfError:
        print "Could not find a connection bundle \nPlease specify one with " \
              "with the -c flag or supply the file with the name\nclient.cnx" \
              " in your configuration folder "
        sys.exit(1)
def getClientConf():
    """Validate that a client configuration with a configured server exists.

    Returns None (validation only); raises ClientError when no server is
    configured or the configuration is broken.
    """
    try:
        cfg = ClientConf()
        #make sure there is configured server
        cfg.getClientSecurePort()
        cfg.getClientHost()
    except (NoConfError, NoServerError):
        raise ClientError("No servers."\
                          " Use cpcc add-server to add one.")
    except cpc.util.conf.conf_base.ConfError as e:
        raise ClientError(e)
def addServer(name, host, port):
    """Register a named server (host/port) in the client configuration."""
    ClientConf().addServer(name, host,port)
def useServer(name):
    """Make the named server the default in the client configuration."""
    ClientConf().setDefaultServer(name)
def listServer():
    """Return the servers known to the client configuration."""
    return ClientConf().getServers()
def initiateWorkerSetup():
    '''
    Creates a connection bundle via the OpenSSL client setup.

    @return ConnectionBundle
    '''
    openssl = cpc.util.openssl.OpenSSL()
    connectionBundle = openssl.setupClient()
    return connectionBundle
def getArg(arglist, argnr, name):
    """Return arglist[argnr]; raise ClientError naming the missing argument."""
    try:
        return arglist[argnr]
    except IndexError:
        raise ClientError("Missing argument: %s" % name)
# Column width used to centre and wrap the console output below.
terminalWidth = 80
def printLogo():
    """Print the Copernicus ASCII-art logo, line by line, slightly indented."""
    logo = """
___ _
/ __\ ___ _ __ ___ _ __ _ __ (_) ___ _ _ ___
/ / / _ \ | '_ \ / _ \| '__|| '_ \ | | / __|| | | |/ __|
/ /___| (_) || |_) || __/| | | | | || || (__ | |_| |\__
\____/ \___/ | .__/ \___||_| |_| |_||_| \___| \__,_||___/
|_|"""
    lines = logo.splitlines()
    for line in lines:
        print " %s"%line
def printAuthors():
    """Print contributor and author names, wrapped and centred to terminalWidth."""
    developers = ["Magnus Lundborg", "Patrik Falkman","Grant Rotskoff","Per Larsson"]
    authors = ["Sander Pronk","Iman Pouya","Erik Lindahl"]
    contributorsTxt = "Contributions from: %s"%", ".join(developers)
    authorsTxt = ", ".join(authors)
    wrapper = textwrap.TextWrapper(width=terminalWidth)
    lines = wrapper.wrap(contributorsTxt)
    print "\n"
    for line in lines:
        print line.center(terminalWidth)
    print "\n"
    for line in wrapper.wrap(authorsTxt):
        print line.center(terminalWidth)
    print "\n\n"
def checkServerConfExistAndAskToRemove(hostConfDir,altDirName=None):
    '''
    Checks whether a server configuration already exists and asks the user
    whether to overwrite it and continue.

    Returns True when the user answers "y" (setup may proceed); exits the
    process with status 0 on any other answer.  Returns None when no
    configuration directory exists yet.

    NOTE(review): nothing is actually wiped here despite the original
    description; the caller is expected to overwrite the configuration.
    '''
    dirname = server_conf.resolveSetupConfBaseDir(altDirName)
    confDir=cpc.util.conf.conf_base.findAndCreateGlobalDir()
    # now if a host-specific directory already exists, we use that
    confDir = server_conf.resolveSetupConfDir(confDir, dirname, hostConfDir,altDirName=altDirName)
    if os.path.exists(confDir):
        answer = raw_input("A configuration already exists in %s. Overwrite? (y/N)? "%confDir)
        if(answer.lower() == 'y'):
            return True
        else: # answer = n
            print("Server setup aborted")
            exit(0)
|
soellman/copernicus
|
cpc/util/cmd_line_utils.py
|
Python
|
gpl-2.0
| 5,176
|
from django import forms
from multiemailfield.widgets import MultiEmailWidget
from multiemailfield import utils
class MultiEmailFormField(forms.CharField):
    """CharField variant that renders with the multi-email widget and
    parses the cleaned value into multiple addresses."""

    def __init__(self, *args, **kwargs):
        # Fall back to the multi-email widget unless the caller chose one.
        kwargs.setdefault('widget', MultiEmailWidget)
        super(MultiEmailFormField, self).__init__(*args, **kwargs)

    def clean(self, value):
        cleaned = super(MultiEmailFormField, self).clean(value)
        return utils.load(cleaned)
|
sophilabs/django-multiemail-field
|
multiemailfield/forms.py
|
Python
|
mit
| 452
|
"""
The bundled WSGI apps.
* Routing: Uses URL prefixes to route to applications
* StaticFiles: Serves a directory
* StaticResources: Serves the resources from a python package
"""
import email.utils # For datetime formatting
import functools
from http import HTTPStatus
import logging
import mimetypes
import os
import posixpath
import traceback
import wsgiref.simple_server
import wsgiref.util
try:
# Python 3.7+
import importlib.resources as importlib_resources
except ImportError as e :
# Python 3.6
import importlib_resources
from .util import abspath
__all__ = ('StaticFiles', 'StaticResources', 'Routing')
logger = logging.getLogger(__name__)
CHUNK_SIZE = 4 * 1024 # 4k
# Follow Django in treating URLs as UTF-8 encoded (which requires undoing the
# implicit ISO-8859-1 decoding applied in Python 3). Strictly speaking, URLs
# should only be ASCII anyway, but UTF-8 can be found in the wild.
def decode_path_info(path_info):
    """Undo WSGI's implicit ISO-8859-1 decoding and reinterpret as UTF-8.

    Both steps use 'replace' so malformed input never raises.
    """
    raw = path_info.encode("iso-8859-1", "replace")
    return raw.decode("utf-8", "replace")
def send_simple_text(environ, start_response, status, body):
    """
    Send a simple message as plain text
    """
    # Accept an HTTPStatus/int and render the standard "code phrase" line.
    if isinstance(status, int):
        status = "{} {}".format(int(status), status.phrase)
    payload = body.encode('utf-8') if isinstance(body, str) else body
    start_response(status, [
        ('Content-Type', 'text/plain'),
        ('Content-Length', str(len(payload))),
    ])
    return [payload]
def do_403(environ, start_response):
    """
    Generic app to produce a 403
    """
    full_path = environ['SCRIPT_NAME'] + environ['PATH_INFO']
    message = "Path {} is not allowed.".format(full_path)
    return send_simple_text(environ, start_response, HTTPStatus.FORBIDDEN, message)
def do_404(environ, start_response):
    """
    Generic app to produce a 404
    """
    full_path = environ['SCRIPT_NAME'] + environ['PATH_INFO']
    message = "Path {} was not found".format(full_path)
    return send_simple_text(environ, start_response, HTTPStatus.NOT_FOUND, message)
def do_405(environ, start_response):
    """
    Generic app to produce a 405
    """
    full_path = environ['SCRIPT_NAME'] + environ['PATH_INFO']
    message = "Method {} is not allowed on {}".format(environ['REQUEST_METHOD'], full_path)
    return send_simple_text(environ, start_response, HTTPStatus.METHOD_NOT_ALLOWED, message)
def do_options(environ, start_response):
    """
    Generic app to produce a response to OPTIONS
    """
    allow_headers = [('Allow', 'OPTIONS, GET, HEAD')]
    start_response("204 No Content", allow_headers)
    return []
def wsgi_catch_errors(func):
    """Wrap a WSGI app so any exception becomes a plain-text 500 traceback."""
    @functools.wraps(func)
    def handler(*args):
        try:
            return func(*args)
        except BaseException:
            # The last positional argument is always start_response.
            start_response = args[-1]
            start_response("500 Server Error", [
                ('Content-Type', 'text/plain'),
            ])
            return [traceback.format_exc().encode('utf-8')]
    return handler
class Routing(dict):
    """
    Implements a basic URL routing system.

    Path prefixes are compared to the request path. The longest prefix wins.

    Example:

        Routing({
            '/': app,
            '/static': Static('mystatic'),
        })
    """
    def no_route_found(self, environ, start_response):
        """
        Handle if there was no matching route
        """
        return do_404(environ, start_response)

    @wsgi_catch_errors
    def __call__(self, environ, start_response):
        # SCRIPT_NAME + PATH_INFO = full url
        urlpath = (environ['SCRIPT_NAME'] + environ['PATH_INFO']) or '/'

        # A prefix matches when it is a whole-component prefix of the path.
        candidates = [
            prefix for prefix in self
            if posixpath.commonpath([prefix, urlpath]) == prefix
        ]

        if not candidates:
            return self.no_route_found(environ, start_response)

        best = max(candidates, key=len)
        logger.debug("For %r found %r routes, selected %r", urlpath, candidates, best)

        # Shift the matched prefix from PATH_INFO into SCRIPT_NAME before
        # delegating to the selected app.
        handler = self[best]
        environ['SCRIPT_NAME'] = urlpath[:len(best)]
        environ['PATH_INFO'] = urlpath[len(best):]
        return handler(environ, start_response)
class StaticContentsApp:
    """
    Base class for static serving implementatins

    Subclasses implement open(); this class handles method checks, error
    dispatch, and whole/partial (Range) responses.
    """
    max_age = 60  # 1min, takes the edge off any frequent responses while staying fresh

    def method_not_allowed(self, environ, start_response):
        """
        Handle if we got something besides GET or HEAD
        """
        return do_405(environ, start_response)

    def file_not_found(self, environ, start_response):
        """
        Handle if the file cannot be found
        """
        return do_404(environ, start_response)

    def is_a_directory(self, environ, start_response):
        """
        Handle if we were given a directory
        """
        return do_404(environ, start_response)

    def no_permissions(self, environ, start_response):
        """
        Handle if we can't open the file
        """
        return do_403(environ, start_response)

    def open(self, path):
        """
        Return a file-like object in 'rb' mode.

        The path given is normalized.

        Add a .name attribute to the file if applicable

        Raise a FileNotFoundError, IsADirectoryError, or a PermissionError in
        case of error.
        """
        # BUG FIX: was `def open(path)` without self, so calling the base
        # raised TypeError instead of NotImplementedError.
        raise NotImplementedError

    @wsgi_catch_errors
    def __call__(self, environ, start_response):
        if environ['REQUEST_METHOD'] == 'OPTIONS':
            return do_options(environ, start_response)
        elif environ['REQUEST_METHOD'] not in ('GET', 'HEAD'):
            return self.method_not_allowed(environ, start_response)

        path = posixpath.normpath(environ['PATH_INFO'] or '/')
        path_options = [path]
        if path.endswith('/'):
            path_options.append(path[:-1])
        path_options.append(posixpath.join(path, 'index.html'))

        responder = None
        for option in path_options:
            try:
                file = self.open(option)
            # BUG FIX: PermissionError and NotADirectoryError are OSError
            # subclasses, so they must be caught BEFORE the bare OSError
            # clause (previously `except (IsADirectoryError, OSError)` made
            # the later clauses unreachable).
            except FileNotFoundError:
                logger.debug("file not found: %s", option)
                if responder is None:
                    responder = self.file_not_found
            except IsADirectoryError:
                logger.debug("is a directory: %s", option)
                if responder is None:
                    responder = self.is_a_directory
            except PermissionError:
                logger.debug("permission error: %s", option)
                if responder is None:
                    responder = self.no_permissions
            except NotADirectoryError:
                logger.debug("not a directory: %s", option)
                # This can happen if we get a file with a trailing slash
                # This should only happen with the first option, and should be
                # covered by the next option
                pass
            except OSError:  # directories raise plain OSError on Windows
                logger.debug("is a directory: %s", option)
                if responder is None:
                    responder = self.is_a_directory
            else:
                break
        else:
            assert responder
            return responder(environ, start_response)

        if hasattr(file, 'name'):
            filename = file.name
        else:
            filename = path

        mime, _ = mimetypes.guess_type(filename, strict=False)

        # NOTE: We're not doing cache control checking, because we don't
        # consistently have stat() available.
        # TODO: Type negotiation
        if 'HTTP_RANGE' in environ:
            return self._serve_partial_file(environ, start_response, file, filename, mime)
        else:
            return self._serve_whole_file(environ, start_response, file, filename, mime)

    def _default_headers(self, mime, file):
        """Build the baseline response headers, adding Content-Length when
        the file supports fstat()."""
        rv = wsgiref.headers.Headers([
            # BUG FIX: 'octect-stream' -> 'octet-stream' (misspelled MIME type)
            ('Content-Type', mime or 'application/octet-stream'),
            ('Accept-Ranges', 'bytes'),
            ('Cache-Control', 'max-age={}'.format(self.max_age))
        ])
        if hasattr(file, 'fileno'):
            try:
                stat = os.fstat(file.fileno())
            except OSError:
                pass
            else:
                rv['Content-Length'] = str(stat.st_size)
                # rv['Last-Modified'] = email.utils.formatdate(stat.st_mtime, usegmt=True)
        return rv

    def _serve_whole_file(self, environ, start_response, file, filename, mime):
        """Respond 200 with the entire file (headers only for HEAD)."""
        response_headers = self._default_headers(mime, file)
        start_response('200 OK', response_headers._headers)
        if environ['REQUEST_METHOD'] == 'HEAD':
            file.close()
            return []
        else:
            wrapper = environ.get('wsgi.file_wrapper', wsgiref.util.FileWrapper)
            return wrapper(file, CHUNK_SIZE)

    def _parse_range(self, header, length):
        """Parse a bytes Range header into (start, end); end may be None
        when the total length is unknown and the range is open-ended."""
        logger.debug("Got range header %r (length=%s)", header, length)
        unit, _, ranges = header.partition('=')
        if unit != 'bytes':
            raise ValueError("Range not satisfiable: {}".format(header))
        ranges = [bit.strip().split('-') for bit in ranges.split(',')]
        # NOTE(review): only the first range of a multi-range request is used.
        start, end = ranges[0]
        start = int(start) if start else 0
        end = int(end) if end else None
        if length is not None:
            if end is None:
                end = length - 1
        return start, end

    def _compose_content_range(self, start, end, total):
        """Format a Content-Range header value ('bytes start-end/total')."""
        rv = 'bytes '
        if start is not None:
            rv += str(start)
        rv += '-'
        if end is not None:
            rv += str(end)
        rv += '/'
        if total is not None:
            rv += str(total)
        else:
            rv += '*'
        return rv

    def _serve_partial_file(self, environ, start_response, file, filename, mime):
        """Respond 206 with the requested byte range (or 416 if unsatisfiable)."""
        response_headers = self._default_headers(mime, file)
        length = response_headers['Content-Length']
        if length:
            length = int(length)
        else:
            length = None

        start, end = self._parse_range(environ['HTTP_RANGE'], length)

        if length is not None:
            # Check ranges
            maxindex = length - 1
            if start > maxindex or end > maxindex:
                start_response('416 Range Not Satisfiable', [
                    ('Content-Range', 'bytes */{}'.format(length))
                ])
                return []

        # BUG FIX: end can be None (unknown length, open-ended range); the
        # old `assert start <= end` raised TypeError in that case.
        assert end is None or start <= end
        assert length is None or end < length

        logger.debug("Serving %s (%s to %s of %s)", filename, start, end, length)

        response_headers['Content-Range'] = self._compose_content_range(start, end, length)
        if end is None:
            amount = None
            del response_headers['Content-Length']
        else:
            amount = end - start + 1
            response_headers['Content-Length'] = str(amount)

        start_response('206 Partial Content', response_headers._headers)
        if environ['REQUEST_METHOD'] == 'HEAD':
            file.close()
            return []
        else:
            return self._partial_file_wrapper(file, start, amount)

    def _partial_file_wrapper(self, file, skip, amount):
        """Yield up to `amount` bytes (all remaining when amount is None),
        starting `skip` bytes into the file."""
        served = 0
        if skip:
            file.seek(skip)
        while (amount is None) or (served < amount):
            # BUG FIX: with amount=None the old code computed
            # min(CHUNK_SIZE, None - served) and raised TypeError.
            chunk = CHUNK_SIZE if amount is None else min(CHUNK_SIZE, amount - served)
            data = file.read(chunk)
            if not data:
                break
            served += len(data)
            yield data
        # Consistency fix: use the module logger, not the logging root.
        logger.debug("Served %s of %s", served, amount)
class StaticFiles(StaticContentsApp):
    """
    Serves static files from a directory on the file system.
    """
    def __init__(self, root):
        self.root = abspath(root)

    def open(self, file):
        # Anchor the requested path (if any) under the configured root.
        target = os.path.join(self.root, file.lstrip('/')) if file else self.root
        logger.debug('Resolved %s to %s' % (file, target))
        return open(target, 'rb')
class StaticResources(StaticContentsApp):
    """
    Serves static files from resources in python packages
    """
    def __init__(self, root):
        self.root = root

    def open(self, file):
        # Split "pkg/sub/name.ext" into a dotted package path and a resource.
        parent, leaf = posixpath.split(file)
        parent = parent.rstrip('/')
        if parent:
            package = "{}.{}".format(self.root, parent.replace('/', '.'))
        else:
            package = self.root
        try:
            return importlib_resources.open_binary(package, leaf)
        except ModuleNotFoundError:
            # Missing package directory == missing file for our callers.
            raise FileNotFoundError
|
r0x0r/pywebview
|
webview/wsgi.py
|
Python
|
bsd-3-clause
| 12,536
|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
    """Register the 'gackle_bat' creature spawn template with the spawn service."""
    mobileTemplate = MobileTemplate()

    # Basic creature identity, level and spawn behaviour.
    mobileTemplate.setCreatureName('gackle_bat')
    mobileTemplate.setLevel(65)
    mobileTemplate.setDifficulty(Difficulty.NORMAL)
    mobileTemplate.setMinSpawnDistance(4)
    mobileTemplate.setMaxSpawnDistance(8)
    mobileTemplate.setDeathblow(False)
    mobileTemplate.setScale(1)

    # Harvestable resources.
    mobileTemplate.setMeatType("Carnivore Meat")
    mobileTemplate.setMeatAmount(4)
    mobileTemplate.setHideType("Bristley Hide")
    mobileTemplate.setHideAmount(2)
    mobileTemplate.setBoneType("Animal Bones")
    mobileTemplate.setBoneAmount(3)

    # Social / aggro behaviour.
    mobileTemplate.setSocialGroup("gacklebat")
    mobileTemplate.setAssistRange(12)
    mobileTemplate.setStalker(False)
    mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)

    # Visual appearance template(s).
    templates = Vector()
    templates.add('object/mobile/shared_gackle_bat.iff')
    mobileTemplate.setTemplates(templates)

    # Weapon and attack set.
    weaponTemplates = Vector()
    weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
    weaponTemplates.add(weapontemplate)
    mobileTemplate.setWeaponTemplateVector(weaponTemplates)

    attacks = Vector()
    attacks.add('bm_siphon_2')
    attacks.add('bm_puncture_2')
    attacks.add('bm_wing_buffet_4')
    mobileTemplate.setDefaultAttack('creatureMeleeAttack')
    mobileTemplate.setAttacks(attacks)

    core.spawnService.addMobileTemplate('gackle_bat', mobileTemplate)
    return
|
agry/NGECore2
|
scripts/mobiles/yavin4/gackle_bat.py
|
Python
|
lgpl-3.0
| 1,708
|
import sqlalchemy
sqlalchemy.__version__
from sqlalchemy import create_engine
# Creating an inmemory sqlite database for the tutorial purposes
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Sequence, ForeignKey
from sqlalchemy.orm import sessionmaker, aliased, relationship, backref
from sqlalchemy.orm.exc import MultipleResultsFound,NoResultFound
Session = sessionmaker()
# when engine is created Session.configure(bind=engine)
engine = create_engine('sqlite:///:memory:', echo=True)
Base = declarative_base()
Session.configure(bind=engine)
class User(Base):
    """Tutorial ORM model mapped to the 'users' table."""
    __tablename__ = 'users'

    id = Column(Integer, Sequence('user_id_seq') ,primary_key=True)
    name = Column(String(50))
    fullname = Column(String(50))
    password = Column(String(12))

    def __repr__(self):
        return "<User(name='%s', fullname='%s', password='%s')>" % (self.name, self.fullname, self.password)

# Create any missing tables for the models defined so far.
Base.metadata.create_all(engine)
class Address(Base):
    """Tutorial ORM model mapped to 'Addresses' (many-to-one with User)."""
    __tablename__ = 'Addresses'
    id = Column(Integer, primary_key=True)
    email_address = Column(String, nullable=False)
    user_id = Column(Integer, ForeignKey('users.id'))

    # backref gives User instances a lazy-loaded `.addresses` collection.
    user = relationship("User", backref=backref('addresses', order_by=id))

    def __repr__(self):
        return "<Address(email_address=%s)>" % self.email_address

# Create the newly declared table as well (no-op for existing ones).
Base.metadata.create_all(engine)
# --- Session setup and seed data -------------------------------------------
session = Session()

ed_user = User(name='ed', fullname="Edward Jones", password='nonesuch')
session.add(ed_user)
chloe_user = User(name='Chloeoh', fullname="Chloe Tinker", password='bubba')
session.add(chloe_user)
# our_user = session.query(User).filter_by(name='Chloeoh').first()
# print our_user
# print ed_user is our_user
session.add_all([
    User(name='Evan', fullname='Evan Brennan-White', password="fx3k9c"),
    User(name='Leia', fullname='Leia Brennan-White', password="fx3k9c"),
    User(name='Chance', fullname='Chance Brennan-White', password="dog")])

# Pending attribute change is flushed together with the adds on commit.
chloe_user.password = 'ra++le'
# print session.dirty
# print session.new
session.commit()
# print chloe_user.id
# ed_user.name = 'Edwardo'
# fake_user = User(name='broofus', fullname='Broofass',
#                  password='Nothing' )
# session.add(fake_user)
# print session.query(User).filter(User.name.in_(['Edwardo', 'broofus'])).all()
# session.rollback()
# print ed_user.name
# print fake_user in session
# print session.query(User).filter(User.name.in_(['ed', 'fake_user'])).all()
# for instance in session.query(User).order_by(User.id):
#     print instance.name, instance.fullname
# for name, fullname in session.query(User.name, User.fullname):
#     print name, fullname
# for row in session.query(User, User.name,).all():
#     print row.User, row.name
# for row in session.query(User.name.label('full_label')).all():
#     print row.full_label
# user_alias = aliased(User, name='user_alias')
# for row in session.query(user_alias, user_alias.name).all():
#     print row.user_alias
# for u in session.query(User).order_by(User.id)[1:2]:
#     print u
# for u in session.query(User).filter(User.name=='Chloeoh').\
#     filter(User.fullname!='Broofus Tinker'):
#     print u
# for u in session.query(User).filter(User.name.like('Leia')).\
#     filter(User.fullname!='Broofus Tinker'):
#     print u
# for u in session.query(User).filter(User.name.in_(['Evan', 'Chance'])).\
#     filter(User.fullname!='Broofus Tinker'):
#     print 'Boy',u
# for u in session.query(User).filter((User.name=='Leia') | (User.name=='Chloeoh')):
#     print u.fullname
# u = session.query(User).filter(User.name.match('Chloeoh'))

# --- one()/first() result-count handling (Python 2 except syntax) ----------
query = session.query(User).filter(User.fullname.like('%Chloe%')).order_by(User.name)
# print query.all()
# for member in query:
#     print member.name
# print query.first()
try:
    user = query.one()
    userid = user.id
    msg = ''
except MultipleResultsFound, e:
    user = query.first()
    userid = user.id
    msg = 'Multiple Results Found'
except NoResultFound,e:
    userid = 0
    msg = 'No Results Found'
print 'UserID:', userid,msg

# --- Relationship demo: a user with two addresses --------------------------
jack = User(name="jack", fullname="Jack Bean", password="gjffdd")
jack.addresses = [
    Address(email_address='jack@google.com'),
    Address(email_address='j25@yahoo.com')
]
print jack.addresses[1]
print jack.addresses[1].user
session.add(jack)
session.commit()
jack = session.query(User).\
    filter_by(name="jack").one()
# jack.addresses is a lazy load of the address information because of the backref
print jack, jack.addresses

# --- Join variants: implicit (filter), explicit join, and ON clause --------
for u, a in session.query(User, Address) \
        .filter(User.id==Address.user_id) \
        .filter(Address.email_address==\
            'jack@google.com')\
        .all():
    print u
    print a
qry = session.query(User).join(Address) \
    .filter(Address.email_address=='jack@google.com') \
    .all()
print qry
qry = query.join(Address, User.id==Address.user_id)
print qry.all()

# Using Aliases to query a table twice
adalias1 = aliased(Address)
adalias2 = aliased(Address)
for username, email1, email2 in \
        session.query(User.name, \
            adalias1.email_address, \
            adalias2.email_address, ) \
        .join(adalias1, User.addresses) \
        .join(adalias2, User.addresses) \
        .filter(adalias1.email_address=='jack@google.com') \
        .filter(adalias2.email_address=='j25@yahoo.com'):
    print username, email1, email2
|
micknh/EdFirst
|
relationships.py
|
Python
|
mit
| 5,104
|
import sys
import os
import os.path
import logging
import linecache
import time
#sys.path.remove('/usr/lib/python2.7/site-packages') # for testing: exercise bundled dependencies only
# Make the vendored dependency folder and the local 'payfi' package importable
# ahead of the system site-packages.
custom_path = '{0}/{1}'
sys.path.insert(1, custom_path.format(os.getcwd(), 'depen_packages'))
sys.path.insert(1, custom_path.format(os.getcwd(), 'payfi'))
#print sys.path
import yaml
from payfi import download_chunks as dw
from payfi.watchdog.observers import Observer
from payfi.watchdog.observers import Observer  # NOTE(review): duplicate import; harmless but removable
from payfi.watchdog.events import LoggingEventHandler
# Config and log file names. NOTE(review): '.yalm' looks like a typo for
# '.yaml', but the on-disk file presumably uses this exact name — confirm
# before renaming.
filename = 'db_listener_config.yalm'
log_path = 'db_log.log'
def __load_config_file(file_name):
    """Load variables and constants from a YAML config file into module globals.

    Every top-level key of the config file becomes a module-level global.
    The list-valued settings used for membership tests elsewhere in this
    module are normalised to tuples.

    :param file_name: str, config file name
    :return: None
    :raises IOError: if the config file cannot be opened (handled by caller)
    """
    with open(file_name) as f:
        # The 'with' statement closes the file automatically; the explicit
        # f.close() the original carried here was redundant.
        data_map = yaml.safe_load(f)
    globals().update(data_map)
    # Freeze the lookup collections as tuples so they are immutable and can
    # be used directly with `in` / startswith-style checks.
    for key in ('file_ext', 'register', 'charge', 'success', 'fail'):
        globals()[key] = tuple(globals()[key])
class MySLoggingEventHandler1(LoggingEventHandler):
    # Watchdog handler that reacts to files dropped into the watched folder.
    # Relies on module globals loaded from the YAML config by
    # __load_config_file: file_ext, register, fail, forward_path.
    def on_created(self, event):
        # Triggered by watchdog for every newly created file under the
        # watched path.  The file is expected to contain three lines:
        # operation type, operation result, terminal id (see getline calls).
        print 'file has been created {0}'.format(event.src_path)
        file_format = event.src_path[event.src_path.rfind('.'):] # extension from the last dot (includes the dot)
        if file_format in file_ext:
            t_op = linecache.getline(event.src_path, 1).rstrip() # removes '\n'; line 1 = operation type
            res = linecache.getline(event.src_path, 2).rstrip() # line 2 = operation result success/fail
            id_ter = linecache.getline(event.src_path, 3).rstrip() # line 3 = terminal id
            print res
            # IMPORTANT: clear the cache, otherwise will read always the same values that come from an individual terminal
            linecache.clearcache()
            if t_op in register and res in fail: #'FAILURE':
                print 'delete inserted fingerprint needed pin and img_filename.. to do'
            # Forward a response file into the terminal's own folder.
            try:
                destiny_path = id_ter + file_format
                # in case terminal_id folder does not exist, create it
                dir_name = os.path.dirname(forward_path.format(id_ter, destiny_path)) # get destiny folder id
                if not os.path.exists(dir_name): # in case does not exist, create it
                    os.mkdir(dir_name) # 0o777
                # Epoch truncated to whole seconds (drop the '.fraction').
                epoch = str(time.time())
                dw.prepare_client_file(forward_path.format(id_ter, destiny_path), (epoch[:epoch.index('.')], res))
            except OSError:
                logging.error('destiny path is a directory, not a file')
            os.remove(event.src_path) # remove always everything
if __name__ == "__main__":
    # Initialise the logger before anything else so config errors are recorded.
    logging.basicConfig(filename=log_path, format='%(asctime)s - %(levelname)s: (%(module)s) %(funcName)s - '
                        '%(message)s ', level=logging.ERROR) # logging.WARNING
    # Load config file; on error, log it and close the program.
    try:
        __load_config_file(filename)
        print 'watchdog_db has been initialized'
        #print globals() # just for tests
    except IOError:
        logging.error('Could not read config file')
        raise SystemExit
    # Path to watch: CLI argument wins, otherwise db_listening_path from the
    # loaded config (set as a global by __load_config_file).
    path = sys.argv[1] if len(sys.argv) > 1 else db_listening_path # watchdog
    event_handler = MySLoggingEventHandler1() # LoggingEventHandler()
    observer = Observer()
    observer.schedule(event_handler, path, recursive=True)
    observer.start()
    # Keep the main thread alive; the observer runs in its own thread.
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
|
rnov/Fingerpay
|
work_unit/watchfile_db.py
|
Python
|
mit
| 3,816
|
import json
import os
from django.conf import settings
from django.core.management.base import BaseCommand
import commonware.log
import amo
from applications.models import Application, AppVersion
log = commonware.log.getLogger('z.cron')
# The validator uses the file created here to keep up to date with the
# apps and versions on AMO.
class Command(BaseCommand):
    """Dump a JSON file mapping AMO application ids to guid/name/versions.

    The validator consumes the file written to ``JSON_PATH`` to stay in sync
    with the apps and versions known to AMO.
    """
    help = 'Dump a json file containing AMO apps and versions.'
    JSON_PATH = os.path.join(settings.NETAPP_STORAGE, 'apps.json')
    def handle(self, *args, **kw):
        # Seed one record per application; the versions lists are filled below.
        apps = dict(
            (app_id, dict(guid=guid, versions=[],
                          name=amo.APPS_ALL[app_id].short))
            for app_id, guid in Application.objects.values_list('id', 'guid'))
        # Versions ordered by their sortable integer form so each list is sorted.
        version_rows = (AppVersion.objects
                        .values_list('application', 'version')
                        .order_by('version_int'))
        for app_id, version in version_rows:
            apps[app_id]['versions'].append(version)
        with open(self.JSON_PATH, 'w') as f:
            json.dump(apps, f)
            log.debug("Wrote: %s" % f.name)
|
jbalogh/zamboni
|
apps/applications/management/commands/dump_apps.py
|
Python
|
bsd-3-clause
| 1,065
|
import jsonpickle
import json
import xlwings as xw
DEBUG = False
# Laplace-style smoothing constant used in doTest's probability estimates.
SMOOTH_E = 0.1
# Ground-truth labels, one per test instance; trailing '' from the final
# newline is dropped with [:-1].
# NOTE(review): this file handle is never closed (the others below are).
testdata_results_file = open("data/intrusion.testlabels.categorized", "r")
testdata_results_str = testdata_results_file.read()
testdata_results = testdata_results_str.split("\n")
testdata_results = testdata_results[:-1]
print("Reading attacktypes from file...")
attacktypes_file = open("data/attacktypes.list", "r")
attacktypes_str = attacktypes_file.read()
attacktypes = attacktypes_str.split("\n")
attacktypes = attacktypes[:-1]
# Build the ordered set of distinct attack categories (second column of each
# space-separated line) while also splitting each row in place.
attacktypes_set = []
for idx, item in enumerate(attacktypes):
    item = item.split(" ")
    if item[1] not in attacktypes_set:
        attacktypes_set.append(item[1])
    attacktypes[idx] = item
attacktypes_file.close()
print("Loading processed test data")
# Pre-processed (k-means discretised) test instances, stored as JSON.
testdata_processed_file = open("processed_data/intrusion.kmeanstestdataprocessed", "r")
testdata = json.loads(testdata_processed_file.read())
testdata_processed_file.close()
print("TESTDATA loaded...")
def loadClassifier(classifier_str):
    """Load a jsonpickle-serialised trained classifier from disk.

    :param classifier_str: path to the serialised classifier file
    :return: the decoded classifier object
    """
    print("About to load dtree")
    # 'with' guarantees the file is closed even if decoding raises
    # (the original leaked the handle on a decode error).
    with open(classifier_str, "r") as traindata_processed_file:
        trained_dtree = jsonpickle.decode(traindata_processed_file.read())
    print("trained dtree loaded")
    return trained_dtree
def calc_and_save(tree_str, confusion_matrix, offset):
    """Compute accuracy from a confusion matrix and write both to Excel.

    Writes the classifier name, the confusion matrix and the accuracy into
    'part2_results.xlsx' starting `offset` rows down, then returns the next
    free offset so successive calls stack their tables vertically.

    :param tree_str: label written next to "Name: " (the classifier path)
    :param confusion_matrix: dict-of-dicts keyed [actual][predicted]
    :param offset: first spreadsheet row offset to write at
    :return: offset advanced past this table (offset + 13)
    """
    # Correct predictions are the diagonal of the confusion matrix.
    correct = 0
    for e in attacktypes_set:
        correct = correct + confusion_matrix[e][e]
    print(((correct * 1.0) / len(testdata)) * 100)
    score = (((correct * 1.0) / len(testdata)) * 100)
    wb = xw.Book('part2_results.xlsx')
    sht = wb.sheets['Sheet1']
    sht.range('A' + str(1 + offset)).value = "Name: "
    sht.range('B' + str(1 + offset)).value = tree_str
    # Row labels (actual classes) down column A.
    for i in range(offset + 4, offset + 4 + len(attacktypes_set)):
        num = 'A' + str(i)
        sht.range(num).value = attacktypes_set[i - 4 - offset]
    # Column labels (predicted classes) across row 3+offset.
    alphas = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J']
    for i in range(1, 1 + len(attacktypes_set)):
        num = alphas[i] + str(3 + offset)
        sht.range(num).value = attacktypes_set[i - 1]
    # Matrix body: cell (i, j) = count of actual class i predicted as j.
    for i in range(offset + 4, offset + 4 + len(attacktypes_set)):
        for j in range(1, 1 + len(attacktypes_set)):
            num = alphas[j] + str(i)
            sht.range(num).value = confusion_matrix[attacktypes_set[i-4-offset]][attacktypes_set[j-1]]
    sht.range('A' + str(10 + offset)).value = "Accuracy:"
    sht.range('B' + str(10 + offset)).value = score
    offset += 13
    return offset
def doTest(classifier):
    """Classify every test instance with a Naive-Bayes model; return a confusion matrix.

    The classifier appears to be a 4-element structure (confirm against the
    training code): classifier[0][class][attr] maps attribute value -> count,
    classifier[1][class][attr] is the per-class/attribute total count,
    classifier[2][class] is the class count, classifier[3] the overall total.

    :param classifier: trained model as loaded by loadClassifier
    :return: dict-of-dicts confusion matrix keyed [actual_label][predicted_label]
    """
    # Initialise an all-zero square matrix over the known attack classes.
    confusion_matrix = {}
    for i in attacktypes_set:
        confusion_matrix[i] = {}
        for j in attacktypes_set:
            confusion_matrix[i][j] = 0
    for i in range(len(testdata)):
        prob_class = {}
        # Score each candidate class: product of smoothed per-attribute
        # likelihoods times the class prior.
        for e in attacktypes_set:
            prob = 1.0
            attrs = testdata[i]
            for j in range(len(attrs)):
                # if(i > 1117000):
                #     print("~~~~~~~~~~~~~")
                #     print ("e", e)
                #     print ("j", j)
                #     print ("attrs", attrs)
                #     print("classifier[0][e][j]", classifier[0][str(e)][j])
                if(str(attrs[j]) in classifier[0][e][j].keys()):
                    # Seen attribute value: smoothed relative frequency.
                    # print("~~~~~~~~~~~~~~~~~~~~~~~~")
                    # print((classifier[0][e][j][str(attrs[j])] * 1.0))
                    # print((classifier[1][e][j] * 1.0))
                    prob = prob * (((classifier[0][e][j][str(attrs[j])] * 1.0) + SMOOTH_E) / ((classifier[1][e][j] * 1.0) + (SMOOTH_E * len(classifier[0][e][j].keys()))))
                    #print(prob)
                else:
                    # Unseen attribute value: smoothing mass only.
                    prob = prob * ((SMOOTH_E * 1.0) / (classifier[1][e][j] + (SMOOTH_E * len(classifier[0][e][j].keys()))))
            # Multiply in the class prior.
            prob = prob * ((classifier[2][str(e)] * 1.0) / classifier[3])
            prob_class[str(e)] = prob
        # argmax over class posteriors (-1.0 is safe: probabilities are >= 0).
        cur_max = -1.0
        max_class = None
        for k in prob_class.keys():
            if(prob_class[k] > cur_max):
                cur_max = prob_class[k]
                max_class = k
        result = max_class
        # Row = ground-truth label, column = predicted label.
        confusion_matrix[testdata_results[i]][result] = confusion_matrix[testdata_results[i]][result] + 1
        #print(prob_class)
        if(i % 1000 == 0):
            print("Done testing " + str(i) + " instances...")
    print(confusion_matrix)
    return confusion_matrix
# Driver: load the trained classifier, evaluate it over the test set, and
# write the confusion matrix + accuracy into the results spreadsheet.
offset = 0
# Part 1 Testing
classifier_str = "part_2/processed_classifier/intrusion.trainedclassifier"
c_matrix = doTest(loadClassifier(classifier_str))
offset = calc_and_save(classifier_str, c_matrix, offset)
|
maher460/cmu10601
|
hw3/code/part2_validator.py
|
Python
|
mit
| 4,069
|
# Copyright 2018 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from adapt.engine import IntentDeterminationEngine
from adapt.intent import IntentBuilder
__author__ = 'seanfitz'
class IntentEngineTests(unittest.TestCase):
    """Unit tests for adapt's IntentDeterminationEngine.

    Covers registration of parsers and entities (plain and regex), best-intent
    selection, and the drop_* APIs for removing parsers/entities, including
    custom match-function based removal.
    """
    def setUp(self):
        # Fresh engine per test so registrations don't leak between cases.
        self.engine = IntentDeterminationEngine()
    def testRegisterIntentParser(self):
        """Rejecting a non-parser raises ValueError; a built parser registers."""
        assert len(self.engine.intent_parsers) == 0
        try:
            self.engine.register_intent_parser("NOTAPARSER")
            # Reached only if the bad registration did NOT raise.
            assert "Did not fail to register invalid intent parser" and False
        except ValueError as e:
            pass
        parser = IntentBuilder("Intent").build()
        self.engine.register_intent_parser(parser)
        assert len(self.engine.intent_parsers) == 1
    def testRegisterRegexEntity(self):
        """Registering one regex populates both internal regex collections."""
        assert len(self.engine._regex_strings) == 0
        assert len(self.engine.regular_expressions_entities) == 0
        self.engine.register_regex_entity(".*")
        assert len(self.engine._regex_strings) == 1
        assert len(self.engine.regular_expressions_entities) == 1
    def testSelectBestIntent(self):
        """The parser matching more required entities wins intent selection."""
        parser1 = IntentBuilder("Parser1").require("Entity1").build()
        self.engine.register_intent_parser(parser1)
        self.engine.register_entity("tree", "Entity1")
        utterance = "go to the tree house"
        intent = next(self.engine.determine_intent(utterance))
        assert intent
        assert intent['intent_type'] == 'Parser1'
        # Parser2 requires both entities and should now outrank Parser1.
        parser2 = IntentBuilder("Parser2").require("Entity1").require("Entity2").build()
        self.engine.register_intent_parser(parser2)
        self.engine.register_entity("house", "Entity2")
        intent = next(self.engine.determine_intent(utterance))
        assert intent
        assert intent['intent_type'] == 'Parser2'
    def testDropIntent(self):
        """After drop_intent_parser, the remaining parser matches again."""
        parser1 = IntentBuilder("Parser1").require("Entity1").build()
        self.engine.register_intent_parser(parser1)
        self.engine.register_entity("tree", "Entity1")
        parser2 = (IntentBuilder("Parser2").require("Entity1")
                   .require("Entity2").build())
        self.engine.register_intent_parser(parser2)
        self.engine.register_entity("house", "Entity2")
        utterance = "go to the tree house"
        intent = next(self.engine.determine_intent(utterance))
        assert intent
        assert intent['intent_type'] == 'Parser2'
        assert self.engine.drop_intent_parser('Parser2') is True
        intent = next(self.engine.determine_intent(utterance))
        assert intent
        assert intent['intent_type'] == 'Parser1'
    def testDropEntity(self):
        """drop_entity removes ALL words for a type; re-registering restores one."""
        parser1 = IntentBuilder("Parser1").require("Entity1").build()
        self.engine.register_intent_parser(parser1)
        self.engine.register_entity("laboratory", "Entity1")
        self.engine.register_entity("lab", "Entity1")
        utterance = "get out of my lab"
        utterance2 = "get out of my laboratory"
        intent = next(self.engine.determine_intent(utterance))
        assert intent
        assert intent['intent_type'] == 'Parser1'
        intent = next(self.engine.determine_intent(utterance2))
        assert intent
        assert intent['intent_type'] == 'Parser1'
        # Remove Entity and re-register laboratory and make sure only that
        # matches.
        self.engine.drop_entity(entity_type='Entity1')
        self.engine.register_entity("laboratory", "Entity1")
        # Sentence containing lab should not produce any results
        with self.assertRaises(StopIteration):
            intent = next(self.engine.determine_intent(utterance))
        # But sentence with laboratory should
        intent = next(self.engine.determine_intent(utterance2))
        assert intent
        assert intent['intent_type'] == 'Parser1'
    def testCustomDropEntity(self):
        """drop_entity(match_func=...) removes every entity the matcher accepts."""
        parser1 = (IntentBuilder("Parser1").one_of("Entity1", "Entity2")
                   .build())
        self.engine.register_intent_parser(parser1)
        self.engine.register_entity("laboratory", "Entity1")
        self.engine.register_entity("lab", "Entity2")
        utterance = "get out of my lab"
        utterance2 = "get out of my laboratory"
        intent = next(self.engine.determine_intent(utterance))
        assert intent
        assert intent['intent_type'] == 'Parser1'
        intent = next(self.engine.determine_intent(utterance2))
        assert intent
        assert intent['intent_type'] == 'Parser1'
        def matcher(data):
            # data[1] holds the entity type name (e.g. 'Entity1').
            return data[1].startswith('Entity')
        self.engine.drop_entity(match_func=matcher)
        self.engine.register_entity("laboratory", "Entity1")
        # Sentence containing lab should not produce any results
        with self.assertRaises(StopIteration):
            intent = next(self.engine.determine_intent(utterance))
        # But sentence with laboratory should
        intent = next(self.engine.determine_intent(utterance2))
        assert intent
    def testDropRegexEntity(self):
        """drop_regex_entity(entity_type=...) removes only that named group."""
        self.engine.register_regex_entity(r"the dog (?P<Dog>.*)")
        self.engine.register_regex_entity(r"the cat (?P<Cat>.*)")
        assert len(self.engine._regex_strings) == 2
        assert len(self.engine.regular_expressions_entities) == 2
        self.engine.drop_regex_entity(entity_type='Cat')
        assert len(self.engine._regex_strings) == 1
        assert len(self.engine.regular_expressions_entities) == 1
    def testCustomDropRegexEntity(self):
        """drop_regex_entity(match_func=...) removes every regex the matcher accepts."""
        self.engine.register_regex_entity(r"the dog (?P<SkillADog>.*)")
        self.engine.register_regex_entity(r"the cat (?P<SkillACat>.*)")
        self.engine.register_regex_entity(r"the mangy dog (?P<SkillBDog>.*)")
        assert len(self.engine._regex_strings) == 3
        assert len(self.engine.regular_expressions_entities) == 3
        def matcher(regexp):
            """Matcher for all match groups defined for SkillB"""
            match_groups = regexp.groupindex.keys()
            return any([k.startswith('SkillB') for k in match_groups])
        self.engine.drop_regex_entity(match_func=matcher)
        assert len(self.engine._regex_strings) == 2
        assert len(self.engine.regular_expressions_entities) == 2
    def testAddingOfRemovedRegexp(self):
        """A regex removed via drop_regex_entity can be registered again."""
        self.engine.register_regex_entity(r"the cool (?P<thing>.*)")
        def matcher(regexp):
            """Matcher for the 'thing' match group."""
            match_groups = regexp.groupindex.keys()
            return any([k.startswith('thing') for k in match_groups])
        self.engine.drop_regex_entity(match_func=matcher)
        assert len(self.engine.regular_expressions_entities) == 0
        self.engine.register_regex_entity(r"the cool (?P<thing>.*)")
        assert len(self.engine.regular_expressions_entities) == 1
    def testUsingOfRemovedRegexp(self):
        """A parser requiring a dropped regex entity yields no intents."""
        self.engine.register_regex_entity(r"the cool (?P<thing>.*)")
        parser = IntentBuilder("Intent").require("thing").build()
        self.engine.register_intent_parser(parser)
        def matcher(regexp):
            """Matcher for the 'thing' match group."""
            match_groups = regexp.groupindex.keys()
            return any([k.startswith('thing') for k in match_groups])
        self.engine.drop_regex_entity(match_func=matcher)
        assert len(self.engine.regular_expressions_entities) == 0
        utterance = "the cool cat"
        intents = [match for match in self.engine.determine_intent(utterance)]
        assert len(intents) == 0
    def testEmptyTags(self):
        # Validates https://github.com/MycroftAI/adapt/issues/114
        engine = IntentDeterminationEngine()
        engine.register_entity("Kevin",
                               "who")  # same problem if several entities
        builder = IntentBuilder("Buddies")
        builder.optionally("who")  # same problem if several entity types
        engine.register_intent_parser(builder.build())
        # An utterance matching no registered entity must yield no intents.
        intents = [i for i in engine.determine_intent("Julien is a friend")]
        assert len(intents) == 0
    def testResultsAreSortedByConfidence(self):
        """determine_intent(num_results=N) returns intents sorted by confidence."""
        self.engine.register_entity('what is', 'Query', None)
        self.engine.register_entity('weather', 'Weather', None)
        self.engine.register_regex_entity('(at|in) (?P<Location>.+)')
        self.engine.register_regex_entity('(?P<Entity>.*)')
        i = IntentBuilder("CurrentWeatherIntent").require(
            "Weather").optionally("Location").build()
        self.engine.register_intent_parser(i)
        utterance = "what is the weather like in stockholm"
        intents = [
            i for i in self.engine.determine_intent(utterance, num_results=100)
        ]
        # Confidences must be monotonically non-increasing.
        confidences = [intent.get('confidence', 0.0) for intent in intents]
        assert len(confidences) > 1
        assert all(confidences[i] >= confidences[i+1] for i in range(len(confidences)-1))
|
MycroftAI/adapt
|
test/IntentEngineTest.py
|
Python
|
apache-2.0
| 9,429
|
import select
import socket
import types
from base import SMB, NotConnectedError, SMBTimeout
from smb_structs import *
class SMBConnection(SMB):
    """A synchronous, blocking SMB/CIFS client connection.

    Wraps the protocol implementation in the SMB base class with a poll-based
    API: each public file operation registers success/error callbacks and then
    polls the socket until the operation completes or times out.
    """
    # Class-level logger shared by all connection instances.
    log = logging.getLogger('SMB.SMBConnection')
    #: SMB messages will never be signed regardless of remote server's configurations; access errors will occur if the remote server requires signing.
    SIGN_NEVER = 0
    #: SMB messages will be signed when remote server supports signing but not requires signing.
    SIGN_WHEN_SUPPORTED = 1
    #: SMB messages will only be signed when remote server requires signing.
    SIGN_WHEN_REQUIRED = 2
def __init__(self, username, password, my_name, remote_name, domain = '', use_ntlm_v2 = True, sign_options = SIGN_WHEN_REQUIRED, is_direct_tcp = False):
"""
Create a new SMBConnection instance.
*username* and *password* are the user credentials required to authenticate the underlying SMB connection with the remote server.
File operations can only be proceeded after the connection has been authenticated successfully.
Note that you need to call *connect* method to actually establish the SMB connection to the remote server and perform authentication.
The default TCP port for most SMB/CIFS servers using NetBIOS over TCP/IP is 139.
Some newer server installations might also support Direct hosting of SMB over TCP/IP; for these servers, the default TCP port is 445.
:param string my_name: The local NetBIOS machine name that will identify where this connection is originating from.
You can freely choose a name as long as it contains a maximum of 15 alphanumeric characters and does not contain spaces and any of ``\/:*?";|+``
:param string remote_name: The NetBIOS machine name of the remote server.
On windows, you can find out the machine name by right-clicking on the "My Computer" and selecting "Properties".
This parameter must be the same as what has been configured on the remote server, or else the connection will be rejected.
:param string domain: The network domain. On windows, it is known as the workgroup. Usually, it is safe to leave this parameter as an empty string.
:param boolean use_ntlm_v2: Indicates whether pysmb should be NTLMv1 or NTLMv2 authentication algorithm for authentication.
The choice of NTLMv1 and NTLMv2 is configured on the remote server, and there is no mechanism to auto-detect which algorithm has been configured.
Hence, we can only "guess" or try both algorithms.
On Sambda, Windows Vista and Windows 7, NTLMv2 is enabled by default. On Windows XP, we can use NTLMv1 before NTLMv2.
:param int sign_options: Determines whether SMB messages will be signed. Default is *SIGN_WHEN_REQUIRED*.
If *SIGN_WHEN_REQUIRED* (value=2), SMB messages will only be signed when remote server requires signing.
If *SIGN_WHEN_SUPPORTED* (value=1), SMB messages will be signed when remote server supports signing but not requires signing.
If *SIGN_NEVER* (value=0), SMB messages will never be signed regardless of remote server's configurations; access errors will occur if the remote server requires signing.
:param boolean is_direct_tcp: Controls whether the NetBIOS over TCP/IP (is_direct_tcp=False) or the newer Direct hosting of SMB over TCP/IP (is_direct_tcp=True) will be used for the communication.
The default parameter is False which will use NetBIOS over TCP/IP for wider compatibility (TCP port: 139).
"""
SMB.__init__(self, username, password, my_name, remote_name, domain, use_ntlm_v2, sign_options, is_direct_tcp)
self.sock = None
self.auth_result = None
self.is_busy = False
self.is_direct_tcp = is_direct_tcp
#
# SMB (and its superclass) Methods
#
    def onAuthOK(self):
        # Callback from the SMB layer: session authentication succeeded.
        self.auth_result = True
    def onAuthFailed(self):
        # Callback from the SMB layer: session authentication failed.
        self.auth_result = False
def write(self, data):
assert self.sock
data_len = len(data)
total_sent = 0
while total_sent < data_len:
sent = self.sock.send(data[total_sent:])
if sent == 0:
raise NotConnectedError('Server disconnected')
total_sent = total_sent + sent
#
# Misc Properties
#
    @property
    def isUsingSMB2(self):
        """A convenient property to return True if the underlying SMB connection is using SMB2 protocol."""
        # self.is_using_smb2 is presumably maintained by the SMB base class
        # during protocol negotiation — confirm in base.py.
        return self.is_using_smb2
#
# Public Methods
#
    def connect(self, ip, port = 139, sock_family = socket.AF_INET, timeout = 60):
        """
        Establish the SMB connection to the remote SMB/CIFS server.
        You must call this method before attempting any of the file operations with the remote server.
        This method will block until the SMB connection has attempted at least one authentication.
        :return: A boolean value indicating the result of the authentication attempt: True if authentication is successful; False, if otherwise.
        """
        # Tear down any previous socket before reconnecting.
        if self.sock:
            self.sock.close()
        self.auth_result = None
        self.sock = socket.socket(sock_family)
        self.sock.settimeout(timeout)
        self.sock.connect(( ip, port ))
        self.is_busy = True
        try:
            # Direct-hosted SMB skips the NetBIOS session request step.
            if not self.is_direct_tcp:
                self.requestNMBSession()
            else:
                self.onNMBSessionOK()
            # Poll until onAuthOK/onAuthFailed sets auth_result.
            while self.auth_result is None:
                self._pollForNetBIOSPacket(timeout)
        finally:
            self.is_busy = False
        return self.auth_result
def close(self):
"""
Terminate the SMB connection (if it has been started) and release any sources held by the underlying socket.
"""
if self.sock:
self.sock.close()
self.sock = None
def listShares(self, timeout = 30):
"""
Retrieve a list of shared resources on remote server.
:return: A list of :doc:`smb.base.SharedDevice<smb_SharedDevice>` instances describing the shared resource
"""
if not self.sock:
raise NotConnectedError('Not connected to server')
results = [ ]
def cb(entries):
self.is_busy = False
results.extend(entries)
def eb(failure):
self.is_busy = False
raise failure
self.is_busy = True
try:
self._listShares(cb, eb, timeout)
while self.is_busy:
self._pollForNetBIOSPacket(timeout)
finally:
self.is_busy = False
return results
def listPath(self, service_name, path,
search = SMB_FILE_ATTRIBUTE_READONLY | SMB_FILE_ATTRIBUTE_HIDDEN | SMB_FILE_ATTRIBUTE_SYSTEM | SMB_FILE_ATTRIBUTE_DIRECTORY | SMB_FILE_ATTRIBUTE_ARCHIVE,
pattern = '*', timeout = 30):
"""
Retrieve a directory listing of files/folders at *path*
:param string/unicode service_name: the name of the shared folder for the *path*
:param string/unicode path: path relative to the *service_name* where we are interested to learn about its files/sub-folders.
:param integer search: integer value made up from a bitwise-OR of *SMB_FILE_ATTRIBUTE_xxx* bits (see smb_constants.py).
The default *search* value will query for all read-only, hidden, system, archive files and directories.
:param string/unicode pattern: the filter to apply to the results before returning to the client.
:return: A list of :doc:`smb.base.SharedFile<smb_SharedFile>` instances.
"""
if not self.sock:
raise NotConnectedError('Not connected to server')
results = [ ]
def cb(entries):
self.is_busy = False
results.extend(entries)
def eb(failure):
self.is_busy = False
raise failure
self.is_busy = True
try:
self._listPath(service_name, path, cb, eb, search = search, pattern = pattern, timeout = timeout)
while self.is_busy:
self._pollForNetBIOSPacket(timeout)
finally:
self.is_busy = False
return results
    def listSnapshots(self, service_name, path, timeout = 30):
        """
        Retrieve a list of available snapshots (shadow copies) for *path*.
        Note that snapshot features are only supported on Windows Vista Business, Enterprise and Ultimate, and on all Windows 7 editions.
        :param string/unicode service_name: the name of the shared folder for the *path*
        :param string/unicode path: path relative to the *service_name* where we are interested in the list of available snapshots
        :return: A list of python *datetime.DateTime* instances in GMT/UTC time zone
        """
        if not self.sock:
            raise NotConnectedError('Not connected to server')
        results = [ ]
        def cb(entries):
            # Completion callback: collect entries and end the poll loop.
            self.is_busy = False
            results.extend(entries)
        def eb(failure):
            # Error callback: stop polling and propagate the failure.
            self.is_busy = False
            raise failure
        self.is_busy = True
        try:
            self._listSnapshots(service_name, path, cb, eb, timeout = timeout)
            while self.is_busy:
                self._pollForNetBIOSPacket(timeout)
        finally:
            self.is_busy = False
        return results
    def getAttributes(self, service_name, path, timeout = 30):
        """
        Retrieve information about the file at *path* on the *service_name*.
        :param string/unicode service_name: the name of the shared folder for the *path*
        :param string/unicode path: Path of the file on the remote server. If the file cannot be opened for reading, an :doc:`OperationFailure<smb_exceptions>` will be raised.
        :return: A :doc:`smb.base.SharedFile<smb_SharedFile>` instance containing the attributes of the file.
        """
        if not self.sock:
            raise NotConnectedError('Not connected to server')
        results = [ ]
        def cb(info):
            # Completion callback: exactly one result is expected.
            self.is_busy = False
            results.append(info)
        def eb(failure):
            # Error callback: stop polling and propagate the failure.
            self.is_busy = False
            raise failure
        self.is_busy = True
        try:
            self._getAttributes(service_name, path, cb, eb, timeout)
            while self.is_busy:
                self._pollForNetBIOSPacket(timeout)
        finally:
            self.is_busy = False
        return results[0]
    def retrieveFile(self, service_name, path, file_obj, timeout = 30):
        """
        Retrieve the contents of the file at *path* on the *service_name* and write these contents to the provided *file_obj*.
        Use *retrieveFileFromOffset()* method if you wish to specify the offset to read from the remote *path* and/or the number of bytes to write to the *file_obj*.
        :param string/unicode service_name: the name of the shared folder for the *path*
        :param string/unicode path: Path of the file on the remote server. If the file cannot be opened for reading, an :doc:`OperationFailure<smb_exceptions>` will be raised.
        :param file_obj: A file-like object that has a *write* method. Data will be written continuously to *file_obj* until EOF is received from the remote service.
        :return: A 2-element tuple of ( file attributes of the file on server, number of bytes written to *file_obj* ).
            The file attributes is an integer value made up from a bitwise-OR of *SMB_FILE_ATTRIBUTE_xxx* bits (see smb_constants.py)
        """
        # offset 0L / max_length -1L == read the entire file (Python 2 longs).
        return self.retrieveFileFromOffset(service_name, path, file_obj, 0L, -1L, timeout)
    def retrieveFileFromOffset(self, service_name, path, file_obj, offset = 0L, max_length = -1L, timeout = 30):
        """
        Retrieve the contents of the file at *path* on the *service_name* and write these contents to the provided *file_obj*.
        :param string/unicode service_name: the name of the shared folder for the *path*
        :param string/unicode path: Path of the file on the remote server. If the file cannot be opened for reading, an :doc:`OperationFailure<smb_exceptions>` will be raised.
        :param file_obj: A file-like object that has a *write* method. Data will be written continuously to *file_obj* up to *max_length* number of bytes.
        :param integer/long offset: the offset in the remote *path* where the first byte will be read and written to *file_obj*. Must be either zero or a positive integer/long value.
        :param integer/long max_length: maximum number of bytes to read from the remote *path* and write to the *file_obj*. Specify a negative value to read from *offset* to the EOF.
            If zero, the method returns immediately after the file is opened successfully for reading.
        :return: A 2-element tuple of ( file attributes of the file on server, number of bytes written to *file_obj* ).
            The file attributes is an integer value made up from a bitwise-OR of *SMB_FILE_ATTRIBUTE_xxx* bits (see smb_constants.py)
        """
        if not self.sock:
            raise NotConnectedError('Not connected to server')
        results = [ ]
        def cb(r):
            # Completion callback: keep r[1:] as the (attributes, bytes) pair;
            # r[0] is dropped (presumably the file object — confirm in base.py).
            self.is_busy = False
            results.append(r[1:])
        def eb(failure):
            # Error callback: stop polling and propagate the failure.
            self.is_busy = False
            raise failure
        self.is_busy = True
        try:
            self._retrieveFileFromOffset(service_name, path, file_obj, cb, eb, offset, max_length, timeout = timeout)
            while self.is_busy:
                self._pollForNetBIOSPacket(timeout)
        finally:
            self.is_busy = False
        return results[0]
    def storeFile(self, service_name, path, file_obj, timeout = 30):
        """
        Store the contents of the *file_obj* at *path* on the *service_name*.
        If the file already exists on the remote server, it will be truncated and overwritten.
        :param string/unicode service_name: the name of the shared folder for the *path*
        :param string/unicode path: Path of the file on the remote server. If the file at *path* does not exist, it will be created. Otherwise, it will be overwritten.
            If the *path* refers to a folder or the file cannot be opened for writing, an :doc:`OperationFailure<smb_exceptions>` will be raised.
        :param file_obj: A file-like object that has a *read* method. Data will read continuously from *file_obj* until EOF.
        :return: Number of bytes uploaded
        """
        # offset 0L with truncate=True implements the overwrite semantics.
        return self.storeFileFromOffset(service_name, path, file_obj, 0L, True, timeout)
    def storeFileFromOffset(self, service_name, path, file_obj, offset = 0L, truncate = False, timeout = 30):
        """
        Store the contents of the *file_obj* at *path* on the *service_name*.
        :param string/unicode service_name: the name of the shared folder for the *path*
        :param string/unicode path: Path of the file on the remote server. If the file at *path* does not exist, it will be created.
            If the *path* refers to a folder or the file cannot be opened for writing, an :doc:`OperationFailure<smb_exceptions>` will be raised.
        :param file_obj: A file-like object that has a *read* method. Data will read continuously from *file_obj* until EOF.
        :param offset: Long integer value which specifies the offset in the remote server to start writing. First byte of the file is 0.
        :param truncate: Boolean value. If True and the file exists on the remote server, it will be truncated first before writing. Default is False.
        :return: the file position where the next byte will be written.
        """
        if not self.sock:
            raise NotConnectedError('Not connected to server')
        results = [ ]
        def cb(r):
            # Completion callback: r[1] is the next write position
            # (r[0] is presumably the file object — confirm in base.py).
            self.is_busy = False
            results.append(r[1])
        def eb(failure):
            # Error callback: stop polling and propagate the failure.
            self.is_busy = False
            raise failure
        self.is_busy = True
        try:
            self._storeFileFromOffset(service_name, path, file_obj, cb, eb, offset, truncate = truncate, timeout = timeout)
            while self.is_busy:
                self._pollForNetBIOSPacket(timeout)
        finally:
            self.is_busy = False
        return results[0]
def deleteFiles(self, service_name, path_file_pattern, timeout = 30):
"""
Delete one or more regular files. It supports the use of wildcards in file names, allowing for deletion of multiple files in a single request.
:param string/unicode service_name: Contains the name of the shared folder.
:param string/unicode path_file_pattern: The pathname of the file(s) to be deleted, relative to the service_name.
Wildcards may be used in th filename component of the path.
If your path/filename contains non-English characters, you must pass in an unicode string.
:return: None
"""
if not self.sock:
raise NotConnectedError('Not connected to server')
def cb(r):
self.is_busy = False
def eb(failure):
self.is_busy = False
raise failure
self.is_busy = True
try:
self._deleteFiles(service_name, path_file_pattern, cb, eb, timeout = timeout)
while self.is_busy:
self._pollForNetBIOSPacket(timeout)
finally:
self.is_busy = False
    def resetFileAttributes(self, service_name, path_file_pattern, timeout = 30):
        """
        Reset file attributes of one or more regular files or folders.
        It supports the use of wildcards in file names, allowing for unlocking of multiple files/folders in a single request.
        This function is very helpful when deleting files/folders that are read-only.
        Note: this function is currently only implemented for SMB2! Technically, it sets the FILE_ATTRIBUTE_NORMAL flag, therefore clearing all other flags. (See https://msdn.microsoft.com/en-us/library/cc232110.aspx for further information)
        :param string/unicode service_name: Contains the name of the shared folder.
        :param string/unicode path_file_pattern: The pathname of the file(s) whose attributes are reset, relative to the service_name.
            Wildcards may be used in the filename component of the path.
            If your path/filename contains non-English characters, you must pass in an unicode string.
        :return: None
        """
        if not self.sock:
            raise NotConnectedError('Not connected to server')
        def cb(r):
            # Completion callback: nothing to collect, just end the poll loop.
            self.is_busy = False
        def eb(failure):
            # Error callback: stop polling and propagate the failure.
            self.is_busy = False
            raise failure
        self.is_busy = True
        try:
            self._resetFileAttributes(service_name, path_file_pattern, cb, eb, timeout = timeout)
            while self.is_busy:
                self._pollForNetBIOSPacket(timeout)
        finally:
            self.is_busy = False
def createDirectory(self, service_name, path, timeout = 30):
    """
    Create a new directory *path* on the *service_name* share.

    :param string/unicode service_name: Name of the shared folder.
    :param string/unicode path: Path of the new folder relative to the shared
        folder. Use a unicode string when the path contains non-English characters.
    :param integer timeout: Seconds to wait for each NetBIOS packet.
    :return: None
    """
    if not self.sock:
        raise NotConnectedError('Not connected to server')

    def onCreated(r):
        # Directory created; stop the polling loop below.
        self.is_busy = False

    def onFailure(failure):
        self.is_busy = False
        raise failure

    self.is_busy = True
    try:
        self._createDirectory(service_name, path, onCreated, onFailure, timeout = timeout)
        while self.is_busy:
            self._pollForNetBIOSPacket(timeout)
    finally:
        self.is_busy = False
def deleteDirectory(self, service_name, path, timeout = 30):
    """
    Delete the empty folder at *path* on the *service_name* share.

    :param string/unicode service_name: Name of the shared folder.
    :param string/unicode path: Path of the to-be-deleted folder relative to the
        shared folder. Use a unicode string when the path contains non-English characters.
    :param integer timeout: Seconds to wait for each NetBIOS packet.
    :return: None
    """
    if not self.sock:
        raise NotConnectedError('Not connected to server')

    def onDeleted(r):
        # Folder removed; stop the polling loop below.
        self.is_busy = False

    def onFailure(failure):
        self.is_busy = False
        raise failure

    self.is_busy = True
    try:
        self._deleteDirectory(service_name, path, onDeleted, onFailure, timeout = timeout)
        while self.is_busy:
            self._pollForNetBIOSPacket(timeout)
    finally:
        self.is_busy = False
def rename(self, service_name, old_path, new_path, timeout = 30):
    """
    Rename a file or folder at *old_path* to *new_path* shared at *service_name*. Note that this method cannot be used to rename file/folder across different shared folders

    *old_path* and *new_path* are string/unicode referring to the old and new path of the renamed resources (relative to) the shared folder.
    If the path contains non-English characters, an unicode string must be used to pass in the path.

    :param string/unicode service_name: Contains the name of the shared folder.
    :param integer timeout: Seconds to wait for each NetBIOS packet while polling.
    :return: None
    """
    if not self.sock:
        raise NotConnectedError('Not connected to server')

    # Callbacks flip is_busy so the polling loop below terminates.
    def cb(r):
        self.is_busy = False

    def eb(failure):
        self.is_busy = False
        raise failure

    self.is_busy = True
    try:
        # NOTE(review): unlike the sibling methods, *timeout* is not forwarded
        # to _rename here -- presumably _rename does not accept one; confirm
        # against the underlying SMB protocol implementation.
        self._rename(service_name, old_path, new_path, cb, eb)
        while self.is_busy:
            self._pollForNetBIOSPacket(timeout)
    finally:
        self.is_busy = False
def echo(self, data, timeout = 10):
    """
    Send an echo command containing *data* to the remote SMB/CIFS server.
    The remote server replies with the same *data*.

    :param string data: Data to send to the remote server.
    :param integer timeout: Seconds to wait for each NetBIOS packet.
    :return: The *data* parameter as echoed back by the server.
    """
    if not self.sock:
        raise NotConnectedError('Not connected to server')

    replies = [ ]

    def onEcho(r):
        # Capture the server's reply and stop the polling loop below.
        self.is_busy = False
        replies.append(r)

    def onFailure(failure):
        self.is_busy = False
        raise failure

    self.is_busy = True
    try:
        self._echo(data, onEcho, onFailure)
        while self.is_busy:
            self._pollForNetBIOSPacket(timeout)
    finally:
        self.is_busy = False

    return replies[0]
#
# Protected Methods
#
def _pollForNetBIOSPacket(self, timeout):
    """
    Read exactly one NetBIOS session packet from the socket and feed the
    raw bytes (header + payload) to self.feedData().

    The 4-byte NetBIOS session header is read first and unpacked as
    '>BBH' (packet type, flags, 16-bit length). If bit 0 of the flags
    byte is set, the 17th bit of the length is set (payloads > 64 KiB).
    The payload of that length is then read.

    :param timeout: overall deadline in seconds; SMBTimeout is raised when
                    the full packet has not arrived in time.
    """
    expiry_time = time.time() + timeout

    def read_more(count, data):
        # Append exactly *count* further bytes from the socket to *data*,
        # retrying on EINTR/EAGAIN and honouring the overall deadline.
        # (Shared by the header read and the payload read below.)
        remaining = count
        while remaining > 0:
            try:
                if expiry_time < time.time():
                    raise SMBTimeout
                ready, _, _ = select.select([ self.sock.fileno() ], [ ], [ ], timeout)
                if not ready:
                    raise SMBTimeout
                d = self.sock.recv(remaining)
                if len(d) == 0:
                    # Zero-length recv after select readiness means the peer closed.
                    raise NotConnectedError
                data = data + d
                remaining -= len(d)
            except select.error as ex:
                # On some platforms select.error carries an (errno, message) tuple.
                if type(ex) is types.TupleType:
                    if ex[0] != errno.EINTR and ex[0] != errno.EAGAIN:
                        raise ex
                else:
                    raise ex
        return data

    # NetBIOS session header: type (1 byte), flags (1 byte), length (2 bytes).
    data = read_more(4, '')
    # Bug fix: the original unpacked into a local named 'type', shadowing the
    # builtin that the except handler relies on (type(ex)), which would raise
    # "'int' object is not callable" on any select.error during the payload read.
    packet_type, flags, length = struct.unpack('>BBH', data)
    if flags & 0x01:
        # Length-extension flag: add the 17th bit to the payload length.
        length = length | 0x10000

    # Read the payload, appending to the header so feedData sees the whole packet.
    data = read_more(length, data)
    self.feedData(data)
|
Hernanarce/pelisalacarta
|
python/main-classic/lib/sambatools/smb/SMBConnection.py
|
Python
|
gpl-3.0
| 25,187
|
#!/usr/bin/env python2
from eagle import *
def changed(app, entry, value):
    """Eagle data_changed_callback: log which entry changed and its new value."""
    print "app %s, entry %s, value %r" % (app.id, entry.id, value)
# Demo window with four Entry widgets exercising the single-line, multiline
# and non-editable variants; every edit is reported through changed() above.
App(title="Entries Test",
    center=(Entry(id="single"),
            Entry(id="multi", multiline=True),
            Entry(id="non-editable",
                  label="non-editable", value="Value", editable=False),
            Entry(id="non-editable-multi",
                  label="non-editable", value="Value", editable=False,
                  multiline=True),
            ),
    data_changed_callback=changed,
    )
# Enter the eagle main loop (blocks until the window is closed).
run()
|
ramalho/eagle-py
|
tests/entries.py
|
Python
|
lgpl-2.1
| 572
|
from datetime import timedelta
from django.core.management.base import BaseCommand
from django.utils import timezone
from core.models import Claim
class Command(BaseCommand):
    """Management command that purges unauthorized claims older than a cutoff.

    Usage: ``cleanclaims <days_old> [--dry-run]``.  With ``--dry-run`` the
    matching claims are only counted and reported, not deleted.
    """
    help = 'Cleans up old unauthorized claims'

    def add_arguments(self, parser):
        # Claims older than this many days are eligible for deletion.
        parser.add_argument('days_old', type=int)
        parser.add_argument(
            '--dry-run',
            action='store_true',
            dest='dry_run',
            default=False,
            help='Do not delete the claims',
        )

    def handle(self, *args, **options):
        # Claims that were never authorized and were created before the cutoff.
        cutoff = timezone.now() - timedelta(days=options['days_old'])
        qs = Claim.objects.unauthorized().filter(created_at__lte=cutoff)
        # Fixed typo in the reported message: "unautorized" -> "unauthorized".
        self.stdout.write('Found {} unauthorized claims older than {} days for the clean up'
                          .format(qs.count(), options['days_old']))
        if not options['dry_run']:
            qs.delete()
|
dchaplinsky/badparking.in.ua
|
badparking/core/management/commands/cleanclaims.py
|
Python
|
mit
| 890
|
# This file is part of Invenio.
# Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""PERSONAL FEATURES - YOUR ALERTS"""
__revision__ = "$Id$"
import cgi
import time
from invenio.config import CFG_SITE_LANG
from invenio.legacy.dbquery import run_sql
from invenio.legacy.webuser import isGuestUser
from invenio.ext.logging import register_exception
from invenio.legacy.websession.webaccount import warning_guest_user
from invenio.legacy.webbasket.api import create_personal_baskets_selection_box
from invenio.legacy.webbasket.db_layer import check_user_owns_baskets
from invenio.base.i18n import gettext_set_language
from invenio.utils.date import convert_datestruct_to_datetext, convert_datetext_to_dategui
import invenio.legacy.template
webalert_templates = invenio.legacy.template.load('webalert')
### IMPLEMENTATION
class AlertError(Exception):
    """Raised when an alert operation fails validation (duplicate name,
    missing rights, empty name, duplicate query/basket pair, ...)."""
    pass
def check_alert_name(alert_name, uid, ln=CFG_SITE_LANG):
    """Raise AlertError if *uid* already owns an alert named *alert_name*.

    The comparison uses the stripped name, matching how alerts are stored
    by perform_add_alert().
    """
    # load the right language
    _ = gettext_set_language(ln)

    sql = """select id_query
           from user_query_basket
           where id_user=%s and alert_name=%s"""
    res = run_sql(sql, (uid, alert_name.strip()))
    if len(res) > 0:
        # Bug fix: x_name was previously wrapped in a 1-tuple, so the rendered
        # message contained the tuple repr instead of the alert name (compare
        # the plain-string form used by perform_add_alert).
        raise AlertError( _("You already have an alert named %(x_name)s.",
                            x_name='<b>' + cgi.escape(alert_name) + '</b>'))
def get_textual_query_info_from_urlargs(urlargs, ln=CFG_SITE_LANG):
    """Return nicely formatted search pattern and catalogue from urlargs of the search query.
    Suitable for 'your searches' display.

    Removed dead code: an unused ``out`` accumulator and an unreachable
    ``return out`` after the template call.
    """
    args = cgi.parse_qs(urlargs)
    return webalert_templates.tmpl_textual_query_info_from_urlargs(
             ln = ln,
             args = args,
           )
def perform_display(permanent, uid, ln=CFG_SITE_LANG):
    """display the searches performed by the current user
    input:  default permanent="n"; permanent="y" display permanent queries(most popular)
    output: list of searches in formatted html
    """
    # load the right language
    _ = gettext_set_language(ln)
    # first detect number of queries:
    nb_queries_total = 0
    nb_queries_distinct = 0
    query = "SELECT COUNT(*),COUNT(DISTINCT(id_query)) FROM user_query WHERE id_user=%s"
    res = run_sql(query, (uid,), 1)
    try:
        nb_queries_total = res[0][0]
        nb_queries_distinct = res[0][1]
    except:
        # best-effort: counters stay 0 when the query returned no rows
        pass

    # query for queries:
    params = ()
    if permanent == "n":
        # user's own queries, most recent first
        SQL_query = "SELECT DISTINCT(q.id),q.urlargs "\
                    "FROM query q, user_query uq "\
                    "WHERE uq.id_user=%s "\
                    "AND uq.id_query=q.id "\
                    "ORDER BY q.id DESC"
        params = (uid,)
    else:
        # permanent="y": site-wide predefined popular queries
        SQL_query = "SELECT q.id,q.urlargs "\
                    "FROM query q "\
                    "WHERE q.type='p'"
    query_result = run_sql(SQL_query, params)

    queries = []
    if len(query_result) > 0:
        for row in query_result :
            if permanent == "n":
                # fetch the date this user last ran the query
                res = run_sql("SELECT DATE_FORMAT(MAX(date),'%%Y-%%m-%%d %%H:%%i:%%s') FROM user_query WHERE id_user=%s and id_query=%s",
                              (uid, row[0]))
                try:
                    lastrun = res[0][0]
                except:
                    lastrun = _("unknown")
            else:
                # popular queries carry no per-user run date
                lastrun = ""
            queries.append({
                             'id' : row[0],
                             'args' : row[1],
                             'textargs' : get_textual_query_info_from_urlargs(row[1], ln=ln),
                             'lastrun' : lastrun,
                           })

    return webalert_templates.tmpl_display_alerts(
             ln = ln,
             permanent = permanent,
             nb_queries_total = nb_queries_total,
             nb_queries_distinct = nb_queries_distinct,
             queries = queries,
             guest = isGuestUser(uid),
             guesttxt = warning_guest_user(type="alerts", ln=ln)
           )
def check_user_can_add_alert(id_user, id_query):
    """Check if ID_USER has really alert adding rights on ID_QUERY
    (that is, the user made the query herself or the query is one of
    predefined `popular' queries) and return True or False
    accordingly.  Useful to filter out malicious users trying to guess
    idq URL parameter values in order to access potentially restricted
    query alerts."""
    # Predefined 'popular' queries may be alerted by anyone.
    popular = run_sql("""SELECT COUNT(*) FROM query
                          WHERE id=%s AND type='p'""", (id_query,))
    if popular and popular[0][0]:
        return True
    # Otherwise the user must have run this query herself in the past.
    own = run_sql("""SELECT COUNT(*) FROM user_query
                      WHERE id_query=%s AND id_user=%s""", (id_query, id_user))
    return bool(own and own[0][0])
def perform_input_alert(action, id_query, alert_name, frequency, notification, id_basket, uid, old_id_basket=None, ln = CFG_SITE_LANG):
    """get the alert settings
    input:  action="add" for a new alert (blank form), action="modify" for an update
            (get old values)
            id_query id the identifier of the search to be alerted
            for the "modify" action specify old alert_name, frequency of checking,
            e-mail notification and basket id.
    output: alert settings input form"""
    # load the right language
    _ = gettext_set_language(ln)
    # security check: refuse to render the form for queries the user may not alert
    if not check_user_can_add_alert(uid, id_query):
        raise AlertError(_("You do not have rights for this operation."))
    # display query information
    res = run_sql("SELECT urlargs FROM query WHERE id=%s", (id_query,))
    try:
        urlargs = res[0][0]
    except:
        # query row missing: fall back to a placeholder so the form still renders
        urlargs = "UNKNOWN"
    baskets = create_personal_baskets_selection_box(uid=uid,
                                                    html_select_box_name='idb',
                                                    selected_bskid=old_id_basket,
                                                    ln=ln)
    return webalert_templates.tmpl_input_alert(
             ln = ln,
             query = get_textual_query_info_from_urlargs(urlargs, ln = ln),
             action = action,
             frequency = frequency,
             notification = notification,
             alert_name = alert_name,
             baskets = baskets,
             old_id_basket = old_id_basket,
             id_basket = id_basket,
             id_query = id_query,
             guest = isGuestUser(uid),
             guesttxt = warning_guest_user(type="alerts", ln=ln)
           )
def check_alert_is_unique(id_basket, id_query, uid, ln=CFG_SITE_LANG ):
    """Raise AlertError when *uid* already has an alert for the given
    query/basket pair; return silently otherwise."""
    _ = gettext_set_language(ln)
    rows = run_sql("""select id_query
                      from user_query_basket
                      where id_user = %s and id_query = %s
                      and id_basket = %s""",
                   (uid, id_query, id_basket))
    if rows:
        raise AlertError(_("You already have an alert defined for the specified query and basket."))
def perform_add_alert(alert_name, frequency, notification,
                      id_basket, id_query, uid, ln = CFG_SITE_LANG):
    """add an alert to the database
    input:  the name of the new alert;
            alert frequency: 'month', 'week' or 'day';
            setting for e-mail notification: 'y' for yes, 'n' for no;
            basket identifier: 'no' for no basket;
            new basket name for this alert;
            identifier of the query to be alerted
    output: confirmation message + the list of alerts Web page"""
    # sanity check: silently refuse incomplete input
    if (None in (alert_name, frequency, notification, id_basket, id_query, uid)):
        return ''
    # load the right language
    _ = gettext_set_language(ln)
    # security check:
    if not check_user_can_add_alert(uid, id_query):
        raise AlertError(_("You do not have rights for this operation."))
    # check the alert name is not empty
    alert_name = alert_name.strip()
    if alert_name == "":
        raise AlertError(_("The alert name cannot be empty."))
    # check if the alert can be created (unique name, unique query/basket pair)
    check_alert_name(alert_name, uid, ln)
    check_alert_is_unique(id_basket, id_query, uid, ln)
    # id_basket == 0 means "no basket"; otherwise it must belong to this user
    if id_basket != 0 and not check_user_owns_baskets(uid, id_basket):
        raise AlertError( _("You are not the owner of this basket.") )

    # add a row to the alerts table: user_query_basket
    query = """INSERT INTO user_query_basket (id_user, id_query, id_basket,
                                              frequency, date_creation, date_lastrun,
                                              alert_name, notification)
               VALUES (%s,%s,%s,%s,%s,'',%s,%s)"""
    params = (uid, id_query, id_basket,
              frequency, convert_datestruct_to_datetext(time.localtime()),
              alert_name, notification)
    run_sql(query, params)
    out = _("The alert %(x_name)s has been added to your profile.", x_name='<b>' + cgi.escape(alert_name) + '</b>')
    out += perform_list_alerts(uid, ln=ln)
    return out
def perform_list_alerts(uid, ln=CFG_SITE_LANG):
    """perform_list_alerts display the list of alerts for the connected user"""
    # set variables
    out = ""

    # query the database for all of this user's alerts, joined with their
    # originating query and (optional) destination basket
    query = """ SELECT q.id, q.urlargs,
                       a.id_basket, b.name,
                       a.alert_name, a.frequency,a.notification,
                       DATE_FORMAT(a.date_creation,'%%Y-%%m-%%d %%H:%%i:%%s'),
                       DATE_FORMAT(a.date_lastrun,'%%Y-%%m-%%d %%H:%%i:%%s')
                FROM user_query_basket a LEFT JOIN query q ON a.id_query=q.id
                                         LEFT JOIN bskBASKET b ON a.id_basket=b.id
                WHERE a.id_user=%s
                ORDER BY a.alert_name ASC """
    res = run_sql(query, (uid,))
    alerts = []
    for (qry_id, qry_args,
         bsk_id, bsk_name,
         alrt_name, alrt_frequency, alrt_notification, alrt_creation, alrt_last_run) in res:
        try:
            # qry_id is NULL when the alert's query row has been deleted;
            # report it to the admin and skip this alert
            if not qry_id:
                raise StandardError("""\
Warning: I have detected a bad alert for user id %d.
It seems one of his/her alert queries was deleted from the 'query' table.
Please check this and delete it if needed.
Otherwise no problem, I'm continuing with the other alerts now.
Here are all the alerts defined by this user: %s""" % (uid, repr(res)))
            alerts.append({
                'queryid' : qry_id,
                'queryargs' : qry_args,
                'textargs' : get_textual_query_info_from_urlargs(qry_args, ln=ln),
                'userid' : uid,
                'basketid' : bsk_id,
                'basketname' : bsk_name,
                'alertname' : alrt_name,
                'frequency' : alrt_frequency,
                'notification' : alrt_notification,
                'created' : convert_datetext_to_dategui(alrt_creation),
                'lastrun' : convert_datetext_to_dategui(alrt_last_run)
                })
        except StandardError:
            register_exception(alert_admin=True)

    # link to the "add new alert" form
    out = webalert_templates.tmpl_list_alerts(ln=ln, alerts=alerts,
                                              guest=isGuestUser(uid),
                                              guesttxt=warning_guest_user(type="alerts", ln=ln))
    return out
def perform_remove_alert(alert_name, id_query, id_basket, uid, ln=CFG_SITE_LANG):
    """perform_remove_alert: remove an alert from the database
    input:  alert name
            identifier of the query;
            identifier of the basket
            uid
    output: confirmation message + the list of alerts Web page"""
    # load the right language
    _ = gettext_set_language(ln)
    # security check:
    if not check_user_can_add_alert(uid, id_query):
        raise AlertError(_("You do not have rights for this operation."))
    # set variables
    out = ""
    # silently refuse incomplete input
    if (None in (alert_name, id_query, id_basket, uid)):
        return out
    # remove a row from the alerts table: user_query_basket
    query = """DELETE FROM user_query_basket
               WHERE id_user=%s AND id_query=%s AND id_basket=%s"""
    params = (uid, id_query, id_basket)
    res = run_sql(query, params)
    # res is the number of deleted rows; 0 means nothing matched
    if res:
        out += "The alert <b>%s</b> has been removed from your profile.<br /><br />\n" % cgi.escape(alert_name)
    else:
        out += "Unable to remove alert <b>%s</b>.<br /><br />\n" % cgi.escape(alert_name)
    out += perform_list_alerts(uid, ln=ln)
    return out
def perform_update_alert(alert_name, frequency, notification, id_basket, id_query, old_id_basket, uid, ln = CFG_SITE_LANG):
    """update alert settings into the database
    input:  the name of the new alert;
            alert frequency: 'month', 'week' or 'day';
            setting for e-mail notification: 'y' for yes, 'n' for no;
            new basket identifier: 'no' for no basket;
            new basket name for this alert;
            identifier of the query to be alerted
            old identifier of the basket associated to the alert
    output: confirmation message + the list of alerts Web page"""
    out = ''
    # sanity check: silently refuse incomplete input
    if (None in (alert_name, frequency, notification, id_basket, id_query, old_id_basket, uid)):
        return out

    # load the right language
    _ = gettext_set_language(ln)

    # security check:
    if not check_user_can_add_alert(uid, id_query):
        raise AlertError(_("You do not have rights for this operation."))

    # check the alert name is not empty
    if alert_name.strip() == "":
        raise AlertError(_("The alert name cannot be empty."))

    # check if the alert can be created: fetch the current name via the
    # (uid, old basket, query) triple that identifies the existing alert
    sql = """select alert_name
            from user_query_basket
            where id_user=%s
            and id_basket=%s
            and id_query=%s"""
    try:
        old_alert_name = run_sql(sql, (uid, old_id_basket, id_query))[0][0]
    except IndexError:
        # FIXME: I18N since this technique of the below raise message,
        # since this technique (detecting old alert IDs) is not nice
        # and should be replaced some day soon.
        raise AlertError("Unable to detect old alert name.")
    if old_alert_name.strip()!="" and old_alert_name != alert_name:
        check_alert_name( alert_name, uid, ln)
    if id_basket != old_id_basket:
        check_alert_is_unique( id_basket, id_query, uid, ln)

    # update a row into the alerts table: user_query_basket
    query = """UPDATE user_query_basket
               SET alert_name=%s,frequency=%s,notification=%s,
                   date_creation=%s,date_lastrun='',id_basket=%s
               WHERE id_user=%s AND id_query=%s AND id_basket=%s"""
    params = (alert_name, frequency, notification,
              convert_datestruct_to_datetext(time.localtime()),
              id_basket, uid, id_query, old_id_basket)

    run_sql(query, params)
    # Bug fix: x_name was previously wrapped in a 1-tuple, so the rendered
    # message contained the tuple repr instead of the alert name (compare
    # the plain-string form used by perform_add_alert).
    out += _("The alert %(x_name)s has been successfully updated.", x_name="<b>" + cgi.escape(alert_name) + "</b>")
    out += "<br /><br />\n" + perform_list_alerts(uid, ln=ln)
    return out
def is_selected(var, fld):
    """Return " selected" when *var* equals *fld*, else "".

    Useful for marking the current option in HTML select boxes."""
    return " selected" if var == fld else ""
def account_list_alerts(uid, ln=CFG_SITE_LANG):
    """account_list_alerts: list alert for the account page
    input:  the user id
            language
    output: the list of alerts Web page"""
    query = """ SELECT q.id, q.urlargs, a.id_user, a.id_query,
                       a.id_basket, a.alert_name, a.frequency,
                       a.notification,
                       DATE_FORMAT(a.date_creation,'%%d %%b %%Y'),
                       DATE_FORMAT(a.date_lastrun,'%%d %%b %%Y'),
                       a.id_basket
                FROM query q, user_query_basket a
                WHERE a.id_user=%s AND a.id_query=q.id
                ORDER BY a.alert_name ASC """
    rows = run_sql(query, (uid,))
    # Only the query id (col 0) and alert name (col 5) are shown on the page.
    alerts = [{'id': row[0], 'name': row[5]} for row in rows]
    return webalert_templates.tmpl_account_list_alerts(ln=ln, alerts=alerts)
def account_list_searches(uid, ln=CFG_SITE_LANG):
    """ account_list_searches: list the searches of the user
        input:  the user id
        output: resume of the searches"""
    out = ""
    # first detect number of queries:
    nb_queries_total = 0
    res = run_sql("SELECT COUNT(*) FROM user_query WHERE id_user=%s", (uid,), 1)
    try:
        nb_queries_total = res[0][0]
    except:
        # best-effort: counter stays 0 when the query returned no rows
        pass

    # load the right language
    _ = gettext_set_language(ln)

    # summary sentence with a link to the detailed 'your alerts' page
    out += _("You have made %(x_nb)s queries. A %(x_url_open)sdetailed list%(x_url_close)s is available with a possibility to (a) view search results and (b) subscribe to an automatic email alerting service for these queries.") % {'x_nb': nb_queries_total, 'x_url_open': '<a href="../youralerts/display?ln=%s">' % ln, 'x_url_close': '</a>'}
    return out
|
zenodo/invenio
|
invenio/legacy/webalert/api.py
|
Python
|
gpl-2.0
| 18,014
|
# encoding: UTF-8
'''
本文件中实现了CTA策略引擎,针对CTA类型的策略,抽象简化了部分底层接口的功能。
关于平今和平昨规则:
1. 普通的平仓OFFSET_CLOSET等于平昨OFFSET_CLOSEYESTERDAY
2. 只有上期所的品种需要考虑平今和平昨的区别
3. 当上期所的期货有今仓时,调用Sell和Cover会使用OFFSET_CLOSETODAY,否则
会使用OFFSET_CLOSE
4. 以上设计意味着如果Sell和Cover的数量超过今日持仓量时,会导致出错(即用户
希望通过一个指令同时平今和平昨)
5. 采用以上设计的原因是考虑到vn.trader的用户主要是对TB、MC和金字塔类的平台
感到功能不足的用户(即希望更高频的交易),交易策略不应该出现4中所述的情况
6. 对于想要实现4中所述情况的用户,需要实现一个策略信号引擎和交易委托引擎分开
的定制化统结构(没错,得自己写)
'''
from __future__ import division
import traceback
import json
import os
import traceback
from collections import OrderedDict
from datetime import datetime, timedelta
from ctaBase import *
from strategy import STRATEGY_CLASS
from eventEngine import *
from vtConstant import *
from vtGateway import VtSubscribeReq, VtOrderReq, VtCancelOrderReq, VtLogData
from vtFunction import todayDate
########################################################################
class CtaEngine(object):
"""CTA策略引擎"""
settingFileName = 'CTA_setting.json'
path = os.path.abspath(os.path.dirname(__file__))
settingFileName = os.path.join(path, settingFileName)
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine):
    """Constructor: wire the CTA engine to the main engine and event engine."""
    self.mainEngine = mainEngine
    self.eventEngine = eventEngine

    # Current trading date
    self.today = todayDate()

    # Strategy instances keyed by strategy name (names must be unique)
    self.strategyDict = {}

    # Maps vtSymbol -> list of strategy objects trading that symbol
    # (used to push tick data; several strategies may share one symbol)
    self.tickStrategyDict = {}

    # Maps vtOrderID -> strategy object
    # (used to push order and trade updates back to the issuing strategy)
    self.orderStrategyDict = {}

    # Counter for locally generated stop-order IDs
    self.stopOrderCount = 0
    # stopOrderID = STOPORDERPREFIX + str(stopOrderCount)

    # Local stop orders, keyed by stopOrderID
    self.stopOrderDict = {}             # cancelled stop orders are NOT removed from this dict
    self.workingStopOrderDict = {}      # cancelled stop orders ARE removed from this dict

    # Position buffers keyed by vtSymbol
    self.posBufferDict = {}

    # Set of seen trade IDs, used to filter duplicate trade pushes
    self.tradeSet = set()

    # Engine type: live trading
    self.engineType = ENGINETYPE_TRADING

    # Register event handlers
    self.registerEvent()
#----------------------------------------------------------------------
def sendOrder(self, vtSymbol, orderType, price, volume, strategy):
    """Send a limit order on behalf of *strategy*; return the assigned vtOrderID."""
    contract = self.mainEngine.getContract(vtSymbol)

    req = VtOrderReq()
    req.symbol = contract.symbol
    req.exchange = contract.exchange
    # Snap the requested price to the contract's price tick.
    req.price = self.roundToPriceTick(contract.priceTick, price)
    req.volume = volume

    req.productClass = strategy.productClass
    req.currency = strategy.currency

    # By design the CTA engine only ever sends limit orders.
    req.priceType = PRICETYPE_LIMITPRICE

    # Map the CTA order type onto direction/offset.
    if orderType == CTAORDER_BUY:
        req.direction = DIRECTION_LONG
        req.offset = OFFSET_OPEN

    elif orderType == CTAORDER_SELL:
        req.direction = DIRECTION_SHORT

        # Only SHFE distinguishes closing today's vs. yesterday's position.
        if contract.exchange != EXCHANGE_SHFE:
            req.offset = OFFSET_CLOSE
        else:
            # Look up the cached position data for this symbol.
            posBuffer = self.posBufferDict.get(vtSymbol, None)

            # No cache available: default to plain close (yesterday's position).
            if not posBuffer:
                req.offset = OFFSET_CLOSE
            # Long position opened today: use close-today.
            elif posBuffer.longToday:
                req.offset= OFFSET_CLOSETODAY
            # Otherwise plain close.
            else:
                req.offset = OFFSET_CLOSE

    elif orderType == CTAORDER_SHORT:
        req.direction = DIRECTION_SHORT
        req.offset = OFFSET_OPEN

    elif orderType == CTAORDER_COVER:
        req.direction = DIRECTION_LONG

        # Only SHFE distinguishes closing today's vs. yesterday's position.
        if contract.exchange != EXCHANGE_SHFE:
            req.offset = OFFSET_CLOSE
        else:
            posBuffer = self.posBufferDict.get(vtSymbol, None)

            # No cache available: default to plain close.
            if not posBuffer:
                req.offset = OFFSET_CLOSE
            # Short position opened today: use close-today.
            elif posBuffer.shortToday:
                req.offset= OFFSET_CLOSETODAY
            else:
                req.offset = OFFSET_CLOSE

    vtOrderID = self.mainEngine.sendOrder(req, contract.gatewayName)    # send the order
    self.orderStrategyDict[vtOrderID] = strategy        # remember which strategy owns this order

    self.writeCtaLog(u'策略%s发送委托,%s,%s,%s@%s'
                     %(strategy.name, vtSymbol, req.direction, volume, price))

    return vtOrderID
#----------------------------------------------------------------------
def cancelOrder(self, vtOrderID):
    """Cancel a working limit order by its vtOrderID (no-op if unknown or finished)."""
    # Look the order up with the main engine.
    order = self.mainEngine.getOrder(vtOrderID)
    if not order:
        return

    # Only orders that are neither fully traded nor already cancelled can be pulled.
    if order.status == STATUS_ALLTRADED or order.status == STATUS_CANCELLED:
        return

    req = VtCancelOrderReq()
    req.symbol = order.symbol
    req.exchange = order.exchange
    req.frontID = order.frontID
    req.sessionID = order.sessionID
    req.orderID = order.orderID
    self.mainEngine.cancelOrder(req, order.gatewayName)
#----------------------------------------------------------------------
def sendStopOrder(self, vtSymbol, orderType, price, volume, strategy):
    """Register a locally simulated stop order; return its stopOrderID."""
    self.stopOrderCount += 1
    stopOrderID = STOPORDERPREFIX + str(self.stopOrderCount)

    so = StopOrder()
    so.vtSymbol = vtSymbol
    so.orderType = orderType
    so.price = price
    so.volume = volume
    so.strategy = strategy
    so.stopOrderID = stopOrderID
    so.status = STOPORDER_WAITING

    # Dispatch table mapping the CTA order type onto (direction, offset).
    directionOffsetMap = {
        CTAORDER_BUY:   (DIRECTION_LONG,  OFFSET_OPEN),
        CTAORDER_SELL:  (DIRECTION_SHORT, OFFSET_CLOSE),
        CTAORDER_SHORT: (DIRECTION_SHORT, OFFSET_OPEN),
        CTAORDER_COVER: (DIRECTION_LONG,  OFFSET_CLOSE),
    }
    pair = directionOffsetMap.get(orderType)
    if pair is not None:
        so.direction, so.offset = pair

    # Track the stop order: stopOrderDict keeps it forever,
    # workingStopOrderDict only while it is pending.
    self.stopOrderDict[stopOrderID] = so
    self.workingStopOrderDict[stopOrderID] = so

    return stopOrderID
#----------------------------------------------------------------------
def cancelStopOrder(self, stopOrderID):
    """Cancel a pending local stop order (no-op when it is no longer working)."""
    so = self.workingStopOrderDict.pop(stopOrderID, None)
    if so is not None:
        so.status = STOPORDER_CANCELLED
#----------------------------------------------------------------------
def processStopOrder(self, tick):
    """On each tick, check pending local stop orders and fire any that triggered."""
    vtSymbol = tick.vtSymbol

    # Only bother when some strategy actually trades this symbol.
    if vtSymbol in self.tickStrategyDict:
        # Walk the pending stop orders and test the trigger condition.
        # (NOTE: .values() returns a list under Python 2, so deleting
        # entries from the dict inside the loop is safe here.)
        for so in self.workingStopOrderDict.values():
            if so.vtSymbol == vtSymbol:
                longTriggered = so.direction==DIRECTION_LONG and tick.lastPrice>=so.price     # long stop triggered
                shortTriggered = so.direction==DIRECTION_SHORT and tick.lastPrice<=so.price   # short stop triggered

                if longTriggered or shortTriggered:
                    # Emulate a market order by sending a limit order at the
                    # limit-up price (buy) or limit-down price (sell).
                    if so.direction==DIRECTION_LONG:
                        price = tick.upperLimit
                    else:
                        price = tick.lowerLimit

                    so.status = STOPORDER_TRIGGERED
                    self.sendOrder(so.vtSymbol, so.orderType, price, so.volume, so.strategy)
                    del self.workingStopOrderDict[so.stopOrderID]
#----------------------------------------------------------------------
def processTickEvent(self, event):
    """Handle an incoming market tick event."""
    tick = event.dict_['data']

    # First process local stop orders (a tick may trigger them immediately).
    self.processStopOrder(tick)

    # Then push the tick to every strategy that trades this symbol.
    if tick.vtSymbol in self.tickStrategyDict:
        # Convert the vtTickData into a CtaTickData, copying every field
        # except 'datetime', which is rebuilt below.
        ctaTick = CtaTickData()
        d = ctaTick.__dict__
        for key in d.keys():
            if key != 'datetime':
                d[key] = tick.__getattribute__(key)

        # Rebuild the datetime field from the tick's date and time strings.
        ctaTick.datetime = datetime.strptime(' '.join([tick.date, tick.time]), '%Y%m%d %H:%M:%S.%f')

        # Push to each interested strategy in turn.
        l = self.tickStrategyDict[tick.vtSymbol]
        for strategy in l:
            self.callStrategyFunc(strategy, strategy.onTick, ctaTick)
#----------------------------------------------------------------------
def processOrderEvent(self, event):
    """Route an order status update to the strategy that placed the order."""
    order = event.dict_['data']
    strategy = self.orderStrategyDict.get(order.vtOrderID)
    if strategy is not None:
        self.callStrategyFunc(strategy, strategy.onOrder, order)
#----------------------------------------------------------------------
def processTradeEvent(self, event):
    """Handle a trade (fill) push."""
    trade = event.dict_['data']

    # Drop duplicate trade pushes (some gateways re-send fills).
    if trade.vtTradeID in self.tradeSet:
        return
    self.tradeSet.add(trade.vtTradeID)

    # Forward the fill to the strategy that placed the order.
    if trade.vtOrderID in self.orderStrategyDict:
        strategy = self.orderStrategyDict[trade.vtOrderID]

        # Update the strategy's net position.
        if trade.direction == DIRECTION_LONG:
            strategy.pos += trade.volume
        else:
            strategy.pos -= trade.volume

        self.callStrategyFunc(strategy, strategy.onTrade, trade)

    # Refresh the cached position data for the traded symbol,
    # creating the buffer on first use.
    if trade.vtSymbol in self.tickStrategyDict:
        posBuffer = self.posBufferDict.get(trade.vtSymbol, None)
        if not posBuffer:
            posBuffer = PositionBuffer()
            posBuffer.vtSymbol = trade.vtSymbol
            self.posBufferDict[trade.vtSymbol] = posBuffer
        posBuffer.updateTradeData(trade)
#----------------------------------------------------------------------
def processPositionEvent(self, event):
    """Push a position update into the per-symbol position buffer."""
    pos = event.dict_['data']

    # Only track symbols that some strategy actually trades.
    if pos.vtSymbol not in self.tickStrategyDict:
        return

    # Create the buffer lazily on first use.
    posBuffer = self.posBufferDict.get(pos.vtSymbol, None)
    if not posBuffer:
        posBuffer = PositionBuffer()
        posBuffer.vtSymbol = pos.vtSymbol
        self.posBufferDict[pos.vtSymbol] = posBuffer
    posBuffer.updatePositionData(pos)
#----------------------------------------------------------------------
def registerEvent(self):
    """Register this engine's handlers with the event engine."""
    handlers = (
        (EVENT_TICK, self.processTickEvent),
        (EVENT_ORDER, self.processOrderEvent),
        (EVENT_TRADE, self.processTradeEvent),
        (EVENT_POSITION, self.processPositionEvent),
    )
    for eventType, handler in handlers:
        self.eventEngine.register(eventType, handler)
#----------------------------------------------------------------------
def insertData(self, dbName, collectionName, data):
    """Insert *data* (a CtaTickData or CtaBarData) into the database as a plain dict."""
    self.mainEngine.dbInsert(dbName, collectionName, data.__dict__)
#----------------------------------------------------------------------
def loadBar(self, dbName, collectionName, days):
    """Load the last *days* days of bar data from the database as CtaBarData objects."""
    since = self.today - timedelta(days)
    rows = self.mainEngine.dbQuery(dbName, collectionName,
                                   {'datetime': {'$gte': since}})

    def rowToBar(row):
        # Rehydrate a stored document into a CtaBarData instance.
        bar = CtaBarData()
        bar.__dict__ = row
        return bar

    return [rowToBar(row) for row in rows]
#----------------------------------------------------------------------
def loadTick(self, dbName, collectionName, days):
    """Load the last *days* days of tick data from the database as CtaTickData objects."""
    since = self.today - timedelta(days)
    rows = self.mainEngine.dbQuery(dbName, collectionName,
                                   {'datetime': {'$gte': since}})

    def rowToTick(row):
        # Rehydrate a stored document into a CtaTickData instance.
        tick = CtaTickData()
        tick.__dict__ = row
        return tick

    return [rowToTick(row) for row in rows]
#----------------------------------------------------------------------
def writeCtaLog(self, content):
    """Emit a CTA-module log event carrying *content*."""
    log = VtLogData()
    log.logContent = content
    event = Event(type_=EVENT_CTA_LOG)
    event.dict_['data'] = log
    self.eventEngine.put(event)
#----------------------------------------------------------------------
def loadStrategy(self, setting):
    """Instantiate one strategy from a *setting* dict with 'name' and 'className' keys."""
    try:
        name = setting['name']
        className = setting['className']
    except:
        self.writeCtaLog(u'载入策略出错:%s' %traceback.format_exc())
        return

    # Look up the strategy class by its registered name.
    strategyClass = STRATEGY_CLASS.get(className, None)
    if not strategyClass:
        self.writeCtaLog(u'找不到策略类:%s' %className)
        return

    # Guard against duplicate strategy names.
    if name in self.strategyDict:
        self.writeCtaLog(u'策略实例重名:%s' %name)
    else:
        # Create the strategy instance.
        strategy = strategyClass(self, setting)
        self.strategyDict[name] = strategy

        # Register tick routing for the strategy's symbol.
        if strategy.vtSymbol in self.tickStrategyDict:
            l = self.tickStrategyDict[strategy.vtSymbol]
        else:
            l = []
            self.tickStrategyDict[strategy.vtSymbol] = l
        l.append(strategy)

        # Subscribe to the contract's market data.
        contract = self.mainEngine.getContract(strategy.vtSymbol)
        if contract:
            req = VtSubscribeReq()
            req.symbol = contract.symbol
            req.exchange = contract.exchange

            # Currency and product class (needed by the IB gateway) come
            # from the strategy's own attributes.
            req.currency = strategy.currency
            req.productClass = strategy.productClass

            self.mainEngine.subscribe(req, contract.gatewayName)
        else:
            self.writeCtaLog(u'%s的交易合约%s无法找到' %(name, strategy.vtSymbol))
#----------------------------------------------------------------------
def initStrategy(self, name):
    """Run a strategy's onInit callback exactly once; log on any problem."""
    strategy = self.strategyDict.get(name)
    if strategy is None:
        self.writeCtaLog(u'策略实例不存在:%s' %name)
        return
    if strategy.inited:
        self.writeCtaLog(u'请勿重复初始化策略实例:%s' %name)
        return
    strategy.inited = True
    self.callStrategyFunc(strategy, strategy.onInit)
#---------------------------------------------------------------------
def startStrategy(self, name):
"""启动策略"""
if name in self.strategyDict:
strategy = self.strategyDict[name]
if strategy.inited and not strategy.trading:
strategy.trading = True
self.callStrategyFunc(strategy, strategy.onStart)
else:
self.writeCtaLog(u'策略实例不存在:%s' %name)
#----------------------------------------------------------------------
    def stopStrategy(self, name):
        """Stop a trading strategy and cancel all of its working orders."""
        if name in self.strategyDict:
            strategy = self.strategyDict[name]
            if strategy.trading:
                strategy.trading = False
                self.callStrategyFunc(strategy, strategy.onStop)
                # Cancel every limit order this strategy has sent.
                # NOTE(review): iterating .items() while cancelOrder may
                # mutate the dict is safe on Python 2 (items() returns a
                # list); on Python 3 this would need list(...) — confirm
                # target interpreter before porting.
                for vtOrderID, s in self.orderStrategyDict.items():
                    if s is strategy:
                        self.cancelOrder(vtOrderID)
                # Cancel every local stop order placed by this strategy
                for stopOrderID, so in self.workingStopOrderDict.items():
                    if so.strategy is strategy:
                        self.cancelStopOrder(stopOrderID)
        else:
            self.writeCtaLog(u'策略实例不存在:%s' %name)
#----------------------------------------------------------------------
def saveSetting(self):
"""保存策略配置"""
with open(self.settingFileName, 'w') as f:
l = []
for strategy in self.strategyDict.values():
setting = {}
for param in strategy.paramList:
setting[param] = strategy.__getattribute__(param)
l.append(setting)
jsonL = json.dumps(l, indent=4)
f.write(jsonL)
#----------------------------------------------------------------------
    def loadSetting(self):
        """Load strategy settings from the JSON file, instantiate every
        strategy via loadStrategy, then restore saved positions."""
        with open(self.settingFileName) as f:
            l = json.load(f)
            for setting in l:
                self.loadStrategy(setting)
        self.loadPosition()
#----------------------------------------------------------------------
def getStrategyVar(self, name):
"""获取策略当前的变量字典"""
if name in self.strategyDict:
strategy = self.strategyDict[name]
varDict = OrderedDict()
for key in strategy.varList:
varDict[key] = strategy.__getattribute__(key)
return varDict
else:
self.writeCtaLog(u'策略实例不存在:' + name)
return None
#----------------------------------------------------------------------
def getStrategyParam(self, name):
"""获取策略的参数字典"""
if name in self.strategyDict:
strategy = self.strategyDict[name]
paramDict = OrderedDict()
for key in strategy.paramList:
paramDict[key] = strategy.__getattribute__(key)
return paramDict
else:
self.writeCtaLog(u'策略实例不存在:' + name)
return None
#----------------------------------------------------------------------
def putStrategyEvent(self, name):
"""触发策略状态变化事件(通常用于通知GUI更新)"""
event = Event(EVENT_CTA_STRATEGY+name)
self.eventEngine.put(event)
#----------------------------------------------------------------------
    def callStrategyFunc(self, strategy, func, params=None):
        """Invoke a strategy callback, disabling the strategy on error.

        Any exception raised by func flips the strategy back to the
        not-inited / not-trading state and logs the full traceback.
        """
        try:
            if params:
                func(params)
            else:
                func()
        except Exception:
            # Stop the strategy and mark it uninitialized
            strategy.trading = False
            strategy.inited = False
            # Emit a log entry with the traceback
            content = '\n'.join([u'策略%s触发异常已停止' %strategy.name,
                                traceback.format_exc()])
            self.writeCtaLog(content)
#----------------------------------------------------------------------
def savePosition(self):
"""保存所有策略的持仓情况到数据库"""
for strategy in self.strategyDict.values():
flt = {'name': strategy.name,
'vtSymbol': strategy.vtSymbol}
d = {'name': strategy.name,
'vtSymbol': strategy.vtSymbol,
'pos': strategy.pos}
self.mainEngine.dbUpdate(POSITION_DB_NAME, strategy.className,
d, flt, True)
content = '策略%s持仓保存成功' %strategy.name
self.writeCtaLog(content)
#----------------------------------------------------------------------
    def loadPosition(self):
        """Restore each strategy's pos from the position database.

        NOTE(review): if the query returns several documents the last one
        wins — presumably (name, vtSymbol) is unique per collection; verify.
        """
        for strategy in self.strategyDict.values():
            flt = {'name': strategy.name,
                   'vtSymbol': strategy.vtSymbol}
            posData = self.mainEngine.dbQuery(POSITION_DB_NAME, strategy.className, flt)
            for d in posData:
                strategy.pos = d['pos']
#----------------------------------------------------------------------
def roundToPriceTick(self, priceTick, price):
"""取整价格到合约最小价格变动"""
if not priceTick:
return price
newPrice = round(price/priceTick, 0) * priceTick
return newPrice
########################################################################
class PositionBuffer(object):
    """Locally maintained position cache for one symbol.

    Tracks long/short totals plus today's and yesterday's portions, kept
    in sync both from broker position snapshots and from individual fills.
    """

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        self.vtSymbol = EMPTY_STRING
        # Long side
        self.longPosition = EMPTY_INT
        self.longToday = EMPTY_INT
        self.longYd = EMPTY_INT
        # Short side
        self.shortPosition = EMPTY_INT
        self.shortToday = EMPTY_INT
        self.shortYd = EMPTY_INT

    #----------------------------------------------------------------------
    def updatePositionData(self, pos):
        """Overwrite cached totals from a broker position snapshot."""
        if pos.direction == DIRECTION_LONG:
            self.longPosition = pos.position
            self.longYd = pos.ydPosition
            self.longToday = self.longPosition - self.longYd
        else:
            self.shortPosition = pos.position
            self.shortYd = pos.ydPosition
            self.shortToday = self.shortPosition - self.shortYd

    #----------------------------------------------------------------------
    def updateTradeData(self, trade):
        """Adjust cached totals incrementally from a single fill."""
        volume = trade.volume
        if trade.direction == DIRECTION_LONG:
            if trade.offset == OFFSET_OPEN:
                # Buy-to-open adds to the long position and today's longs
                self.longPosition += volume
                self.longToday += volume
            elif trade.offset == OFFSET_CLOSETODAY:
                # Buy-to-close-today reduces the short side's today part
                self.shortPosition -= volume
                self.shortToday -= volume
            else:
                # Buy-to-close-yesterday reduces the short side's yd part
                self.shortPosition -= volume
                self.shortYd -= volume
        else:
            # Sells are the mirror image of the buy cases above
            if trade.offset == OFFSET_OPEN:
                self.shortPosition += volume
                self.shortToday += volume
            elif trade.offset == OFFSET_CLOSETODAY:
                self.longPosition -= volume
                self.longToday -= volume
            else:
                self.longPosition -= volume
                self.longYd -= volume
|
ujfjhz/vnpy
|
docker/dockerTrader/ctaStrategy/ctaEngine.py
|
Python
|
mit
| 25,335
|
"""
Django settings for model_my_watershed project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
from os import environ
from os.path import abspath, basename, dirname, join, normpath
from sys import path
# Normally you should not import ANYTHING from Django directly
# into your settings, but ImproperlyConfigured is an exception.
from django.core.exceptions import ImproperlyConfigured
from omgeo import postprocessors
def get_env_setting(setting):
    """Return the value of the given environment variable.

    Raises ImproperlyConfigured (instead of KeyError) when it is unset,
    so startup fails with a clear Django configuration error.
    """
    if setting not in environ:
        error_msg = "Set the %s env variable" % setting
        raise ImproperlyConfigured(error_msg)
    return environ[setting]
# PATH CONFIGURATION
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
# Absolute filesystem path to the top-level folder:
SITE_ROOT = dirname(DJANGO_ROOT)
# Site name:
SITE_NAME = basename(DJANGO_ROOT)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
# END PATH CONFIGURATION
# DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
# END DEBUG CONFIGURATION
# FILE STORAGE CONFIGURATION
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# END FILE STORAGE CONFIGURATION
# CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
# The Redis database at index 0 is used by Logstash/Beaver
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': 'redis://{0}:{1}/1'.format(
environ.get('MMW_CACHE_HOST', 'localhost'),
environ.get('MMW_CACHE_PORT', 6379)),
'OPTIONS': {
'PARSER_CLASS': 'redis.connection.HiredisParser',
'SOCKET_TIMEOUT': 3,
}
}
}
# Don't throw exceptions if Redis is down.
DJANGO_REDIS_IGNORE_EXCEPTIONS = True
# END CACHE CONFIGURATION
# DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': environ.get('MMW_DB_NAME', 'mmw'),
'USER': environ.get('MMW_DB_USER', 'mmw'),
'PASSWORD': environ.get('MMW_DB_PASSWORD', 'mmw'),
'HOST': environ.get('MMW_DB_HOST', 'localhost'),
'PORT': environ.get('MMW_DB_PORT', 5432),
'TEST_NAME': environ.get('DJANGO_TEST_DB_NAME', 'test_mmw')
}
}
POSTGIS_VERSION = tuple(
map(int, environ.get('DJANGO_POSTGIS_VERSION', '2.1.3').split("."))
)
# END DATABASE CONFIGURATION
# CELERY CONFIGURATION
BROKER_URL = 'redis://{0}:{1}/2'.format(
environ.get('MMW_CACHE_HOST', 'localhost'),
environ.get('MMW_CACHE_PORT', 6379))
CELERY_IMPORTS = ('celery.task.http',)
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend'
STATSD_CELERY_SIGNALS = True
# END CELERY CONFIGURATION
# LOGGING CONFIGURATION
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'INFO',
},
}
}
# END LOGGING CONFIGURATION
# GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/New_York'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# This generates false positives and is being removed
# (https://code.djangoproject.com/ticket/23469)
SILENCED_SYSTEM_CHECKS = ['1_6.W001', '1_6.W002']
# END GENERAL CONFIGURATION
# MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = environ['DJANGO_MEDIA_ROOT']
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# END MEDIA CONFIGURATION
# STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = environ['DJANGO_STATIC_ROOT']
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS # NOQA
STATICFILES_DIR = '/var/cache/mmw/static/'
STATICFILES_DIRS = (
STATICFILES_DIR,
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders # NOQA
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key should only be used for development and testing.
SECRET_KEY = get_env_setting('DJANGO_SECRET_KEY')
# END SECRET CONFIGURATION
# SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# END SITE CONFIGURATION
# FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS # NOQA
FIXTURE_DIRS = (
normpath(join(SITE_ROOT, 'fixtures')),
)
# END FIXTURE CONFIGURATION
# TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors # NOQA
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
normpath(join(SITE_ROOT, 'templates')),
)
# END TEMPLATE CONFIGURATION
# MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
# Default Django middleware.
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django_statsd.middleware.GraphiteRequestTimingMiddleware',
'django_statsd.middleware.GraphiteMiddleware',
)
# END MIDDLEWARE CONFIGURATION
# URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.urls' % SITE_NAME
# END URL CONFIGURATION
# APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.gis',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'rest_framework',
'watchman',
'registration',
)
# THIRD-PARTY CONFIGURATION
# watchman
# Disable Storage checking, to avoid creating files on S3 on every health check
WATCHMAN_CHECKS = (
'watchman.checks.caches',
'watchman.checks.databases',
)
# rest_framework
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
]
}
# registration
ACCOUNT_ACTIVATION_DAYS = 7 # One-week activation window.
REGISTRATION_AUTO_LOGIN = True # Automatically log the user in.
# Add custom authentication classes
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'apps.user.backends.ItsiAuthenticationBackend',
)
# END THIRD-PARTY CONFIGURATION
# Apps specific for this project go here.
LOCAL_APPS = (
'apps.core',
'apps.modeling',
'apps.home',
'apps.geocode',
'apps.water_balance',
'apps.user'
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# END APP CONFIGURATION
# WSGI CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = '%s.wsgi.application' % SITE_NAME
# END WSGI CONFIGURATION
# Work-around to get django-registration-redux to work with Django 1.8.
# Source: https://github.com/evonove/django-oauth-toolkit/issues/204
REGISTRATION_MANAGER_MODEL = 'registration.RegistrationManager'
REGISTRATION_PROFILE_MODEL = 'registration.RegistrationProfile'
MIGRATION_MODULES = {
'registration': 'mmw.migrations.registration',
}
OMGEO_SETTINGS = [[
'omgeo.services.EsriWGSSSL',
{
'preprocessors': [],
'postprocessors': [
postprocessors.UseHighScoreIfAtLeast(99),
postprocessors.DupePicker(
attr_dupes='match_addr',
attr_sort='locator_type',
ordered_list=['PointAddress', 'BuildingName', 'StreetAddress']
),
postprocessors.ScoreSorter(),
postprocessors.GroupBy('match_addr'),
postprocessors.GroupBy(('x', 'y')),
postprocessors.SnapPoints(distance=10)
]
}
]]
# ITSI Portal Settings
ITSI = {
'client_id': environ.get('MMW_ITSI_CLIENT_ID', 'model-my-watershed'),
'client_secret': environ.get('MMW_ITSI_SECRET_KEY', 'itsi_secret_key'),
'base_url': environ.get('MMW_ITSI_BASE_URL',
'http://learn.staging.concord.org/'),
'authorize_url': 'auth/concord_id/authorize',
'access_token_url': 'auth/concord_id/access_token',
'user_json_url': 'auth/concord_id/user.json',
}
|
lewfish/model-my-watershed
|
src/mmw/mmw/settings/base.py
|
Python
|
apache-2.0
| 10,648
|
"""
SAX driver for the pyexpat C module. This driver works with
pyexpat.__version__ == '2.22'.
"""
version = "0.20"
from xml.sax._exceptions import *
from xml.sax.handler import feature_validation, feature_namespaces
from xml.sax.handler import feature_namespace_prefixes
from xml.sax.handler import feature_external_ges, feature_external_pes
from xml.sax.handler import feature_string_interning
from xml.sax.handler import property_xml_string, property_interning_dict
# xml.parsers.expat does not raise ImportError in Jython
import sys
if sys.platform[:4] == "java":
raise SAXReaderNotAvailable("expat not available in Java", None)
del sys
try:
from xml.parsers import expat
except ImportError:
raise SAXReaderNotAvailable("expat not supported", None)
else:
if not hasattr(expat, "ParserCreate"):
raise SAXReaderNotAvailable("expat not supported", None)
from xml.sax import xmlreader, saxutils, handler
AttributesImpl = xmlreader.AttributesImpl
AttributesNSImpl = xmlreader.AttributesNSImpl
# If we're using a sufficiently recent version of Python, we can use
# weak references to avoid cycles between the parser and content
# handler, otherwise we'll just have to pretend.
try:
import _weakref
except ImportError:
def _mkproxy(o):
return o
else:
import weakref
_mkproxy = weakref.proxy
del weakref, _weakref
# --- ExpatLocator
class ExpatLocator(xmlreader.Locator):
    """Locator for use with the ExpatParser class.

    Holds only a weak proxy to the parser so the parser and the content
    handler do not form a reference cycle.
    """

    def __init__(self, parser):
        self._ref = _mkproxy(parser)

    def getColumnNumber(self):
        owner = self._ref
        if owner._parser is None:
            return None
        return owner._parser.ErrorColumnNumber

    def getLineNumber(self):
        owner = self._ref
        if owner._parser is None:
            # SAX convention: line numbering starts at 1
            return 1
        return owner._parser.ErrorLineNumber

    def getPublicId(self):
        owner = self._ref
        if owner is None:
            return None
        return owner._source.getPublicId()

    def getSystemId(self):
        owner = self._ref
        if owner is None:
            return None
        return owner._source.getSystemId()
# --- ExpatParser
class ExpatParser(xmlreader.IncrementalParser, xmlreader.Locator):
    """SAX driver for the pyexpat C module.

    Wraps an xml.parsers.expat parser, translating expat callbacks into
    SAX ContentHandler / DTDHandler / EntityResolver events.  Also acts
    as its own Locator while parsing.
    """
    def __init__(self, namespaceHandling=0, bufsize=2**16-20):
        xmlreader.IncrementalParser.__init__(self, bufsize)
        self._source = xmlreader.InputSource()
        # The expat parser is created lazily by reset()
        self._parser = None
        self._namespaces = namespaceHandling
        self._lex_handler_prop = None
        self._parsing = 0
        # Stack of (parser, source) saved while parsing external entities
        self._entity_stack = []
        self._external_ges = 1
        self._interning = None
    # XMLReader methods
    def parse(self, source):
        "Parse an XML document from a URL or an InputSource."
        source = saxutils.prepare_input_source(source)
        self._source = source
        self.reset()
        self._cont_handler.setDocumentLocator(ExpatLocator(self))
        xmlreader.IncrementalParser.parse(self, source)
    def prepareParser(self, source):
        """Record the document's system id as the expat base URL."""
        if source.getSystemId() != None:
            self._parser.SetBase(source.getSystemId())
    # Redefined setContentHandler to allow changing handlers during parsing
    def setContentHandler(self, handler):
        xmlreader.IncrementalParser.setContentHandler(self, handler)
        if self._parsing:
            self._reset_cont_handler()
    def getFeature(self, name):
        """Return the current value of a SAX feature flag."""
        if name == feature_namespaces:
            return self._namespaces
        elif name == feature_string_interning:
            return self._interning is not None
        elif name in (feature_validation, feature_external_pes,
                      feature_namespace_prefixes):
            # expat never supports these three; always report off
            return 0
        elif name == feature_external_ges:
            return self._external_ges
        raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
    def setFeature(self, name, state):
        """Set a SAX feature flag; only allowed before parsing starts."""
        if self._parsing:
            raise SAXNotSupportedException("Cannot set features while parsing")
        if name == feature_namespaces:
            self._namespaces = state
        elif name == feature_external_ges:
            self._external_ges = state
        elif name == feature_string_interning:
            if state:
                if self._interning is None:
                    self._interning = {}
            else:
                self._interning = None
        elif name == feature_validation:
            if state:
                raise SAXNotSupportedException(
                    "expat does not support validation")
        elif name == feature_external_pes:
            if state:
                raise SAXNotSupportedException(
                    "expat does not read external parameter entities")
        elif name == feature_namespace_prefixes:
            if state:
                raise SAXNotSupportedException(
                    "expat does not report namespace prefixes")
        else:
            raise SAXNotRecognizedException(
                "Feature '%s' not recognized" % name)
    def getProperty(self, name):
        """Return the current value of a SAX property."""
        if name == handler.property_lexical_handler:
            return self._lex_handler_prop
        elif name == property_interning_dict:
            return self._interning
        elif name == property_xml_string:
            if self._parser:
                if hasattr(self._parser, "GetInputContext"):
                    return self._parser.GetInputContext()
                else:
                    raise SAXNotRecognizedException(
                        "This version of expat does not support getting"
                        " the XML string")
            else:
                raise SAXNotSupportedException(
                    "XML string cannot be returned when not parsing")
        raise SAXNotRecognizedException("Property '%s' not recognized" % name)
    def setProperty(self, name, value):
        """Set a SAX property; lexical handler changes apply immediately."""
        if name == handler.property_lexical_handler:
            self._lex_handler_prop = value
            if self._parsing:
                self._reset_lex_handler_prop()
        elif name == property_interning_dict:
            self._interning = value
        elif name == property_xml_string:
            raise SAXNotSupportedException("Property '%s' cannot be set" %
                                           name)
        else:
            raise SAXNotRecognizedException("Property '%s' not recognized" %
                                            name)
    # IncrementalParser methods
    def feed(self, data, isFinal = 0):
        """Feed a chunk of document data to the underlying expat parser."""
        if not self._parsing:
            # First chunk: (re)create the parser and open the document
            self.reset()
            self._parsing = 1
            self._cont_handler.startDocument()
        try:
            # The isFinal parameter is internal to the expat reader.
            # If it is set to true, expat will check validity of the entire
            # document. When feeding chunks, they are not normally final -
            # except when invoked from close.
            self._parser.Parse(data, isFinal)
        except expat.error, e:
            exc = SAXParseException(expat.ErrorString(e.code), e, self)
            # FIXME: when to invoke error()?
            self._err_handler.fatalError(exc)
    def close(self):
        """Finish the document: final parse pass plus endDocument event."""
        if self._entity_stack:
            # If we are completing an external entity, do nothing here
            return
        self.feed("", isFinal = 1)
        self._cont_handler.endDocument()
        self._parsing = 0
        # break cycle created by expat handlers pointing to our methods
        self._parser = None
    def _reset_cont_handler(self):
        # Rebind PI/character callbacks to the (possibly new) content handler
        self._parser.ProcessingInstructionHandler = \
            self._cont_handler.processingInstruction
        self._parser.CharacterDataHandler = self._cont_handler.characters
    def _reset_lex_handler_prop(self):
        # Rebind or clear all lexical-handler callbacks on the expat parser
        lex = self._lex_handler_prop
        parser = self._parser
        if lex is None:
            parser.CommentHandler = None
            parser.StartCdataSectionHandler = None
            parser.EndCdataSectionHandler = None
            parser.StartDoctypeDeclHandler = None
            parser.EndDoctypeDeclHandler = None
        else:
            parser.CommentHandler = lex.comment
            parser.StartCdataSectionHandler = lex.startCDATA
            parser.EndCdataSectionHandler = lex.endCDATA
            parser.StartDoctypeDeclHandler = self.start_doctype_decl
            parser.EndDoctypeDeclHandler = lex.endDTD
    def reset(self):
        """(Re)create the expat parser and re-register every handler."""
        if self._namespaces:
            # " " separator makes expat report names as space-joined parts
            self._parser = expat.ParserCreate(self._source.getEncoding(), " ",
                                              intern=self._interning)
            self._parser.namespace_prefixes = 1
            self._parser.StartElementHandler = self.start_element_ns
            self._parser.EndElementHandler = self.end_element_ns
        else:
            self._parser = expat.ParserCreate(self._source.getEncoding(),
                                              intern = self._interning)
            self._parser.StartElementHandler = self.start_element
            self._parser.EndElementHandler = self.end_element
        self._reset_cont_handler()
        self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
        self._parser.NotationDeclHandler = self.notation_decl
        self._parser.StartNamespaceDeclHandler = self.start_namespace_decl
        self._parser.EndNamespaceDeclHandler = self.end_namespace_decl
        self._decl_handler_prop = None
        if self._lex_handler_prop:
            self._reset_lex_handler_prop()
        # self._parser.DefaultHandler =
        # self._parser.DefaultHandlerExpand =
        # self._parser.NotStandaloneHandler =
        self._parser.ExternalEntityRefHandler = self.external_entity_ref
        try:
            self._parser.SkippedEntityHandler = self.skipped_entity_handler
        except AttributeError:
            # This pyexpat does not support SkippedEntity
            pass
        self._parser.SetParamEntityParsing(
            expat.XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE)
        self._parsing = 0
        self._entity_stack = []
    # Locator methods
    def getColumnNumber(self):
        if self._parser is None:
            return None
        return self._parser.ErrorColumnNumber
    def getLineNumber(self):
        if self._parser is None:
            return 1
        return self._parser.ErrorLineNumber
    def getPublicId(self):
        return self._source.getPublicId()
    def getSystemId(self):
        return self._source.getSystemId()
    # event handlers
    def start_element(self, name, attrs):
        self._cont_handler.startElement(name, AttributesImpl(attrs))
    def end_element(self, name):
        self._cont_handler.endElement(name)
    def start_element_ns(self, name, attrs):
        # expat (with " " separator) reports a name as 1, 2 or 3
        # space-separated parts: "localname", "uri localname" (default
        # namespace) or "uri localname prefix"
        pair = name.split()
        if len(pair) == 1:
            # no namespace
            pair = (None, name)
        elif len(pair) == 3:
            pair = pair[0], pair[1]
        else:
            # default namespace
            pair = tuple(pair)
        newattrs = {}
        qnames = {}
        for (aname, value) in attrs.items():
            parts = aname.split()
            length = len(parts)
            if length == 1:
                # no namespace
                qname = aname
                apair = (None, aname)
            elif length == 3:
                qname = "%s:%s" % (parts[2], parts[1])
                apair = parts[0], parts[1]
            else:
                # default namespace
                qname = parts[1]
                apair = tuple(parts)
            newattrs[apair] = value
            qnames[apair] = qname
        self._cont_handler.startElementNS(pair, None,
                                          AttributesNSImpl(newattrs, qnames))
    def end_element_ns(self, name):
        pair = name.split()
        if len(pair) == 1:
            pair = (None, name)
        elif len(pair) == 3:
            pair = pair[0], pair[1]
        else:
            pair = tuple(pair)
        self._cont_handler.endElementNS(pair, None)
    # this is not used (call directly to ContentHandler)
    def processing_instruction(self, target, data):
        self._cont_handler.processingInstruction(target, data)
    # this is not used (call directly to ContentHandler)
    def character_data(self, data):
        self._cont_handler.characters(data)
    def start_namespace_decl(self, prefix, uri):
        self._cont_handler.startPrefixMapping(prefix, uri)
    def end_namespace_decl(self, prefix):
        self._cont_handler.endPrefixMapping(prefix)
    def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
        self._lex_handler_prop.startDTD(name, pubid, sysid)
    def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
        self._dtd_handler.unparsedEntityDecl(name, pubid, sysid, notation_name)
    def notation_decl(self, name, base, sysid, pubid):
        self._dtd_handler.notationDecl(name, pubid, sysid)
    def external_entity_ref(self, context, base, sysid, pubid):
        """Parse an external general entity with a sub-parser, then restore."""
        if not self._external_ges:
            return 1
        source = self._ent_handler.resolveEntity(pubid, sysid)
        source = saxutils.prepare_input_source(source,
                                               self._source.getSystemId() or
                                               "")
        # Save the current parser/source, parse the entity, then pop back
        self._entity_stack.append((self._parser, self._source))
        self._parser = self._parser.ExternalEntityParserCreate(context)
        self._source = source
        try:
            xmlreader.IncrementalParser.parse(self, source)
        except:
            return 0 # FIXME: save error info here?
        (self._parser, self._source) = self._entity_stack[-1]
        del self._entity_stack[-1]
        return 1
    def skipped_entity_handler(self, name, is_pe):
        if is_pe:
            # The SAX spec requires to report skipped PEs with a '%'
            name = '%'+name
        self._cont_handler.skippedEntity(name)
# ---
def create_parser(*args, **kwargs):
    """Factory used by xml.sax: return a new ExpatParser instance."""
    return ExpatParser(*args, **kwargs)
# ---
# Manual smoke test: parse a document and echo it back via XMLGenerator.
if __name__ == "__main__":
    import xml.sax
    p = create_parser()
    p.setContentHandler(xml.sax.XMLGenerator())
    p.setErrorHandler(xml.sax.ErrorHandler())
    p.parse("../../../hamlet.xml")
|
ztane/zsos
|
userland/lib/python2.5/xml/sax/expatreader.py
|
Python
|
gpl-3.0
| 14,504
|
from pipeline.compilers import SubProcessCompiler, CompilerBase
from os.path import dirname
from django.conf import settings
import subprocess
# DEPRECATED ... NOT REQUIRED. USED DJANGO-COMPRESSOR INSTEAD OF
# DJANGO-PIPELINES
class BabelCompiler(SubProcessCompiler):
    """django-pipeline compiler that turns .jsx sources into .js via babel-cli.

    DEPRECATED per the module comment: the project moved to
    django-compressor, so this compiler is no longer required.
    """

    output_extension = 'js'

    def match_file(self, path):
        """Only .jsx files are handled by this compiler."""
        return path.endswith('.jsx')

    def compile_file(self, infile, outfile, outdated=False, force=False):
        """Run babel over *infile*, writing the result to *outfile*."""
        if not (force or outdated):
            # Output is up to date and recompilation was not forced.
            return
        cfg = settings.PIPELINE
        default_babel = '{}/node_modules/babel-cli/bin/babel.js'.format(settings.DJANGO_ROOT)
        babel_cmd = [
            cfg.get('BABEL_BINARY', default_babel),
            infile,
            "--out-file",
            outfile,
            cfg.get('BABEL_ARGUMENTS', '--presets react'),
        ]
        return self.execute_command(babel_cmd, cwd=dirname(infile))
|
grvty-labs/A1-136
|
contrib/django_pipeline/compilers.py
|
Python
|
mit
| 957
|
# example.py
from collections import namedtuple

Stock = namedtuple('Stock', ['name', 'shares', 'price'])


def compute_cost(records):
    """Return the total cost (sum of shares * price) of *records*.

    Each record is a (name, shares, price) sequence unpacked into a
    Stock namedtuple for self-documenting field access.
    """
    # Start value 0.0 keeps the empty-input result a float, as before.
    return sum((s.shares * s.price for s in (Stock(*rec) for rec in records)),
               0.0)


# Some Data
records = [
    ('GOOG', 100, 490.1),
    ('ACME', 100, 123.45),
    ('IBM', 50, 91.15)
]

print(compute_cost(records))
|
tuanavu/python-cookbook-3rd
|
src/1/mapping_names_to_sequence_elements/example1.py
|
Python
|
mit
| 386
|
"""
Command based UI for the obfuscator.
"""
# OAT - Obfuscation and Analysis Tool
# Copyright (C) 2011 Andy Gurden
#
# This file is part of OAT.
#
# OAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OAT. If not, see <http://www.gnu.org/licenses/>.
from . import commandui
from . import parsecmd
from . import formatcmd
from . import explorecmd
from . import reordercmd
from . import markcmd
from . import visualisecmd
from . import branchcmd
class SolidConsole(commandui.CommandUI):
    """Solid command class.

    Wires up the interactive console: builds each command object (most
    depend on the shared parse/explore commands) and registers them.
    """
    def __init__(self):
        commandui.CommandUI.__init__(self)
        # parse is the root dependency; explore layers on top of it and
        # the remaining commands take both.
        parse = parsecmd.ParseCommand()
        explore = explorecmd.ExploreCommand(parse)
        # NOTE(review): 'format' shadows the builtin of the same name,
        # but only within this constructor's scope.
        format = formatcmd.FormatCommand(parse, explore)
        reorder = reordercmd.ReorderCommand(parse, explore)
        mark = markcmd.MarkCommand(parse, explore)
        visualise = visualisecmd.VisualiseCommand(parse, explore)
        branch = branchcmd.BranchCommand(parse, explore)
        self.add_command(parse)
        self.add_command(format)
        self.add_command(explore)
        self.add_command(reorder)
        self.add_command(mark)
        self.add_command(visualise)
        self.add_command(branch)
|
andyrooger/OAT
|
src/interactive/solidconsole.py
|
Python
|
gpl-3.0
| 1,759
|
import os
import json
from mako.lookup import TemplateLookup
globalsJson = []
beginFileText = "/// GENERATED CODE START\n/// WARNING! This code has been generated automatically. Any changes will be overwritten.\n"
endFileText = "\n\n/// GENERATED CODE END\n"
def list(words):
    """Return *words* as a comma-separated list of double-quoted strings.

    Returns "" for None or an empty sequence.

    NOTE(review): this helper shadows the builtin ``list`` in the template
    namespace; the name is kept because templates import it by name.
    """
    if words is None:
        return ""
    # join handles the separators; %s also tolerates non-string items
    return ", ".join('"%s"' % w for w in words)
def getMethodParamListN(method):
    """Build the "arg0, arg1, ..." argument list for a method's params."""
    paramCount = len(method['params'])
    return ", ".join("arg" + str(i) for i in range(paramCount))
def getParamFlags(param):
    """Encode a param's isIn/isOut/isReturn booleans as a "|"-joined string."""
    labels = (('isIn', 'in'), ('isOut', 'out'), ('isReturn', 'return'))
    return "|".join(label for key, label in labels if param[key])
def getParamTypeId(param):
    """Map a param's abstract type name to its numeric type id.

    Returns None for unknown types (preserving the original implicit
    fall-through), so callers can detect unsupported types explicitly.
    """
    # Table dispatch instead of an if/elif chain.
    return {
        'void': 0,
        'int': 3,
        'float': 4,
        'string': 5,
    }.get(param['abstractType'])
def getGlobalId(className):
    """Return the globalId of the named class from globalsJson, or -1."""
    matches = (c['globalId'] for c in globalsJson['classes']
               if c['name'] == className)
    return next(matches, -1)
def generate(ctx, template, outputPath):
    """Render *template* with ctx and write it to outputPath, wrapped in
    the generated-code banner comments."""
    with open(outputPath, 'w') as output:
        output.write(beginFileText + template.render(ctx=ctx) + endFileText)
class TemplateSet:
    """Bundle of the three templates (.hpp/.inl/.cpp) for one code kind."""

    def __init__(self, lookup, prefix):
        for attr, ext in (('declaration', '.hpp'),
                          ('inline', '.inl'),
                          ('definition', '.cpp')):
            setattr(self, attr, lookup.get_template(prefix + ext))
def run(taskList):
    """Generate C++ sources for every .cdef task, then the register files.

    taskList items are dicts with 'cdef' (path to a JSON class-definition
    file) and 'regenerate' (whether its outputs must be rewritten).
    """
    # BUGFIX: getGlobalId() reads the module-level globalsJson.  Without
    # this declaration the assignment below only bound a local, leaving
    # the module value as the empty list it is initialized to.
    global globalsJson
    scriptDir = os.path.dirname(os.path.realpath(__file__))
    mylookup = TemplateLookup(
        directories=[scriptDir],
        imports=['from generator.rcg import list, getMethodParamListN, getParamFlags, getParamTypeId']
    )
    # Several logical kinds share the plain 'object' template set.
    templates = {
        'library': TemplateSet(mylookup, 'library'),
        'object': TemplateSet(mylookup, 'object'),
        'scene': TemplateSet(mylookup, 'object'),
        'component': TemplateSet(mylookup, 'object'),
        'actor': TemplateSet(mylookup, 'object'),
        'entity': TemplateSet(mylookup, 'object'),
    }
    with open('temp/globals.cdef', 'r') as globalsFile:
        globalsContent = globalsFile.read()
        globalsJson = json.loads(globalsContent)
    for t in taskList:
        cdefPath = t['cdef']
        with open(cdefPath, 'r') as cdefFile:
            cdefContent = cdefFile.read()
        cdefList = json.loads(cdefContent)
        if cdefList == None:
            continue
        for c in cdefList:
            cName = c['name']
            cType = c['type']
            cTemplate = c['template']
            c['globals'] = globalsJson
            if t['regenerate'] == True:
                pathFmt = 'temp/generated/{0}.generated.{1}'
                templSet = templates[cTemplate]
                generate(c, templSet.declaration, pathFmt.format(cName, 'hpp'))
                generate(c, templSet.inline, pathFmt.format(cName, 'inl'))
                generate(c, templSet.definition, pathFmt.format(cName, 'cpp'))
    # If task list was empty, then nothing has changed and no need to regenerate register.cpp
    if len(taskList) > 0:
        template = mylookup.get_template('game.generated.cpp')
        generate({'globals': globalsJson}, template, 'game.generated.cpp')
        template = mylookup.get_template('game.generated.hpp')
        generate({'globals': globalsJson}, template, 'game.generated.hpp')
|
creepydragon/r2
|
tools/generator/rcg.py
|
Python
|
gpl-3.0
| 3,256
|
import argparse
# Default external helper invoked via --move-command when schedules change.
default_move_command = "/opt/quads/quads/tools/move_and_rebuild_hosts.py"
parser = argparse.ArgumentParser(description="Query current cloud for a given host")
# Primary verbs: at most one action flag may be given per invocation.
action_group = parser.add_mutually_exclusive_group()
# ---- Generic actions
# Every action flag stores its verb into the shared dest="action"; the
# mutually-exclusive action_group guarantees at most one verb per run.
action_group.add_argument(
    "--version",
    dest="action",
    action="store_const",
    const="version",
    help="Display version of QUADS",
)
action_group.add_argument(
    "--mark-broken",
    dest="action",
    action="store_const",
    const="mark_broken",
    help="Mark host as broken",
)
action_group.add_argument(
    "--mark-repaired",
    dest="action",
    action="store_const",
    const="mark_repaired",
    help="Mark broken host as repaired",
)
action_group.add_argument(
    "--retire",
    dest="action",
    action="store_const",
    const="retire",
    help="Mark host as retired",
)
action_group.add_argument(
    "--unretire",
    dest="action",
    action="store_const",
    # BUG FIX: this previously stored const="retire", which made --unretire
    # behave exactly like --retire (same dest, same const, no way for the
    # dispatcher to tell them apart). Store the distinct verb instead.
    const="unretire",
    help="Mark broken host as back in business",
)
# ---- Generic args
parser.add_argument(
    "--debug",
    action="store_true",
    default=False,
    help="Show debugging information.",
)
parser.add_argument(
    "--force",
    dest="force",
    action="store_true",
    help="Force host or cloud update when already defined",
)
parser.add_argument(
    "--dry-run",
    dest="dryrun",
    action="store_true",
    default=None,
    help="Don't update state when used with --move-hosts",
)
parser.add_argument(
    "--log-path",
    dest="logpath",
    type=str,
    default=None,
    help="Path to QUADS log file",
)
# Time-window selectors: only one way of specifying a time range per run.
# NOTE(review): --months/--year/--weeks are type=str although their help
# describes numbers — presumably converted downstream; confirm before
# tightening to type=int.
time_args = parser.add_mutually_exclusive_group()
time_args.add_argument(
    "-d",
    "--date",
    dest="datearg",
    type=str,
    default=None,
    help='date and time to query; e.g. "2016-06-01 08:00"',
)
time_args.add_argument(
    "--months",
    dest="months",
    type=str,
    default=None,
    help="Number of months for reporting scheduled assignments",
)
time_args.add_argument(
    "--year",
    dest="year",
    type=str,
    default=None,
    help="Year for reporting scheduled assignments",
)
time_args.add_argument(
    "--weeks",
    dest="weeks",
    type=str,
    default=None,
    help="Number of weeks to extend an existing schedule",
)
time_args.add_argument(
    "--now",
    dest="now",
    action="store_true",
    default=None,
    help="Now flag for use with --extend or --shrink instead of --week",
)
# ---- Object args
# Target selectors: a single host, a file listing hosts, or a cloud.
object_args = parser.add_mutually_exclusive_group()
object_args.add_argument(
    "--host",
    dest="host",
    type=str,
    default=None,
    help="Specify the host to query",
)
object_args.add_argument(
    "--host-list",
    dest="host_list",
    type=str,
    default=None,
    help="Specify file path to host list",
)
object_args.add_argument(
    "--cloud",
    dest="cloud",
    type=str,
    default=None,
    help="Specify cloud name",
)
# ---- Advanced actions
action_group.add_argument(
    "--ls-owner",
    dest="action",
    action="store_const",
    const="owner",
    help="List owners",
)
action_group.add_argument(
    "--ls-cc-users",
    dest="action",
    action="store_const",
    const="ccuser",
    help="List CC list",
)
action_group.add_argument(
    "--ls-ticket",
    dest="action",
    action="store_const",
    const="ticket",
    help="List request ticket",
)
action_group.add_argument(
    "--ls-qinq",
    dest="action",
    action="store_const",
    const="qinq",
    help="List cloud qinq state",
)
action_group.add_argument(
    "--ls-wipe",
    dest="action",
    action="store_const",
    const="wipe",
    help="List cloud wipe state",
)
action_group.add_argument(
    "--extend",
    dest="action",
    action="store_const",
    const="extend",
    help="Extend an existing schedule",
)
action_group.add_argument(
    "--shrink",
    dest="action",
    action="store_const",
    const="shrink",
    help="Shrink an existing schedule",
)
# The following actions carry their payload directly in a dedicated dest
# instead of the shared "action" verb.
action_group.add_argument(
    "--define-host",
    dest="hostresource",
    type=str,
    default=None,
    help="Define a host resource",
)
action_group.add_argument(
    "--define-host-details",
    dest="action",
    action="store_const",
    const="define_host_metadata",
    help="Define a host resource details via yaml",
)
action_group.add_argument(
    "--export-host-details",
    dest="host_metadata_export",
    type=str,
    default=None,
    # BUG FIX: the help text was copy-pasted from --log-path
    # ("Path to QUADS log file") and described the wrong flag.
    help="Path to export host details metadata yaml",
)
action_group.add_argument(
    "--define-cloud",
    dest="cloudresource",
    type=str,
    default=None,
    help="Define a cloud environment",
)
action_group.add_argument(
    "--mod-cloud",
    dest="modcloud",
    type=str,
    default=None,
    help="Modify a cloud",
)
action_group.add_argument(
    "--add-schedule",
    dest="action",
    action="store_const",
    const="add_schedule",
    help="Define a host reservation",
)
action_group.add_argument(
    "--mod-schedule",
    dest="modschedule",
    type=str,
    default=None,
    help="Modify a host reservation",
)
action_group.add_argument(
    "--add-interface",
    dest="addinterface",
    type=str,
    default=None,
    help="Define a host interface",
)
action_group.add_argument(
    "--rm-schedule",
    dest="rmschedule",
    type=str,
    default=None,
    help="Remove a host reservation",
)
action_group.add_argument(
    "--rm-interface",
    dest="rminterface",
    type=str,
    default=None,
    help="Remove a host interface",
)
action_group.add_argument(
    "--mod-interface",
    dest="modinterface",
    type=str,
    default=None,
    help="Modify a host interface",
)
action_group.add_argument(
    "--ls-hosts",
    dest="action",
    action="store_const",
    const="ls_hosts",
    help="List all hosts",
)
action_group.add_argument(
    "--ls-clouds",
    dest="action",
    action="store_const",
    const="cloud",
    help="List all clouds",
)
action_group.add_argument(
    "--rm-host",
    dest="rmhost",
    type=str,
    default=None,
    help="Remove a host",
)
action_group.add_argument(
    "--rm-cloud",
    dest="rmcloud",
    type=str,
    default=None,
    help="Remove a cloud"
)
action_group.add_argument(
    "--ls-available",
    dest="action",
    action="store_const",
    const="available",
    help="List available hosts on a specific time frame",
)
action_group.add_argument(
    "--ls-schedule",
    dest="action",
    action="store_const",
    const="schedule",
    help="List the host reservations",
)
action_group.add_argument(
    "--ls-interface",
    dest="action",
    action="store_const",
    const="interface",
    help="List the host interfaces",
)
action_group.add_argument(
    "--ls-vlan",
    dest="action",
    action="store_const",
    const="ls_vlan",
    help="List the available vlans with the clouds assigned",
)
action_group.add_argument(
    "--find-free-cloud",
    dest="action",
    action="store_const",
    const="free_cloud",
    # NOTE(review): help text is identical to --ls-available but this action
    # lists clouds, not hosts — looks like a copy-paste; confirm and reword.
    help="List available hosts on a specific time frame",
)
action_group.add_argument(
    "--report-available",
    dest="action",
    action="store_const",
    const="report_available",
    help="QUADS reporting server availability",
)
action_group.add_argument(
    "--report-scheduled",
    dest="action",
    action="store_const",
    const="report_scheduled",
    help="QUADS reporting detailed scheduled assignments",
)
action_group.add_argument(
    "--report-detailed",
    dest="action",
    action="store_const",
    const="report_detailed",
    # NOTE(review): the help strings for --report-scheduled and
    # --report-detailed appear swapped ("detailed" describes the other flag);
    # verify against the CLI handler before changing.
    help="QUADS reporting scheduled assignments",
)
action_group.add_argument(
    "--ls-broken",
    dest="action",
    action="store_const",
    const="ls_broken",
    help="List all hosts marked as broken",
)
action_group.add_argument(
    "--ls-retired",
    dest="action",
    action="store_const",
    const="ls_retired",
    help="List all hosts marked as retired",
)
# ---- Modifier args consumed by the actions above.
parser.add_argument(
    "--cloud-only",
    dest="cloudonly",
    type=str,
    default=None,
    help="Limit full report to hosts only in this cloud",
)
parser.add_argument(
    "--cloud-owner",
    dest="cloudowner",
    type=str,
    default=None,
    help="Define environment owner",
)
parser.add_argument(
    "--cc-users",
    dest="ccusers",
    type=str,
    default=None,
    help="Define environment CC list",
)
parser.add_argument(
    "--qinq",
    dest="qinq",
    type=int,
    choices=[0, 1],
    default=0,
    help="Define environment qinq state",
)
# --wipe/--no-wipe use default=SUPPRESS so the attribute is absent (rather
# than None/False) when neither flag was given.
wipe_group_args = parser.add_mutually_exclusive_group()
wipe_group_args.add_argument(
    "--no-wipe",
    dest="wipe",
    default=argparse.SUPPRESS,
    action="store_false",
    help="Define no wipe for safeguarding data after assignment",
)
wipe_group_args.add_argument(
    "--wipe",
    dest="wipe",
    default=argparse.SUPPRESS,
    action="store_true",
    help="Define wipe for reprovisioning server before assignment",
)
parser.add_argument(
    "--cloud-ticket",
    dest="cloudticket",
    type=str,
    default=None,
    help="Define environment ticket",
)
parser.add_argument(
    "--description",
    dest="description",
    type=str,
    default=None,
    help="Defined description of cloud",
)
parser.add_argument(
    "--default-cloud",
    dest="hostcloud",
    type=str,
    default=None,
    help="Defined default cloud for a host",
)
parser.add_argument(
    "--summary",
    dest="summary",
    action="store_true",
    help="Generate a summary report",
)
parser.add_argument(
    "--detail",
    dest="detail",
    action="store_true",
    help="Get additional data over the summary",
)
parser.add_argument(
    "--full-summary",
    dest="fullsummary",
    action="store_true",
    # NOTE(review): identical help text to --summary; presumably this should
    # say "full" summary — confirm and reword.
    help="Generate a summary report",
)
parser.add_argument(
    "--schedule-start",
    dest="schedstart",
    type=str,
    default=None,
    help="Schedule start date/time",
)
parser.add_argument(
    "--schedule-end",
    dest="schedend",
    type=str,
    default=None,
    help="Schedule end date/time",
)
parser.add_argument(
    "--check",
    dest="check",
    action="store_true",
    default=None,
    help="Check for cloud extension",
)
parser.add_argument(
    "--schedule-cloud",
    dest="schedcloud",
    type=str,
    default=None,
    help="Schedule cloud",
)
# Interface attribute args used with --add-interface / --mod-interface.
parser.add_argument(
    "--interface-bios-id",
    dest="ifbiosid",
    type=str,
    default=None,
    help="Interface BIOS ID name",
)
parser.add_argument(
    "--interface-mac",
    dest="ifmac",
    type=str,
    default=None,
    help="Interface MAC address",
)
parser.add_argument(
    "--interface-ip",
    dest="ifip",
    type=str,
    default=None,
    help="Interface IP address",
)
parser.add_argument(
    "--interface-port",
    dest="ifport",
    type=str,
    default=None,
    help="Switch port",
)
parser.add_argument(
    "--interface-speed",
    dest="ifspeed",
    type=str,
    default=None,
    help="Interface speed",
)
parser.add_argument(
    "--interface-vendor",
    dest="ifvendor",
    type=str,
    default=None,
    help="Interface vendor",
)
# Tri-state flags (set/unset/absent) — same SUPPRESS pattern as --wipe.
pxe_group_args = parser.add_mutually_exclusive_group()
pxe_group_args.add_argument(
    "--pxe-boot",
    dest="ifpxe",
    action="store_true",
    default=argparse.SUPPRESS,
    help="Interface pxe boot flag",
)
pxe_group_args.add_argument(
    "--no-pxe-boot",
    dest="ifpxe",
    action="store_false",
    default=argparse.SUPPRESS,
    help="Disable Interface pxe boot flag",
)
maintenance_group_args = parser.add_mutually_exclusive_group()
maintenance_group_args.add_argument(
    "--maintenance",
    dest="ifmaintenance",
    action="store_true",
    default=argparse.SUPPRESS,
    help="Interface maintenance flag",
)
maintenance_group_args.add_argument(
    "--no-maintenance",
    dest="ifmaintenance",
    action="store_false",
    default=argparse.SUPPRESS,
    help="Disable Interface maintenance flag",
)
parser.add_argument(
    "--move-hosts",
    dest="movehosts",
    action="store_true",
    default=None,
    help="Move hosts if schedule has changed",
)
parser.add_argument(
    "--move-command",
    dest="movecommand",
    type=str,
    default=default_move_command,
    help="External command to move a host",
)
parser.add_argument(
    "--host-type",
    dest="hosttype",
    type=str,
    default=None,
    help="Open-ended identifier for host: util, baremetal, aws, openstack, libvirt, etc.",
)
parser.add_argument(
    "--vlan",
    dest="vlan",
    type=int,
    default=None,
    help="VLAN id number for public routable network",
)
parser.add_argument(
    "--metadata",
    dest="metadata",
    type=str,
    default=None,
    help="Path to yml with hosts metadata",
)
parser.add_argument(
    "--filter",
    dest="filter",
    type=str,
    default=None,
    help="Filter search by host metadata",
)
if __name__ == '__main__':
    # debugging helper: running this module directly just prints the usage
    # text so the argument wiring can be inspected without the full CLI.
    parser.print_help()
|
redhat-performance/quads
|
quads/cli/parser.py
|
Python
|
gpl-3.0
| 12,769
|
# -*- coding: utf-8 -*-
#
# This file is part of Radicale Server - Calendar Server
# Copyright © 2008 Nicolas Kandel
# Copyright © 2008 Pascal Halter
# Copyright © 2008-2013 Guillaume Ayoub
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
"""
Authentication management.
"""
import sys
from .. import config, log
def load():
    """Load and activate the configured authentication manager.

    Returns the chosen auth module, or None when authentication is
    disabled.  As a side effect, this package's ``is_authenticated`` is
    replaced by the loaded module's implementation.
    """
    auth_type = config.get("auth", "type")
    log.LOGGER.debug("Authentication type is %s" % auth_type)
    if auth_type == "None":
        return None
    if auth_type == 'custom':
        # A fully-qualified module path named in the configuration.
        custom_path = config.get("auth", "custom_handler")
        __import__(custom_path)
        module = sys.modules[custom_path]
    else:
        # A built-in backend living next to this package; level=2 keeps the
        # import relative to the parent package.
        package = __import__(
            "auth.%s" % auth_type, globals=globals(), level=2)
        module = getattr(package, auth_type)
    # Override auth.is_authenticated with the backend's real check.
    sys.modules[__name__].is_authenticated = module.is_authenticated
    return module
def is_authenticated(user, password):
    """Tell whether *user*/*password* is valid.

    Default placeholder that accepts everyone (no authentication); load()
    swaps in the real implementation when an auth module is configured.
    """
    return True
|
wohnsinn2/Radicale
|
radicale/auth/__init__.py
|
Python
|
gpl-3.0
| 1,784
|
"""
Mostly equivalent to the views from django.contrib.auth.views, but
implemented as class-based views.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.auth import get_user_model, REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import (AuthenticationForm, SetPasswordForm,
PasswordChangeForm, PasswordResetForm)
from django.contrib.auth.tokens import default_token_generator
from django.contrib import auth
from django.contrib.sites.models import get_current_site
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import redirect, resolve_url
from django.utils.functional import lazy
from django.utils.http import base36_to_int, is_safe_url
from django.utils import six
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic import FormView, TemplateView, RedirectView
from authtools.forms import FriendlyPasswordResetForm
# Resolved once at import time; honors a custom AUTH_USER_MODEL setting.
User = get_user_model()
def _safe_resolve_url(url):
    """
    Resolve *url* and coerce the result to text.

    resolve_url_lazy used to fail when handed a unicode object; see
    <https://github.com/fusionbox/django-authtools/issues/13> for details.
    Thanks to GitHub user alanwj for diagnosing the problem and suggesting
    this fix.
    """
    return six.text_type(resolve_url(url))

resolve_url_lazy = lazy(_safe_resolve_url, six.text_type)
class WithCurrentSiteMixin(object):
    """Expose the current Site to templates as 'site' and 'site_name'."""

    def get_current_site(self):
        return get_current_site(self.request)

    def get_context_data(self, **kwargs):
        context = super(WithCurrentSiteMixin, self).get_context_data(**kwargs)
        site = self.get_current_site()
        context['site'] = site
        context['site_name'] = site.name
        return context
class WithNextUrlMixin(object):
    """Honor the ?next= request parameter when deciding where to redirect."""
    redirect_field_name = REDIRECT_FIELD_NAME
    success_url = None

    def get_next_url(self):
        """Return the requested redirect target, or None when absent/unsafe."""
        request = self.request
        if self.redirect_field_name not in request.REQUEST:
            return None
        candidate = request.REQUEST[self.redirect_field_name]
        if is_safe_url(candidate, host=request.get_host()):
            return candidate
        return None

    # This mixin works with both FormViews and RedirectViews; each uses a
    # different hook to compute its target URL, so both are provided.
    def get_success_url(self):
        return self.get_next_url() or super(WithNextUrlMixin, self).get_success_url()

    def get_redirect_url(self, **kwargs):
        return self.get_next_url() or super(WithNextUrlMixin, self).get_redirect_url(**kwargs)
def DecoratorMixin(decorator):
    """
    Convert a decorator written for a function view into a mixin for a
    class-based view.
    ::

        LoginRequiredMixin = DecoratorMixin(login_required)

        class MyView(LoginRequiredMixin):
            pass

        class SomeView(DecoratorMixin(some_decorator),
                       DecoratorMixin(something_else)):
            pass
    """
    class Mixin(object):
        __doc__ = decorator.__doc__

        @classmethod
        def as_view(cls, *args, **kwargs):
            inner = super(Mixin, cls).as_view(*args, **kwargs)
            return decorator(inner)

    Mixin.__name__ = str('DecoratorMixin(%s)' % decorator.__name__)
    return Mixin
# Ready-made mixins for the standard auth-view decorators.
NeverCacheMixin = DecoratorMixin(never_cache)
CsrfProtectMixin = DecoratorMixin(csrf_protect)
LoginRequiredMixin = DecoratorMixin(login_required)
# Strips password-ish POST fields from error reports/debug pages.
SensitivePostParametersMixin = DecoratorMixin(
    sensitive_post_parameters('password', 'old_password', 'password1',
                              'password2', 'new_password1', 'new_password2')
)
class AuthDecoratorsMixin(NeverCacheMixin, CsrfProtectMixin, SensitivePostParametersMixin):
    """Standard decorator stack (no-cache, CSRF, sensitive params) for auth views."""
    pass
class LoginView(AuthDecoratorsMixin, WithCurrentSiteMixin, WithNextUrlMixin, FormView):
    """Class-based replacement for django.contrib.auth.views.login."""
    form_class = AuthenticationForm
    template_name = 'registration/login.html'
    disallow_authenticated = True
    success_url = resolve_url_lazy(settings.LOGIN_REDIRECT_URL)

    def dispatch(self, *args, **kwargs):
        # Users who are already logged in are bounced straight to the
        # success URL instead of seeing the login form again.
        already_in = (self.disallow_authenticated
                      and self.request.user.is_authenticated())
        if already_in:
            return redirect(self.get_success_url())
        return super(LoginView, self).dispatch(*args, **kwargs)

    def form_valid(self, form):
        auth.login(self.request, form.get_user())
        return super(LoginView, self).form_valid(form)

    def get_context_data(self, **kwargs):
        context = super(LoginView, self).get_context_data(**kwargs)
        # Echo the ?next= value back into the template context.
        next_value = self.request.REQUEST.get(self.redirect_field_name, '')
        context[self.redirect_field_name] = next_value
        return context

login = LoginView.as_view()
class LogoutView(NeverCacheMixin, WithCurrentSiteMixin, WithNextUrlMixin, TemplateView, RedirectView):
    """Log the user out, then either redirect or show the logged-out page."""
    template_name = 'registration/logged_out.html'
    permanent = False

    def get(self, *args, **kwargs):
        auth.logout(self.request)
        # With a redirect target, behave like a RedirectView; otherwise
        # render the logged-out template.
        if self.get_redirect_url(**kwargs):
            return RedirectView.get(self, *args, **kwargs)
        return TemplateView.get(self, *args, **kwargs)

logout = LogoutView.as_view()

logout_then_login = LogoutView.as_view(
    url=reverse_lazy('login')
)
class PasswordChangeView(LoginRequiredMixin, WithNextUrlMixin, AuthDecoratorsMixin, FormView):
    """Let the logged-in user change their own password."""
    template_name = 'registration/password_change_form.html'
    form_class = PasswordChangeForm
    success_url = reverse_lazy('password_change_done')

    def get_user(self):
        return self.request.user

    def get_form_kwargs(self):
        form_kwargs = super(PasswordChangeView, self).get_form_kwargs()
        # PasswordChangeForm needs the user whose password is changing.
        form_kwargs['user'] = self.get_user()
        return form_kwargs

    def form_valid(self, form):
        form.save()
        return super(PasswordChangeView, self).form_valid(form)

password_change = PasswordChangeView.as_view()
class PasswordChangeDoneView(LoginRequiredMixin, TemplateView):
    """Success page shown after a password change."""
    template_name = 'registration/password_change_done.html'
password_change_done = PasswordChangeDoneView.as_view()
# 4 views for password reset:
# - password_reset sends the mail
# - password_reset_done shows a success message for the above
# - password_reset_confirm checks the link the user clicked and
# prompts for a new password
# - password_reset_complete shows a success message for the above
class PasswordResetView(CsrfProtectMixin, FormView):
    """Ask for an email address and send the password-reset email."""
    template_name = 'registration/password_reset_form.html'
    token_generator = default_token_generator
    success_url = reverse_lazy('password_reset_done')
    domain_override = None
    subject_template_name = 'registration/password_reset_subject.txt'
    email_template_name = 'registration/password_reset_email.html'
    from_email = None
    form_class = PasswordResetForm

    def form_valid(self, form):
        # All mail-building knobs are forwarded to the form's save(),
        # which actually composes and sends the reset email.
        mail_options = dict(
            domain_override=self.domain_override,
            subject_template_name=self.subject_template_name,
            email_template_name=self.email_template_name,
            token_generator=self.token_generator,
            from_email=self.from_email,
            request=self.request,
            use_https=self.request.is_secure(),
        )
        form.save(**mail_options)
        return super(PasswordResetView, self).form_valid(form)

password_reset = PasswordResetView.as_view()

friendly_password_reset = PasswordResetView.as_view(
    form_class=FriendlyPasswordResetForm
)
class PasswordResetDoneView(TemplateView):
    """Confirmation page shown after the reset email has been sent."""
    template_name = 'registration/password_reset_done.html'
password_reset_done = PasswordResetDoneView.as_view()
class PasswordResetConfirmView(AuthDecoratorsMixin, FormView):
    """Validate the emailed reset link and prompt for a new password."""
    template_name = 'registration/password_reset_confirm.html'
    token_generator = default_token_generator
    form_class = SetPasswordForm
    success_url = reverse_lazy('password_reset_complete')

    def dispatch(self, *args, **kwargs):
        assert self.kwargs.get('token') is not None
        self.user = self.get_user()
        return super(PasswordResetConfirmView, self).dispatch(*args, **kwargs)

    def get_queryset(self):
        return User._default_manager.all()

    def get_user(self):
        """Decode the uid from the URL and fetch the matching user.

        django 1.5 URLs carry uidb36, django 1.6 URLs carry uidb64; exactly
        one of the two must be present.  Returns None on any decode or
        lookup failure.
        """
        uidb36 = self.kwargs.get('uidb36')
        uidb64 = self.kwargs.get('uidb64')
        assert bool(uidb36) ^ bool(uidb64)
        try:
            if uidb36:
                pk = base36_to_int(uidb36)
            else:
                # Imported lazily: urlsafe_base64_decode does not exist on
                # django 1.5.
                from django.utils.http import urlsafe_base64_decode
                pk = urlsafe_base64_decode(uidb64)
            return self.get_queryset().get(pk=pk)
        except (TypeError, ValueError, OverflowError, User.DoesNotExist):
            return None

    def valid_link(self):
        """A link is valid when the user exists and the token checks out."""
        if self.user is None:
            return False
        return self.token_generator.check_token(self.user, self.kwargs.get('token'))

    def get_form_kwargs(self):
        form_kwargs = super(PasswordResetConfirmView, self).get_form_kwargs()
        form_kwargs['user'] = self.user
        return form_kwargs

    def get_context_data(self, **kwargs):
        context = super(PasswordResetConfirmView, self).get_context_data(**kwargs)
        link_ok = self.valid_link()
        context['validlink'] = link_ok
        if not link_ok:
            # Hide the form entirely for expired/forged links.
            context['form'] = None
        return context

    def form_valid(self, form):
        if not self.valid_link():
            return self.form_invalid(form)
        self.save_form(form)
        return super(PasswordResetConfirmView, self).form_valid(form)

    def save_form(self, form):
        return form.save()

password_reset_confirm = PasswordResetConfirmView.as_view()

# Django 1.6 added this as a temporary shim, see #14881. Since our view
# works with base 36 or base 64, we can use the same view for both.
password_reset_confirm_uidb36 = PasswordResetConfirmView.as_view()
class PasswordResetConfirmAndLoginView(PasswordResetConfirmView):
    """Reset the password and immediately log the user in."""
    success_url = resolve_url_lazy(settings.LOGIN_REDIRECT_URL)

    def save_form(self, form):
        result = super(PasswordResetConfirmAndLoginView, self).save_form(form)
        # Re-authenticate with the freshly-set password so auth.login gets
        # a fully-authenticated user object.
        new_password = form.cleaned_data['new_password1']
        user = auth.authenticate(username=self.user.get_username(),
                                 password=new_password)
        auth.login(self.request, user)
        return result

password_reset_confirm_and_login = PasswordResetConfirmAndLoginView.as_view()
class PasswordResetCompleteView(TemplateView):
    """Final 'your password has been reset' page, with a login link."""
    template_name = 'registration/password_reset_complete.html'
    login_url = settings.LOGIN_URL

    def get_login_url(self):
        return resolve_url(self.login_url)

    def get_context_data(self, **kwargs):
        context = super(PasswordResetCompleteView, self).get_context_data(**kwargs)
        context['login_url'] = self.get_login_url()
        return context

password_reset_complete = PasswordResetCompleteView.as_view()
|
ziposoft/godiva
|
src/members/views.py
|
Python
|
mit
| 11,223
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.