repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
halfwit/qutebrowser | tests/unit/completion/test_completer.py | 2 | 8668 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016 Ryan Roden-Corrent (rcorre) <ryan@rcorre.net>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for the Completer Object."""
import unittest.mock
import pytest
from PyQt5.QtGui import QStandardItemModel
from qutebrowser.completion import completer
from qutebrowser.utils import usertypes
class FakeCompletionModel(QStandardItemModel):

    """Stub for a completion model."""

    # NOTE(review): presumably mirrors an attribute of the real completion
    # models; tests only need it to exist, so None suffices — confirm.
    DUMB_SORT = None

    def __init__(self, kind, parent=None):
        """Initialize the stub.

        Args:
            kind: The usertypes.Completion member this model stands in for.
            parent: Optional Qt parent, forwarded to QStandardItemModel.
        """
        super().__init__(parent)
        self.kind = kind
@pytest.fixture
def cmd(stubs, qtbot):
    """Create the statusbar command prompt the completer uses."""
    cmd = stubs.FakeStatusbarCommand()
    # Register with qtbot so the widget is cleaned up after the test.
    qtbot.addWidget(cmd)
    return cmd
@pytest.fixture
def completer_obj(qtbot, cmd, config_stub):
    """Create the completer used for testing."""
    # auto-open disabled so the completer does not pop open a completion
    # window of its own accord during tests.
    config_stub.data = {'completion': {'auto-open': False}}
    # NOTE(review): 0 is presumably the window id — confirm against
    # Completer's signature.
    return completer.Completer(cmd, 0)
@pytest.fixture(autouse=True)
def instances(monkeypatch):
    """Mock the instances module so get returns a fake completion model."""
    # populate a model for each completion type, with a nested structure for
    # option and value completion
    instances = {kind: FakeCompletionModel(kind)
                 for kind in usertypes.Completion}
    # Option models are keyed by section name; value models by section and
    # then option name. Only the entries the tests exercise are populated.
    instances[usertypes.Completion.option] = {
        'general': FakeCompletionModel(usertypes.Completion.option),
    }
    instances[usertypes.Completion.value] = {
        'general': {
            'ignore-case': FakeCompletionModel(usertypes.Completion.value),
        }
    }
    monkeypatch.setattr('qutebrowser.completion.completer.instances',
                        instances)
@pytest.fixture(autouse=True)
def cmdutils_patch(monkeypatch, stubs):
    """Patch the cmdutils module to provide fake commands."""
    # Maps command name -> ordered list of completion types for its
    # positional arguments; None means no completable arguments at all.
    cmds = {
        'set': [usertypes.Completion.section, usertypes.Completion.option,
                usertypes.Completion.value],
        'help': [usertypes.Completion.helptopic],
        'quickmark-load': [usertypes.Completion.quickmark_by_name],
        'bookmark-load': [usertypes.Completion.bookmark_by_url],
        'open': [usertypes.Completion.url],
        'buffer': [usertypes.Completion.tab],
        'session-load': [usertypes.Completion.sessions],
        'bind': [usertypes.Completion.empty, usertypes.Completion.command],
        'tab-detach': None,
    }
    cmd_utils = stubs.FakeCmdUtils({
        name: stubs.FakeCommand(completion=compl)
        for name, compl in cmds.items()
    })
    monkeypatch.setattr('qutebrowser.completion.completer.cmdutils',
                        cmd_utils)
def _set_cmd_prompt(cmd, txt):
"""Set the command prompt's text and cursor position.
Args:
cmd: The command prompt object.
txt: The prompt text, using | as a placeholder for the cursor position.
"""
cmd.setText(txt.replace('|', ''))
cmd.setCursorPosition(txt.index('|'))
def _validate_cmd_prompt(cmd, txt):
"""Interpret fake command prompt text using | as the cursor placeholder.
Args:
cmd: The command prompt object.
txt: The prompt text, using | as a placeholder for the cursor position.
"""
assert cmd.cursorPosition() == txt.index('|')
assert cmd.text() == txt.replace('|', '')
# Each case maps a prompt (| marks the cursor) to the completion kind the
# completer should select, or None when no model should be set at all.
@pytest.mark.parametrize('txt, expected', [
    (':nope|', usertypes.Completion.command),
    (':nope |', None),
    (':set |', usertypes.Completion.section),
    (':set gen|', usertypes.Completion.section),
    (':set general |', usertypes.Completion.option),
    (':set what |', None),
    (':set general ignore-case |', usertypes.Completion.value),
    (':set general huh |', None),
    (':help |', usertypes.Completion.helptopic),
    (':quickmark-load |', usertypes.Completion.quickmark_by_name),
    (':bookmark-load |', usertypes.Completion.bookmark_by_url),
    (':open |', usertypes.Completion.url),
    (':buffer |', usertypes.Completion.tab),
    (':session-load |', usertypes.Completion.sessions),
    (':bind |', usertypes.Completion.empty),
    (':bind <c-x> |', usertypes.Completion.command),
    (':bind <c-x> foo|', usertypes.Completion.command),
    (':bind <c-x>| foo', usertypes.Completion.empty),
    (':set| general ', usertypes.Completion.command),
    (':|set general ', usertypes.Completion.command),
    (':set gene|ral ignore-case', usertypes.Completion.section),
    (':|', usertypes.Completion.command),
    (': |', usertypes.Completion.command),
    (':bookmark-load |', usertypes.Completion.bookmark_by_url),
    ('/|', None),
    (':open -t|', None),
    (':open --tab|', None),
    (':open -t |', usertypes.Completion.url),
    (':open --tab |', usertypes.Completion.url),
    (':open | -t', usertypes.Completion.url),
    (':--foo --bar |', None),
    (':tab-detach |', None),
    (':bind --mode=caret <c-x> |', usertypes.Completion.command),
    # NOTE(review): old-style mark-wrapped parameter; modern pytest spells
    # this pytest.param(..., marks=pytest.mark.xfail(...)) — confirm the
    # pinned pytest version before changing.
    pytest.mark.xfail(reason='issue #74')((':bind --mode caret <c-x> |',
                                           usertypes.Completion.command)),
    (':set -t -p |', usertypes.Completion.section),
    (':open -- |', None),
])
def test_update_completion(txt, expected, cmd, completer_obj,
                           completion_widget_stub):
    """Test setting the completion widget's model based on command text."""
    # this test uses | as a placeholder for the current cursor position
    _set_cmd_prompt(cmd, txt)
    completer_obj.update_completion()
    if expected is None:
        assert not completion_widget_stub.set_model.called
    else:
        assert completion_widget_stub.set_model.call_count == 1
        arg = completion_widget_stub.set_model.call_args[0][0]
        # the outer model is just for sorting; srcmodel is the completion model
        assert arg.srcmodel.kind == expected
def test_completion_item_prev(completer_obj, cmd, completion_widget_stub,
                              config_stub, qtbot):
    """Test that completion_item_prev emits next_prev_item."""
    cmd.setText(':')
    with qtbot.waitSignal(completer_obj.next_prev_item) as blocker:
        completer_obj.completion_item_prev()
    # The signal carries True for "select previous item".
    assert blocker.args == [True]
def test_completion_item_next(completer_obj, cmd, completion_widget_stub,
                              config_stub, qtbot):
    """Test that completion_item_next emits next_prev_item."""
    cmd.setText(':')
    with qtbot.waitSignal(completer_obj.next_prev_item) as blocker:
        completer_obj.completion_item_next()
    # The signal carries False for "select next item".
    assert blocker.args == [False]
@pytest.mark.parametrize('before, newtxt, quick_complete, count, after', [
    (':foo |', 'bar', False, 1, ':foo bar|'),
    (':foo |', 'bar', True, 2, ':foo bar|'),
    (':foo |', 'bar', True, 1, ':foo bar |'),
    (':foo | bar', 'baz', False, 1, ':foo baz| bar'),
    # multi-word and empty completions get quoted before insertion
    (':foo |', 'bar baz', True, 1, ":foo 'bar baz' |"),
    (':foo |', '', True, 1, ":foo '' |"),
    (':foo |', None, True, 1, ":foo |"),
])
def test_selection_changed(before, newtxt, count, quick_complete, after,
                           completer_obj, cmd, completion_widget_stub,
                           config_stub):
    """Test that change_completed_part modifies the cmd text properly.

    The | represents the current cursor position in the cmd prompt.
    If quick-complete is True and there is only 1 completion (count == 1),
    then we expect a space to be appended after the current word.
    """
    config_stub.data['completion']['quick-complete'] = quick_complete
    # Build the fake model/selection chain: selection.indexes()[0] is what
    # gets passed to model.data() to retrieve the completed text.
    model = unittest.mock.Mock()
    model.data = unittest.mock.Mock(return_value=newtxt)
    model.count = unittest.mock.Mock(return_value=count)
    indexes = [unittest.mock.Mock()]
    selection = unittest.mock.Mock()
    selection.indexes = unittest.mock.Mock(return_value=indexes)
    completion_widget_stub.model = unittest.mock.Mock(return_value=model)
    _set_cmd_prompt(cmd, before)
    completer_obj.update_cursor_part()
    completer_obj.selection_changed(selection, None)
    model.data.assert_called_with(indexes[0])
    _validate_cmd_prompt(cmd, after)
| gpl-3.0 |
analyseuc3m/ANALYSE-v1 | common/djangoapps/terrain/stubs/edxnotes.py | 23 | 12039 | """
Stub implementation of EdxNotes for acceptance tests
"""
import json
import re
from uuid import uuid4
from datetime import datetime
from copy import deepcopy
from math import ceil
from urllib import urlencode
from .http import StubHttpRequestHandler, StubHttpService
class StubEdxNotesServiceHandler(StubHttpRequestHandler):
    """
    Handler for EdxNotes requests.

    Dispatches each HTTP method through URL_HANDLERS: the request path is
    matched against the regex keys and the named handler method is invoked
    with any named regex groups (e.g. note_id) as keyword arguments.
    """

    # Maps HTTP method -> {path regex -> handler method name on this class}.
    URL_HANDLERS = {
        "GET": {
            "/api/v1/annotations$": "_collection",
            "/api/v1/annotations/(?P<note_id>[0-9A-Fa-f]+)$": "_read",
            "/api/v1/search$": "_search",
        },
        "POST": {
            "/api/v1/annotations$": "_create",
            "/create_notes": "_create_notes",
        },
        "PUT": {
            "/api/v1/annotations/(?P<note_id>[0-9A-Fa-f]+)$": "_update",
            "/cleanup$": "_cleanup",
        },
        "DELETE": {
            "/api/v1/annotations/(?P<note_id>[0-9A-Fa-f]+)$": "_delete",
        },
    }

    def _match_pattern(self, pattern_handlers):
        """
        Finds handler by the provided handler patterns and delegate response to
        the matched handler.

        Returns True if a handler matched and was invoked, otherwise None.
        """
        for pattern in pattern_handlers:
            match = re.match(pattern, self.path_only)
            if match:
                handler = getattr(self, pattern_handlers[pattern], None)
                if handler:
                    # Named regex groups become keyword arguments.
                    handler(**match.groupdict())
                    return True
        return None

    def _send_handler_response(self, method):
        """
        Delegate response to handler methods.
        If no handler defined, send a 404 response.
        """
        # Choose the list of handlers based on the HTTP method
        if method in self.URL_HANDLERS:
            handlers_list = self.URL_HANDLERS[method]
        else:
            self.log_error("Unrecognized method '{method}'".format(method=method))
            return
        # Check the path (without querystring params) against our list of handlers
        if self._match_pattern(handlers_list):
            return
        # If we don't have a handler for this URL and/or HTTP method,
        # respond with a 404.
        else:
            self.send_response(404, content="404 Not Found")

    def do_GET(self):
        """
        Handle GET methods to the EdxNotes API stub.
        """
        self._send_handler_response("GET")

    def do_POST(self):
        """
        Handle POST methods to the EdxNotes API stub.
        """
        self._send_handler_response("POST")

    def do_PUT(self):
        """
        Handle PUT methods to the EdxNotes API stub.
        """
        # /set_config is handled by the generic stub base class.
        if self.path.startswith("/set_config"):
            return StubHttpRequestHandler.do_PUT(self)
        self._send_handler_response("PUT")

    def do_DELETE(self):
        """
        Handle DELETE methods to the EdxNotes API stub.
        """
        self._send_handler_response("DELETE")

    def do_OPTIONS(self):
        """
        Handle OPTIONS methods to the EdxNotes API stub.

        Responds with the permissive CORS headers the annotator client
        expects for its preflight requests.
        """
        self.send_response(200, headers={
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Methods": "GET, POST, PUT, DELETE, OPTIONS",
            "Access-Control-Allow-Headers": "Content-Length, Content-Type, X-Annotator-Auth-Token, X-Requested-With, X-Annotator-Auth-Token, X-Requested-With, X-CSRFToken",
        })

    def respond(self, status_code=200, content=None):
        """
        Send a response back to the client with the HTTP `status_code` (int),
        the given content serialized as JSON (str), and the headers set appropriately.
        """
        headers = {
            "Access-Control-Allow-Origin": "*",
        }
        # Success responses carry JSON; errors carry plain HTML text.
        if status_code < 400 and content:
            headers["Content-Type"] = "application/json"
            content = json.dumps(content)
        else:
            headers["Content-Type"] = "text/html"
        self.send_response(status_code, content, headers)

    def _create(self):
        """
        Create a note, assign id, annotator_schema_version, created and updated dates.
        """
        note = json.loads(self.request_content)
        note.update({
            "id": uuid4().hex,
            "annotator_schema_version": "v1.0",
            "created": datetime.utcnow().isoformat(),
            "updated": datetime.utcnow().isoformat(),
        })
        self.server.add_notes(note)
        self.respond(content=note)

    def _create_notes(self):
        """
        The same as self._create, but it works a list of notes.
        """
        try:
            notes = json.loads(self.request_content)
        except ValueError:
            self.respond(400, "Bad Request")
            return
        if not isinstance(notes, list):
            self.respond(400, "Bad Request")
            return
        for note in notes:
            # Client-supplied timestamps are preserved; otherwise stamp now.
            note.update({
                "id": uuid4().hex,
                "annotator_schema_version": "v1.0",
                "created": note["created"] if note.get("created") else datetime.utcnow().isoformat(),
                "updated": note["updated"] if note.get("updated") else datetime.utcnow().isoformat(),
            })
            self.server.add_notes(note)
        self.respond(content=notes)

    def _read(self, note_id):
        """
        Return the note by note id.
        """
        notes = self.server.get_all_notes()
        result = self.server.filter_by_id(notes, note_id)
        if result:
            self.respond(content=result[0])
        else:
            self.respond(404, "404 Not Found")

    def _update(self, note_id):
        """
        Update the note by note id.
        """
        note = self.server.update_note(note_id, json.loads(self.request_content))
        if note:
            self.respond(content=note)
        else:
            self.respond(404, "404 Not Found")

    def _delete(self, note_id):
        """
        Delete the note by note id.
        """
        if self.server.delete_note(note_id):
            self.respond(204, "No Content")
        else:
            self.respond(404, "404 Not Found")

    @staticmethod
    def _get_next_prev_url(url_path, query_params, page_num, page_size):
        """
        makes url with the query params including pagination params
        for pagination next and previous urls
        """
        # Copy before mutating so the caller's params stay untouched.
        query_params = deepcopy(query_params)
        query_params.update({
            "page": page_num,
            "page_size": page_size
        })
        return url_path + "?" + urlencode(query_params)

    def _get_paginated_response(self, notes, page_num, page_size):
        """
        Returns a paginated response of notes.

        Slices `notes` to the requested page and wraps it with next/previous
        URLs and pagination metadata, mimicking the real service's shape.
        """
        start = (page_num - 1) * page_size
        end = start + page_size
        total_notes = len(notes)
        url_path = "http://{server_address}:{port}{path}".format(
            server_address=self.client_address[0],
            port=self.server.port,
            path=self.path_only
        )
        next_url = None if end >= total_notes else self._get_next_prev_url(
            url_path, self.get_params, page_num + 1, page_size
        )
        prev_url = None if page_num == 1 else self._get_next_prev_url(
            url_path, self.get_params, page_num - 1, page_size)
        # Get notes from range
        notes = deepcopy(notes[start:end])
        paginated_response = {
            'total': total_notes,
            'num_pages': int(ceil(float(total_notes) / page_size)),
            'current_page': page_num,
            'rows': notes,
            'next': next_url,
            'start': start,
            'previous': prev_url
        }
        return paginated_response

    def _search(self):
        """
        Search for a notes by user id, course_id and usage_id.
        """
        user = self.get_params.get("user", None)
        usage_id = self.get_params.get("usage_id", None)
        course_id = self.get_params.get("course_id", None)
        text = self.get_params.get("text", None)
        page = int(self.get_params.get("page", 1))
        page_size = int(self.get_params.get("page_size", 2))
        # user is required; the other filters are applied only if present.
        if user is None:
            self.respond(400, "Bad Request")
            return
        notes = self.server.get_all_notes()
        if course_id is not None:
            notes = self.server.filter_by_course_id(notes, course_id)
        if usage_id is not None:
            notes = self.server.filter_by_usage_id(notes, usage_id)
        if text:
            notes = self.server.search(notes, text)
        self.respond(content=self._get_paginated_response(notes, page, page_size))

    def _collection(self):
        """
        Return all notes for the user.
        """
        user = self.get_params.get("user", None)
        page = int(self.get_params.get("page", 1))
        page_size = int(self.get_params.get("page_size", 2))
        notes = self.server.get_all_notes()
        if user is None:
            self.send_response(400, content="Bad Request")
            return
        notes = self._get_paginated_response(notes, page, page_size)
        self.respond(content=notes)

    def _cleanup(self):
        """
        Helper method that removes all notes to the stub EdxNotes service.
        """
        self.server.cleanup()
        self.respond()
class StubEdxNotesService(StubHttpService):
    """
    Stub EdxNotes service.

    Keeps notes in an in-memory list; the handler class above implements
    the HTTP API on top of this storage.
    """

    HANDLER_CLASS = StubEdxNotesServiceHandler

    def __init__(self, *args, **kwargs):
        super(StubEdxNotesService, self).__init__(*args, **kwargs)
        # Notes stored in insertion order; get_all_notes returns newest-first.
        self.notes = list()

    def get_all_notes(self):
        """
        Returns a list of all notes without pagination

        The list is a deep copy in reverse insertion order (newest first).
        """
        notes = deepcopy(self.notes)
        notes.reverse()
        return notes

    def add_notes(self, notes):
        """
        Adds `notes(list)` to the stub EdxNotes service.

        A single note (dict) is accepted too and wrapped in a list.
        """
        if not isinstance(notes, list):
            notes = [notes]
        for note in notes:
            self.notes.append(note)

    def update_note(self, note_id, note_info):
        """
        Updates the note with `note_id(str)` by the `note_info(dict)` to the
        stub EdxNotes service.

        Returns the (single-element) matching list, or None if not found.
        """
        note = self.filter_by_id(self.notes, note_id)
        if note:
            note[0].update(note_info)
            return note
        else:
            return None

    def delete_note(self, note_id):
        """
        Removes the note with `note_id(str)` to the stub EdxNotes service.

        Returns True if a note was removed, False otherwise.
        """
        note = self.filter_by_id(self.notes, note_id)
        if note:
            index = self.notes.index(note[0])
            self.notes.pop(index)
            return True
        else:
            return False

    def cleanup(self):
        """
        Removes all notes to the stub EdxNotes service.
        """
        self.notes = list()

    def filter_by_id(self, data, note_id):
        """
        Filters provided `data(list)` by the `note_id(str)`.
        """
        return self.filter_by(data, "id", note_id)

    def filter_by_user(self, data, user):
        """
        Filters provided `data(list)` by the `user(str)`.
        """
        return self.filter_by(data, "user", user)

    def filter_by_usage_id(self, data, usage_id):
        """
        Filters provided `data(list)` by the `usage_id(str)`.
        """
        return self.filter_by(data, "usage_id", usage_id)

    def filter_by_course_id(self, data, course_id):
        """
        Filters provided `data(list)` by the `course_id(str)`.
        """
        return self.filter_by(data, "course_id", course_id)

    def filter_by(self, data, field_name, value):
        """
        Filters provided `data(list)` by the `field_name(str)` with `value`.
        """
        return [note for note in data if note.get(field_name) == value]

    def search(self, data, query):
        """
        Search the `query(str)` text in the provided `data(list)`.

        Matches whole words only (split on whitespace).
        NOTE: `unicode` makes this Python-2-only.
        """
        return [note for note in data if unicode(query).strip() in note.get("text", "").split()]
| agpl-3.0 |
thecocce/crypto-un-locker | CryptoUnLocker.py | 2 | 10737 | #!/usr/bin/env python
import struct
import os
import argparse
import shutil
import sys
from collections import namedtuple
from datetime import datetime
import csv
import re
from Crypto.Cipher import AES
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5
from Crypto.Hash import SHA
from Crypto.Util.number import bytes_to_long
"""
CryptoLocker file structure:
0x14 bytes : SHA1 hash of '\x00'*4 + next 0x100 bytes of file.
0x100 bytes : AES key encrypted with RSA PKCS#1 v1.5:
0x2c bytes : AES key blob
remainder : file data encrypted with AES256-CBC with IV of 0x00
Key blob is a Microsoft PUBLICKEYSTRUC:
typedef struct _PUBLICKEYSTRUC {
BYTE bType;
BYTE bVersion;
WORD reserved;
ALG_ID aiKeyAlg;
} BLOBHEADER, PUBLICKEYSTRUC;
where:
bType = 0x08
bVersion = 0x02
reserved = 0
aiKeyAlg = 0x6610 (AES-256)
followed by a DWORD length of 0x20, and finally the 32 byte AES key.
"""
# Named views of the Microsoft CryptoAPI key-blob structures parsed below.
PUBLICKEYSTRUC = namedtuple('PUBLICKEYSTRUC', 'bType bVersion reserved aiKeyAlg')
RSAPUBKEY = namedtuple('RSAPUBKEY', 'magic bitlen pubexp')
PRIVATEKEYBLOB = namedtuple('PRIVATEKEYBLOB', 'modulus prime1 prime2 exponent1 exponent2 coefficient privateExponent')
# Binary layouts: little-endian BLOBHEADER (byte, byte, WORD, DWORD) and
# RSAPUBKEY (4-byte magic, DWORD bitlen, DWORD pubexp).
PUBLICKEYSTRUC_s = struct.Struct('<bbHI')
RSAPUBKEY_s = struct.Struct('<4sII')
# Matches the body of a PEM-encoded key (BEGIN/END armor lines).
key_re = re.compile('-----BEGIN.*KEY-----\n(.*)\n-----END.*KEY-----', re.DOTALL)
def subtract(a, b):
    """Return ord(b) - ord(a), or None if either argument is None.

    Used by CryptoUnLocker.guessIfWiped() to compute per-pair byte deltas;
    note the result is b minus a, despite the function's name.
    """
    # `is None` instead of `== None`: identity check is the correct idiom
    # and avoids invoking custom __eq__ implementations.
    if a is None or b is None:
        return None
    return ord(b) - ord(a)
class OutputLevel:
    """Message severity levels used by CryptoUnLockerProcess.output()."""
    VerboseLevel = 0
    InfoLevel = 1
    WarnLevel = 2
    ErrorLevel = 3
class CryptoUnLocker(object):
    """Decrypts CryptoLocker-encrypted files given the RSA private key(s).

    Keys may be loaded from PEM files or from Microsoft CryptImportKey-style
    PRIVATEKEYBLOB files; each candidate key is tried in turn per file.
    NOTE: this module is Python 2 (`long`, integer `/` division, str/bytes
    conflation).
    """

    def __init__(self):
        # Loaded RSA private key objects, tried in order during decryption.
        self.keys = []

    def loadKeyFromFile(self, fn):
        """Load an RSA private key from `fn` (PEM or CryptoAPI blob).

        Raises:
            Exception: if no key could be parsed from the file.
        """
        d = open(fn, 'rb').read()
        matches = key_re.match(d)
        if matches:
            self.loadKeyFromString(matches.group(0))
            return
        # fall through if the file does not contain a PEM encoded RSA key
        # try the CryptImportKey Win32 file format
        if self.CryptImportKey(d):
            return
        # Apparently a new version of CryptoLocker is adding what looks
        # like a version number to the start of the RSA key format. Try
        # skipping over the first four bytes of the file then interpreting
        # the rest as an RSA private key.
        if self.CryptImportKey(d[4:]):
            return
        # if we can't import the file, raise an exception
        raise Exception("Could not parse a private key from file")

    def CryptImportKey(self, d):
        """Try to parse `d` as a CryptoAPI PRIVATEKEYBLOB; return success.

        On success the reconstructed RSA key is appended to self.keys.
        """
        publickeystruc = PUBLICKEYSTRUC._make(PUBLICKEYSTRUC_s.unpack_from(d))
        # bType 0x07 = PRIVATEKEYBLOB, version 2; 41984 == 0xa400, which is
        # presumably CALG_RSA_KEYX — confirm against wincrypt.h.
        if publickeystruc.bType == 7 and publickeystruc.bVersion == 2 and publickeystruc.aiKeyAlg == 41984:
            rsapubkey = RSAPUBKEY._make(RSAPUBKEY_s.unpack_from(d[8:]))
            if rsapubkey.magic == 'RSA2':
                # Python 2 integer division; field sizes per the
                # PRIVATEKEYBLOB layout (modulus is bitlen/8 bytes, each
                # CRT component bitlen/16).
                bitlen8 = rsapubkey.bitlen/8
                bitlen16 = rsapubkey.bitlen/16
                PRIVATEKEYBLOB_s = struct.Struct('%ds%ds%ds%ds%ds%ds%ds' % (bitlen8, bitlen16, bitlen16, bitlen16, bitlen16, bitlen16, bitlen8))
                privatekey = PRIVATEKEYBLOB._make(map(bytes_to_long, PRIVATEKEYBLOB_s.unpack_from(d[20:])))
                r = RSA.construct((privatekey.modulus, long(rsapubkey.pubexp), privatekey.privateExponent,
                                   privatekey.prime1, privatekey.prime2))
                self.keys.append(r)
                return True
        return False

    def loadKeyFromString(self, s):
        """Load a PEM-encoded RSA private key from string `s`."""
        r = RSA.importKey(s)
        self.keys.append(r)

    def isCryptoLocker(self, fn):
        """Return True if `fn` has a valid CryptoLocker header.

        The first 0x14 bytes must be the SHA1 of '\\x00'*4 plus the next
        0x100 bytes (the encrypted AES key) — see the module docstring.
        """
        file_header = open(fn, 'rb').read(0x114)
        if len(file_header) != 0x114:
            return False
        # validate that the header is correct
        header_hash = SHA.new('\x00'*4 + file_header[0x14:0x114])
        return header_hash.digest() == file_header[:0x14]

    def guessIfWiped(self, fn):
        """Heuristically detect files overwritten with a repeating pattern.

        Checks whether the deltas between consecutive byte pairs in the
        first 32 bytes are all identical.
        """
        file_header = open(fn, 'rb').read(64)
        if len(file_header) != 64:
            return False
        lst = map(subtract, file_header[:32:2], file_header[1:32:2])
        return not lst or [lst[0]]*len(lst) == lst

    def decryptFile(self, fn):
        """Decrypt CryptoLocker file `fn` and return the plaintext bytes.

        Raises:
            Exception: if the file is too short or no loaded key decrypts
                the embedded AES key.
        """
        aes_key = None
        with open(fn, 'rb') as fp:
            file_header = fp.read(0x114)
            if len(file_header) != 0x114:
                raise Exception("Not a CryptoLocker file")
            # Try each loaded RSA key until one yields a valid AES key blob.
            for rsa_key in self.keys:
                aes_key = self.retrieveAESKey(rsa_key, file_header)
                if aes_key:
                    break
            if not aes_key:
                raise Exception("Could not find the private key for this CryptoLocker file")
            # read the remaining data and decrypt with the AES key
            d = fp.read()
            a = AES.new(aes_key, mode=AES.MODE_CBC, IV='\x00'*16)
            d = a.decrypt(d)
            # Strip PKCS#7-style padding (last byte gives the pad length).
            d = d[:-ord(d[-1])]
            return d

    def retrieveAESKey(self, r, file_header):
        """RSA-decrypt the AES key blob in `file_header` using key `r`.

        Returns the 32-byte AES key, or None if the blob does not validate.
        """
        # we have to reverse the bytes in the header to conform with the CryptoAPI
        # CryptDecrypt function.
        file_header = file_header[0x14:0x114]
        file_header = file_header[::-1]
        # decrypt the AES key blob
        c = PKCS1_v1_5.new(r)
        sentinel = '\x00' * 16
        blob = c.decrypt(file_header, sentinel)
        # retrieve key from file_header
        (bType, bVersion, reserved, aiKeyAlg, keyLen) = struct.unpack('<BBHII', blob[:0xc])
        # 0x08 = PLAINTEXTKEYBLOB, 0x6610 = CALG_AES_256, 32-byte key.
        if bType == 0x08 and bVersion == 0x02 and reserved == 0 and \
                aiKeyAlg == 0x6610 and keyLen == 32:
            aes_key = blob[0x0c:0x0c+32]
            return aes_key
        else:
            return None
class CryptoUnLockerProcess(object):
    """Drives the decryption run: loads keys, walks files, reports results.

    NOTE: Python 2 only (`except Exception, e` syntax).
    """

    def __init__(self, args, unlocker):
        # args: parsed argparse namespace; unlocker: CryptoUnLocker instance.
        self.args = args
        self.unlocker = unlocker
        self.csvfp = None
        self.csv = None

    def doit(self):
        """Run the whole process; return a shell-style exit code (0/1)."""
        if self.args.csvfile:
            self.csvfp = open(self.args.csvfile,'wb')
            self.csv = csv.writer(self.csvfp)
            self.csv.writerow(['Timestamp', 'Filename', 'Message'])
        # Collect key files from --keyfile or every file in --keydir.
        keyfiles = []
        if self.args.keyfile:
            keyfiles = [self.args.keyfile]
        elif self.args.keydir:
            keyfiles = [os.path.join(self.args.keydir, fn) for fn in os.listdir(self.args.keydir)]
        for fn in keyfiles:
            try:
                self.unlocker.loadKeyFromFile(fn)
                self.output(OutputLevel.VerboseLevel, fn, "Successfully loaded key file")
            except Exception, e:
                self.output(OutputLevel.ErrorLevel, fn, "Unsuccessful loading key file: %s" % e.message)
        # Keys are only required when actually decrypting, not for --detect.
        if not len(self.unlocker.keys) and not self.args.detect:
            self.output(OutputLevel.ErrorLevel, '', 'No key files were successfully loaded. Exiting.')
            return 1
        if self.args.recursive:
            # With -r, the first positional argument is the root directory.
            for root, dirs, files in os.walk(self.args.encrypted_filenames[0]):
                for fn in files:
                    self.processFile(root, fn)
        else:
            for fn in self.args.encrypted_filenames:
                self.processFile('', fn)
        return 0

    def processFile(self, pathname, fn):
        """Detect and (unless --detect/--dry-run) decrypt a single file."""
        if fn.endswith('.bak'):
            # skip backup files
            return
        fullpath = os.path.join(pathname, fn)
        try:
            if self.unlocker.guessIfWiped(fullpath):
                self.output(OutputLevel.VerboseLevel, fullpath, "File appears wiped")
                return
            elif not self.unlocker.isCryptoLocker(fullpath):
                self.output(OutputLevel.VerboseLevel, fullpath, "Not a CryptoLocker file")
                return
            else:
                if self.args.detect:
                    self.output(OutputLevel.InfoLevel, fullpath, "Potential CryptoLocker file")
                    return
        except Exception, e:
            self.output(OutputLevel.ErrorLevel, fullpath, "Unsuccessful opening file: %s" % e.message)
            return
        try:
            decrypted_file = self.unlocker.decryptFile(fullpath)
            self.output(OutputLevel.InfoLevel, fullpath, "Successfully decrypted file")
            if not self.args.dry_run:
                if self.args.destdir:
                    # Mirror the source directory layout under destdir.
                    destdir = os.path.join(self.args.destdir, pathname)
                    if not os.path.exists(destdir):
                        os.makedirs(destdir)
                    open(os.path.join(destdir, fn), 'wb').write(decrypted_file)
                else:
                    # In-place mode: keep the encrypted original as .bak.
                    shutil.copy2(fullpath, fullpath + ".bak")
                    open(os.path.join(pathname, fn), 'wb').write(decrypted_file)
        except Exception, e:
            self.output(OutputLevel.ErrorLevel, fullpath, "Unsuccessful decrypting file: %s" % e.message)

    def output(self, level, fn, msg):
        """Emit a status line to stderr (and the CSV log, if enabled)."""
        if level == OutputLevel.VerboseLevel and not self.args.verbose:
            return
        if self.csv:
            self.csv.writerow([datetime.now(), fn, msg])
        # [.] verbose, [+] info, [-] warning/error
        icon = '[.]'
        if level == OutputLevel.InfoLevel:
            icon = '[+]'
        elif level > OutputLevel.InfoLevel:
            icon = '[-]'
        if fn:
            sys.stderr.write('%s %s: %s\n' % (icon, msg, fn))
        else:
            sys.stderr.write('%s %s\n' % (icon, msg))
        sys.stderr.flush()
def main():
    """Parse command-line arguments and run the decryption process.

    Returns the process exit code from CryptoUnLockerProcess.doit().
    """
    parser = argparse.ArgumentParser(description='Decrypt CryptoLocker encrypted files.')
    # Exactly one of --keyfile / --keydir / --detect must be given.
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--keyfile', action='store', dest='keyfile',
                       help='File containing the private key, or the EXE file provided for decryption')
    group.add_argument('--keydir', action='store', dest='keydir',
                       help='Directory containing any number of private keys; the appropriate private key will be used during the decryption process')
    group.add_argument('--detect', action='store_true', dest='detect', help="Don't try to decrypt; just find files that may be CryptoLockered")
    parser.add_argument('-r', action='store_true', dest='recursive', help="Recursively search subdirectories")
    parser.add_argument('-v', action='store_true', dest='verbose', help="Verbose output")
    parser.add_argument('--dry-run', action='store_true', dest='dry_run', help="Don't actually write decrypted files")
    parser.add_argument('-o', action='store', dest='destdir', help='Copy all decrypted files to an output directory, mirroring the source path')
    parser.add_argument('--csv', action='store', dest='csvfile', help='Output to a CSV file')
    parser.add_argument('encrypted_filenames', nargs="+")
    results = parser.parse_args()
    unlocker = CryptoUnLocker()
    processor = CryptoUnLockerProcess(results, unlocker)
    return processor.doit()


if __name__ == '__main__':
    sys.exit(main())
| mit |
home-assistant/home-assistant | homeassistant/components/firmata/binary_sensor.py | 14 | 2018 | """Support for Firmata binary sensor input."""
import logging
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_NAME, CONF_PIN
from homeassistant.core import HomeAssistant
from .const import CONF_NEGATE_STATE, CONF_PIN_MODE, DOMAIN
from .entity import FirmataPinEntity
from .pin import FirmataBinaryDigitalInput, FirmataPinUsedException
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities
) -> None:
    """Set up the Firmata binary sensors."""
    board = hass.data[DOMAIN][config_entry.entry_id]
    entities = []
    for sensor_conf in board.binary_sensors:
        pin = sensor_conf[CONF_PIN]
        api = FirmataBinaryDigitalInput(
            board,
            pin,
            sensor_conf[CONF_PIN_MODE],
            sensor_conf[CONF_NEGATE_STATE],
        )
        try:
            api.setup()
        except FirmataPinUsedException:
            # Skip this sensor but keep setting up the remaining ones.
            _LOGGER.error(
                "Could not setup binary sensor on pin %s since pin already in use",
                sensor_conf[CONF_PIN],
            )
            continue
        entities.append(
            FirmataBinarySensor(api, config_entry, sensor_conf[CONF_NAME], pin)
        )
    if entities:
        async_add_entities(entities)
class FirmataBinarySensor(FirmataPinEntity, BinarySensorEntity):
    """Representation of a binary sensor on a Firmata board."""

    @property
    def is_on(self) -> bool:
        """Return true if binary sensor is on."""
        return self._api.is_on

    async def async_added_to_hass(self) -> None:
        """Set up a binary sensor."""
        # Push state updates to HA whenever the pin reports a change.
        await self._api.start_pin(self.async_write_ha_state)

    async def async_will_remove_from_hass(self) -> None:
        """Stop reporting a binary sensor."""
        await self._api.stop_pin()
| apache-2.0 |
CCLab/sezam | djcelery/tests/test_loaders.py | 3 | 1434 | from __future__ import absolute_import
from celery import loaders
from djcelery import loaders as djloaders
from djcelery.tests.utils import unittest
class TestDjangoLoader(unittest.TestCase):
    """Tests for djcelery's Django-aware celery loader."""

    def setUp(self):
        self.loader = djloaders.DjangoLoader()

    def test_get_loader_cls(self):
        """The "django" loader name resolves to DjangoLoader."""
        self.assertEqual(loaders.get_loader_cls("django"),
                         self.loader.__class__)
        # Execute cached branch.
        self.assertEqual(loaders.get_loader_cls("django"),
                         self.loader.__class__)

    def test_on_worker_init(self):
        """Importing a nonexistent CELERY_IMPORTS module raises ImportError."""
        from django.conf import settings
        old_imports = getattr(settings, "CELERY_IMPORTS", ())
        settings.CELERY_IMPORTS = ("xxx.does.not.exist", )
        try:
            self.assertRaises(ImportError, self.loader.import_default_modules)
        finally:
            # Restore the original setting even if the assertion fails.
            settings.CELERY_IMPORTS = old_imports

    def test_race_protection(self):
        """on_worker_init is a no-op while the race-protection flag is set."""
        djloaders._RACE_PROTECTION = True
        try:
            self.assertFalse(self.loader.on_worker_init())
        finally:
            djloaders._RACE_PROTECTION = False

    def test_find_related_module_no_path(self):
        self.assertFalse(djloaders.find_related_module("sys", "tasks"))

    def test_find_related_module_no_related(self):
        self.assertFalse(djloaders.find_related_module("someapp",
                                                       "frobulators"))
| bsd-3-clause |
google-research/dads | lib/py_uniform_replay_buffer.py | 1 | 8796 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Uniform replay buffer in Python.
The base class provides all the functionalities of a uniform replay buffer:
- add samples in a First In First Out way.
- read samples uniformly.
PyHashedReplayBuffer is a flavor of the base class which
compresses the observations when the observations have some partial overlap
(e.g. when using frame stacking).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import numpy as np
import tensorflow as tf
from tf_agents.replay_buffers import replay_buffer
from tf_agents.specs import array_spec
from tf_agents.utils import nest_utils
from tf_agents.utils import numpy_storage
class PyUniformReplayBuffer(replay_buffer.ReplayBuffer):
  """A Python-based replay buffer that supports uniform sampling.

  Writing and reading to this replay buffer is thread safe.

  This replay buffer can be subclassed to change the encoding used for the
  underlying storage by overriding _encoded_data_spec, _encode, _decode, and
  _on_delete.
  """

  def __init__(self, data_spec, capacity):
    """Creates a PyUniformReplayBuffer.

    Args:
      data_spec: An ArraySpec or a list/tuple/nest of ArraySpecs describing a
        single item that can be stored in this buffer.
      capacity: The maximum number of items that can be stored in the buffer.
    """
    super(PyUniformReplayBuffer, self).__init__(data_spec, capacity)
    self._storage = numpy_storage.NumpyStorage(self._encoded_data_spec(),
                                               capacity)
    # Serializes every read/write of the storage and of the counters below.
    self._lock = threading.Lock()
    self._np_state = numpy_storage.NumpyState()
    # Adding elements to the replay buffer is done in a circular way.
    # Keeps track of the actual size of the replay buffer and the location
    # where to add new elements.
    self._np_state.size = np.int64(0)
    self._np_state.cur_id = np.int64(0)
    # Total number of items that went through the replay buffer.
    self._np_state.item_count = np.int64(0)

  def _encoded_data_spec(self):
    """Spec of data items after encoding using _encode."""
    return self._data_spec

  def _encode(self, item):
    """Encodes an item (before adding it to the buffer)."""
    return item

  def _decode(self, item):
    """Decodes an item."""
    return item

  def _on_delete(self, encoded_item):
    """Do any necessary cleanup."""
    pass

  @property
  def size(self):
    # Number of items currently stored (saturates at self._capacity).
    return self._np_state.size

  def _add_batch(self, items):
    """Adds a batch of items to the buffer (outer batch dim must be 1)."""
    outer_shape = nest_utils.get_outer_array_shape(items, self._data_spec)
    if outer_shape[0] != 1:
      raise NotImplementedError('PyUniformReplayBuffer only supports a batch '
                                'size of 1, but received `items` with batch '
                                'size {}.'.format(outer_shape[0]))
    item = nest_utils.unbatch_nested_array(items)
    with self._lock:
      if self._np_state.size == self._capacity:
        # If we are at capacity, we are deleting element cur_id.
        self._on_delete(self._storage.get(self._np_state.cur_id))
      self._storage.set(self._np_state.cur_id, self._encode(item))
      self._np_state.size = np.minimum(self._np_state.size + 1,
                                       self._capacity)
      # cur_id wraps around: the buffer is circular.
      self._np_state.cur_id = (self._np_state.cur_id + 1) % self._capacity
      self._np_state.item_count += 1

  def _get_next(self,
                sample_batch_size=None,
                num_steps=None,
                time_stacked=True):
    """Uniformly samples one item, or a run of num_steps consecutive items."""
    num_steps_value = num_steps if num_steps is not None else 1

    def get_single():
      """Gets a single item from the replay buffer."""
      with self._lock:
        if self._np_state.size <= 0:
          # Empty buffer: return arrays of the right shape/dtype. NOTE: the
          # values are uninitialized memory (np.empty), not zeros.
          def empty_item(spec):
            return np.empty(spec.shape, dtype=spec.dtype)
          if num_steps is not None:
            item = [tf.nest.map_structure(empty_item, self.data_spec)
                    for n in range(num_steps)]
            if time_stacked:
              item = nest_utils.stack_nested_arrays(item)
          else:
            item = tf.nest.map_structure(empty_item, self.data_spec)
          return item
        # Uniform over all start positions that leave room for num_steps_value
        # consecutive items.
        idx = np.random.randint(self._np_state.size - num_steps_value + 1)
        if self._np_state.size == self._capacity:
          # If the buffer is full, add cur_id (head of circular buffer) so that
          # we sample from the range [cur_id, cur_id + size - num_steps_value].
          # We will modulo the size below.
          idx += self._np_state.cur_id
        if num_steps is not None:
          # TODO(b/120242830): Try getting data from numpy in one shot rather
          # than num_steps_value.
          item = [self._decode(self._storage.get((idx + n) % self._capacity))
                  for n in range(num_steps)]
        else:
          item = self._decode(self._storage.get(idx % self._capacity))
      if num_steps is not None and time_stacked:
        item = nest_utils.stack_nested_arrays(item)
      return item

    if sample_batch_size is None:
      return get_single()
    else:
      # Draw sample_batch_size independent samples and stack them along a new
      # outer (batch) dimension.
      samples = [get_single() for _ in range(sample_batch_size)]
      return nest_utils.stack_nested_arrays(samples)

  def _as_dataset(self, sample_batch_size=None, num_steps=None,
                  num_parallel_calls=None):
    """Builds an infinite tf.data.Dataset that samples from this buffer."""
    if num_parallel_calls is not None:
      raise NotImplementedError('PyUniformReplayBuffer does not support '
                                'num_parallel_calls (must be None).')
    # Expand the spec to match what the generator will yield: an optional
    # outer batch dimension and an optional tuple of num_steps time steps.
    data_spec = self._data_spec
    if sample_batch_size is not None:
      data_spec = array_spec.add_outer_dims_nest(
          data_spec, (sample_batch_size,))
    if num_steps is not None:
      data_spec = (data_spec,) * num_steps
    shapes = tuple(s.shape for s in tf.nest.flatten(data_spec))
    dtypes = tuple(s.dtype for s in tf.nest.flatten(data_spec))

    def generator_fn():
      """Yields flattened samples forever (time axis not yet stacked)."""
      while True:
        if sample_batch_size is not None:
          batch = [self._get_next(num_steps=num_steps, time_stacked=False)
                   for _ in range(sample_batch_size)]
          item = nest_utils.stack_nested_arrays(batch)
        else:
          item = self._get_next(num_steps=num_steps, time_stacked=False)
        yield tuple(tf.nest.flatten(item))

    def time_stack(*structures):
      # Stack the per-step structures along the time axis: axis 1 when the
      # sample has an outer batch dimension, axis 0 otherwise.
      time_axis = 0 if sample_batch_size is None else 1
      return tf.nest.map_structure(
          lambda *elements: tf.stack(elements, axis=time_axis), *structures)

    ds = tf.data.Dataset.from_generator(
        generator_fn, dtypes,
        shapes).map(lambda *items: tf.nest.pack_sequence_as(data_spec, items))
    if num_steps is not None:
      return ds.map(time_stack)
    else:
      return ds

  def _gather_all(self):
    """Returns the full storage, batched with an outer dimension of 1.

    NOTE(review): this reads every slot in [0, capacity), so when
    size < capacity it also returns never-written slots — confirm callers
    only use it on a full buffer.
    """
    data = [self._decode(self._storage.get(idx))
            for idx in range(self._capacity)]
    stacked = nest_utils.stack_nested_arrays(data)
    batched = tf.nest.map_structure(lambda t: np.expand_dims(t, 0), stacked)
    return batched

  def _clear(self):
    # Reset the counters only; the underlying storage is left in place.
    # (item_count is deliberately not reset here.)
    self._np_state.size = np.int64(0)
    self._np_state.cur_id = np.int64(0)

  def gather_all_transitions(self):
    """Returns every overlapping 2-step transition currently in the buffer."""
    num_steps_value = 2

    def get_single(idx):
      """Gets the idx item from the replay buffer."""
      with self._lock:
        if self._np_state.size <= idx:
          # Index beyond the stored items: return arrays of the right
          # shape/dtype (values are uninitialized, from np.empty).
          def empty_item(spec):
            return np.empty(spec.shape, dtype=spec.dtype)
          item = [
              tf.nest.map_structure(empty_item, self.data_spec)
              for n in range(num_steps_value)
          ]
          item = nest_utils.stack_nested_arrays(item)
          return item
        if self._np_state.size == self._capacity:
          # If the buffer is full, add cur_id (head of circular buffer) so that
          # we sample from the range [cur_id, cur_id + size - num_steps_value].
          # We will modulo the size below.
          idx += self._np_state.cur_id
        item = [
            self._decode(self._storage.get((idx + n) % self._capacity))
            for n in range(num_steps_value)
        ]
      item = nest_utils.stack_nested_arrays(item)
      return item

    samples = [
        get_single(idx)
        for idx in range(self._np_state.size - num_steps_value + 1)
    ]
    return nest_utils.stack_nested_arrays(samples)
| apache-2.0 |
jankeromnes/depot_tools | third_party/boto/compat.py | 79 | 1335 | # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# This allows boto modules to say "from boto.compat import json". This is
# preferred so that all modules don't have to repeat this idiom.
try:
import simplejson as json
except ImportError:
import json
| bsd-3-clause |
moazzemi/HAMEX | cpu/gem5/ext/mcpat/regression/regression.py | 43 | 9230 | #!/usr/bin/env python
# Copyright (c) 2010-2013 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
SYNOPSIS
./regression/regression.py ./regression/
DESCRIPTION
Runs regression tester for McPAT.
This tester can compile and runs McPAT on the input contained in the
specified directory, and then compares the output to that of a prior run in
order to ensure that specific power and area calculations do not change.
AUTHORS
Joel Hestness <hestness@cs.wisc.edu> (while interning at AMD)
Yasuko Eckert <yasuko.eckert@amd.com>
"""
import os
import sys
import optparse
import re
################################
# Global Variables
################################
# Path to the McPAT binary, relative to the directory this script is run from.
# NOTE(review): `global` at module scope is a no-op; these statements only
# document intent.
global mcpat_binary
mcpat_binary = "../../build/mcpat/mcpat"
global optionsparser
################################
# Global Functions
################################
def run_test(testdir):
    """Run McPAT on each power_region<N>.xml in testdir and diff the output
    against the checked-in region<N>.out.ref reference file, printing a
    final PASSED/FAILED verdict for the directory."""
    test_passed = True
    testfiles = os.listdir(testdir)
    for testfile in testfiles:
        # For each power_region file, run McPAT on it and check the
        # output created against the regression
        if re.match("power_region.*\.xml$", testfile):
            # Get the region index of the test
            fileparts = testfile.split(".")
            region_index = fileparts[0][12:]
            regression_test = os.path.join(testdir, testfile)
            regression_output = os.path.join(
                testdir, "region%s.out" % region_index)
            regression_correct = os.path.join(
                testdir, "region%s.out.ref" % region_index)
            print "Running test: %s..." % regression_test
            # Run McPAT on the input
            os.system(
                "%s -infile %s -print_level 10 > %s" %
                (mcpat_binary, regression_test, regression_output) )
            if os.path.exists(regression_correct):
                # Any textual difference from the reference is a failure.
                diff = os.popen(
                    "diff %s %s" % (regression_output, regression_correct),
                    "r").read()
                if diff != "":
                    print "WARN: Differences found in %s" % regression_output
                    if options.verbose:
                        print diff
                    test_passed = False
            else:
                # Missing reference output: the test cannot be verified.
                print "WARN: Regression test not set up: %s..." % regression_test
                print "WARN: Not able to verify test"
                test_passed = False
            if options.cleanup:
                if options.verbose:
                    print "WARN: Cleaning (deleting) regression output file: "\
                        "%s" % regression_output
                os.system("rm -f %s" % regression_output)
    if test_passed:
        print "PASSED: %s\n\n" % testdir
    else:
        print "FAILED: %s\n\n" % testdir
def has_power_region_files(testdir):
    """Return True if testdir contains at least one power_region*.xml input.

    Previously this function fell off the end and returned an implicit None
    when no input file was found; it now returns an explicit False so callers
    get a real boolean either way.
    """
    files = os.listdir(testdir)
    for file in files:
        if "power_region" in file and ".xml" in file:
            return True
    return False
def is_valid_test_directory(testdir):
    """Return True when testdir holds at least one power_region*.xml input
    AND every such input has a matching region<N>.out.ref reference file."""
    valid_regression = True
    power_region_file_found = False
    files = os.listdir(testdir)
    for file in files:
        if "power_region" in file and ".xml" in file:
            power_region_file_found = True
            # Derive region index N from "power_region<N>.xml".
            fileparts = file.split(".")
            region_index = fileparts[0][12:]
            regression_output = os.path.join(
                testdir, "region%s.out.ref" % region_index)
            if os.path.exists(regression_output):
                if options.verbose:
                    print "Valid regression test: %s/%s" % (testdir, file)
            else:
                valid_regression = False
    return valid_regression and power_region_file_found
################################
# Execute here
################################
optionsparser = optparse.OptionParser(
    formatter = optparse.TitledHelpFormatter(),
    usage = globals()['__doc__'])
optionsparser.add_option(
    "-b", "--build", action = "store_true", default = False,
    help = "Build McPAT before running tests")
optionsparser.add_option(
    "-c", "--cleanup", action = "store_true", default = False,
    help = "Clean up the specified regression directory")
optionsparser.add_option(
    "-f", "--force", action = "store_true", default = False,
    help = "Force run regression even if directory isn't set up")
optionsparser.add_option(
    "-m", "--maketest", action = "store_true", default = False,
    help = "Set up the specified test directory")
optionsparser.add_option(
    "-v", "--verbose", action = "store_true", default = False,
    help = "Print verbose output")
(options, args) = optionsparser.parse_args()

# The McPAT binary must already exist unless --build was requested.
if not os.path.exists(mcpat_binary) and not options.build:
    print "ERROR: McPAT binary does not exist: %s" % mcpat_binary
    exit(0)
if options.build:
    print "Building McPAT..."
    bin_dir = os.path.dirname(mcpat_binary)
    directory = os.path.join(bin_dir, "../../ext/mcpat")
    build_output = os.popen(
        "cd %s; make clean; make -j 8 dbg 2>&1" % directory).read()
    if "error" in build_output.lower():
        print "Error during build: %s" % build_output
        exit(0)
if len(args) < 1:
    print "ERROR: Must specify regressions directory"
    exit(0)
# check params
rootdir = args[0];
if not os.path.exists(rootdir):
    print "ERROR: Regressions directory does not exist: %s" % rootdir
    exit(0)
if options.maketest:
    # -m: (re)generate the region<N>.out.ref reference outputs in rootdir.
    # The specified rootdir must exist since we got here
    # Check if directory has tests
    list = os.listdir(rootdir)
    found_test = False
    for file in list:
        if "power_region" in file and "out" not in file and "ref" not in file:
            found_test = True
            # Prepare to run the test in order to set it up
            fileparts = file.split(".")
            region_index = fileparts[0][12:]
            regression_test = os.path.join(rootdir, file)
            regression_output = os.path.join(
                rootdir, "region%s.out.ref" % region_index)
            if os.path.exists(regression_output):
                print "WARN: Overwriting old regression output: " \
                    "%s" % regression_output
            # Run the test to set it up
            print "Writing new regression output..."
            os.system(
                "%s -infile %s -print_level 10 > %s" %
                (mcpat_binary, regression_test, regression_output))
    if not found_test:
        print "ERROR: Invalid test directory: %s" % rootdir
        print "ERROR: Must contain XML file power_region*.xml"
        exit(0)
found_test = False
if has_power_region_files(rootdir):
    # rootdir is itself a single test directory.
    found_test = True
    if is_valid_test_directory(rootdir) or options.force:
        run_test(rootdir)
    else:
        print "WARN: Regression directory is not set up: %s" % rootdir
else:
    # rootdir is a container: run each test subdirectory in sorted order.
    folders = os.listdir(rootdir)
    folders.sort()
    for folder in folders:
        testdir = os.path.join(rootdir, folder)
        if os.path.isdir(testdir):
            if has_power_region_files(testdir):
                found_test = True
                if is_valid_test_directory(testdir):
                    run_test(testdir)
                else:
                    if options.force:
                        print "WARN: Regression directory is not set up: " \
                            "%s" % testdir
                        print "WARN: Running test anyway: %s..." % testdir
                        run_test(testdir)
                    else:
                        print "Regression directory is not set up: %s" % testdir
            else:
                print "Not a valid test directory: %s" % testdir
if not found_test:
    print "No valid regressions found in %s" % rootdir
| mit |
chauhanhardik/populo | common/djangoapps/terrain/stubs/tests/test_lti_stub.py | 172 | 4269 | """
Unit tests for stub LTI implementation.
"""
from mock import Mock, patch
import unittest
import urllib2
import requests
from terrain.stubs.lti import StubLtiService
class StubLtiServiceTest(unittest.TestCase):
    """
    A stub of the LTI provider that listens on a local
    port and responds with pre-defined grade messages.
    Used for lettuce BDD tests in lms/courseware/features/lti.feature
    """

    def setUp(self):
        super(StubLtiServiceTest, self).setUp()
        self.server = StubLtiService()
        self.uri = 'http://127.0.0.1:{}/'.format(self.server.port)
        self.launch_uri = self.uri + 'correct_lti_endpoint'
        self.addCleanup(self.server.shutdown)
        # Minimal OAuth/LTI launch payload. Signature-related fields are left
        # blank; tests that need a valid signature patch verify_hmac_sha1.
        self.payload = {
            'user_id': 'default_user_id',
            'roles': 'Student',
            'oauth_nonce': '',
            'oauth_timestamp': '',
            'oauth_consumer_key': 'test_client_key',
            'lti_version': 'LTI-1p0',
            'oauth_signature_method': 'HMAC-SHA1',
            'oauth_version': '1.0',
            'oauth_signature': '',
            'lti_message_type': 'basic-lti-launch-request',
            'oauth_callback': 'about:blank',
            'launch_presentation_return_url': '',
            'lis_outcome_service_url': 'http://localhost:8001/test_callback',
            'lis_result_sourcedid': '',
            'resource_link_id': '',
        }

    def test_invalid_request_url(self):
        """
        Tests that LTI server processes request with right program path but with wrong header.
        """
        self.launch_uri = self.uri + 'wrong_lti_endpoint'
        response = requests.post(self.launch_uri, data=self.payload)
        self.assertIn('Invalid request URL', response.content)

    def test_wrong_signature(self):
        """
        Tests that LTI server processes request with right program
        path and responses with incorrect signature.
        """
        response = requests.post(self.launch_uri, data=self.payload)
        self.assertIn('Wrong LTI signature', response.content)

    @patch('terrain.stubs.lti.signature.verify_hmac_sha1', return_value=True)
    def test_success_response_launch_lti(self, check_oauth):
        """
        Success lti launch.
        """
        response = requests.post(self.launch_uri, data=self.payload)
        self.assertIn('This is LTI tool. Success.', response.content)

    @patch('terrain.stubs.lti.signature.verify_hmac_sha1', return_value=True)
    def test_send_graded_result(self, verify_hmac):  # pylint: disable=unused-argument
        """A POST to /grade forwards the grade message to the consumer."""
        response = requests.post(self.launch_uri, data=self.payload)
        self.assertIn('This is LTI tool. Success.', response.content)
        grade_uri = self.uri + 'grade'
        with patch('terrain.stubs.lti.requests.post') as mocked_post:
            mocked_post.return_value = Mock(content='Test response', status_code=200)
            response = urllib2.urlopen(grade_uri, data='')
            self.assertIn('Test response', response.read())

    @patch('terrain.stubs.lti.signature.verify_hmac_sha1', return_value=True)
    def test_lti20_outcomes_put(self, verify_hmac):  # pylint: disable=unused-argument
        """An LTI 2.0 outcome PUT is relayed and the consumer status echoed."""
        response = requests.post(self.launch_uri, data=self.payload)
        self.assertIn('This is LTI tool. Success.', response.content)
        grade_uri = self.uri + 'lti2_outcome'
        with patch('terrain.stubs.lti.requests.put') as mocked_put:
            mocked_put.return_value = Mock(status_code=200)
            response = urllib2.urlopen(grade_uri, data='')
            self.assertIn('LTI consumer (edX) responded with HTTP 200', response.read())

    @patch('terrain.stubs.lti.signature.verify_hmac_sha1', return_value=True)
    def test_lti20_outcomes_put_like_delete(self, verify_hmac):  # pylint: disable=unused-argument
        """An LTI 2.0 delete-style PUT is relayed and the status echoed."""
        response = requests.post(self.launch_uri, data=self.payload)
        self.assertIn('This is LTI tool. Success.', response.content)
        grade_uri = self.uri + 'lti2_delete'
        with patch('terrain.stubs.lti.requests.put') as mocked_put:
            mocked_put.return_value = Mock(status_code=200)
            response = urllib2.urlopen(grade_uri, data='')
            self.assertIn('LTI consumer (edX) responded with HTTP 200', response.read())
| agpl-3.0 |
alyosha1879/ryu | ryu/cmd/ryu_base.py | 14 | 2467 | # Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2014 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ryu.contrib
from ryu import cfg
from ryu import utils
from ryu import version
import argparse
import os.path
import sys
# Maps each ryu subcommand name to the module whose main() implements it.
subcommands = {
    'run': 'ryu.cmd.manager',
    'of-config-cli': 'ryu.cmd.of_config_cli',
    'rpc-cli': 'ryu.cmd.rpc_cli',
}
class RemainderOpt(cfg.MultiStrOpt):
    """MultiStrOpt that greedily consumes every remaining CLI argument.

    Used for the positional ``subcommand_args`` option so that everything
    after the subcommand name is handed to the subcommand untouched.
    """

    def _get_argparse_kwargs(self, group, **kwargs):
        # Extend the parent's argparse kwargs with nargs=REMAINDER so argparse
        # stops option parsing and collects the rest of the command line.
        kwargs = cfg.MultiStrOpt._get_argparse_kwargs(self, group, **kwargs)
        kwargs['nargs'] = argparse.REMAINDER
        return kwargs
# Top-level option schema: a mandatory positional subcommand name followed by
# an arbitrary tail of arguments forwarded verbatim to that subcommand.
base_conf = cfg.ConfigOpts()
base_conf.register_cli_opt(cfg.StrOpt('subcommand', positional=True,
                                      required=True,
                                      help='[%s]' % '|'.join(
                                          subcommands.keys())))
base_conf.register_cli_opt(RemainderOpt('subcommand_args', default=[],
                                        positional=True,
                                        help='subcommand specific arguments'))
class SubCommand(object):
    """A named CLI subcommand bound to its entry-point callable."""

    def __init__(self, name, entry):
        # `entry` is invoked as entry(args=..., prog=...), like a main().
        self.name = name
        self.entry = entry

    def run(self, args):
        """Invoke the entry point with a "<tool> <subcommand>" prog name."""
        tool = os.path.basename(sys.argv[0])
        prog = '{0} {1}'.format(tool, self.name)
        self.entry(args=args, prog=prog)
def main():
    """Parse the top-level CLI, resolve the subcommand module, and run it."""
    try:
        base_conf(project='ryu', version='ryu %s' % version)
    except cfg.RequiredOptError:
        # Fix: the original used the Python-2-only `except E, e` syntax and
        # bound a name it never used; `except E:` works on Python 2 and 3.
        base_conf.print_help()
        raise SystemExit(1)
    subcmd_name = base_conf.subcommand
    try:
        subcmd_mod_name = subcommands[subcmd_name]
    except KeyError:
        base_conf.print_help()
        raise SystemExit('Unknown subcommand %s' % subcmd_name)
    # Import lazily so only the chosen subcommand's module is loaded.
    subcmd_mod = utils.import_module(subcmd_mod_name)
    subcmd = SubCommand(name=subcmd_name, entry=subcmd_mod.main)
    subcmd.run(base_conf.subcommand_args)
| apache-2.0 |
pilou-/ansible | test/units/modules/network/cnos/test_cnos_linkagg.py | 32 | 4430 | #
# (c) 2018 Red Hat Inc.
# Copyright (C) 2017 Lenovo.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from units.compat.mock import patch
from ansible.modules.network.cnos import cnos_linkagg
from units.modules.utils import set_module_args
from .cnos_module import TestCnosModule, load_fixture
class TestCnosLinkaggModule(TestCnosModule):
    """Unit tests for the cnos_linkagg Ansible module.

    get_config/load_config are patched so the module under test reads the
    cnos_linkagg_config.cfg fixture instead of talking to a real device, and
    the generated CLI commands are asserted directly.
    """

    module = cnos_linkagg

    def setUp(self):
        super(TestCnosLinkaggModule, self).setUp()
        self._patch_get_config = patch(
            'ansible.modules.network.cnos.cnos_linkagg.get_config'
        )
        self._patch_load_config = patch(
            'ansible.modules.network.cnos.cnos_linkagg.load_config'
        )
        self._get_config = self._patch_get_config.start()
        self._load_config = self._patch_load_config.start()

    def tearDown(self):
        super(TestCnosLinkaggModule, self).tearDown()
        self._patch_get_config.stop()
        self._patch_load_config.stop()

    def load_fixtures(self, commands=None):
        # Feed the canned device configuration to the mocked get_config;
        # load_config is a no-op so nothing is pushed anywhere.
        config_file = 'cnos_linkagg_config.cfg'
        self._get_config.return_value = load_fixture(config_file)
        self._load_config.return_value = None

    def test_cnos_linkagg_group_present(self, *args, **kwargs):
        """Creating a new port-channel group emits the create commands."""
        set_module_args(dict(
            group='10',
            state='present'
        ))
        result = self.execute_module(changed=True)
        self.assertEqual(
            result,
            {
                'commands': [
                    'interface port-channel 10',
                    'exit'
                ],
                'changed': True
            }
        )

    def test_cnos_linkagg_group_members_active(self, *args, **kwargs):
        """Creating a group with members puts each interface in the channel."""
        set_module_args(dict(
            group='10',
            mode='active',
            members=[
                'Ethernet 1/33',
                'Ethernet 1/44'
            ]
        ))
        result = self.execute_module(changed=True)
        self.assertEqual(
            result,
            {
                'commands': [
                    'interface port-channel 10',
                    'exit',
                    'interface Ethernet 1/33',
                    'channel-group 10 mode active',
                    'interface Ethernet 1/44',
                    'channel-group 10 mode active'
                ],
                'changed': True
            }
        )

    def test_cnos_linkagg_group_member_removal(self, *args, **kwargs):
        """Changing an existing group's member list regenerates the commands."""
        set_module_args(dict(
            group='20',
            mode='active',
            members=[
                'Ethernet 1/10',
            ]
        ))
        result = self.execute_module(changed=True)
        self.assertEqual(
            result,
            {
                'commands': [
                    'interface port-channel 20',
                    'exit',
                    'interface Ethernet 1/10',
                    'channel-group 20 mode active'
                ],
                'changed': True
            }
        )

    def test_cnos_linkagg_group_members_absent(self, *args, **kwargs):
        """Removing a group that exists emits the delete; removing one that
        does not exist is a no-op (changed=False, no commands)."""
        set_module_args(dict(
            group='20',
            state='absent'
        ))
        result = self.execute_module(changed=True)
        self.assertEqual(
            result,
            {
                'commands': [
                    'no interface port-channel 20'
                ],
                'changed': True
            }
        )
        set_module_args(dict(
            group='10',
            state='absent'
        ))
        result = self.execute_module(changed=False)
        self.assertEqual(
            result,
            {
                'commands': [],
                'changed': False
            }
        )
| gpl-3.0 |
fwilk/paperless | src/documents/migrations/0010_log.py | 3 | 1117 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-02-27 17:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the documents.Log model: timestamped, leveled log entries that
    can be grouped by a UUID and attributed to a component."""

    dependencies = [
        ('documents', '0009_auto_20160214_0040'),
    ]

    operations = [
        migrations.CreateModel(
            name='Log',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # UUID shared by related entries so they can be read as one run.
                ('group', models.UUIDField(blank=True)),
                ('message', models.TextField()),
                # Numeric values mirror the stdlib logging levels.
                ('level', models.PositiveIntegerField(choices=[(10, 'Debugging'), (20, 'Informational'), (30, 'Warning'), (40, 'Error'), (50, 'Critical')], default=20)),
                ('component', models.PositiveIntegerField(choices=[(1, 'Consumer'), (2, 'Mail Fetcher')])),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
            ],
            options={
                # Newest-modified entries first by default.
                'ordering': ('-modified',),
            },
        ),
    ]
| gpl-3.0 |
rajalokan/nova | nova/db/sqlalchemy/migrate_repo/versions/277_add_fixed_ip_updated_index.py | 73 | 1488 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from sqlalchemy import Index, MetaData, Table
from nova.i18n import _LI
LOG = logging.getLogger(__name__)

# Composite index this migration adds to fixed_ips; the index name encodes
# the column list ("fixed_ips_deleted_allocated_updated_at_idx").
INDEX_COLUMNS = ['deleted', 'allocated', 'updated_at']
INDEX_NAME = 'fixed_ips_%s_idx' % ('_'.join(INDEX_COLUMNS),)
def _get_table_index(migrate_engine):
    """Reflect the fixed_ips table and look up the target composite index.

    Returns a (metadata, table, index) triple where index is the existing
    Index whose column list matches INDEX_COLUMNS, or None if there is none.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    table = Table('fixed_ips', meta, autoload=True)
    matching = None
    for candidate in table.indexes:
        if candidate.columns.keys() == INDEX_COLUMNS:
            matching = candidate
            break
    return meta, table, matching
def upgrade(migrate_engine):
    """Add the fixed_ips (deleted, allocated, updated_at) index if missing."""
    meta, table, index = _get_table_index(migrate_engine)
    if index:
        # An equivalent index is already present; nothing to do.
        LOG.info(_LI('Skipped adding %s because an equivalent index'
                     ' already exists.'), INDEX_NAME)
        return
    columns = [getattr(table.c, name) for name in INDEX_COLUMNS]
    Index(INDEX_NAME, *columns).create(migrate_engine)
| apache-2.0 |
KhronosGroup/COLLADA-CTS | StandardDataSets/collada/library_effects/effect/profile_COMMON/technique/constant/emission/_reference_effect_constant_emission_black/_reference_effect_constant_emission_black.py | 28 | 3721 |
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verifications
# methods. The assistant buffers its checks, so that running them again
# does not incurs an unnecessary performance hint.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
# Left empty for this reference test: the judge below relies solely on
# rendered-image comparison, so no XML tags, attributes, or data values
# need to be checked.
tagLst = []
attrName = ''
attrVal = ''
dataToCheck = ''
class SimpleJudgingObject:
    """Judge for this reference test.

    Baseline requires no crashes, passing Import/Export/Validate steps, and
    matching rendered images; the superior and exemplary badges simply
    inherit the baseline verdict.
    """

    def __init__(self, _tagLst, _attrName, _attrVal, _data):
        self.tagList = _tagLst
        self.attrName = _attrName
        self.attrVal = _attrVal
        self.dataToCheck = _data
        # Per-badge results, filled in by the Judge* methods below.
        self.status_baseline = False
        self.status_superior = False
        self.status_exemplary = False
        self.__assistant = JudgeAssistant.JudgeAssistant()

    def JudgeBaseline(self, context):
        # No step should not crash
        self.__assistant.CheckCrashes(context)
        # Import/export/validate must exist and pass, while Render must only exist.
        self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], ["Render"])
        if (self.__assistant.GetResults() == False):
            self.status_baseline = False
            return False
        # Compare the rendered images
        self.__assistant.CompareRenderedImages(context)
        self.status_baseline = self.__assistant.DeferJudgement(context)
        return self.status_baseline

    # To pass intermediate you need to pass basic, this object could also include additional
    # tests that were specific to the intermediate badge.
    def JudgeSuperior(self, context):
        self.status_superior = self.status_baseline
        return self.status_superior

    # To pass advanced you need to pass intermediate, this object could also include additional
    # tests that were specific to the advanced badge
    def JudgeExemplary(self, context):
        self.status_exemplary = self.status_superior
        return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
| mit |
necsst-nms/PMAL_TRACE | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";

for_comm = None
default_interval = 3
interval = default_interval

# Accepted argument forms:
#   sctop.py <comm> <interval>  -> filter by command, custom refresh interval
#   sctop.py <interval>         -> all commands, custom interval
#   sctop.py <comm>             -> filter by command, default interval
if len(sys.argv) > 3:
    sys.exit(usage)

if len(sys.argv) > 2:
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    try:
        interval = int(sys.argv[1])
    except ValueError:
        # Not a number, so it must be a command name to filter on.
        for_comm = sys.argv[1]
        interval = default_interval

# Map of syscall id -> event count, reset after every refresh.
syscalls = autodict()
def trace_begin():
    """perf-script hook: start the background thread that redraws totals.

    The refresh loop in print_syscall_totals runs forever; it goes away when
    perf tears the script down.
    """
    # Fix: dropped the dead `pass` statement that followed this call.
    thread.start_new_thread(print_syscall_totals, (interval,))
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    """Tracepoint handler: count one sys_enter event per syscall id.

    If a [comm] filter was given on the command line, events from other
    commands are ignored.
    """
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        # First event for this id: the autodict placeholder cannot be
        # incremented, so seed the count at 1.
        syscalls[id] = 1
def print_syscall_totals(interval):
    """Refresh loop: every `interval` seconds clear the terminal, print the
    per-syscall counts sorted by frequency, then reset the counters."""
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",
        print "%-40s %10s\n" % ("event", "count"),
        print "%-40s %10s\n" % ("----------------------------------------", \
            "----------"),
        # Sort by count (descending); ids whose placeholder was never turned
        # into a number are skipped via the TypeError guard below.
        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
            reverse = True):
            try:
                print "%-40s %10d\n" % (syscall_name(id), val),
            except TypeError:
                pass
        syscalls.clear()
        time.sleep(interval)
| gpl-2.0 |
BIGBALLON/cifar-10-cnn | 1_Lecun_Network/LeNet_dp_da_keras.py | 1 | 2768 | import keras
import numpy as np
from keras import optimizers
from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from keras.callbacks import LearningRateScheduler, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
# Training hyper-parameters.
batch_size = 128
epochs = 200
iterations = 391          # steps per epoch (~50000 CIFAR-10 images / 128)
num_classes = 10
# Per-channel mean/std used to normalize inputs ((raw - mean) / std below).
mean = [125.307, 122.95, 113.865]
std = [62.9932, 62.0887, 66.7048]
def build_model():
    """Build and compile a LeNet-style CNN for 32x32x3 inputs.

    Two conv/pool stages followed by two fully connected layers and a
    10-way softmax; compiled with Nesterov-momentum SGD and categorical
    cross-entropy.
    """
    model = Sequential()
    model.add(Conv2D(6, (5, 5), padding='valid', activation = 'relu', kernel_initializer='he_normal', input_shape=(32,32,3)))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(Conv2D(16, (5, 5), padding='valid', activation = 'relu', kernel_initializer='he_normal'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(Flatten())
    model.add(Dense(120, activation = 'relu', kernel_initializer='he_normal'))
    model.add(Dense(84, activation = 'relu', kernel_initializer='he_normal'))
    model.add(Dense(10, activation = 'softmax', kernel_initializer='he_normal'))
    sgd = optimizers.SGD(lr=.1, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model
def scheduler(epoch):
    """Step learning-rate schedule for LearningRateScheduler.

    Returns 0.01 for epochs [0, 100), 0.005 for [100, 150), and 0.001
    thereafter.  Note this overrides the optimizer's initial lr each epoch.
    """
    for boundary, rate in ((100, 0.01), (150, 0.005)):
        if epoch < boundary:
            return rate
    return 0.001
if __name__ == '__main__':
    # load data
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')

    # data preprocessing: standardise each channel as (raw - mean) / std,
    # using the dataset statistics declared at the top of the file.
    for i in range(3):
        x_train[:,:,:,i] = (x_train[:,:,:,i] - mean[i]) / std[i]
        x_test[:,:,:,i] = (x_test[:,:,:,i] - mean[i]) / std[i]

    # build network
    model = build_model()
    print(model.summary())

    # set callback: TensorBoard logging plus the step LR schedule.
    tb_cb = TensorBoard(log_dir='./lenet_dp_da', histogram_freq=0)
    change_lr = LearningRateScheduler(scheduler)
    cbks = [change_lr,tb_cb]

    # using real-time data augmentation: horizontal flips and 1/8 image
    # translations, padding shifted-in pixels with zeros.
    print('Using real-time data augmentation.')
    datagen = ImageDataGenerator(horizontal_flip=True,
        width_shift_range=0.125,height_shift_range=0.125,fill_mode='constant',cval=0.)

    datagen.fit(x_train)

    # start training; validation runs on the (un-augmented) test set.
    model.fit_generator(datagen.flow(x_train, y_train,batch_size=batch_size),
                        steps_per_epoch=iterations,
                        epochs=epochs,
                        callbacks=cbks,
                        validation_data=(x_test, y_test))
    # save model
    model.save('lenet_dp_da.h5')
| mit |
plotly/plotly.py | packages/python/plotly/_plotly_future_/__init__.py | 2 | 1832 | import warnings
import functools
# Initialize _future_flags with all future flags that are now always in
# effect.  Each string names an opt-in behaviour from the v3->v4 transition
# that has since become unconditional.
_future_flags = {
    "renderer_defaults",
    "template_defaults",
    "extract_chart_studio",
    "remove_deprecations",
    "v4_subplots",
    "orca_defaults",
    "timezones",
    "trace_uids",
}
def _assert_plotly_not_imported():
import sys
if "plotly" in sys.modules:
raise ImportError(
"""\
The _plotly_future_ module must be imported before the plotly module"""
)
# Re-enable (action "default": show once per location) the chart_studio
# DeprecationWarnings, which Python would otherwise silence by default.
warnings.filterwarnings(
    "default", ".*?is deprecated, please use chart_studio*", DeprecationWarning
)
def _chart_studio_warning(submodule):
warnings.warn(
"The plotly.{submodule} module is deprecated, "
"please use chart_studio.{submodule} instead".format(submodule=submodule),
DeprecationWarning,
stacklevel=2,
)
def _chart_studio_error(submodule):
raise ImportError(
"""
The plotly.{submodule} module is deprecated,
please install the chart-studio package and use the
chart_studio.{submodule} module instead.
""".format(
submodule=submodule
)
)
def _chart_studio_deprecation(fn):
fn_name = fn.__name__
fn_module = fn.__module__
plotly_name = ".".join(["plotly"] + fn_module.split(".")[1:] + [fn_name])
chart_studio_name = ".".join(
["chart_studio"] + fn_module.split(".")[1:] + [fn_name]
)
msg = """\
{plotly_name} is deprecated, please use {chart_studio_name}\
""".format(
plotly_name=plotly_name, chart_studio_name=chart_studio_name
)
@functools.wraps(fn)
def wrapper(*args, **kwargs):
warnings.warn(msg, DeprecationWarning, stacklevel=2)
return fn(*args, **kwargs)
return wrapper
__all__ = ["_future_flags", "_chart_studio_error"]
| mit |
byterom/android_external_chromium_org | build/android/buildbot/bb_run_bot.py | 25 | 11032 | #!/usr/bin/env python
#
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import json
import os
import pipes
import re
import subprocess
import sys
import bb_utils
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from pylib import constants
CHROMIUM_COVERAGE_BUCKET = 'chromium-code-coverage'
# A bot's full description: its id, host-side config, and optional test config.
_BotConfig = collections.namedtuple(
    'BotConfig', ['bot_id', 'host_obj', 'test_obj'])
# Host-side steps plus optional extra args, extra GYP defines, and target arch.
HostConfig = collections.namedtuple(
    'HostConfig',
    ['script', 'host_steps', 'extra_args', 'extra_gyp_defines', 'target_arch'])
# Device-side test script, the test names to run, and extra script args.
TestConfig = collections.namedtuple('Tests', ['script', 'tests', 'extra_args'])
def BotConfig(bot_id, host_object, test_object=None):
  # Convenience constructor for _BotConfig that makes the test config optional.
  return _BotConfig(bot_id, host_object, test_object)
def DictDiff(d1, d2):
  """Return a diff-style listing of keys whose values differ between d1 and d2.

  Lines are '- key=old' for d1's value and '+ key=new' for d2's value, with
  values shell-quoted.  (Python 2: dict.keys() returns lists, hence the '+'.)
  """
  diff = []
  for key in sorted(set(d1.keys() + d2.keys())):
    if key in d1 and d1[key] != d2.get(key):
      diff.append('- %s=%s' % (key, pipes.quote(d1[key])))
    if key in d2 and d2[key] != d1.get(key):
      diff.append('+ %s=%s' % (key, pipes.quote(d2[key])))
  return '\n'.join(diff)
def GetEnvironment(host_obj, testing, exJtra_env_vars=None):
  """Build the environment dict in which the bot's commands will run.

  Sources build/android/envsetup.sh (skipped when testing) in a bash subshell
  and captures the resulting environment as JSON, then layers GYP settings
  derived from host_obj on top.  Exits the process on envsetup failure.
  """
  init_env = dict(os.environ)
  init_env['GYP_GENERATORS'] = 'ninja'
  if extra_env_vars:
    init_env.update(extra_env_vars)
  envsetup_cmd = '. build/android/envsetup.sh'
  if testing:
    # Skip envsetup to avoid presubmit dependence on android deps.
    print 'Testing mode - skipping "%s"' % envsetup_cmd
    envsetup_cmd = ':'
  else:
    print 'Running %s' % envsetup_cmd
  # envsetup's own output is routed to stderr so that stdout carries only the
  # JSON-encoded environment produced by env_to_json.py.
  proc = subprocess.Popen(['bash', '-exc',
      envsetup_cmd + ' >&2; python build/android/buildbot/env_to_json.py'],
      stdout=subprocess.PIPE, stderr=subprocess.PIPE,
      cwd=bb_utils.CHROME_SRC, env=init_env)
  json_env, envsetup_output = proc.communicate()
  if proc.returncode != 0:
    print >> sys.stderr, 'FATAL Failure in envsetup.'
    print >> sys.stderr, envsetup_output
    sys.exit(1)
  env = json.loads(json_env)
  env['GYP_DEFINES'] = env.get('GYP_DEFINES', '') + \
      ' OS=android fastbuild=1 use_goma=1 gomadir=%s' % bb_utils.GOMA_DIR
  if host_obj.target_arch:
    env['GYP_DEFINES'] += ' target_arch=%s' % host_obj.target_arch
  extra_gyp = host_obj.extra_gyp_defines
  if extra_gyp:
    env['GYP_DEFINES'] += ' %s' % extra_gyp
    if re.search('(asan|clang)=1', extra_gyp):
      # asan/clang builds must not use the default target compiler.
      env.pop('CXX_target', None)

  # Bots checkout chrome in /b/build/slave/<name>/build/src
  build_internal_android = os.path.abspath(os.path.join(
      bb_utils.CHROME_SRC, '..', '..', '..', '..', '..', 'build_internal',
      'scripts', 'slave', 'android'))
  if os.path.exists(build_internal_android):
    env['PATH'] = os.pathsep.join([build_internal_android, env['PATH']])
  return env
def GetCommands(options, bot_config):
  """Get a formatted list of commands.

  Builds the host-steps command from bot_config.host_obj and, when a test
  config is present, a device-steps command with one '-f <test>' per test.

  Args:
    options: Options object.
    bot_config: A BotConfig named tuple.
  Returns:
    list of Command objects.
  """
  property_args = bb_utils.EncodeProperties(options)
  commands = [[bot_config.host_obj.script,
               '--steps=%s' % ','.join(bot_config.host_obj.host_steps)] +
              property_args + (bot_config.host_obj.extra_args or [])]

  test_obj = bot_config.test_obj
  if test_obj:
    run_test_cmd = [test_obj.script] + property_args
    for test in test_obj.tests:
      run_test_cmd.extend(['-f', test])
    if test_obj.extra_args:
      run_test_cmd.extend(test_obj.extra_args)
    commands.append(run_test_cmd)

  return commands
def GetBotStepMap():
  """Build and return the map of bot id -> BotConfig for all known bots.

  Base configs are declared in bot_configs; copy_map then clones some of
  them under additional ids (trybots, lkgr, webkit), stripping the
  flakiness-server upload flag from trybot copies.
  """
  compile_step = ['compile']
  chrome_proxy_tests = ['chrome_proxy']
  chrome_sync_shell_tests = ['sync']
  std_host_tests = ['check_webview_licenses', 'findbugs']
  std_build_steps = ['compile', 'zip_build']
  std_test_steps = ['extract_build']
  std_tests = ['ui', 'unit', 'mojo']
  telemetry_tests = ['telemetry_perf_unittests']
  flakiness_server = (
      '--flakiness-server=%s' % constants.UPSTREAM_FLAKINESS_SERVER)
  experimental = ['--experimental']
  bisect_chrome_output_dir = os.path.abspath(
      os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir,
                   os.pardir, 'bisect', 'src', 'out'))
  # Shorthand constructors to keep the table below readable.
  B = BotConfig
  H = (lambda steps, extra_args=None, extra_gyp=None, target_arch=None :
       HostConfig('build/android/buildbot/bb_host_steps.py', steps, extra_args,
                  extra_gyp, target_arch))
  T = (lambda tests, extra_args=None :
       TestConfig('build/android/buildbot/bb_device_steps.py', tests,
                  extra_args))

  bot_configs = [
      # Main builders
      B('main-builder-dbg', H(std_build_steps + std_host_tests)),
      B('main-builder-rel', H(std_build_steps)),
      B('main-clang-builder',
        H(compile_step, extra_gyp='clang=1 component=shared_library')),
      B('main-clobber', H(compile_step)),
      B('main-tests-rel', H(std_test_steps),
        T(std_tests + telemetry_tests + chrome_proxy_tests,
          ['--cleanup', flakiness_server])),
      B('main-tests', H(std_test_steps),
        T(std_tests,['--cleanup', flakiness_server])),

      # Other waterfalls
      B('asan-builder-tests', H(compile_step,
                                extra_gyp='asan=1 component=shared_library'),
        T(std_tests, ['--asan', '--asan-symbolize'])),
      B('blink-try-builder', H(compile_step)),
      B('chromedriver-fyi-tests-dbg', H(std_test_steps),
        T(['chromedriver'], ['--install=ChromeShell', '--skip-wipe',
                             '--cleanup'])),
      B('fyi-x86-builder-dbg',
        H(compile_step + std_host_tests, experimental, target_arch='ia32')),
      B('fyi-builder-dbg',
        H(std_build_steps + std_host_tests, experimental,
          extra_gyp='emma_coverage=1')),
      B('x86-builder-dbg',
        H(compile_step + std_host_tests, target_arch='ia32')),
      B('fyi-builder-rel', H(std_build_steps, experimental)),
      B('fyi-tests', H(std_test_steps),
        T(std_tests + chrome_sync_shell_tests,
          ['--experimental', flakiness_server,
           '--coverage-bucket', CHROMIUM_COVERAGE_BUCKET,
           '--cleanup'])),
      B('fyi-component-builder-tests-dbg',
        H(compile_step, extra_gyp='component=shared_library'),
        T(std_tests, ['--experimental', flakiness_server])),
      B('gpu-builder-tests-dbg',
        H(compile_step),
        T(['gpu'], ['--install=ContentShell'])),
      # Pass empty T([]) so that logcat monitor and device status check are run.
      B('perf-bisect-builder-tests-dbg',
        H(['bisect_perf_regression']),
        T([], ['--chrome-output-dir', bisect_chrome_output_dir])),
      B('perf-tests-rel', H(std_test_steps),
        T([], ['--install=ChromeShell', '--cleanup'])),
      B('webkit-latest-webkit-tests', H(std_test_steps),
        T(['webkit_layout', 'webkit'], ['--cleanup', '--auto-reconnect'])),
      B('webkit-latest-contentshell', H(compile_step),
        T(['webkit_layout'], ['--auto-reconnect'])),
      B('builder-unit-tests', H(compile_step), T(['unit'])),

      # Generic builder config (for substring match).
      B('builder', H(std_build_steps)),
  ]

  bot_map = dict((config.bot_id, config) for config in bot_configs)

  # These bots have identical configuration to ones defined earlier.
  copy_map = [
      ('lkgr-clobber', 'main-clobber'),
      ('try-builder-dbg', 'main-builder-dbg'),
      ('try-builder-rel', 'main-builder-rel'),
      ('try-clang-builder', 'main-clang-builder'),
      ('try-fyi-builder-dbg', 'fyi-builder-dbg'),
      ('try-x86-builder-dbg', 'x86-builder-dbg'),
      ('try-tests-rel', 'main-tests-rel'),
      ('try-tests', 'main-tests'),
      ('try-fyi-tests', 'fyi-tests'),
      ('webkit-latest-tests', 'main-tests'),
  ]
  for to_id, from_id in copy_map:
    assert to_id not in bot_map
    # pylint: disable=W0212
    bot_map[to_id] = copy.deepcopy(bot_map[from_id])._replace(bot_id=to_id)

    # Trybots do not upload to flakiness dashboard. They should be otherwise
    # identical in configuration to their trunk building counterparts.
    test_obj = bot_map[to_id].test_obj
    if to_id.startswith('try') and test_obj:
      extra_args = test_obj.extra_args
      if extra_args and flakiness_server in extra_args:
        extra_args.remove(flakiness_server)
  return bot_map
# Return an object from the map, looking first for an exact id match.
# If this fails, look for an id which is a substring of the specified id.
# Choose the longest of all substring matches.
# pylint: disable=W0622
def GetBestMatch(id_map, id):
  config = id_map.get(id)
  if not config:
    # Python 2: filter() returns a list here, so truthiness is a length test.
    substring_matches = filter(lambda x: x in id, id_map.iterkeys())
    if substring_matches:
      max_id = max(substring_matches, key=len)
      print 'Using config from id="%s" (substring match).' % max_id
      config = id_map[max_id]
  return config
def GetRunBotOptParser():
  """Return the option parser for this script (extends bb_utils' parser)."""
  parser = bb_utils.GetParser()
  parser.add_option('--bot-id', help='Specify bot id directly.')
  parser.add_option('--testing', action='store_true',
                    help='For testing: print, but do not run commands')
  return parser
def GetBotConfig(options, bot_step_map):
bot_id = options.bot_id or options.factory_properties.get('android_bot_id')
if not bot_id:
print (sys.stderr,
'A bot id must be specified through option or factory_props.')
return
bot_config = GetBestMatch(bot_step_map, bot_id)
if not bot_config:
print 'Error: config for id="%s" cannot be inferred.' % bot_id
return bot_config
def RunBotCommands(options, commands, env):
  """Run each command in order inside `env`; stop at the first failure.

  Returns the first nonzero exit code, or None when every command succeeds.
  """
  print 'Environment changes:'
  print DictDiff(dict(os.environ), env)

  for command in commands:
    print bb_utils.CommandToString(command)
    sys.stdout.flush()
    if options.testing:
      env['BUILDBOT_TESTING'] = '1'

    return_code = subprocess.call(command, cwd=bb_utils.CHROME_SRC, env=env)
    if return_code != 0:
      return return_code
def main(argv):
  """Entry point: identify the host, resolve the bot config, run its steps."""
  proc = subprocess.Popen(
      ['/bin/hostname', '-f'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  hostname_stdout, hostname_stderr = proc.communicate()
  if proc.returncode == 0:
    print 'Running on: ' + hostname_stdout
  else:
    # NOTE(review): labelled WARNING but treated as fatal (sys.exit below).
    print >> sys.stderr, 'WARNING: failed to run hostname'
    print >> sys.stderr, hostname_stdout
    print >> sys.stderr, hostname_stderr
    sys.exit(1)

  parser = GetRunBotOptParser()
  options, args = parser.parse_args(argv[1:])
  if args:
    parser.error('Unused args: %s' % args)

  bot_config = GetBotConfig(options, GetBotStepMap())
  if not bot_config:
    sys.exit(1)

  print 'Using config:', bot_config

  commands = GetCommands(options, bot_config)
  for command in commands:
    print 'Will run: ', bb_utils.CommandToString(command)
  print

  env = GetEnvironment(bot_config.host_obj, options.testing)
  return RunBotCommands(options, commands, env)
if __name__ == '__main__':
  # Script entry point; propagate main()'s return value as the exit code.
  sys.exit(main(sys.argv))
| bsd-3-clause |
ryansb/boto | tests/unit/test_regioninfo.py | 96 | 5164 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import os
from tests.unit import unittest
import boto
from boto.regioninfo import RegionInfo, load_endpoint_json, merge_endpoints
from boto.regioninfo import load_regions, get_regions
class TestRegionInfo(object):
    """Minimal stand-in for boto's RegionInfo.

    Mirrors RegionInfo's constructor signature (plus connection_cls) so the
    ``region_cls`` override tests can verify which class get_regions used.
    """

    def __init__(self, connection=None, name=None, endpoint=None,
                 connection_cls=None):
        # Plain attribute storage only; no connection behaviour.
        self.__dict__.update(
            connection=connection,
            name=name,
            endpoint=endpoint,
            connection_cls=connection_cls,
        )
class FakeConn(object):
    # Sentinel connection class: passed as connection_cls through
    # get_regions() so tests can assert it is threaded onto the regions.
    pass
class TestEndpointLoading(unittest.TestCase):
    """Tests for boto.regioninfo's endpoint loading/merging helpers."""

    def setUp(self):
        super(TestEndpointLoading, self).setUp()

    def test_load_endpoint_json(self):
        """The bundled endpoints file parses and contains known ec2 entries."""
        endpoints = load_endpoint_json(boto.ENDPOINTS_PATH)
        self.assertTrue('ec2' in endpoints)
        self.assertEqual(
            endpoints['ec2']['us-east-1'],
            'ec2.us-east-1.amazonaws.com'
        )

    def test_merge_endpoints(self):
        """merge_endpoints overlays additions onto defaults, deep-merging
        per-service region maps (additions win on conflicts)."""
        defaults = {
            'ec2': {
                'us-east-1': 'ec2.us-east-1.amazonaws.com',
                'us-west-1': 'ec2.us-west-1.amazonaws.com',
            }
        }
        additions = {
            # Top-level addition.
            's3': {
                'us-east-1': 's3.amazonaws.com'
            },
            'ec2': {
                # Overwrite. This doesn't exist, just test data.
                'us-east-1': 'ec2.auto-resolve.amazonaws.com',
                # Deep addition.
                'us-west-2': 'ec2.us-west-2.amazonaws.com',
            }
        }
        endpoints = merge_endpoints(defaults, additions)
        self.assertEqual(endpoints, {
            'ec2': {
                'us-east-1': 'ec2.auto-resolve.amazonaws.com',
                'us-west-1': 'ec2.us-west-1.amazonaws.com',
                'us-west-2': 'ec2.us-west-2.amazonaws.com',
            },
            's3': {
                'us-east-1': 's3.amazonaws.com'
            }
        })

    def test_load_regions(self):
        """BOTO_ENDPOINTS env var layers a user endpoints file on defaults."""
        # Just the defaults.
        endpoints = load_regions()
        self.assertTrue('us-east-1' in endpoints['ec2'])
        self.assertFalse('test-1' in endpoints['ec2'])

        # With ENV overrides.
        os.environ['BOTO_ENDPOINTS'] = os.path.join(
            os.path.dirname(__file__),
            'test_endpoints.json'
        )
        self.addCleanup(os.environ.pop, 'BOTO_ENDPOINTS')
        endpoints = load_regions()
        self.assertTrue('us-east-1' in endpoints['ec2'])
        self.assertTrue('test-1' in endpoints['ec2'])
        self.assertEqual(endpoints['ec2']['test-1'], 'ec2.test-1.amazonaws.com')

    def test_get_regions(self):
        """get_regions returns RegionInfo objects by default."""
        # With defaults.
        ec2_regions = get_regions('ec2')
        self.assertTrue(len(ec2_regions) >= 10)
        west_2 = None

        for region_info in ec2_regions:
            if region_info.name == 'us-west-2':
                west_2 = region_info
                break

        self.assertNotEqual(west_2, None, "Couldn't find the us-west-2 region!")
        self.assertTrue(isinstance(west_2, RegionInfo))
        self.assertEqual(west_2.name, 'us-west-2')
        self.assertEqual(west_2.endpoint, 'ec2.us-west-2.amazonaws.com')
        self.assertEqual(west_2.connection_cls, None)

    def test_get_regions_overrides(self):
        """region_cls/connection_cls overrides are honoured by get_regions."""
        ec2_regions = get_regions(
            'ec2',
            region_cls=TestRegionInfo,
            connection_cls=FakeConn
        )
        self.assertTrue(len(ec2_regions) >= 10)
        west_2 = None

        for region_info in ec2_regions:
            if region_info.name == 'us-west-2':
                west_2 = region_info
                break

        self.assertNotEqual(west_2, None, "Couldn't find the us-west-2 region!")
        self.assertFalse(isinstance(west_2, RegionInfo))
        self.assertTrue(isinstance(west_2, TestRegionInfo))
        self.assertEqual(west_2.name, 'us-west-2')
        self.assertEqual(west_2.endpoint, 'ec2.us-west-2.amazonaws.com')
        self.assertEqual(west_2.connection_cls, FakeConn)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| mit |
nanditav/15712-TensorFlow | tensorflow/python/kernel_tests/one_hot_op_test.py | 25 | 13164 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.one_hot_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class OneHotTest(tf.test.TestCase):
  """Tests for tf.one_hot covering dtypes, axes, batching and error cases."""

  def _testOneHot(self, truth, use_gpu=False, expected_err_re=None,
                  raises=None, **inputs):
    """Run tf.one_hot(**inputs) on one device and check against `truth`.

    Exactly one of three outcomes is expected: a graph-construction error
    (`raises`), a runtime op error matching `expected_err_re`, or a result
    equal to `truth` with a matching static shape.
    """
    with self.test_session(use_gpu=use_gpu):
      if raises is not None:
        with self.assertRaises(raises):
          tf.one_hot(**inputs)
      else:
        ans = tf.one_hot(**inputs)
        if expected_err_re is None:
          tf_ans = ans.eval()
          self.assertAllEqual(tf_ans, truth)
          self.assertEqual(tf_ans.shape, ans.get_shape())
        else:
          with self.assertRaisesOpError(expected_err_re):
            ans.eval()

  def _testBothOneHot(self, truth, expected_err_re=None, raises=None, **inputs):
    # Exercise both the GPU and CPU code paths.
    self._testOneHot(truth, True, expected_err_re, raises, **inputs)
    self._testOneHot(truth, False, expected_err_re, raises, **inputs)

  def _testBasic(self, dtype):
    """1-D indices with explicit on/off values, for axis -1 and axis 0."""
    indices = np.asarray([0, 2, -1, 1], dtype=np.int64)
    depth = 3
    on_value = np.asarray(1.0, dtype=dtype)
    off_value = np.asarray(-1.0, dtype=dtype)

    # -1 is out of range, so its row is all off_value.
    truth = np.asarray(
        [[1.0, -1.0, -1.0],
         [-1.0, -1.0, 1.0],
         [-1.0, -1.0, -1.0],
         [-1.0, 1.0, -1.0]],
        dtype=dtype)

    # axis == -1
    self._testBothOneHot(
        indices=indices,
        depth=depth,
        on_value=on_value,
        off_value=off_value,
        dtype=dtype,
        truth=truth)

    # axis == 0
    self._testBothOneHot(
        indices=indices,
        depth=depth,
        on_value=on_value,
        off_value=off_value,
        axis=0,
        dtype=dtype,
        truth=truth.T)  # Output is transpose version in this case

  def _testDefaultBasic(self, dtype):
    """1-D indices relying on the default on=1/off=0 values."""
    indices = np.asarray([0, 2, -1, 1], dtype=np.int64)
    depth = 3

    truth = np.asarray(
        [[1.0, 0.0, 0.0],
         [0.0, 0.0, 1.0],
         [0.0, 0.0, 0.0],
         [0.0, 1.0, 0.0]],
        dtype=dtype)

    # axis == -1
    self._testBothOneHot(
        indices=indices,
        depth=depth,
        truth=truth)

    # axis == 0
    self._testBothOneHot(
        indices=indices,
        depth=depth,
        axis=0,
        truth=truth.T)  # Output is transpose version in this case

  def testFloatBasic(self):
    self._testBasic(np.float32)
    self._testDefaultBasic(np.float32)

  def testDoubleBasic(self):
    self._testBasic(np.float64)
    self._testDefaultBasic(np.float64)

  def testInt32Basic(self):
    self._testBasic(np.int32)
    self._testDefaultBasic(np.int32)

  def testInt64Basic(self):
    self._testBasic(np.int64)
    self._testDefaultBasic(np.int64)

  def testComplex64Basic(self):
    self._testBasic(np.complex64)
    self._testDefaultBasic(np.complex64)

  def testComplex128Basic(self):
    self._testBasic(np.complex128)
    self._testDefaultBasic(np.complex128)

  def _testBatch(self, dtype):
    """2-D (batched) indices with explicit on/off values, axes -1 and 1."""
    indices = np.asarray([[0, 2, -1, 1],
                          [1, 0, 1, -1]],
                         dtype=np.int64)
    depth = 3
    on_value = np.asarray(1.0, dtype=dtype)
    off_value = np.asarray(-1.0, dtype=dtype)

    truth = np.asarray(
        [[[1.0, -1.0, -1.0],
          [-1.0, -1.0, 1.0],
          [-1.0, -1.0, -1.0],
          [-1.0, 1.0, -1.0]],
         [[-1.0, 1.0, -1.0],
          [1.0, -1.0, -1.0],
          [-1.0, 1.0, -1.0],
          [-1.0, -1.0, -1.0]]],
        dtype=dtype)

    # axis == -1
    self._testBothOneHot(
        indices=indices,
        depth=depth,
        on_value=on_value,
        off_value=off_value,
        dtype=dtype,
        truth=truth)

    # axis == 1
    self._testBothOneHot(
        indices=indices,
        depth=depth,
        on_value=on_value,
        off_value=off_value,
        axis=1,
        dtype=dtype,
        truth=[truth[0].T, truth[1].T])  # Do not transpose the batch

  def _testDefaultValuesBatch(self, dtype):
    """Batched indices relying on default on/off values."""
    indices = np.asarray([[0, 2, -1, 1],
                          [1, 0, 1, -1]],
                         dtype=np.int64)
    depth = 3

    truth = np.asarray(
        [[[1.0, 0.0, 0.0],
          [0.0, 0.0, 1.0],
          [0.0, 0.0, 0.0],
          [0.0, 1.0, 0.0]],
         [[0.0, 1.0, 0.0],
          [1.0, 0.0, 0.0],
          [0.0, 1.0, 0.0],
          [0.0, 0.0, 0.0]]],
        dtype=dtype)

    # axis == -1
    self._testBothOneHot(
        indices=indices,
        depth=depth,
        dtype=dtype,
        truth=truth)

    # axis == 1
    self._testBothOneHot(
        indices=indices,
        depth=depth,
        axis=1,
        dtype=dtype,
        truth=[truth[0].T, truth[1].T])  # Do not transpose the batch

  def _testValueTypeBatch(self, dtype):
    """Batched indices where on/off values carry the output dtype."""
    indices = np.asarray([[0, 2, -1, 1],
                          [1, 0, 1, -1]],
                         dtype=np.int64)
    depth = 3

    on_value = np.asarray(1.0, dtype=dtype)
    off_value = np.asarray(-1.0, dtype=dtype)

    truth = np.asarray(
        [[[1.0, -1.0, -1.0],
          [-1.0, -1.0, 1.0],
          [-1.0, -1.0, -1.0],
          [-1.0, 1.0, -1.0]],
         [[-1.0, 1.0, -1.0],
          [1.0, -1.0, -1.0],
          [-1.0, 1.0, -1.0],
          [-1.0, -1.0, -1.0]]],
        dtype=dtype)

    # axis == -1
    self._testBothOneHot(
        indices=indices,
        on_value=on_value,
        off_value=off_value,
        depth=depth,
        dtype=dtype,
        truth=truth)

    # axis == 1
    self._testBothOneHot(
        indices=indices,
        on_value=on_value,
        off_value=off_value,
        depth=depth,
        axis=1,
        dtype=dtype,
        truth=[truth[0].T, truth[1].T])  # Do not transpose the batch

  def _testEmpty(self, dtype):
    """Zero-sized batch dimension must produce an empty (0, 16, 3) result."""
    indices = np.zeros((0, 16), dtype=np.int64)
    depth = 3
    on_value = np.asarray(1.0, dtype=dtype)
    off_value = np.asarray(-1.0, dtype=dtype)
    truth = np.empty((0, 16, 3), dtype=dtype)

    # axis == -1
    self._testBothOneHot(
        indices=indices,
        depth=depth,
        on_value=on_value,
        off_value=off_value,
        dtype=dtype,
        truth=truth)

  def testHalfBatch(self):
    self._testEmpty(np.float16)
    self._testBatch(np.float16)
    self._testDefaultValuesBatch(np.float16)
    self._testValueTypeBatch(np.float16)

  def testFloatBatch(self):
    self._testEmpty(np.float32)
    self._testBatch(np.float32)
    self._testDefaultValuesBatch(np.float32)
    self._testValueTypeBatch(np.float32)

  def testDoubleBatch(self):
    self._testEmpty(np.float64)
    self._testBatch(np.float64)
    self._testDefaultValuesBatch(np.float64)
    self._testValueTypeBatch(np.float64)

  def testInt32Batch(self):
    self._testEmpty(np.int32)
    self._testBatch(np.int32)
    self._testDefaultValuesBatch(np.int32)
    self._testValueTypeBatch(np.int32)

  def testInt64Batch(self):
    self._testEmpty(np.int64)
    self._testBatch(np.int64)
    self._testDefaultValuesBatch(np.int64)
    self._testValueTypeBatch(np.int64)

  def testComplexBatch(self):
    self._testEmpty(np.complex64)
    self._testBatch(np.complex64)
    # self._testDefaultValuesBatch(np.complex64)
    self._testValueTypeBatch(np.complex64)

  def testSimpleCases(self):
    """Plain Python list indices with various value/dtype combinations."""
    indices = [0,1,2]
    depth = 3
    truth = np.asarray(
        [[1.0, 0.0, 0.0],
         [0.0, 1.0, 0.0],
         [0.0, 0.0, 1.0]],
        dtype=np.float32)
    self._testBothOneHot(indices=indices, depth=depth, truth=truth)

    indices = [0,1,2]
    depth = 3
    truth = np.asarray(
        [[1, 0, 0],
         [0, 1, 0],
         [0, 0, 1]],
        dtype=np.int32)
    self._testBothOneHot(indices=indices, depth=depth, dtype=np.int32,
                         truth=truth)

    indices = [0,1,2]
    depth = 3
    truth = np.asarray(
        [[1, -1, -1],
         [-1, 1, -1],
         [-1, -1, 1]],
        dtype=np.int32)
    self._testBothOneHot(indices=indices, depth=depth, on_value=1,
                         off_value=-1, truth=truth)

  def testSingleValueGiven(self):
    """Providing only one of on_value/off_value defaults the other."""
    # Only on_value provided
    indices = [0,1,2]
    depth = 3
    truth = np.asarray(
        [[1, 0, 0],
         [0, 1, 0],
         [0, 0, 1]],
        dtype=np.int32)
    self._testBothOneHot(indices=indices, depth=depth, on_value=1, truth=truth)

    # Only off_value provided
    indices = [0,1,2]
    depth = 3
    truth = np.asarray(
        [[1, 0, 0],
         [0, 1, 0],
         [0, 0, 1]],
        dtype=np.float32)
    self._testBothOneHot(indices=indices, depth=depth,
                         off_value=0.0, truth=truth)

  def testString(self):
    """String on/off values, supplied as ndarray, tf constant and bytes."""
    indices = [0,1,2]
    depth = 3
    truth = np.asarray(
        [[b"1.0", b"0.0", b"0.0"],
         [b"0.0", b"1.0", b"0.0"],
         [b"0.0", b"0.0", b"1.0"]])
    on_value = np.asarray(b"1.0")
    off_value = np.asarray(b"0.0")

    self._testBothOneHot(indices=indices, depth=depth, on_value=on_value,
                         off_value=off_value, dtype=tf.string, truth=truth)

    on_value = tf.constant(b"1.0")
    off_value = tf.constant(b"0.0")
    self._testBothOneHot(indices=indices, depth=depth, on_value=on_value,
                         off_value=off_value, dtype=tf.string, truth=truth)

    on_value = b"1.0"
    off_value = b"0.0"
    self._testBothOneHot(indices=indices, depth=depth, on_value=on_value,
                         off_value=off_value, dtype=tf.string, truth=truth)

  def testIndicesTypes(self):
    """Indices supplied as tf tensors and numpy arrays of several int types."""
    tf_types = [tf.uint8, tf.int32, tf.int64]
    np_types = [np.int32, np.int64]
    for itype in tf_types + np_types:
      # Note: to keep the tests simple in the case of uint8 the index -1 below
      # maps to 255 which is out of the depth range, just like -1.
      if itype in tf_types:
        indices = tf.constant([[0, 2, -1, 1],
                               [1, 0, 1, -1]],
                              dtype=itype)
      elif itype in np_types:
        indices = np.asarray([[0, 2, -1, 1],
                              [1, 0, 1, -1]],
                             dtype=itype)
      depth = 3

      on_value = np.asarray(1.0, dtype=np.float32)
      off_value = np.asarray(-1.0, dtype=np.float32)

      truth = np.asarray(
          [[[1.0, -1.0, -1.0],
            [-1.0, -1.0, 1.0],
            [-1.0, -1.0, -1.0],
            [-1.0, 1.0, -1.0]],
           [[-1.0, 1.0, -1.0],
            [1.0, -1.0, -1.0],
            [-1.0, 1.0, -1.0],
            [-1.0, -1.0, -1.0]]],
          dtype=np.float32)

      # axis == -1
      self._testBothOneHot(
          indices=indices,
          on_value=on_value,
          off_value=off_value,
          depth=depth,
          truth=truth)

      # axis == 1
      self._testBothOneHot(
          indices=indices,
          on_value=on_value,
          off_value=off_value,
          depth=depth,
          axis=1,
          truth=[truth[0].T, truth[1].T])  # Do not transpose the batch

  def testPrefixDimOverflow(self):
    """Large (65536) leading dimension exercises index-arithmetic overflow."""
    for itype in [tf.int32, tf.int64, tf.uint8]:
      prefix_dim_size = 65536
      depth = 2
      x = [i % depth for i in range(prefix_dim_size)]
      indices = tf.constant(x, dtype=itype)

      truth = np.zeros((prefix_dim_size, depth), np.float32)
      for i in range(prefix_dim_size):
        truth[i, x[i]] = 1.0

      self._testBothOneHot(
          indices=indices,
          depth=depth,
          on_value=1.0,
          off_value=0.0,
          truth=truth)

  def testOnOffMismatchTypeError(self):
    """on_value/off_value of different dtypes must raise TypeError."""
    indices = [0, 1, 2]
    depth = 3
    on_value = np.asarray(1.0, np.float64)
    off_value = np.asarray(0.0, np.float32)

    self._testBothOneHot(
        indices=indices,
        depth=depth,
        on_value=on_value,
        off_value=off_value,
        truth=None,
        raises=TypeError)

  def testDtypeMismatchTypeError(self):
    """Explicit dtype disagreeing with value dtypes must raise TypeError."""
    indices = [0, 1, 2]
    depth = 3
    on_value = np.asarray(1.0, np.float32)
    off_value = np.asarray(0.0, np.float32)
    dtype = np.int32

    self._testBothOneHot(
        indices=indices,
        depth=depth,
        on_value=on_value,
        dtype=dtype,
        truth=None,
        raises=TypeError)

    self._testBothOneHot(
        indices=indices,
        depth=depth,
        on_value=off_value,
        dtype=dtype,
        truth=None,
        raises=TypeError)
if __name__ == "__main__":
  # Allow running this test module directly.
  tf.test.main()
| apache-2.0 |
gurneyalex/stock-logistics-workflow | stock_picking_reorder_lines/stock.py | 9 | 1938 | # -*- coding: utf-8 -*-
#
#
# Author: Alexandre Fayolle
# Copyright 2013 Camptocamp SA
#
# Author: Damien Crier
# Copyright 2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import models, api, fields
class StockMove(models.Model):
    """Adds a user-orderable sequence to stock moves and propagates it to
    generated invoice lines."""
    _inherit = 'stock.move'
    # Keep the user-defined line order within each expected-date bucket.
    _order = 'date_expected desc, sequence, id'

    # Manual ordering handle for move lines.
    sequence = fields.Integer()

    @api.model
    def _get_invoice_line_vals(self, move, partner, inv_type):
        """Extend the invoice-line values with the move's sequence so invoice
        lines keep the same ordering as the picking's moves."""
        res = super(StockMove, self)._get_invoice_line_vals(move,
                                                            partner,
                                                            inv_type)
        res['sequence'] = move.sequence
        return res
class StockPicking(models.Model):
    """Exposes the next free move-line sequence on pickings."""
    _inherit = 'stock.picking'

    @api.depends('move_lines')
    def _get_max_line_sequence(self):
        """Compute max(move sequences) + 10 per picking (10 when there are no
        moves) — i.e. the sequence a newly added line should get."""
        for picking in self:
            if picking.move_lines:
                max_line_sequence = max(
                    picking.mapped('move_lines.sequence')) + 10
            else:
                max_line_sequence = 10
            picking.max_line_sequence = max_line_sequence

    # Computed, non-stored helper consumed by the line-reordering UI.
    max_line_sequence = fields.Integer(string='Max sequence in lines',
                                       compute='_get_max_line_sequence')
| agpl-3.0 |
dvitme/odoo-addons | account_journal_active/__openerp__.py | 1 | 1665 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    "name": "Account Journal Active",
    # <odoo series>.<module major.minor.patch>
    "version": "8.0.1.2.0",
    'author': 'ADHOC SA',
    'website': 'www.adhoc.com.ar',
    "category": "Accounting",
    "description": """
Account Journal Active Field
============================
Adds active field on account journal
""",
    'depends': [
        # we add dependency of account_voucher because we change voucher
        # action views to make voucher visible when journal inactive
        'account_voucher'
    ],
    'data': [
        'account_journal_view.xml',
        'account_voucher_view.xml',
    ],
    'demo': [],
    'test': [],
    'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
MitchTalmadge/Emoji-Tools | src/main/resources/PythonScripts/fontTools/ttLib/tables/V_D_M_X_.py | 3 | 8286 | from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from . import DefaultTable
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
import struct
VDMX_HeaderFmt = """
> # big endian
version: H # Version number (0 or 1)
numRecs: H # Number of VDMX groups present
numRatios: H # Number of aspect ratio groupings
"""
# the VMDX header is followed by an array of RatRange[numRatios] (i.e. aspect
# ratio ranges);
VDMX_RatRangeFmt = """
> # big endian
bCharSet: B # Character set
xRatio: B # Value to use for x-Ratio
yStartRatio: B # Starting y-Ratio value
yEndRatio: B # Ending y-Ratio value
"""
# followed by an array of offset[numRatios] from start of VDMX table to the
# VDMX Group for this ratio range (offsets will be re-calculated on compile);
# followed by an array of Group[numRecs] records;
VDMX_GroupFmt = """
> # big endian
recs: H # Number of height records in this group
startsz: B # Starting yPelHeight
endsz: B # Ending yPelHeight
"""
# followed by an array of vTable[recs] records.
VDMX_vTableFmt = """
> # big endian
yPelHeight: H # yPelHeight to which values apply
yMax: h # Maximum value (in pels) for this yPelHeight
yMin: h # Minimum value (in pels) for this yPelHeight
"""
class table_V_D_M_X_(DefaultTable.DefaultTable):

    """The OpenType 'VDMX' (Vertical Device Metrics) table.

    After decompile():
      - self.ratRanges is a list of dicts with the RatRange fields plus a
        'groupIndex' entry mapping the ratio range to an entry of self.groups.
      - self.groups is a list of dicts mapping yPelHeight -> (yMax, yMin).
    """

    def decompile(self, data, ttFont):
        """Parse the binary table data into ratRanges and groups."""
        pos = 0  # track current position from start of VDMX table
        dummy, data = sstruct.unpack2(VDMX_HeaderFmt, data, self)
        pos += sstruct.calcsize(VDMX_HeaderFmt)
        self.ratRanges = []
        for i in range(self.numRatios):
            ratio, data = sstruct.unpack2(VDMX_RatRangeFmt, data)
            pos += sstruct.calcsize(VDMX_RatRangeFmt)
            # the mapping between a ratio and a group is defined further below
            ratio['groupIndex'] = None
            self.ratRanges.append(ratio)
        lenOffset = struct.calcsize('>H')
        _offsets = []  # temporarily store offsets to groups
        for i in range(self.numRatios):
            offset = struct.unpack('>H', data[0:lenOffset])[0]
            data = data[lenOffset:]
            pos += lenOffset
            _offsets.append(offset)
        self.groups = []
        for groupIndex in range(self.numRecs):
            # the offset to this group from beginning of the VDMX table
            currOffset = pos
            group, data = sstruct.unpack2(VDMX_GroupFmt, data)
            # the group length and bounding sizes are re-calculated on compile
            recs = group.pop('recs')
            startsz = group.pop('startsz')
            endsz = group.pop('endsz')
            pos += sstruct.calcsize(VDMX_GroupFmt)
            for j in range(recs):
                vTable, data = sstruct.unpack2(VDMX_vTableFmt, data)
                vTableLength = sstruct.calcsize(VDMX_vTableFmt)
                pos += vTableLength
                # group is a dict of (yMax, yMin) tuples keyed by yPelHeight
                group[vTable['yPelHeight']] = (vTable['yMax'], vTable['yMin'])
            # make sure startsz and endsz match the calculated values.
            # BUGFIX: the messages previously used group.startsz/group.endsz,
            # but 'group' is a plain dict (and those keys were popped above),
            # so a failing assert raised AttributeError instead of the
            # intended message; use the local variables instead.
            minSize = min(group.keys())
            maxSize = max(group.keys())
            assert startsz == minSize, \
                "startsz (%s) must equal min yPelHeight (%s): group %d" % \
                (startsz, minSize, groupIndex)
            assert endsz == maxSize, \
                "endsz (%s) must equal max yPelHeight (%s): group %d" % \
                (endsz, maxSize, groupIndex)
            self.groups.append(group)
            # match the defined offsets with the current group's offset
            for offsetIndex, offsetValue in enumerate(_offsets):
                # when numRecs < numRatios there can be more than one ratio
                # range sharing the same VDMX group
                if currOffset == offsetValue:
                    # map the group with the ratio range that has the same
                    # index as the offset to that group (it took me a while..)
                    self.ratRanges[offsetIndex]['groupIndex'] = groupIndex
        # check that all ratio ranges have a group
        for i in range(self.numRatios):
            ratio = self.ratRanges[i]
            if ratio['groupIndex'] is None:
                from fontTools import ttLib
                raise ttLib.TTLibError(
                    "no group defined for ratRange %d" % i)

    def _getOffsets(self):
        """
        Calculate offsets to VDMX_Group records.
        For each ratRange return a list of offset values from the beginning of
        the VDMX table to a VDMX_Group.
        """
        lenHeader = sstruct.calcsize(VDMX_HeaderFmt)
        lenRatRange = sstruct.calcsize(VDMX_RatRangeFmt)
        lenOffset = struct.calcsize('>H')
        lenGroupHeader = sstruct.calcsize(VDMX_GroupFmt)
        lenVTable = sstruct.calcsize(VDMX_vTableFmt)
        # offset to the first group
        pos = lenHeader + self.numRatios*lenRatRange + self.numRatios*lenOffset
        groupOffsets = []
        for group in self.groups:
            groupOffsets.append(pos)
            lenGroup = lenGroupHeader + len(group) * lenVTable
            pos += lenGroup  # offset to next group
        offsets = []
        for ratio in self.ratRanges:
            groupIndex = ratio['groupIndex']
            offsets.append(groupOffsets[groupIndex])
        return offsets

    def compile(self, ttFont):
        """Serialize the table back to its binary form."""
        if not(self.version == 0 or self.version == 1):
            from fontTools import ttLib
            raise ttLib.TTLibError(
                "unknown format for VDMX table: version %s" % self.version)
        data = sstruct.pack(VDMX_HeaderFmt, self)
        for ratio in self.ratRanges:
            data += sstruct.pack(VDMX_RatRangeFmt, ratio)
        # recalculate offsets to VDMX groups
        for offset in self._getOffsets():
            data += struct.pack('>H', offset)
        for group in self.groups:
            # recalculate group header fields from the dict contents
            recs = len(group)
            startsz = min(group.keys())
            endsz = max(group.keys())
            gHeader = {'recs': recs, 'startsz': startsz, 'endsz': endsz}
            data += sstruct.pack(VDMX_GroupFmt, gHeader)
            for yPelHeight, (yMax, yMin) in sorted(group.items()):
                vTable = {'yPelHeight': yPelHeight, 'yMax': yMax, 'yMin': yMin}
                data += sstruct.pack(VDMX_vTableFmt, vTable)
        return data

    def toXML(self, writer, ttFont):
        """Dump the table as TTX XML."""
        writer.simpletag("version", value=self.version)
        writer.newline()
        writer.begintag("ratRanges")
        writer.newline()
        for ratio in self.ratRanges:
            groupIndex = ratio['groupIndex']
            writer.simpletag(
                "ratRange",
                bCharSet=ratio['bCharSet'],
                xRatio=ratio['xRatio'],
                yStartRatio=ratio['yStartRatio'],
                yEndRatio=ratio['yEndRatio'],
                groupIndex=groupIndex
                )
            writer.newline()
        writer.endtag("ratRanges")
        writer.newline()
        writer.begintag("groups")
        writer.newline()
        for groupIndex in range(self.numRecs):
            group = self.groups[groupIndex]
            recs = len(group)
            startsz = min(group.keys())
            endsz = max(group.keys())
            writer.begintag("group", index=groupIndex)
            writer.newline()
            writer.comment("recs=%d, startsz=%d, endsz=%d" %
                            (recs, startsz, endsz))
            writer.newline()
            for yPelHeight, (yMax, yMin) in sorted(group.items()):
                writer.simpletag(
                    "record",
                    [('yPelHeight', yPelHeight), ('yMax', yMax), ('yMin', yMin)])
                writer.newline()
            writer.endtag("group")
            writer.newline()
        writer.endtag("groups")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild the table from TTX XML; numRatios/numRecs are re-counted."""
        if name == "version":
            self.version = safeEval(attrs["value"])
        elif name == "ratRanges":
            if not hasattr(self, "ratRanges"):
                self.ratRanges = []
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == "ratRange":
                    if not hasattr(self, "numRatios"):
                        self.numRatios = 1
                    else:
                        self.numRatios += 1
                    ratio = {
                        "bCharSet": safeEval(attrs["bCharSet"]),
                        "xRatio": safeEval(attrs["xRatio"]),
                        "yStartRatio": safeEval(attrs["yStartRatio"]),
                        "yEndRatio": safeEval(attrs["yEndRatio"]),
                        "groupIndex": safeEval(attrs["groupIndex"])
                        }
                    self.ratRanges.append(ratio)
        elif name == "groups":
            if not hasattr(self, "groups"):
                self.groups = []
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == "group":
                    if not hasattr(self, "numRecs"):
                        self.numRecs = 1
                    else:
                        self.numRecs += 1
                    group = {}
                    for element in content:
                        if not isinstance(element, tuple):
                            continue
                        name, attrs, content = element
                        if name == "record":
                            yPelHeight = safeEval(attrs["yPelHeight"])
                            yMax = safeEval(attrs["yMax"])
                            yMin = safeEval(attrs["yMin"])
                            group[yPelHeight] = (yMax, yMin)
                    self.groups.append(group)
| gpl-3.0 |
shoopio/shoop | shuup_tests/core/test_staff_only_behavior.py | 2 | 1384 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from shuup.core.models import StaffOnlyBehaviorComponent
from shuup.testing.factories import (
get_default_payment_method, get_default_shop
)
from shuup_tests.utils.basketish_order_source import BasketishOrderSource
from shuup_tests.utils.fixtures import regular_user
# Re-export the imported fixture into this module's namespace so pytest can
# resolve it for the tests below; "# noqa" silences the self-assignment lint.
regular_user = regular_user # noqa
@pytest.mark.django_db
def test_staff_only_behavior(admin_user, regular_user):
    """A StaffOnlyBehaviorComponent makes a payment method unavailable to
    anonymous and regular users while remaining available to admins."""
    method = get_default_payment_method()
    method.behavior_components.add(StaffOnlyBehaviorComponent.objects.create())
    source = BasketishOrderSource(get_default_shop())

    def reason_count():
        # Re-evaluate the unavailability reasons for the current creator.
        return len(list(method.get_unavailability_reasons(source)))

    # anonymous user: blocked
    assert reason_count() == 1

    # regular user: still blocked
    source.creator = regular_user
    assert reason_count() == 1

    # admin: allowed
    source.creator = admin_user
    assert reason_count() == 0
| agpl-3.0 |
mskvortsov/coreclr | tests/scripts/format.py | 32 | 8406 | #!/usr/bin/env python
#
## Licensed to the .NET Foundation under one or more agreements.
## The .NET Foundation licenses this file to you under the MIT license.
## See the LICENSE file in the project root for more information.
#
##
# Title :format.py
#
################################################################################
# Script to install and run jit-format over jit source for all configurations.
################################################################################
import urllib
import argparse
import os
import sys
import tarfile
import zipfile
import subprocess
import urllib2
import shutil
def expandPath(path):
    """Return *path* as an absolute path, with a leading '~' expanded to the
    current user's home directory."""
    expanded = os.path.expanduser(path)
    return os.path.abspath(expanded)
def del_rw(action, name, exc):
    """shutil.rmtree onerror handler: loosen permissions and retry removal.

    Some checked-out files are read-only (which makes removal fail, notably
    on Windows); re-chmod the entry so the delete can succeed.

    action: the function that raised (unused).
    name:   path of the entry that could not be removed.
    exc:    exception info from rmtree (unused).
    """
    # 0o651 is the same permission bits as the original Python-2-only
    # literal '0651'; the 0o prefix is valid on both Python 2.6+ and 3.
    os.chmod(name, 0o651)
    os.remove(name)
def main(argv):
    """Download the .NET CLI and the jitutils bootstrap, build jit-format,
    run it over every build/project flavor, and produce format.patch when
    formatting errors are found.

    argv: command-line arguments (without the program name); expects
          -a/--arch, -o/--os and -c/--coreclr.
    Returns 0 on success, non-zero on error or formatting differences.

    NOTE(review): this script uses Python-2-only constructs (urllib2,
    urllib.URLopener, the octal literal 0751 below) and will not run
    unmodified under Python 3.
    """
    parser = argparse.ArgumentParser()

    required = parser.add_argument_group('required arguments')
    required.add_argument('-a', '--arch', type=str,
            default=None, help='architecture to run jit-format on')
    required.add_argument('-o', '--os', type=str,
            default=None, help='operating system')
    required.add_argument('-c', '--coreclr', type=str,
            default=None, help='full path to coreclr')

    args, unknown = parser.parse_known_args(argv)

    if unknown:
        # NOTE(review): 'Ignorning' is a typo in the user-facing message.
        print('Ignorning argument(s): ', ','.join(unknown))

    # Validate required arguments before doing any work.
    if args.coreclr is None:
        print('Specify --coreclr')
        return -1
    if args.os is None:
        print('Specifiy --os')
        return -1
    if args.arch is None:
        print('Specify --arch')
        return -1
    if not os.path.isdir(expandPath(args.coreclr)):
        print('Bad path to coreclr')
        return -1

    coreclr = args.coreclr
    platform = args.os
    arch = args.arch

    my_env = os.environ

    # Download .Net CLI
    dotnetcliUrl = ""
    dotnetcliFilename = ""

    # build.cmd removes the Tools directory, so we need to put our version of jitutils
    # outside of the Tools directory
    dotnetcliPath = os.path.join(coreclr, 'dotnetcli-jitutils')

    # Try to make the dotnetcli-jitutils directory if it doesn't exist
    try:
        os.makedirs(dotnetcliPath)
    except OSError:
        if not os.path.isdir(dotnetcliPath):
            raise

    print("Downloading .Net CLI")
    # Pick the per-platform download URL and archive name.
    if platform == 'Linux':
        dotnetcliUrl = "https://go.microsoft.com/fwlink/?linkid=839628"
        dotnetcliFilename = os.path.join(dotnetcliPath, 'dotnetcli-jitutils.tar.gz')
    elif platform == 'OSX':
        dotnetcliUrl = "https://go.microsoft.com/fwlink/?linkid=839641"
        dotnetcliFilename = os.path.join(dotnetcliPath, 'dotnetcli-jitutils.tar.gz')
    elif platform == 'Windows_NT':
        dotnetcliUrl = "https://go.microsoft.com/fwlink/?linkid=839634"
        dotnetcliFilename = os.path.join(dotnetcliPath, 'dotnetcli-jitutils.zip')
    else:
        print('Unknown os ', os)
        return -1

    # Follow the fwlink redirect, then download the archive.
    response = urllib2.urlopen(dotnetcliUrl)
    request_url = response.geturl()
    testfile = urllib.URLopener()
    testfile.retrieve(request_url, dotnetcliFilename)

    if not os.path.isfile(dotnetcliFilename):
        print("Did not download .Net CLI!")
        return -1

    # Install .Net CLI
    if platform == 'Linux' or platform == 'OSX':
        tar = tarfile.open(dotnetcliFilename)
        tar.extractall(dotnetcliPath)
        tar.close()
    elif platform == 'Windows_NT':
        with zipfile.ZipFile(dotnetcliFilename, "r") as z:
            z.extractall(dotnetcliPath)

    dotnet = ""
    if platform == 'Linux' or platform == 'OSX':
        dotnet = "dotnet"
    elif platform == 'Windows_NT':
        dotnet = "dotnet.exe"

    if not os.path.isfile(os.path.join(dotnetcliPath, dotnet)):
        print("Did not extract .Net CLI from download")
        return -1

    # Download bootstrap
    bootstrapFilename = ""

    # Remove any stale jitutils checkout before bootstrapping a fresh one.
    jitUtilsPath = os.path.join(coreclr, "jitutils")

    if os.path.isdir(jitUtilsPath):
        print("Deleting " + jitUtilsPath)
        shutil.rmtree(jitUtilsPath, onerror=del_rw)

    if platform == 'Linux' or platform == 'OSX':
        bootstrapFilename = "bootstrap.sh"
    elif platform == 'Windows_NT':
        bootstrapFilename = "bootstrap.cmd"

    bootstrapUrl = "https://raw.githubusercontent.com/dotnet/jitutils/master/" + bootstrapFilename
    bootstrapPath = os.path.join(coreclr, bootstrapFilename)
    testfile.retrieve(bootstrapUrl, bootstrapPath)

    if not os.path.isfile(bootstrapPath):
        print("Did not download bootstrap!")
        return -1

    # On *nix platforms, we need to make the bootstrap file executable
    if platform == 'Linux' or platform == 'OSX':
        print("Making bootstrap executable")
        # NOTE(review): 0751 is Python-2-only octal syntax (0o751 on Python 3).
        os.chmod(bootstrapPath, 0751)

    print(bootstrapPath)

    # Run bootstrap
    my_env["PATH"] = dotnetcliPath + os.pathsep + my_env["PATH"]
    if platform == 'Linux' or platform == 'OSX':
        print("Running bootstrap")
        proc = subprocess.Popen(['bash', bootstrapPath], env=my_env)
        output,error = proc.communicate()
    elif platform == 'Windows_NT':
        proc = subprocess.Popen([bootstrapPath], env=my_env)
        output,error = proc.communicate()

    # Run jit-format
    returncode = 0
    jitutilsBin = os.path.join(coreclr, "jitutils", "bin")
    my_env["PATH"] = jitutilsBin + os.pathsep + my_env["PATH"]
    current_dir = os.getcwd()

    if not os.path.isdir(jitutilsBin):
        print("Jitutils not built!")
        return -1

    jitformat = jitutilsBin

    if platform == 'Linux' or platform == 'OSX':
        jitformat = os.path.join(jitformat, "jit-format")
    elif platform == 'Windows_NT':
        jitformat = os.path.join(jitformat,"jit-format.bat")

    errorMessage = ""

    # Run jit-format for every combination of build flavor and project.
    builds = ["Checked", "Debug", "Release"]
    projects = ["dll", "standalone", "crossgen"]

    for build in builds:
        for project in projects:
            proc = subprocess.Popen([jitformat, "-a", arch, "-b", build, "-o", platform, "-c", coreclr, "--verbose", "--projects", project], env=my_env)
            output,error = proc.communicate()
            errorcode = proc.returncode

            if errorcode != 0:
                # Remember the failing invocation so it can be echoed to the
                # user at the end, and remember that we must fail overall.
                errorMessage += "\tjit-format -a " + arch + " -b " + build + " -o " + platform
                errorMessage += " -c <absolute-path-to-coreclr> --verbose --fix --projects " + project +"\n"
                returncode = errorcode

                # Fix mode doesn't return an error, so we have to run the build, then run with
                # --fix to generate the patch. This means that it is likely only the first run
                # of jit-format will return a formatting failure.
                if errorcode == -2:
                    # If errorcode was -2, no need to run clang-tidy again
                    proc = subprocess.Popen([jitformat, "--fix", "--untidy", "-a", arch, "-b", build, "-o", platform, "-c", coreclr, "--verbose", "--projects", project], env=my_env)
                    output,error = proc.communicate()
                else:
                    # Otherwise, must run both
                    proc = subprocess.Popen([jitformat, "--fix", "-a", arch, "-b", build, "-o", platform, "-c", coreclr, "--verbose", "--projects", project], env=my_env)
                    output,error = proc.communicate()

    os.chdir(current_dir)

    if returncode != 0:
        # Create a patch file
        patchFile = open("format.patch", "w")
        proc = subprocess.Popen(["git", "diff", "--patch", "-U20"], env=my_env, stdout=patchFile)
        output,error = proc.communicate()

    # Clean up everything the script downloaded or built.
    if os.path.isdir(jitUtilsPath):
        print("Deleting " + jitUtilsPath)
        shutil.rmtree(jitUtilsPath, onerror=del_rw)

    if os.path.isdir(dotnetcliPath):
        print("Deleting " + dotnetcliPath)
        shutil.rmtree(dotnetcliPath, onerror=del_rw)

    if os.path.isfile(bootstrapPath):
        print("Deleting " + bootstrapPath)
        os.remove(bootstrapPath)

    if returncode != 0:
        # Tell the user how to reproduce locally and where to get the patch.
        buildUrl = my_env["BUILD_URL"]
        print("There were errors in formatting. Please run jit-format locally with: \n")
        print(errorMessage)
        print("\nOr download and apply generated patch:")
        print("wget " + buildUrl + "artifact/format.patch")
        print("git apply format.patch")

    return returncode
# Script entry point: forward the command line (minus the program name) to
# main() and use its result as the process exit status.
if __name__ == '__main__':
    return_code = main(sys.argv[1:])
    sys.exit(return_code)
| mit |
androidarmv6/android_external_chromium_org | tools/telemetry/telemetry/core/backends/chrome/inspector_page_unittest.py | 24 | 1962 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import util
from telemetry.unittest import tab_test_case
class InspectorPageTest(tab_test_case.TabTestCase):
  """Exercises navigation-related inspector functionality on a live tab."""

  def __init__(self, *args):
    super(InspectorPageTest, self).__init__(*args)

  def setUp(self):
    super(InspectorPageTest, self).setUp()
    # Serve the unittest data directory over the embedded HTTP server.
    self._browser.SetHTTPServerDirectories(util.GetUnittestDataDir())

  def _NavigateAndWait(self, page):
    """Navigate the test tab to |page| and block until the DOM is ready."""
    self._tab.Navigate(self._browser.http_server.UrlOf(page))
    self._tab.WaitForDocumentReadyStateToBeComplete()

  def testPageNavigateToNormalUrl(self):
    self._NavigateAndWait('blank.html')

  def testCustomActionToNavigate(self):
    self._NavigateAndWait('page_with_link.html')
    self.assertEquals(
        self._tab.EvaluateJavaScript('document.location.pathname;'),
        '/page_with_link.html')

    invoked = {'called': False}
    def _ClickLink():
      invoked['called'] = True
      self._tab.ExecuteJavaScript('document.getElementById("clickme").click();')

    self._tab.PerformActionAndWaitForNavigate(_ClickLink)

    self.assertTrue(invoked['called'])
    self.assertEquals(
        self._tab.EvaluateJavaScript('document.location.pathname;'),
        '/blank.html')

  def testGetCookieByName(self):
    self._NavigateAndWait('blank.html')
    self._tab.ExecuteJavaScript('document.cookie="foo=bar"')
    self.assertEquals(self._tab.GetCookieByName('foo'), 'bar')

  def testScriptToEvaluateOnCommit(self):
    # script_to_evaluate_on_commit must run before the page's own scripts.
    self._tab.Navigate(
        self._browser.http_server.UrlOf('blank.html'),
        script_to_evaluate_on_commit='var foo = "bar";')
    self._tab.WaitForDocumentReadyStateToBeComplete()
    self.assertEquals(self._tab.EvaluateJavaScript('foo'), 'bar')
| bsd-3-clause |
jurcicek/extended-hidden-vector-state-parser | semantics-4/src/extractSmntcs.py | 1 | 5995 | #!//usr/bin/python
# -*- coding: UTF-8 -*-
import sys
import getopt
import os
from os.path import *
import glob
from xml.dom import minidom
import re
import struct
import codecs
from StringIO import StringIO
import toolkit
from lexMap import *
from observation import *
from semantics import *
###################################################################################################
###################################################################################################
# Default configuration; dirCmb, dirSmntcs, outputMlf and lemmatized can be
# overridden from the command line (see usage()).
fileNamePrefix = "maps"  # directory holding the lexical map files
dirCmb = "dcd/tst/dcd"  # input directory for *.cmb files
dirSmntcs = "data/tst"  # input directory for *.smntcs files
ext = "cmb"  # extension of the input files
force = False  # NOTE(review): not referenced in this chunk -- possibly unused
outputMlf = "dcd/tst/semantics.mlf"  # base name of the output MLF file(s)
lemmatized = False  # when True, keep output in in-memory buffers (see below)
conceptFileName = fileNamePrefix + "/" + "concept.map"  # concept lexical map
wordFileName = fileNamePrefix + "/" + "word.map"  # word lexical map
def usage():
    """Print the command-line help text, interpolating current defaults."""
    help_text = """
Usage: extractSmntcs.py [options]

Description:
    Extracts semantics from *.cmb files into the format that can be found in the original dialogue
    annotation. Basically, it compresses the information that is in *.cmb files.

Options:
    -h : print this help message and exit
    -v : produce verbose output
    --dirCmb=DIR : input directory for *.cmb files {%s}
    --dirSmntcs=DIR : input directory for *.smntcs coresponding to dcd files {%s}
    --outputMlf=FILE : the file name of an output MLF file {%s}
    --lemmatized : if the output files will be lemmatized {%s}
""" % (dirCmb, dirSmntcs, outputMlf, lemmatized)
    print(help_text)
###################################################################################################
###################################################################################################
# ---------------------------------------------------------------------------
# Command line parsing (Python 2 'except Class, name' syntax).
# ---------------------------------------------------------------------------
try:
    opts, args = getopt.gnu_getopt(sys.argv[1:], "hv",
        ["dirCmb=",
         "dirSmntcs=",
         "outputMlf=",
         "lemmatized"])
except getopt.GetoptError, exc:
    print("ERROR: " + exc.msg)
    usage()
    sys.exit(2)

verbose = None

for o, a in opts:
    if o == "-h":
        usage()
        sys.exit()
    elif o == "-v":
        verbose = True
    elif o == "--dirCmb":
        dirCmb = a
    elif o == "--dirSmntcs":
        dirSmntcs = a
    elif o == "--outputMlf":
        outputMlf = a
    elif o == "--lemmatized":
        lemmatized = True

# Collect and sort all input *.cmb files.
lst = glob.glob(dirCmb + "/*." + ext)
#lst = lst[:10]
lst.sort()

if verbose:
    print("Start")
    print("-------------------------------------------------")
    print("Extracting SMNTCS files from %d files." % len(lst))

# In lemmatized mode most outputs are written into throw-away in-memory
# buffers; otherwise they go to real files next to outputMlf.
if lemmatized:
    outputMlfFile = StringIO()
    outputPush2MlfFile = StringIO()
    outputMlfFileLab = StringIO()
    outputTrnFile = StringIO()
    outputPtbFile = StringIO()
else:
    outputMlfFile = codecs.open(outputMlf, "w", "UTF-8")
    outputPush2MlfFile = codecs.open(outputMlf + ".push2.mlf", "w", "UTF-8")
    outputMlfFileLab = codecs.open(outputMlf + ".dcd", "w", "UTF-8")
    outputTrnFile = codecs.open(outputMlf + ".trn", "w", "UTF-8")
    outputPtbFile = codecs.open(outputMlf + ".ptb", "w", "UTF-8")

outputMlfFileSmntcs = codecs.open(outputMlf + ".smntcs", "w", "UTF-8")
outputPush2MlfFileSmntcs = codecs.open(outputMlf + ".push2.smntcs", "w", "UTF-8")
outputCuedFileSmntcs = codecs.open(outputMlf + ".cued", "w", "UTF-8")

# MLF files start with the standard HTK master-label-file header.
outputMlfFile.write("#!MLF!#\n")
outputPush2MlfFile.write("#!MLF!#\n")
outputMlfFileLab.write("#!MLF!#\n")
outputMlfFileSmntcs.write("#!MLF!#\n")
outputPush2MlfFileSmntcs.write("#!MLF!#\n")

trn_id = 0

for fileName in lst:
    # push2 marks utterances whose semantics contain directly nested concepts
    # (they require two consecutive stack pushes when parsed).
    push2 = False

    if verbose:
        print("Reading file: " + fileName)

    smntcs = readSemanticsFromCMBFile(fileName, lemmatized=lemmatized)
    smntcsWithoutText = removeTextFromSemantics(smntcs)
    text = removeConceptsFromSemantics(smntcs)

    # Discard semantics that do not parse into a valid Semantics tree.
    try:
        Semantics('id', smntcsWithoutText, 'x')
    except ValueError:
        smntcs = ""
        smntcsWithoutText = ""

    if re.search(r'[A-Z]\([A-Z]', smntcs) != None:
        push2 = True
    if re.search(r'\), [a-z]', smntcs) != None:
        push2 = True

    smntcsWTSplit = splitSmntcsToMlf(smntcsWithoutText)

    # Utterance id = base file name without its two extensions.
    idLab = splitext(splitext(basename(fileName))[0])[0]

    outputMlfFile.write('"*/' + idLab + '.rec"\n')
    if push2:
        outputPush2MlfFile.write('"*/' + idLab + '.rec"\n')
    outputMlfFileLab.write("#######################################\n")
    outputMlfFileLab.write('"*/' + idLab + '.lab"\n')
    outputMlfFileLab.write("#######################################\n")

    for each in smntcsWTSplit:
        outputMlfFile.write(each + "\n")
        if push2:
            outputPush2MlfFile.write(each + "\n")
        outputMlfFileLab.write(each + "\n")
        if each != ".":
            # The .trn transcription encodes brackets as LEFT/RIGHT tokens.
            if each == "(":
                outputTrnFile.write("LEFT" + " ")
            elif each == ")":
                outputTrnFile.write("RIGHT" + " ")
            else:
                outputTrnFile.write(each + " ")

    # Penn-Treebank-style output; fall back to a trivial tree on parse error.
    try:
        outputPtbFile.write(Semantics('id', smntcs.encode('ascii','replace'), 'x').getPTBSemantics() + '\n')
    except ValueError:
        outputPtbFile.write("(TOP x)\n")

    outputTrnFile.write("(spk1_%.5d)\n" %trn_id)
    trn_id += 1

    outputMlfFileSmntcs.write('"*/' + idLab + '.rec"\n')
    outputMlfFileSmntcs.write(smntcs + "\n")
    outputMlfFileSmntcs.write(".\n")

    outputCuedFileSmntcs.write(text+' <=> '+Semantics('id', smntcsWithoutText.encode('ascii','replace'), 'x').getCUEDSemantics()+'\n')

    if push2:
        outputPush2MlfFileSmntcs.write('"*/' + idLab + '.rec"\n')
        outputPush2MlfFileSmntcs.write(smntcs + "\n")
        outputPush2MlfFileSmntcs.write(".\n")

outputMlfFile.close()
outputPush2MlfFile.close()
outputMlfFileLab.close()
outputTrnFile.close()
outputPtbFile.close()
outputMlfFileSmntcs.close()
outputCuedFileSmntcs.close()

if verbose:
    print("-------------------------------------------------")
    print("Finish")
bobvanderlinden/machinekit | src/machinetalk/nanopb/generator/nanopb_generator.py | 8 | 53774 | #!/usr/bin/python
'''Generate header file for nanopb from a ProtoBuf FileDescriptorSet.'''
# Version string embedded into generated files to identify the generator.
nanopb_version = "nanopb-0.3.3-dev"
import sys
try:
# Add some dummy imports to keep packaging tools happy.
import google, distutils.util # bbfreeze seems to need these
import pkg_resources # pyinstaller / protobuf 2.5 seem to need these
except:
# Don't care, we will error out later if it is actually important.
pass
try:
import google.protobuf.text_format as text_format
import google.protobuf.descriptor_pb2 as descriptor
except:
sys.stderr.write('''
*************************************************************
*** Could not import the Google protobuf Python libraries ***
*** Try installing package 'python-protobuf' or similar. ***
*************************************************************
''' + '\n')
raise
try:
#import proto.nanopb_pb2 as nanopb_pb2
#import proto.plugin_pb2 as plugin_pb2
import nanopb_pb2
import google.protobuf.compiler.plugin_pb2 as plugin_pb2
except:
sys.stderr.write('''
********************************************************************
*** Failed to import the protocol definitions for generator. ***
*** You have to run 'make' in the nanopb/generator/proto folder. ***
********************************************************************
''' + '\n')
raise
# ---------------------------------------------------------------------------
# Generation of single fields
# ---------------------------------------------------------------------------
import time
import os.path
# Values are tuple (c type, pb type, encoded size, int_size_allowed)
#   c type           - C type emitted into the generated struct
#   pb type          - suffix used for the runtime's PB_LTYPE_* constant
#   encoded size     - worst-case wire size in bytes for the scalar
#   int_size_allowed - whether the int_size option may override the width
FieldD = descriptor.FieldDescriptorProto
datatypes = {
    FieldD.TYPE_BOOL: ('bool', 'BOOL', 1, False),
    FieldD.TYPE_DOUBLE: ('double', 'DOUBLE', 8, False),
    FieldD.TYPE_FIXED32: ('uint32_t', 'FIXED32', 4, False),
    FieldD.TYPE_FIXED64: ('uint64_t', 'FIXED64', 8, False),
    FieldD.TYPE_FLOAT: ('float', 'FLOAT', 4, False),
    FieldD.TYPE_INT32: ('int32_t', 'INT32', 10, True),
    FieldD.TYPE_INT64: ('int64_t', 'INT64', 10, True),
    FieldD.TYPE_SFIXED32: ('int32_t', 'SFIXED32', 4, False),
    FieldD.TYPE_SFIXED64: ('int64_t', 'SFIXED64', 8, False),
    FieldD.TYPE_SINT32: ('int32_t', 'SINT32', 5, True),
    FieldD.TYPE_SINT64: ('int64_t', 'SINT64', 10, True),
    FieldD.TYPE_UINT32: ('uint32_t', 'UINT32', 5, True),
    FieldD.TYPE_UINT64: ('uint64_t', 'UINT64', 10, True)
}

# Integer size overrides (from .proto settings)
intsizes = {
    nanopb_pb2.IS_8: 'int8_t',
    nanopb_pb2.IS_16: 'int16_t',
    nanopb_pb2.IS_32: 'int32_t',
    nanopb_pb2.IS_64: 'int64_t',
}
class Names:
    '''Keeps a set of nested names and formats them to C identifier.'''

    # (str, unicode) on Python 2, (str, str) on Python 3. The original code
    # referenced the bare name 'unicode', which raises NameError on Python 3
    # even when concatenating a plain str; type(u'') is valid on both.
    _STRING_TYPES = (str, type(u''))

    def __init__(self, parts = ()):
        if isinstance(parts, Names):
            parts = parts.parts
        self.parts = tuple(parts)

    def __str__(self):
        return '_'.join(self.parts)

    def __add__(self, other):
        if isinstance(other, self._STRING_TYPES):
            return Names(self.parts + (other,))
        elif isinstance(other, tuple):
            return Names(self.parts + other)
        else:
            raise ValueError("Name parts should be of type str")

    def __eq__(self, other):
        return isinstance(other, Names) and self.parts == other.parts

    def __hash__(self):
        # Keep hashing consistent with __eq__ so Names can be a dict key.
        return hash(self.parts)
def names_from_type_name(type_name):
    '''Parse Names() from FieldDescriptorProto type_name'''
    # Fully-qualified protobuf type names begin with '.'; anything else would
    # need relative-name resolution, which is not implemented.
    if type_name[0] != '.':
        raise NotImplementedError("Lookup of non-absolute type names is not supported")
    parts = type_name[1:].split('.')
    return Names(parts)
def varint_max_size(max_value):
    '''Returns the maximum number of bytes a varint can take when encoded.'''
    # Each varint byte carries 7 payload bits; at most 10 bytes are allowed.
    num_bytes = 1
    while num_bytes <= 10:
        if not (max_value >> (7 * num_bytes)):
            return num_bytes
        num_bytes += 1
    raise ValueError("Value too large for varint: " + str(max_value))

assert varint_max_size(0) == 1
assert varint_max_size(127) == 1
assert varint_max_size(128) == 2
class EncodedSize:
    '''Class used to represent the encoded size of a field or a message.

    Consists of a combination of symbolic sizes and integer sizes.
    '''
    def __init__(self, value = 0, symbols = []):
        if isinstance(value, (str, Names)):
            # A bare name: zero numeric part, the name carried symbolically.
            symbols = [str(value)]
            value = 0
        self.value = value
        self.symbols = symbols

    def __add__(self, other):
        if isinstance(other, (int, long)):
            return EncodedSize(self.value + other, self.symbols)
        if isinstance(other, (str, Names)):
            return EncodedSize(self.value, self.symbols + [str(other)])
        if isinstance(other, EncodedSize):
            return EncodedSize(self.value + other.value,
                               self.symbols + other.symbols)
        raise ValueError("Cannot add size: " + repr(other))

    def __mul__(self, other):
        if not isinstance(other, (int, long)):
            raise ValueError("Cannot multiply size: " + repr(other))
        # Scale both the numeric part and every symbolic term.
        scaled = [str(other) + '*' + s for s in self.symbols]
        return EncodedSize(self.value * other, scaled)

    def __str__(self):
        if not self.symbols:
            return str(self.value)
        terms = ' + '.join([str(self.value)] + self.symbols)
        return '(' + terms + ')'

    def upperlimit(self):
        # Symbolic contributions cannot be bounded statically.
        return self.value if not self.symbols else 2**32 - 1
class Enum:
    def __init__(self, names, desc, enum_options):
        '''desc is EnumDescriptorProto'''
        self.options = enum_options
        self.names = names + desc.name
        # With long_names every value is prefixed by the full enum name,
        # otherwise only by the enclosing scope.
        prefix = self.names if enum_options.long_names else names
        self.values = [(prefix + v.name, v.number) for v in desc.value]
        self.value_longnames = [self.names + v.name for v in desc.value]

    def __str__(self):
        body = ',\n'.join(["    %s = %d" % v for v in self.values])
        return ('typedef enum _%s {\n' % self.names
                + body
                + '\n} %s;' % self.names)
class Field:
def __init__(self, struct_name, desc, field_options):
'''desc is FieldDescriptorProto'''
self.tag = desc.number
self.struct_name = struct_name
self.union_name = None
self.name = desc.name
self.default = None
self.max_size = None
self.max_count = None
self.array_decl = ""
self.enc_size = None
self.ctype = None
# Parse field options
if field_options.HasField("max_size"):
self.max_size = field_options.max_size
if field_options.HasField("max_count"):
self.max_count = field_options.max_count
if desc.HasField('default_value'):
self.default = desc.default_value
# Check field rules, i.e. required/optional/repeated.
can_be_static = True
if desc.label == FieldD.LABEL_REQUIRED:
self.rules = 'REQUIRED'
elif desc.label == FieldD.LABEL_OPTIONAL:
self.rules = 'OPTIONAL'
elif desc.label == FieldD.LABEL_REPEATED:
self.rules = 'REPEATED'
if self.max_count is None:
can_be_static = False
else:
self.array_decl = '[%d]' % self.max_count
else:
raise NotImplementedError(desc.label)
# Check if the field can be implemented with static allocation
# i.e. whether the data size is known.
if desc.type == FieldD.TYPE_STRING and self.max_size is None:
can_be_static = False
if desc.type == FieldD.TYPE_BYTES and self.max_size is None:
can_be_static = False
# Decide how the field data will be allocated
if field_options.type == nanopb_pb2.FT_DEFAULT:
if can_be_static:
field_options.type = nanopb_pb2.FT_STATIC
else:
field_options.type = nanopb_pb2.FT_CALLBACK
if field_options.type == nanopb_pb2.FT_STATIC and not can_be_static:
raise Exception("Field %s is defined as static, but max_size or "
"max_count is not given." % self.name)
if field_options.type == nanopb_pb2.FT_STATIC:
self.allocation = 'STATIC'
elif field_options.type == nanopb_pb2.FT_POINTER:
self.allocation = 'POINTER'
elif field_options.type == nanopb_pb2.FT_CALLBACK:
self.allocation = 'CALLBACK'
else:
raise NotImplementedError(field_options.type)
# Decide the C data type to use in the struct.
if datatypes.has_key(desc.type):
self.ctype, self.pbtype, self.enc_size, isa = datatypes[desc.type]
# Override the field size if user wants to use smaller integers
if isa and field_options.int_size != nanopb_pb2.IS_DEFAULT:
self.ctype = intsizes[field_options.int_size]
if desc.type == FieldD.TYPE_UINT32 or desc.type == FieldD.TYPE_UINT64:
self.ctype = 'u' + self.ctype;
elif desc.type == FieldD.TYPE_ENUM:
self.pbtype = 'ENUM'
self.ctype = names_from_type_name(desc.type_name)
if self.default is not None:
self.default = self.ctype + self.default
self.enc_size = 5 # protoc rejects enum values > 32 bits
elif desc.type == FieldD.TYPE_STRING:
self.pbtype = 'STRING'
self.ctype = 'char'
if self.allocation == 'STATIC':
self.ctype = 'char'
self.array_decl += '[%d]' % self.max_size
self.enc_size = varint_max_size(self.max_size) + self.max_size
elif desc.type == FieldD.TYPE_BYTES:
self.pbtype = 'BYTES'
if self.allocation == 'STATIC':
self.ctype = self.struct_name + self.name + 't'
self.enc_size = varint_max_size(self.max_size) + self.max_size
elif self.allocation == 'POINTER':
self.ctype = 'pb_bytes_array_t'
elif desc.type == FieldD.TYPE_MESSAGE:
self.pbtype = 'MESSAGE'
self.ctype = self.submsgname = names_from_type_name(desc.type_name)
self.enc_size = None # Needs to be filled in after the message type is available
else:
raise NotImplementedError(desc.type)
def __cmp__(self, other):
return cmp(self.tag, other.tag)
def __str__(self):
result = ''
if self.allocation == 'POINTER':
if self.rules == 'REPEATED':
result += ' pb_size_t ' + self.name + '_count;\n'
if self.pbtype == 'MESSAGE':
# Use struct definition, so recursive submessages are possible
result += ' struct _%s *%s;' % (self.ctype, self.name)
elif self.rules == 'REPEATED' and self.pbtype in ['STRING', 'BYTES']:
# String/bytes arrays need to be defined as pointers to pointers
result += ' %s **%s;' % (self.ctype, self.name)
else:
result += ' %s *%s;' % (self.ctype, self.name)
elif self.allocation == 'CALLBACK':
result += ' pb_callback_t %s;' % self.name
else:
if self.rules == 'OPTIONAL' and self.allocation == 'STATIC':
result += ' bool has_' + self.name + ';\n'
elif self.rules == 'REPEATED' and self.allocation == 'STATIC':
result += ' pb_size_t ' + self.name + '_count;\n'
result += ' %s %s%s;' % (self.ctype, self.name, self.array_decl)
return result
def types(self):
'''Return definitions for any special types this field might need.'''
if self.pbtype == 'BYTES' and self.allocation == 'STATIC':
result = 'typedef PB_BYTES_ARRAY_T(%d) %s;\n' % (self.max_size, self.ctype)
else:
result = ''
return result
def get_dependencies(self):
    '''List the type names this field refers to.

    Used for ordering struct definitions: only statically allocated
    fields embed their type directly and therefore create a dependency.
    '''
    return [str(self.ctype)] if self.allocation == 'STATIC' else []
def get_initializer(self, null_init, inner_init_only = False):
    '''Return literal expression for this field's default value.
    null_init: If True, initialize to a 0 value instead of default from .proto
    inner_init_only: If True, exclude initialization for any count/has fields
    '''
    # First build the initializer for the value itself (inner_init),
    # then wrap it with has_/count initializers as needed (outer_init).
    inner_init = None
    if self.pbtype == 'MESSAGE':
        # Submessages delegate to their own generated init macros.
        if null_init:
            inner_init = '%s_init_zero' % self.ctype
        else:
            inner_init = '%s_init_default' % self.ctype
    elif self.default is None or null_init:
        # Zero-initialization per type.
        if self.pbtype == 'STRING':
            inner_init = '""'
        elif self.pbtype == 'BYTES':
            inner_init = '{0, {0}}'
        elif self.pbtype == 'ENUM':
            # Cast so the initializer compiles for C++ scoped-ish enums too.
            inner_init = '(%s)0' % self.ctype
        else:
            inner_init = '0'
    else:
        # Default value taken from the .proto file.
        if self.pbtype == 'STRING':
            # Python 2 only: str.encode('string_escape') does not exist
            # in Python 3.
            inner_init = self.default.encode('utf-8').encode('string_escape')
            inner_init = inner_init.replace('"', '\\"')
            inner_init = '"' + inner_init + '"'
        elif self.pbtype == 'BYTES':
            # Python 2 only: decode('string_escape') unescapes protoc's
            # escaped byte string into raw bytes.
            data = str(self.default).decode('string_escape')
            data = ['0x%02x' % ord(c) for c in data]
            if len(data) == 0:
                inner_init = '{0, {0}}'
            else:
                inner_init = '{%d, {%s}}' % (len(data), ','.join(data))
        elif self.pbtype in ['FIXED32', 'UINT32']:
            # Suffix keeps the C constant in the correct (unsigned) type.
            inner_init = str(self.default) + 'u'
        elif self.pbtype in ['FIXED64', 'UINT64']:
            inner_init = str(self.default) + 'ull'
        elif self.pbtype in ['SFIXED64', 'INT64']:
            inner_init = str(self.default) + 'll'
        else:
            inner_init = str(self.default)

    if inner_init_only:
        return inner_init

    # Wrap with the bookkeeping members generated in __str__.
    outer_init = None
    if self.allocation == 'STATIC':
        if self.rules == 'REPEATED':
            # count = 0, then max_count copies of the element initializer.
            outer_init = '0, {'
            outer_init += ', '.join([inner_init] * self.max_count)
            outer_init += '}'
        elif self.rules == 'OPTIONAL':
            # has_<name> = false, then the value.
            outer_init = 'false, ' + inner_init
        else:
            outer_init = inner_init
    elif self.allocation == 'POINTER':
        if self.rules == 'REPEATED':
            outer_init = '0, NULL'
        else:
            outer_init = 'NULL'
    elif self.allocation == 'CALLBACK':
        if self.pbtype == 'EXTENSION':
            outer_init = 'NULL'
        else:
            outer_init = '{{NULL}, NULL}'
    return outer_init
def default_decl(self, declaration_only = False):
    '''Return the C declaration (or definition) of this field's default
    value constant, or None when no constant is needed.

    declaration_only: emit an 'extern' declaration for the header
    instead of the full definition for the source file.
    '''
    if self.default is None:
        return None  # No default specified in the .proto

    array_decl = ''
    if self.pbtype == 'STRING':
        if self.allocation != 'STATIC':
            return None # Not implemented
        array_decl = '[%d]' % self.max_size
    elif self.pbtype == 'BYTES' and self.allocation != 'STATIC':
        return None # Not implemented

    symbol = self.struct_name + self.name
    if declaration_only:
        return 'extern const %s %s_default%s;' % (self.ctype, symbol, array_decl)
    return 'const %s %s_default%s = %s;' % (self.ctype, symbol, array_decl,
                                            self.get_initializer(False, True))
def tags(self):
    '''Return the #define line mapping <struct>_<field>_tag to the tag number.'''
    identifier = '%s_%s_tag' % (self.struct_name, self.name)
    return '#define %-40s %d\n' % (identifier, self.tag)
def pb_field_t(self, prev_field_name):
    '''Return the pb_field_t initializer to use in the constant array.
    prev_field_name is the name of the previous field or None.
    '''
    # The PB_FIELD/PB_ONEOF_FIELD macros compute offsets/sizes from
    # these arguments; the %3d/%-8s paddings only align the generated C.
    if self.rules == 'ONEOF':
        result = ' PB_ONEOF_FIELD(%s, ' % self.union_name
    else:
        result = ' PB_FIELD('
    result += '%3d, ' % self.tag
    result += '%-8s, ' % self.pbtype
    result += '%s, ' % self.rules
    result += '%-8s, ' % self.allocation
    # FIRST marks the first field of the struct; later fields reference
    # the previous field so the macro can compute their offset delta.
    result += '%s, ' % ("FIRST" if not prev_field_name else "OTHER")
    result += '%s, ' % self.struct_name
    result += '%s, ' % self.name
    result += '%s, ' % (prev_field_name or self.name)

    # Last macro argument: pointer to submessage descriptor, default
    # value constant, or 0 when neither applies.
    if self.pbtype == 'MESSAGE':
        result += '&%s_fields)' % self.submsgname
    elif self.default is None:
        result += '0)'
    elif self.pbtype in ['BYTES', 'STRING'] and self.allocation != 'STATIC':
        result += '0)' # Arbitrary size default values not implemented
    elif self.rules == 'OPTEXT':
        result += '0)' # Default value for extensions is not implemented
    else:
        result += '&%s_default)' % (self.struct_name + self.name)

    return result
def largest_field_value(self):
    '''Determine if this field needs 16bit or 32bit pb_field_t structure to compile properly.
    Returns numeric value or a C-expression for assert.

    Submessage sizes are only known to the C compiler, so those cases
    return a pb_membersize() expression instead of a number.
    '''
    if self.pbtype == 'MESSAGE':
        if self.rules == 'REPEATED' and self.allocation == 'STATIC':
            return 'pb_membersize(%s, %s[0])' % (self.struct_name, self.name)
        if self.rules == 'ONEOF':
            return 'pb_membersize(%s, %s.%s)' % (self.struct_name, self.union_name, self.name)
        return 'pb_membersize(%s, %s)' % (self.struct_name, self.name)
    return max(self.tag, self.max_size, self.max_count)
def encoded_size(self, allmsgs):
    '''Return the maximum size that this field can take when encoded,
    including the field tag. If the size cannot be determined, returns
    None.

    allmsgs: list of all Message objects in the file, used to resolve
    submessage sizes.
    '''
    if self.allocation != 'STATIC':
        # Callback/pointer fields have no compile-time bound.
        return None

    if self.pbtype == 'MESSAGE':
        for msg in allmsgs:
            if msg.name == self.submsgname:
                encsize = msg.encoded_size(allmsgs)
                if encsize is None:
                    return None # Submessage size is indeterminate

                # Include submessage length prefix
                encsize += varint_max_size(encsize.upperlimit())
                break
        else:
            # Submessage cannot be found, this currently occurs when
            # the submessage type is defined in a different file.
            # Instead of direct numeric value, reference the size that
            # has been #defined in the other file.
            encsize = EncodedSize(self.submsgname + 'size')

            # We will have to make a conservative assumption on the length
            # prefix size, though.
            encsize += 5
    elif self.enc_size is None:
        # enc_size is computed in __init__ for all non-message types;
        # None here means the constructor logic missed a case.
        raise RuntimeError("Could not determine encoded size for %s.%s"
                           % (self.struct_name, self.name))
    else:
        encsize = EncodedSize(self.enc_size)

    encsize += varint_max_size(self.tag << 3) # Tag + wire type

    if self.rules == 'REPEATED':
        # Decoders must be always able to handle unpacked arrays.
        # Therefore we have to reserve space for it, even though
        # we emit packed arrays ourselves.
        encsize *= self.max_count

    return encsize
class ExtensionRange(Field):
    '''Pseudo-field representing the pb_extension_t* member of an
    extensible message.

    Overrides the Field string/size helpers so the extensions pointer
    participates in struct generation like a normal field. Note that it
    deliberately does not call Field.__init__; it sets the attributes
    the inherited methods read directly.
    '''

    def __init__(self, struct_name, range_start, field_options):
        '''Implements a special pb_extension_t* field in an extensible message
        structure. The range_start signifies the index at which the extensions
        start. Not necessarily all tags above this are extensions, it is merely
        a speed optimization.
        '''
        self.tag = range_start
        self.struct_name = struct_name
        self.name = 'extensions'
        self.pbtype = 'EXTENSION'
        self.rules = 'OPTIONAL'
        self.allocation = 'CALLBACK'
        self.ctype = 'pb_extension_t'
        self.array_decl = ''
        self.default = None
        self.max_size = 0
        self.max_count = 0

    def __str__(self):
        # Fixed struct member; not derived from allocation/rules.
        return ' pb_extension_t *extensions;'

    def types(self):
        # No generated typedefs needed.
        return ''

    def tags(self):
        # No tag #define; extensions use their own tag constants.
        return ''

    def encoded_size(self, allmsgs):
        # We exclude extensions from the count, because they cannot be known
        # until runtime. Other option would be to return None here, but this
        # way the value remains useful if extensions are not used.
        return EncodedSize(0)
class ExtensionField(Field):
    '''A field defined with 'extend SomeMessage { ... }' in the .proto.

    Generates a standalone pb_extension_type_t descriptor instead of an
    entry in the extended message's field array. Only 'optional'
    extension fields are supported; others are marked skip and produce
    a comment in the output.
    '''

    def __init__(self, struct_name, desc, field_options):
        self.fullname = struct_name + desc.name
        self.extendee_name = names_from_type_name(desc.extendee)
        Field.__init__(self, self.fullname + 'struct', desc, field_options)

        if self.rules != 'OPTIONAL':
            self.skip = True
        else:
            self.skip = False
            # OPTEXT: optional extension, handled specially by PB_FIELD.
            self.rules = 'OPTEXT'

    def tags(self):
        '''Return the #define for the tag number of this field.'''
        identifier = '%s_tag' % self.fullname
        return '#define %-40s %d\n' % (identifier, self.tag)

    def extension_decl(self):
        '''Declaration of the extension type in the .pb.h file'''
        if self.skip:
            msg = '/* Extension field %s was skipped because only "optional"\n' % self.fullname
            msg +=' type of extension fields is currently supported. */\n'
            return msg

        return ('extern const pb_extension_type_t %s; /* field type: %s */\n' %
            (self.fullname, str(self).strip()))

    def extension_def(self):
        '''Definition of the extension type in the .pb.c file'''
        if self.skip:
            return ''

        # A one-field wrapper struct plus its pb_field_t descriptor; the
        # pb_extension_type_t then points at that descriptor.
        result = 'typedef struct {\n'
        result += str(self)
        result += '\n} %s;\n\n' % self.struct_name
        result += ('static const pb_field_t %s_field = \n  %s;\n\n' %
                    (self.fullname, self.pb_field_t(None)))
        result += 'const pb_extension_type_t %s = {\n' % self.fullname
        result += '    NULL,\n'
        result += '    NULL,\n'
        result += '    &%s_field\n' % self.fullname
        result += '};\n'
        return result
# ---------------------------------------------------------------------------
# Generation of oneofs (unions)
# ---------------------------------------------------------------------------
class OneOf(Field):
    '''A oneof group, generated as a C union plus a which_<name> tag.

    Acts as a composite Field: it aggregates the member Fields and
    forwards/combines the Field helper methods over them.
    '''

    def __init__(self, struct_name, oneof_desc):
        self.struct_name = struct_name
        self.name = oneof_desc.name
        self.ctype = 'union'
        self.fields = []
        self.allocation = 'ONEOF'
        self.default = None
        self.rules = 'ONEOF'

    def add_field(self, field):
        '''Add a member field to this union (fields kept sorted by tag).'''
        if field.allocation == 'CALLBACK':
            raise Exception("Callback fields inside of oneof are not supported"
                            + " (field %s)" % field.name)

        field.union_name = self.name
        field.rules = 'ONEOF'
        self.fields.append(field)
        self.fields.sort(key = lambda f: f.tag)

        # Sort by the lowest tag number inside union
        self.tag = min([f.tag for f in self.fields])

    def __cmp__(self, other):
        # Python 2 only ordering, same convention as Field.__cmp__.
        return cmp(self.tag, other.tag)

    def __str__(self):
        '''Render the which_ tag member and the union declaration.'''
        result = ''
        if self.fields:
            result += ' pb_size_t which_' + self.name + ";\n"
            result += ' union {\n'
            for f in self.fields:
                # Re-indent each member's declaration inside the union.
                result += ' ' + str(f).replace('\n', '\n ') + '\n'
            result += ' } ' + self.name + ';'
        return result

    def types(self):
        # Concatenate typedefs required by any member.
        return ''.join([f.types() for f in self.fields])

    def get_dependencies(self):
        # Union depends on every member's dependencies.
        deps = []
        for f in self.fields:
            deps += f.get_dependencies()
        return deps

    def get_initializer(self, null_init):
        # which_ = 0, union initialized via its first member.
        return '0, {' + self.fields[0].get_initializer(null_init) + '}'

    def default_decl(self, declaration_only = False):
        # Default value constants are not generated for oneofs.
        return None

    def tags(self):
        return '\n'.join([f.tags() for f in self.fields])

    def pb_field_t(self, prev_field_name):
        # One PB_ONEOF_FIELD entry per member.
        result = ',\n'.join([f.pb_field_t(prev_field_name) for f in self.fields])
        return result

    def largest_field_value(self):
        return max([f.largest_field_value() for f in self.fields])

    def encoded_size(self, allmsgs):
        '''Encoded size of the union is that of its largest member.'''
        largest = EncodedSize(0)
        for f in self.fields:
            size = f.encoded_size(allmsgs)
            if size is None:
                return None
            elif size.symbols:
                return None # Cannot resolve maximum of symbols
            elif size.value > largest.value:
                largest = size
        return largest
# ---------------------------------------------------------------------------
# Generation of messages (structures)
# ---------------------------------------------------------------------------
class Message:
    '''A protobuf message, generated as a C struct plus its pb_field_t
    descriptor array.

    Collects Field/OneOf/ExtensionRange members from the protoc
    DescriptorProto, applying per-message and per-field nanopb options.
    '''

    def __init__(self, names, desc, message_options):
        self.name = names
        self.fields = []
        self.oneofs = {}
        no_unions = []

        if message_options.msgid:
            # Optional numeric message id, used by the PB_MSGID helpers.
            self.msgid = message_options.msgid

        if hasattr(desc, 'oneof_decl'):
            for i, f in enumerate(desc.oneof_decl):
                oneof_options = get_nanopb_suboptions(desc, message_options, self.name + f.name)
                if oneof_options.no_unions:
                    no_unions.append(i) # No union, but add fields normally
                elif oneof_options.type == nanopb_pb2.FT_IGNORE:
                    pass # No union and skip fields also
                else:
                    oneof = OneOf(self.name, f)
                    self.oneofs[i] = oneof
                    self.fields.append(oneof)

        for f in desc.field:
            field_options = get_nanopb_suboptions(f, message_options, self.name + f.name)
            if field_options.type == nanopb_pb2.FT_IGNORE:
                continue

            field = Field(self.name, f, field_options)
            if (hasattr(f, 'oneof_index') and
                f.HasField('oneof_index') and
                f.oneof_index not in no_unions):
                # Field belongs to a oneof that is generated as a union;
                # if the oneof was FT_IGNOREd the field is dropped too.
                if f.oneof_index in self.oneofs:
                    self.oneofs[f.oneof_index].add_field(field)
            else:
                self.fields.append(field)

        if len(desc.extension_range) > 0:
            field_options = get_nanopb_suboptions(desc, message_options, self.name + 'extensions')
            range_start = min([r.start for r in desc.extension_range])
            if field_options.type != nanopb_pb2.FT_IGNORE:
                self.fields.append(ExtensionRange(self.name, range_start, field_options))

        self.packed = message_options.packed_struct
        # ordered_fields: sorted by tag number for the descriptor array.
        self.ordered_fields = self.fields[:]
        self.ordered_fields.sort()

    def get_dependencies(self):
        '''Get list of type names that this structure refers to.'''
        deps = []
        for f in self.fields:
            deps += f.get_dependencies()
        return deps

    def __str__(self):
        '''Render the C struct definition for this message.'''
        result = 'typedef struct _%s {\n' % self.name

        if not self.ordered_fields:
            # Empty structs are not allowed in C standard.
            # Therefore add a dummy field if an empty message occurs.
            result += ' uint8_t dummy_field;'

        result += '\n'.join([str(f) for f in self.ordered_fields])
        result += '\n}'

        if self.packed:
            result += ' pb_packed'

        result += ' %s;' % self.name

        if self.packed:
            result = 'PB_PACKED_STRUCT_START\n' + result
            result += '\nPB_PACKED_STRUCT_END'

        return result

    def types(self):
        # Typedefs needed by the member fields (bytes array types).
        return ''.join([f.types() for f in self.fields])

    def get_initializer(self, null_init):
        '''Return the brace initializer for a whole message instance.'''
        if not self.ordered_fields:
            return '{0}'

        parts = []
        for field in self.ordered_fields:
            parts.append(field.get_initializer(null_init))
        return '{' + ', '.join(parts) + '}'

    def default_decl(self, declaration_only = False):
        '''Concatenate default value declarations of all fields.'''
        result = ""
        for field in self.fields:
            default = field.default_decl(declaration_only)
            if default is not None:
                result += default + '\n'
        return result

    def count_required_fields(self):
        '''Returns number of required fields inside this message'''
        count = 0
        for f in self.fields:
            if not isinstance(f, OneOf):
                if f.rules == 'REQUIRED':
                    count += 1
        return count

    def count_all_fields(self):
        '''Number of pb_field_t entries (oneof members counted separately).'''
        count = 0
        for f in self.fields:
            if isinstance(f, OneOf):
                count += len(f.fields)
            else:
                count += 1
        return count

    def fields_declaration(self):
        '''extern declaration of the field descriptor array (+1 for PB_LAST_FIELD).'''
        result = 'extern const pb_field_t %s_fields[%d];' % (self.name, self.count_all_fields() + 1)
        return result

    def fields_definition(self):
        '''Definition of the field descriptor array for the .pb.c file.'''
        result = 'const pb_field_t %s_fields[%d] = {\n' % (self.name, self.count_all_fields() + 1)

        prev = None
        for field in self.ordered_fields:
            result += field.pb_field_t(prev)
            result += ',\n'
            if isinstance(field, OneOf):
                # Next field's offset is computed from the union's last member.
                prev = field.name + '.' + field.fields[-1].name
            else:
                prev = field.name

        result += ' PB_LAST_FIELD\n};'
        return result

    def encoded_size(self, allmsgs):
        '''Return the maximum size that this message can take when encoded.
        If the size cannot be determined, returns None.
        '''
        size = EncodedSize(0)
        for field in self.fields:
            fsize = field.encoded_size(allmsgs)
            if fsize is None:
                return None
            size += fsize

        return size
# ---------------------------------------------------------------------------
# Processing of entire .proto files
# ---------------------------------------------------------------------------
def iterate_messages(desc, names = Names()):
    '''Recursively find all messages. For each, yield name, DescriptorProto.'''
    # A FileDescriptorProto holds top-level messages in .message_type,
    # while a DescriptorProto nests them in .nested_type.
    if hasattr(desc, 'message_type'):
        children = desc.message_type
    else:
        children = desc.nested_type

    for child in children:
        child_names = names + child.name
        yield child_names, child
        # Depth-first recursion into nested message definitions.
        for pair in iterate_messages(child, child_names):
            yield pair
def iterate_extensions(desc, names = Names()):
    '''Recursively find all extensions.
    For each, yield name, FieldDescriptorProto.
    '''
    # Extensions declared directly at this level.
    for ext in desc.extension:
        yield names, ext

    # Extensions declared inside (possibly nested) message definitions.
    for subname, subdesc in iterate_messages(desc, names):
        for ext in subdesc.extension:
            yield subname, ext
def parse_file(fdesc, file_options):
    '''Takes a FileDescriptorProto and returns tuple (enums, messages, extensions).'''
    enums = []
    messages = []
    extensions = []

    # The .proto package becomes the name prefix for all symbols.
    if fdesc.package:
        base_name = Names(fdesc.package.split('.'))
    else:
        base_name = Names()

    for enum in fdesc.enum_type:
        enum_options = get_nanopb_suboptions(enum, file_options, base_name + enum.name)
        enums.append(Enum(base_name, enum, enum_options))

    for names, message in iterate_messages(fdesc, base_name):
        message_options = get_nanopb_suboptions(message, file_options, names)

        if message_options.skip_message:
            continue

        messages.append(Message(names, message, message_options))
        # Enums nested inside messages inherit the message's options.
        for enum in message.enum_type:
            enum_options = get_nanopb_suboptions(enum, message_options, names + enum.name)
            enums.append(Enum(names, enum, enum_options))

    for names, extension in iterate_extensions(fdesc, base_name):
        field_options = get_nanopb_suboptions(extension, file_options, names + extension.name)
        if field_options.type != nanopb_pb2.FT_IGNORE:
            extensions.append(ExtensionField(names, extension, field_options))

    # Fix field default values where enum short names are used.
    for enum in enums:
        if not enum.options.long_names:
            for message in messages:
                for field in message.fields:
                    if field.default in enum.value_longnames:
                        idx = enum.value_longnames.index(field.default)
                        field.default = enum.values[idx][0]

    return enums, messages, extensions
def toposort2(data):
    '''Topological sort.
    From http://code.activestate.com/recipes/577413-topological-sort/
    This function is under the MIT license.

    data maps an item to the set of items it depends on. Items are
    yielded so that every item comes after all of its dependencies;
    ties are broken alphabetically (sorted()). Raises AssertionError
    on cyclic dependencies. Note: mutates the passed-in sets/dict.
    '''
    for k, v in data.items():
        v.discard(k) # Ignore self dependencies

    # Items that only appear as dependencies get an empty dependency set.
    # (Explicit union instead of reduce(), which is not a builtin in
    # Python 3.)
    extra_items_in_deps = set()
    for deps in data.values():
        extra_items_in_deps |= deps
    extra_items_in_deps -= set(data.keys())
    data.update(dict([(item, set()) for item in extra_items_in_deps]))

    while True:
        # Emit everything whose dependencies are already satisfied.
        ordered = set(item for item, dep in data.items() if not dep)
        if not ordered:
            break
        for item in sorted(ordered):
            yield item
        # Remove the emitted items from the remaining dependency sets.
        data = dict([(item, (dep - ordered)) for item, dep in data.items()
                     if item not in ordered])

    assert not data, "A cyclic dependency exists amongst %r" % data
def sort_dependencies(messages):
    '''Sort a list of Messages based on dependencies.

    Yields messages so that every message comes after the submessage
    types it embeds, allowing valid C struct declaration order.
    '''
    message_by_name = dict((str(m.name), m) for m in messages)
    dependencies = dict((str(m.name), set(m.get_dependencies()))
                        for m in messages)

    for msgname in toposort2(dependencies):
        # toposort2 also yields plain C type names (char, int32_t, ...);
        # only emit entries that are actually our messages.
        if msgname in message_by_name:
            yield message_by_name[msgname]
def make_identifier(headername):
    '''Make #ifndef identifier that contains uppercase A-Z and digits 0-9'''
    # Every non-alphanumeric character is mapped to an underscore.
    return ''.join(c if c.isalnum() else '_' for c in headername.upper())
def generate_header(dependencies, headername, enums, messages, extensions, options):
    '''Generate content for a header file.
    Generates strings, which should be concatenated and stored to file.

    dependencies: imported .proto file names to #include as .pb.h
    headername:   basename of the header being generated (for guards)
    options:      command line options (libformat, genformat, ...)
    '''
    yield '/* Automatically generated nanopb header */\n'
    if options.notimestamp:
        yield '/* Generated by %s */\n\n' % (nanopb_version)
    else:
        yield '/* Generated by %s at %s. */\n\n' % (nanopb_version, time.asctime())

    # Include guard derived from the header file name.
    symbol = make_identifier(headername)
    yield '#ifndef PB_%s_INCLUDED\n' % symbol
    yield '#define PB_%s_INCLUDED\n' % symbol
    try:
        yield options.libformat % ('pb.h')
    except TypeError:
        # no %s specified - use whatever was passed in as options.libformat
        yield options.libformat
    yield '\n'

    # Headers of the imported .proto files.
    for dependency in dependencies:
        noext = os.path.splitext(dependency)[0]
        yield options.genformat % (noext + options.extension + '.h')
        yield '\n'

    # Guard against mixing generator and runtime library versions.
    yield '#if PB_PROTO_HEADER_VERSION != 30\n'
    yield '#error Regenerate this file with the current version of nanopb generator.\n'
    yield '#endif\n'
    yield '\n'

    yield '#ifdef __cplusplus\n'
    yield 'extern "C" {\n'
    yield '#endif\n\n'

    yield '/* Enum definitions */\n'
    for enum in enums:
        yield str(enum) + '\n\n'

    yield '/* Struct definitions */\n'
    # Dependency order so embedded submessage structs are declared first.
    for msg in sort_dependencies(messages):
        yield msg.types()
        yield str(msg) + '\n\n'

    if extensions:
        yield '/* Extensions */\n'
        for extension in extensions:
            yield extension.extension_decl()
        yield '\n'

    yield '/* Default values for struct fields */\n'
    for msg in messages:
        yield msg.default_decl(True)
    yield '\n'

    yield '/* Initializer values for message structs */\n'
    for msg in messages:
        identifier = '%s_init_default' % msg.name
        yield '#define %-40s %s\n' % (identifier, msg.get_initializer(False))
    for msg in messages:
        identifier = '%s_init_zero' % msg.name
        yield '#define %-40s %s\n' % (identifier, msg.get_initializer(True))
    yield '\n'

    yield '/* Field tags (for use in manual encoding/decoding) */\n'
    for msg in sort_dependencies(messages):
        for field in msg.fields:
            yield field.tags()
    for extension in extensions:
        yield extension.tags()
    yield '\n'

    yield '/* Struct field encoding specification for nanopb */\n'
    for msg in messages:
        yield msg.fields_declaration() + '\n'
    yield '\n'

    yield '/* Maximum encoded size of messages (where known) */\n'
    for msg in messages:
        msize = msg.encoded_size(messages)
        if msize is not None:
            identifier = '%s_size' % msg.name
            yield '#define %-40s %s\n' % (identifier, msize)
    yield '\n'

    yield '/* helper macros for message type ids if set with */\n'
    yield '/* option (nanopb_msgopt).msgid = <id>; */\n\n'
    yield '#ifdef PB_MSGID\n'
    for msg in messages:
        if hasattr(msg,'msgid'):
            yield '#define PB_MSG_%d %s\n' % (msg.msgid, msg.name)
    yield '\n'

    # X-macro list of (id, size, type) for all messages with a msgid.
    symbol = make_identifier(headername.split('.')[0])
    yield '#define %s_MESSAGES \\\n' % symbol

    for msg in messages:
        m = "-1"
        msize = msg.encoded_size(messages)
        if msize is not None:
            m = msize
        if hasattr(msg,'msgid'):
            yield '\tPB_MSG(%d,%s,%s) \\\n' % (msg.msgid, m, msg.name)
    yield '\n'
    yield '#endif\n\n'

    yield '#ifdef __cplusplus\n'
    yield '} /* extern "C" */\n'
    yield '#endif\n'

    # End of header
    yield '\n#endif\n'
def generate_source(headername, enums, messages, extensions, options):
    '''Generate content for a source file.

    Yields strings to be concatenated into the .pb.c file: default
    value constants, field descriptor arrays, extension definitions,
    and compile-time sanity checks.
    '''
    yield '/* Automatically generated nanopb constant definitions */\n'
    if options.notimestamp:
        yield '/* Generated by %s */\n\n' % (nanopb_version)
    else:
        yield '/* Generated by %s at %s. */\n\n' % (nanopb_version, time.asctime())
    yield options.genformat % (headername)
    yield '\n'

    # Guard against mixing generator and runtime library versions.
    yield '#if PB_PROTO_HEADER_VERSION != 30\n'
    yield '#error Regenerate this file with the current version of nanopb generator.\n'
    yield '#endif\n'
    yield '\n'

    for msg in messages:
        yield msg.default_decl(False)

    yield '\n\n'

    for msg in messages:
        yield msg.fields_definition() + '\n\n'

    for ext in extensions:
        yield ext.extension_def() + '\n'

    # Add checks for numeric limits
    if messages:
        largest_msg = max(messages, key = lambda m: m.count_required_fields())
        largest_count = largest_msg.count_required_fields()
        if largest_count > 64:
            yield '\n/* Check that missing required fields will be properly detected */\n'
            yield '#if PB_MAX_REQUIRED_FIELDS < %d\n' % largest_count
            yield '#error Properly detecting missing required fields in %s requires \\\n' % largest_msg.name
            yield ' setting PB_MAX_REQUIRED_FIELDS to %d or more.\n' % largest_count
            yield '#endif\n'

    # Determine whether 8/16/32-bit field descriptors are sufficient.
    # Numeric values accumulate into 'worst'; submessage sizes (strings,
    # only known to the C compiler) are collected for static asserts.
    worst = 0
    worst_field = ''
    checks = []
    checks_msgnames = []
    for msg in messages:
        checks_msgnames.append(msg.name)
        for field in msg.fields:
            status = field.largest_field_value()
            # 'unicode' is Python 2 only; this isinstance is a Py2 idiom.
            if isinstance(status, (str, unicode)):
                checks.append(status)
            elif status > worst:
                worst = status
                worst_field = str(field.struct_name) + '.' + str(field.name)

    if worst > 255 or checks:
        yield '\n/* Check that field information fits in pb_field_t */\n'

        if worst > 65535 or checks:
            yield '#if !defined(PB_FIELD_32BIT)\n'
            if worst > 65535:
                yield '#error Field descriptor for %s is too large. Define PB_FIELD_32BIT to fix this.\n' % worst_field
            else:
                assertion = ' && '.join(str(c) + ' < 65536' for c in checks)
                msgs = '_'.join(str(n) for n in checks_msgnames)
                yield '/* If you get an error here, it means that you need to define PB_FIELD_32BIT\n'
                yield ' * compile-time option. You can do that in pb.h or on compiler command line.\n'
                yield ' * \n'
                yield ' * The reason you need to do this is that some of your messages contain tag\n'
                yield ' * numbers or field sizes that are larger than what can fit in 8 or 16 bit\n'
                yield ' * field descriptors.\n'
                yield ' */\n'
                yield 'PB_STATIC_ASSERT((%s), YOU_MUST_DEFINE_PB_FIELD_32BIT_FOR_MESSAGES_%s)\n'%(assertion,msgs)
            yield '#endif\n\n'

        if worst < 65536:
            yield '#if !defined(PB_FIELD_16BIT) && !defined(PB_FIELD_32BIT)\n'
            if worst > 255:
                yield '#error Field descriptor for %s is too large. Define PB_FIELD_16BIT to fix this.\n' % worst_field
            else:
                assertion = ' && '.join(str(c) + ' < 256' for c in checks)
                msgs = '_'.join(str(n) for n in checks_msgnames)
                yield '/* If you get an error here, it means that you need to define PB_FIELD_16BIT\n'
                yield ' * compile-time option. You can do that in pb.h or on compiler command line.\n'
                yield ' * \n'
                yield ' * The reason you need to do this is that some of your messages contain tag\n'
                yield ' * numbers or field sizes that are larger than what can fit in the default\n'
                yield ' * 8 bit descriptors.\n'
                yield ' */\n'
                yield 'PB_STATIC_ASSERT((%s), YOU_MUST_DEFINE_PB_FIELD_16BIT_FOR_MESSAGES_%s)\n'%(assertion,msgs)
            yield '#endif\n\n'

    # Add check for sizeof(double)
    has_double = False
    for msg in messages:
        for field in msg.fields:
            if field.ctype == 'double':
                has_double = True

    if has_double:
        yield '\n'
        yield '/* On some platforms (such as AVR), double is really float.\n'
        yield ' * These are not directly supported by nanopb, but see example_avr_double.\n'
        yield ' * To get rid of this error, remove any double fields from your .proto.\n'
        yield ' */\n'
        yield 'PB_STATIC_ASSERT(sizeof(double) == 8, DOUBLE_MUST_BE_8_BYTES)\n'

    yield '\n'
# ---------------------------------------------------------------------------
# Options parsing for the .proto files
# ---------------------------------------------------------------------------
from fnmatch import fnmatch
def read_options_file(infile):
    '''Parse a separate options file to list:
    [(namemask, options), ...]

    Each non-comment line is '<fnmatch pattern> <NanoPBOptions text>'.
    Malformed lines are reported to stderr and skipped rather than
    aborting the whole run.
    '''
    results = []
    for i, line in enumerate(infile):
        line = line.strip()
        # Skip blank lines and //- or #-style comments.
        if not line or line.startswith('//') or line.startswith('#'):
            continue

        parts = line.split(None, 1)
        if len(parts) < 2:
            sys.stderr.write("%s:%d: " % (infile.name, i + 1) +
                             "Option lines should have space between field name and options. " +
                             "Skipping line: '%s'\n" % line)
            continue

        opts = nanopb_pb2.NanoPBOptions()

        try:
            text_format.Merge(parts[1], opts)
        # Python 2 only syntax ('except E, e'); would be 'except E as e'
        # in Python 3.
        except Exception, e:
            sys.stderr.write("%s:%d: " % (infile.name, i + 1) +
                             "Unparseable option line: '%s'. " % line +
                             "Error: %s\n" % str(e))
            continue
        results.append((parts[0], opts))

    return results
class Globals:
    '''Ugly global variables, should find a good way to pass these.'''
    verbose_options = False    # mirrors the -v/--verbose command line flag
    separate_options = []      # [(namemask, NanoPBOptions)] from the .options file
    matched_namemasks = set()  # namemasks that matched at least one field (for warnings)
def get_nanopb_suboptions(subdesc, options, name):
    '''Get copy of options, and merge information from subdesc.

    Precedence, lowest to highest: inherited options (the 'options'
    argument), matching entries from the separate .options file, then
    [(nanopb)] extension options embedded in the .proto itself.

    subdesc: protoc descriptor (field/file/message/enum) to read from
    options: NanoPBOptions to inherit from
    name:    Names object identifying the element (for mask matching)
    Raises Exception for unrecognized descriptor option types.
    '''
    new_options = nanopb_pb2.NanoPBOptions()
    new_options.CopyFrom(options)

    # Handle options defined in a separate file.
    # (Loop variable renamed from 'options' so it no longer shadows the
    # parameter of the same name.)
    dotname = '.'.join(name.parts)
    for namemask, mask_options in Globals.separate_options:
        if fnmatch(dotname, namemask):
            Globals.matched_namemasks.add(namemask)
            new_options.MergeFrom(mask_options)

    # Handle options defined in .proto: pick the extension type that
    # matches the kind of descriptor we were given.
    if isinstance(subdesc.options, descriptor.FieldOptions):
        ext_type = nanopb_pb2.nanopb
    elif isinstance(subdesc.options, descriptor.FileOptions):
        ext_type = nanopb_pb2.nanopb_fileopt
    elif isinstance(subdesc.options, descriptor.MessageOptions):
        ext_type = nanopb_pb2.nanopb_msgopt
    elif isinstance(subdesc.options, descriptor.EnumOptions):
        ext_type = nanopb_pb2.nanopb_enumopt
    else:
        raise Exception("Unknown options type")

    if subdesc.options.HasExtension(ext_type):
        ext = subdesc.options.Extensions[ext_type]
        new_options.MergeFrom(ext)

    if Globals.verbose_options:
        sys.stderr.write("Options for " + dotname + ": ")
        sys.stderr.write(text_format.MessageToString(new_options) + "\n")

    return new_options
# ---------------------------------------------------------------------------
# Command line interface
# ---------------------------------------------------------------------------
import sys
import os.path
from optparse import OptionParser

# Command line option definitions, shared between the standalone CLI
# (main_cli) and the protoc plugin mode (main_plugin, which parses the
# protoc --nanopb_out parameter string with the same parser).
optparser = OptionParser(
    usage = "Usage: nanopb_generator.py [options] file.pb ...",
    epilog = "Compile file.pb from file.proto by: 'protoc -ofile.pb file.proto'. " +
             "Output will be written to file.pb.h and file.pb.c.")
optparser.add_option("-x", dest="exclude", metavar="FILE", action="append", default=[],
    help="Exclude file from generated #include list.")
optparser.add_option("-e", "--extension", dest="extension", metavar="EXTENSION", default=".pb",
    help="Set extension to use instead of '.pb' for generated files. [default: %default]")
optparser.add_option("-f", "--options-file", dest="options_file", metavar="FILE", default="%s.options",
    help="Set name of a separate generator options file.")
optparser.add_option("-Q", "--generated-include-format", dest="genformat",
    metavar="FORMAT", default='#include "%s"\n',
    help="Set format string to use for including other .pb.h files. [default: %default]")
optparser.add_option("-L", "--library-include-format", dest="libformat",
    metavar="FORMAT", default='#include <%s>\n',
    help="Set format string to use for including the nanopb pb.h header. [default: %default]")
optparser.add_option("-T", "--no-timestamp", dest="notimestamp", action="store_true", default=False,
    help="Don't add timestamp to .pb.h and .pb.c preambles")
optparser.add_option("-q", "--quiet", dest="quiet", action="store_true", default=False,
    help="Don't print anything except errors.")
optparser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False,
    help="Print more information.")
optparser.add_option("-s", dest="settings", metavar="OPTION:VALUE", action="append", default=[],
    help="Set generator option (max_size, max_count etc.).")
def process_file(filename, fdesc, options):
    '''Process a single file.
    filename: The full path to the .proto or .pb source file, as string.
    fdesc: The loaded FileDescriptorSet, or None to read from the input file.
    options: Command line options as they come from OptionsParser.

    Returns a dict:
        {'headername': Name of header file,
         'headerdata': Data for the .h header file,
         'sourcename': Name of the source code file,
         'sourcedata': Data for the .c source code file
        }
    '''
    # Options given with -s on the command line apply to the whole file.
    toplevel_options = nanopb_pb2.NanoPBOptions()
    for s in options.settings:
        text_format.Merge(s, toplevel_options)

    if not fdesc:
        data = open(filename, 'rb').read()
        fdesc = descriptor.FileDescriptorSet.FromString(data).file[0]

    # Check if there is a separate .options file
    had_abspath = False
    try:
        optfilename = options.options_file % os.path.splitext(filename)[0]
    except TypeError:
        # No %s specified, use the filename as-is
        optfilename = options.options_file
        had_abspath = True

    if os.path.isfile(optfilename):
        if options.verbose:
            sys.stderr.write('Reading options from ' + optfilename + '\n')

        Globals.separate_options = read_options_file(open(optfilename, "rU"))
    else:
        # If we are given a full filename and it does not exist, give an error.
        # However, don't give error when we automatically look for .options file
        # with the same name as .proto.
        if options.verbose or had_abspath:
            sys.stderr.write('Options file not found: ' + optfilename)

        Globals.separate_options = []

    Globals.matched_namemasks = set()

    # Parse the file
    file_options = get_nanopb_suboptions(fdesc, toplevel_options, Names([filename]))
    enums, messages, extensions = parse_file(fdesc, file_options)

    # Decide the file names
    noext = os.path.splitext(filename)[0]
    headername = noext + options.extension + '.h'
    sourcename = noext + options.extension + '.c'
    headerbasename = os.path.basename(headername)

    # List of .proto files that should not be included in the C header file
    # even if they are mentioned in the source .proto.
    excludes = ['nanopb.proto', 'google/protobuf/descriptor.proto'] + options.exclude
    dependencies = [d for d in fdesc.dependency if d not in excludes]

    headerdata = ''.join(generate_header(dependencies, headerbasename, enums,
                                         messages, extensions, options))

    sourcedata = ''.join(generate_source(headerbasename, enums,
                                         messages, extensions, options))

    # Check if there were any lines in .options that did not match a member
    unmatched = [n for n,o in Globals.separate_options if n not in Globals.matched_namemasks]
    if unmatched and not options.quiet:
        sys.stderr.write("Following patterns in " + optfilename + " did not match any fields: "
                         + ', '.join(unmatched) + "\n")
        if not Globals.verbose_options:
            sys.stderr.write("Use protoc --nanopb-out=-v:. to see a list of the field names.\n")

    return {'headername': headername, 'headerdata': headerdata,
            'sourcename': sourcename, 'sourcedata': sourcedata}
def main_cli():
    '''Main function when invoked directly from the command line.

    Parses argv, processes each given .pb file and writes the generated
    .pb.h/.pb.c next to it. Exits with status 1 when no input is given.
    '''
    options, filenames = optparser.parse_args()

    if not filenames:
        optparser.print_help()
        sys.exit(1)

    if options.quiet:
        # --quiet overrides --verbose.
        options.verbose = False

    Globals.verbose_options = options.verbose

    for filename in filenames:
        results = process_file(filename, None, options)

        if not options.quiet:
            sys.stderr.write("Writing to " + results['headername'] + " and "
                             + results['sourcename'] + "\n")

        open(results['headername'], 'w').write(results['headerdata'])
        open(results['sourcename'], 'w').write(results['sourcedata'])
def main_plugin():
    '''Main function when invoked as a protoc plugin.

    Reads a CodeGeneratorRequest from stdin, runs the generator for
    each requested file and writes a CodeGeneratorResponse to stdout.
    Generator options come from the protoc plugin parameter string.
    '''
    import sys
    if sys.platform == "win32":
        import os, msvcrt
        # Set stdin and stdout to binary mode
        msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)

    data = sys.stdin.read()
    request = plugin_pb2.CodeGeneratorRequest.FromString(data)

    import shlex
    # The plugin parameter is parsed with the same option parser as the CLI.
    args = shlex.split(request.parameter)
    options, dummy = optparser.parse_args(args)

    Globals.verbose_options = options.verbose

    response = plugin_pb2.CodeGeneratorResponse()

    for filename in request.file_to_generate:
        for fdesc in request.proto_file:
            if fdesc.name == filename:
                results = process_file(filename, fdesc, options)

                f = response.file.add()
                f.name = results['headername']
                f.content = results['headerdata']

                f = response.file.add()
                f.name = results['sourcename']
                f.content = results['sourcedata']

    sys.stdout.write(response.SerializeToString())
if __name__ == '__main__':
    # Check if we are running as a plugin under protoc.  protoc invokes
    # plugins as executables named "protoc-gen-<something>", so the argv[0]
    # heuristic (or an explicit --protoc-plugin flag) selects plugin mode.
    if 'protoc-gen-' in sys.argv[0] or '--protoc-plugin' in sys.argv:
        main_plugin()
    else:
        main_cli()
| lgpl-2.1 |
dkubiak789/odoo | addons/crm/validate_email.py | 462 | 5978 | # RFC 2822 - style email validation for Python
# (c) 2012 Syrus Akbary <me@syrusakbary.com>
# Extended from (c) 2011 Noel Bush <noel@aitools.org>
# for support of mx and user check
# This code is made available to you under the GNU LGPL v3.
#
# This module provides a single method, valid_email_address(),
# which returns True or False to indicate whether a given address
# is valid according to the 'addr-spec' part of the specification
# given in RFC 2822. Ideally, we would like to find this
# in some other library, already thoroughly tested and well-
# maintained. The standard Python library email.utils
# contains a parse_addr() function, but it is not sufficient
# to detect many malformed addresses.
#
# This implementation aims to be faithful to the RFC, with the
# exception of a circular definition (see comments below), and
# with the omission of the pattern components marked as "obsolete".
import re
import smtplib
import socket
# pyDNS is an optional dependency: it is only needed for the MX-record and
# mailbox-verification checks.  When it is missing, DNS is set to None and a
# stand-in ServerError is defined so except clauses below still work.
try:
    import DNS
    ServerError = DNS.ServerError
except (ImportError, AttributeError):
    # Narrowed from a bare "except:", which also swallowed SystemExit and
    # KeyboardInterrupt.  AttributeError is kept in case an installed DNS
    # module lacks the ServerError attribute.
    DNS = None

    class ServerError(Exception):
        pass
# All we are really doing is comparing the input string to one
# gigantic regular expression.  But building that regexp, and
# ensuring its correctness, is made much easier by assembling it
# from the "tokens" defined by the RFC.  Each of these tokens is
# tested in the accompanying unit test file.
#
# The section of RFC 2822 from which each pattern component is
# derived is given in an accompanying comment.
#
# (To make things simple, every string below is given as 'raw',
# even when it's not strictly necessary.  This way we don't forget
# when it is necessary.)
#
WSP = r'[ \t]'                                       # see 2.2.2. Structured Header Field Bodies
CRLF = r'(?:\r\n)'                                   # see 2.2.3. Long Header Fields
NO_WS_CTL = r'\x01-\x08\x0b\x0c\x0f-\x1f\x7f'        # see 3.2.1. Primitive Tokens
QUOTED_PAIR = r'(?:\\.)'                             # see 3.2.2. Quoted characters
FWS = r'(?:(?:' + WSP + r'*' + CRLF + r')?' + \
      WSP + r'+)'                                    # see 3.2.3. Folding white space and comments
CTEXT = r'[' + NO_WS_CTL + \
        r'\x21-\x27\x2a-\x5b\x5d-\x7e]'              # see 3.2.3
CCONTENT = r'(?:' + CTEXT + r'|' + \
           QUOTED_PAIR + r')'                        # see 3.2.3 (NB: The RFC includes COMMENT here
                                                     # as well, but that would be circular.)
COMMENT = r'\((?:' + FWS + r'?' + CCONTENT + \
          r')*' + FWS + r'?\)'                       # see 3.2.3
CFWS = r'(?:' + FWS + r'?' + COMMENT + ')*(?:' + \
       FWS + '?' + COMMENT + '|' + FWS + ')'         # see 3.2.3
ATEXT = r'[\w!#$%&\'\*\+\-/=\?\^`\{\|\}~]'           # see 3.2.4. Atom
ATOM = CFWS + r'?' + ATEXT + r'+' + CFWS + r'?'      # see 3.2.4
DOT_ATOM_TEXT = ATEXT + r'+(?:\.' + ATEXT + r'+)*'   # see 3.2.4
DOT_ATOM = CFWS + r'?' + DOT_ATOM_TEXT + CFWS + r'?' # see 3.2.4
QTEXT = r'[' + NO_WS_CTL + \
        r'\x21\x23-\x5b\x5d-\x7e]'                   # see 3.2.5. Quoted strings
QCONTENT = r'(?:' + QTEXT + r'|' + \
           QUOTED_PAIR + r')'                        # see 3.2.5
QUOTED_STRING = CFWS + r'?' + r'"(?:' + FWS + \
                r'?' + QCONTENT + r')*' + FWS + \
                r'?' + r'"' + CFWS + r'?'
# local-part is either a dotted atom or a quoted string.
LOCAL_PART = r'(?:' + DOT_ATOM + r'|' + \
             QUOTED_STRING + r')'                    # see 3.4.1. Addr-spec specification
DTEXT = r'[' + NO_WS_CTL + r'\x21-\x5a\x5e-\x7e]'    # see 3.4.1
DCONTENT = r'(?:' + DTEXT + r'|' + \
           QUOTED_PAIR + r')'                        # see 3.4.1
DOMAIN_LITERAL = CFWS + r'?' + r'\[' + \
                 r'(?:' + FWS + r'?' + DCONTENT + \
                 r')*' + FWS + r'?\]' + CFWS + r'?'  # see 3.4.1
DOMAIN = r'(?:' + DOT_ATOM + r'|' + \
         DOMAIN_LITERAL + r')'                       # see 3.4.1
ADDR_SPEC = LOCAL_PART + r'@' + DOMAIN               # see 3.4.1

# A valid address will match exactly the 3.4.1 addr-spec.
VALID_ADDRESS_REGEXP = '^' + ADDR_SPEC + '$'
def validate_email(email, check_mx=False, verify=False):
    """Indicate whether the given string is a valid email address
    according to the 'addr-spec' portion of RFC 2822 (see section
    3.4.1). Parts of the spec that are marked obsolete are *not*
    included in this test, and certain arcane constructions that
    depend on circular definitions in the spec may not pass, but in
    general this should correctly identify any email address likely
    to be in use as of 2011.

    :param email: the address to validate.
    :param check_mx: also require that the domain has an MX record
        (needs the optional pyDNS package).
    :param verify: additionally ask an MX host whether the mailbox
        exists; implies ``check_mx``.
    :return: True or False.  DNS server errors and syntax failures
        both yield False.
    """
    try:
        # AssertionError from a failed syntax match is caught below and
        # converted into a False return value.
        assert re.match(VALID_ADDRESS_REGEXP, email) is not None
        check_mx |= verify
        if check_mx:
            if not DNS:
                raise Exception('For check the mx records or check if the email exists you must have installed pyDNS python package')
            DNS.DiscoverNameServers()
            hostname = email[email.find('@') + 1:]
            mx_hosts = DNS.mxlookup(hostname)
            for mx in mx_hosts:
                smtp = smtplib.SMTP()
                try:
                    smtp.connect(mx[1])
                    # MX host reachable; without mailbox verification that
                    # is enough evidence the address can receive mail.
                    if not verify:
                        return True
                    status, _ = smtp.helo()
                    if status != 250:
                        continue
                    smtp.mail('')
                    status, _ = smtp.rcpt(email)
                    if status != 250:
                        return False
                    break
                except smtplib.SMTPServerDisconnected:
                    # Server does not permit verifying users; stop probing.
                    break
                except smtplib.SMTPConnectError:
                    continue
                finally:
                    # Fix: the original leaked the SMTP socket on every
                    # path (including the early "return True").  Politely
                    # QUIT; fall back to close() if the session is already
                    # gone or was never established.
                    try:
                        smtp.quit()
                    except Exception:
                        smtp.close()
    except (AssertionError, ServerError):
        return False
    return True
# import sys
# sys.modules[__name__],sys.modules['validate_email_module'] = validate_email,sys.modules[__name__]
# from validate_email_module import *
| agpl-3.0 |
edgarli/proj8 | env/lib/python3.4/site-packages/simplejson/encoder.py | 43 | 26764 | """Implementation of JSONEncoder
"""
from __future__ import absolute_import
import re
from operator import itemgetter
# Do not import Decimal directly to avoid reload issues
import decimal
from .compat import u, unichr, binary_type, string_types, integer_types, PY3
def _import_speedups():
try:
from . import _speedups
return _speedups.encode_basestring_ascii, _speedups.make_encoder
except ImportError:
return None, None
# C implementations if the speedups extension built; otherwise (None, None)
# and the pure-Python fallbacks below are used.
c_encode_basestring_ascii, c_make_encoder = _import_speedups()

from simplejson.decoder import PosInf

#ESCAPE = re.compile(ur'[\x00-\x1f\\"\b\f\n\r\t\u2028\u2029]')
# This is required because u() will mangle the string and ur'' isn't valid
# python3 syntax
ESCAPE = re.compile(u'[\\x00-\\x1f\\\\"\\b\\f\\n\\r\\t\u2028\u2029]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
# Characters with a short JSON escape sequence; everything else that needs
# escaping falls back to \uXXXX form via the loops below.
ESCAPE_DCT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
}
for i in range(0x20):
    #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
    ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# U+2028/U+2029 are valid JSON but break JavaScript eval(); escape them too.
for i in [0x2028, 0x2029]:
    ESCAPE_DCT.setdefault(unichr(i), '\\u%04x' % (i,))

FLOAT_REPR = repr
def encode_basestring(s, _PY3=PY3, _q=u('"')):
    """Return a JSON representation of a Python string.

    Bytes input is decoded as UTF-8 first; the result is a quoted string
    with only the characters in ESCAPE escaped (non-ASCII text passes
    through unescaped).
    """
    if _PY3:
        if isinstance(s, binary_type):
            s = s.decode('utf-8')
    else:
        # Python 2: only decode when the str actually contains non-ASCII
        # bytes; pure-ASCII str can be escaped as-is.
        if isinstance(s, str) and HAS_UTF8.search(s) is not None:
            s = s.decode('utf-8')

    def replace(match):
        return ESCAPE_DCT[match.group(0)]

    return _q + ESCAPE.sub(replace, s) + _q
def py_encode_basestring_ascii(s, _PY3=PY3):
    """Return an ASCII-only JSON representation of a Python string.

    Pure-Python fallback for the C speedup: every character outside
    printable ASCII is emitted as a \\uXXXX escape (a surrogate pair for
    code points above U+FFFF).
    """
    if _PY3:
        if isinstance(s, binary_type):
            s = s.decode('utf-8')
    else:
        # Python 2: decode only if non-ASCII bytes are present.
        if isinstance(s, str) and HAS_UTF8.search(s) is not None:
            s = s.decode('utf-8')

    def replace(match):
        s = match.group(0)
        try:
            return ESCAPE_DCT[s]
        except KeyError:
            n = ord(s)
            if n < 0x10000:
                #return '\\u{0:04x}'.format(n)
                return '\\u%04x' % (n,)
            else:
                # surrogate pair
                n -= 0x10000
                s1 = 0xd800 | ((n >> 10) & 0x3ff)
                s2 = 0xdc00 | (n & 0x3ff)
                #return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
                return '\\u%04x\\u%04x' % (s1, s2)

    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
# Prefer the C speedup when available; fall back to the pure-Python version.
encode_basestring_ascii = (
    c_encode_basestring_ascii or py_encode_basestring_ascii)
class JSONEncoder(object):
    """Extensible JSON <http://json.org> encoder for Python data structures.

    Supports the following objects and types by default:

    +-------------------+---------------+
    | Python            | JSON          |
    +===================+===============+
    | dict, namedtuple  | object        |
    +-------------------+---------------+
    | list, tuple       | array         |
    +-------------------+---------------+
    | str, unicode      | string        |
    +-------------------+---------------+
    | int, long, float  | number        |
    +-------------------+---------------+
    | True              | true          |
    +-------------------+---------------+
    | False             | false         |
    +-------------------+---------------+
    | None              | null          |
    +-------------------+---------------+

    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
    implementation (to raise ``TypeError``).
    """
    # Class-level defaults; __init__ may override them (the item separator
    # loses its trailing space when an indent is in effect).
    item_separator = ', '
    key_separator = ': '
    def __init__(self, skipkeys=False, ensure_ascii=True,
                 check_circular=True, allow_nan=True, sort_keys=False,
                 indent=None, separators=None, encoding='utf-8', default=None,
                 use_decimal=True, namedtuple_as_object=True,
                 tuple_as_array=True, bigint_as_string=False,
                 item_sort_key=None, for_json=False, ignore_nan=False,
                 int_as_string_bitcount=None, iterable_as_array=False):
        """Constructor for JSONEncoder, with sensible defaults.

        If skipkeys is false, then it is a TypeError to attempt
        encoding of keys that are not str, int, long, float or None.  If
        skipkeys is True, such items are simply skipped.

        If ensure_ascii is true, the output is guaranteed to be str
        objects with all incoming unicode characters escaped.  If
        ensure_ascii is false, the output will be unicode object.

        If check_circular is true, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.

        If allow_nan is true, then NaN, Infinity, and -Infinity will be
        encoded as such.  This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.

        If sort_keys is true, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.

        If indent is a string, then JSON array elements and object members
        will be pretty-printed with a newline followed by that string repeated
        for each level of nesting. ``None`` (the default) selects the most compact
        representation without any newlines. For backwards compatibility with
        versions of simplejson earlier than 2.1.0, an integer is also accepted
        and is converted to a string with that many spaces.

        If specified, separators should be an (item_separator, key_separator)
        tuple.  The default is (', ', ': ') if *indent* is ``None`` and
        (',', ': ') otherwise.  To get the most compact JSON representation,
        you should specify (',', ':') to eliminate whitespace.

        If specified, default is a function that gets called for objects
        that can't otherwise be serialized.  It should return a JSON encodable
        version of the object or raise a ``TypeError``.

        If encoding is not None, then all input strings will be
        transformed into unicode using that encoding prior to JSON-encoding.
        The default is UTF-8.

        If use_decimal is true (not the default), ``decimal.Decimal`` will
        be supported directly by the encoder.  For the inverse, decode JSON
        with ``parse_float=decimal.Decimal``.

        If namedtuple_as_object is true (the default), objects with
        ``_asdict()`` methods will be encoded as JSON objects.

        If tuple_as_array is true (the default), tuple (and subclasses) will
        be encoded as JSON arrays.

        If *iterable_as_array* is true (default: ``False``),
        any object not in the above table that implements ``__iter__()``
        will be encoded as a JSON array.

        If bigint_as_string is true (not the default), ints 2**53 and higher
        or lower than -2**53 will be encoded as strings.  This is to avoid the
        rounding that happens in Javascript otherwise.

        If int_as_string_bitcount is a positive number (n), then int of size
        greater than or equal to 2**n or lower than or equal to -2**n will be
        encoded as strings.

        If specified, item_sort_key is a callable used to sort the items in
        each dictionary.  This is useful if you want to sort items other than
        in alphabetical order by key.

        If for_json is true (not the default), objects with a ``for_json()``
        method will use the return value of that method for encoding as JSON
        instead of the object.

        If *ignore_nan* is true (default: ``False``), then out of range
        :class:`float` values (``nan``, ``inf``, ``-inf``) will be serialized
        as ``null`` in compliance with the ECMA-262 specification.  If true,
        this will override *allow_nan*.
        """
        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.use_decimal = use_decimal
        self.namedtuple_as_object = namedtuple_as_object
        self.tuple_as_array = tuple_as_array
        self.iterable_as_array = iterable_as_array
        self.bigint_as_string = bigint_as_string
        self.item_sort_key = item_sort_key
        self.for_json = for_json
        self.ignore_nan = ignore_nan
        self.int_as_string_bitcount = int_as_string_bitcount
        # Backwards compatibility: an integer indent means that many spaces.
        if indent is not None and not isinstance(indent, string_types):
            indent = indent * ' '
        self.indent = indent
        if separators is not None:
            self.item_separator, self.key_separator = separators
        elif indent is not None:
            # When pretty-printing, the newline supplies the visual gap, so
            # drop the space after the comma.
            self.item_separator = ','
        if default is not None:
            self.default = default
        self.encoding = encoding
def default(self, o):
"""Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError(repr(o) + " is not JSON serializable")
    def encode(self, o):
        """Return a JSON string representation of a Python data structure.

        >>> from simplejson import JSONEncoder
        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'
        """
        # This is for extremely simple cases and benchmarks: a bare string
        # can be escaped and returned without building the full encoder.
        if isinstance(o, binary_type):
            _encoding = self.encoding
            if (_encoding is not None and not (_encoding == 'utf-8')):
                o = o.decode(_encoding)
        if isinstance(o, string_types):
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed.  The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = self.iterencode(o, _one_shot=True)
        if not isinstance(chunks, (list, tuple)):
            chunks = list(chunks)
        if self.ensure_ascii:
            return ''.join(chunks)
        else:
            return u''.join(chunks)
    def iterencode(self, o, _one_shot=False):
        """Encode the given object and yield each string
        representation as available.

        For example::

            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)
        """
        # markers tracks object ids currently being encoded, for circular
        # reference detection; None disables the check entirely.
        if self.check_circular:
            markers = {}
        else:
            markers = None
        if self.ensure_ascii:
            _encoder = encode_basestring_ascii
        else:
            _encoder = encode_basestring
        if self.encoding != 'utf-8':
            # Wrap the escaper so bytes input is first decoded with the
            # configured (non-UTF-8) encoding.
            def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
                if isinstance(o, binary_type):
                    o = o.decode(_encoding)
                return _orig_encoder(o)

        def floatstr(o, allow_nan=self.allow_nan, ignore_nan=self.ignore_nan,
                _repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf):
            # Check for specials.  Note that this type of test is processor
            # and/or platform-specific, so do tests which don't depend on
            # the internals.
            if o != o:
                text = 'NaN'
            elif o == _inf:
                text = 'Infinity'
            elif o == _neginf:
                text = '-Infinity'
            else:
                if type(o) != float:
                    # See #118, do not trust custom str/repr
                    o = float(o)
                return _repr(o)
            if ignore_nan:
                text = 'null'
            elif not allow_nan:
                raise ValueError(
                    "Out of range float values are not JSON compliant: " +
                    repr(o))
            return text

        key_memo = {}
        # bigint_as_string is shorthand for a 53-bit int_as_string_bitcount
        # (the precision limit of a JavaScript double).
        int_as_string_bitcount = (
            53 if self.bigint_as_string else self.int_as_string_bitcount)
        # The C encoder only handles the compact one-shot case; pretty
        # printing always goes through the pure-Python generator.
        if (_one_shot and c_make_encoder is not None
                and self.indent is None):
            _iterencode = c_make_encoder(
                markers, self.default, _encoder, self.indent,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, self.allow_nan, key_memo, self.use_decimal,
                self.namedtuple_as_object, self.tuple_as_array,
                int_as_string_bitcount,
                self.item_sort_key, self.encoding, self.for_json,
                self.ignore_nan, decimal.Decimal, self.iterable_as_array)
        else:
            _iterencode = _make_iterencode(
                markers, self.default, _encoder, self.indent, floatstr,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, _one_shot, self.use_decimal,
                self.namedtuple_as_object, self.tuple_as_array,
                int_as_string_bitcount,
                self.item_sort_key, self.encoding, self.for_json,
                self.iterable_as_array, Decimal=decimal.Decimal)
        try:
            return _iterencode(o, 0)
        finally:
            key_memo.clear()
class JSONEncoderForHTML(JSONEncoder):
    """An encoder that produces JSON safe to embed in HTML.

    To embed JSON content in, say, a script tag on a web page, the
    characters &, < and > should be escaped.  They cannot be escaped
    with the usual entities (e.g. &amp;) because they are not expanded
    within <script> tags.
    """

    # (character, \uXXXX replacement) pairs applied to every output chunk.
    _HTML_ESCAPES = (('&', '\\u0026'), ('<', '\\u003c'), ('>', '\\u003e'))

    def encode(self, o):
        # Bypass JSONEncoder.encode: its simple-case fast paths would skip
        # the HTML escaping performed by this class's iterencode.
        chunks = self.iterencode(o, True)
        joiner = '' if self.ensure_ascii else u''
        return joiner.join(chunks)

    def iterencode(self, o, _one_shot=False):
        base_chunks = super(JSONEncoderForHTML, self).iterencode(o, _one_shot)
        for chunk in base_chunks:
            for raw, escaped in self._HTML_ESCAPES:
                chunk = chunk.replace(raw, escaped)
            yield chunk
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
        _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        _use_decimal, _namedtuple_as_object, _tuple_as_array,
        _int_as_string_bitcount, _item_sort_key,
        _encoding, _for_json,
        _iterable_as_array,
        ## HACK: hand-optimized bytecode; turn globals into locals
        _PY3=PY3,
        ValueError=ValueError,
        string_types=string_types,
        Decimal=None,
        dict=dict,
        float=float,
        id=id,
        integer_types=integer_types,
        isinstance=isinstance,
        list=list,
        str=str,
        tuple=tuple,
        iter=iter,
        ):
    # Build and return the pure-Python recursive encoder generator.  All
    # configuration is captured in the closure so the hot loops only touch
    # fast local names.
    if _use_decimal and Decimal is None:
        Decimal = decimal.Decimal
    if _item_sort_key and not callable(_item_sort_key):
        raise TypeError("item_sort_key must be None or callable")
    elif _sort_keys and not _item_sort_key:
        # sort_keys is implemented as an item_sort_key on the key element.
        _item_sort_key = itemgetter(0)

    if (_int_as_string_bitcount is not None and
            (_int_as_string_bitcount <= 0 or
             not isinstance(_int_as_string_bitcount, integer_types))):
        raise TypeError("int_as_string_bitcount must be a positive integer")

    def _encode_int(value):
        # Ints outside +/- 2**bitcount are emitted as quoted strings so that
        # JavaScript consumers don't silently lose precision.
        skip_quoting = (
            _int_as_string_bitcount is None
            or
            _int_as_string_bitcount < 1
        )
        if type(value) not in integer_types:
            # See #118, do not trust custom str/repr
            value = int(value)
        if (
            skip_quoting or
            (-1 << _int_as_string_bitcount)
            < value <
            (1 << _int_as_string_bitcount)
        ):
            return str(value)
        return '"' + str(value) + '"'

    def _iterencode_list(lst, _current_indent_level):
        if not lst:
            yield '[]'
            return
        if markers is not None:
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = '['
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (_indent * _current_indent_level)
            separator = _item_separator + newline_indent
            buf += newline_indent
        else:
            newline_indent = None
            separator = _item_separator
        first = True
        for value in lst:
            # buf carries the pending prefix ('[' on the first item, the
            # separator afterwards) so each item is yielded in one chunk.
            if first:
                first = False
            else:
                buf = separator
            if (isinstance(value, string_types) or
                (_PY3 and isinstance(value, binary_type))):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, integer_types):
                yield buf + _encode_int(value)
            elif isinstance(value, float):
                yield buf + _floatstr(value)
            elif _use_decimal and isinstance(value, Decimal):
                yield buf + str(value)
            else:
                # Container or custom object: flush the prefix, then recurse.
                yield buf
                for_json = _for_json and getattr(value, 'for_json', None)
                if for_json and callable(for_json):
                    chunks = _iterencode(for_json(), _current_indent_level)
                elif isinstance(value, list):
                    chunks = _iterencode_list(value, _current_indent_level)
                else:
                    _asdict = _namedtuple_as_object and getattr(value, '_asdict', None)
                    if _asdict and callable(_asdict):
                        chunks = _iterencode_dict(_asdict(),
                                                  _current_indent_level)
                    elif _tuple_as_array and isinstance(value, tuple):
                        chunks = _iterencode_list(value, _current_indent_level)
                    elif isinstance(value, dict):
                        chunks = _iterencode_dict(value, _current_indent_level)
                    else:
                        chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if first:
            # iterable_as_array misses the fast path at the top
            yield '[]'
        else:
            if newline_indent is not None:
                _current_indent_level -= 1
                yield '\n' + (_indent * _current_indent_level)
            yield ']'
        if markers is not None:
            del markers[markerid]

    def _stringify_key(key):
        # Coerce a non-string dict key to its JSON string form, or return
        # None (meaning "skip this item") when _skipkeys is set.
        if isinstance(key, string_types): # pragma: no cover
            pass
        elif isinstance(key, binary_type):
            key = key.decode(_encoding)
        elif isinstance(key, float):
            key = _floatstr(key)
        elif key is True:
            key = 'true'
        elif key is False:
            key = 'false'
        elif key is None:
            key = 'null'
        elif isinstance(key, integer_types):
            if type(key) not in integer_types:
                # See #118, do not trust custom str/repr
                key = int(key)
            key = str(key)
        elif _use_decimal and isinstance(key, Decimal):
            key = str(key)
        elif _skipkeys:
            key = None
        else:
            raise TypeError("key " + repr(key) + " is not a string")
        return key

    def _iterencode_dict(dct, _current_indent_level):
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (_indent * _current_indent_level)
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _PY3:
            iteritems = dct.items()
        else:
            iteritems = dct.iteritems()
        if _item_sort_key:
            # Sorting requires stringified keys up front so the sort key
            # callable sees the final key text.
            items = []
            for k, v in dct.items():
                if not isinstance(k, string_types):
                    k = _stringify_key(k)
                    if k is None:
                        continue
                items.append((k, v))
            items.sort(key=_item_sort_key)
        else:
            items = iteritems
        for key, value in items:
            if not (_item_sort_key or isinstance(key, string_types)):
                key = _stringify_key(key)
                if key is None:
                    # _skipkeys must be True
                    continue
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if (isinstance(value, string_types) or
                (_PY3 and isinstance(value, binary_type))):
                yield _encoder(value)
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, integer_types):
                yield _encode_int(value)
            elif isinstance(value, float):
                yield _floatstr(value)
            elif _use_decimal and isinstance(value, Decimal):
                yield str(value)
            else:
                for_json = _for_json and getattr(value, 'for_json', None)
                if for_json and callable(for_json):
                    chunks = _iterencode(for_json(), _current_indent_level)
                elif isinstance(value, list):
                    chunks = _iterencode_list(value, _current_indent_level)
                else:
                    _asdict = _namedtuple_as_object and getattr(value, '_asdict', None)
                    if _asdict and callable(_asdict):
                        chunks = _iterencode_dict(_asdict(),
                                                  _current_indent_level)
                    elif _tuple_as_array and isinstance(value, tuple):
                        chunks = _iterencode_list(value, _current_indent_level)
                    elif isinstance(value, dict):
                        chunks = _iterencode_dict(value, _current_indent_level)
                    else:
                        chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (_indent * _current_indent_level)
        yield '}'
        if markers is not None:
            del markers[markerid]

    def _iterencode(o, _current_indent_level):
        # Top-level dispatch: scalars are yielded directly; containers go
        # through the specialized generators; anything else is converted
        # via for_json / _asdict / iteration / the default() hook.
        if (isinstance(o, string_types) or
            (_PY3 and isinstance(o, binary_type))):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, integer_types):
            yield _encode_int(o)
        elif isinstance(o, float):
            yield _floatstr(o)
        else:
            for_json = _for_json and getattr(o, 'for_json', None)
            if for_json and callable(for_json):
                for chunk in _iterencode(for_json(), _current_indent_level):
                    yield chunk
            elif isinstance(o, list):
                for chunk in _iterencode_list(o, _current_indent_level):
                    yield chunk
            else:
                _asdict = _namedtuple_as_object and getattr(o, '_asdict', None)
                if _asdict and callable(_asdict):
                    for chunk in _iterencode_dict(_asdict(),
                            _current_indent_level):
                        yield chunk
                elif (_tuple_as_array and isinstance(o, tuple)):
                    for chunk in _iterencode_list(o, _current_indent_level):
                        yield chunk
                elif isinstance(o, dict):
                    for chunk in _iterencode_dict(o, _current_indent_level):
                        yield chunk
                elif _use_decimal and isinstance(o, Decimal):
                    yield str(o)
                else:
                    # "while" used as a breakable one-shot block.
                    while _iterable_as_array:
                        # Markers are not checked here because it is valid for
                        # an iterable to return self.
                        try:
                            o = iter(o)
                        except TypeError:
                            break
                        for chunk in _iterencode_list(o, _current_indent_level):
                            yield chunk
                        return
                    if markers is not None:
                        markerid = id(o)
                        if markerid in markers:
                            raise ValueError("Circular reference detected")
                        markers[markerid] = o
                    o = _default(o)
                    for chunk in _iterencode(o, _current_indent_level):
                        yield chunk
                    if markers is not None:
                        del markers[markerid]

    return _iterencode
| artistic-2.0 |
xionzz/earthquake | venv/lib/python2.7/site-packages/setuptools/extension.py | 165 | 1731 | import sys
import re
import functools
import distutils.core
import distutils.extension
from setuptools.dist import _get_unpatched
_Extension = _get_unpatched(distutils.core.Extension)
def have_pyrex():
    """
    Return True if Cython or Pyrex can be imported.
    """
    candidates = ('Cython.Distutils.build_ext', 'Pyrex.Distutils.build_ext')
    for module_name in candidates:
        try:
            # Equivalent to: from <module_name> import build_ext
            __import__(module_name, fromlist=['build_ext']).build_ext
        except Exception:
            continue
        return True
    return False
class Extension(_Extension):
    """Extension that uses '.c' files in place of '.pyx' files"""

    def __init__(self, *args, **kw):
        _Extension.__init__(self, *args, **kw)
        self._convert_pyx_sources_to_lang()

    def _convert_pyx_sources_to_lang(self):
        """
        Replace sources with .pyx extensions to sources with the target
        language extension. This mechanism allows language authors to supply
        pre-converted sources but to prefer the .pyx sources.
        """
        if have_pyrex():
            # the build has Cython, so allow it to compile the .pyx files
            return
        lang = self.language or ''
        target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
        # Fix: the dot must be escaped.  The original pattern '.pyx$' lets
        # '.' match any character, so a source named e.g. 'modpyx' would
        # also have been rewritten.
        sub = functools.partial(re.sub, r'\.pyx$', target_ext)
        self.sources = list(map(sub, self.sources))
class Library(Extension):
    """Just like a regular Extension, but built as a library instead"""
    # Marker subclass: setuptools build commands check isinstance(ext, Library)
    # to decide how to link the result; no extra behavior is needed here.
# Monkey-patch distutils so every Extension created during the build uses
# the Pyrex/Cython-aware subclass defined above.
distutils.core.Extension = Extension
distutils.extension.Extension = Extension
if 'distutils.command.build_ext' in sys.modules:
    # build_ext imported Extension by name before we patched; fix its copy too.
    sys.modules['distutils.command.build_ext'].Extension = Extension
| mit |
shsingh/ansible | lib/ansible/modules/cloud/google/gce_snapshot.py | 29 | 6899 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gce_snapshot
version_added: "2.3"
short_description: Create or destroy snapshots for GCE storage volumes
description:
- Manages snapshots for GCE instances. This module manages snapshots for
the storage volumes of a GCE compute instance. If there are multiple
volumes, each snapshot will be prepended with the disk name
options:
instance_name:
description:
- The GCE instance to snapshot
required: True
snapshot_name:
description:
- The name of the snapshot to manage
disks:
description:
- A list of disks to create snapshots for. If none is provided,
all of the volumes will be snapshotted
default: all
required: False
state:
description:
- Whether a snapshot should be C(present) or C(absent)
required: false
default: present
choices: [present, absent]
service_account_email:
description:
- GCP service account email for the project where the instance resides
required: true
credentials_file:
description:
- The path to the credentials file associated with the service account
required: true
project_id:
description:
- The GCP project ID to use
required: true
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.19.0"
author: Rob Wagner (@robwagner33)
'''
EXAMPLES = '''
- name: Create gce snapshot
gce_snapshot:
instance_name: example-instance
snapshot_name: example-snapshot
state: present
service_account_email: project_name@appspot.gserviceaccount.com
credentials_file: /path/to/credentials
project_id: project_name
delegate_to: localhost
- name: Delete gce snapshot
gce_snapshot:
instance_name: example-instance
snapshot_name: example-snapshot
state: absent
service_account_email: project_name@appspot.gserviceaccount.com
credentials_file: /path/to/credentials
project_id: project_name
delegate_to: localhost
# This example creates snapshots for only two of the available disks as
# disk0-example-snapshot and disk1-example-snapshot
- name: Create snapshots of specific disks
gce_snapshot:
instance_name: example-instance
snapshot_name: example-snapshot
state: present
disks:
- disk0
- disk1
service_account_email: project_name@appspot.gserviceaccount.com
credentials_file: /path/to/credentials
project_id: project_name
delegate_to: localhost
'''
RETURN = '''
snapshots_created:
description: List of newly created snapshots
returned: When snapshots are created
type: list
sample: "[disk0-example-snapshot, disk1-example-snapshot]"
snapshots_deleted:
description: List of destroyed snapshots
returned: When snapshots are deleted
type: list
sample: "[disk0-example-snapshot, disk1-example-snapshot]"
snapshots_existing:
description: List of snapshots that already existed (no-op)
returned: When snapshots were already present
type: list
sample: "[disk0-example-snapshot, disk1-example-snapshot]"
snapshots_absent:
description: List of snapshots that were already absent (no-op)
returned: When snapshots were already absent
type: list
sample: "[disk0-example-snapshot, disk1-example-snapshot]"
'''
try:
    from libcloud.compute.types import Provider
    # Touch the GCE attribute so a libcloud build without GCE support is
    # treated the same as a missing libcloud.
    _ = Provider.GCE
    HAS_LIBCLOUD = True
except ImportError:
    HAS_LIBCLOUD = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gce import gce_connect
def find_snapshot(volume, name):
    '''
    Check if there is a snapshot already created with the given name for
    the passed in volume.

    Args:
        volume: A gce StorageVolume object to manage
        name: The name of the snapshot to look for

    Returns:
        The VolumeSnapshot object if one is found
    '''
    # Scan every snapshot; like the original, the last snapshot whose name
    # matches wins (names are normally unique, so this is the only match).
    match = None
    for candidate in volume.list_snapshots():
        if candidate.name == name:
            match = candidate
    return match
def main():
    """Ansible entry point: create or delete snapshots for a GCE instance's
    disks and exit with a JSON summary of what changed."""
    module = AnsibleModule(
        argument_spec=dict(
            instance_name=dict(required=True),
            snapshot_name=dict(required=True),
            state=dict(choices=['present', 'absent'], default='present'),
            disks=dict(default=None, type='list'),
            service_account_email=dict(type='str'),
            credentials_file=dict(type='path'),
            project_id=dict(type='str')
        )
    )

    if not HAS_LIBCLOUD:
        module.fail_json(msg='libcloud with GCE support (0.19.0+) is required for this module')

    gce = gce_connect(module)

    instance_name = module.params.get('instance_name')
    snapshot_name = module.params.get('snapshot_name')
    disks = module.params.get('disks')
    state = module.params.get('state')

    # Result buckets mirror the RETURN documentation: each disk lands in
    # exactly one list depending on desired state and what already exists.
    json_output = dict(
        changed=False,
        snapshots_created=[],
        snapshots_deleted=[],
        snapshots_existing=[],
        snapshots_absent=[]
    )

    snapshot = None
    instance = gce.ex_get_node(instance_name, 'all')
    instance_disks = instance.extra['disks']

    for instance_disk in instance_disks:
        disk_snapshot_name = snapshot_name
        disk_info = gce._get_components_from_path(instance_disk['source'])
        device_name = disk_info['name']
        device_zone = disk_info['zone']
        # disks=None means "snapshot every attached disk".
        if disks is None or device_name in disks:
            volume_obj = gce.ex_get_volume(device_name, device_zone)

            # If we have more than one disk to snapshot, prepend the disk name
            if len(instance_disks) > 1:
                disk_snapshot_name = device_name + "-" + disk_snapshot_name

            snapshot = find_snapshot(volume_obj, disk_snapshot_name)

            if snapshot and state == 'present':
                json_output['snapshots_existing'].append(disk_snapshot_name)

            elif snapshot and state == 'absent':
                snapshot.destroy()
                json_output['changed'] = True
                json_output['snapshots_deleted'].append(disk_snapshot_name)

            elif not snapshot and state == 'present':
                volume_obj.snapshot(disk_snapshot_name)
                json_output['changed'] = True
                json_output['snapshots_created'].append(disk_snapshot_name)

            elif not snapshot and state == 'absent':
                json_output['snapshots_absent'].append(disk_snapshot_name)

    module.exit_json(**json_output)
# Standard Ansible module convention: only run when executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
averainy/averainy | catsup/catsup/cli.py | 1 | 6122 | import sys
import os
major = sys.version_info[0]
if major < 3:
reload(sys)
sys.setdefaultencoding('utf-8')
from catsup.options import g
from catsup.logger import logger, enable_pretty_logging
enable_pretty_logging()
import catsup
doc = """Catsup v%s
Usage:
catsup init [<path>]
catsup build [-s <file>|--settings=<file>]
catsup deploy [-s <file>|--settings=<file>]
catsup git [-s <file>|--settings=<file>]
catsup rsync [-s <file>|--settings=<file>]
catsup server [-s <file>|--settings=<file>] [-p <port>|--port=<port>]
catsup webhook [-s <file>|--settings=<file>] [-p <port>|--port=<port>]
catsup watch [-s <file>|--settings=<file>]
catsup clean [-s <file>|--settings=<file>]
catsup themes
catsup install <theme>
catsup -h | --help
catsup --version
Options:
-h --help Show this screen and exit.
-s --settings=<file> specify a config file. [default: config.json]
-f --file=<file> specify a wordpress output file.
-o --output=<dir> specify a output folder. [default: .]
-p --port=<port> specify the server port. [default: 8888]
-g --global install theme to global theme folder.
""" % catsup.__version__
from parguments import Parguments
parguments = Parguments(doc, version=catsup.__version__)
@parguments.command
def init(path):
    """
    Usage:
        catsup init [<path>]
    Options:
        -h --help Show this screen and exit.
    """
    # NOTE: parguments parses this docstring (docopt-style) to build the
    # sub-command's CLI, so its wording/format is behavioral.
    # Write a fresh config.json skeleton into `path` (or the cwd).
    from catsup.parser.utils import create_config_file
    create_config_file(path)
@parguments.command
def build(settings):
    """
    Usage:
        catsup build [-s <file>|--settings=<file>]
    Options:
        -h --help Show this screen and exit.
        -s --settings=<file> specify a setting file. [default: config.json]
    """
    # NOTE: parguments parses this docstring (docopt-style); keep it intact.
    # Generate the static site described by the settings file.
    from catsup.generator import Generator
    generator = Generator(settings)
    generator.generate()
@parguments.command
def deploy(settings):
    """
    Usage:
        catsup deploy [-s <file>|--settings=<file>]
    Options:
        -h --help Show this screen and exit.
        -s --settings=<file> specify a setting file. [default: config.json]
    """
    # NOTE: parguments parses this docstring (docopt-style); keep it intact.
    # Dispatch to the deploy backend selected in the config file.
    import catsup.parser
    import catsup.deploy
    config = catsup.parser.config(settings)
    if config.deploy.default == 'git':
        catsup.deploy.git(config)
    elif config.deploy.default == 'rsync':
        catsup.deploy.rsync(config)
    else:
        logger.error("Unknown deploy: %s" % config.deploy.default)
@parguments.command
def git(settings):
    """
    Usage:
        catsup git [-s <file>|--settings=<file>]
    Options:
        -h --help Show this screen and exit.
        -s --settings=<file> specify a setting file. [default: config.json]
    """
    # NOTE: parguments parses this docstring (docopt-style); keep it intact.
    # Deploy the generated site with git.
    #
    # Import the package, not the submodule: `import catsup.parser.config`
    # rebinds the `config` attribute on catsup.parser to the *submodule*,
    # shadowing the callable used below. deploy() already imports it this way;
    # this makes the commands consistent.
    import catsup.parser
    import catsup.deploy
    config = catsup.parser.config(settings)
    catsup.deploy.git(config)
@parguments.command
def rsync(settings):
    """
    Usage:
        catsup rsync [-s <file>|--settings=<file>]
    Options:
        -h --help Show this screen and exit.
        -s --settings=<file> specify a setting file. [default: config.json]
    """
    # NOTE: parguments parses this docstring (docopt-style); keep it intact.
    # Deploy the generated site with rsync.
    #
    # Import the package, not the submodule: `import catsup.parser.config`
    # rebinds the `config` attribute on catsup.parser to the *submodule*,
    # shadowing the callable used below (see deploy() for the working form).
    import catsup.parser
    import catsup.deploy
    config = catsup.parser.config(settings)
    catsup.deploy.rsync(config)
@parguments.command
def server(settings, port):
    """
    Usage:
        catsup server [-s <file>|--settings=<file>] [-p <port>|--port=<port>]
    Options:
        -h --help Show this screen and exit.
        -s --settings=<file> specify a setting file. [default: config.json]
        -p --port=<port> specify the server port. [default: 8888]
    """
    # NOTE: parguments parses this docstring (docopt-style); keep it intact.
    # Serve a local preview of the site; blocks until interrupted.
    import catsup.server
    preview_server = catsup.server.PreviewServer(settings, port)
    preview_server.run()
@parguments.command
def webhook(settings, port):
    """
    Usage:
        catsup webhook [-s <file>|--settings=<file>] [-p <port>|--port=<port>]
    Options:
        -h --help Show this screen and exit.
        -s --settings=<file> specify a setting file. [default: config.json]
        -p --port=<port> specify the server port. [default: 8888]
    """
    # NOTE: parguments parses this docstring (docopt-style); keep it intact.
    # Run the webhook listener (e.g. for push-triggered rebuilds); blocks.
    import catsup.server
    server = catsup.server.WebhookServer(settings, port)
    server.run()
@parguments.command
def watch(settings):
    """
    Usage:
        catsup watch [-s <file>|--settings=<file>]
    Options:
        -h --help Show this screen and exit.
        -s --settings=<file> specify a setting file. [default: config.json]
    """
    # NOTE: parguments parses this docstring (docopt-style); keep it intact.
    # Regenerate the site whenever the source or theme directory changes.
    import time
    from catsup.generator import Generator
    from catsup.server import CatsupEventHandler
    from watchdog.observers import Observer
    generator = Generator(settings)
    generator.generate()
    event_handler = CatsupEventHandler(generator)
    observer = Observer()
    for path in [generator.config.config.source, g.theme.path]:
        path = os.path.abspath(path)
        observer.schedule(event_handler, path=path, recursive=True)
    observer.start()
    try:
        # Sleep instead of busy-waiting: the original `while True: pass`
        # pinned a CPU core at 100% while idle.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        # Shut the watcher thread down cleanly on Ctrl-C.
        observer.stop()
    observer.join()
@parguments.command
def clean(settings):
    """
    Usage:
        catsup clean [-s <file>|--settings=<file>]
    Options:
        -h --help Show this screen and exit.
        -s --settings=<file> specify a setting file. [default: config.json]
    """
    # NOTE: parguments parses this docstring (docopt-style); keep it intact.
    # Remove the generated output directories, if they exist.
    #
    # Import the package, not the submodule: `import catsup.parser.config`
    # rebinds the `config` attribute on catsup.parser to the *submodule*,
    # shadowing the callable used below (see deploy() for the working form).
    import shutil
    import catsup.parser
    config = catsup.parser.config(settings)
    for path in [config.config.static_output, config.config.output]:
        if os.path.exists(path):
            shutil.rmtree(path)
@parguments.command
def themes():
    """
    Usage:
        catsup themes
    Options:
        -h --help Show this screen and exit.
    """
    # NOTE: parguments parses this docstring (docopt-style); keep it intact.
    # Print the locally installed themes.
    from catsup.parser.themes import list_themes
    list_themes()
@parguments.command
def install(name):
    """
    Usage:
        catsup install <name>
    Options:
        -h --help Show this screen and exit.
    """
    # NOTE: parguments parses this docstring (docopt-style); keep it intact.
    # Download and install the named theme.
    from catsup.themes.install import install_theme
    install_theme(name=name)
def main():
    """CLI entry point: dispatch to the sub-command chosen on argv."""
    parguments.run()
| gpl-2.0 |
xArm-Developer/xArm-Python-SDK | example/wrapper/common/6002-set_fense_mode.py | 1 | 1032 | #!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2019, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <vinman.wen@ufactory.cc> <vinman.cub@gmail.com>
"""
Example: Set reduced mode
"""
import os
import sys
import time
sys.path.append(os.path.join(os.path.dirname(__file__), '../../..'))
from xarm.wrapper import XArmAPI
from configparser import ConfigParser
# Read the arm's IP from the shared robot.conf; fall back to prompting the
# user, with 192.168.1.194 as the default.
parser = ConfigParser()
parser.read('../robot.conf')
try:
    ip = parser.get('xArm', 'ip')
except:
    ip = input('Please input the xArm ip address[192.168.1.194]:')
    if not ip:
        ip = '192.168.1.194'
arm = XArmAPI(ip)
time.sleep(0.5)
# Clear any stale warning/error state before changing controller settings.
if arm.warn_code != 0:
    arm.clean_warn()
if arm.error_code != 0:
    arm.clean_error()
# Fence boundary passed as [x_max, x_min, y_max, y_min, z_max, z_min]
# (units presumably mm -- TODO confirm against the xArm SDK docs).
x_max, x_min, y_max, y_min, z_max, z_min = 500, -500, 600, -600, 400, -400
code = arm.set_reduced_tcp_boundary([x_max, x_min, y_max, y_min, z_max, z_min])
print('set_reduced_tcp_boundary, code={}'.format(code))
# Enable fence mode so the TCP is kept inside the boundary set above.
code = arm.set_fense_mode(True)
print('set_fense_mode, code={}'.format(code))
| bsd-3-clause |
destinmoulton/squabble | pythonclient/venv/lib/python2.7/site-packages/pip/commands/show.py | 344 | 2767 | import os
from pip.basecommand import Command
from pip.log import logger
from pip._vendor import pkg_resources
class ShowCommand(Command):
    """Show information about one or more installed packages."""
    name = 'show'
    usage = """
      %prog [options] <package> ..."""
    summary = 'Show information about installed packages.'

    def __init__(self, *args, **kw):
        super(ShowCommand, self).__init__(*args, **kw)
        # -f/--files additionally prints each package's installed file list.
        self.cmd_opts.add_option(
            '-f', '--files',
            dest='files',
            action='store_true',
            default=False,
            help='Show the full list of installed files for each package.')
        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options, args):
        """Look up each named package and print its details.

        Warns and returns (no exit code) when no package names were given.
        """
        if not args:
            logger.warn('ERROR: Please provide a package name or names.')
            return
        query = args
        results = search_packages_info(query)
        print_results(results, options.files)
def search_packages_info(query):
    """
    Gather details from installed distributions. Print distribution name,
    version, location, and installed files. Installed files requires a
    pip generated 'installed-files.txt' in the distributions '.egg-info'
    directory.
    """
    # Map lower-cased project names to their distributions for O(1) lookup.
    installed = {
        dist.project_name.lower(): dist
        for dist in pkg_resources.working_set
    }
    for name in query:
        dist = installed.get(name.lower())
        if dist is None:
            # Unknown package names are silently skipped.
            continue
        info = {
            'name': dist.project_name,
            'version': dist.version,
            'location': dist.location,
            'requires': [dep.project_name for dep in dist.requires()],
        }
        # pip records installed files next to the egg metadata.
        filelist = os.path.join(
            dist.location,
            dist.egg_name() + '.egg-info',
            'installed-files.txt')
        if os.path.isfile(filelist):
            info['files'] = filelist
        yield info
def print_results(distributions, list_all_files):
    """
    Print the information from the installed distributions found.

    :param distributions: iterable of package dicts from
        search_packages_info().
    :param list_all_files: when true, also print each installed file.
    """
    for dist in distributions:
        logger.notify("---")
        logger.notify("Name: %s" % dist['name'])
        logger.notify("Version: %s" % dist['version'])
        logger.notify("Location: %s" % dist['location'])
        logger.notify("Requires: %s" % ', '.join(dist['requires']))
        if list_all_files:
            logger.notify("Files:")
            if 'files' in dist:
                # Use a context manager so the file handle is always closed;
                # the original iterated a bare open() and leaked it.
                with open(dist['files']) as installed_files:
                    for line in installed_files:
                        logger.notify("  %s" % line.strip())
            else:
                logger.notify("Cannot locate installed-files.txt")
Dob3r/python_training | test/test_add_contact.py | 1 | 1462 | # -*- coding: utf-8 -*-
from model.contact import *
import re
import pytest
# Test fixtures
def test_add_contact(app, db, json_contacts_personinfo, json_contacts_contactinfo, check_ui):
    """Adding a contact appends it to the DB list (and the UI list when
    check_ui is enabled)."""
    personinfo = json_contacts_personinfo
    contactinfo = json_contacts_contactinfo
    with pytest.allure.step("Given a Contact list"):
        old_contacts = db.get_contact_personinfo_list()
    with pytest.allure.step("When I add a Contact %s:%s to the list" % (personinfo, contactinfo)):
        app.contact.create(personinfo, contactinfo)
    with pytest.allure.step("Then the new Contact list is equal to the old list with the added Contact"):
        new_contacts = db.get_contact_personinfo_list()
        # assert len(old_contacts) + 1 == len(new_contacts)
        old_contacts.append(personinfo)
        assert sorted(old_contacts, key=PersonInfo.id_or_max) == sorted(new_contacts, key=PersonInfo.id_or_max)
        if check_ui:
            contacts_from_ui = app.contact.get_contact_personinfo_list()
            # Normalize whitespace before comparing, since the UI collapses
            # runs of spaces in displayed names.
            def clear(x):
                return re.sub("\s+", " ", x)
            def clean(contact):
                return PersonInfo(id=contact.id, firstname=clear(contact.firstname.strip()), lastname=clear(contact.lastname.strip()))
            contacts_from_db = map(clean, new_contacts)
            assert sorted(contacts_from_db, key=PersonInfo.id_or_max) == sorted(contacts_from_ui, key=PersonInfo.id_or_max)
| apache-2.0 |
ahtn/keyplus | host-software/keyplus_flasher.py | 1 | 55673 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2017 jem@seethis.link
# Licensed under the MIT license (http://opensource.org/licenses/MIT)
if 1:
# PyQt <-> PySide signal compatability
from PyQt5.QtCore import pyqtSlot, pyqtSignal
Signal = pyqtSignal
Slot = pyqtSlot
# TODO: narrow down imports
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtCore import *
# from PyQt5.QtWidgets import (
# QMainWindow, QTextEdit, QAction, QApplication, QPushButton, QProgressBar,
# QWidget, QVBoxLayout, QLabel, QHBoxLayout, QLineEdit, QGroupBox,
# QFormLayout, QScrollArea, QSizePolicy, QGridLayout, QComboBox,
# QStackedLayout, QMessageBox, QFileDialog, QErrorMessage, QTableView,
# QFont, QDialog, QTabWidget
# )
# from PyQt5.QtGui import QIcon, QIntValidator
# from PyQt5.QtCore import Qt, QBasicTimer, QSize , QFileInfo, QTimer
# from PyQt5.QtCore import Slot, Signal, QAbstractTableModel
if 0:
from PySide.QtGui import (
QMainWindow, QTextEdit, QAction, QApplication, QPushButton, QProgressBar,
QWidget, QVBoxLayout, QLabel, QHBoxLayout, QLineEdit, QGroupBox,
QFormLayout, QScrollArea, QSizePolicy, QGridLayout, QComboBox,
QStackedLayout, QMessageBox, QFileDialog, QErrorMessage, QTableView,
QFont, QDialog, QTabWidget
)
from PySide.QtGui import QIcon, QIntValidator
from PySide.QtCore import Qt, QBasicTimer, QSize , QFileInfo, QTimer
from PySide.QtCore import Slot, Signal, QAbstractTableModel
from keyplus.layout import KeyplusLayout
from keyplus.layout.parser_info import KeyplusParserInfo
from keyplus.layout.rf_settings import LayoutRFSettings
from keyplus.device_info import KeyboardDeviceTarget, KeyboardFirmwareInfo
from keyplus import chip_id
from keyplus import KeyplusKeyboard
from keyplus.exceptions import *
from keyplus.debug import DEBUG
import keyplus.usb_ids
from keyplus.usb_ids import BootloaderType
# TODO: clean up directory structure
import sys
import traceback
import datetime, time, binascii
import ruamel.yaml as yaml
import colorama
import hexdump
import copy
import easyhid
import xusbboot
import efm8boot
import kp_boot_32u4
# How long (milliseconds) transient messages stay in the status bar.
STATUS_BAR_TIMEOUT=4500

if DEBUG.gui:
    # Developer defaults: pre-fill the GUI fields so manual testing doesn't
    # require re-selecting files each run.
    # debug settings
    # DEFAULT_LAYOUT_FILE = "../layouts/basic_split_test.yaml"
    # DEFAULT_RF_FILE = ""
    # DEFAULT_FIRMWARE_FILE = ""
    # DEFAULT_DEVICE_ID = 0
    DEFAULT_LAYOUT_FILE = "../layouts/basic_split_test.yaml"
    DEFAULT_RF_FILE = "../host-software/_ignore_rf_settings.yaml"
    DEFAULT_FIRMWARE_FILE = ""
    DEFAULT_DEVICE_ID = 20
    # EFM8 testing
    # DEFAULT_LAYOUT_FILE = "../layouts/efm8_1key.yaml"
    # DEFAULT_RF_FILE = "../layouts/test_rf_config.yaml"
    # DEFAULT_FIRMWARE_FILE = "../ports/efm8/build/default/usb_keyboard-default.hex"
    # DEFAULT_DEVICE_ID = 0
else:
    # Release defaults: all GUI fields start empty.
    DEFAULT_LAYOUT_FILE = ""
    DEFAULT_RF_FILE = ""
    DEFAULT_FIRMWARE_FILE = ""
    DEFAULT_DEVICE_ID = ''
class KeyplusFlasherError(Exception):
    """Base exception for errors raised by the flasher GUI."""
    pass
def error_msg_box(msg, title="Error"):
    """Show *msg* in a modal error dialog titled *title*."""
    dialog = QMessageBox()
    dialog.setText(msg)
    dialog.setWindowTitle(title)
    dialog.exec_()
def msg_box(description="", title="Message"):
    """Show *description* in a modal message dialog titled *title*."""
    dialog = QMessageBox()
    dialog.setText(description)
    dialog.setWindowTitle(title)
    dialog.exec_()
def is_keyplus_device(device):
    """Return True if `device` is the vendor interface of a keyplus keyboard.

    Falls through (returning None, which is falsy) for unrecognized USB IDs.
    """
    # The top nibble of the USB release number encodes the keyplus protocol
    # version; each version exposes its vendor interface on a fixed
    # interface number.
    intf_num = device.interface_number
    prot_ver = device.release_number & 0xf000
    # NOTE(review): TYPE0_INTERFACE_VENDOR / TYPE1_INTERFACE_VENDOR are not
    # defined or imported anywhere visible in this file -- verify they come
    # from a star-import, otherwise this raises NameError at runtime.
    if (prot_ver, intf_num) not in [
        (0x0000, TYPE0_INTERFACE_VENDOR),
        (0x1000, TYPE1_INTERFACE_VENDOR),
    ]:
        return False
    usb_id = (device.vendor_id, device.product_id)
    if (
        usb_id in keyplus.usb_ids.KEYPLUS_USB_IDS or
        # 0x6666:0x1111*i are development/test USB IDs.
        usb_id in [(0x6666, 0x1111*i) for i in range(16)]
    ):
        return True
def get_boot_loader_type(device):
    """Return the BootloaderType for `device`, or None if its USB ID is unknown."""
    key = (device.vendor_id, device.product_id)
    entry = keyplus.usb_ids.BOOTLOADER_USB_IDS.get(key)
    return entry.bootloader if entry is not None else None
def is_xusb_bootloader_device(device):
    # Device is running the xusb-boot bootloader.
    return get_boot_loader_type(device) == BootloaderType.XUSB_BOOT

def is_kp_boot_device(device):
    # Device is running the kp_boot_32u4 bootloader.
    return get_boot_loader_type(device) == BootloaderType.KP_BOOT_32U4

def is_nrf24lu1p_bootloader_device(device):
    # Device is running the nRF24LU1+ factory bootloader.
    return get_boot_loader_type(device) == BootloaderType.NRF24LU1P_FACTORY

def is_efm8_boot_device(device):
    # Device is running the EFM8 factory HID bootloader.
    return get_boot_loader_type(device) == BootloaderType.EFM8_BOOT

def is_unifying_bootloader_device(device):
    # Unifying bootloader support is not implemented yet.
    return False

def is_supported_device(device):
    # Supported == running keyplus firmware or any known bootloader.
    return is_keyplus_device(device) or is_bootloader_device(device)

def is_bootloader_device(device):
    return keyplus.usb_ids.is_bootloader_usb_id(device.vendor_id, device.product_id)
class DeviceWidget(QGroupBox):
    """List entry for one connected USB device (keyplus keyboard or bootloader).

    Shows a summary label plus "Program" and "Info"/"Reset" buttons, and
    re-emits button clicks as signals carrying the device's USB path so the
    main window can act on them.
    """

    PROGRAM_SIGNAL = 0
    INFO_SIGNAL = 1

    # Each signal carries the easyhid device path identifying this device.
    program = Signal(str)
    show_info = Signal(str)
    reset = Signal(str)

    def __init__(self, device):
        super(DeviceWidget, self).__init__(None)
        self.device = device
        self.has_critical_error = False
        self.label = QLabel()
        self.initUI()

    def tryOpenDevice(self, device):
        """Return a KeyplusKeyboard for `device`, or None if it can't be opened."""
        try:
            return KeyplusKeyboard(device)
        except Exception as err:
            return None

    # label for generic keyplus device
    def setup_keyplus_label(self):
        """Fill the label with settings/firmware info read from a keyplus device.

        Sets self.label to None when the device can't be opened; DeviceList
        uses that to drop this widget.
        """
        kb = self.tryOpenDevice(self.device)
        if kb == None:
            self.label = None
            return
            # raise KeyplusUSBCommandError()
        with kb:
            settingsInfo = kb.get_device_info()
            firmwareInfo = kb.get_firmware_info()
            errorInfo = kb.get_error_info()
            self.has_critical_error = errorInfo.has_critical_error()
            if settingsInfo.crc == settingsInfo.compute_crc():
                # Settings block is valid: show its name/id/timestamp.
                build_time_str = str(settingsInfo.get_timestamp())
                device_name = settingsInfo.get_device_name()
                self.label.setText('{} | {} | Firmware v{}.{}.{}\n'
                    'Device id: {}\n'
                    'Serial number: {}\n'
                    'Last time updated: {}'
                    .format(
                        self.device.manufacturer_string,
                        device_name,
                        firmwareInfo.version_major,
                        firmwareInfo.version_minor,
                        firmwareInfo.version_patch,
                        settingsInfo.device_id,
                        self.device.serial_number,
                        build_time_str
                    )
                )
            else:
                # CRC doesn't match
                if settingsInfo.is_empty:
                    self.label.setText('{} | {} | Firmware v{}.{}.{}\n'
                        'Warning: Empty settings!\n'
                        'Serial number: {}\n'
                        .format(
                            self.device.manufacturer_string,
                            self.device.product_string,
                            firmwareInfo.version_major,
                            firmwareInfo.version_minor,
                            firmwareInfo.version_patch,
                            self.device.serial_number,
                        )
                    )
                else:
                    # corrupt settings in the flash
                    build_time_str = str(settingsInfo.get_timestamp())
                    self.label.setText('{} | {} | Firmware v{}.{}.{}\n'
                        'WARNING: Settings are uninitialized\n'
                        'Serial number: {}\n'
                        .format(
                            self.device.manufacturer_string,
                            self.device.product_string,
                            firmwareInfo.version_major,
                            firmwareInfo.version_minor,
                            firmwareInfo.version_patch,
                            self.device.serial_number,
                        )
                    )

    # xusb_boot bootloader device
    def setup_xusb_bootloader_label(self):
        """Fill the label with info queried from an xusb-boot bootloader."""
        try:
            self.device.open()
            bootloader_info = xusbboot.get_boot_info(self.device)
        except easyhid.HIDException as err:
            # Incase opening the device fails
            raise KeyplusFlasherError("Error Opening Device: {} | {}:{}"
                .format(
                    self.device.path,
                    self.device.vendor_id,
                    self.device.product_id
                ),
            )
        self.label.setText('{} | {} | Bootloader v{}.{}\n'
            'MCU: {}\n'
            'Flash size: {}\n'
            'Serial number: {}\n'
            .format(
                self.device.manufacturer_string,
                self.device.product_string,
                bootloader_info.version_major,
                bootloader_info.version_minor,
                bootloader_info.mcu_string,
                bootloader_info.flash_size,
                self.device.serial_number
            )
        )

    def setup_kp_boot_32u4_label(self):
        """Fill the label with info queried from a kp_boot_32u4 bootloader."""
        try:
            boot_dev = kp_boot_32u4.BootloaderDevice(self.device)
        except easyhid.HIDException as err:
            # Incase opening the device fails
            raise KeyplusFlasherError("Error Opening Device: {} | {}:{}"
                .format(
                    self.device.path,
                    self.device.vendor_id,
                    self.device.product_id
                ),
            )
        self.label.setText('kp_boot_32u4 - v{}\n'
            'MCU: {}\n'
            'Flash size: {} EEPROM size: {}\n'
            'Bootloader size: {}\n'
            .format(
                boot_dev.version,
                boot_dev.chip_name,
                boot_dev.flash_size,
                boot_dev.eeprom_size,
                boot_dev.boot_size,
            )
        )

    def setup_efm8_boot_label(self):
        """Fill the label with info queried from the EFM8 factory bootloader."""
        try:
            boot_dev = efm8boot.EFM8BootloaderHID(self.device)
        except easyhid.HIDException as err:
            # Incase opening the device fails
            raise KeyplusFlasherError("Error Opening Device: {} | {}:{}"
                .format(
                    self.device.path,
                    self.device.vendor_id,
                    self.device.product_id
                ),
            )
        with boot_dev:
            version = boot_dev.get_version()
        self.label.setText('EFM8 Factory Bootloader\n'
            'MCU: {}\n'
            'Bootloader version: 0x{:02X}\n'
            'Flash size: {}kB Bootloader size: {}kB\n'
            .format(
                boot_dev.info.name,
                version,
                (boot_dev.info.flashSize)/2**10,
                (boot_dev.info.flashSize - boot_dev.info.bootloaderStart)/2**10,
            )
        )

    # nrf24lu1p
    def setup_nrf24lu1p_label(self):
        """Fill the label for an nRF24LU1+ factory bootloader.

        The version is currently hard-coded to 0.0; the trailing
        manufacturer/product arguments are unused by the format string and
        harmlessly ignored by str.format().
        """
        # try:
        #     self.device.open()
        #     bootloader_info = xusbboot.get_boot_info(self.device)
        #     self.device.close()
        # except TimeoutError as err:
        #     # Incase opening the device fails
        #     raise Exception ("Error Opening Device: {} | {}:{}"
        #         .format(
        #             self.device.path,
        #             self.device.vendor_id,
        #             self.device.product_id
        #         ),
        #         file=sys.stderr
        #     )
        self.label.setText('nRF24LU1+ Bootloader v{}.{}\n'
            'MCU: nRF24LU1+\n'
            .format(
                0,
                0,
                self.device.manufacturer_string,
                self.device.product_string,
            )
        )

    def updateLabel(self):
        """Dispatch to the label builder that matches the device's USB IDs."""
        if is_keyplus_device(self.device):
            self.setup_keyplus_label()
        elif is_xusb_bootloader_device(self.device):
            self.setup_xusb_bootloader_label()
        elif is_kp_boot_device(self.device):
            self.setup_kp_boot_32u4_label()
        elif is_efm8_boot_device(self.device):
            self.setup_efm8_boot_label()
        elif is_nrf24lu1p_bootloader_device(self.device):
            self.setup_nrf24lu1p_label()
        else:
            self.device.close()
            raise KeyplusFlasherError("Unsupported USB device {}:{}".format(
                self.device.vendor_id, self.device.product_id))
        self.device.close()
        self.updateStyle()

    def updateStyle(self):
        """Tint the label red when the device reported a critical error."""
        if self.label == None:
            return
        if self.has_critical_error:
            self.label.setStyleSheet("""
            QLabel {
                background: #F88;
                border: 1px solid;
                padding: 2px;
                font: 11pt;
            }
            """)
        else:
            # Fix: the original stylesheet contained a stray
            # "self.updateLabel" line inside the CSS text (silently ignored
            # by Qt); it has been removed.
            self.label.setStyleSheet("""
            QLabel {
                background: #FFF;
                border: 1px solid;
                padding: 2px;
                font: 11pt;
            }
            """)

    def initUI(self):
        """Build the label + buttons layout for this device entry."""
        programIcon = QIcon('img/download.png')
        infoIcon = QIcon('img/info.png')
        self.updateLabel()
        # updateLabel() sets label to None for devices that can't be opened;
        # leave the widget empty in that case (DeviceList will skip it).
        if self.label == None:
            return
        self.label.setTextInteractionFlags(Qt.TextSelectableByMouse)
        self.updateStyle()
        self.label.setFixedHeight(90)
        self.label.setMinimumWidth(390)
        self.programButton = QPushButton(' Program')
        self.programButton.setIcon(programIcon)
        self.programButton.clicked.connect(self.programSignal)
        # Bootloaders get a "Reset" button; running firmware gets "Info".
        if is_bootloader_device(self.device):
            self.secondaryButton = QPushButton('Reset')
            self.secondaryButton.clicked.connect(self.resetSignal)
        else:
            self.secondaryButton = QPushButton('Info')
            self.secondaryButton.setIcon(infoIcon)
            self.secondaryButton.clicked.connect(self.infoSignal)
        self.layout = QGridLayout()
        self.layout.addWidget(self.label, 0, 0, 2, 1)
        self.layout.addWidget(self.programButton, 0, 1)
        self.layout.addWidget(self.secondaryButton, 1, 1)
        self.setLayout(self.layout)
        self.setMaximumHeight(150)
        self.setContentsMargins(0, 0, 0, 0)
        self.setStyleSheet("""
            QGroupBox {
                border: 1px solid #CCC;
            }
        """)

    def infoSignal(self):
        """Forward the Info button click with this device's path."""
        self.show_info.emit(self.device.path)

    def resetSignal(self):
        """Forward the Reset button click with this device's path."""
        self.reset.emit(self.device.path)

    def programSignal(self):
        """Forward the Program button click with this device's path."""
        self.program.emit(self.device.path)

    def sizeHint(self):
        return QSize(560, 0)
class DeviceInformationWindow(QDialog):
    """Modal dialog with tabbed tables of a device's settings, firmware
    information and error codes.

    All three tables share the same column `header`; each tab gets its own
    DeviceInformationTable model.
    """
    def __init__(self, parent, header, device_settings, firmware_settings,
                 error_codes, *args):
        QDialog.__init__(self, parent, *args)
        self.setGeometry(300, 200, 570, 450)
        self.setWindowTitle("Device information")
        # One read-only table model per data set; `table_model` is reused
        # because each QTableView keeps its own reference.
        table_model = DeviceInformationTable(self, header, device_settings)
        dev_settings_table = QTableView()
        dev_settings_table.setModel(table_model)
        table_model = DeviceInformationTable(self, header, firmware_settings)
        fw_settings_table = QTableView()
        fw_settings_table.setModel(table_model)
        table_model = DeviceInformationTable(self, header, error_codes)
        error_code_table = QTableView()
        error_code_table.setModel(table_model)
        # set font
        # font = QFont("monospace", 10)
        # font = QFont("", 10)
        # dev_settings_table.setFont(font)
        # fw_settings_table.setFont(font)
        # set column width to fit contents (set font first!)
        dev_settings_table.resizeColumnsToContents()
        fw_settings_table.resizeColumnsToContents()
        error_code_table.resizeColumnsToContents()
        tab_view = QTabWidget()
        tab_view.addTab(dev_settings_table, "User settings")
        tab_view.addTab(fw_settings_table, "Firmware settings")
        tab_view.addTab(error_code_table, "Error Codes")
        layout = QVBoxLayout(self)
        layout.addWidget(tab_view)
        self.setLayout(layout)
class DeviceInformationTable(QAbstractTableModel):
    """Read-only table model over a list of equal-length rows.

    `data_list` is a sequence of rows (each indexable by column) and
    `header` holds the column titles.
    """
    def __init__(self, parent, header, data_list, *args):
        QAbstractTableModel.__init__(self, parent, *args)
        self.data_list = data_list
        self.header = header

    def rowCount(self, parent):
        return len(self.data_list)

    def columnCount(self, parent):
        # Column count is taken from the first row; 0 when there is no data.
        if len(self.data_list) == 0:
            return 0
        else:
            return len(self.data_list[0])

    def data(self, index, role):
        # Only plain display text is provided; no editing/decoration roles.
        if not index.isValid():
            return None
        elif role != Qt.DisplayRole:
            return None
        return self.data_list[index.row()][index.column()]

    def headerData(self, col, orientation, role):
        if orientation == Qt.Horizontal and role == Qt.DisplayRole:
            return self.header[col]
        return None
class DeviceList(QScrollArea):
    """Scrollable list of DeviceWidget entries for all supported USB devices.

    The three handler callables are connected to each widget's
    program/show_info/reset signals. updateList() is intended to be called
    periodically; it adds newly plugged devices and drops unplugged ones.
    """
    def __init__(self, programming_handler, info_handler, reset_handler):
        super(DeviceList, self).__init__()
        self.deviceWidgets = []
        self.programming_handler = programming_handler
        self.info_handler = info_handler
        self.reset_handler = reset_handler
        # Counts consecutive empty scans; drives the "Scanning..." animation.
        self.updateCounter = 0
        self.initUI()

    def initUI(self):
        """Build the scroll area containing the vertical device list."""
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        self.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        self.listWidget = QWidget()
        self.layout = QVBoxLayout()
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.listWidget.setLayout(self.layout)
        self.setWidgetResizable(True)
        self.setWidget(self.listWidget)
        self.updateList()

    # def updateLabels(self):
    #     for dev in self.deviceWidgets:
    #         dev.updateLabels()

    def clearList(self):
        """Remove every widget from the list layout.

        NOTE: `i` is never incremented -- this still terminates because
        takeAt() shrinks layout.count() each iteration, always removing
        item 0 until the layout is empty.
        """
        # look at the list of connected devices and find out which devices are
        # no longer connected and remove them
        i = 0
        while i < self.layout.count():
            devItem = self.layout.itemAt(i).widget()
            self.layout.takeAt(i).widget().deleteLater()

    def rebuildList(self):
        """Drop everything and rescan from scratch."""
        self.clearList()
        self.updateList()

    def updateList(self):
        """Synchronize the widget list with the currently attached devices."""
        self.updateCounter += 1
        deviceInfoList = list(filter(is_supported_device, easyhid.Enumeration().find()))
        deleteList = []
        deviceIds = [dev.path for dev in deviceInfoList]
        oldDevices = []
        newDevices = []
        # look at the list of connected devices and find out which devices are
        # no longer connected and remove them
        i = 0
        while i < self.layout.count():
            devItem = self.layout.itemAt(i).widget()
            if hasattr(devItem, "device") and (devItem.device.path in deviceIds):
                oldDevices.append(devItem.device)
                i += 1
            else:
                # Either the placeholder label or an unplugged device.
                self.layout.takeAt(i).widget().deleteLater()
        # Now find the list of new devices
        oldDeviceIds = [dev.path for dev in oldDevices]
        for dev in deviceInfoList:
            if dev.path in oldDeviceIds:
                continue
            else:
                newDevices.append(dev)
        for devInfo in newDevices:
            devWidget = DeviceWidget(devInfo)
            # Widgets whose device failed to open have label == None; skip.
            if devWidget.label:
                self.deviceWidgets.append(devWidget)
                self.layout.addWidget(devWidget)
                devWidget.program.connect(self.programming_handler)
                devWidget.show_info.connect(self.info_handler)
                devWidget.reset.connect(self.reset_handler)
        # if len(self.deviceWidgets) == 0:
        if len(oldDevices) == 0 and len(newDevices) == 0:
            # Nothing attached: show an animated "Scanning for devices" line.
            n = self.updateCounter % 4
            label = QLabel("Scanning for devices" + "." * n + " " * (4-n))
            self.layout.setAlignment(Qt.AlignCenter)
            self.layout.addWidget(label)
            self.deviceWidgets = []
        else:
            self.layout.setAlignment(Qt.AlignTop)
            self.updateCounter = 0
class FileSelector(QWidget):
    """Scope selector + stacked settings panes (layout / device+RF / firmware).

    The combo box index selects which pane is visible, and the getters
    expose the selected scope and the file paths/device ID entered in the
    corresponding pane.
    """
    # Scope constants; they double as combo box / stacked layout indices.
    ScopeLayout = 0
    ScopeDevice = 1
    ScopeFirmware = 2
    ScopeAll = 3

    def __init__(self):
        super(FileSelector, self).__init__()
        self.initUI()
        self.lastDir = None

    def initUI(self):
        """Build the scope combo box and the stacked settings panes."""
        self.scopeSelector = QComboBox()
        self.scopeSelector.addItem("Layout", FileSelector.ScopeLayout)
        self.scopeSelector.addItem("Device and RF", FileSelector.ScopeDevice)
        self.scopeSelector.addItem("Firmware Update", FileSelector.ScopeFirmware)
        # self.scopeSelector.addItem("All", FileSelector.ScopeAll)
        self.scopeSelector.currentIndexChanged.connect(self.scopeUpdate)
        self.layoutSettings = LayoutSettingsScope()
        self.deviceSettings = DeviceSettingsScope()
        self.firmwareSettings = FirmwareSettingsScope()
        self.scope = None
        self.layout = QVBoxLayout()
        self.layout.addWidget(self.scopeSelector)
        # Panes are added in the same order as the scope constants so the
        # combo index maps directly to the stacked layout index.
        self.stackedLayout = QStackedLayout()
        self.stackedLayout.addWidget(self.layoutSettings)
        self.stackedLayout.addWidget(self.deviceSettings)
        self.stackedLayout.addWidget(self.firmwareSettings)
        self.layout.addLayout(self.stackedLayout)
        self.setMinimumSize(0, 300)
        self.setLayout(self.layout)
        # self.updateUI(FileSelector.ScopeLayout)

    def scopeUpdate(self, index):
        # Show the pane matching the newly selected combo box entry.
        self.stackedLayout.setCurrentIndex(index)

    def updateUI(self, scope):
        """Legacy pane switcher (superseded by the stacked layout).

        NOTE(review): appears unused now -- initUI's call to it is commented
        out and scopeUpdate() handles switching; verify before removing.
        """
        if self.scope == scope:
            return
        self.layout.removeWidget(self.layoutSettings)
        self.layout.removeWidget(self.deviceSettings)
        self.layout.removeWidget(self.firmwareSettings)
        if scope == FileSelector.ScopeLayout:
            self.layout.addWidget(self.layoutSettings)
        elif scope == FileSelector.ScopeDevice:
            self.layout.addWidget(self.deviceSettings)
        elif scope == FileSelector.ScopeFirmware:
            self.layout.addWidget(self.firmwareSettings)
        elif scope == FileSelector.ScopeAll:
            self.layout.addWidget(self.layoutSettings)
            self.layout.addWidget(self.deviceSettings)
            self.layout.addWidget(self.firmwareSettings)

    def getProgramingInfo(self):
        # The combo index equals the Scope* constant for the active pane.
        return self.scopeSelector.currentIndex()

    def getFirmwareFile(self):
        return self.firmwareSettings.getFirmwareFile()

    def getLayoutFile(self):
        return self.layoutSettings.getLayoutFile()

    def getRFLayoutFile(self):
        return self.deviceSettings.getCurrentSettings()[2]

    def getRFFile(self):
        return self.deviceSettings.getCurrentSettings()[1]

    def getTargetID(self):
        return self.deviceSettings.getCurrentSettings()[0]
class LayoutSettingsScope(QGroupBox):
    """Settings pane for programming only the keyboard layout."""
    def __init__(self, parent=None):
        super(LayoutSettingsScope, self).__init__("Layout settings:")
        self.initUI()

    def initUI(self):
        """Build the layout-file picker and the explanatory note."""
        self.fileWidget = FileBrowseWidget("Layout file (*.yaml)")
        self.fileWidget.setText(DEFAULT_LAYOUT_FILE)
        layout = QFormLayout()
        layout.addRow(QLabel("Layout file (.yaml): "), self.fileWidget)
        label = QLabel("<b>Note:</b> Each device that can act as a "
                       "wireless/wired receiver stores its own copy of the "
                       "layout settings. The other devices will still function "
                       "when the layout is updated, but they will use their "
                       "old version of the layout instead. "
                       "You can intentionally load different layouts on different "
                       "keyboard components to have different layout options depending "
                       "on which device is acting as the receiver."
                       )
        label.setTextInteractionFlags(Qt.TextSelectableByMouse)
        label.setWordWrap(True)
        layout.addRow(label)
        self.setLayout(layout)

    def getLayoutFile(self):
        # Path typed/browsed by the user; may be empty.
        return self.fileWidget.text()
class DeviceSettingsScope(QGroupBox):
    """Settings pane for programming a device's ID, layout and RF settings."""

    def __init__(self):
        super(DeviceSettingsScope, self).__init__("Device and RF settings:")
        self.initUI()

    def initUI(self):
        """Build the layout/RF file pickers, the device-ID field and the
        RF-settings generator button."""
        self.layoutFile = FileBrowseWidget("Layout settings file .yaml (*.yaml)")
        self.layoutFile.setText(DEFAULT_LAYOUT_FILE)
        self.rfSettingsFile = FileBrowseWidget("Device settings file .yaml (*.yaml)")
        self.rfSettingsFile.setText(DEFAULT_RF_FILE)
        layout = QFormLayout()
        layout.addRow(QLabel("Layout settings file (.yaml):"), self.layoutFile)
        layout.addRow(QLabel("RF settings file (.yaml):"), self.rfSettingsFile)
        # Device IDs are limited to 0..63 by the keyplus protocol.
        self.idLine = QLineEdit()
        self.idLine.setText(str(DEFAULT_DEVICE_ID))
        self.idLine.setMaximumWidth(50)
        self.idLine.setValidator(QIntValidator(0, 63))
        layout.addRow(QLabel("Device id (0-63):"), self.idLine)
        self.generateButton = QPushButton("Generate new RF settings")
        self.generateButton.setMaximumWidth(230)
        self.generateButton.clicked.connect(self.generateRFSettings)
        layout.addRow(None, self.generateButton)
        label = QLabel("<b>Note:</b> These settings only need to be loaded on each "
                       "device once and are persistent when you update the layout. "
                       "To ensure proper operation and security, each device must "
                       "have a unique device ID for a given RF settings file. "
                       "Since RF settings file contains your encryption key, make "
                       "sure to keep it secret.")
        label.setTextInteractionFlags(Qt.TextSelectableByMouse)
        label.setWordWrap(True)
        layout.addRow(label)
        self.setLayout(layout)

    def generateRFSettings(self):
        """Generate fresh random RF settings and save them to a user-chosen file."""
        result = QFileDialog.getSaveFileName(
            self, "Save file", FileBrowseWidget.lastDirectory, "RF settings .yaml (*.yaml)")
        fname = result[0]
        if fname != '':
            fileInfo = QFileInfo(fname)
            # Remember the containing directory for subsequent file dialogs.
            # (The original assigned baseName() -- the file name, not a
            # directory -- to a never-read `lastDir` attribute.)
            FileBrowseWidget.lastDirectory = fileInfo.absolutePath()
            try:
                rf_settings = LayoutRFSettings()
                rf_settings.load_random()
                json_obj = rf_settings.to_json()
                # %m is the month; the original used %M (minutes) here.
                timeNow = datetime.datetime.now().strftime("%Y-%m-%d at %H:%M")
                with open(fname, 'w') as outFile:
                    outFile.write(
                        "# Generated on {}\n\n".format(timeNow) +
                        yaml.safe_dump(json_obj, default_flow_style=False)
                    )
                self.rfSettingsFile.lineEdit.setText(fname)
            except IOError as e:
                # TODO: proper error message
                print("error writing file: " + str(e))

    def getCurrentSettings(self):
        """Return (device_id or None, rf_settings_path, layout_path)."""
        rawID = self.idLine.text()
        if rawID == '':
            rawID = None
        else:
            rawID = int(rawID)
        return (
            rawID,
            self.rfSettingsFile.lineEdit.text(),
            self.layoutFile.lineEdit.text()
        )
class FirmwareSettingsScope(QGroupBox):
    """Group box holding the firmware (.hex) file selector."""

    def __init__(self):
        super(FirmwareSettingsScope, self).__init__("Firmware Update:")
        self.initUI()

    def initUI(self):
        """Build the form: one file-browse row plus a warning note."""
        self.fileWidget = FileBrowseWidget("Firmware file .hex (*.hex)")
        self.fileWidget.setText(DEFAULT_FIRMWARE_FILE)

        form = QFormLayout()
        form.addRow(QLabel("Firmware file (.hex):"), self.fileWidget)

        note = QLabel("<b>Note:</b> after updating the firmware, all layout "
                      "and device settings will be erased.")
        note.setTextInteractionFlags(Qt.TextSelectableByMouse)
        form.addRow(note)

        self.setLayout(form)

    def getFirmwareFile(self):
        """Return the firmware file path currently entered."""
        return self.fileWidget.text()
class FileBrowseWidget(QWidget):
    """A line edit with a "Browse" button that fills it from a file dialog.

    The most recently used directory is shared by all instances (class
    attribute) so consecutive file dialogs open where the user last was.
    """

    # Starting directory for the next file dialog; None -> system default.
    lastDirectory = None

    def __init__(self, fileType="Layout File (*.yaml)"):
        super(FileBrowseWidget, self).__init__()
        self.fileTypeName = fileType
        self.initUI()

    def initUI(self):
        """Lay out the line edit and browse button side by side."""
        hbox = QGridLayout()
        self.lineEdit = QLineEdit()
        self.browseButton = QPushButton("Browse")
        hbox.addWidget(self.lineEdit, 0, 0)
        hbox.addWidget(self.browseButton, 0, 1)
        hbox.setContentsMargins(0, 0, 0, 0)
        self.browseButton.clicked.connect(self.grabFileName)
        self.setLayout(hbox)

    def setText(self, val):
        """Set the path shown in the line edit."""
        self.lineEdit.setText(val)

    def text(self):
        """Return the path shown in the line edit."""
        return self.lineEdit.text()

    def grabFileName(self):
        """Open a file dialog and copy the chosen path into the line edit."""
        result = QFileDialog.getOpenFileName(
            self, "Open file", FileBrowseWidget.lastDirectory, self.fileTypeName)
        fname = result[0]
        if fname != '':
            fileInfo = QFileInfo(fname)
            try:
                # Was: FileBrowseWidget.lastDir = fileInfo.baseName() -- wrong
                # attribute name (lastDirectory was never updated) and
                # baseName() is the file's name without extension, not its
                # directory.
                FileBrowseWidget.lastDirectory = fileInfo.absolutePath()
            except Exception:
                pass
            self.lineEdit.setText(fname)
class Loader(QMainWindow):
    def __init__(self, parent=None):
        """Create the main loader window and build all of its widgets."""
        super(Loader, self).__init__(parent)
        self.initUI()
    def clearDeviceList(self):
        """Empty the device list widget (it repopulates on the next update)."""
        self.statusBar().showMessage("Refreshing device list...", STATUS_BAR_TIMEOUT)
        self.deviceListWidget.clearList()
def updateDeviceList(self):
self.statusBar().showMessage("Device list updating...", STATUS_BAR_TIMEOUT)
self.deviceListWidget.updateList()
self.statusBar().showMessage("Device list updated finished!", STATUS_BAR_TIMEOUT)
def getFileName(self, ext):
fname = QFileDialog.getOpenFileName(self, 'Open file', '/home')
# if fname[0]:
# f = open(fname[0], 'r')
# with f:
# data = f.read()
# self.textEdit.setText(data)
    def initUI(self):
        """Build the main window: menu bar actions, file selector, the
        connected-device list, and the periodic USB refresh timer."""
        # textEdit = QTextEdit()
        # self.setCentralWidget(textEdit)
        # self.setStyleSheet("QGroupBox { border: 1px solid gray; padding: 5px;}");
        # Action to quit program
        exitAction = QAction(QIcon(None), 'Quit', self)
        exitAction.setShortcut('Ctrl+Q')
        exitAction.setStatusTip('Exit application')
        exitAction.triggered.connect(self.close)
        # # Action to update device list
        # self.refreshAction = QAction(QIcon('img/reload.png'), 'Refresh', self)
        # self.refreshAction.setShortcut('F5')
        # self.refreshAction.setStatusTip('Refresh list of connected devices.')
        # self.refreshAction.triggered.connect(self.updateDeviceList)
        # Action to show program information
        helpAction = QAction(QIcon(None), 'Help', self)
        helpAction.setShortcut('F1')
        helpAction.triggered.connect(self.showHelpDialog)
        # Action to help
        aboutAction = QAction(QIcon(None), 'About', self)
        aboutAction.triggered.connect(self.showAboutDialog)
        self.statusBar()
        # Add the file menu
        menubar = self.menuBar()
        fileMenu = menubar.addMenu('&File')
        # fileMenu.addAction(self.refreshAction)
        fileMenu.addAction(exitAction)
        fileMenu = menubar.addMenu('&Help')
        fileMenu.addAction(helpAction)
        fileMenu.addAction(aboutAction)
        # # Add the toolbar
        # toolbar = self.addToolBar('Exit')
        # # toolbar.addAction(self.refreshAction)
        # toolbar.setMovable(False)
        # Add the main windows widgets. The device list receives the three
        # per-device button callbacks defined below on this class.
        self.deviceListWidget = DeviceList(
            self.programDeviceHandler,
            self.infoDeviceHandler,
            self.resetDeviceHandler
        )
        self.fileSelectorWidget = FileSelector()
        self.setStyleSheet("""
            QStatusBar {
                border-top: 1px solid #CCC;
            }
            QToolBar {
                border-top: 1px solid #DDD;
                border-bottom: 1px solid #CCC;
            }
        """)
        gbox = QGroupBox("Connected USB devices:")
        gboxLayout = QVBoxLayout()
        gboxLayout.addWidget(self.deviceListWidget)
        gbox.setLayout(gboxLayout)
        # Poll USB every 1.25 s so hot-plugged devices appear automatically.
        self.refreshEvent = QTimer()
        self.refreshEvent.setInterval(1250)
        self.refreshEvent.timeout.connect(self.USBUpdate)
        self.refreshEvent.start()
        layout = QVBoxLayout()
        layout.addWidget(self.fileSelectorWidget)
        layout.addWidget(gbox)
        self.setCentralWidget(QWidget())
        self.centralWidget().setLayout(layout)
        self.setMinimumSize(620, 700)
        self.setMaximumWidth(620)
        self.setWindowFlags(Qt.Window | Qt.WindowMinimizeButtonHint | Qt.WindowCloseButtonHint)
        self.setGeometry(300, 300, 350, 250)
        self.setWindowTitle('keyplus layout and firmware loader')
        self.show()
def abort_update(self, target_device):
try:
target_device.close()
except:
pass
self.deviceListWidget.updateList()
    def abort_update2(self):
        """Refresh the device list without touching any device handle."""
        self.deviceListWidget.updateList()
    def update_device_list(self):
        """
        Update the device list.

        Refreshes the label of every device entry; entries whose device can
        no longer be reached over USB are dropped from the list.
        """
        for widget in self.deviceListWidget.deviceWidgets[:]: # Use [:] to copy list
            try:
                widget.updateLabel()
            except (easyhid.HIDException, KeyplusFlasherError):
                # If communication with a USB device fails, remove it from the list.
                self.deviceListWidget.deviceWidgets.remove(widget)
def check_version(self, kb):
if not kb.firmware_info.has_at_least_version(keyplus.__version__):
version = kb.firmware_info.get_version_str()
error_msg_box("Need at least version {}, but this device has {}. Select "
"'Firmware Update' from drop down box to update the firmware.\n\n"
"You can download newer versions from here: https://github.com/ahtn/keyplus/releases"
.format(keyplus.__version__, version))
return -1
else:
return 0
    @Slot(str)
    def programDeviceHandler(self, device_path):
        """Program the device at `device_path` according to the mode chosen
        in the file selector widget: layout only, device id + RF settings,
        or a firmware update.

        Each failure path aborts via self.abort_update(), which closes the
        device (best effort) and refreshes the device list.
        """
        target_device = self.tryOpenDevicePath(device_path)
        if target_device == None:
            self.abort_update(target_device)
            return
        programmingMode = self.fileSelectorWidget.getProgramingInfo()
        # A device sitting in its bootloader can only accept firmware.
        if is_bootloader_device(target_device) and programmingMode != FileSelector.ScopeFirmware:
            error_msg_box("The device's bootloader is running. "
                "Choose 'Update Firmware' from the drop down box "
                "to flash new firmware, or reset it to use to run "
                "the currently loaded firmware (if any).")
            self.abort_update(target_device)
            return
        if programmingMode == FileSelector.ScopeLayout:
            # --- Update only the layout; existing RF settings are kept. ---
            target_device.close()
            kb = self.tryOpenDevicePath2(device_path)
            if kb == None:
                return
            if self.check_version(kb):
                return
            self.statusBar().showMessage("Started updating layout", STATUS_BAR_TIMEOUT)
            layout_file = self.fileSelectorWidget.getLayoutFile()
            if layout_file == '':
                error_msg_box("No layout file given.")
                return
            parser_info = KeyplusParserInfo()
            try:
                kp_layout = KeyplusLayout()
                kp_layout.from_yaml_file(layout_file, parser_info=parser_info)
                device_target = kb.get_device_target()
                settings_data = kp_layout.build_settings_section(device_target)
                layout_data = kp_layout.build_layout_section(device_target)
            except (KeyplusError, IOError) as err:
                error_msg_box(str(err))
                return
            except (yaml.YAMLError) as err:
                error_msg_box("YAML syntax error: \n" + str(err))
                self.abort_update(target_device)
                return
            with kb:
                reset_type = RESET_TYPE_SOFTWARE
                try:
                    # A device reporting a critical error needs a hardware
                    # reset for the new settings to take effect.
                    if kb.get_error_info().has_critical_error():
                        reset_type = RESET_TYPE_HARDWARE
                    kb.update_settings_section(settings_data, keep_rf=True)
                    kb.update_layout_section(layout_data)
                except easyhid.HIDException:
                    error_msg_box("Error writing layout")
                    self.abort_update(target_device)
                    return
                try:
                    kb.reset(reset_type)
                except easyhid.HIDException:
                    pass # may fail if HID device re-enumerates differently
                kb.disconnect()
            self.update_device_list()
            if len(parser_info.warnings) > 0:
                error_msg_box(
                    "The device was programmed successfully, but some "
                    "non-critical errors were encountered:\n" +
                    "\n".join([str(warn) for warn in parser_info.warnings]),
                    title = "Warnings",
                )
            self.statusBar().showMessage("Finished updating layout", STATUS_BAR_TIMEOUT)
        elif programmingMode == FileSelector.ScopeDevice:
            # --- Update device id + RF settings (rewrites everything). ---
            target_device.close()
            kb = self.tryOpenDevicePath2(device_path)
            if kb == None:
                return
            if self.check_version(kb):
                return
            layout_file = self.fileSelectorWidget.getRFLayoutFile()
            rf_file = self.fileSelectorWidget.getRFFile()
            target_id = self.fileSelectorWidget.getTargetID()
            self.statusBar().showMessage("Started updating RF settings", STATUS_BAR_TIMEOUT)
            if layout_file == '':
                error_msg_box("No layout file given.")
                self.abort_update(target_device)
                return
            elif rf_file == '':
                error_msg_box("No RF settings file given.")
                self.abort_update(target_device)
                return
            elif target_id == None:
                error_msg_box("No device id file given.")
                self.abort_update(target_device)
                return
            parser_info = KeyplusParserInfo()
            rf_parser_info = KeyplusParserInfo()
            try:
                kp_layout = KeyplusLayout()
                # NOTE(review): `warnings` is never appended to anywhere in
                # this branch, so the warnings dialog below can never fire;
                # the layout branch above uses parser_info.warnings instead
                # -- presumably that was intended here too, confirm.
                warnings = []
                kp_layout.from_yaml_file(layout_file,
                                         rf_file,
                                         parser_info=parser_info,
                                         rf_parser_info=rf_parser_info)
                device_target = kb.get_device_target()
                device_target.device_id = target_id
                settings_data = kp_layout.build_settings_section(device_target)
                layout_data = kp_layout.build_layout_section(device_target)
            except IOError as err:
                error_msg_box("IOError: " + str(err))
                self.abort_update(target_device)
                return
            except KeyplusError as err:
                error_msg_box("KeyplusError: " + str(err))
                self.abort_update(target_device)
                return
            except (yaml.YAMLError) as err:
                error_msg_box("YAML syntax error: \n" + str(err))
                self.abort_update(target_device)
                return
            except Exception as err:
                # Unexpected failure: show the traceback in the dialog too.
                traceback.print_tb(err.__traceback__, file=sys.stderr)
                error_msg_box("Exception({}): {}\nAt: {}"
                              .format(type(err),
                                      str(err),
                                      "".join(traceback.format_tb(err.__traceback__))
                              )
                )
                self.abort_update(target_device)
                return
            with kb:
                kb.update_settings_section(settings_data, keep_rf=False)
                kb.update_layout_section(layout_data)
                try:
                    kb.reset(reset_type=RESET_TYPE_HARDWARE)
                except easyhid.HIDException:
                    print("failed to reset")
                    pass # may fail if HID device re-enumerates differently
                kb.disconnect()
            self.clearDeviceList()
            self.updateDeviceList()
            if warnings != []:
                error_msg_box(
                    "The device was programmed successfully, but some "
                    "non-critical errors were encountered:\n" +
                    "\n".join([str(warn) for warn in warnings]),
                    title = "Warnings",
                )
            self.statusBar().showMessage("Finished updating RF settings", STATUS_BAR_TIMEOUT)
        elif programmingMode == FileSelector.ScopeFirmware:
            # --- Flash new firmware. ---
            fw_file = self.fileSelectorWidget.getFirmwareFile()
            self.statusBar().showMessage("Starting update firmware", STATUS_BAR_TIMEOUT)
            if fw_file == '':
                error_msg_box("No firmware file given.")
            else:
                if is_xusb_bootloader_device(target_device):
                    self.program_xusb_boot_firmware_hex(target_device, fw_file)
                elif is_kp_boot_device(target_device):
                    self.program_kp_boot_32u4_firmware_hex(target_device, fw_file)
                elif is_efm8_boot_device(target_device):
                    self.program_efm8_boot_firmware_hex(target_device, fw_file)
                elif is_keyplus_device(target_device):
                    # Running keyplus firmware: ask it to reboot into its
                    # bootloader, then flash after a delay so the device has
                    # time to re-enumerate.
                    target_device.close()
                    kb = self.tryOpenDevicePath2(device_path)
                    try:
                        with kb:
                            boot_vid, boot_pid = kb.enter_bootloader()
                        bootloader_info = keyplus.usb_ids.get_bootloader_info(
                            boot_vid,
                            boot_pid,
                        )
                        if bootloader_info.bootloader == BootloaderType.UNKNOWN:
                            error_msg_box(
                                "The device has entered its bootloader but "
                                "requires an external utility to flash. " +
                                bootloader_info.nonHIDMessage
                            )
                        if bootloader_info.nonHID:
                            print(bootloader_info)
                            error_msg_box(
                                "The device has entered its bootloader but "
                                "requires an external utility to flash. " +
                                bootloader_info.nonHIDMessage
                            )
                            return
                        if bootloader_info and bootloader_info.uses_serial_num:
                            serial_num = target_device.serial_number
                        else:
                            serial_num = None
                        self.bootloaderProgramTimer = QTimer()
                        self.bootloaderProgramTimer.setInterval(3000)
                        self.bootloaderProgramTimer.setSingleShot(True)
                        self.bootloaderProgramTimer.timeout.connect( lambda:
                            self.programFirmwareHex(boot_vid, boot_pid,
                                                    serial_num, fw_file)
                        )
                        self.bootloaderProgramTimer.start()
                    except (easyhid.HIDException):
                        error_msg_box("Programming hex file failed: '{}'".format(fw_file))
                else:
                    error_msg_box("This bootloader is currently unsupported")
        else:
            try:
                target_device.close()
            except:
                pass
            raise Exception("Unimplemented programming mode")
def programFirmwareHex(self, boot_vid, boot_pid, serial_num, file_name):
device = None
matches = []
en = easyhid.Enumeration().find()
# Look for devices with matching serial_num number
for dev in en:
if serial_num and dev.serial_number == serial_num:
device = dev
break
elif (
not serial_num and
dev.vendor_id == boot_vid and
dev.product_id == boot_pid
):
# if a device was found with matching vid:pid, but the
# original device didn't expose a serial number, then
# assume that the bootloader/firmware doesn't set the
# serial_num number, so just program the first matching
# device
device = dev
break
if device == None:
error_msg_box("Couldn't connect to the device's bootloader")
return
else:
if self.tryOpenDevice(device):
return
elif is_xusb_bootloader_device(device):
self.program_xusb_boot_firmware_hex(device, file_name)
elif is_kp_boot_device(device):
self.program_kp_boot_32u4_firmware_hex(device, file_name)
elif is_efm8_boot_device(device):
self.program_efm8_boot_firmware_hex(device, file_name)
elif is_nrf24lu1p_bootloader_device:
error_msg_box("Programming nrf24 is currently unsupported")
return
self.statusBar().showMessage("Finished updating firmware", STATUS_BAR_TIMEOUT)
    def program_xusb_boot_firmware_hex(self, device, file_name):
        """Flash `file_name` to an xusb-boot bootloader device.

        The device handle is always closed afterwards, even on error.
        """
        try:
            xusbboot.write_hexfile(device, file_name)
        except xusbboot.BootloaderException as err:
            error_msg_box("Error programming the hex file to the bootloader: " + str(err))
        finally:
            device.close()
    def program_kp_boot_32u4_firmware_hex(self, device, file_name):
        """Flash `file_name` to a kp_boot_32u4 bootloader device, then reset
        the MCU so the new firmware starts.

        NOTE(review): the raw HID handle is closed first, apparently because
        kp_boot_32u4.BootloaderDevice (re)opens the device itself -- confirm.
        """
        try:
            device.close()
            boot_dev = kp_boot_32u4.BootloaderDevice(device)
            with boot_dev:
                boot_dev.write_flash_hex(file_name)
                boot_dev.reset_mcu()
        except Exception as err:
            error_msg_box("Error programming the hex file to the bootloader: " + str(err))
def program_efm8_boot_firmware_hex(self, device, file_name):
try:
device.close()
boot_dev = efm8boot.EFM8BootloaderHID(device)
with boot_dev:
boot_dev.write_flash_hex(file_name)
boot_dev.reset_mcu()
except Exception as err:
error_msg_box("Error programming the hex file the bootloader: " + str(err))
    def tryOpenDevicePath2(self, device_path):
        """Open the device at `device_path` as a KeyplusKeyboard.

        Returns the KeyplusKeyboard on success; on failure shows an error
        dialog, dumps a traceback to stderr, and returns None.
        """
        try:
            device = easyhid.Enumeration().find(path=device_path)[0]
            return KeyplusKeyboard(device)
        except Exception as err:
            msg_box(
                    description="Failed to open device! Check that it is not in "
                    "use by another program and you have permission to read/write "
                    "to it. ErrorMsg: {}"
                    .format(err),
                    title="USB Device write error"
            )
            traceback.print_exc(file=sys.stderr)
            return None
def tryOpenDevicePath(self, device_path):
try:
device = easyhid.Enumeration().find(path=device_path)[0]
device.open()
return device
except:
msg_box(
description="Failed to open device! Check it is still present "
"and you have permission to write to it.",
title="USB Device write error"
)
return None
def tryOpenDevice(self, device):
try:
device.open()
return False
except:
msg_box(
description="Failed to open device! Check it is still present "
"and you have permission to write to it.",
title="USB Device write error"
)
return True
    @Slot(str)
    def resetDeviceHandler(self, device_path):
        """Reset the device at `device_path`, dispatching on which kind of
        firmware/bootloader it is currently running."""
        device = self.tryOpenDevicePath(device_path)
        if device == None: return
        if is_keyplus_device(device):
            # Re-open through the high-level keyplus API to issue the reset.
            device.close()
            kb = self.tryOpenDevicePath2(device_path)
            with kb:
                kb.reset_device()
        elif is_xusb_bootloader_device(device):
            xusbboot.reset(device)
        elif is_kp_boot_device(device):
            device.close()
            dev = kp_boot_32u4.BootloaderDevice(device)
            with dev:
                dev.reset_mcu()
        elif is_nrf24lu1p_bootloader_device(device):
            print("TODO: reset: ", device_path, file=sys.stderr)
        elif is_efm8_boot_device(device):
            device.close()
            dev = efm8boot.EFM8BootloaderHID(device)
            with dev:
                dev.reset_mcu()
        else:
            print("Can't reset device: ", device_path, file=sys.stderr)
    @Slot(str)
    def infoDeviceHandler(self, device_path):
        """Read device/firmware/RF/error information from the device at
        `device_path` and show it in a DeviceInformationWindow."""
        kb = self.tryOpenDevicePath2(device_path)
        if kb == None:
            return
        with kb:
            deviceInfo = kb.get_device_info()
            firmwareInfo = kb.get_firmware_info()
            rfInfo = kb.get_rf_info()
            errorInfo = kb.get_error_info()
            serial_number = kb.serial_number
        # Formatting helpers for the tables below.
        def ms_str(x):
            return "{}ms".format(x)
        def us_str(x):
            # Raw 0-255 value scaled to microseconds (48us full scale).
            return "{0:.1f}µs".format(x / 255 * 48.0)
        header = ["Attribute", "Value"]
        # NOTE(review): this empty-list assignment is immediately
        # overwritten by the literal below; it is redundant.
        device_settings = []
        device_settings = [
            ("Device ID", deviceInfo.device_id),
            ("Device name", deviceInfo.get_device_name()),
            ("Device serial number", serial_number),
            ("Last layout update", str(deviceInfo.get_timestamp())),
            ("Default report mode", deviceInfo.get_default_report_mode_str()),
            ("Matrix scan mode", deviceInfo.get_scan_mode_str()),
            ("Matrix columns", deviceInfo.scan_plan.cols),
            ("Matrix rows", deviceInfo.scan_plan.rows),
            ("Key debounce press time", ms_str(deviceInfo.scan_plan.debounce_time_press)),
            ("Key debounce release time", ms_str(deviceInfo.scan_plan.debounce_time_release)),
            ("Key press trigger time", ms_str(deviceInfo.scan_plan.trigger_time_press)),
            ("Key release trigger time", ms_str(deviceInfo.scan_plan.trigger_time_release)),
            ("Key discharge idle time", us_str(deviceInfo.scan_plan.parasitic_discharge_delay_idle)),
            ("Key discharge debouncing time", us_str(deviceInfo.scan_plan.parasitic_discharge_delay_debouncing)),
            ("Settings stored CRC", hex(deviceInfo.crc)),
            ("Settings computed CRC", hex(deviceInfo.compute_crc())),
            ("USB", (not deviceInfo.usb_disabled and firmwareInfo.has_usb)),
            ("I2C", (not deviceInfo.i2c_disabled and firmwareInfo.has_i2c)),
            ("nRF24 wireless", (not deviceInfo.nrf24_disabled and firmwareInfo.has_nrf24)),
            ("Unifying mouse", (not deviceInfo.unifying_disabled and firmwareInfo.has_unifying)),
            ("Bluetooth", (not deviceInfo.bluetooth_disabled and firmwareInfo.has_bluetooth)),
            ("RF pipe0", binascii.hexlify(bytes(rfInfo.pipe_addr_0)).decode('ascii')),
            ("RF pipe1", binascii.hexlify(bytes(rfInfo.pipe_addr_1)).decode('ascii')),
            ("RF pipe2", "{:02x}".format(rfInfo.pipe_addr_2)),
            ("RF pipe3", "{:02x}".format(rfInfo.pipe_addr_3)),
            ("RF pipe4", "{:02x}".format(rfInfo.pipe_addr_4)),
            ("RF pipe5", "{:02x}".format(rfInfo.pipe_addr_5)),
            ("RF channel", str(rfInfo.channel)),
            ("RF auto retransmit count", str(rfInfo.arc)),
        ]
        firmware_settings = [
            ("Firmware version", firmwareInfo.get_version_str()),
            ("Firmware build date", str(datetime.datetime.fromtimestamp(firmwareInfo.timestamp_raw))),
            ("Firmware git hash", firmwareInfo.get_git_hash_str()),
            ("Microcontroller", chip_id.get_chip_name_from_id(firmwareInfo.chip_id)),
            ("Board ID", hex(firmwareInfo.board_id)),
            ("Internal scan method", firmwareInfo.get_interal_scan_method_as_str()),
            ("Layout storage size", firmwareInfo.layout_flash_size),
            ("Bootloader VID", "{:04x}".format(firmwareInfo.bootloader_vid)),
            ("Bootloader PID", "{:04x}".format(firmwareInfo.bootloader_pid)),
            ("Support scanning", firmwareInfo.has_scanning),
            ("Support scanning col to row", firmwareInfo.has_scanning_col_row),
            ("Support scanning row to col", firmwareInfo.has_scanning_row_col),
            ("Media keys", firmwareInfo.has_media_keys),
            ("Mouse keys", firmwareInfo.has_mouse_keys),
            ("Layer keys", firmwareInfo.has_layer_keys),
            ("Sticky keys", firmwareInfo.has_sticky_keys),
            ("Tap keys", firmwareInfo.has_tap_keys),
            ("Hold keys", firmwareInfo.has_hold_keys),
            ("Support 6KRO", firmwareInfo.has_6kro),
            ("Support NKRO", firmwareInfo.has_nkro),
            ("Support indicator LEDs", firmwareInfo.has_led_indicators),
            ("Support LED backlighting", firmwareInfo.has_led_backlighting),
            ("Support ws2812 LEDs", firmwareInfo.has_led_ws2812),
            ("Support USB" , firmwareInfo.has_usb) ,
            ("Support nRF24 wireless" , firmwareInfo.has_nrf24) ,
            ("Support Unifying" , firmwareInfo.has_unifying) ,
            ("Support I2C" , firmwareInfo.has_i2c) ,
            ("Support Bluetooth" , firmwareInfo.has_bluetooth) ,
        ]
        error_codes = []
        for code in errorInfo.get_error_codes():
            error_codes.append(
                (errorInfo.error_code_to_name(code), code)
            )
        self.info_window = DeviceInformationWindow(
            self,
            header,
            device_settings,
            firmware_settings,
            error_codes,
        )
        self.info_window.exec_()
        self.deviceListWidget.updateList()
    def USBUpdate(self):
        """Periodic timer callback: rescan the connected USB devices."""
        self.deviceListWidget.updateList()
    def showAboutDialog(self):
        """Show the About box with the running keyplus version."""
        QMessageBox.about(self, "About keyplus Loader.",
            """
The keyplus layout and firmware loader.
keyplus version: """ + keyplus.__version__
        )
    def showHelpDialog(self):
        """Show the Help box describing the accepted file types."""
        QMessageBox.about(self, "keyplus Loader Help",
            """
This is the layout and firmware loader for the keyplus keyboard firmware.
The layout files are *.yaml files. For documentation and examples see here: TODO
The rf files are *.yaml files. For documentation and examples see here: TODO
The firmware loader accepts *.hex files. For the latest keyplus firmware see here: TODO
"""
        )
if __name__ == '__main__':
    # `from colorama import ...` does not bind the name `colorama`, so the
    # init() call below needs the package itself imported. (If the top of
    # the file already imports colorama this is a harmless no-op.)
    import colorama
    from colorama import Fore, Style
    colorama.init(convert=False)
    app = QApplication(sys.argv)
    ex = Loader()
    sys.exit(app.exec_())
| mit |
Fruit-Snacks/aima-python | submissions/Capps/myCSPs.py | 18 | 2926 | import csp
rgb = ['R', 'G', 'B']

# Each of the lower-48 US states may take any of the three colours. All
# states deliberately share the *same* domain list object.
_states = [
    'AL', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'FL', 'GA', 'ID',
    'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MD', 'MA', 'MI',
    'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ', 'NM', 'NY',
    'NC', 'ND', 'OH', 'OK', 'OR', 'PA', 'RI', 'SC', 'SD', 'TN',
    'TX', 'UT', 'VT', 'VA', 'WA', 'WV', 'WI', 'WY',
]
domains = {state: rgb for state in _states}

variables = domains.keys()
# Adjacency map of the lower-48 US states. Several entries in the original
# were wrong or one-directional, which silently weakens constraint
# propagation; the map is now symmetric and geographically correct:
#   AL gained TN; KS listed MS (not adjacent) instead of MO; MD gained DE;
#   MO gained AR and TN; OR gained CA and NV; TX gained AR; WI listed MI
#   twice instead of MI and MN.
neighbors = {
    'AL': ['MS', 'GA', 'FL', 'TN'],
    'AZ': ['CA', 'NV', 'UT', 'NM'],
    'AR': ['OK', 'TX', 'LA', 'MS', 'TN', 'MO'],
    'CA': ['OR', 'NV', 'AZ'],
    'CO': ['UT', 'NM', 'OK', 'KS', 'NE', 'WY'],
    'CT': ['RI', 'MA', 'NY'],
    'DE': ['PA', 'MD', 'NJ'],
    'FL': ['GA', 'AL'],
    'GA': ['SC', 'NC', 'TN', 'AL', 'FL'],
    'ID': ['WA', 'OR', 'NV', 'UT', 'WY', 'MT'],
    'IL': ['IA', 'WI', 'MO', 'IN', 'KY'],
    'IN': ['MI', 'OH', 'KY', 'IL'],
    'IA': ['WI', 'MN', 'SD', 'NE', 'MO', 'IL'],
    'KS': ['NE', 'CO', 'OK', 'MO'],
    'KY': ['TN', 'VA', 'WV', 'OH', 'IN', 'IL', 'MO'],
    'LA': ['MS', 'AR', 'TX'],
    'ME': ['NH'],
    'MD': ['VA', 'WV', 'PA', 'DE'],
    'MA': ['NH', 'VT', 'RI', 'CT', 'NY'],
    'MI': ['OH', 'IN', 'WI'],
    'MN': ['WI', 'IA', 'ND', 'SD'],
    'MS': ['LA', 'AR', 'TN', 'AL'],
    'MO': ['KY', 'IL', 'IA', 'NE', 'KS', 'OK', 'AR', 'TN'],
    'MT': ['ID', 'WY', 'SD', 'ND'],
    'NE': ['SD', 'WY', 'CO', 'KS', 'MO', 'IA'],
    'NV': ['OR', 'CA', 'AZ', 'UT', 'ID'],
    'NH': ['MA', 'ME', 'VT'],
    'NJ': ['NY', 'PA', 'DE'],
    'NM': ['AZ', 'CO', 'TX', 'OK'],
    'NY': ['CT', 'VT', 'MA', 'NJ', 'PA'],
    'NC': ['VA', 'TN', 'SC', 'GA'],
    'ND': ['MT', 'SD', 'MN'],
    'OH': ['MI', 'IN', 'KY', 'WV', 'PA'],
    'OK': ['KS', 'CO', 'NM', 'TX', 'AR', 'MO'],
    'OR': ['WA', 'ID', 'CA', 'NV'],
    'PA': ['NY', 'OH', 'WV', 'MD', 'NJ', 'DE'],
    'RI': ['MA', 'CT'],
    'SC': ['NC', 'GA'],
    'SD': ['MT', 'WY', 'NE', 'IA', 'MN', 'ND'],
    'TN': ['MO', 'AR', 'MS', 'AL', 'GA', 'NC', 'VA', 'KY'],
    'TX': ['NM', 'OK', 'LA', 'AR'],
    'UT': ['ID', 'NV', 'AZ', 'CO', 'WY'],
    'VT': ['NY', 'NH', 'MA'],
    'VA': ['WV', 'MD', 'KY', 'TN', 'NC'],
    'WA': ['ID', 'OR'],
    'WV': ['PA', 'OH', 'KY', 'VA', 'MD'],
    'WI': ['MI', 'MN', 'IA', 'IL'],
    'WY': ['MT', 'ID', 'UT', 'CO', 'NE', 'SD'],
}
def constraints(A, a, B, b):
    """Binary map-colouring constraint.

    Two *different* neighbouring variables may not share a colour; a
    variable never conflicts with itself (e.g. NSW == NSW is allowed).
    """
    return A == B or a != b
myAus = csp.CSP(variables, domains, neighbors, constraints)
myCSPs = [
{'csp': myAus,
# 'select_unassigned_variable':csp.mrv,
}
] | mit |
ballschin52/support-tools | googlecode-issues-exporter/generate_user_map.py | 151 | 3446 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for generating a user mapping from Google Code user to BitBucket user.
"""
import argparse
import json
import sys
import issues
class OptionalMap(dict):
    """A dict that falls back to the key itself for absent entries."""

    def __missing__(self, key):
        """Called by dict lookup on a miss; echo the key back unchanged."""
        return key
def addIfNotPresent(users, user):
    """Map `user` to itself in `users` unless an entry already exists."""
    users.setdefault(user, user)
def _CreateUsersDict(issue_data, project_name):
  """Extract users from list of issues into a dict.

  Collects the reporter, the assignee, and every commenter of each issue.

  Args:
    issue_data: Issue data
    project_name: The name of the project being exported.

  Returns:
    Dict of users associated with a list of issues
  """
  users = {}
  for issue in issue_data:
    # OptionalMap() is passed as the user map so unknown Google Code user
    # names are kept as-is rather than translated.
    googlecode_issue = issues.GoogleCodeIssue(
        issue, project_name, OptionalMap())
    reporting_user = googlecode_issue.GetAuthor()
    addIfNotPresent(users, reporting_user)
    assignee_user = googlecode_issue.GetOwner()
    addIfNotPresent(users, assignee_user)
    googlecode_comments = googlecode_issue.GetComments()
    for comment in googlecode_comments:
      googlecode_comment = issues.GoogleCodeComment(googlecode_issue, comment)
      commenting_user = googlecode_comment.GetAuthor()
      addIfNotPresent(users, commenting_user)
  return {
      "users": users
  }
def Generate(issue_file_path, project_name):
  """Generates a user map for the specified issues.

  Reads the Google Code issue dump, extracts every user mentioned in the
  given project's issues, and writes them to users.json in the current
  directory.

  Args:
    issue_file_path: Path to the JSON dump of Google Code issues.
    project_name: The name of the project to extract users from.

  Raises:
    issues.ProjectNotFoundError: The project was not found in the dump.
  """
  issue_data = None
  # Use a context manager so the input file is always closed (the original
  # leaked this file handle).
  with open(issue_file_path) as user_file:
    user_data = json.load(user_file)
  user_projects = user_data["projects"]
  for project in user_projects:
    if project_name in project["name"]:
      issue_data = project["issues"]["items"]
      break
  if issue_data is None:
    raise issues.ProjectNotFoundError(
        "Project %s not found" % project_name)
  users = _CreateUsersDict(issue_data, project_name)
  with open("users.json", "w") as users_file:
    user_json = json.dumps(users, sort_keys=True, indent=4,
                           separators=(",", ": "), ensure_ascii=False)
    users_file.write(unicode(user_json))
  # Parenthesised form works as a statement in both Python 2 and 3
  # (the original used the Python-2-only print statement).
  print("\nCreated file users.json.\n")
def main(args):
  """The main function.

  Args:
    args: The command line arguments.

  Raises:
    issues.ProjectNotFoundError: The user passed in an invalid project name.
  """
  parser = argparse.ArgumentParser()
  # Trailing spaces added inside the split help strings: the original
  # concatenated to "...issues fromGoogle Code." and "...wish toexport".
  parser.add_argument("--issue_file_path", required=True,
                      help="The path to the file containing the issues from "
                      "Google Code.")
  parser.add_argument("--project_name", required=True,
                      help="The name of the Google Code project you wish to "
                      "export")
  parsed_args, _ = parser.parse_known_args(args)
  Generate(parsed_args.issue_file_path, parsed_args.project_name)
if __name__ == "__main__":
  # Pass the full argv; main() parses the flags with argparse.
  main(sys.argv)
| apache-2.0 |
cgimenop/Excel2Testlink | ExcelParser/lib/jdcal-1.0/jdcal.py | 19 | 14903 | # -*- coding:utf-8 -*-
"""Functions for converting between Julian dates and calendar dates.
A function for converting Gregorian calendar dates to Julian dates, and
another function for converting Julian calendar dates to Julian dates
are defined. Two functions for the reverse calculations are also
defined.
Different regions of the world switched to Gregorian calendar from
Julian calendar on different dates. Having separate functions for Julian
and Gregorian calendars allow maximum flexibility in choosing the
relevant calendar.
All the above functions are "proleptic". This means that they work for
dates on which the concerned calendar is not valid. For example,
Gregorian calendar was not used prior to around October 1582.
Julian dates are stored in two floating point numbers (double). Julian
dates, and Modified Julian dates, are large numbers. If only one number
is used, then the precision of the time stored is limited. Using two
numbers, time can be split in a manner that will allow maximum
precision. For example, the first number could be the Julian date for
the beginning of a day and the second number could be the fractional
day. Calculations that need the latter part can now work with maximum
precision.
A function to test if a given Gregorian calendar year is a leap year is
defined.
Zero point of Modified Julian Date (MJD) and the MJD of 2000/1/1
12:00:00 are also given.
This module is based on the TPM C library, by Jeffery W. Percival. The
idea for splitting Julian date into two floating point numbers was
inspired by the IAU SOFA C library.
:author: Prasanth Nair
:contact: prasanthhn@gmail.com
:license: BSD (http://www.opensource.org/licenses/bsd-license.php)
"""
from __future__ import division
from __future__ import print_function
import math
__version__ = "1.0"
# Zero point of the Modified Julian Date: JD 2400000.5 (1858-11-17 00:00 UT).
MJD_0 = 2400000.5
# MJD of the epoch 2000-01-01 12:00:00 (J2000.0).
MJD_JD2000 = 51544.5
def fpart(x):
    """Return fractional part of given number (sign follows ``x``)."""
    frac, _whole = math.modf(x)
    return frac
def ipart(x):
    """Return integer part of given number as a float, truncating toward
    zero."""
    _frac, whole = math.modf(x)
    return whole
def is_leap(year):
    """Leap year or not in the Gregorian calendar.

    Returns a proper bool. (The original returned the raw ``fmod`` result,
    e.g. ``4.0`` for 2004 -- truthy, but ``is_leap(2004) == True`` was
    False.) Truthiness is unchanged for all inputs.
    """
    # Divisible by 4 and,
    # either not divisible by 100 or divisible by 400.
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def gcal2jd(year, month, day):
    """Gregorian calendar date to Julian date.

    Proleptic: the conversion is applied even to dates on which the
    Gregorian calendar was not in use. Inputs are truncated to integers.
    Out-of-range months/days carry over (2000-02-30 -> 2000-03-01), month
    or day 0 mean the previous month / last day of the previous month, and
    BC years are given as -(BC - 1), so 1 BC == 0.

    Parameters
    ----------
    year, month, day : int
        Calendar date components.

    Returns
    -------
    jd1, jd2 : 2-element tuple of floats
        jd1 + jd2 is the Julian date for midnight of the given date. jd1
        is always MJD_0 (2400000.5), so jd2 is the MJD; add a fractional
        day to jd2 to express a time of day.

    Examples
    --------
    >>> gcal2jd(2000, 1, 1)
    (2400000.5, 51544.0)
    """
    year, month, day = int(year), int(month), int(day)

    # Standard integer Julian day number algorithm; ipart() truncates
    # toward zero while keeping floats throughout.
    a = ipart((month - 14) / 12.0)
    mjd = ipart((1461 * (year + 4800 + a)) / 4.0)
    mjd += ipart((367 * (month - 2 - 12 * a)) / 12.0)
    cent = ipart((year + 4900 + a) / 100.0)
    mjd -= ipart((3 * cent) / 4.0)
    # The classic algorithm subtracts 32075 to get the JD for midday;
    # subtracting 2432075.5 (i.e. 32075 + MJD_0) and then half a day
    # yields the MJD for midnight instead.
    mjd += day - 2432075.5
    mjd -= 0.5

    return MJD_0, mjd
def jd2gcal(jd1, jd2):
    """Julian date to Gregorian calendar date and time of day.

    The input and output are for the proleptic Gregorian calendar,
    i.e., no consideration of historical usage of the calendar is
    made.

    Parameters
    ----------
    jd1, jd2: int
        Sum of the two numbers is taken as the given Julian date. For
        example `jd1` can be the zero point of MJD (MJD_0) and `jd2`
        can be the MJD of the date and time. But any combination will
        work.

    Returns
    -------
    y, m, d, f : int, int, int, float
        Four element tuple containing year, month, day and the
        fractional part of the day in the Gregorian calendar. The first
        three are integers, and the last part is a float.

    Examples
    --------
    >>> jd2gcal(*gcal2jd(2000,1,1))
    (2000, 1, 1, 0.0)
    >>> jd2gcal(*gcal2jd(1950,1,1))
    (1950, 1, 1, 0.0)

    Out of range months and days are carried over to the next/previous
    year or next/previous month. See gcal2jd for more examples.

    >>> jd2gcal(*gcal2jd(1999,10,12))
    (1999, 10, 12, 0.0)
    >>> jd2gcal(*gcal2jd(2000,2,30))
    (2000, 3, 1, 0.0)
    >>> jd2gcal(*gcal2jd(-1999,10,12))
    (-1999, 10, 12, 0.0)
    >>> jd2gcal(*gcal2jd(2000, -2, -4))
    (1999, 9, 26, 0.0)
    >>> gcal2jd(2000,1,1)
    (2400000.5, 51544.0)
    >>> jd2gcal(2400000.5, 51544.0)
    (2000, 1, 1, 0.0)
    >>> jd2gcal(2400000.5, 51544.5)
    (2000, 1, 1, 0.5)
    >>> jd2gcal(2400000.5, 51544.245)
    (2000, 1, 1, 0.24500000000261934)
    >>> jd2gcal(2400000.5, 51544.1)
    (2000, 1, 1, 0.099999999998544808)
    >>> jd2gcal(2400000.5, 51544.75)
    (2000, 1, 1, 0.75)

    Notes
    -----
    The last element of the tuple is the same as

        (hh + mm / 60.0 + ss / 3600.0) / 24.0

    where hh, mm, and ss are the hour, minute and second of the day.

    See Also
    --------
    gcal2jd
    """
    from math import modf

    # Split both inputs into integer and fractional parts so the
    # calendar conversion below works on a whole Julian day number only;
    # the fractions are recombined into the day fraction `f`.
    jd1_f, jd1_i = modf(jd1)
    jd2_f, jd2_i = modf(jd2)
    jd_i = jd1_i + jd2_i
    f = jd1_f + jd2_f
    # Set JD to noon of the current date. Fractional part is the
    # fraction from midnight of the current date.
    if -0.5 < f < 0.5:
        f += 0.5
    elif f >= 0.5:
        jd_i += 1
        f -= 0.5
    elif f <= -0.5:
        jd_i -= 1
        f += 1.5
    # Integer arithmetic converting the Julian day number to a proleptic
    # Gregorian (year, month, day).  NOTE(review): relies on the
    # module-level helper `ipart` (defined earlier in this file);
    # confirm it truncates toward zero for negative values.
    l = jd_i + 68569
    n = ipart((4 * l) / 146097.0)
    l -= ipart(((146097 * n) + 3) / 4.0)
    i = ipart((4000 * (l + 1)) / 1461001)
    l -= ipart((1461 * i) / 4.0) - 31
    j = ipart((80 * l) / 2447.0)
    day = l - ipart((2447 * j) / 80.0)
    l = ipart(j / 11.0)
    month = j + 2 - (12 * l)
    year = 100 * (n - 49) + i + l
    return int(year), int(month), int(day), f
def jcal2jd(year, month, day):
    """Julian calendar date to Julian date.

    The input and output are for the proleptic Julian calendar,
    i.e., no consideration of historical usage of the calendar is
    made.

    Parameters
    ----------
    year : int
        Year as an integer.
    month : int
        Month as an integer.
    day : int
        Day as an integer.

    Returns
    -------
    jd1, jd2: 2-element tuple of floats
        When added together, the numbers give the Julian date for the
        given Julian calendar date. The first number is always
        MJD_0 i.e., 2400000.5. So the second is the MJD.

    Examples
    --------
    >>> jcal2jd(2000, 1, 1)
    (2400000.5, 51557.0)
    >>> year = [-4699, -2114, -1050, -123, -1, 0, 1, 123, 1678, 2000,
    ...: 2012, 2245]
    >>> month = [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12]
    >>> day = [1, 12, 23, 14, 25, 16, 27, 8, 9, 10, 11, 31]
    >>> x = [jcal2jd(y, m, d) for y, m, d in zip(year, month, day)]
    >>> for i in x: print i
    (2400000.5, -2395252.0)
    (2400000.5, -1451039.0)
    (2400000.5, -1062374.0)
    (2400000.5, -723765.0)
    (2400000.5, -679164.0)
    (2400000.5, -678776.0)
    (2400000.5, -678370.0)
    (2400000.5, -633798.0)
    (2400000.5, -65772.0)
    (2400000.5, 51871.0)
    (2400000.5, 56285.0)

    Notes
    -----
    Unlike `gcal2jd`, negative months and days can result in incorrect
    Julian dates.
    """
    year = int(year)
    month = int(month)
    day = int(day)
    # Integer arithmetic converting a proleptic Julian calendar date to
    # a Julian day number.  NOTE(review): relies on the module-level
    # helper `ipart`; confirm its truncation semantics for negatives.
    jd = 367 * year
    x = ipart((month - 9) / 7.0)
    jd -= ipart((7 * (year + 5001 + x)) / 4.0)
    jd += ipart((275 * month) / 9.0)
    jd += day
    jd += 1729777 - 2400000.5  # Return 2400000.5 as the first part of the JD.
    jd -= 0.5  # Convert midday to midnight.
    return MJD_0, jd
def jd2jcal(jd1, jd2):
    """Julian calendar date for the given Julian date.

    The input and output are for the proleptic Julian calendar,
    i.e., no consideration of historical usage of the calendar is
    made.

    Parameters
    ----------
    jd1, jd2: int
        Sum of the two numbers is taken as the given Julian date. For
        example `jd1` can be the zero point of MJD (MJD_0) and `jd2`
        can be the MJD of the date and time. But any combination will
        work.

    Returns
    -------
    y, m, d, f : int, int, int, float
        Four element tuple containing year, month, day and the
        fractional part of the day in the Julian calendar. The first
        three are integers, and the last part is a float.

    Examples
    --------
    >>> jd2jcal(*jcal2jd(2000, 1, 1))
    (2000, 1, 1, 0.0)
    >>> jd2jcal(*jcal2jd(-4000, 10, 11))
    (-4000, 10, 11, 0.0)
    >>> jcal2jd(2000, 1, 1)
    (2400000.5, 51557.0)
    >>> jd2jcal(2400000.5, 51557.0)
    (2000, 1, 1, 0.0)
    >>> jd2jcal(2400000.5, 51557.5)
    (2000, 1, 1, 0.5)
    >>> jd2jcal(2400000.5, 51557.245)
    (2000, 1, 1, 0.24500000000261934)
    >>> jd2jcal(2400000.5, 51557.1)
    (2000, 1, 1, 0.099999999998544808)
    >>> jd2jcal(2400000.5, 51557.75)
    (2000, 1, 1, 0.75)
    """
    from math import modf

    # Work on a whole Julian day number; recombine the fractions into
    # the day fraction `f`.
    jd1_f, jd1_i = modf(jd1)
    jd2_f, jd2_i = modf(jd2)
    jd_i = jd1_i + jd2_i
    f = jd1_f + jd2_f
    # Set JD to noon of the current date. Fractional part is the
    # fraction from midnight of the current date.
    if -0.5 < f < 0.5:
        f += 0.5
    elif f >= 0.5:
        jd_i += 1
        f -= 0.5
    elif f <= -0.5:
        jd_i -= 1
        f += 1.5
    # Integer arithmetic converting the Julian day number to a proleptic
    # Julian calendar date.  NOTE(review): relies on the module-level
    # helper `ipart`; confirm it truncates toward zero for negatives.
    j = jd_i + 1402.0
    k = ipart((j - 1) / 1461.0)
    l = j - (1461.0 * k)
    n = ipart((l - 1) / 365.0) - ipart(l / 1461.0)
    i = l - (365.0 * n) + 30.0
    j = ipart((80.0 * i) / 2447.0)
    day = i - ipart((2447.0 * j) / 80.0)
    i = ipart(j / 11.0)
    month = j + 2 - (12.0 * i)
    year = (4 * k) + n + i - 4716.0
    return int(year), int(month), int(day), f
# Some tests.
def _test_gcal2jd_with_sla_cldj():
    """Compare gcal2jd with slalib.sla_cldj."""
    import random
    try:
        from pyslalib import slalib
    except ImportError:
        print("SLALIB (PySLALIB not available).")
        return 1
    num_dates = 1000
    days_in_month = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    # sla_cldj needs year > -4699 i.e., 4700 BC.
    years = [random.randint(-4699, 2200) for _ in range(num_dates)]
    months = [random.randint(1, 12) for _ in range(num_dates)]
    days = [random.randint(1, 31) for _ in range(num_dates)]
    # Clamp randomly generated days that overflow their month.
    for idx in range(num_dates):
        leap_extra = 1 if (is_leap(years[idx]) and months[idx] == 2) else 0
        if days[idx] > days_in_month[months[idx]] + leap_extra:
            days[idx] = days_in_month[months[idx]]
    jd_jdc = [gcal2jd(y, m, d)[1] for y, m, d in zip(years, months, days)]
    jd_sla = [slalib.sla_cldj(y, m, d)[0] for y, m, d in zip(years, months, days)]
    diff = [abs(a - b) for a, b in zip(jd_sla, jd_jdc)]
    assert max(diff) <= 1e-8
    assert min(diff) <= 1e-8
def _test_jd2gcal():
    """Check jd2gcal as reverse of gcal2jd."""
    import random
    num_dates = 1000
    days_in_month = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    years = [random.randint(-4699, 2200) for _ in range(num_dates)]
    months = [random.randint(1, 12) for _ in range(num_dates)]
    days = [random.randint(1, 31) for _ in range(num_dates)]
    # Clamp randomly generated days that overflow their month.
    for idx in range(num_dates):
        leap_extra = 1 if (is_leap(years[idx]) and months[idx] == 2) else 0
        if days[idx] > days_in_month[months[idx]] + leap_extra:
            days[idx] = days_in_month[months[idx]]
    mjds = [gcal2jd(y, m, d)[1] for y, m, d in zip(years, months, days)]
    roundtrip = [jd2gcal(MJD_0, mjd) for mjd in mjds]
    for idx in range(num_dates):
        assert roundtrip[idx][0] == years[idx]
        assert roundtrip[idx][1] == months[idx]
        assert roundtrip[idx][2] == days[idx]
        assert roundtrip[idx][3] <= 1e-15
def _test_jd2jcal():
    """Check jd2jcal as reverse of jcal2jd.

    Generates random proleptic Julian calendar dates (days capped at 28
    so no clamping is needed), converts them to MJD with jcal2jd, and
    checks that jd2jcal recovers the original date exactly.
    """
    import random
    n = 1000
    year = [random.randint(-4699, 2200) for i in range(n)]
    month = [random.randint(1, 12) for i in range(n)]
    day = [random.randint(1, 28) for i in range(n)]
    jd = [jcal2jd(y, m, d)[1]
          for y, m, d in zip(year, month, day)]
    # BUG FIX: previously called jd2gcal here, so the function under
    # test (jd2jcal) was never exercised and Julian-calendar inputs
    # were compared against a Gregorian-calendar round trip.
    x = [jd2jcal(MJD_0, i) for i in jd]
    for i in range(n):
        assert x[i][0] == year[i]
        assert x[i][1] == month[i]
        assert x[i][2] == day[i]
        assert x[i][3] <= 1e-15
| mit |
joopert/home-assistant | homeassistant/components/bbox/device_tracker.py | 2 | 2593 | """Support for French FAI Bouygues Bbox routers."""
from collections import namedtuple
from datetime import timedelta
import logging
from typing import List
import pybbox
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
# Default LAN address of a Bbox router.
DEFAULT_HOST = "192.168.1.254"
# Minimum delay between two scans; enforced via the Throttle decorator
# on BboxDeviceScanner._update_info.
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=60)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string}
)
def get_scanner(hass, config):
    """Validate the configuration and return a Bbox scanner."""
    device_scanner = BboxDeviceScanner(config[DOMAIN])
    if not device_scanner.success_init:
        return None
    return device_scanner
# Record describing one device reported by the router: MAC address,
# hostname, IP address and the timestamp of the scan that saw it.
Device = namedtuple("Device", ["mac", "name", "ip", "last_update"])
class BboxDeviceScanner(DeviceScanner):
    """Scanner for devices connected to the Bbox router."""

    def __init__(self, config):
        """Initialize the scanner from the platform configuration.

        BUG FIX: the original body contained a stray bare string literal
        ("Initialize the scanner.") in the middle of the method — a
        no-op statement; it has been merged into this docstring.
        """
        self.host = config[CONF_HOST]
        # Devices seen during the most recent successful scan.
        self.last_results: List[Device] = []
        # True when the initial scan succeeded; get_scanner uses this to
        # decide whether the platform is usable.
        self.success_init = self._update_info()
        _LOGGER.info("Scanner initialized")

    def scan_devices(self):
        """Scan for new devices and return a list with found device IDs."""
        self._update_info()
        return [device.mac for device in self.last_results]

    def get_device_name(self, device):
        """Return the name of the given device or None if we don't know."""
        filter_named = [
            result.name for result in self.last_results if result.mac == device
        ]
        if filter_named:
            return filter_named[0]
        return None

    @Throttle(MIN_TIME_BETWEEN_SCANS)
    def _update_info(self):
        """Check the Bbox for connected devices.

        Returns True if scanning was successful.  NOTE(review): when the
        call is rate-limited, the Throttle decorator presumably short
        circuits and this returns None — confirm against
        homeassistant.util.Throttle.
        """
        _LOGGER.info("Scanning...")

        box = pybbox.Bbox(ip=self.host)
        result = box.get_all_connected_devices()

        now = dt_util.now()
        last_results = []
        for device in result:
            # Skip devices the router reports as inactive.
            if device["active"] != 1:
                continue
            last_results.append(
                Device(
                    device["macaddress"], device["hostname"], device["ipaddress"], now
                )
            )

        self.last_results = last_results

        _LOGGER.info("Scan successful")
        return True
| apache-2.0 |
zhukaixy/kbengine | kbe/src/lib/python/Lib/test/multibytecodec_support.py | 60 | 14522 | #
# multibytecodec_support.py
# Common Unittest Routines for CJK codecs
#
import codecs
import os
import re
import sys
import unittest
from http.client import HTTPException
from test import support
from io import BytesIO
class TestBase:
encoding = '' # codec name
codec = None # codec tuple (with 4 elements)
tstring = None # must set. 2 strings to test StreamReader
codectests = None # must set. codec test tuple
roundtriptest = 1 # set if roundtrip is possible with unicode
has_iso10646 = 0 # set if this encoding contains whole iso10646 map
xmlcharnametest = None # string to test xmlcharrefreplace
unmappedunicode = '\udeee' # a unicode codepoint that is not mapped.
def setUp(self):
if self.codec is None:
self.codec = codecs.lookup(self.encoding)
self.encode = self.codec.encode
self.decode = self.codec.decode
self.reader = self.codec.streamreader
self.writer = self.codec.streamwriter
self.incrementalencoder = self.codec.incrementalencoder
self.incrementaldecoder = self.codec.incrementaldecoder
def test_chunkcoding(self):
tstring_lines = []
for b in self.tstring:
lines = b.split(b"\n")
last = lines.pop()
assert last == b""
lines = [line + b"\n" for line in lines]
tstring_lines.append(lines)
for native, utf8 in zip(*tstring_lines):
u = self.decode(native)[0]
self.assertEqual(u, utf8.decode('utf-8'))
if self.roundtriptest:
self.assertEqual(native, self.encode(u)[0])
def test_errorhandle(self):
for source, scheme, expected in self.codectests:
if isinstance(source, bytes):
func = self.decode
else:
func = self.encode
if expected:
result = func(source, scheme)[0]
if func is self.decode:
self.assertTrue(type(result) is str, type(result))
self.assertEqual(result, expected,
'%a.decode(%r, %r)=%a != %a'
% (source, self.encoding, scheme, result,
expected))
else:
self.assertTrue(type(result) is bytes, type(result))
self.assertEqual(result, expected,
'%a.encode(%r, %r)=%a != %a'
% (source, self.encoding, scheme, result,
expected))
else:
self.assertRaises(UnicodeError, func, source, scheme)
def test_xmlcharrefreplace(self):
if self.has_iso10646:
self.skipTest('encoding contains full ISO 10646 map')
s = "\u0b13\u0b23\u0b60 nd eggs"
self.assertEqual(
self.encode(s, "xmlcharrefreplace")[0],
b"ଓଣୠ nd eggs"
)
def test_customreplace_encode(self):
if self.has_iso10646:
self.skipTest('encoding contains full ISO 10646 map')
from html.entities import codepoint2name
def xmlcharnamereplace(exc):
if not isinstance(exc, UnicodeEncodeError):
raise TypeError("don't know how to handle %r" % exc)
l = []
for c in exc.object[exc.start:exc.end]:
if ord(c) in codepoint2name:
l.append("&%s;" % codepoint2name[ord(c)])
else:
l.append("&#%d;" % ord(c))
return ("".join(l), exc.end)
codecs.register_error("test.xmlcharnamereplace", xmlcharnamereplace)
if self.xmlcharnametest:
sin, sout = self.xmlcharnametest
else:
sin = "\xab\u211c\xbb = \u2329\u1234\u232a"
sout = b"«ℜ» = ⟨ሴ⟩"
self.assertEqual(self.encode(sin,
"test.xmlcharnamereplace")[0], sout)
def test_callback_returns_bytes(self):
def myreplace(exc):
return (b"1234", exc.end)
codecs.register_error("test.cjktest", myreplace)
enc = self.encode("abc" + self.unmappedunicode + "def", "test.cjktest")[0]
self.assertEqual(enc, b"abc1234def")
def test_callback_wrong_objects(self):
def myreplace(exc):
return (ret, exc.end)
codecs.register_error("test.cjktest", myreplace)
for ret in ([1, 2, 3], [], None, object()):
self.assertRaises(TypeError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_callback_long_index(self):
def myreplace(exc):
return ('x', int(exc.end))
codecs.register_error("test.cjktest", myreplace)
self.assertEqual(self.encode('abcd' + self.unmappedunicode + 'efgh',
'test.cjktest'), (b'abcdxefgh', 9))
def myreplace(exc):
return ('x', sys.maxsize + 1)
codecs.register_error("test.cjktest", myreplace)
self.assertRaises(IndexError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_callback_None_index(self):
def myreplace(exc):
return ('x', None)
codecs.register_error("test.cjktest", myreplace)
self.assertRaises(TypeError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_callback_backward_index(self):
def myreplace(exc):
if myreplace.limit > 0:
myreplace.limit -= 1
return ('REPLACED', 0)
else:
return ('TERMINAL', exc.end)
myreplace.limit = 3
codecs.register_error("test.cjktest", myreplace)
self.assertEqual(self.encode('abcd' + self.unmappedunicode + 'efgh',
'test.cjktest'),
(b'abcdREPLACEDabcdREPLACEDabcdREPLACEDabcdTERMINALefgh', 9))
def test_callback_forward_index(self):
def myreplace(exc):
return ('REPLACED', exc.end + 2)
codecs.register_error("test.cjktest", myreplace)
self.assertEqual(self.encode('abcd' + self.unmappedunicode + 'efgh',
'test.cjktest'), (b'abcdREPLACEDgh', 9))
def test_callback_index_outofbound(self):
def myreplace(exc):
return ('TERM', 100)
codecs.register_error("test.cjktest", myreplace)
self.assertRaises(IndexError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_incrementalencoder(self):
UTF8Reader = codecs.getreader('utf-8')
for sizehint in [None] + list(range(1, 33)) + \
[64, 128, 256, 512, 1024]:
istream = UTF8Reader(BytesIO(self.tstring[1]))
ostream = BytesIO()
encoder = self.incrementalencoder()
while 1:
if sizehint is not None:
data = istream.read(sizehint)
else:
data = istream.read()
if not data:
break
e = encoder.encode(data)
ostream.write(e)
self.assertEqual(ostream.getvalue(), self.tstring[0])
def test_incrementaldecoder(self):
UTF8Writer = codecs.getwriter('utf-8')
for sizehint in [None, -1] + list(range(1, 33)) + \
[64, 128, 256, 512, 1024]:
istream = BytesIO(self.tstring[0])
ostream = UTF8Writer(BytesIO())
decoder = self.incrementaldecoder()
while 1:
data = istream.read(sizehint)
if not data:
break
else:
u = decoder.decode(data)
ostream.write(u)
self.assertEqual(ostream.getvalue(), self.tstring[1])
def test_incrementalencoder_error_callback(self):
inv = self.unmappedunicode
e = self.incrementalencoder()
self.assertRaises(UnicodeEncodeError, e.encode, inv, True)
e.errors = 'ignore'
self.assertEqual(e.encode(inv, True), b'')
e.reset()
def tempreplace(exc):
return ('called', exc.end)
codecs.register_error('test.incremental_error_callback', tempreplace)
e.errors = 'test.incremental_error_callback'
self.assertEqual(e.encode(inv, True), b'called')
# again
e.errors = 'ignore'
self.assertEqual(e.encode(inv, True), b'')
def test_streamreader(self):
UTF8Writer = codecs.getwriter('utf-8')
for name in ["read", "readline", "readlines"]:
for sizehint in [None, -1] + list(range(1, 33)) + \
[64, 128, 256, 512, 1024]:
istream = self.reader(BytesIO(self.tstring[0]))
ostream = UTF8Writer(BytesIO())
func = getattr(istream, name)
while 1:
data = func(sizehint)
if not data:
break
if name == "readlines":
ostream.writelines(data)
else:
ostream.write(data)
self.assertEqual(ostream.getvalue(), self.tstring[1])
def test_streamwriter(self):
readfuncs = ('read', 'readline', 'readlines')
UTF8Reader = codecs.getreader('utf-8')
for name in readfuncs:
for sizehint in [None] + list(range(1, 33)) + \
[64, 128, 256, 512, 1024]:
istream = UTF8Reader(BytesIO(self.tstring[1]))
ostream = self.writer(BytesIO())
func = getattr(istream, name)
while 1:
if sizehint is not None:
data = func(sizehint)
else:
data = func()
if not data:
break
if name == "readlines":
ostream.writelines(data)
else:
ostream.write(data)
self.assertEqual(ostream.getvalue(), self.tstring[0])
class TestBase_Mapping(unittest.TestCase):
    """Verify a codec against its externally published mapping table.

    NOTE(review): subclasses are expected to set `encoding` and
    `mapfileurl` (referenced below but not defined here) — confirm
    against the concrete test classes.
    """

    pass_enctest = []   # (bytes, str) pairs excluded from the encode check
    pass_dectest = []   # (bytes, str) pairs excluded from the decode check
    supmaps = []        # extra (bytes, str) pairs tested in addition to the table
    codectests = []     # (source, scheme, expected) error-handling tuples

    def setUp(self):
        """Skip early if the mapping resource cannot be retrieved."""
        try:
            self.open_mapping_file().close() # test it to report the error early
        except (OSError, HTTPException):
            self.skipTest("Could not retrieve "+self.mapfileurl)

    def open_mapping_file(self):
        return support.open_urlresource(self.mapfileurl)

    def test_mapping_file(self):
        # Dispatch on the file format: .xml files use the UCM-style
        # parser, everything else the plain-text parser.
        if self.mapfileurl.endswith('.xml'):
            self._test_mapping_file_ucm()
        else:
            self._test_mapping_file_plain()

    def _test_mapping_file_plain(self):
        # Parse lines of "<codeset-value> <unicode-value>" pairs
        # (comments after '#') and test each mapping point once.
        unichrs = lambda s: ''.join(map(chr, map(eval, s.split('+'))))
        urt_wa = {}

        with self.open_mapping_file() as f:
            for line in f:
                if not line:
                    break
                data = line.split('#')[0].strip().split()
                if len(data) != 2:
                    continue

                # Split the codeset value into its big-endian bytes.
                csetval = eval(data[0])
                if csetval <= 0x7F:
                    csetch = bytes([csetval & 0xff])
                elif csetval >= 0x1000000:
                    csetch = bytes([(csetval >> 24), ((csetval >> 16) & 0xff),
                                    ((csetval >> 8) & 0xff), (csetval & 0xff)])
                elif csetval >= 0x10000:
                    csetch = bytes([(csetval >> 16), ((csetval >> 8) & 0xff),
                                    (csetval & 0xff)])
                elif csetval >= 0x100:
                    csetch = bytes([(csetval >> 8), (csetval & 0xff)])
                else:
                    continue

                # Skip the replacement character and duplicate entries.
                unich = unichrs(data[1])
                if ord(unich) == 0xfffd or unich in urt_wa:
                    continue
                urt_wa[unich] = csetch

                self._testpoint(csetch, unich)

    def _test_mapping_file_ucm(self):
        # Extract <a u="XXXX" b="..."/> entries from the XML mapping.
        with self.open_mapping_file() as f:
            ucmdata = f.read()
        uc = re.findall('<a u="([A-F0-9]{4})" b="([0-9A-F ]+)"/>', ucmdata)
        for uni, coded in uc:
            unich = chr(int(uni, 16))
            codech = bytes(int(c, 16) for c in coded.split())
            self._testpoint(codech, unich)

    def test_mapping_supplemental(self):
        for mapping in self.supmaps:
            self._testpoint(*mapping)

    def _testpoint(self, csetch, unich):
        """Check one mapping in both directions, honoring the skip lists."""
        if (csetch, unich) not in self.pass_enctest:
            self.assertEqual(unich.encode(self.encoding), csetch)
        if (csetch, unich) not in self.pass_dectest:
            self.assertEqual(str(csetch, self.encoding), unich)

    def test_errorhandle(self):
        """Run the codec-specific (source, scheme, expected) tuples."""
        for source, scheme, expected in self.codectests:
            if isinstance(source, bytes):
                func = source.decode
            else:
                func = source.encode
            if expected:
                if isinstance(source, bytes):
                    result = func(self.encoding, scheme)
                    self.assertTrue(type(result) is str, type(result))
                    self.assertEqual(result, expected,
                                     '%a.decode(%r, %r)=%a != %a'
                                     % (source, self.encoding, scheme, result,
                                        expected))
                else:
                    result = func(self.encoding, scheme)
                    self.assertTrue(type(result) is bytes, type(result))
                    self.assertEqual(result, expected,
                                     '%a.encode(%r, %r)=%a != %a'
                                     % (source, self.encoding, scheme, result,
                                        expected))
            else:
                self.assertRaises(UnicodeError, func, self.encoding, scheme)
def load_teststring(name):
    """Load the native-encoded and UTF-8 reference strings for *name*.

    Reads ``<name>.txt`` and ``<name>-utf8.txt`` from the bundled
    ``cjkencodings`` directory and returns both as bytes.
    """
    base = os.path.join(os.path.dirname(__file__), 'cjkencodings')
    with open(os.path.join(base, name + '.txt'), 'rb') as handle:
        native = handle.read()
    with open(os.path.join(base, name + '-utf8.txt'), 'rb') as handle:
        utf8 = handle.read()
    return native, utf8
| lgpl-3.0 |
psantann/zerorpc-python | zerorpc/exceptions.py | 134 | 1861 | # -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc (opensource@dotcloud.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class LostRemote(Exception):
    """Raised when the remote zerorpc peer is considered lost."""
    pass
class TimeoutExpired(Exception):
    """Raised when an operation does not complete within its timeout."""

    def __init__(self, timeout_s, when=None):
        # Build "timeout after <N>s", optionally suffixed with the
        # activity that was in progress.
        details = 'timeout after {0}s'.format(timeout_s)
        if when:
            details = '{0}, when {1}'.format(details, when)
        super(TimeoutExpired, self).__init__(details)
class RemoteError(Exception):
    """Mirror of an exception raised on the remote side of an RPC call.

    Carries the remote exception's name, human-readable message and
    (optionally) its formatted traceback.
    """

    def __init__(self, name, human_msg, human_traceback):
        self.name = name
        self.msg = human_msg
        self.traceback = human_traceback

    def __str__(self):
        # Prefer the full remote traceback when one was transmitted.
        if self.traceback is None:
            return '{0}: {1}'.format(self.name, self.msg)
        return self.traceback
| mit |
opensourcechipspark/platform_external_chromium_org | tools/telemetry/telemetry/core/chrome/extension_dict_backend.py | 23 | 2588 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import re
import weakref
from telemetry.core import extension_page
from telemetry.core.chrome import inspector_backend
class ExtensionNotFoundException(Exception):
  """Raised when no debuggable page exists for a requested extension id."""
  pass
class ExtensionDictBackend(object):
  """Dictionary-like access to ExtensionPage objects, keyed by extension id."""

  def __init__(self, browser_backend):
    self._browser_backend = browser_backend
    # Maps extension ids to ExtensionPage objects.  Weak values so that
    # unused pages can be collected.
    self._extension_dict = weakref.WeakValueDictionary()

  def __getitem__(self, extension_id):
    """Return (creating and caching if needed) the page for an id."""
    page = self._extension_dict.get(extension_id)
    if not page:
      page = self._CreateExtensionObject(extension_id)
      assert page
      self._extension_dict[extension_id] = page
    return page

  def __contains__(self, extension_id):
    return extension_id in self._GetExtensionIds()

  @staticmethod
  def _ExtractExtensionId(url):
    """Pull the extension id out of a chrome-extension:// URL."""
    match = re.match(r"(chrome-extension://)([^/]+)", url)
    assert match
    return match.group(2)

  @staticmethod
  def _GetExtensionId(extension_info):
    if 'url' not in extension_info:
      return None
    return ExtensionDictBackend._ExtractExtensionId(extension_info['url'])

  def _CreateExtensionObject(self, extension_id):
    """Build an ExtensionPage, or raise if the id is not debuggable."""
    info = self._FindExtensionInfo(extension_id)
    if not info or not 'webSocketDebuggerUrl' in info:
      raise ExtensionNotFoundException()
    return extension_page.ExtensionPage(
        self._CreateInspectorBackendForDebuggerUrl(
            info['webSocketDebuggerUrl']))

  def _CreateInspectorBackendForDebuggerUrl(self, debugger_url):
    return inspector_backend.InspectorBackend(self._browser_backend.browser,
                                              self._browser_backend,
                                              debugger_url)

  def _FindExtensionInfo(self, extension_id):
    """Return the page-info dict for an id, or None if absent."""
    for info in self._GetExtensionInfoList():
      if self._GetExtensionId(info) == extension_id:
        return info
    return None

  def _GetExtensionInfoList(self, timeout=None):
    raw = self._browser_backend.Request('', timeout=timeout)
    return self._FilterExtensions(json.loads(raw))

  def _FilterExtensions(self, all_pages):
    """Keep only the pages served from a chrome-extension:// URL."""
    return [page_info for page_info in all_pages
            if page_info['url'].startswith('chrome-extension://')]

  def _GetExtensionIds(self):
    return map(self._GetExtensionId, self._GetExtensionInfoList())
| bsd-3-clause |
Huyuwei/tvm | tests/python/relay/test_pass_fold_scale_axis.py | 2 | 23654 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
from tvm import relay
from tvm.relay import transform
def _get_positive_scale(size):
return np.random.uniform(0.5, 1, size=size).astype('float32')
def run_opt_pass(expr, opt_pass):
    """Apply *opt_pass* to *expr* inside a fresh module and return the result.

    When *expr* is a relay.Function the transformed function is
    returned; otherwise the body of the module's "main" is returned.
    """
    assert isinstance(opt_pass, transform.Pass)
    mod = opt_pass(relay.Module.from_expr(expr))
    entry = mod["main"]
    if isinstance(expr, relay.Function):
        return entry
    return entry.body
def test_fold_fwd_simple():
    """Simple testcase: a scale multiply feeding a conv2d folds forward."""
    def before(x, conv_weight, in_bias, in_scale, channels):
        # Graph with an explicit multiply-by-scale before the conv.
        args = [x, conv_weight, in_bias]
        in_bias = relay.expand_dims(in_bias, axis=1, num_newaxis=2)
        x = relay.multiply(x, in_scale)
        x = relay.nn.relu(x)
        x = relay.add(x, in_bias)
        y = relay.nn.conv2d(x, conv_weight,
                            channels=channels,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        return relay.Function(args, y)
    def expected(x, conv_weight, in_bias, in_scale, channels):
        # use a fixed order of args so alpha equal check can pass
        args = [x, conv_weight, in_bias]
        in_bias = relay.expand_dims(in_bias, axis=1, num_newaxis=2)
        squeezed_scale = relay.squeeze(in_scale, axis=[1,2])
        x = relay.nn.relu(x)
        in_bias = relay.divide(in_bias, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=2))
        x = relay.add(x, in_bias)
        conv_weight = relay.multiply(
            conv_weight , relay.expand_dims(squeezed_scale, axis=1, num_newaxis=2))
        y = relay.nn.conv2d(x, conv_weight,
                            channels=channels,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        return relay.Function(args, y)
    def check(shape, channels):
        # Build both graphs, run the pass, and compare by alpha equality.
        x = relay.var("x", shape=shape)
        in_channels = shape[1]
        weight = relay.var("weight")
        in_bias = relay.var("in_bias", shape=(in_channels,))
        in_scale = relay.const(_get_positive_scale((in_channels, 1, 1)))
        y1 = before(x, weight, in_bias, in_scale, channels)
        y1 = run_opt_pass(y1, transform.InferType())
        type_dict = {x.name_hint:x.checked_type for x in y1.params}
        weight = relay.var("weight", type_dict["weight"])
        y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
        y1_expected = expected(x, weight, in_bias, in_scale, channels)
        y1_folded = run_opt_pass(y1_folded, transform.InferType())
        y1_expected = run_opt_pass(y1_expected, transform.InferType())
        assert relay.analysis.alpha_equal(y1_folded, y1_expected)
    check((2, 4, 10, 10), 2)
def test_fold_fwd_dual_path():
    """scale axis being consumed by two consumers"""
    def before(x, conv_weight, in_bias, in_scale, channels):
        # The scaled input feeds two depthwise convs that share a weight.
        args = [x, conv_weight, in_bias]
        x = relay.multiply(in_scale, x)
        x = relay.nn.relu(x)
        x = relay.subtract(x, in_bias)
        y1 = relay.nn.conv2d(x, conv_weight,
                             channels=channels,
                             kernel_size=(3, 3),
                             data_layout="NHWC",
                             kernel_layout="HWIO",
                             groups=channels,
                             padding=(1, 1))
        y2 = relay.nn.conv2d(x, conv_weight,
                             channels=channels,
                             kernel_size=(3, 3),
                             data_layout="NHWC",
                             kernel_layout="HWIO",
                             groups=channels,
                             padding=(1, 1))
        z = relay.add(y1, y2)
        return relay.Function(args, z)
    def expected(x, conv_weight, in_bias, in_scale, channels):
        # After folding, the scale is pushed into both conv weights.
        args = [x, conv_weight, in_bias]
        x = relay.nn.relu(x)
        in_bias = relay.divide(in_bias, in_scale)
        x = relay.subtract(x, in_bias)
        y1 = relay.nn.conv2d(x,
                             relay.multiply(conv_weight, in_scale),
                             channels=channels,
                             kernel_size=(3, 3),
                             data_layout="NHWC",
                             kernel_layout="HWIO",
                             groups=channels,
                             padding=(1, 1))
        y2 = relay.nn.conv2d(x,
                             relay.multiply(conv_weight, in_scale),
                             channels=channels,
                             kernel_size=(3, 3),
                             data_layout="NHWC",
                             kernel_layout="HWIO",
                             groups=channels,
                             padding=(1, 1))
        z = relay.add(y1, y2)
        return relay.Function(args, z)
    def check(shape, channels):
        x = relay.var("x", shape=shape)
        in_channels = shape[-1]
        # test depthwise
        assert in_channels == channels
        weight = relay.var("weight")
        in_bias = relay.var("in_bias", shape=(in_channels,))
        in_scale = relay.const(_get_positive_scale(in_channels,))
        y1 = before(x, weight, in_bias, in_scale, channels)
        y1 = run_opt_pass(y1, transform.InferType())
        y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
        type_dict = {x.name_hint:x.checked_type for x in y1.params}
        weight = relay.var("weight", type_dict["weight"])
        y1_expected = expected(x, weight, in_bias, in_scale, channels)
        y1_expected = run_opt_pass(y1_expected, transform.InferType())
        assert relay.analysis.alpha_equal(y1_folded, y1_expected)
    check((2, 4, 10, 3), 3)
def test_fold_fwd_fail():
    """Testcase where we cannot fold: the scaled value is also consumed
    directly by an add, so the pass must leave the graph unchanged."""
    def before(x, conv_weight, in_bias, in_scale, channels):
        x = relay.multiply(x, in_scale)
        xx = relay.nn.leaky_relu(x, alpha=0.1)
        y1 = relay.nn.conv2d(xx, conv_weight,
                             channels=channels,
                             kernel_size=(3, 3),
                             data_layout="NHWC",
                             padding=(1, 1))
        z = relay.add(y1, x)
        return relay.Function(relay.analysis.free_vars(z), z)
    def check(shape, channels):
        x = relay.var("x", shape=shape)
        in_channels = shape[-1]
        # test depthwise
        assert in_channels == channels
        weight = relay.var("weight")
        in_bias = relay.var("in_bias", shape=(in_channels,))
        in_scale = relay.const(_get_positive_scale(size=(in_channels,)))
        y1 = before(x, weight, in_bias, in_scale, channels)
        y1 = run_opt_pass(y1, transform.InferType())
        y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
        # The pass should be a no-op on this graph.
        assert relay.analysis.alpha_equal(y1, y1_folded)
    check((2, 11, 10, 4), 4)
def test_fold_fwd_relu_fail():
    """Testcase where we cannot fold because the scale cannot pass relu:
    covers a non-constant scale and a negative constant scale."""
    def before(x, conv_weight, in_bias, in_scale, channels):
        x = relay.multiply(x, in_scale)
        xx = relay.nn.relu(x)
        y1 = relay.nn.conv2d(xx, conv_weight,
                             channels=channels,
                             kernel_size=(3, 3),
                             data_layout="NHWC",
                             padding=(1, 1))
        z = relay.add(y1, x)
        return relay.Function(relay.analysis.free_vars(z), z)
    def check(shape, channels, in_scale):
        x = relay.var("x", shape=shape)
        in_channels = shape[-1]
        # test depthwise
        assert in_channels == channels
        weight = relay.var("weight")
        in_bias = relay.var("in_bias", shape=(in_channels,))
        y1 = before(x, weight, in_bias, in_scale, channels)
        y1 = run_opt_pass(y1, transform.InferType())
        y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
        # The pass should be a no-op on this graph.
        assert relay.analysis.alpha_equal(y1, y1_folded)
    # A variable scale is not a constant and cannot be folded.
    in_scale = relay.var("in_scale", shape=(4,))
    check((2, 11, 10, 4), 4, in_scale)
    # A negative scale cannot be moved across relu.
    in_scale = relay.const(-_get_positive_scale((4,)))
    check((2, 11, 10, 4), 4, in_scale)
def test_fold_fwd_negative_scale():
    """Testcase of folding negative scale: with no relu in between, a
    negative scale can still be folded into the conv weight."""
    def before(x, conv_weight, in_scale, channels):
        args = [x, conv_weight]
        x = relay.multiply(x, in_scale)
        y = relay.nn.conv2d(x, conv_weight,
                            channels=channels,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        return relay.Function(args, y)
    def expected(x, conv_weight, in_scale, channels):
        # use a fixed order of args so alpha equal check can pass
        args = [x, conv_weight]
        squeezed_scale = relay.squeeze(in_scale, axis=[1,2])
        conv_weight = relay.multiply(
            conv_weight , relay.expand_dims(squeezed_scale, axis=1, num_newaxis=2))
        y = relay.nn.conv2d(x,
                            conv_weight,
                            channels=channels,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        return relay.Function(args, y)
    def check(shape, channels):
        x = relay.var("x", shape=shape)
        in_channels = shape[1]
        in_scale = relay.const(-_get_positive_scale((in_channels, 1, 1)))
        weight = relay.var("weight")
        y1 = before(x, weight, in_scale, channels)
        y1 = run_opt_pass(y1, transform.InferType())
        type_dict = {x.name_hint:x.checked_type for x in y1.params}
        weight = relay.var("weight", type_dict["weight"])
        y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
        y1_expected = expected(x, weight, in_scale, channels)
        y1_expected = run_opt_pass(y1_expected, transform.InferType())
        assert relay.analysis.alpha_equal(y1_folded, y1_expected)
    check((2, 4, 10, 10), 4)
def test_fold_bwd_simple():
    """Backward folding: a positive scale applied after conv+bias+relu is
    folded into both the conv weight and the bias."""
    def before(x, conv_weight, out_bias, out_scale, channels):
        args = [x, conv_weight, out_bias]
        out_bias = relay.expand_dims(out_bias, axis=1, num_newaxis=2)
        y = relay.nn.conv2d(x, conv_weight,
                            channels=channels,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        y = relay.add(y, out_bias)
        y = relay.nn.relu(y)
        # The multiply the pass should push back through relu/add/conv.
        y = relay.multiply(y, out_scale)
        return relay.Function(args, y)
    def expected(x, conv_weight, out_bias, out_scale, channels):
        # use a fixed order of args so alpha equal check can pass
        args = [x, conv_weight, out_bias]
        out_bias = relay.expand_dims(out_bias, axis=1, num_newaxis=2)
        squeezed_scale = relay.squeeze(out_scale, axis=[1,2])
        # Scale folded over the weight's output-channel axis (num_newaxis=3).
        conv_weight = relay.multiply(
            conv_weight , relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3))
        y = relay.nn.conv2d(x, conv_weight,
                            channels=channels,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        # The bias must be scaled as well.
        out_bias = relay.multiply(out_bias,
                                  relay.expand_dims(squeezed_scale, axis=1, num_newaxis=2))
        y = relay.add(y, out_bias)
        y = relay.nn.relu(y)
        return relay.Function(args, y)
    def check(shape, channels):
        x = relay.var("x", shape=shape)
        in_channels = shape[1]
        weight = relay.var("weight")
        out_bias = relay.var("out_bias", shape=(channels,))
        # Positive scale: safe to move across relu.
        out_scale = relay.const(_get_positive_scale((channels, 1, 1)))
        y1 = before(x, weight, out_bias, out_scale, channels)
        y1 = run_opt_pass(y1, transform.InferType())
        type_dict = {x.name_hint:x.checked_type for x in y1.params}
        weight = relay.var("weight", type_dict["weight"])
        y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis())
        y1_expected = expected(x, weight, out_bias, out_scale, channels)
        y1_expected = run_opt_pass(y1_expected, transform.InferType())
        assert relay.analysis.alpha_equal(y1_folded, y1_expected)
    check((2, 4, 10, 10), 8)
def test_fold_bwd_dual_path():
    """Backward folding through an add of two conv/relu branches: the scale
    after the add is folded into the weight of *both* convolutions."""
    def before(x, conv_weight, out_bias, out_scale, channels):
        args = [x, conv_weight, out_bias]
        y1 = relay.nn.conv2d(x, conv_weight,
                             channels=channels,
                             kernel_size=(3, 3),
                             padding=(1, 1))
        y1 = relay.nn.relu(y1)
        y2 = relay.nn.conv2d(x, conv_weight,
                             channels=channels,
                             kernel_size=(3, 3),
                             padding=(1, 1))
        y2 = relay.nn.relu(y2)
        y = relay.add(y1, y2)
        y = relay.multiply(y, out_scale)
        return relay.Function(args, y)
    def expected(x, conv_weight, out_bias, out_scale, channels):
        # use a fixed order of args so alpha equal check can pass
        args = [x, conv_weight, out_bias]
        out_bias = relay.expand_dims(out_bias, axis=1, num_newaxis=2)
        squeezed_scale = relay.squeeze(out_scale, axis=[1,2])
        def fold_conv_weight():
            # Each branch gets its own scaled copy of the shared weight.
            return relay.multiply(
                conv_weight ,
                relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3))
        y1 = relay.nn.conv2d(x, fold_conv_weight(),
                             channels=channels,
                             kernel_size=(3, 3),
                             padding=(1, 1))
        y1 = relay.nn.relu(y1)
        y2 = relay.nn.conv2d(x, fold_conv_weight(),
                             channels=channels,
                             kernel_size=(3, 3),
                             padding=(1, 1))
        y2 = relay.nn.relu(y2)
        y = relay.add(y1, y2)
        return relay.Function(args, y)
    def check(shape, channels):
        x = relay.var("x", shape=shape)
        in_channels = shape[1]
        weight = relay.var("weight")
        out_bias = relay.var("out_bias", shape=(channels,))
        out_scale = relay.const(_get_positive_scale((channels, 1, 1)))
        y1 = before(x, weight, out_bias, out_scale, channels)
        y1 = run_opt_pass(y1, transform.InferType())
        type_dict = {x.name_hint:x.checked_type for x in y1.params}
        weight = relay.var("weight", type_dict["weight"])
        y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis())
        y1_expected = expected(x, weight, out_bias, out_scale, channels)
        y1_expected = run_opt_pass(y1_expected, transform.InferType())
        assert relay.analysis.alpha_equal(y1_folded, y1_expected)
    check((2, 4, 10, 10), 8)
def test_fold_bwd_dual_consumer():
    """Backward folding where an intermediate (y0) is consumed by two later
    convolutions; every conv2d carries its own multiply, and all of them
    should be folded into the (shared) weight."""
    def before(x, conv_weight, out_bias, out_scale, channels):
        args = [x, conv_weight, out_bias]
        y0 = relay.nn.conv2d(x, conv_weight,
                             channels=channels,
                             kernel_size=(3, 3),
                             padding=(1, 1))
        y0 = relay.multiply(y0, out_scale)
        y0 = relay.nn.relu(y0)
        y1 = relay.nn.conv2d(y0, conv_weight,
                             channels=channels,
                             kernel_size=(3, 3),
                             padding=(1, 1))
        y1 = relay.multiply(y1, out_scale)
        y1 = relay.nn.relu(y1)
        y2 = relay.nn.conv2d(y0, conv_weight,
                             channels=channels,
                             kernel_size=(3, 3),
                             padding=(1, 1))
        y2 = relay.multiply(y2, out_scale)
        y2 = relay.nn.relu(y2)
        y = relay.add(y1, y2)
        return relay.Function(args, y)
    def expected(x, conv_weight, out_bias, out_scale, channels):
        # use a fixed order of args so alpha equal check can pass
        args = [x, conv_weight, out_bias]
        def fold_conv_weight():
            # Fresh squeeze+multiply per use site, matching the pass output.
            squeezed_scale = relay.squeeze(out_scale, axis=[1,2])
            return relay.multiply(
                conv_weight ,
                relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3))
        y0 = relay.nn.conv2d(x, fold_conv_weight(),
                             channels=channels,
                             kernel_size=(3, 3),
                             padding=(1, 1))
        y0 = relay.nn.relu(y0)
        y1 = relay.nn.conv2d(y0, fold_conv_weight(),
                             channels=channels,
                             kernel_size=(3, 3),
                             padding=(1, 1))
        y1 = relay.nn.relu(y1)
        y2 = relay.nn.conv2d(y0, fold_conv_weight(),
                             channels=channels,
                             kernel_size=(3, 3),
                             padding=(1, 1))
        y2 = relay.nn.relu(y2)
        y = relay.add(y1, y2)
        return relay.Function(args, y)
    def check(shape, channels):
        x = relay.var("x", shape=shape)
        in_channels = shape[1]
        weight = relay.var("weight")
        out_bias = relay.var("out_bias", shape=(channels,))
        out_scale = relay.const(_get_positive_scale((channels,1, 1)))
        y1 = before(x, weight, out_bias, out_scale, channels)
        y1 = run_opt_pass(y1, transform.InferType())
        type_dict = {x.name_hint:x.checked_type for x in y1.params}
        weight = relay.var("weight", type_dict["weight"])
        y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis())
        y1_expected = expected(x, weight, out_bias, out_scale, channels)
        y1_expected = run_opt_pass(y1_expected, transform.InferType())
        assert relay.analysis.alpha_equal(y1_folded, y1_expected)
    check((2, 4, 10, 10), 4)
def test_fold_bwd_fail():
    """Negative testcases for backward folding: the pass must be a no-op."""
    def fail1(x, conv_weight, out_bias, out_scale, channels):
        args = [x, conv_weight, out_bias]
        out_bias = relay.expand_dims(out_bias, axis=1, num_newaxis=2)
        y1 = relay.nn.conv2d(x, conv_weight,
                             channels=channels,
                             kernel_size=(3, 3),
                             padding=(1, 1))
        y1 = relay.nn.relu(y1)
        y2 = relay.nn.conv2d(x, conv_weight,
                             channels=channels,
                             kernel_size=(3, 3),
                             padding=(1, 1),
                             out_layout="CNHW")
        # fold will fail because the axis from two path
        # differs from each other.
        y2 = relay.nn.relu(y2)
        y = relay.add(y1, y2)
        y = relay.multiply(y, out_scale)
        return relay.Function(args, y)
    def fail2(x, conv_weight, out_bias, out_scale, channels):
        args = [x, conv_weight, out_bias]
        out_bias = relay.expand_dims(out_bias, axis=1, num_newaxis=2)
        y1 = relay.nn.conv2d(x, conv_weight,
                             channels=channels,
                             kernel_size=(3, 3),
                             padding=(1, 1))
        y2 = relay.nn.relu(y1)
        # fold will fail because y1 is referred also by y2
        y1 = relay.multiply(y1, out_scale)
        y = relay.add(y1, y2)
        return relay.Function(args, y)
    def check(shape, channels, fbefore):
        x = relay.var("x", shape=shape)
        in_channels = shape[1]
        weight = relay.var("weight")
        out_bias = relay.var("out_bias", shape=(channels,))
        out_scale = relay.const(_get_positive_scale((channels, 1, 1)))
        y1 = fbefore(x, weight, out_bias, out_scale, channels)
        y1 = run_opt_pass(y1, transform.InferType())
        y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis())
        # Expect no change: the folded graph equals the input graph.
        assert relay.analysis.alpha_equal(y1_folded, y1)
    check((4, 4, 10, 10), 4, fail1)
    check((4, 4, 10, 10), 4, fail2)
def test_fold_bwd_relu_fail():
    """Backward folding must be a no-op when the scale cannot pass a relu
    (non-constant scale, or a constant scale with negative entries)."""
    def before(x, conv_weight, out_scale, channels):
        y = relay.nn.conv2d(x, conv_weight,
                            channels=channels,
                            kernel_size=(3, 3),
                            data_layout="NCHW",
                            padding=(1, 1))
        y = relay.nn.relu(y)
        # NOTE(review): this multiplies the *input* x rather than the relu
        # output y, which makes the conv/relu chain dead in the free-vars
        # graph -- confirm against upstream whether multiply(y, out_scale)
        # was intended.
        y = relay.multiply(x, out_scale)
        return relay.Function(relay.analysis.free_vars(y), y)
    def check(shape, channels, out_scale):
        x = relay.var("x", shape=shape)
        in_channels = shape[1]
        weight = relay.var("weight")
        y1 = before(x, weight, out_scale, channels)
        y1 = run_opt_pass(y1, transform.InferType())
        y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis())
        assert relay.analysis.alpha_equal(y1, y1_folded)
    # Non-constant scale.
    out_scale = relay.var("in_scale", shape=(4, 1, 1))
    check((4, 4, 10, 10), 4, out_scale)
    # Constant scale with strictly negative entries; .astype here casts the
    # relay constant expression to float32 (relay Expr.astype inserts a cast).
    out_scale = relay.const(np.random.uniform(size=(4, 1, 1), low=-1.0, high=0.0)).astype("float32")
    check((4, 4, 10, 10), 4, out_scale)
def test_fold_bwd_negative_scale():
    """Backward folding of a negative constant scale: with no relu after the
    conv, sign does not matter and the scale folds into the weight."""
    def before(x, conv_weight, out_scale, channels):
        args = [x, conv_weight]
        y = relay.nn.conv2d(x, conv_weight,
                            channels=channels,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        y = relay.multiply(y, out_scale)
        return relay.Function(args, y)
    def expected(x, conv_weight, out_scale, channels):
        # use a fixed order of args so alpha equal check can pass
        args = [x, conv_weight]
        squeezed_scale = relay.squeeze(out_scale, axis=[1,2])
        # Scale folded over the weight's output-channel axis (num_newaxis=3).
        conv_weight = relay.multiply(
            conv_weight , relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3))
        y = relay.nn.conv2d(x, conv_weight,
                            channels=channels,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        return relay.Function(args, y)
    def check(shape, channels):
        x = relay.var("x", shape=shape)
        weight = relay.var("weight")
        out_scale = relay.const(-_get_positive_scale((channels, 1, 1)))
        y1 = before(x, weight, out_scale, channels)
        y1 = run_opt_pass(y1, transform.InferType())
        type_dict = {x.name_hint:x.checked_type for x in y1.params}
        weight = relay.var("weight", type_dict["weight"])
        y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis())
        y1_expected = expected(x, weight, out_scale, channels)
        y1_expected = run_opt_pass(y1_expected, transform.InferType())
        assert relay.analysis.alpha_equal(y1_folded, y1_expected)
    check((2, 4, 10, 10), 8)
if __name__ == "__main__":
    # Run the full fold-scale-axis test suite when executed directly.
    test_fold_fwd_simple()
    test_fold_fwd_dual_path()
    test_fold_fwd_fail()
    test_fold_fwd_relu_fail()
    test_fold_fwd_negative_scale()
    test_fold_bwd_simple()
    test_fold_bwd_dual_path()
    test_fold_bwd_dual_consumer()
    test_fold_bwd_fail()
    test_fold_bwd_relu_fail()
    test_fold_bwd_negative_scale()
| apache-2.0 |
jcchin/MagnePlane | paper/code/example1.py | 13 | 1663 | from openmdao.main.api import Assembly
from openmdao.lib.datatypes.api import Float, Int
from openmdao.lib.drivers.api import BroydenSolver
from openmdao.lib.casehandlers.api import CSVCaseRecorder
from hyperloop.api import (TubeLimitFlow, CompressionSystem, TubeWallTemp,
Pod, Mission)
class HyperloopPod(Assembly):
    """OpenMDAO assembly declaring the top-level design variables, parameters
    and (future) outputs of the Hyperloop pod model."""
    #Design Variables
    Mach_pod_max = Float(1.0, iotype="in", desc="travel Mach of the pod")
    Mach_c1_in = Float(.6, iotype="in", desc="Mach number at entrance to the first \
compressor at design conditions")
    Mach_bypass = Float(.95, iotype="in", desc="Mach in the air passing around the pod")
    c1_PR_des = Float(12.47, iotype="in", desc="pressure ratio of first compressor at \
design conditions")
    Ps_tube = Float(99, iotype="in", desc="static pressure in the tube", units="Pa",
                    low=0)
    #Parameters
    solar_heating_factor = Float(.7, iotype="in",
      desc="Fractional amount of solar radiation to consider in tube temperature \
calculations", low=0, high=1)
    tube_length = Float(563270, units = 'm', iotype='in', desc='Length of entire\
 Hyperloop')
    pwr_marg = Float(.3, iotype="in", desc="fractional extra energy requirement")
    hub_to_tip = Float(.4, iotype="in", desc="hub to tip ratio for the compressor")
    coef_drag = Float(2, iotype="in", desc="capsule drag coefficient")
    n_rows = Int(14, iotype="in", desc="number of rows of seats in the pod")
    length_row = Float(150, iotype="in", units="cm", desc="length of each row of seats")
    #Outputs
    #would go here if they existed for this assembly
    #var_name = Float(default_val, iotype="out", ...)
r-o-b-b-i-e/pootle | pytest_pootle/fixtures/core/utils/wordcount.py | 11 | 2691 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from collections import OrderedDict
import pytest
# Each entry maps a case name to the input string plus the word counts
# expected from the translate-toolkit ("ttk") and Pootle counters.
# Insertion order is significant: the fixture parametrizes over the keys.
WORDCOUNT_TESTS = OrderedDict([
    ("string_with_repeated_newlines", {
        "string": ("There is a woman in town by the name of Elsie Dolban. She is "
                   "eccentric, and that has caused some of the villagers to "
                   "condemn her. I'm interested in *your* opinion of "
                   "her.\n\nSpeak to Inquisitor Roche, he has strong opinions in "
                   "this matter."),
        "ttk": 44,
        "pootle": 45,
    }),
    ("simple_string", {
        "string": "There is a woman in town by the name of Elsie Dolban.",
        "ttk": 12,
        "pootle": 12,
    }),
    ("dots", {
        "string": "Before.After",
        "ttk": 2,
        "pootle": 1,
    }),
    ("escaped_tags", {
        "string": "<b>",
        "ttk": 1,
        "pootle": 0,
    }),
    ("xml_tags", {
        "string": "<b>",
        "ttk": 0,
        "pootle": 0,
    }),
    ("xml_tags_with_attributes", {
        "string": '<p class="whatever">',
        "ttk": 0,
        "pootle": 0,
    }),
    ("java_format", {
        "string": "\23 said",
        "ttk": 2,
        "pootle": 1,
    }),
    ("template_format", {
        "string": "Hi ${name}",
        "ttk": 2,
        "pootle": 1,
    }),
    ("android_format", {
        "string": "%3$n",
        "ttk": 1,
        "pootle": 0,
    }),
    ("sprintf", {
        "string": "I am %s.",
        "ttk": 3,
        "pootle": 2,
    }),
    ("objective_c", {
        "string": "Hi %@",
        "ttk": 1,
        "pootle": 1,
    }),
    ("dollar_sign", {
        "string": "$name$",
        "ttk": 1,
        "pootle": 0,
    }),
    ("newlines", {
        "string": "\n\n",
        "ttk": 0,
        "pootle": 0,
    }),
    ("escape_sequences", {
        "string": "\r\n\t",
        "ttk": 0,
        "pootle": 0,
    }),
    ("xml_entities", {
        "string": "‐",
        "ttk": 1,
        "pootle": 0,
    }),
    ("numeric_xml_entities", {
        "string": "{",
        "ttk": 1,
        "pootle": 0,
    }),
    ("product_names", {
        "string": "Evernote International",
        "ttk": 2,
        "pootle": 0,
    }),
    ("shortcuts", {
        "string": "Ctrl+A",
        "ttk": 1,
        "pootle": 0,
    }),
    ("shortcuts_modifiers", {
        "string": "Ctrl+",
        "ttk": 1,
        "pootle": 0,
    }),
])
@pytest.fixture(params=WORDCOUNT_TESTS.keys())
def wordcount_names(request):
    """Parametrized fixture yielding each wordcount test-case name in turn."""
    return request.param
| gpl-3.0 |
anisku11/sublimeku | Packages/pygments/all/pygments/token.py | 13 | 5813 | # -*- coding: utf-8 -*-
"""
pygments.token
~~~~~~~~~~~~~~
Basic token types and the standard tokens.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
class _TokenType(tuple):
parent = None
def split(self):
buf = []
node = self
while node is not None:
buf.append(node)
node = node.parent
buf.reverse()
return buf
def __init__(self, *args):
# no need to call super.__init__
self.subtypes = set()
def __contains__(self, val):
return self is val or (
type(val) is self.__class__ and
val[:len(self)] == self
)
def __getattr__(self, val):
if not val or not val[0].isupper():
return tuple.__getattribute__(self, val)
new = _TokenType(self + (val,))
setattr(self, val, new)
self.subtypes.add(new)
new.parent = self
return new
def __repr__(self):
return 'Token' + (self and '.' or '') + '.'.join(self)
# Root of the token-type hierarchy; all standard types hang off this object.
Token = _TokenType()
# Special token types
Text = Token.Text
Whitespace = Text.Whitespace
Escape = Token.Escape
Error = Token.Error
# Text that doesn't belong to this lexer (e.g. HTML in PHP)
Other = Token.Other
# Common token types for source code
Keyword = Token.Keyword
Name = Token.Name
Literal = Token.Literal
String = Literal.String
Number = Literal.Number
Punctuation = Token.Punctuation
Operator = Token.Operator
Comment = Token.Comment
# Generic types for non-source code
Generic = Token.Generic
# String and some others are not direct children of Token.
# Alias them on Token so e.g. Token.String also resolves:
Token.Token = Token
Token.String = String
Token.Number = Number
def is_token_subtype(ttype, other):
    """
    Return True if ``ttype`` is a subtype of ``other``.

    Kept only for backwards compatibility; new code should use the
    equivalent containment test ``ttype in other`` directly.
    """
    return ttype in other
def string_to_tokentype(s):
    """Convert a dotted string such as ``'String.Double'`` or
    ``'Token.Literal.Number'`` into the corresponding token type.

    A value that is already a ``_TokenType`` is returned unchanged, and
    the empty string maps to the root ``Token``.
    """
    if isinstance(s, _TokenType):
        return s
    node = Token
    if s:
        for part in s.split('.'):
            node = getattr(node, part)
    return node
# Map standard token types to short names, used in CSS class naming.
# If you add a new item, please be sure to run this file to perform
# a consistency check for duplicate values.
STANDARD_TYPES = {
    # Root / special types
    Token:                         '',
    Text:                          '',
    Whitespace:                    'w',
    Escape:                        'esc',
    Error:                         'err',
    Other:                         'x',
    # Keywords
    Keyword:                       'k',
    Keyword.Constant:              'kc',
    Keyword.Declaration:           'kd',
    Keyword.Namespace:             'kn',
    Keyword.Pseudo:                'kp',
    Keyword.Reserved:              'kr',
    Keyword.Type:                  'kt',
    # Names
    Name:                          'n',
    Name.Attribute:                'na',
    Name.Builtin:                  'nb',
    Name.Builtin.Pseudo:           'bp',
    Name.Class:                    'nc',
    Name.Constant:                 'no',
    Name.Decorator:                'nd',
    Name.Entity:                   'ni',
    Name.Exception:                'ne',
    Name.Function:                 'nf',
    Name.Property:                 'py',
    Name.Label:                    'nl',
    Name.Namespace:                'nn',
    Name.Other:                    'nx',
    Name.Tag:                      'nt',
    Name.Variable:                 'nv',
    Name.Variable.Class:           'vc',
    Name.Variable.Global:          'vg',
    Name.Variable.Instance:        'vi',
    # Literals
    Literal:                       'l',
    Literal.Date:                  'ld',
    String:                        's',
    String.Backtick:               'sb',
    String.Char:                   'sc',
    String.Doc:                    'sd',
    String.Double:                 's2',
    String.Escape:                 'se',
    String.Heredoc:                'sh',
    String.Interpol:               'si',
    String.Other:                  'sx',
    String.Regex:                  'sr',
    String.Single:                 's1',
    String.Symbol:                 'ss',
    Number:                        'm',
    Number.Bin:                    'mb',
    Number.Float:                  'mf',
    Number.Hex:                    'mh',
    Number.Integer:                'mi',
    Number.Integer.Long:           'il',
    Number.Oct:                    'mo',
    # Operators and punctuation
    Operator:                      'o',
    Operator.Word:                 'ow',
    Punctuation:                   'p',
    # Comments
    Comment:                       'c',
    Comment.Hashbang:              'ch',
    Comment.Multiline:             'cm',
    Comment.Preproc:               'cp',
    Comment.Single:                'c1',
    Comment.Special:               'cs',
    # Generic (non-source) types
    Generic:                       'g',
    Generic.Deleted:               'gd',
    Generic.Emph:                  'ge',
    Generic.Error:                 'gr',
    Generic.Heading:               'gh',
    Generic.Inserted:              'gi',
    Generic.Output:                'go',
    Generic.Prompt:                'gp',
    Generic.Strong:                'gs',
    Generic.Subheading:            'gu',
    Generic.Traceback:             'gt',
}
| mit |
bt3gl/Project-Euler | 065-100th-e-numerator.py | 2 | 1065 | #!/usr/bin/python
__author__ = "Mari Wahl"
__email__ = "marina.w4hl@gmail.com"
'''
e = [2; 1,2,1, 1,4,1, 1,6,1 , ... , 1,2k,1, ...].
The first ten terms in the sequence of convergents for e are:
2, 3, 8/3, 11/4, 19/7, 87/32, 106/39, 193/71, 1264/465, 1457/536, ...
The sum of digits in the numerator of the 10th convergent is 1+4+5+7=17.
Find the sum of digits in the numerator of the 100th convergent of the continued fraction for e.
'''
from itertools import islice
def take(iterable, n):
    """Return the first *n* items of *iterable* as a list."""
    return [item for item in islice(iterable, n)]
def e():
    """Yield the continued-fraction terms of e: [2; 1,2,1, 1,4,1, 1,6,1, ...]."""
    yield 2
    even = 2  # the middle term of each (1, 2k, 1) triple
    while True:
        yield 1
        yield even
        yield 1
        even += 2
def rationalize(frac):
    """Fold a list of continued-fraction terms into (numerator, denominator).

    An empty list yields (1, 0), matching the recursive base case of the
    standard right-to-left convergent recurrence.
    """
    num, denom = 1, 0
    for term in reversed(frac):
        num, denom = term * num + denom, num
    return (num, denom)
# Numerator of the 100th convergent of the continued fraction for e.
numerator = rationalize(take(e(), 100))[0]
# BUG FIX: the original used the Python-2-only print statement (with stray
# dataset text appended); the parenthesized form is identical under Python 2
# for a single argument and valid under Python 3.
print(sum(int(d) for d in str(numerator)))
devigned/autorest | Samples/azure-storage/Azure.Python/storagemanagementclient/operations/usage_operations.py | 3 | 3650 | # coding=utf-8
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class UsageOperations(object):
    """UsageOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client Api Version. Constant value: "2015-06-15".
    """
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2015-06-15"
        self.config = config
    def list(
            self, custom_headers=None, raw=False, **operation_config):
        """Gets the current usage count and the limit for the resources under the
        subscription.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`UsagePaged <petstore.models.UsagePaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Paging callback: invoked by UsagePaged for the first page
        # (next_link is None) and for every continuation link thereafter.
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/usages'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                # Continuation links are already fully formed.
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)
            if response.status_code not in [200]:
                # Surface Azure error payloads as CloudError with request id.
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.UsagePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.UsagePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
| mit |
oxnz/NZChat | NZChat/cast/test.py | 2 | 5326 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
# server has a table of thread, which thread update the process bar
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtNetwork import *
import sys
import time
class NZFileSendThread(QThread):
    """Worker thread intended to send a file to a peer (body is a stub)."""
    def __init__(self, parent=None):
        # BUG FIX: the original called super() with the misspelled class name
        # 'NZFileSenderThread', which raised NameError on instantiation.
        super(NZFileSendThread, self).__init__(parent)
    def run(self):
        # Parenthesized print is identical under Python 2 for one argument.
        print('run send thread')
class NZFileRecvThread(QThread):
    """Worker thread that adopts an accepted socket descriptor and reads an
    incoming file transfer from it."""
    def __init__(self, socketDescriptor, parent=None):
        super(NZFileRecvThread, self).__init__(parent)
        self.sd = socketDescriptor   # native descriptor handed over by the server
        self.done = False            # NOTE(review): never set True anywhere -> loop only exits on disconnect path; confirm intent
    def run(self):
        # The QTcpSocket must be created in this thread, from the raw descriptor.
        self.s = QTcpSocket()
        # NOTE(review): SLOT(self.displayError(...)) *calls* displayError
        # immediately instead of connecting it as a slot -- verify; the
        # PyQt idiom is connecting the bound method directly.
        self.connect(self.s,
                     SIGNAL('error(QAbstractSocket.SocketError)'),
                     SLOT(self.displayError(QAbstractSocket.SocketError)))
        if not self.s.setSocketDescriptor(self.sd):
            print 'error in set descriptor'
        self.s.readyRead.connect(self.readFileBlock)
        while not self.done:
            self.s.waitForReadyRead()
            self.s.write(QByteArray('hello from server'))
        self.s.disconnectFromHost()
        self.s.waitForDisconnected()
    def readFileBlock(self):
        # TODO: actual file-block reading is not implemented yet.
        pass
    def displayError(self, socketError):
        print 'error while thread processing file'
class NZFileRecvServer(QTcpServer):
    """TCP server that spawns one NZFileRecvThread per incoming connection."""
    def __init__(self, parent=None):
        super(NZFileRecvServer, self).__init__(parent)
    def incomingConnection(self, socketDescriptor):
        # Hand the raw descriptor to a worker thread; the thread deletes
        # itself when finished.
        rt = NZFileRecvThread(socketDescriptor, self)
        rt.finished.connect(rt.deleteLater)
        rt.start()
class NZFileRecvModel(QAbstractTableModel):
    """Table model backing the incoming file-transfer list; also owns the
    TCP server that accepts transfers."""
    def __init__(self, parent=None):
        super(NZFileRecvModel, self).__init__(parent)
        # Column headers (file, size, speed, sender, progress).
        self.__sections = ['文件', '大小', '速度', '来自','进度']
        self.__tasks = []
        self.__recvServer = NZFileRecvServer(self)
        # NOTE(review): the listen address/port are hard-coded; the
        # commented line below was the listen-on-any variant.
        # if not self.__recvServer.listen(QHostAddress.Any, 8889):
        if not self.__recvServer.listen(QHostAddress('192.168.0.101'), 8889):
            print 'NZFileRecvServer error:', self.__recvServer.errorString()
            # NOTE(review): `self` is a model, not a QWidget, so passing it
            # as the QMessageBox parent looks wrong; also mixes Qt's %1
            # placeholders with Python -- verify before relying on this path.
            QMessageBox.critical(self,
                    self.tr('threaded file recv'),
                    self.tr('unalbe to start server: %1').
                    arg(self.__recvServer.errorString()));
    def reportError(self, socketError):
        print 'a errror han'
        if socketError == QAbstractSocket.RemoteHostClosedError:
            print 'remove closed'
        elif socketError == QAbstractSocket.HostNotFoundError:
            print 'not found'
        elif socketError == QAbstractSocket.ConnectionRefusedError:
            print 'refused'
        else:
            # NOTE(review): self.cc is never defined on this class -- this
            # branch would raise AttributeError if reached.
            print 'server unkonw error:', self.cc.errorString()
    def sort(self, column, order):
        # TODO: sorting is not implemented.
        print 'sort'
    def columnCount(self, index):
        return len(self.__sections)
    def rowCount(self, index):
        return len(self.__tasks)
    def headerData(self, section, orientation, role):
        # Horizontal headers come from __sections; anything else falls back
        # to the base implementation via the except branch below.
        results = {Qt.Vertical: {
                    Qt.DisplayRole: QVariant(),
                    },
                Qt.Horizontal: {
                    Qt.DisplayRole: lambda: self.__sections[section],
                    },
                }
        try:
            return results[orientation][role]()
        except:
            return QAbstractTableModel.headerData(self, section, orientation, role)
    def data(self, index, role):
        if not index.isValid():
            return QVariant()
        row = index.row()
        col = index.column()
        # TODO: placeholder -- real task data is not wired up yet.
        return 'test'
    def setData(self, index, value, role):
        # TODO: placeholder; should store the value and return a bool.
        print 'set'
class NZFileSendModel(QAbstractTableModel):
    """Table model backing the outgoing file-transfer list."""
    def __init__(self, parent=None):
        super(NZFileSendModel, self).__init__(parent)
        # Column headers (file, size, speed, destination, progress).
        self.__sections = ['文件', '大小', '速度', '发往','进度']
        self.__tasks = []
    def sort(self, column, order):
        # TODO: sorting is not implemented.
        print('sort')
    def columnCount(self, index):
        return len(self.__sections)
    def rowCount(self, index):
        return len(self.__tasks)
    def headerData(self, section, orientation, role):
        # Horizontal DisplayRole headers come from __sections; any other
        # combination falls through to the base implementation.
        results = {Qt.Vertical: {
                    Qt.DisplayRole: QVariant(),
                    },
                Qt.Horizontal: {
                    Qt.DisplayRole: lambda: self.__sections[section],
                    },
                }
        try:
            # NOTE: the Qt.Vertical entry is a QVariant instance, so the
            # call below raises and deliberately falls back to the default.
            return results[orientation][role]()
        except Exception:
            return QAbstractTableModel.headerData(self, section, orientation, role)
    def flags(self, index):
        flags = QAbstractTableModel.flags(self, index)
        # BUG FIX: the original used the misspelled 'Qt.ItemIsEditalbe',
        # which raised AttributeError whenever flags() was queried.
        flags |= Qt.ItemIsEditable
        return flags
    def data(self, index, role):
        if not index.isValid():
            return QVariant()
        row = index.row()
        col = index.column()
        # TODO: placeholder -- real task data is not wired up yet.
        return 'test'
    def setData(self, index, value, role):
        # TODO: placeholder; should store the value and return a bool.
        print('set')
class NZFileTransfer(QFrame):
    """Main file-transfer window: send table on top, receive table below,
    with Send/Recv buttons along the bottom."""
    def __init__(self, parent=None):
        super(NZFileTransfer, self).__init__(parent)
        self.resize(400, 300)
        vbox = QVBoxLayout(self)
        self.setLayout(vbox)
        # Outgoing transfers.
        self.sendTable = QTableView(self)
        self.sendTable.setModel(NZFileSendModel(self))
        self.sendTable.setSortingEnabled(True)
        self.sendTable.setEditTriggers(QAbstractItemView.DoubleClicked | QAbstractItemView.SelectedClicked)
        self.sendTable.horizontalHeader().setResizeMode(QHeaderView.Stretch)
        # Incoming transfers (the model also starts the TCP server).
        self.recvTable = QTableView(self)
        self.recvTable.setModel(NZFileRecvModel(self))
        self.recvTable.setSortingEnabled(True)
        self.recvTable.setEditTriggers(QAbstractItemView.DoubleClicked | QAbstractItemView.SelectedClicked)
        self.recvTable.horizontalHeader().setResizeMode(QHeaderView.Stretch)
        vbox.addWidget(self.sendTable)
        vbox.addStretch()
        vbox.addWidget(self.recvTable)
        sendButton = QPushButton("Send")
        recvButton = QPushButton("Recv")
        hbox = QHBoxLayout()
        hbox.addWidget(sendButton)
        hbox.addStretch()
        hbox.addWidget(recvButton)
        vbox.addLayout(hbox)
        # NOTE(review): only the Send button is wired up; Recv does nothing.
        sendButton.clicked.connect(self.sendFile)
    def sendFile(self):
        # TODO: actual sending is not implemented yet.
        print 'send file'
# Script entry point: build the transfer window and run the Qt event loop.
app = QApplication(sys.argv)
x = NZFileTransfer()
x.show()
app.exec_()
| mit |
jwheare/digest | digestfetch.py | 1 | 8721 | #!/usr/bin/env python
# encoding: utf-8
"""
digestfetch.py
Fetch bitesize content from the world of the web for use in a daily digest pocketmod
"""
import sitecustomize
# Builtin modules
from copy import copy
from operator import itemgetter
import re
import time, datetime
import urllib, urllib2, httplib
# 3rd party modules
import pylast
import gdata.calendar.service
from BeautifulSoup import BeautifulSoup, BeautifulStoneSoup, SoupStrainer
import simplejson
import feedparser
import flickrapi
# Settings, keys, passwords
from settings import *
def lastfm_auth():
    """Authenticate with the Last.fm API (one-off interactive desktop flow).

    Obtains a request token, asks the user to approve it in a browser,
    then prints the resulting session key so it can be saved in settings.
    """
    sg = pylast.SessionGenerator(LASTFM_KEY, LASTFM_SECRET)
    token = sg.getToken()
    auth_url = sg.getAuthURL(token)
    print "Please open the following URL in your web browser and complete the authentication process, then press Enter to continue..."
    print auth_url
    raw_input()
    data = sg.getSessionKey(token)
    print data
def lastfm_event_recommendations():
    """Fetch a list of (up to 6) recommended events for the configured
    Last.fm user, using the stored session key."""
    user = pylast.User('jwheare', LASTFM_KEY, LASTFM_SECRET, LASTFM_SESSION)
    events = user.getRecommendedEvents(limit=6)
    return events
def get_tube_colors():
    """Return a mapping of London Underground/DLR line identifier to a
    (foreground, background) pair of hex colour strings."""
    palette = [
        ("bakerloo",           "ffffff", "ae6118"),
        ("central",            "ffffff", "e41f1f"),
        ("circle",             "113b92", "f8d42d"),
        ("district",           "ffffff", "00a575"),
        ("eastlondon",         "113b92", "f2ad41"),
        ("hammersmithandcity", "113b92", "e899a8"),
        ("jubilee",            "ffffff", "8f989e"),
        ("metropolitan",       "ffffff", "893267"),
        ("northern",           "ffffff", "000000"),
        ("piccadilly",         "ffffff", "0450a1"),
        ("victoria",           "ffffff", "009fe0"),
        ("waterlooandcity",    "113b92", "70c3ce"),
        ("dlr",                "ffffff", "00bbb4"),
    ]
    return dict((line, (fg, bg)) for line, fg, bg in palette)
def tube_status():
    """Fetch and scrape the TfL live-travel-news page.

    Returns a (line_status, station_status) pair:
      line_status    -- {css_class: (line name, status heading or "")}
      station_status -- {category heading: [station names]}
    NOTE(review): screen-scraping; breaks whenever TfL changes its markup.
    """
    url = "http://www.tfl.gov.uk/tfl/livetravelnews/realtime/tube/later.html"
    # Only parse the #service-board div to keep the soup small.
    soup = BeautifulSoup(urllib.urlopen(url), markupMassage=BeautifulSoup.MARKUP_MASSAGE,
        parseOnlyThese=SoupStrainer("div", { "id": "service-board" }),
        convertEntities=BeautifulStoneSoup.HTML_ENTITIES)
    # Parse line status
    lines = soup.find("dl", { "id": "lines" }).findAll("dt")
    line_status = {}
    for line in lines:
        status = line.findNext("dd")
        if status.h3:
            line_status[line['class']] = (line.string, status.h3.string)
        else:
            line_status[line['class']] = (line.string, "")
    # Parse station status
    station_categories = soup.find("dl", { "id": "stations" }).findAll("dt")
    station_status = {}
    for category in station_categories:
        stations = []
        # Walk dt/dd siblings until the next category heading (dt).
        # NOTE(review): `next` shadows the builtin of the same name.
        next = category.findNextSibling(re.compile("dt|dd"))
        while next and next.name != u"dt":
            if next.h3:
                stations.append(next.h3.string)
            next = next.findNextSibling(re.compile("dt|dd"))
        station_status[category.string] = stations
    return line_status, station_status
def twitter_friends():
    """Fetch the authenticated user's friends timeline from Twitter.

    Returns the decoded list of status dicts, or None if the request fails
    (the HTTPError is printed and swallowed).
    """
    url = "http://twitter.com/statuses/friends_timeline.json"
    # HTTP Basic auth with the credentials from settings.
    passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
    passman.add_password("Twitter API", "twitter.com", TWITTER_USERNAME, TWITTER_PASSWORD)
    authhandler = urllib2.HTTPBasicAuthHandler(passman)
    opener = urllib2.build_opener(authhandler)
    try:
        json = opener.open(url).read()
        statuses = simplejson.loads(json)
        return statuses
    except urllib2.HTTPError, e:
        print e
def newsgator_headlines():
    """Fetch unread headlines for the configured Newsgator location.

    Returns the feedparser result, or None on HTTP failure (the error is
    printed and swallowed).
    """
    url = "http://services.newsgator.com/ngws/svc/Subscription.aspx/%s/headlines" % NEWSGATOR_LOCATIONID
    # HTTP Basic auth plus the Newsgator API token header.
    passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
    passman.add_password("NewsGator Online Services", "services.newsgator.com", NEWSGATOR_USERNAME, NEWSGATOR_PASSWORD)
    authhandler = urllib2.HTTPBasicAuthHandler(passman)
    opener = urllib2.build_opener(authhandler)
    opener.addheaders = [
        ('X-NGAPIToken', NEWSGATOR_KEY)
    ]
    try:
        data = feedparser.parse(opener.open(url))
        return data
    except urllib2.HTTPError, e:
        print e
def gcal_events():
    """Fetch events from a list of Google Calendars.

    Queries every calendar in GCAL_CALENDARS for events starting in the
    next three days and returns them as a list of dicts (color, title,
    comments, allday, location, start) sorted by start time.
    NOTE(review): Python 2 only (iteritems, print statements) and uses
    the retired gdata Calendar API.
    """
    calendar_service = gdata.calendar.service.CalendarService()
    calendar_service.email = GCAL_USERNAME
    calendar_service.password = GCAL_PASSWORD
    calendar_service.ProgrammaticLogin()
    # feed = calendar_service.GetAllCalendarsFeed()
    # for i, calendar in enumerate(feed.entry):
    #     print '\t%s. %s (%s %s)' % (i, calendar.title.text, calendar.color.value, calendar.id.text.split('/')[-1])
    events = []
    # Three-day window starting today (UTC dates).
    start_min = time.strftime('%Y-%m-%d', time.gmtime(time.time()))
    start_max = time.strftime('%Y-%m-%d', time.gmtime(time.time() + 60*60*24*3))
    for (calendar, info) in GCAL_CALENDARS.iteritems():
        cal_name, color = info
        print u'•', cal_name
        query = gdata.calendar.service.CalendarEventQuery(calendar, 'private', 'composite')
        query.start_min = start_min
        query.start_max = start_max
        query.orderby = 'starttime'
        query.sortorder = 'ascending'
        try:
            feed = calendar_service.CalendarQuery(query)
            # print feed
            for event in feed.entry:
                if event.when:
                    # Collect non-empty comments attached to the event.
                    comments = []
                    if event.comments and event.comments.feed_link and event.comments.feed_link.feed:
                        for c in event.comments.feed_link.feed.entry:
                            if c.content.text:
                                comments.append({
                                    'author': c.author[0].name.text,
                                    'content': c.content.text,
                                })
                    event_info = {
                        'color': color,
                        'title': event.title.text,
                        'comments': comments,
                        'allday': False,
                        'location': event.where[0].value_string
                    }
                    # Start times arrive in three formats: UTC timestamp,
                    # +01:00 timestamp, or a bare date (all-day event).
                    try:
                        start = datetime.datetime.strptime(event.when[0].start_time, "%Y-%m-%dT%H:%M:%S.000Z")
                    except ValueError:
                        try:
                            start = datetime.datetime.strptime(event.when[0].start_time, "%Y-%m-%dT%H:%M:%S.000+01:00")
                        except ValueError:
                            start = datetime.datetime.strptime(event.when[0].start_time, "%Y-%m-%d")
                            event_info['allday'] = True
                    event_info['start'] = start
                    events.append(event_info)
        except httplib.BadStatusLine, e:
            # Best-effort: skip a calendar whose query failed mid-response.
            print "! %s" % e
    events.sort(key=itemgetter('start'))
    return events
def weather():
    """Fetch the BBC 5-day forecast feed and the Met Office warnings
    feed for the configured location/region.

    Returns a ``(forecast_data, warning_data)`` pair of feedparser
    results.  NOTE(review): Python 2 only (urllib.urlopen).
    """
    forecast_url = "http://feeds.bbc.co.uk/weather/feeds/rss/5day/world/%s.xml" % BBC_WEATHER_LOCATION
    forecast_data = feedparser.parse(urllib.urlopen(forecast_url))
    warning_url = "http://www.metoffice.gov.uk/xml/warnings_rss_%s.xml" % MET_WEATHER_REGION
    warning_data = feedparser.parse(urllib.urlopen(warning_url))
    return forecast_data, warning_data
def flickr_auth():
    """Authenticate with the Flickr API and return the client object."""
    flickr = flickrapi.FlickrAPI(FLICKR_KEY, FLICKR_SECRET)
    token, frob = flickr.get_token_part_one(perms='read')
    if not token:
        # First run: the user must approve the app in a browser before
        # the token exchange can complete.
        raw_input("Press ENTER after you authorized this program")
    flickr.get_token_part_two((token, frob))
    return flickr
def contact_photo():
    """Return the first landscape-orientation 'Original' photo uploaded
    by any contact in the last 24 hours, as a ``(photo, size)`` pair of
    XML elements.

    NOTE(review): implicitly returns None when no matching photo is
    found.  Python 2 only (print statements).
    """
    flickr = flickr_auth()
    yesterday = datetime.datetime.now() - datetime.timedelta(hours=24)
    photos = flickr.photos_search(
        user_id='me',
        contacts='all',
        media='photos',
        sort='interestingness-desc',
        min_upload_date=int(time.mktime(yesterday.timetuple())),
        extras='owner_name,tags,date_taken'
    )
    print "Searching",
    for p in photos.findall('photos/photo'):
        print '#',
        sizes = flickr.photos_getSizes(photo_id=p.attrib['id'])
        for size in sizes.findall("sizes/size"):
            print '.',
            if size.attrib['label'] == u'Original':
                # Landscape orientation only.
                if int(size.attrib['width']) > int(size.attrib['height']):
                    print 'done'
                    return p, size
if __name__ == '__main__':
    # Manual entry point: fetch and print upcoming calendar events.
    # (Removed dataset-concatenation residue " | bsd-3-clause |" that
    # made this line a syntax error.)
    gcal_events()
looker/sentry | src/sentry/runner/settings.py | 3 | 5134 | """
sentry.runner.settings
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import os
import click
# Dotted path of the Django settings module that supplies defaults.
DEFAULT_SETTINGS_MODULE = 'sentry.conf.server'
# File names (inside the config directory) of the optional YAML config
# and the optional Python settings override.
DEFAULT_SETTINGS_CONF = 'config.yml'
DEFAULT_SETTINGS_OVERRIDE = 'sentry.conf.py'
def generate_secret_key():
    """Return a random 50-character secret suitable for SECRET_KEY."""
    from django.utils.crypto import get_random_string
    key_length = 50
    alphabet = u'abcdefghijklmnopqrstuvwxyz0123456789!@#%^&*(-_=+)'
    return get_random_string(key_length, alphabet)
def load_config_template(path, version='default'):
    """Load a bundled config template as unicode text.

    Reads ``sentry/data/config/<path>.<version>`` from the installed
    package and decodes it as UTF-8.
    """
    from pkg_resources import resource_string
    resource_path = 'data/config/%s.%s' % (path, version)
    raw = resource_string('sentry', resource_path)
    return raw.decode('utf8')
def generate_settings(dev=False):
    """
    Render the initial configuration for a fresh install.

    This is used when ``default_path`` doesn't exist, or when ``init``
    is run.  Returns a ``(python_source, yaml_source)`` pair rendered
    from the bundled templates with a freshly generated secret key;
    ``dev=True`` enables debug mode and the console mail backend.
    """
    template_context = {
        'secret_key': generate_secret_key(),
        'debug_flag': dev,
        'mail.backend': 'console' if dev else 'smtp',
    }
    rendered_py = load_config_template(DEFAULT_SETTINGS_OVERRIDE, 'default') % template_context
    rendered_yaml = load_config_template(DEFAULT_SETTINGS_CONF, 'default') % template_context
    return rendered_py, rendered_yaml
def get_sentry_conf():
    """
    Resolve the configuration location.

    Preference order: the value stored on the click context, then the
    SENTRY_CONF environment variable, then the ``~/.sentry`` default.
    """
    try:
        return click.get_current_context().obj['config']
    except (RuntimeError, KeyError, TypeError):
        # No usable click context: fall back to the environment.
        return os.environ.get('SENTRY_CONF', '~/.sentry')
def discover_configs():
    """
    Locate the three configuration components:

    * the config directory (default ``~/.sentry``)
    * the optional python override (``~/.sentry/sentry.conf.py``)
    * the optional yaml config (``~/.sentry/config.yml``)

    Returns ``(directory, python_path, yaml_path)``.  ``yaml_path`` is
    ``None`` on the deprecated code path where SENTRY_CONF points
    directly at a python file.
    """
    base = os.path.expanduser(os.environ.get('SENTRY_CONF', '~/.sentry'))
    # Deprecated: SENTRY_CONF names a python file rather than a directory.
    if base.endswith(('.py', '.conf')) or os.path.isfile(base):
        return (os.path.dirname(base), base, None, )
    return (
        base,
        os.path.join(base, DEFAULT_SETTINGS_OVERRIDE),
        os.path.join(base, DEFAULT_SETTINGS_CONF),
    )
def configure(ctx, py, yaml, skip_service_validation=False):
    """
    Given the two different config files, set up the environment.

    :param ctx: click context (or None when called outside the CLI);
        controls whether errors surface as ClickException or ValueError.
    :param py: path to the python settings override (sentry.conf.py).
    :param yaml: path to the yaml config (config.yml), or None on the
        deprecated code path where SENTRY_CONF points at a single file.
    :param skip_service_validation: passed through to initialize_app().

    NOTE: Will only execute once, so it's safe to call multiple times.
    """
    global __installed
    if __installed:
        return
    # Make sure that our warnings are always displayed
    import warnings
    warnings.filterwarnings('default', '', Warning, r'^sentry')
    # Add in additional mimetypes that are useful for our static files
    # which aren't common in default system registries
    import mimetypes
    for type, ext in (
        ('application/json', 'map'), ('application/font-woff', 'woff'),
        ('application/font-woff2', 'woff2'), ('application/vnd.ms-fontobject', 'eot'),
        ('application/x-font-ttf', 'ttf'), ('application/x-font-ttf',
                                            'ttc'), ('font/opentype', 'otf'),
    ):
        mimetypes.add_type(type, '.' + ext)
    from .importer import install
    if yaml is None:
        # `yaml` will be None when SENTRY_CONF is pointed
        # directly to a file, in which case, this file must exist
        if not os.path.exists(py):
            if ctx:
                raise click.ClickException(
                    "Configuration file does not exist. Use 'sentry init' to initialize the file."
                )
            raise ValueError(
                "Configuration file does not exist at '%s'" % click.format_filename(py)
            )
    elif not os.path.exists(yaml) and not os.path.exists(py):
        # Neither config component exists: the install was never initialized.
        if ctx:
            raise click.ClickException(
                "Configuration file does not exist. Use 'sentry init' to initialize the file."
            )
        raise ValueError("Configuration file does not exist at '%s'" % click.format_filename(yaml))
    # Add autoreload for config.yml file if needed
    if yaml is not None and os.path.exists(yaml):
        from sentry.utils.uwsgi import reload_on_change
        reload_on_change(yaml)
    # Install the python override as the 'sentry_config' settings module.
    os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry_config'
    install('sentry_config', py, DEFAULT_SETTINGS_MODULE)
    # HACK: we need to force access of django.conf.settings to
    # ensure we don't hit any import-driven recursive behavior
    from django.conf import settings
    hasattr(settings, 'INSTALLED_APPS')
    from .initializer import initialize_app, on_configure
    initialize_app(
        {
            'config_path': py,
            'settings': settings,
            'options': yaml,
        },
        skip_service_validation=skip_service_validation
    )
    on_configure({'settings': settings})
    __installed = True
__installed = False
# License: BSD-3-Clause
nathandaddio/puzzle_app | puzzle_app/puzzle_app/tests/views/test_hitori_views.py | 1 | 2961 | import pytest
from pyramid.exceptions import HTTPNotFound
from puzzle_app.views.hitori import hitori_boards_get, hitori_board_get
from factories import (
HitoriGameBoardFactory,
HitoriGameBoardCellFactory
)
class TestHitoriGameBoardsGet:
    """Tests for the hitori board list/detail GET views."""

    @pytest.fixture
    def board(self, db_session):
        # A persisted 5x5 board with no solution computed yet.
        board = HitoriGameBoardFactory(number_of_rows=5, number_of_columns=5)
        db_session.add(board)
        db_session.commit()
        return board

    @pytest.fixture
    def cells(self, db_session, board):
        # Two cells inserted deliberately out of (row, column) order to
        # exercise the view's sorting.
        cells = [
            HitoriGameBoardCellFactory(hitori_game_board=board, row_number=3, column_number=4, value=6),
            HitoriGameBoardCellFactory(hitori_game_board=board, row_number=2, column_number=5, value=6)
        ]
        db_session.add_all(cells)
        db_session.commit()
        return cells

    @pytest.fixture
    def boards_response(self, dummy_request):
        # Response of the list endpoint.
        return hitori_boards_get(dummy_request)

    @pytest.fixture
    def expected_boards_response(self, board, cells):
        # Expected serialized payload for the board created above.
        return [
            {
                'id': board.id,
                'number_of_rows': board.number_of_rows,
                'number_of_columns': board.number_of_columns,
                'solved': False,
                'feasible': None,
                'cells': [  # note that the order of the cells changes as we return (row, column) order of cells
                    {
                        'id': cells[1].id,
                        'row_number': cells[1].row_number,
                        'column_number': cells[1].column_number,
                        'value': cells[1].value,
                        'included_in_solution': None
                    },
                    {
                        'id': cells[0].id,
                        'row_number': cells[0].row_number,
                        'column_number': cells[0].column_number,
                        'value': cells[0].value,
                        'included_in_solution': None
                    }
                ]
            }
        ]

    def test_hitori_game_boards_get(self, board, cells, boards_response, expected_boards_response):
        assert boards_response == expected_boards_response

    @pytest.fixture
    def board_request(self, board, dummy_request):
        # Detail request addressing the existing board.
        dummy_request.matchdict['board_id'] = board.id
        return dummy_request

    @pytest.fixture
    def board_response(self, board_request):
        return hitori_board_get(board_request)

    def test_hitori_game_board_get(self, board, cells, board_response, expected_boards_response):
        assert board_response == expected_boards_response[0]

    @pytest.fixture
    def bad_board_id_request(self, dummy_request):
        # board_id 100 does not exist in the test database.
        dummy_request.matchdict['board_id'] = 100
        return dummy_request

    def test_board_get_bad_id(self, bad_board_id_request):
        with pytest.raises(HTTPNotFound):
            hitori_board_get(bad_board_id_request)
# License: MIT
kcarnold/autograd | examples/fluidsim/fluidsim.py | 2 | 4623 | from __future__ import absolute_import
from __future__ import print_function
import autograd.numpy as np
from autograd import value_and_grad
from scipy.optimize import minimize
from scipy.misc import imread
import matplotlib
import matplotlib.pyplot as plt
import os
from builtins import range
# Fluid simulation code based on
# "Real-Time Fluid Dynamics for Games" by Jos Stam
# http://www.intpowertechcorp.com/GDC03.pdf
def project(vx, vy):
    """Make the velocity field approximately mass-conserving.

    Solves a pressure-Poisson equation with ten Gauss-Seidel-style
    sweeps, then subtracts the pressure gradient from the velocities.
    NOTE: ``vx`` and ``vy`` are modified in place (via ``-=``) and also
    returned.  The domain wraps around (periodic boundaries).
    """
    def shift(field, step, axis):
        return np.roll(field, step, axis)

    cell = 1.0 / vx.shape[0]  # grid spacing h
    # Central-difference divergence of (vx, vy), scaled by -0.5*h.
    div = -0.5 * cell * (shift(vx, -1, 0) - shift(vx, 1, 0)
                         + shift(vy, -1, 1) - shift(vy, 1, 1))
    pressure = np.zeros(vx.shape)
    for _ in range(10):
        neighbors = (shift(pressure, 1, 0) + shift(pressure, -1, 0)
                     + shift(pressure, 1, 1) + shift(pressure, -1, 1))
        pressure = (div + neighbors) / 4.0
    # Subtract the pressure gradient to remove divergence.
    vx -= 0.5 * (shift(pressure, -1, 0) - shift(pressure, 1, 0)) / cell
    vy -= 0.5 * (shift(pressure, -1, 1) - shift(pressure, 1, 1)) / cell
    return vx, vy
def advect(f, vx, vy):
    """Transport field ``f`` along velocities (vx, vy) with one
    semi-Lagrangian (implicit Euler) step and bilinear sampling.

    Edges wrap around (periodic domain).  NOTE(review): the meshgrid
    call below pairs ``rows`` with axis 0 in a way that assumes a
    square grid (rows == cols), as in the demo images — confirm before
    using rectangular fields.
    """
    rows, cols = f.shape
    cell_ys, cell_xs = np.meshgrid(np.arange(rows), np.arange(cols))
    # Trace each cell center backwards one step to its source position.
    src_x = (cell_xs - vx).ravel()
    src_y = (cell_ys - vy).ravel()
    # Integer corner indices of the source cell...
    x0 = np.floor(src_x).astype(int)
    y0 = np.floor(src_y).astype(int)
    # ...and fractional offsets, used as bilinear weights.
    wx = src_x - x0
    wy = src_y - y0
    # Wrap all four corner indices around the simulation edges.
    x0 = np.mod(x0, rows)
    x1 = np.mod(x0 + 1, rows)
    y0 = np.mod(y0, cols)
    y1 = np.mod(y0 + 1, cols)
    # Bilinear interpolation of the four neighboring samples.
    upper = (1 - wy) * f[x0, y0] + wy * f[x0, y1]
    lower = (1 - wy) * f[x1, y0] + wy * f[x1, y1]
    sampled = (1 - wx) * upper + wx * lower
    return np.reshape(sampled, (rows, cols))
def simulate(vx, vy, smoke, num_time_steps, ax=None, render=False):
    """Run the solver for ``num_time_steps`` steps and return the final
    smoke density field.

    When ``ax`` is given, every step (and the final state) is plotted;
    ``render`` additionally saves PNG frames via plot_matrix().
    """
    print("Running simulation...")
    for step in range(num_time_steps):
        if ax:
            plot_matrix(ax, smoke, step, render)
        # Self-advect the velocity field, then enforce incompressibility.
        advected_vx = advect(vx, vx, vy)
        advected_vy = advect(vy, vx, vy)
        vx, vy = project(advected_vx, advected_vy)
        # Carry the smoke along the corrected velocity field.
        smoke = advect(smoke, vx, vy)
    if ax:
        plot_matrix(ax, smoke, num_time_steps, render)
    return smoke
def plot_matrix(ax, mat, t, render=False):
    """Draw ``mat`` on axes ``ax``; when ``render`` is set, also save
    the frame as ``step<t>.png`` for later GIF assembly."""
    plt.cla()
    ax.matshow(mat)
    ax.set_xticks([])
    ax.set_yticks([])
    plt.draw()
    if render:
        matplotlib.image.imsave('step{0:03d}.png'.format(t), mat)
    plt.pause(0.001)  # brief pause so the GUI event loop can repaint
if __name__ == '__main__':
    simulation_timesteps = 100

    print("Loading initial and target states...")
    # First color channel of the initial smoke image; downsampled target.
    init_smoke = imread('init_smoke.png')[:, :, 0]
    #target = imread('peace.png')[::2,::2,3]
    target = imread('skull.png')[::2, ::2]
    rows, cols = target.shape

    # Optimization variables: the initial x/y velocity fields, flattened
    # into a single parameter vector for scipy.optimize.
    init_dx_and_dy = np.zeros((2, rows, cols)).ravel()

    def distance_from_target_image(smoke):
        # Mean squared error between the final smoke and the target image.
        return np.mean((target - smoke)**2)

    def convert_param_vector_to_matrices(params):
        # Split the flat parameter vector back into vx and vy fields.
        vx = np.reshape(params[:(rows*cols)], (rows, cols))
        vy = np.reshape(params[(rows*cols):], (rows, cols))
        return vx, vy

    def objective(params):
        # Loss: run the full simulation, compare the result to the target.
        init_vx, init_vy = convert_param_vector_to_matrices(params)
        final_smoke = simulate(init_vx, init_vy, init_smoke, simulation_timesteps)
        return distance_from_target_image(final_smoke)

    # Specify gradient of objective function using autograd.
    objective_with_grad = value_and_grad(objective)

    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(111, frameon=False)

    def callback(params):
        # Visualize the current candidate flow after each CG iteration.
        init_vx, init_vy = convert_param_vector_to_matrices(params)
        simulate(init_vx, init_vy, init_smoke, simulation_timesteps, ax)

    print("Optimizing initial conditions...")
    result = minimize(objective_with_grad, init_dx_and_dy, jac=True, method='CG',
                      options={'maxiter': 25, 'disp': True}, callback=callback)

    print("Rendering optimized flow...")
    init_vx, init_vy = convert_param_vector_to_matrices(result.x)
    simulate(init_vx, init_vy, init_smoke, simulation_timesteps, ax, render=True)

    print("Converting frames to an animated GIF...")
    os.system("convert -delay 5 -loop 0 step*.png"
              " -delay 250 step100.png surprise.gif")  # Using imagemagick.
    os.system("rm step*.png")
# License: MIT
ffalcinelli/django-ejabberd-bridge | ejabberd_bridge/tests.py | 2 | 7615 | # -*- coding: utf-8 -*-
# Copyright (C) 2013 Fabio Falcinelli
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from io import StringIO
import io
import struct
from django.contrib.auth import get_user_model
from django.test import TestCase
from mock import patch
from ejabberd_bridge.management.commands import ejabberd_auth
__author__ = 'fabio'
class AuthBridgeTestCase(TestCase):
    """Tests for the ejabberd external-auth bridge management command.

    Exercises the eJabberd wire protocol: each request is a 2-byte
    big-endian length header followed by a ``cmd:arg:arg...`` string,
    and each reply is four bytes — a length of 2, then 0x0001 (true)
    or 0x0000 (false).
    """

    # Users referenced below (admin; user01 disabled; user02 active with
    # password "password") come from this fixture.
    fixtures = ["auth"]

    def setUp(self):
        super(AuthBridgeTestCase, self).setUp()
        self.cmd = ejabberd_auth.Command()
        self.srv = "localhost"

    def tearDown(self):
        pass

    def _check_cmd_parsing(self, params):
        # Frame ``params`` the way eJabberd would and check the command
        # splits it back into its ':'-separated tokens.
        data = struct.pack(">H", len(params)) + params.encode("utf-8")
        with patch("sys.stdin", StringIO(data.decode("utf-8"))):
            result = self.cmd.from_ejabberd()
        self.assertSequenceEqual(result, params.split(":"))

    def test_from_jabber_auth(self):
        """
        Tests the parsing of the auth command
        """
        params = "auth:User:Server:Password"
        self._check_cmd_parsing(params)

    def test_from_jabber_isuser(self):
        """
        Tests the parsing of the isuser command
        """
        params = "isuser:User:Server"
        self._check_cmd_parsing(params)

    def test_from_jabber_setpass(self):
        """
        Tests the parsing of the setpass command
        """
        params = "setpass:User:Server:Password"
        self._check_cmd_parsing(params)

    def test_to_jabber_true(self):
        """
        Tests conversion from python True value to bytes suitable for eJabberd
        """
        with patch("sys.stdout", new_callable=StringIO) as stdout_mocked:
            self.cmd.to_ejabberd(True)
        self.assertEqual(stdout_mocked.getvalue(), '\x00\x02\x00\x01')

    def test_to_jabber_false(self):
        """
        Tests conversion from python False value to bytes suitable for eJabberd
        """
        with patch("sys.stdout", new_callable=StringIO) as stdout_mocked:
            self.cmd.to_ejabberd(False)
        self.assertEqual(stdout_mocked.getvalue(), '\x00\x02\x00\x00')

    def test_isuser_ok(self):
        """
        Tests isuser command with an existent and valid user
        """
        username = "admin"
        self.assertTrue(self.cmd.isuser(username=username, server=self.srv))

    def test_isuser_does_not_exists(self):
        """
        Tests isuser command with a user which does not exist
        """
        username = "user_that_does_not_exist"
        self.assertFalse(self.cmd.isuser(username=username, server=self.srv))

    def test_isuser_is_disabled(self):
        """
        Tests isuser command with a user which is disabled
        """
        username = "user01"
        self.assertFalse(self.cmd.isuser(username=username, server=self.srv))

    def test_auth_ok(self):
        """
        Tests auth command with a right user and password pair
        """
        username = "user02"
        password = "password"
        self.assertTrue(self.cmd.auth(username=username, server=self.srv, password=password))

    def test_auth_wrong_password(self):
        """
        Tests auth command with a right user but wrong password
        """
        username = "user02"
        password = "WRONG"
        self.assertFalse(self.cmd.auth(username=username, server=self.srv, password=password))

    def test_auth_does_not_exist(self):
        """
        Tests auth command with a non existent user
        """
        username = "user_that_does_not_exists"
        password = "password"
        self.assertFalse(self.cmd.auth(username=username, server=self.srv, password=password))

    def test_auth_not_active(self):
        """
        Tests auth command with a right user and password pair but user is not active
        """
        username = "user01"
        password = "password"
        self.assertFalse(self.cmd.auth(username=username, server=self.srv, password=password))

    def test_setpass_ok(self):
        """
        Tests setpass command with a right user and a new password
        """
        username = "user02"
        password = "new_password"
        self.assertTrue(self.cmd.setpass(username=username, server=self.srv, password=password))
        # The new password must actually be persisted on the user.
        user = get_user_model().objects.get(username=username)
        self.assertTrue(user.check_password(password))

    def test_setpass_does_not_exist(self):
        """
        Tests setpass command with a non existent user
        """
        username = "user_that_does_not_exists"
        password = "new_password"
        self.assertFalse(self.cmd.setpass(username=username, server=self.srv, password=password))

    def _execute_cmd_handle(self, params):
        # Run one full handle() pass with mocked stdin/stdout and return
        # the raw bytes the command wrote back to eJabberd.
        data = struct.pack(">H", len(params)) + params.encode("utf-8")
        with patch("sys.stdin", StringIO(data.decode("utf-8"))), patch("sys.stdout",
                                                                       new_callable=StringIO) as stdout_mocked:
            self.cmd.handle(params, run_forever=False)
        return stdout_mocked.getvalue()

    def test_handle_auth_ok(self):
        """
        Tests successful auth command through the handle method
        """
        params = "auth:user02:localhost:password"
        self.assertEqual('\x00\x02\x00\x01', self._execute_cmd_handle(params))

    def test_handle_auth_nok(self):
        """
        Tests failing auth command through the handle method
        """
        params = "auth:User:Server:Password"
        self.assertEqual('\x00\x02\x00\x00', self._execute_cmd_handle(params))

    def test_handle_isuser_ok(self):
        """
        Tests successful isuser command through the handle method
        """
        params = "isuser:user02:localhost"
        self.assertEqual('\x00\x02\x00\x01', self._execute_cmd_handle(params))

    def test_handle_isuser_nok(self):
        """
        Tests failing isuser command through the handle method
        """
        params = "isuser:User:Server"
        self.assertEqual('\x00\x02\x00\x00', self._execute_cmd_handle(params))

    def test_handle_setpass_ok(self):
        """
        Tests successful setpass command through the handle method
        """
        params = "setpass:user02:localhost:new_password"
        self.assertEqual('\x00\x02\x00\x01', self._execute_cmd_handle(params))

    def test_handle_setpass_nok(self):
        """
        Tests failing setpass command through the handle method
        """
        params = "setpass:User:Server:Password"
        self.assertEqual('\x00\x02\x00\x00', self._execute_cmd_handle(params))
def test_handle_invalid_data(self):
"""
Tests failing with invalid bytes argument
"""
params = "foo bar"
data = struct.pack(">H", len(params) + 10) + params.encode("utf-8")
with patch("sys.stdin", io.BytesIO(data)), patch("sys.stdout", new_callable=StringIO) as stdout_mocked:
self.cmd.handle(params, run_forever=False)
self.assertEqual('\x00\x02\x00\x00', stdout_mocked.getvalue()) | lgpl-3.0 |
GunoH/intellij-community | python/helpers/pydev/pydevd_attach_to_process/winappdbg/interactive.py | 102 | 85840 | #!~/.wine/drive_c/Python25/python.exe
# -*- coding: utf-8 -*-
# Acknowledgements:
# Nicolas Economou, for his command line debugger on which this is inspired.
# http://tinyurl.com/nicolaseconomou
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Interactive debugging console.
@group Debugging:
ConsoleDebugger
@group Exceptions:
CmdError
"""
from __future__ import with_statement
__revision__ = "$Id$"
__all__ = [ 'ConsoleDebugger', 'CmdError' ]
# TODO document this module with docstrings.
# TODO command to set a last error breakpoint.
# TODO command to show available plugins.
from winappdbg import win32
from winappdbg import compat
from winappdbg.system import System
from winappdbg.util import PathOperations
from winappdbg.event import EventHandler, NoEvent
from winappdbg.textio import HexInput, HexOutput, HexDump, CrashDump, DebugLog
import os
import sys
import code
import time
import warnings
import traceback
# too many variables named "cmd" to have a module by the same name :P
from cmd import Cmd
# lazy imports
readline = None
#==============================================================================
class DummyEvent (NoEvent):
    "Dummy event object used internally by L{ConsoleDebugger}."

    # The _pid/_tid/_process/_thread attributes are injected externally
    # (see ConsoleDebugger.set_fake_last_event) before these getters run.

    def get_pid(self):
        return self._pid

    def get_tid(self):
        return self._tid

    def get_process(self):
        return self._process

    def get_thread(self):
        return self._thread
#==============================================================================
class CmdError (Exception):
    """
    Exception raised when a command parsing error occurs.
    Used internally by L{ConsoleDebugger}.
    """
    # Carries only the human-readable message passed to the constructor;
    # the console loop shows it to the user instead of a traceback.
#==============================================================================
class ConsoleDebugger (Cmd, EventHandler):
"""
Interactive console debugger.
@see: L{Debug.interactive}
"""
#------------------------------------------------------------------------------
# Class variables
# Exception to raise when an error occurs executing a command.
command_error_exception = CmdError
# Milliseconds to wait for debug events in the main loop.
dwMilliseconds = 100
# History file name.
history_file = '.winappdbg_history'
# Confirm before quitting?
confirm_quit = True
# Valid plugin name characters.
valid_plugin_name_chars = 'ABCDEFGHIJKLMNOPQRSTUVWXY' \
'abcdefghijklmnopqrstuvwxy' \
'012345678' \
'_'
# Names of the registers.
segment_names = ( 'cs', 'ds', 'es', 'fs', 'gs' )
register_alias_64_to_32 = {
'eax':'Rax', 'ebx':'Rbx', 'ecx':'Rcx', 'edx':'Rdx',
'eip':'Rip', 'ebp':'Rbp', 'esp':'Rsp', 'esi':'Rsi', 'edi':'Rdi'
}
register_alias_64_to_16 = { 'ax':'Rax', 'bx':'Rbx', 'cx':'Rcx', 'dx':'Rdx' }
register_alias_64_to_8_low = { 'al':'Rax', 'bl':'Rbx', 'cl':'Rcx', 'dl':'Rdx' }
register_alias_64_to_8_high = { 'ah':'Rax', 'bh':'Rbx', 'ch':'Rcx', 'dh':'Rdx' }
register_alias_32_to_16 = { 'ax':'Eax', 'bx':'Ebx', 'cx':'Ecx', 'dx':'Edx' }
register_alias_32_to_8_low = { 'al':'Eax', 'bl':'Ebx', 'cl':'Ecx', 'dl':'Edx' }
register_alias_32_to_8_high = { 'ah':'Eax', 'bh':'Ebx', 'ch':'Ecx', 'dh':'Edx' }
register_aliases_full_32 = list(segment_names)
register_aliases_full_32.extend(compat.iterkeys(register_alias_32_to_16))
register_aliases_full_32.extend(compat.iterkeys(register_alias_32_to_8_low))
register_aliases_full_32.extend(compat.iterkeys(register_alias_32_to_8_high))
register_aliases_full_32 = tuple(register_aliases_full_32)
register_aliases_full_64 = list(segment_names)
register_aliases_full_64.extend(compat.iterkeys(register_alias_64_to_32))
register_aliases_full_64.extend(compat.iterkeys(register_alias_64_to_16))
register_aliases_full_64.extend(compat.iterkeys(register_alias_64_to_8_low))
register_aliases_full_64.extend(compat.iterkeys(register_alias_64_to_8_high))
register_aliases_full_64 = tuple(register_aliases_full_64)
# Names of the control flow instructions.
jump_instructions = (
'jmp', 'jecxz', 'jcxz',
'ja', 'jnbe', 'jae', 'jnb', 'jb', 'jnae', 'jbe', 'jna', 'jc', 'je',
'jz', 'jnc', 'jne', 'jnz', 'jnp', 'jpo', 'jp', 'jpe', 'jg', 'jnle',
'jge', 'jnl', 'jl', 'jnge', 'jle', 'jng', 'jno', 'jns', 'jo', 'js'
)
call_instructions = ( 'call', 'ret', 'retn' )
loop_instructions = ( 'loop', 'loopz', 'loopnz', 'loope', 'loopne' )
control_flow_instructions = call_instructions + loop_instructions + \
jump_instructions
#------------------------------------------------------------------------------
# Instance variables
def __init__(self):
"""
Interactive console debugger.
@see: L{Debug.interactive}
"""
Cmd.__init__(self)
EventHandler.__init__(self)
# Quit the debugger when True.
self.debuggerExit = False
# Full path to the history file.
self.history_file_full_path = None
# Last executed command.
self.__lastcmd = ""
#------------------------------------------------------------------------------
# Debugger
# Use this Debug object.
def start_using_debugger(self, debug):
# Clear the previous Debug object.
self.stop_using_debugger()
# Keep the Debug object.
self.debug = debug
# Set ourselves as the event handler for the debugger.
self.prevHandler = debug.set_event_handler(self)
# Stop using the Debug object given by start_using_debugger().
# Circular references must be removed, or the destructors never get called.
def stop_using_debugger(self):
if hasattr(self, 'debug'):
debug = self.debug
debug.set_event_handler(self.prevHandler)
del self.prevHandler
del self.debug
return debug
return None
# Destroy the Debug object.
def destroy_debugger(self, autodetach = True):
debug = self.stop_using_debugger()
if debug is not None:
if not autodetach:
debug.kill_all(bIgnoreExceptions=True)
debug.lastEvent = None
debug.stop()
del debug
@property
def lastEvent(self):
return self.debug.lastEvent
def set_fake_last_event(self, process):
if self.lastEvent is None:
self.debug.lastEvent = DummyEvent(self.debug)
self.debug.lastEvent._process = process
self.debug.lastEvent._thread = process.get_thread(
process.get_thread_ids()[0])
self.debug.lastEvent._pid = process.get_pid()
self.debug.lastEvent._tid = self.lastEvent._thread.get_tid()
#------------------------------------------------------------------------------
# Input
# TODO
# * try to guess breakpoints when insufficient data is given
# * child Cmd instances will have to be used for other prompts, for example
# when assembling or editing memory - it may also be a good idea to think
# if it's possible to make the main Cmd instance also a child, instead of
# the debugger itself - probably the same goes for the EventHandler, maybe
# it can be used as a contained object rather than a parent class.
# Join a token list into an argument string.
def join_tokens(self, token_list):
return self.debug.system.argv_to_cmdline(token_list)
# Split an argument string into a token list.
def split_tokens(self, arg, min_count = 0, max_count = None):
token_list = self.debug.system.cmdline_to_argv(arg)
if len(token_list) < min_count:
raise CmdError("missing parameters.")
if max_count and len(token_list) > max_count:
raise CmdError("too many parameters.")
return token_list
# Token is a thread ID or name.
def input_thread(self, token):
targets = self.input_thread_list( [token] )
if len(targets) == 0:
raise CmdError("missing thread name or ID")
if len(targets) > 1:
msg = "more than one thread with that name:\n"
for tid in targets:
msg += "\t%d\n" % tid
msg = msg[:-len("\n")]
raise CmdError(msg)
return targets[0]
# Token list is a list of thread IDs or names.
def input_thread_list(self, token_list):
targets = set()
system = self.debug.system
for token in token_list:
try:
tid = self.input_integer(token)
if not system.has_thread(tid):
raise CmdError("thread not found (%d)" % tid)
targets.add(tid)
except ValueError:
found = set()
for process in system.iter_processes():
found.update( system.find_threads_by_name(token) )
if not found:
raise CmdError("thread not found (%s)" % token)
for thread in found:
targets.add( thread.get_tid() )
targets = list(targets)
targets.sort()
return targets
# Token is a process ID or name.
def input_process(self, token):
targets = self.input_process_list( [token] )
if len(targets) == 0:
raise CmdError("missing process name or ID")
if len(targets) > 1:
msg = "more than one process with that name:\n"
for pid in targets:
msg += "\t%d\n" % pid
msg = msg[:-len("\n")]
raise CmdError(msg)
return targets[0]
# Token list is a list of process IDs or names.
def input_process_list(self, token_list):
targets = set()
system = self.debug.system
for token in token_list:
try:
pid = self.input_integer(token)
if not system.has_process(pid):
raise CmdError("process not found (%d)" % pid)
targets.add(pid)
except ValueError:
found = system.find_processes_by_filename(token)
if not found:
raise CmdError("process not found (%s)" % token)
for (process, _) in found:
targets.add( process.get_pid() )
targets = list(targets)
targets.sort()
return targets
# Token is a command line to execute.
def input_command_line(self, command_line):
argv = self.debug.system.cmdline_to_argv(command_line)
if not argv:
raise CmdError("missing command line to execute")
fname = argv[0]
if not os.path.exists(fname):
try:
fname, _ = win32.SearchPath(None, fname, '.exe')
except WindowsError:
raise CmdError("file not found: %s" % fname)
argv[0] = fname
command_line = self.debug.system.argv_to_cmdline(argv)
return command_line
# Token is an integer.
# Only hexadecimal format is supported.
def input_hexadecimal_integer(self, token):
return int(token, 0x10)
# Token is an integer.
# It can be in any supported format.
def input_integer(self, token):
return HexInput.integer(token)
## input_integer = input_hexadecimal_integer
# Token is an address.
# The address can be a integer, a label or a register.
    def input_address(self, token, pid = None, tid = None):
        """Resolve a token to a memory address.

        Tried in order: register name (requires a known thread),
        hexadecimal integer, and finally a label in the target process.

        @raise CmdError: the token could not be resolved.
        """
        address = None
        if self.is_register(token):
            if tid is None:
                # Registers only make sense for the current thread.
                if self.lastEvent is None or pid != self.lastEvent.get_pid():
                    msg = "can't resolve register (%s) for unknown thread"
                    raise CmdError(msg % token)
                tid = self.lastEvent.get_tid()
            address = self.input_register(token, tid)
        if address is None:
            try:
                address = self.input_hexadecimal_integer(token)
            except ValueError:
                # Not a number: resolve it as a label in the process.
                if pid is None:
                    if self.lastEvent is None:
                        raise CmdError("no current process set")
                    process = self.lastEvent.get_process()
                elif self.lastEvent is not None and pid == self.lastEvent.get_pid():
                    # Reuse the current event's process snapshot.
                    process = self.lastEvent.get_process()
                else:
                    try:
                        process = self.debug.system.get_process(pid)
                    except KeyError:
                        raise CmdError("process not found (%d)" % pid)
                try:
                    address = process.resolve_label(token)
                except Exception:
                    raise CmdError("unknown address (%s)" % token)
        return address
# Token is an address range, or a single address.
# The addresses can be integers, labels or registers.
    def input_address_range(self, token_list, pid = None, tid = None):
        """Parse one or two tokens into an (address, size) pair.

        Two tokens are address and size; a single token may be either a
        plain address (size is None) or an "addr1-addr2" range.
        """
        if len(token_list) == 2:
            token_1, token_2 = token_list
            address = self.input_address(token_1, pid, tid)
            try:
                size = self.input_integer(token_2)
            except ValueError:
                raise CmdError("bad address range: %s %s" % (token_1, token_2))
        elif len(token_list) == 1:
            token = token_list[0]
            if '-' in token:
                try:
                    token_1, token_2 = token.split('-')
                except Exception:
                    raise CmdError("bad address range: %s" % token)
                address = self.input_address(token_1, pid, tid)
                # Size is the distance between the two resolved addresses.
                size = self.input_address(token_2, pid, tid) - address
            else:
                address = self.input_address(token, pid, tid)
                size = None
        return address, size
# XXX TODO
# Support non-integer registers here.
    def is_register(self, token):
        """Return True if *token* names a full-width integer register.

        Matches known aliases first, then scans the win32 CONTEXT
        structure fields for a machine-word-sized member of that name.
        """
        if win32.arch == 'i386':
            if token in self.register_aliases_full_32:
                return True
            token = token.title()   # eax -> Eax, matching CONTEXT field names
            for (name, typ) in win32.CONTEXT._fields_:
                if name == token:
                    # Only word-sized members count as integer registers.
                    return win32.sizeof(typ) == win32.sizeof(win32.DWORD)
        elif win32.arch == 'amd64':
            if token in self.register_aliases_full_64:
                return True
            token = token.title()
            for (name, typ) in win32.CONTEXT._fields_:
                if name == token:
                    return win32.sizeof(typ) == win32.sizeof(win32.DWORD64)
        return False
# The token is a register name.
# Returns None if no register name is matched.
    def input_register(self, token, tid = None):
        """Return the value of the named register in the given thread.

        Returns None when the token doesn't match any known register.
        Sub-register aliases (16/8 bit) are masked/shifted out of the
        full register value.
        """
        if tid is None:
            if self.lastEvent is None:
                raise CmdError("no current process set")
            thread = self.lastEvent.get_thread()
        else:
            thread = self.debug.system.get_thread(tid)
        ctx = thread.get_context()
        token = token.lower()
        title = token.title()
        if title in ctx:
            return ctx.get(title) # eax -> Eax
        if ctx.arch == 'i386':
            if token in self.segment_names:
                return ctx.get( 'Seg%s' % title ) # cs -> SegCs
            if token in self.register_alias_32_to_16:
                return ctx.get( self.register_alias_32_to_16[token] ) & 0xFFFF
            if token in self.register_alias_32_to_8_low:
                return ctx.get( self.register_alias_32_to_8_low[token] ) & 0xFF
            if token in self.register_alias_32_to_8_high:
                # High-byte aliases (e.g. "ah") live in bits 8-15.
                return (ctx.get( self.register_alias_32_to_8_high[token] ) & 0xFF00) >> 8
        elif ctx.arch == 'amd64':
            if token in self.segment_names:
                return ctx.get( 'Seg%s' % title ) # cs -> SegCs
            if token in self.register_alias_64_to_32:
                return ctx.get( self.register_alias_64_to_32[token] ) & 0xFFFFFFFF
            if token in self.register_alias_64_to_16:
                return ctx.get( self.register_alias_64_to_16[token] ) & 0xFFFF
            if token in self.register_alias_64_to_8_low:
                return ctx.get( self.register_alias_64_to_8_low[token] ) & 0xFF
            if token in self.register_alias_64_to_8_high:
                return (ctx.get( self.register_alias_64_to_8_high[token] ) & 0xFF00) >> 8
        return None
# Token list contains an address or address range.
# The prefix is also parsed looking for process and thread IDs.
def input_full_address_range(self, token_list):
pid, tid = self.get_process_and_thread_ids_from_prefix()
address, size = self.input_address_range(token_list, pid, tid)
return pid, tid, address, size
# Token list contains a breakpoint.
def input_breakpoint(self, token_list):
pid, tid, address, size = self.input_full_address_range(token_list)
if not self.debug.is_debugee(pid):
raise CmdError("target process is not being debugged")
return pid, tid, address, size
# Token list contains a memory address, and optional size and process.
# Sets the results as the default for the next display command.
def input_display(self, token_list, default_size = 64):
pid, tid, address, size = self.input_full_address_range(token_list)
if not size:
size = default_size
next_address = HexOutput.integer(address + size)
self.default_display_target = next_address
return pid, tid, address, size
#------------------------------------------------------------------------------
# Output
# Tell the user a module was loaded.
def print_module_load(self, event):
mod = event.get_module()
base = mod.get_base()
name = mod.get_filename()
if not name:
name = ''
msg = "Loaded module (%s) %s"
msg = msg % (HexDump.address(base), name)
print(msg)
# Tell the user a module was unloaded.
def print_module_unload(self, event):
mod = event.get_module()
base = mod.get_base()
name = mod.get_filename()
if not name:
name = ''
msg = "Unloaded module (%s) %s"
msg = msg % (HexDump.address(base), name)
print(msg)
# Tell the user a process was started.
def print_process_start(self, event):
pid = event.get_pid()
start = event.get_start_address()
if start:
start = HexOutput.address(start)
print("Started process %d at %s" % (pid, start))
else:
print("Attached to process %d" % pid)
# Tell the user a thread was started.
def print_thread_start(self, event):
tid = event.get_tid()
start = event.get_start_address()
if start:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
start = event.get_process().get_label_at_address(start)
print("Started thread %d at %s" % (tid, start))
else:
print("Attached to thread %d" % tid)
# Tell the user a process has finished.
def print_process_end(self, event):
pid = event.get_pid()
code = event.get_exit_code()
print("Process %d terminated, exit code %d" % (pid, code))
# Tell the user a thread has finished.
def print_thread_end(self, event):
tid = event.get_tid()
code = event.get_exit_code()
print("Thread %d terminated, exit code %d" % (tid, code))
# Print(debug strings.
def print_debug_string(self, event):
tid = event.get_tid()
string = event.get_debug_string()
print("Thread %d says: %r" % (tid, string))
# Inform the user of any other debugging event.
def print_event(self, event):
code = HexDump.integer( event.get_event_code() )
name = event.get_event_name()
desc = event.get_event_description()
if code in desc:
print('')
print("%s: %s" % (name, desc))
else:
print('')
print("%s (%s): %s" % (name, code, desc))
self.print_event_location(event)
# Stop on exceptions and prompt for commands.
def print_exception(self, event):
address = HexDump.address( event.get_exception_address() )
code = HexDump.integer( event.get_exception_code() )
desc = event.get_exception_description()
if event.is_first_chance():
chance = 'first'
else:
chance = 'second'
if code in desc:
msg = "%s at address %s (%s chance)" % (desc, address, chance)
else:
msg = "%s (%s) at address %s (%s chance)" % (desc, code, address, chance)
print('')
print(msg)
self.print_event_location(event)
# Show the current location in the code.
def print_event_location(self, event):
process = event.get_process()
thread = event.get_thread()
self.print_current_location(process, thread)
# Show the current location in the code.
def print_breakpoint_location(self, event):
process = event.get_process()
thread = event.get_thread()
pc = event.get_exception_address()
self.print_current_location(process, thread, pc)
# Show the current location in any process and thread.
    def print_current_location(self, process = None, thread = None, pc = None):
        """Dump registers, label and disassembly at the given (or current) PC.

        Defaults to the process/thread of the last debug event.
        """
        if not process:
            if self.lastEvent is None:
                raise CmdError("no current process set")
            process = self.lastEvent.get_process()
        if not thread:
            if self.lastEvent is None:
                raise CmdError("no current process set")
            thread = self.lastEvent.get_thread()
        # Freeze the thread so the PC and context are a consistent snapshot.
        thread.suspend()
        try:
            if pc is None:
                pc = thread.get_pc()
            ctx = thread.get_context()
        finally:
            thread.resume()
        label = process.get_label_at_address(pc)
        try:
            disasm = process.disassemble(pc, 15)
        except WindowsError:
            disasm = None
        except NotImplementedError:
            # No disassembler available on this platform.
            disasm = None
        print('')
        print(CrashDump.dump_registers(ctx),)
        print("%s:" % label)
        if disasm:
            print(CrashDump.dump_code_line(disasm[0], pc, bShowDump = True))
        else:
            # No disassembly: fall back to a raw hex dump of the code bytes.
            try:
                data = process.peek(pc, 15)
            except Exception:
                data = None
            if data:
                print('%s: %s' % (HexDump.address(pc), HexDump.hexblock_byte(data)))
            else:
                print('%s: ???' % HexDump.address(pc))
# Display memory contents using a given method.
def print_memory_display(self, arg, method):
if not arg:
arg = self.default_display_target
token_list = self.split_tokens(arg, 1, 2)
pid, tid, address, size = self.input_display(token_list)
label = self.get_process(pid).get_label_at_address(address)
data = self.read_memory(address, size, pid)
if data:
print("%s:" % label)
print(method(data, address),)
#------------------------------------------------------------------------------
# Debugging
# Get the process ID from the prefix or the last event.
def get_process_id_from_prefix(self):
if self.cmdprefix:
pid = self.input_process(self.cmdprefix)
else:
if self.lastEvent is None:
raise CmdError("no current process set")
pid = self.lastEvent.get_pid()
return pid
# Get the thread ID from the prefix or the last event.
def get_thread_id_from_prefix(self):
if self.cmdprefix:
tid = self.input_thread(self.cmdprefix)
else:
if self.lastEvent is None:
raise CmdError("no current process set")
tid = self.lastEvent.get_tid()
return tid
# Get the process from the prefix or the last event.
def get_process_from_prefix(self):
pid = self.get_process_id_from_prefix()
return self.get_process(pid)
# Get the thread from the prefix or the last event.
def get_thread_from_prefix(self):
tid = self.get_thread_id_from_prefix()
return self.get_thread(tid)
# Get the process and thread IDs from the prefix or the last event.
def get_process_and_thread_ids_from_prefix(self):
if self.cmdprefix:
try:
pid = self.input_process(self.cmdprefix)
tid = None
except CmdError:
try:
tid = self.input_thread(self.cmdprefix)
pid = self.debug.system.get_thread(tid).get_pid()
except CmdError:
msg = "unknown process or thread (%s)" % self.cmdprefix
raise CmdError(msg)
else:
if self.lastEvent is None:
raise CmdError("no current process set")
pid = self.lastEvent.get_pid()
tid = self.lastEvent.get_tid()
return pid, tid
# Get the process and thread from the prefix or the last event.
def get_process_and_thread_from_prefix(self):
pid, tid = self.get_process_and_thread_ids_from_prefix()
process = self.get_process(pid)
thread = self.get_thread(tid)
return process, thread
# Get the process object.
def get_process(self, pid = None):
if pid is None:
if self.lastEvent is None:
raise CmdError("no current process set")
process = self.lastEvent.get_process()
elif self.lastEvent is not None and pid == self.lastEvent.get_pid():
process = self.lastEvent.get_process()
else:
try:
process = self.debug.system.get_process(pid)
except KeyError:
raise CmdError("process not found (%d)" % pid)
return process
# Get the thread object.
def get_thread(self, tid = None):
if tid is None:
if self.lastEvent is None:
raise CmdError("no current process set")
thread = self.lastEvent.get_thread()
elif self.lastEvent is not None and tid == self.lastEvent.get_tid():
thread = self.lastEvent.get_thread()
else:
try:
thread = self.debug.system.get_thread(tid)
except KeyError:
raise CmdError("thread not found (%d)" % tid)
return thread
# Read the process memory.
def read_memory(self, address, size, pid = None):
process = self.get_process(pid)
try:
data = process.peek(address, size)
except WindowsError:
orig_address = HexOutput.integer(address)
next_address = HexOutput.integer(address + size)
msg = "error reading process %d, from %s to %s (%d bytes)"
msg = msg % (pid, orig_address, next_address, size)
raise CmdError(msg)
return data
# Write the process memory.
def write_memory(self, address, data, pid = None):
process = self.get_process(pid)
try:
process.write(address, data)
except WindowsError:
size = len(data)
orig_address = HexOutput.integer(address)
next_address = HexOutput.integer(address + size)
msg = "error reading process %d, from %s to %s (%d bytes)"
msg = msg % (pid, orig_address, next_address, size)
raise CmdError(msg)
# Change a register value.
def change_register(self, register, value, tid = None):
# Get the thread.
if tid is None:
if self.lastEvent is None:
raise CmdError("no current process set")
thread = self.lastEvent.get_thread()
else:
try:
thread = self.debug.system.get_thread(tid)
except KeyError:
raise CmdError("thread not found (%d)" % tid)
# Convert the value to integer type.
try:
value = self.input_integer(value)
except ValueError:
pid = thread.get_pid()
value = self.input_address(value, pid, tid)
# Suspend the thread.
# The finally clause ensures the thread is resumed before returning.
thread.suspend()
try:
# Get the current context.
ctx = thread.get_context()
# Register name matching is case insensitive.
register = register.lower()
# Integer 32 bits registers.
if register in self.register_names:
register = register.title() # eax -> Eax
# Segment (16 bit) registers.
if register in self.segment_names:
register = 'Seg%s' % register.title() # cs -> SegCs
value = value & 0x0000FFFF
# Integer 16 bits registers.
if register in self.register_alias_16:
register = self.register_alias_16[register]
previous = ctx.get(register) & 0xFFFF0000
value = (value & 0x0000FFFF) | previous
# Integer 8 bits registers (low part).
if register in self.register_alias_8_low:
register = self.register_alias_8_low[register]
previous = ctx.get(register) % 0xFFFFFF00
value = (value & 0x000000FF) | previous
# Integer 8 bits registers (high part).
if register in self.register_alias_8_high:
register = self.register_alias_8_high[register]
previous = ctx.get(register) % 0xFFFF00FF
value = ((value & 0x000000FF) << 8) | previous
# Set the new context.
ctx.__setitem__(register, value)
thread.set_context(ctx)
# Resume the thread.
finally:
thread.resume()
# Very crude way to find data within the process memory.
# TODO: Perhaps pfind.py can be integrated here instead.
def find_in_memory(self, query, process):
for mbi in process.get_memory_map():
if mbi.State != win32.MEM_COMMIT or mbi.Protect & win32.PAGE_GUARD:
continue
address = mbi.BaseAddress
size = mbi.RegionSize
try:
data = process.read(address, size)
except WindowsError:
msg = "*** Warning: read error at address %s"
msg = msg % HexDump.address(address)
print(msg)
width = min(len(query), 16)
p = data.find(query)
while p >= 0:
q = p + len(query)
d = data[ p : min(q, p + width) ]
h = HexDump.hexline(d, width = width)
a = HexDump.address(address + p)
print("%s: %s" % (a, h))
p = data.find(query, q)
# Kill a process.
def kill_process(self, pid):
process = self.debug.system.get_process(pid)
try:
process.kill()
if self.debug.is_debugee(pid):
self.debug.detach(pid)
print("Killed process (%d)" % pid)
except Exception:
print("Error trying to kill process (%d)" % pid)
# Kill a thread.
def kill_thread(self, tid):
thread = self.debug.system.get_thread(tid)
try:
thread.kill()
process = thread.get_process()
pid = process.get_pid()
if self.debug.is_debugee(pid) and not process.is_alive():
self.debug.detach(pid)
print("Killed thread (%d)" % tid)
except Exception:
print("Error trying to kill thread (%d)" % tid)
#------------------------------------------------------------------------------
# Command prompt input
# Prompt the user for commands.
    def prompt_user(self):
        """Run the command loop until the debugger is told to exit.

        A CmdError aborts only the current command; any other exception
        is printed and the loop is re-entered.
        """
        while not self.debuggerExit:
            try:
                self.cmdloop()
                break
            except CmdError:
                # sys.exc_info() keeps this compatible with Python 2 and 3.
                e = sys.exc_info()[1]
                print("*** Error: %s" % str(e))
            except Exception:
                traceback.print_exc()
##                self.debuggerExit = True
# Prompt the user for a YES/NO kind of question.
    # Prompt the user for a YES/NO kind of question.
    def ask_user(self, msg, prompt = "Are you sure? (y/N): "):
        """Ask a yes/no question; return True only on an explicit 'y'."""
        print(msg)
        answer = raw_input(prompt)   # NOTE(review): Python 2 builtin; presumably aliased by a compat layer on Python 3
        answer = answer.strip()[:1].lower()
        return answer == 'y'
# Autocomplete the given command when not ambiguous.
# Convert it to lowercase (so commands are seen as case insensitive).
def autocomplete(self, cmd):
cmd = cmd.lower()
completed = self.completenames(cmd)
if len(completed) == 1:
cmd = completed[0]
return cmd
# Get the help text for the given list of command methods.
# Note it's NOT a list of commands, but a list of actual method names.
# Each line of text is stripped and all lines are sorted.
# Repeated text lines are removed.
# Returns a single, possibly multiline, string.
def get_help(self, commands):
msg = set()
for name in commands:
if name != 'do_help':
try:
doc = getattr(self, name).__doc__.split('\n')
except Exception:
return ( "No help available when Python"
" is run with the -OO switch." )
for x in doc:
x = x.strip()
if x:
msg.add(' %s' % x)
msg = list(msg)
msg.sort()
msg = '\n'.join(msg)
return msg
# Parse the prefix and remove it from the command line.
def split_prefix(self, line):
prefix = None
if line.startswith('~'):
pos = line.find(' ')
if pos == 1:
pos = line.find(' ', pos + 1)
if not pos < 0:
prefix = line[ 1 : pos ].strip()
line = line[ pos : ].strip()
return prefix, line
#------------------------------------------------------------------------------
# Cmd() hacks
# Header for help page.
doc_header = 'Available commands (type help * or help <command>)'
## # Read and write directly to stdin and stdout.
## # This prevents the use of raw_input and print.
## use_rawinput = False
@property
def prompt(self):
if self.lastEvent:
pid = self.lastEvent.get_pid()
tid = self.lastEvent.get_tid()
if self.debug.is_debugee(pid):
## return '~%d(%d)> ' % (tid, pid)
return '%d:%d> ' % (pid, tid)
return '> '
# Return a sorted list of method names.
# Only returns the methods that implement commands.
def get_names(self):
names = Cmd.get_names(self)
names = [ x for x in set(names) if x.startswith('do_') ]
names.sort()
return names
# Automatically autocomplete commands, even if Tab wasn't pressed.
# The prefix is removed from the line and stored in self.cmdprefix.
# Also implement the commands that consist of a symbol character.
    def parseline(self, line):
        """Preprocess a command line before Cmd dispatches it.

        Strips the ~prefix into self.cmdprefix, expands the '.' and '#'
        shorthand commands, and autocompletes the command name.
        """
        self.cmdprefix, line = self.split_prefix(line)
        line = line.strip()
        if line:
            if line[0] == '.':
                # ".name args" -> "plugin name args"
                line = 'plugin ' + line[1:]
            elif line[0] == '#':
                # "#stmt" -> "python stmt"
                line = 'python ' + line[1:]
        cmd, arg, line = Cmd.parseline(self, line)
        if cmd:
            cmd = self.autocomplete(cmd)
        return cmd, arg, line
## # Don't repeat the last executed command.
## def emptyline(self):
## pass
# Reset the defaults for some commands.
    def preloop(self):
        """Reset per-session defaults before the command loop starts."""
        # Disassembly and display commands start at the program counter.
        self.default_disasm_target = 'eip'
        self.default_display_target = 'eip'
        self.last_display_command = self.do_db
# Put the prefix back in the command line.
    def get_lastcmd(self):
        """Getter for the lastcmd property."""
        return self.__lastcmd
    def set_lastcmd(self, lastcmd):
        # Put the prefix back so repeating the command keeps its target.
        if self.cmdprefix:
            lastcmd = '~%s %s' % (self.cmdprefix, lastcmd)
        self.__lastcmd = lastcmd
    # Overrides Cmd.lastcmd so the prefix survives command repetition.
    lastcmd = property(get_lastcmd, set_lastcmd)
# Quit the command prompt if the debuggerExit flag is on.
    def postcmd(self, stop, line):
        """Stop the command loop when requested or when the debugger exits."""
        return stop or self.debuggerExit
#------------------------------------------------------------------------------
# Commands
# Each command contains a docstring with it's help text.
# The help text consist of independent text lines,
# where each line shows a command and it's parameters.
# Each command method has the help message for itself and all it's aliases.
# Only the docstring for the "help" command is shown as-is.
# NOTE: Command methods MUST be all lowercase!
# Extended help command.
    def do_help(self, arg):
        """
        ? - show the list of available commands
        ? * - show help for all commands
        ? <command> [command...] - show help for the given command(s)
        help - show the list of available commands
        help * - show help for all commands
        help <command> [command...] - show help for the given command(s)
        """
        # NOTE: do_* docstrings are parsed at runtime as the help text,
        # so this one is shown as-is.
        if not arg:
            Cmd.do_help(self, arg)
        elif arg in ('?', 'help'):
            # An easter egg :)
            print("  Help! I need somebody...")
            print("  Help! Not just anybody...")
            print("  Help! You know, I need someone...")
            print("  Heeelp!")
        else:
            if arg == '*':
                # Show help for every command.
                commands = self.get_names()
                commands = [ x for x in commands if x.startswith('do_') ]
            else:
                # Expand abbreviations and collect the matching methods.
                commands = set()
                for x in arg.split(' '):
                    x = x.strip()
                    if x:
                        for n in self.completenames(x):
                            commands.add( 'do_%s' % n )
                commands = list(commands)
                commands.sort()
            print(self.get_help(commands))
def do_shell(self, arg):
"""
! - spawn a system shell
shell - spawn a system shell
! <command> [arguments...] - execute a single shell command
shell <command> [arguments...] - execute a single shell command
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
# Try to use the environment to locate cmd.exe.
# If not found, it's usually OK to just use the filename,
# since cmd.exe is one of those "magic" programs that
# can be automatically found by CreateProcess.
shell = os.getenv('ComSpec', 'cmd.exe')
# When given a command, run it and return.
# When no command is given, spawn a shell.
if arg:
arg = '%s /c %s' % (shell, arg)
else:
arg = shell
process = self.debug.system.start_process(arg, bConsole = True)
process.wait()
# This hack fixes a bug in Python, the interpreter console is closing the
# stdin pipe when calling the exit() function (Ctrl+Z seems to work fine).
    class _PythonExit(object):
        # Replacement for exit() in the embedded Python console: raising
        # SystemExit avoids the CPython bug where exit() closes stdin.
        def __repr__(self):
            return "Use exit() or Ctrl-Z plus Return to exit"
        def __call__(self):
            raise SystemExit()
    # Shared instance injected into the console's local namespace.
    _python_exit = _PythonExit()
# Spawns a Python shell with some handy local variables and the winappdbg
# module already imported. Also the console banner is improved.
    def _spawn_python_shell(self, arg):
        """Spawn an interactive Python console sharing this debugger's state."""
        import winappdbg
        banner = ('Python %s on %s\nType "help", "copyright", '
                 '"credits" or "license" for more information.\n')
        platform = winappdbg.version.lower()
        platform = 'WinAppDbg %s' % platform
        banner = banner % (sys.version, platform)
        local = {}
        # NOTE(review): __builtins__ may be a module or a dict depending on
        # context; dict.update() assumes it behaves like a mapping here.
        local.update(__builtins__)
        local.update({
            '__name__' : '__console__',
            '__doc__' : None,
            'exit' : self._python_exit,
            'self' : self,
            'arg' : arg,
            'winappdbg' : winappdbg,
        })
        try:
            code.interact(banner=banner, local=local)
        except SystemExit:
            # We need to catch it so it doesn't kill our program.
            pass
    def do_python(self, arg):
        """
        # - spawn a python interpreter
        python - spawn a python interpreter
        # <statement> - execute a single python statement
        python <statement> - execute a single python statement
        """
        if self.cmdprefix:
            raise CmdError("prefix not allowed")
        # When given a Python statement, execute it directly.
        if arg:
            try:
                # compat.exec_ papers over the py2 statement / py3 function split.
                compat.exec_(arg, globals(), locals())
            except Exception:
                traceback.print_exc()
        # When no statement is given, spawn a Python interpreter.
        else:
            try:
                self._spawn_python_shell(arg)
            except Exception:
                e = sys.exc_info()[1]
                raise CmdError(
                    "unhandled exception when running Python console: %s" % e)
# The plugins interface is quite simple.
#
# Just place a .py file with the plugin name in the "plugins" folder,
# for example "do_example.py" would implement the "example" command.
#
# The plugin must have a function named "do", which implements the
# command functionality exactly like the do_* methods of Cmd instances.
#
# The docstring for the "do" function will be parsed exactly like
# one of the debugger's commands - that is, each line is treated
# independently.
#
    def do_plugin(self, arg):
        """
        [~prefix] .<name> [arguments] - run a plugin command
        [~prefix] plugin <name> [arguments] - run a plugin command
        """
        # Split the plugin name from its arguments.
        pos = arg.find(' ')
        if pos < 0:
            name = arg
            arg = ''
        else:
            name = arg[:pos]
            arg = arg[pos:].strip()
        if not name:
            raise CmdError("missing plugin name")
        # Restrict the name so it can't escape the plugins package.
        for c in name:
            if c not in self.valid_plugin_name_chars:
                raise CmdError("invalid plugin name: %r" % name)
        name = 'winappdbg.plugins.do_%s' % name
        try:
            # __import__ returns the top package; walk down to the module.
            plugin = __import__(name)
            components = name.split('.')
            for comp in components[1:]:
                plugin = getattr(plugin, comp)
            # Reload so plugin edits take effect without restarting.
            reload(plugin)
        except ImportError:
            raise CmdError("plugin not found: %s" % name)
        try:
            return plugin.do(self, arg)
        except CmdError:
            raise
        except Exception:
            e = sys.exc_info()[1]
##            traceback.print_exc(e) # XXX DEBUG
            raise CmdError("unhandled exception in plugin: %s" % e)
def do_quit(self, arg):
"""
quit - close the debugging session
q - close the debugging session
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
if arg:
raise CmdError("too many arguments")
if self.confirm_quit:
count = self.debug.get_debugee_count()
if count > 0:
if count == 1:
msg = "There's a program still running."
else:
msg = "There are %s programs still running." % count
if not self.ask_user(msg):
return False
self.debuggerExit = True
return True
do_q = do_quit
def do_attach(self, arg):
"""
attach <target> [target...] - attach to the given process(es)
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
targets = self.input_process_list( self.split_tokens(arg, 1) )
if not targets:
print("Error: missing parameters")
else:
debug = self.debug
for pid in targets:
try:
debug.attach(pid)
print("Attached to process (%d)" % pid)
except Exception:
print("Error: can't attach to process (%d)" % pid)
def do_detach(self, arg):
"""
[~process] detach - detach from the current process
detach - detach from the current process
detach <target> [target...] - detach from the given process(es)
"""
debug = self.debug
token_list = self.split_tokens(arg)
if self.cmdprefix:
token_list.insert(0, self.cmdprefix)
targets = self.input_process_list(token_list)
if not targets:
if self.lastEvent is None:
raise CmdError("no current process set")
targets = [ self.lastEvent.get_pid() ]
for pid in targets:
try:
debug.detach(pid)
print("Detached from process (%d)" % pid)
except Exception:
print("Error: can't detach from process (%d)" % pid)
def do_windowed(self, arg):
"""
windowed <target> [arguments...] - run a windowed program for debugging
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
cmdline = self.input_command_line(arg)
try:
process = self.debug.execl(arg,
bConsole = False,
bFollow = self.options.follow)
print("Spawned process (%d)" % process.get_pid())
except Exception:
raise CmdError("can't execute")
self.set_fake_last_event(process)
def do_console(self, arg):
"""
console <target> [arguments...] - run a console program for debugging
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
cmdline = self.input_command_line(arg)
try:
process = self.debug.execl(arg,
bConsole = True,
bFollow = self.options.follow)
print("Spawned process (%d)" % process.get_pid())
except Exception:
raise CmdError("can't execute")
self.set_fake_last_event(process)
    def do_continue(self, arg):
        """
        continue - continue execution
        g - continue execution
        go - continue execution
        """
        if self.cmdprefix:
            raise CmdError("prefix not allowed")
        if arg:
            raise CmdError("too many arguments")
        # Returning True exits the command loop, resuming the debugees.
        if self.debug.get_debugee_count() > 0:
            return True
    do_g = do_continue
    do_go = do_continue
def do_gh(self, arg):
"""
gh - go with exception handled
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
if arg:
raise CmdError("too many arguments")
if self.lastEvent:
self.lastEvent.continueStatus = win32.DBG_EXCEPTION_HANDLED
return self.do_go(arg)
def do_gn(self, arg):
"""
gn - go with exception not handled
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
if arg:
raise CmdError("too many arguments")
if self.lastEvent:
self.lastEvent.continueStatus = win32.DBG_EXCEPTION_NOT_HANDLED
return self.do_go(arg)
def do_refresh(self, arg):
"""
refresh - refresh the list of running processes and threads
[~process] refresh - refresh the list of running threads
"""
if arg:
raise CmdError("too many arguments")
if self.cmdprefix:
process = self.get_process_from_prefix()
process.scan()
else:
self.debug.system.scan()
def do_processlist(self, arg):
"""
pl - show the processes being debugged
processlist - show the processes being debugged
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
if arg:
raise CmdError("too many arguments")
system = self.debug.system
pid_list = self.debug.get_debugee_pids()
if pid_list:
print("Process ID File name")
for pid in pid_list:
if pid == 0:
filename = "System Idle Process"
elif pid == 4:
filename = "System"
else:
filename = system.get_process(pid).get_filename()
filename = PathOperations.pathname_to_filename(filename)
print("%-12d %s" % (pid, filename))
do_pl = do_processlist
def do_threadlist(self, arg):
"""
tl - show the threads being debugged
threadlist - show the threads being debugged
"""
if arg:
raise CmdError("too many arguments")
if self.cmdprefix:
process = self.get_process_from_prefix()
for thread in process.iter_threads():
tid = thread.get_tid()
name = thread.get_name()
print("%-12d %s" % (tid, name))
else:
system = self.debug.system
pid_list = self.debug.get_debugee_pids()
if pid_list:
print("Thread ID Thread name")
for pid in pid_list:
process = system.get_process(pid)
for thread in process.iter_threads():
tid = thread.get_tid()
name = thread.get_name()
print("%-12d %s" % (tid, name))
do_tl = do_threadlist
def do_kill(self, arg):
"""
[~process] kill - kill a process
[~thread] kill - kill a thread
kill - kill the current process
kill * - kill all debugged processes
kill <processes and/or threads...> - kill the given processes and threads
"""
if arg:
if arg == '*':
target_pids = self.debug.get_debugee_pids()
target_tids = list()
else:
target_pids = set()
target_tids = set()
if self.cmdprefix:
pid, tid = self.get_process_and_thread_ids_from_prefix()
if tid is None:
target_tids.add(tid)
else:
target_pids.add(pid)
for token in self.split_tokens(arg):
try:
pid = self.input_process(token)
target_pids.add(pid)
except CmdError:
try:
tid = self.input_process(token)
target_pids.add(pid)
except CmdError:
msg = "unknown process or thread (%s)" % token
raise CmdError(msg)
target_pids = list(target_pids)
target_tids = list(target_tids)
target_pids.sort()
target_tids.sort()
msg = "You are about to kill %d processes and %d threads."
msg = msg % ( len(target_pids), len(target_tids) )
if self.ask_user(msg):
for pid in target_pids:
self.kill_process(pid)
for tid in target_tids:
self.kill_thread(tid)
else:
if self.cmdprefix:
pid, tid = self.get_process_and_thread_ids_from_prefix()
if tid is None:
if self.lastEvent is not None and pid == self.lastEvent.get_pid():
msg = "You are about to kill the current process."
else:
msg = "You are about to kill process %d." % pid
if self.ask_user(msg):
self.kill_process(pid)
else:
if self.lastEvent is not None and tid == self.lastEvent.get_tid():
msg = "You are about to kill the current thread."
else:
msg = "You are about to kill thread %d." % tid
if self.ask_user(msg):
self.kill_thread(tid)
else:
if self.lastEvent is None:
raise CmdError("no current process set")
pid = self.lastEvent.get_pid()
if self.ask_user("You are about to kill the current process."):
self.kill_process(pid)
# TODO: create hidden threads using undocumented API calls.
    def do_modload(self, arg):
        """
        [~process] modload <filename.dll> - load a DLL module
        """
        filename = self.split_tokens(arg, 1, 1)[0]
        process = self.get_process_from_prefix()
        try:
            # Inject asynchronously: don't wait for the DLL to finish loading.
            process.inject_dll(filename, bWait=False)
        except RuntimeError:
            print("Can't inject module: %r" % filename)
# TODO: modunload
def do_stack(self, arg):
"""
[~thread] k - show the stack trace
[~thread] stack - show the stack trace
"""
if arg: # XXX TODO add depth parameter
raise CmdError("too many arguments")
pid, tid = self.get_process_and_thread_ids_from_prefix()
process = self.get_process(pid)
thread = process.get_thread(tid)
try:
stack_trace = thread.get_stack_trace_with_labels()
if stack_trace:
print(CrashDump.dump_stack_trace_with_labels(stack_trace),)
else:
print("No stack trace available for thread (%d)" % tid)
except WindowsError:
print("Can't get stack trace for thread (%d)" % tid)
do_k = do_stack
def do_break(self, arg):
"""
break - force a debug break in all debugees
break <process> [process...] - force a debug break
"""
debug = self.debug
system = debug.system
targets = self.input_process_list( self.split_tokens(arg) )
if not targets:
targets = debug.get_debugee_pids()
targets.sort()
if self.lastEvent:
current = self.lastEvent.get_pid()
else:
current = None
for pid in targets:
if pid != current and debug.is_debugee(pid):
process = system.get_process(pid)
try:
process.debug_break()
except WindowsError:
print("Can't force a debug break on process (%d)")
    def do_step(self, arg):
        """
        p - step on the current assembly instruction
        next - step on the current assembly instruction
        step - step on the current assembly instruction

        Steps OVER calls: a one-shot breakpoint is placed after the current
        instruction, except for flow-control opcodes which fall back to
        single-step tracing.
        """
        if self.cmdprefix:
            raise CmdError("prefix not allowed")
        if self.lastEvent is None:
            raise CmdError("no current process set")
        if arg: # XXX this check is to be removed
            raise CmdError("too many arguments")
        pid = self.lastEvent.get_pid()
        thread = self.lastEvent.get_thread()
        pc = thread.get_pc()
        # Disassemble just the instruction at the program counter.
        code = thread.disassemble(pc, 16)[0]
        size = code[1]
        opcode = code[2].lower()
        if ' ' in opcode:
            opcode = opcode[ : opcode.find(' ') ]
        # Branches, interrupts and returns can't be stepped over with a
        # breakpoint at pc+size; trace them instead.
        if opcode in self.jump_instructions or opcode in ('int', 'ret', 'retn'):
            return self.do_trace(arg)
        address = pc + size
##        print(hex(pc), hex(address), size   # XXX DEBUG
        # stalk_at sets a one-shot code breakpoint at the next instruction.
        self.debug.stalk_at(pid, address)
        return True
    do_p = do_step
    do_next = do_step
    def do_trace(self, arg):
        """
        t - trace at the current assembly instruction
        trace - trace at the current assembly instruction
        """
        if arg: # XXX this check is to be removed
            raise CmdError("too many arguments")
        if self.lastEvent is None:
            raise CmdError("no current thread set")
        # Set the trap flag so the thread raises a single-step exception
        # after executing one instruction.
        self.lastEvent.get_thread().set_tf()
        # Returning True resumes execution (Cmd loop convention).
        return True
    do_t = do_trace
    def do_bp(self, arg):
        """
        [~process] bp <address> - set a code breakpoint
        """
        pid = self.get_process_id_from_prefix()
        if not self.debug.is_debugee(pid):
            raise CmdError("target process is not being debugged")
        # NOTE(review): the process object itself is unused; presumably the
        # call is kept for its side effect of validating the pid — confirm.
        process = self.get_process(pid)
        token_list = self.split_tokens(arg, 1, 1)
        try:
            # Resolve the token to a numeric address; on failure keep the raw
            # token as a label and defer the breakpoint until it resolves.
            address = self.input_address(token_list[0], pid)
            deferred = False
        except Exception:
            address = token_list[0]
            deferred = True
        if not address:
            # A falsy (zero/empty) address is also treated as a deferred label.
            address = token_list[0]
            deferred = True
        self.debug.break_at(pid, address)
        if deferred:
            print("Deferred breakpoint set at %s" % address)
        else:
            print("Breakpoint set at %s" % address)
def do_ba(self, arg):
"""
[~thread] ba <a|w|e> <1|2|4|8> <address> - set hardware breakpoint
"""
debug = self.debug
thread = self.get_thread_from_prefix()
pid = thread.get_pid()
tid = thread.get_tid()
if not debug.is_debugee(pid):
raise CmdError("target thread is not being debugged")
token_list = self.split_tokens(arg, 3, 3)
access = token_list[0].lower()
size = token_list[1]
address = token_list[2]
if access == 'a':
access = debug.BP_BREAK_ON_ACCESS
elif access == 'w':
access = debug.BP_BREAK_ON_WRITE
elif access == 'e':
access = debug.BP_BREAK_ON_EXECUTION
else:
raise CmdError("bad access type: %s" % token_list[0])
if size == '1':
size = debug.BP_WATCH_BYTE
elif size == '2':
size = debug.BP_WATCH_WORD
elif size == '4':
size = debug.BP_WATCH_DWORD
elif size == '8':
size = debug.BP_WATCH_QWORD
else:
raise CmdError("bad breakpoint size: %s" % size)
thread = self.get_thread_from_prefix()
tid = thread.get_tid()
pid = thread.get_pid()
if not debug.is_debugee(pid):
raise CmdError("target process is not being debugged")
address = self.input_address(address, pid)
if debug.has_hardware_breakpoint(tid, address):
debug.erase_hardware_breakpoint(tid, address)
debug.define_hardware_breakpoint(tid, address, access, size)
debug.enable_hardware_breakpoint(tid, address)
    def do_bm(self, arg):
        """
        [~process] bm <address-address> - set memory breakpoint
        """
        pid = self.get_process_id_from_prefix()
        if not self.debug.is_debugee(pid):
            raise CmdError("target process is not being debugged")
        # NOTE(review): process is unused; presumably fetched to validate the
        # pid exists — confirm.
        process = self.get_process(pid)
        token_list = self.split_tokens(arg, 1, 2)
        # Accepts "start-end" or "start size"; input_address_range normalizes.
        address, size = self.input_address_range(token_list[0], pid)
        self.debug.watch_buffer(pid, address, size)
def do_bl(self, arg):
"""
bl - list the breakpoints for the current process
bl * - list the breakpoints for all processes
[~process] bl - list the breakpoints for the given process
bl <process> [process...] - list the breakpoints for each given process
"""
debug = self.debug
if arg == '*':
if self.cmdprefix:
raise CmdError("prefix not supported")
breakpoints = debug.get_debugee_pids()
else:
targets = self.input_process_list( self.split_tokens(arg) )
if self.cmdprefix:
targets.insert(0, self.input_process(self.cmdprefix))
if not targets:
if self.lastEvent is None:
raise CmdError("no current process is set")
targets = [ self.lastEvent.get_pid() ]
for pid in targets:
bplist = debug.get_process_code_breakpoints(pid)
printed_process_banner = False
if bplist:
if not printed_process_banner:
print("Process %d:" % pid)
printed_process_banner = True
for bp in bplist:
address = repr(bp)[1:-1].replace('remote address ','')
print(" %s" % address)
dbplist = debug.get_process_deferred_code_breakpoints(pid)
if dbplist:
if not printed_process_banner:
print("Process %d:" % pid)
printed_process_banner = True
for (label, action, oneshot) in dbplist:
if oneshot:
address = " Deferred unconditional one-shot" \
" code breakpoint at %s"
else:
address = " Deferred unconditional" \
" code breakpoint at %s"
address = address % label
print(" %s" % address)
bplist = debug.get_process_page_breakpoints(pid)
if bplist:
if not printed_process_banner:
print("Process %d:" % pid)
printed_process_banner = True
for bp in bplist:
address = repr(bp)[1:-1].replace('remote address ','')
print(" %s" % address)
for tid in debug.system.get_process(pid).iter_thread_ids():
bplist = debug.get_thread_hardware_breakpoints(tid)
if bplist:
print("Thread %d:" % tid)
for bp in bplist:
address = repr(bp)[1:-1].replace('remote address ','')
print(" %s" % address)
    def do_bo(self, arg):
        """
        [~process] bo <address> - make a code breakpoint one-shot
        [~thread] bo <address> - make a hardware breakpoint one-shot
        [~process] bo <address-address> - make a memory breakpoint one-shot
        [~process] bo <address> <size> - make a memory breakpoint one-shot
        """
        token_list = self.split_tokens(arg, 1, 2)
        pid, tid, address, size = self.input_breakpoint(token_list)
        debug = self.debug
        found = False
        # No size given: the address names a hardware or code breakpoint.
        # With a size it must be a page (memory) breakpoint.
        if size is None:
            if tid is not None:
                if debug.has_hardware_breakpoint(tid, address):
                    debug.enable_one_shot_hardware_breakpoint(tid, address)
                    found = True
            if pid is not None:
                if debug.has_code_breakpoint(pid, address):
                    debug.enable_one_shot_code_breakpoint(pid, address)
                    found = True
        else:
            if debug.has_page_breakpoint(pid, address):
                debug.enable_one_shot_page_breakpoint(pid, address)
                found = True
        if not found:
            print("Error: breakpoint not found.")
    def do_be(self, arg):
        """
        [~process] be <address> - enable a code breakpoint
        [~thread] be <address> - enable a hardware breakpoint
        [~process] be <address-address> - enable a memory breakpoint
        [~process] be <address> <size> - enable a memory breakpoint
        """
        token_list = self.split_tokens(arg, 1, 2)
        pid, tid, address, size = self.input_breakpoint(token_list)
        debug = self.debug
        found = False
        # Same dispatch as do_bo: no size -> hardware/code bp, size -> page bp.
        if size is None:
            if tid is not None:
                if debug.has_hardware_breakpoint(tid, address):
                    debug.enable_hardware_breakpoint(tid, address)
                    found = True
            if pid is not None:
                if debug.has_code_breakpoint(pid, address):
                    debug.enable_code_breakpoint(pid, address)
                    found = True
        else:
            if debug.has_page_breakpoint(pid, address):
                debug.enable_page_breakpoint(pid, address)
                found = True
        if not found:
            print("Error: breakpoint not found.")
    def do_bd(self, arg):
        """
        [~process] bd <address> - disable a code breakpoint
        [~thread] bd <address> - disable a hardware breakpoint
        [~process] bd <address-address> - disable a memory breakpoint
        [~process] bd <address> <size> - disable a memory breakpoint
        """
        token_list = self.split_tokens(arg, 1, 2)
        pid, tid, address, size = self.input_breakpoint(token_list)
        debug = self.debug
        found = False
        # Same dispatch as do_bo: no size -> hardware/code bp, size -> page bp.
        if size is None:
            if tid is not None:
                if debug.has_hardware_breakpoint(tid, address):
                    debug.disable_hardware_breakpoint(tid, address)
                    found = True
            if pid is not None:
                if debug.has_code_breakpoint(pid, address):
                    debug.disable_code_breakpoint(pid, address)
                    found = True
        else:
            if debug.has_page_breakpoint(pid, address):
                debug.disable_page_breakpoint(pid, address)
                found = True
        if not found:
            print("Error: breakpoint not found.")
    def do_bc(self, arg):
        """
        [~process] bc <address> - clear a code breakpoint
        [~thread] bc <address> - clear a hardware breakpoint
        [~process] bc <address-address> - clear a memory breakpoint
        [~process] bc <address> <size> - clear a memory breakpoint
        """
        token_list = self.split_tokens(arg, 1, 2)
        pid, tid, address, size = self.input_breakpoint(token_list)
        debug = self.debug
        found = False
        # Same dispatch as do_bo, but the dont_* calls remove the breakpoint
        # entirely rather than just disabling it.
        if size is None:
            if tid is not None:
                if debug.has_hardware_breakpoint(tid, address):
                    debug.dont_watch_variable(tid, address)
                    found = True
            if pid is not None:
                if debug.has_code_breakpoint(pid, address):
                    debug.dont_break_at(pid, address)
                    found = True
        else:
            if debug.has_page_breakpoint(pid, address):
                debug.dont_watch_buffer(pid, address, size)
                found = True
        if not found:
            print("Error: breakpoint not found.")
    def do_disassemble(self, arg):
        """
        [~thread] u [register] - show code disassembly
        [~process] u [address] - show code disassembly
        [~thread] disassemble [register] - show code disassembly
        [~process] disassemble [address] - show code disassembly
        """
        # With no argument, continue from where the last "u" left off.
        if not arg:
            arg = self.default_disasm_target
        token_list = self.split_tokens(arg, 1, 1)
        pid, tid = self.get_process_and_thread_ids_from_prefix()
        process = self.get_process(pid)
        address = self.input_address(token_list[0], pid, tid)
        try:
            # Read generously (15 bytes max per x86 insn * 8) then keep 8.
            code = process.disassemble(address, 15*8)[:8]
        except Exception:
            msg = "can't disassemble address %s"
            msg = msg % HexDump.address(address)
            raise CmdError(msg)
        if code:
            label = process.get_label_at_address(address)
            last_code = code[-1]
            # Remember the address right after the last shown instruction so
            # a bare "u" continues from there.
            next_address = last_code[0] + last_code[1]
            next_address = HexOutput.integer(next_address)
            self.default_disasm_target = next_address
            print("%s:" % label)
##            print(CrashDump.dump_code(code))
            for line in code:
                print(CrashDump.dump_code_line(line, bShowDump = False))
    do_u = do_disassemble
def do_search(self, arg):
"""
[~process] s [address-address] <search string>
[~process] search [address-address] <search string>
"""
token_list = self.split_tokens(arg, 1, 3)
pid, tid = self.get_process_and_thread_ids_from_prefix()
process = self.get_process(pid)
if len(token_list) == 1:
pattern = token_list[0]
minAddr = None
maxAddr = None
else:
pattern = token_list[-1]
addr, size = self.input_address_range(token_list[:-1], pid, tid)
minAddr = addr
maxAddr = addr + size
iter = process.search_bytes(pattern)
if process.get_bits() == 32:
addr_width = 8
else:
addr_width = 16
# TODO: need a prettier output here!
for addr in iter:
print(HexDump.address(addr, addr_width))
do_s = do_search
def do_searchhex(self, arg):
"""
[~process] sh [address-address] <hexadecimal pattern>
[~process] searchhex [address-address] <hexadecimal pattern>
"""
token_list = self.split_tokens(arg, 1, 3)
pid, tid = self.get_process_and_thread_ids_from_prefix()
process = self.get_process(pid)
if len(token_list) == 1:
pattern = token_list[0]
minAddr = None
maxAddr = None
else:
pattern = token_list[-1]
addr, size = self.input_address_range(token_list[:-1], pid, tid)
minAddr = addr
maxAddr = addr + size
iter = process.search_hexa(pattern)
if process.get_bits() == 32:
addr_width = 8
else:
addr_width = 16
for addr, bytes in iter:
print(HexDump.hexblock(bytes, addr, addr_width),)
do_sh = do_searchhex
## def do_strings(self, arg):
## """
## [~process] strings - extract ASCII strings from memory
## """
## if arg:
## raise CmdError("too many arguments")
## pid, tid = self.get_process_and_thread_ids_from_prefix()
## process = self.get_process(pid)
## for addr, size, data in process.strings():
## print("%s: %r" % (HexDump.address(addr), data)
    def do_d(self, arg):
        """
        [~thread] d <register> - show memory contents
        [~thread] d <register-register> - show memory contents
        [~thread] d <register> <size> - show memory contents
        [~process] d <address> - show memory contents
        [~process] d <address-address> - show memory contents
        [~process] d <address> <size> - show memory contents
        """
        # Re-run whichever display command (db/dw/dd/dq/ds/du) ran last.
        return self.last_display_command(arg)
    def do_db(self, arg):
        """
        [~thread] db <register> - show memory contents as bytes
        [~thread] db <register-register> - show memory contents as bytes
        [~thread] db <register> <size> - show memory contents as bytes
        [~process] db <address> - show memory contents as bytes
        [~process] db <address-address> - show memory contents as bytes
        [~process] db <address> <size> - show memory contents as bytes
        """
        self.print_memory_display(arg, HexDump.hexblock)
        # Remember this formatter so a bare "d" repeats it.
        self.last_display_command = self.do_db
    def do_dw(self, arg):
        """
        [~thread] dw <register> - show memory contents as words
        [~thread] dw <register-register> - show memory contents as words
        [~thread] dw <register> <size> - show memory contents as words
        [~process] dw <address> - show memory contents as words
        [~process] dw <address-address> - show memory contents as words
        [~process] dw <address> <size> - show memory contents as words
        """
        self.print_memory_display(arg, HexDump.hexblock_word)
        self.last_display_command = self.do_dw
    def do_dd(self, arg):
        """
        [~thread] dd <register> - show memory contents as dwords
        [~thread] dd <register-register> - show memory contents as dwords
        [~thread] dd <register> <size> - show memory contents as dwords
        [~process] dd <address> - show memory contents as dwords
        [~process] dd <address-address> - show memory contents as dwords
        [~process] dd <address> <size> - show memory contents as dwords
        """
        self.print_memory_display(arg, HexDump.hexblock_dword)
        self.last_display_command = self.do_dd
    def do_dq(self, arg):
        """
        [~thread] dq <register> - show memory contents as qwords
        [~thread] dq <register-register> - show memory contents as qwords
        [~thread] dq <register> <size> - show memory contents as qwords
        [~process] dq <address> - show memory contents as qwords
        [~process] dq <address-address> - show memory contents as qwords
        [~process] dq <address> <size> - show memory contents as qwords
        """
        self.print_memory_display(arg, HexDump.hexblock_qword)
        self.last_display_command = self.do_dq
    # XXX TODO
    # Change the way the default is used with ds and du
    def do_ds(self, arg):
        """
        [~thread] ds <register> - show memory contents as ANSI string
        [~process] ds <address> - show memory contents as ANSI string
        """
        if not arg:
            arg = self.default_display_target
        # NOTE(review): ds allows only 1 token while du allows 2 (an optional
        # size); the asymmetry looks unintentional — confirm before changing.
        token_list = self.split_tokens(arg, 1, 1)
        # 256 is the default maximum string length to peek.
        pid, tid, address, size = self.input_display(token_list, 256)
        process = self.get_process(pid)
        data = process.peek_string(address, False, size)
        if data:
            print(repr(data))
        self.last_display_command = self.do_ds
    def do_du(self, arg):
        """
        [~thread] du <register> - show memory contents as Unicode string
        [~process] du <address> - show memory contents as Unicode string
        """
        if not arg:
            arg = self.default_display_target
        token_list = self.split_tokens(arg, 1, 2)
        pid, tid, address, size = self.input_display(token_list, 256)
        process = self.get_process(pid)
        # True -> decode as UTF-16 (Windows Unicode).
        data = process.peek_string(address, True, size)
        if data:
            print(repr(data))
        self.last_display_command = self.do_du
    def do_register(self, arg):
        """
        [~thread] r - print the value of all registers
        [~thread] r <register> - print the value of a register
        [~thread] r <register>=<value> - change the value of a register
        [~thread] register - print the value of all registers
        [~thread] register <register> - print the value of a register
        [~thread] register <register>=<value> - change the value of a register
        """
        arg = arg.strip()
        if not arg:
            self.print_current_location()
        else:
            equ = arg.find('=')
            if equ >= 0:
                # "reg=value" assignment form; empty value means zero.
                register = arg[:equ].strip()
                value = arg[equ+1:].strip()
                if not value:
                    value = '0'
                self.change_register(register, value)
            else:
                # Single register read.
                value = self.input_register(arg)
                if value is None:
                    raise CmdError("unknown register: %s" % arg)
                try:
                    # Try to resolve the value to a module!symbol label.
                    label = None
                    thread = self.get_thread_from_prefix()
                    process = thread.get_process()
                    module = process.get_module_at_address(value)
                    if module:
                        label = module.get_label_at_address(value)
                except RuntimeError:
                    label = None
                reg = arg.upper()
                val = HexDump.address(value)
                if label:
                    print("%s: %s (%s)" % (reg, val, label))
                else:
                    print("%s: %s" % (reg, val))
    do_r = do_register
    def do_eb(self, arg):
        """
        [~process] eb <address> <data> - write the data to the specified address
        """
        # TODO
        # data parameter should be optional, use a child Cmd here
        pid = self.get_process_id_from_prefix()
        token_list = self.split_tokens(arg, 2)
        address = self.input_address(token_list[0], pid)
        # Remaining tokens are hex byte values, e.g. "eb 401000 90 90".
        data = HexInput.hexadecimal(' '.join(token_list[1:]))
        self.write_memory(address, data, pid)
    # XXX TODO
    # add ew, ed and eq here
    def do_find(self, arg):
        """
        [~process] f <string> - find the string in the process memory
        [~process] find <string> - find the string in the process memory
        """
        if not arg:
            raise CmdError("missing parameter: string")
        process = self.get_process_from_prefix()
        # The whole argument (spaces included) is the search string.
        self.find_in_memory(arg, process)
    do_f = do_find
    def do_memory(self, arg):
        """
        [~process] m - show the process memory map
        [~process] memory - show the process memory map
        """
        if arg: # TODO: take min and max addresses
            raise CmdError("too many arguments")
        process = self.get_process_from_prefix()
        try:
            memoryMap = process.get_memory_map()
            mappedFilenames = process.get_mapped_filenames()
            print('')
            print(CrashDump.dump_memory_map(memoryMap, mappedFilenames))
        except WindowsError:
            # Can happen if the process dies or access is denied mid-query.
            msg = "can't get memory information for process (%d)"
            raise CmdError(msg % process.get_pid())
    do_m = do_memory
#------------------------------------------------------------------------------
# Event handling
# TODO
# * add configurable stop/don't stop behavior on events and exceptions
# Stop for all events, unless stated otherwise.
    def event(self, event):
        """Generic debug event: report it and break into the prompt."""
        self.print_event(event)
        self.prompt_user()
    # Stop for all exceptions, unless stated otherwise.
    def exception(self, event):
        """Generic exception: report it and break into the prompt."""
        self.print_exception(event)
        self.prompt_user()
    # Stop for breakpoint exceptions.
    def breakpoint(self, event):
        """Breakpoint hit: show its location (or the raw exception) and stop."""
        # Our own breakpoints carry a .breakpoint attribute; anything else is
        # a breakpoint exception we didn't set.
        if hasattr(event, 'breakpoint') and event.breakpoint:
            self.print_breakpoint_location(event)
        else:
            self.print_exception(event)
        self.prompt_user()
    # Stop for WOW64 breakpoint exceptions.
    def wow64_breakpoint(self, event):
        """WOW64 breakpoint: report and stop."""
        self.print_exception(event)
        self.prompt_user()
    # Stop for single step exceptions.
    def single_step(self, event):
        """Single step: expected while tracing, otherwise an exception."""
        if event.debug.is_tracing(event.get_tid()):
            self.print_breakpoint_location(event)
        else:
            self.print_exception(event)
        self.prompt_user()
    # Don't stop for C++ exceptions.
    def ms_vc_exception(self, event):
        """MSVC C++ exception: log it and let the debugee handle it."""
        self.print_exception(event)
        event.continueStatus = win32.DBG_CONTINUE
    # Don't stop for process start.
    def create_process(self, event):
        """New process: log process, initial thread and main module."""
        self.print_process_start(event)
        self.print_thread_start(event)
        self.print_module_load(event)
    # Don't stop for process exit.
    def exit_process(self, event):
        """Process exit: log it only."""
        self.print_process_end(event)
    # Don't stop for thread creation.
    def create_thread(self, event):
        """New thread: log it only."""
        self.print_thread_start(event)
    # Don't stop for thread exit.
    def exit_thread(self, event):
        """Thread exit: log it only."""
        self.print_thread_end(event)
    # Don't stop for DLL load.
    def load_dll(self, event):
        """DLL load: log it only."""
        self.print_module_load(event)
    # Don't stop for DLL unload.
    def unload_dll(self, event):
        """DLL unload: log it only."""
        self.print_module_unload(event)
    # Don't stop for debug strings.
    def output_string(self, event):
        """OutputDebugString from the debugee: log it only."""
        self.print_debug_string(event)
#------------------------------------------------------------------------------
# History file
def load_history(self):
global readline
if readline is None:
try:
import readline
except ImportError:
return
if self.history_file_full_path is None:
folder = os.environ.get('USERPROFILE', '')
if not folder:
folder = os.environ.get('HOME', '')
if not folder:
folder = os.path.split(sys.argv[0])[1]
if not folder:
folder = os.path.curdir
self.history_file_full_path = os.path.join(folder,
self.history_file)
try:
if os.path.exists(self.history_file_full_path):
readline.read_history_file(self.history_file_full_path)
except IOError:
e = sys.exc_info()[1]
warnings.warn("Cannot load history file, reason: %s" % str(e))
    def save_history(self):
        """
        Persist the readline command history to disk, if a history file path
        was established by load_history(). Failures become warnings.
        """
        if self.history_file_full_path is not None:
            global readline
            if readline is None:
                try:
                    import readline
                except ImportError:
                    return
            try:
                readline.write_history_file(self.history_file_full_path)
            except IOError:
                e = sys.exc_info()[1]
                warnings.warn("Cannot save history file, reason: %s" % str(e))
#------------------------------------------------------------------------------
# Main loop
# Debugging loop.
    def loop(self):
        """
        Main debugging loop: dispatch debug events and drop into the command
        prompt as needed, until a command sets self.debuggerExit.
        """
        self.debuggerExit = False
        debug = self.debug

        # Stop on the initial event, if any.
        if self.lastEvent is not None:
            # Queue an 'r' command so the prompt opens showing the registers.
            self.cmdqueue.append('r')
            self.prompt_user()

        # Loop until the debugger is told to quit.
        while not self.debuggerExit:

            try:

                # If for some reason the last event wasn't continued,
                # continue it here. This won't be done more than once
                # for a given Event instance, though.
                try:
                    debug.cont()
                # On error, show the command prompt.
                except Exception:
                    traceback.print_exc()
                    self.prompt_user()

                # While debugees are attached, handle debug events.
                # Some debug events may cause the command prompt to be shown.
                if self.debug.get_debugee_count() > 0:
                    try:

                        # Get the next debug event.
                        debug.wait()

                        # Dispatch the debug event.
                        try:
                            debug.dispatch()

                        # Continue the debug event.
                        finally:
                            debug.cont()

                    # On error, show the command prompt.
                    except Exception:
                        traceback.print_exc()
                        self.prompt_user()

                # While no debugees are attached, show the command prompt.
                else:
                    self.prompt_user()

            # When the user presses Ctrl-C send a debug break to all debugees.
            except KeyboardInterrupt:
                success = False
                try:
                    print("*** User requested debug break")
                    system = debug.system
                    for pid in debug.get_debugee_pids():
                        try:
                            system.get_process(pid).debug_break()
                            success = True
                        except:
                            traceback.print_exc()
                except:
                    traceback.print_exc()
                if not success:
                    raise # This should never happen!
| apache-2.0 |
mdibaiee/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/tests/performance/concatenation.py | 451 | 1145 | from __future__ import absolute_import, division, unicode_literals
def f1():
    # Concatenation via augmented assignment: x += y + z.
    x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    x += y + z
def f2():
    # Concatenation via plain + and rebinding: x = x + y + z.
    x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    x = x + y + z
def f3():
    # Concatenation via str.join over a tuple.
    x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    x = "".join((x, y, z))
def f4():
    # Concatenation via %-style string formatting.
    x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    x = "%s%s%s" % (x, y, z)
import timeit
# Time each strategy: 3 repeats of 1M calls; the minimum is the best estimate.
for x in range(4):
    statement = "f%s" % (x + 1)
    t = timeit.Timer(statement, "from __main__ import " + statement)
    r = t.repeat(3, 1000000)
    print(r, min(r))
| mpl-2.0 |
huiren/ece511 | src/python/m5/util/multidict.py | 85 | 5241 | # Copyright (c) 2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
__all__ = [ 'multidict' ]
class multidict(object):
    """
    A dict-like object layered over a parent mapping.

    Reads fall through to the parent when a key is missing locally; writes
    always go to the local dict. Deleting a key that exists only in the
    parent records it in ``self.deleted`` so it appears removed without
    mutating the parent. (Ported from Python 2-only syntax — print
    statements, backticks, ``except E, e``, ``has_key`` — to code that is
    also valid Python 3; the public interface is unchanged.)
    """

    def __init__(self, parent = {}, **kwargs):
        # NOTE: the shared mutable default {} is safe here because the
        # parent is never written to by this class.
        self.local = dict(**kwargs)
        self.parent = parent
        self.deleted = {}

    def __str__(self):
        return str(dict(self.items()))

    def __repr__(self):
        # (was Python 2 backtick syntax)
        return repr(dict(self.items()))

    def __contains__(self, key):
        # Deliberately ignores self.deleted (matching the original):
        # a locally-deleted key still present in the parent reports True.
        return key in self.local or key in self.parent

    def __delitem__(self, key):
        try:
            del self.local[key]
        except KeyError as e:
            if key in self.parent:
                # Mask the parent's entry instead of mutating the parent.
                self.deleted[key] = True
            else:
                raise KeyError(e)

    def __setitem__(self, key, value):
        # Un-delete the key if it was previously masked.
        self.deleted.pop(key, False)
        self.local[key] = value

    def __getitem__(self, key):
        try:
            return self.local[key]
        except KeyError as e:
            if not self.deleted.get(key, False) and key in self.parent:
                return self.parent[key]
            else:
                raise KeyError(e)

    def __len__(self):
        # NOTE: counts shadowed/deleted parent keys too; preserved as-is
        # for backward compatibility.
        return len(self.local) + len(self.parent)

    def next(self):
        # Generator over effective (key, value) pairs: local entries first,
        # then parent entries that are neither shadowed nor deleted.
        for key, value in self.local.items():
            yield key, value
        if self.parent:
            for key, value in self.parent.next():
                if key not in self.local and key not in self.deleted:
                    yield key, value

    def has_key(self, key):
        return key in self

    def iteritems(self):
        for item in self.next():
            yield item

    def items(self):
        return [ item for item in self.next() ]

    def iterkeys(self):
        for key, value in self.next():
            yield key

    def keys(self):
        return [ key for key, value in self.next() ]

    def itervalues(self):
        for key, value in self.next():
            yield value

    def values(self):
        return [ value for key, value in self.next() ]

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default

    def setdefault(self, key, default):
        try:
            return self[key]
        except KeyError:
            self.deleted.pop(key, False)
            self.local[key] = default
            return default

    def _dump(self):
        # Debug helper: print each level's local dict down the parent chain.
        print('multidict dump')
        node = self
        while isinstance(node, multidict):
            print('    %s' % (node.local,))
            node = node.parent

    def _dumpkey(self, key):
        # Debug helper: show every value bound to key along the chain.
        values = []
        node = self
        while isinstance(node, multidict):
            if key in node.local:
                values.append(node.local[key])
            node = node.parent
        print('%s %s' % (key, values))
if __name__ == '__main__':
    # Smoke-test / demo exercising a four-level multidict chain.
    # (Ported from Python 2 print statements to print() calls.)
    test1 = multidict()
    test2 = multidict(test1)
    test3 = multidict(test2)
    test4 = multidict(test3)
    test1['a'] = 'test1_a'
    test1['b'] = 'test1_b'
    test1['c'] = 'test1_c'
    test1['d'] = 'test1_d'
    test1['e'] = 'test1_e'
    test2['a'] = 'test2_a'
    del test2['b']
    test2['c'] = 'test2_c'
    del test1['a']
    test2.setdefault('f', multidict)
    print('test1>', test1.items())
    print('test2>', test2.items())
    #print(test1['a'])
    print(test1['b'])
    print(test1['c'])
    print(test1['d'])
    print(test1['e'])
    print(test2['a'])
    #print(test2['b'])
    print(test2['c'])
    print(test2['d'])
    print(test2['e'])
    for key in test2.iterkeys():
        print(key)
    test2.get('g', 'foo')
    #test2.get('b')
    test2.get('b', 'bar')
    test2.setdefault('b', 'blah')
    print(test1)
    print(test2)
    print(repr(test2))
    print(len(test2))
    test3['a'] = [ 0, 1, 2, 3 ]
    print(test4)
| bsd-3-clause |
syaiful6/django | django/utils/tree.py | 372 | 4883 | """
A class for storing a tree graph. Primarily used for filter constructs in the
ORM.
"""
import copy
class Node(object):
    """
    A single internal node in the tree graph. A Node should be viewed as a
    connection (the root) with the children being either leaf nodes or other
    Node instances.
    """
    # Standard connector type. Clients usually won't use this at all and
    # subclasses will usually override the value.
    default = 'DEFAULT'

    def __init__(self, children=None, connector=None, negated=False):
        """
        Construct a new Node, defaulting the connector when none is given.
        """
        # Keep a shallow copy so the caller's sequence is never aliased.
        if children:
            self.children = children[:]
        else:
            self.children = []
        self.connector = connector if connector else self.default
        self.negated = negated

    # We need this because of django.db.models.query_utils.Q. Q.__init__() is
    # problematic, but it is a natural Node subclass in all other respects.
    @classmethod
    def _new_instance(cls, children=None, connector=None, negated=False):
        """
        Create a fresh instance of this class (or subclass) for internal use,
        bypassing any subclass __init__ that has an incompatible signature.
        """
        fresh = Node(children, connector, negated)
        fresh.__class__ = cls
        return fresh

    def __str__(self):
        body = ', '.join(str(child) for child in self.children)
        if self.negated:
            return '(NOT (%s: %s))' % (self.connector, body)
        return '(%s: %s)' % (self.connector, body)

    def __repr__(self):
        return "<%s: %s>" % (type(self).__name__, self)

    def __deepcopy__(self, memodict):
        """
        Utility method used by copy.deepcopy(): children are deep-copied,
        connector/negated are plain attributes.
        """
        clone = Node(connector=self.connector, negated=self.negated)
        clone.__class__ = self.__class__
        clone.children = copy.deepcopy(self.children, memodict)
        return clone

    def __len__(self):
        """
        The size of a node is the number of children it has.
        """
        return len(self.children)

    def __bool__(self):
        """
        For truth value testing: a node is truthy iff it has children.
        """
        return len(self.children) > 0

    def __nonzero__(self):      # Python 2 compatibility
        return type(self).__bool__(self)

    def __contains__(self, other):
        """
        Return True if 'other' is a direct child of this instance.
        """
        return other in self.children

    def add(self, data, conn_type, squash=True):
        """
        Combine this tree with ``data`` using the connector ``conn_type``,
        squashing ``data``'s node away when possible.

        This tree (self) is never pushed down to a child of the combined
        tree, and its connector/negated flags only change in the wrap case.
        Returns a node usable in place of ``data`` whether or not it was
        squashed. With ``squash=False`` the data is simply appended.
        """
        # Already present: nothing to do.
        if data in self.children:
            return data
        if not squash:
            self.children.append(data)
            return data
        if self.connector != conn_type:
            # Connector mismatch: wrap the current children in a new node,
            # then hold [wrapped, data] under the new connector.
            wrapped = self._new_instance(self.children, self.connector,
                                         self.negated)
            self.connector = conn_type
            self.children = [wrapped, data]
            return data
        # Connector matches; see if data's own children can be absorbed.
        # (AB)(CD) == (ABCD); a single-child node absorbs regardless of its
        # connector. Negated nodes are never absorbed.
        absorbable = (isinstance(data, Node) and not data.negated
                      and (data.connector == conn_type or len(data) == 1))
        if absorbable:
            self.children.extend(data.children)
            return self
        self.children.append(data)
        return data

    def negate(self):
        """
        Negate the sense of the root connector.
        """
        self.negated = not self.negated
| bsd-3-clause |
ouziel-slama/compose | tests/unit/log_printer_test.py | 6 | 1755 | from __future__ import unicode_literals
from __future__ import absolute_import
import os
from fig.cli.log_printer import LogPrinter
from .. import unittest
class LogPrinterTest(unittest.TestCase):
    """Tests for LogPrinter's formatting of container log streams."""

    def get_default_output(self, monochrome=False):
        # Run the printer over a single fake container that emits two lines.
        def reader(*args, **kwargs):
            yield "hello\nworld"
        container = MockContainer(reader)
        output = run_log_printer([container], monochrome=monochrome)
        return output

    def test_single_container(self):
        """Both log lines of the container appear in the output."""
        output = self.get_default_output()
        self.assertIn('hello', output)
        self.assertIn('world', output)

    def test_monochrome(self):
        """Monochrome mode must not emit any ANSI escape sequences."""
        output = self.get_default_output(monochrome=True)
        self.assertNotIn('\033[', output)

    def test_polychrome(self):
        """Default (color) mode emits ANSI escape sequences."""
        output = self.get_default_output()
        self.assertIn('\033[', output)

    def test_unicode(self):
        """UTF-8 encoded multi-byte characters pass through unmangled."""
        glyph = u'\u2022'.encode('utf-8')
        def reader(*args, **kwargs):
            yield glyph + b'\n'
        container = MockContainer(reader)
        output = run_log_printer([container])
        self.assertIn(glyph, output)
def run_log_printer(containers, monochrome=False):
    """
    Run a LogPrinter over *containers*, capturing its output through an OS
    pipe, and return everything it wrote as a single string.
    """
    r, w = os.pipe()
    reader, writer = os.fdopen(r, 'r'), os.fdopen(w, 'w')
    printer = LogPrinter(containers, output=writer, monochrome=monochrome)
    printer.run()
    # Closing the writer lets read() see EOF instead of blocking.
    writer.close()
    try:
        return reader.read()
    finally:
        # Bug fix: the read end was previously leaked on every call.
        reader.close()
class MockContainer(object):
    """Test double exposing just the container API LogPrinter consumes."""

    def __init__(self, reader):
        # Factory that produces the fake log stream handed out by attach().
        self._reader = reader

    @property
    def name(self):
        """Full container name, project prefix included."""
        return 'myapp_web_1'

    @property
    def name_without_project(self):
        """Container name with the project prefix stripped off."""
        return 'web_1'

    def attach(self, *args, **kwargs):
        """Ignore all arguments and hand back a fresh canned log iterator."""
        return self._reader()

    def wait(self, *args, **kwargs):
        """Pretend the container always exits cleanly."""
        return 0
| apache-2.0 |
mahak/neutron | neutron/db/migration/alembic_migrations/dvr_init_opts.py | 14 | 2619 | # Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Initial operations for dvr
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the initial tables for DVR (distributed virtual routing)."""
    # Per-host MAC address used to tag DVR-originated traffic; one row
    # per host, MAC itself must be globally unique.
    op.create_table(
        'dvr_host_macs',
        sa.Column('host', sa.String(length=255), nullable=False),
        sa.Column('mac_address', sa.String(length=32),
                  nullable=False, unique=True),
        sa.PrimaryKeyConstraint('host')
    )
    # ML2 port binding keyed by (port, host): a distributed port can be
    # bound on several hosts at once, unlike regular port bindings.
    op.create_table(
        'ml2_dvr_port_bindings',
        sa.Column('port_id', sa.String(length=36), nullable=False),
        sa.Column('host', sa.String(length=255), nullable=False),
        sa.Column('router_id', sa.String(length=36), nullable=True),
        sa.Column('vif_type', sa.String(length=64), nullable=False),
        sa.Column('vif_details', sa.String(length=4095),
                  nullable=False, server_default=''),
        sa.Column('vnic_type', sa.String(length=64),
                  nullable=False, server_default='normal'),
        sa.Column('profile', sa.String(length=4095),
                  nullable=False, server_default=''),
        sa.Column(u'status', sa.String(16), nullable=False),
        sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('port_id', 'host')
    )
    # Binding of routers to the L3 agents hosting their centralized SNAT
    # function, including the gateway port used for it.
    op.create_table(
        'csnat_l3_agent_bindings',
        sa.Column('router_id', sa.String(length=36), nullable=False),
        sa.Column('l3_agent_id', sa.String(length=36), nullable=False),
        sa.Column('host_id', sa.String(length=255), nullable=True),
        sa.Column('csnat_gw_port_id', sa.String(length=36), nullable=True),
        sa.ForeignKeyConstraint(['l3_agent_id'], ['agents.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['csnat_gw_port_id'], ['ports.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('router_id', 'l3_agent_id')
    )
| apache-2.0 |
kvar/ansible | lib/ansible/modules/network/fortios/fortios_spamfilter_fortishield.py | 13 | 9400 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
# Standard Ansible module metadata consumed by ansible-doc and CI tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_spamfilter_fortishield
short_description: Configure FortiGuard - AntiSpam in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify spamfilter feature and fortishield category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
spamfilter_fortishield:
description:
- Configure FortiGuard - AntiSpam.
default: null
type: dict
suboptions:
spam_submit_force:
description:
- Enable/disable force insertion of a new mime entity for the submission text.
type: str
choices:
- enable
- disable
spam_submit_srv:
description:
- Hostname of the spam submission server.
type: str
spam_submit_txt2htm:
description:
- Enable/disable conversion of text email to HTML email.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure FortiGuard - AntiSpam.
fortios_spamfilter_fortishield:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
spamfilter_fortishield:
spam_submit_force: "enable"
spam_submit_srv: "<your_own_value>"
spam_submit_txt2htm: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Authenticate *fos* (a FortiOSAPI handle) against the device in *data*.

    Used only in legacy mode, when host/username/password are supplied
    directly instead of going through the HTTPAPI connection plugin.
    """
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']
    fos.debug('on')
    # HTTPS stays enabled unless the caller explicitly turned it off.
    if 'https' in data and not data['https']:
        fos.https('off')
    else:
        fos.https('on')
    fos.login(host, username, password, verify=ssl_verify)
def filter_spamfilter_fortishield_data(json):
    """Project *json* onto the options the fortishield endpoint accepts.

    Keys that are absent, or present but mapped to None, are dropped so
    they do not overwrite existing device settings.
    """
    option_list = ['spam_submit_force', 'spam_submit_srv', 'spam_submit_txt2htm']
    return {option: json[option]
            for option in option_list
            if option in json and json[option] is not None}
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from underscore to hyphen form.

    Ansible argument names use underscores while FortiOS REST attribute
    names use hyphens; this converts a parsed argument structure before
    it is sent to the API.  Lists are converted in place, dicts are
    rebuilt, scalars are returned unchanged.
    """
    if isinstance(data, list):
        # Bug fix: the converted element must be stored back into the
        # list.  The previous code rebound only the loop variable, so a
        # dict nested inside a list kept its underscore keys (dict
        # conversion returns a *new* dict rather than mutating).
        for index, element in enumerate(data):
            data[index] = underscore_to_hyphen(element)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data
    return data
def spamfilter_fortishield(data, fos):
    """Push the fortishield configuration to the device.

    The user-supplied payload is filtered down to the known options and
    converted to the hyphenated attribute names FortiOS expects before
    being sent with a set (PUT) request.
    """
    vdom = data['vdom']
    spamfilter_fortishield_data = data['spamfilter_fortishield']
    filtered_data = underscore_to_hyphen(filter_spamfilter_fortishield_data(spamfilter_fortishield_data))
    return fos.set('spamfilter',
                   'fortishield',
                   data=filtered_data,
                   vdom=vdom)
def is_successful_status(status):
    """Decide whether a FortiOS API response represents success.

    A DELETE answered with HTTP 404 also counts as success: the object
    being removed is already gone.
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_spamfilter(data, fos):
    """Dispatch the requested spamfilter operation against the device.

    Returns an (is_error, changed, response) triple consumed by main().
    """
    if data['spamfilter_fortishield']:
        resp = spamfilter_fortishield(data, fos)

    # NOTE(review): if 'spamfilter_fortishield' is falsy, resp is unbound
    # and the return below raises UnboundLocalError.  This matches the
    # upstream module's behaviour, but confirm it is intended.
    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp
def main():
    """Ansible module entry point: declare the argument spec and dispatch."""
    # Argument spec mirrors the DOCUMENTATION block above; the connection
    # credentials are optional because HTTPAPI mode supplies them itself.
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "spamfilter_fortishield": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "spam_submit_force": {"required": False, "type": "str",
                                      "choices": ["enable", "disable"]},
                "spam_submit_srv": {"required": False, "type": "str"},
                "spam_submit_txt2htm": {"required": False, "type": "str",
                                        "choices": ["enable", "disable"]}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None
    if not legacy_mode:
        # HTTPAPI mode: reuse the persistent connection Ansible opened.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)
            is_error, has_changed, result = fortios_spamfilter(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy mode: drive the device directly through fortiosapi.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")
        fos = FortiOSAPI()
        login(module.params, fos)
        is_error, has_changed, result = fortios_spamfilter(module.params, fos)
        fos.logout()
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
equialgo/scikit-learn | sklearn/decomposition/kernel_pca.py | 19 | 10960 | """Kernel Principal Components Analysis"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..utils import check_random_state
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted, check_array
from ..exceptions import NotFittedError
from ..base import BaseEstimator, TransformerMixin
from ..preprocessing import KernelCenterer
from ..metrics.pairwise import pairwise_kernels
class KernelPCA(BaseEstimator, TransformerMixin):
    """Kernel Principal component analysis (KPCA)
    Non-linear dimensionality reduction through the use of kernels (see
    :ref:`metrics`).
    Read more in the :ref:`User Guide <kernel_PCA>`.
    Parameters
    ----------
    n_components : int, default=None
        Number of components. If None, all non-zero components are kept.
    kernel : "linear" | "poly" | "rbf" | "sigmoid" | "cosine" | "precomputed"
        Kernel. Default="linear".
    degree : int, default=3
        Degree for poly kernels. Ignored by other kernels.
    gamma : float, default=1/n_features
        Kernel coefficient for rbf and poly kernels. Ignored by other
        kernels.
    coef0 : float, default=1
        Independent term in poly and sigmoid kernels.
        Ignored by other kernels.
    kernel_params : mapping of string to any, default=None
        Parameters (keyword arguments) and values for kernel passed as
        callable object. Ignored by other kernels.
    alpha : int, default=1.0
        Hyperparameter of the ridge regression that learns the
        inverse transform (when fit_inverse_transform=True).
    fit_inverse_transform : bool, default=False
        Learn the inverse transform for non-precomputed kernels.
        (i.e. learn to find the pre-image of a point)
    eigen_solver : string ['auto'|'dense'|'arpack'], default='auto'
        Select eigensolver to use. If n_components is much less than
        the number of training samples, arpack may be more efficient
        than the dense eigensolver.
    tol : float, default=0
        Convergence tolerance for arpack.
        If 0, optimal value will be chosen by arpack.
    max_iter : int, default=None
        Maximum number of iterations for arpack.
        If None, optimal value will be chosen by arpack.
    remove_zero_eig : boolean, default=False
        If True, then all components with zero eigenvalues are removed, so
        that the number of components in the output may be < n_components
        (and sometimes even zero due to numerical instability).
        When n_components is None, this parameter is ignored and components
        with zero eigenvalues are removed regardless.
    random_state : int seed, RandomState instance, or None, default=None
        A pseudo random number generator used for the initialization of the
        residuals when eigen_solver == 'arpack'.
        .. versionadded:: 0.18
    n_jobs : int, default=1
        The number of parallel jobs to run.
        If `-1`, then the number of jobs is set to the number of CPU cores.
        .. versionadded:: 0.18
    copy_X : boolean, default=True
        If True, input X is copied and stored by the model in the `X_fit_`
        attribute. If no further changes will be done to X, setting
        `copy_X=False` saves memory by storing a reference.
        .. versionadded:: 0.18
    Attributes
    ----------
    lambdas_ : array, (n_components,)
        Eigenvalues of the centered kernel matrix in decreasing order.
        If `n_components` and `remove_zero_eig` are not set,
        then all values are stored.
    alphas_ : array, (n_samples, n_components)
        Eigenvectors of the centered kernel matrix. If `n_components` and
        `remove_zero_eig` are not set, then all components are stored.
    dual_coef_ : array, (n_samples, n_features)
        Inverse transform matrix. Set if `fit_inverse_transform` is True.
    X_transformed_fit_ : array, (n_samples, n_components)
        Projection of the fitted data on the kernel principal components.
    X_fit_ : (n_samples, n_features)
        The data used to fit the model. If `copy_X=False`, then `X_fit_` is
        a reference. This attribute is used for the calls to transform.
    References
    ----------
    Kernel PCA was introduced in:
        Bernhard Schoelkopf, Alexander J. Smola,
        and Klaus-Robert Mueller. 1999. Kernel principal
        component analysis. In Advances in kernel methods,
        MIT Press, Cambridge, MA, USA 327-352.
    """
    def __init__(self, n_components=None, kernel="linear",
                 gamma=None, degree=3, coef0=1, kernel_params=None,
                 alpha=1.0, fit_inverse_transform=False, eigen_solver='auto',
                 tol=0, max_iter=None, remove_zero_eig=False,
                 random_state=None, copy_X=True, n_jobs=1):
        # The inverse transform needs the original feature vectors, which
        # a precomputed kernel cannot provide.
        if fit_inverse_transform and kernel == 'precomputed':
            raise ValueError(
                "Cannot fit_inverse_transform with a precomputed kernel.")
        self.n_components = n_components
        self.kernel = kernel
        self.kernel_params = kernel_params
        self.gamma = gamma
        self.degree = degree
        self.coef0 = coef0
        self.alpha = alpha
        self.fit_inverse_transform = fit_inverse_transform
        self.eigen_solver = eigen_solver
        self.remove_zero_eig = remove_zero_eig
        self.tol = tol
        self.max_iter = max_iter
        self._centerer = KernelCenterer()
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.copy_X = copy_X
    @property
    def _pairwise(self):
        # Tells common estimator checks that X is a kernel matrix.
        return self.kernel == "precomputed"
    def _get_kernel(self, X, Y=None):
        # Callable kernels take their own params; builtin kernels take
        # gamma/degree/coef0 (filter_params drops the irrelevant ones).
        if callable(self.kernel):
            params = self.kernel_params or {}
        else:
            params = {"gamma": self.gamma,
                      "degree": self.degree,
                      "coef0": self.coef0}
        return pairwise_kernels(X, Y, metric=self.kernel,
                                filter_params=True, n_jobs=self.n_jobs,
                                **params)
    def _fit_transform(self, K):
        """ Fit's using kernel K"""
        # center kernel
        K = self._centerer.fit_transform(K)
        if self.n_components is None:
            n_components = K.shape[0]
        else:
            n_components = min(K.shape[0], self.n_components)
        # compute eigenvectors
        if self.eigen_solver == 'auto':
            # Heuristic: ARPACK only pays off for few components on a
            # reasonably large kernel matrix.
            if K.shape[0] > 200 and n_components < 10:
                eigen_solver = 'arpack'
            else:
                eigen_solver = 'dense'
        else:
            eigen_solver = self.eigen_solver
        if eigen_solver == 'dense':
            # eigvals restricts the dense solver to the top n_components
            # eigenpairs (eigh returns them in ascending order).
            self.lambdas_, self.alphas_ = linalg.eigh(
                K, eigvals=(K.shape[0] - n_components, K.shape[0] - 1))
        elif eigen_solver == 'arpack':
            random_state = check_random_state(self.random_state)
            # initialize with [-1,1] as in ARPACK
            v0 = random_state.uniform(-1, 1, K.shape[0])
            self.lambdas_, self.alphas_ = eigsh(K, n_components,
                                                which="LA",
                                                tol=self.tol,
                                                maxiter=self.max_iter,
                                                v0=v0)
        # sort eigenvectors in descending order
        indices = self.lambdas_.argsort()[::-1]
        self.lambdas_ = self.lambdas_[indices]
        self.alphas_ = self.alphas_[:, indices]
        # remove eigenvectors with a zero eigenvalue
        if self.remove_zero_eig or self.n_components is None:
            self.alphas_ = self.alphas_[:, self.lambdas_ > 0]
            self.lambdas_ = self.lambdas_[self.lambdas_ > 0]
        return K
    def _fit_inverse_transform(self, X_transformed, X):
        # Learn a kernel ridge regression from the transformed points
        # back to the original features (pre-image approximation).
        if hasattr(X, "tocsr"):
            raise NotImplementedError("Inverse transform not implemented for "
                                      "sparse matrices!")
        n_samples = X_transformed.shape[0]
        K = self._get_kernel(X_transformed)
        # Ridge regularization: add alpha to the diagonal in place.
        K.flat[::n_samples + 1] += self.alpha
        self.dual_coef_ = linalg.solve(K, X, sym_pos=True, overwrite_a=True)
        self.X_transformed_fit_ = X_transformed
    def fit(self, X, y=None):
        """Fit the model from data in X.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.
        Returns
        -------
        self : object
            Returns the instance itself.
        """
        X = check_array(X, accept_sparse='csr', copy=self.copy_X)
        K = self._get_kernel(X)
        self._fit_transform(K)
        if self.fit_inverse_transform:
            # Train the pre-image regression on the projected data.
            sqrt_lambdas = np.diag(np.sqrt(self.lambdas_))
            X_transformed = np.dot(self.alphas_, sqrt_lambdas)
            self._fit_inverse_transform(X_transformed, X)
        self.X_fit_ = X
        return self
    def fit_transform(self, X, y=None, **params):
        """Fit the model from data in X and transform X.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.
        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        self.fit(X, **params)
        # Projection of the training data: eigenvectors scaled by the
        # square roots of the corresponding eigenvalues.
        X_transformed = self.alphas_ * np.sqrt(self.lambdas_)
        if self.fit_inverse_transform:
            self._fit_inverse_transform(X_transformed, X)
        return X_transformed
    def transform(self, X):
        """Transform X.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        check_is_fitted(self, 'X_fit_')
        # Center the test kernel with the statistics learned during fit.
        K = self._centerer.transform(self._get_kernel(X, self.X_fit_))
        return np.dot(K, self.alphas_ / np.sqrt(self.lambdas_))
    def inverse_transform(self, X):
        """Transform X back to original space.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
        Returns
        -------
        X_new : array-like, shape (n_samples, n_features)
        References
        ----------
        "Learning to Find Pre-Images", G BakIr et al, 2004.
        """
        if not self.fit_inverse_transform:
            raise NotFittedError("The fit_inverse_transform parameter was not"
                                 " set to True when instantiating and hence "
                                 "the inverse transform is not available.")
        K = self._get_kernel(X, self.X_transformed_fit_)
        return np.dot(K, self.dual_coef_)
| bsd-3-clause |
flybird119/voltdb | lib/python/voltcli/voltdb.d/recover.py | 5 | 2037 | # This file is part of VoltDB.
# Copyright (C) 2008-2015 VoltDB Inc.
#
# This file contains original code and/or modifications of original code.
# Any modifications made by VoltDB Inc. are licensed under the following
# terms and conditions:
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# VOLT is injected by the voltcli runner; the decorator declares the
# sub-command, its server bundle and its CLI options declaratively.
@VOLT.Command(
    # Uses all default except last is safemode switch availability
    bundles = VOLT.ServerBundle('recover',
                                needs_catalog=False,
                                supports_live=False,
                                default_host=True,
                                safemode_available=True,
                                supports_daemon=True,
                                supports_multiple_daemons=True,
                                check_environment_config=True),
    options = (
        VOLT.BooleanOption('-r', '--replica', 'replica', 'recover replica cluster', default = False),
    ),
    description = 'Start the database and recover the previous state.'
)
def recover(runner):
    """Start VoltDB in recover mode; all behaviour comes from the bundle."""
    runner.go()
| agpl-3.0 |
todaychi/hue | desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/chart/area_chart.py | 10 | 2944 | from __future__ import absolute_import
#Autogenerated schema
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Typed,
Set,
Bool,
Integer,
Sequence,
Alias,
)
from openpyxl.descriptors.excel import ExtensionList
from openpyxl.descriptors.nested import (
NestedMinMax,
NestedSet,
NestedBool,
)
from ._chart import ChartBase
from .descriptors import NestedGapAmount
from .axis import TextAxis, NumericAxis, SeriesAxis, ChartLines
from .label import DataLabels
from .series import Series
class _AreaChartBase(ChartBase):
    """Common descriptors shared by the 2D and 3D area chart classes."""
    grouping = NestedSet(values=(['percentStacked', 'standard', 'stacked']))
    varyColors = NestedBool(nested=True, allow_none=True)
    ser = Sequence(expected_type=Series, allow_none=True)
    dLbls = Typed(expected_type=DataLabels, allow_none=True)
    dataLabels = Alias("dLbls")  # readable alias for the OOXML element name
    dropLines = Typed(expected_type=ChartLines, allow_none=True)
    _series_type = "area"
    # Serialisation order of the child XML elements.
    __elements__ = ('grouping', 'varyColors', 'ser', 'dLbls', 'dropLines')
    def __init__(self,
                 grouping="standard",
                 varyColors=None,
                 ser=(),
                 dLbls=None,
                 dropLines=None,
                 ):
        self.grouping = grouping
        self.varyColors = varyColors
        self.ser = ser
        self.dLbls = dLbls
        self.dropLines = dropLines
        super(_AreaChartBase, self).__init__()
class AreaChart(_AreaChartBase):
    """2D area chart."""
    tagname = "areaChart"
    # Re-declare the inherited descriptors so they serialise on this class.
    grouping = _AreaChartBase.grouping
    varyColors = _AreaChartBase.varyColors
    ser = _AreaChartBase.ser
    dLbls = _AreaChartBase.dLbls
    dropLines = _AreaChartBase.dropLines
    # chart properties actually used by containing classes
    x_axis = Typed(expected_type=TextAxis)
    y_axis = Typed(expected_type=NumericAxis)
    extLst = Typed(expected_type=ExtensionList, allow_none=True)
    __elements__ = _AreaChartBase.__elements__ + ('axId',)
    def __init__(self,
                 axId=None,
                 extLst=None,
                 **kw
                 ):
        # NOTE(review): axId and extLst are accepted but never stored --
        # presumably axId is derived from the axis objects during
        # serialisation; confirm against ChartBase.
        self.x_axis = TextAxis()
        self.y_axis = NumericAxis()
        super(AreaChart, self).__init__(**kw)
class AreaChart3D(AreaChart):
    """3D area chart: adds a series (z) axis and a gap depth setting."""
    tagname = "area3DChart"
    grouping = _AreaChartBase.grouping
    varyColors = _AreaChartBase.varyColors
    ser = _AreaChartBase.ser
    dLbls = _AreaChartBase.dLbls
    dropLines = _AreaChartBase.dropLines
    gapDepth = NestedGapAmount()
    x_axis = Typed(expected_type=TextAxis)
    y_axis = Typed(expected_type=NumericAxis)
    z_axis = Typed(expected_type=SeriesAxis, allow_none=True)
    __elements__ = AreaChart.__elements__ + ('gapDepth', )
    def __init__(self, gapDepth=None, **kw):
        self.gapDepth = gapDepth
        super(AreaChart3D, self).__init__(**kw)
        # Recreate all three axes after the parent set up the 2D pair.
        self.x_axis = TextAxis()
        self.y_axis = NumericAxis()
        self.z_axis = SeriesAxis()
| apache-2.0 |
40223151/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/xml/dom/minicompat.py | 781 | 3228 | """Python version compatibility support for minidom."""
# This module should only be imported using "import *".
#
# The following names are defined:
#
# NodeList -- lightest possible NodeList implementation
#
# EmptyNodeList -- lightest possible NodeList that is guaranteed to
# remain empty (immutable)
#
# StringTypes -- tuple of defined string types
#
# defproperty -- function used in conjunction with GetattrMagic;
# using these together is needed to make them work
# as efficiently as possible in both Python 2.2+
# and older versions. For example:
#
# class MyClass(GetattrMagic):
# def _get_myattr(self):
# return something
#
# defproperty(MyClass, "myattr",
# "return some value")
#
# For Python 2.2 and newer, this will construct a
# property object on the class, which avoids
# needing to override __getattr__(). It will only
# work for read-only attributes.
#
# For older versions of Python, inheriting from
# GetattrMagic will use the traditional
# __getattr__() hackery to achieve the same effect,
# but less efficiently.
#
# defproperty() should be used for each version of
# the relevant _get_<property>() function.
__all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"]
import xml.dom
StringTypes = (str,)
class NodeList(list):
    """Lightest-possible DOM NodeList, backed directly by a Python list."""

    __slots__ = ()

    def item(self, index):
        # DOM semantics: any out-of-range index (negative included)
        # yields None rather than raising.
        return self[index] if 0 <= index < len(self) else None

    def _get_length(self):
        return len(self)

    def _set_length(self, value):
        raise xml.dom.NoModificationAllowedErr(
            "attempt to modify read-only attribute 'length'")

    length = property(_get_length, _set_length,
                      doc="The number of nodes in the NodeList.")

    def __getstate__(self):
        # Pickle as a plain list; __slots__ leaves no instance dict.
        return list(self)

    def __setstate__(self, state):
        self[:] = state
class EmptyNodeList(tuple):
    """A NodeList that is guaranteed to stay empty (immutable)."""

    __slots__ = ()

    def __add__(self, other):
        # Concatenation escapes the immutability: build a real NodeList.
        result = NodeList()
        result.extend(other)
        return result

    def __radd__(self, other):
        result = NodeList()
        result.extend(other)
        return result

    def item(self, index):
        # There is never anything to return.
        return None

    def _get_length(self):
        return 0

    def _set_length(self, value):
        raise xml.dom.NoModificationAllowedErr(
            "attempt to modify read-only attribute 'length'")

    length = property(_get_length, _set_length,
                      doc="The number of nodes in the NodeList.")
def defproperty(klass, name, doc):
    """Install a read-only property *name* on *klass*.

    The getter is the class's existing ``_get_<name>`` method; assigning
    to the attribute raises ``xml.dom.NoModificationAllowedErr``.
    """
    getter = getattr(klass, "_get_" + name)

    def setter(self, value, name=name):
        raise xml.dom.NoModificationAllowedErr(
            "attempt to modify read-only attribute " + repr(name))

    # Sanity check: a hand-written _set_<name> would be silently shadowed.
    assert not hasattr(klass, "_set_" + name), \
        "expected not to find _set_" + name
    setattr(klass, name, property(getter, setter, doc=doc))
| gpl-3.0 |
pkilambi/ceilometer | ceilometer/tests/telemetry/test_notifications.py | 3 | 3962 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslotest import base
from ceilometer.telemetry import notifications
# Sample oslo.messaging notification as emitted by the telemetry API on
# sample.create: the payload carries two gauge samples that the test
# below expects to be extracted verbatim.
NOTIFICATION = {
    u'_context_domain': None,
    u'_context_request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d',
    'event_type': u'sample.create',
    'timestamp': u'2015-06-1909: 19: 35.786893',
    u'_context_auth_token': None,
    u'_context_read_only': False,
    'payload': [{
        u'counter_name': u'instance100',
        u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2',
        u'resource_id': u'instance',
        u'timestamp': u'2015-06-19T09: 19: 35.785330',
        u'message_signature': u'fake_signature1',
        u'resource_metadata': {u'foo': u'bar'},
        u'source': u'30be1fc9a03c4e94ab05c403a8a377f2: openstack',
        u'counter_unit': u'instance',
        u'counter_volume': 1.0,
        u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2',
        u'message_id': u'4d865c6e-1664-11e5-9d41-0819a6cff905',
        u'counter_type': u'gauge'
    },
        {
            u'counter_name': u'instance100',
            u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2',
            u'resource_id': u'instance',
            u'timestamp': u'2015-06-19T09: 19: 35.785330',
            u'message_signature': u'fake_signature12',
            u'resource_metadata': {u'foo': u'bar'},
            u'source': u'30be1fc9a03c4e94ab05c403a8a377f2: openstack',
            u'counter_unit': u'instance',
            u'counter_volume': 1.0,
            u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2',
            u'message_id': u'4d866da8-1664-11e5-9d41-0819a6cff905',
            u'counter_type': u'gauge'
        }],
    u'_context_resource_uuid': None,
    u'_context_user_identity': u'fake_user_identity---',
    u'_context_show_deleted': False,
    u'_context_tenant': u'30be1fc9a03c4e94ab05c403a8a377f2',
    'priority': 'info',
    u'_context_is_admin': True,
    u'_context_project_domain': None,
    u'_context_user': u'e1d870e51c7340cb9d555b15cbfcaec2',
    u'_context_user_domain': None,
    'publisher_id': u'ceilometer.api',
    'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e'
}
class TelemetryApiPostTestCase(base.BaseTestCase):
    """Verify sample extraction from a telemetry API notification."""
    def test_process_notification(self):
        sample_creation = notifications.TelemetryApiPost(None)
        samples = list(sample_creation.process_notification(NOTIFICATION))
        # One Sample per payload entry.
        self.assertEqual(2, len(samples))
        payload = NOTIFICATION["payload"]
        for index, sample in enumerate(samples):
            # Each sample field must mirror its payload counterpart verbatim.
            self.assertEqual(payload[index]["user_id"], sample.user_id)
            self.assertEqual(payload[index]["counter_name"], sample.name)
            self.assertEqual(payload[index]["resource_id"], sample.resource_id)
            self.assertEqual(payload[index]["timestamp"], sample.timestamp)
            self.assertEqual(payload[index]["resource_metadata"],
                             sample.resource_metadata)
            self.assertEqual(payload[index]["counter_volume"], sample.volume)
            self.assertEqual(payload[index]["source"], sample.source)
            self.assertEqual(payload[index]["counter_type"], sample.type)
            self.assertEqual(payload[index]["message_id"], sample.id)
            self.assertEqual(payload[index]["counter_unit"], sample.unit)
| apache-2.0 |
weisongchen/flaskapp | venv/lib/python2.7/site-packages/flask/json.py | 121 | 9183 | # -*- coding: utf-8 -*-
"""
flask.jsonimpl
~~~~~~~~~~~~~~
Implementation helpers for the JSON support in Flask.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import io
import uuid
from datetime import date
from .globals import current_app, request
from ._compat import text_type, PY2
from werkzeug.http import http_date
from jinja2 import Markup
# Use the same json implementation as itsdangerous on which we
# depend anyways.
from itsdangerous import json as _json
# Figure out if simplejson escapes slashes. This behavior was changed
# from one version to another without reason.
_slash_escape = '\\/' not in _json.dumps('/')
__all__ = ['dump', 'dumps', 'load', 'loads', 'htmlsafe_dump',
'htmlsafe_dumps', 'JSONDecoder', 'JSONEncoder',
'jsonify']
def _wrap_reader_for_text(fp, encoding):
if isinstance(fp.read(0), bytes):
fp = io.TextIOWrapper(io.BufferedReader(fp), encoding)
return fp
def _wrap_writer_for_text(fp, encoding):
try:
fp.write('')
except TypeError:
fp = io.TextIOWrapper(fp, encoding)
return fp
class JSONEncoder(_json.JSONEncoder):
    """The default Flask JSON encoder.  This one extends the default simplejson
    encoder by also supporting ``datetime`` objects, ``UUID`` as well as
    ``Markup`` objects which are serialized as RFC 822 datetime strings (same
    as the HTTP date format).  In order to support more data types override the
    :meth:`default` method.
    """
    def default(self, o):
        """Implement this method in a subclass such that it returns a
        serializable object for ``o``, or calls the base implementation (to
        raise a :exc:`TypeError`).
        For example, to support arbitrary iterators, you could implement
        default like this::
            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)
        """
        if isinstance(o, date):
            # Dates (and datetimes, a date subclass) become RFC 822
            # strings -- the same format as HTTP dates.
            return http_date(o.timetuple())
        if isinstance(o, uuid.UUID):
            return str(o)
        if hasattr(o, '__html__'):
            # Markup-like objects serialize as their HTML string form.
            return text_type(o.__html__())
        # Anything else: defer to the base class, which raises TypeError.
        return _json.JSONEncoder.default(self, o)
class JSONDecoder(_json.JSONDecoder):
    """The default JSON decoder.  This one does not change the behavior from
    the default simplejson decoder.  Consult the :mod:`json` documentation
    for more information.  This decoder is not only used for the load
    functions of this module but also :attr:`~flask.Request`.  It exists
    as a subclass purely so applications can override it in one place.
    """
def _dump_arg_defaults(kwargs):
"""Inject default arguments for dump functions."""
if current_app:
kwargs.setdefault('cls', current_app.json_encoder)
if not current_app.config['JSON_AS_ASCII']:
kwargs.setdefault('ensure_ascii', False)
kwargs.setdefault('sort_keys', current_app.config['JSON_SORT_KEYS'])
else:
kwargs.setdefault('sort_keys', True)
kwargs.setdefault('cls', JSONEncoder)
def _load_arg_defaults(kwargs):
"""Inject default arguments for load functions."""
if current_app:
kwargs.setdefault('cls', current_app.json_decoder)
else:
kwargs.setdefault('cls', JSONDecoder)
def dumps(obj, **kwargs):
"""Serialize ``obj`` to a JSON formatted ``str`` by using the application's
configured encoder (:attr:`~flask.Flask.json_encoder`) if there is an
application on the stack.
This function can return ``unicode`` strings or ascii-only bytestrings by
default which coerce into unicode strings automatically. That behavior by
default is controlled by the ``JSON_AS_ASCII`` configuration variable
and can be overridden by the simplejson ``ensure_ascii`` parameter.
"""
_dump_arg_defaults(kwargs)
encoding = kwargs.pop('encoding', None)
rv = _json.dumps(obj, **kwargs)
if encoding is not None and isinstance(rv, text_type):
rv = rv.encode(encoding)
return rv
def dump(obj, fp, **kwargs):
"""Like :func:`dumps` but writes into a file object."""
_dump_arg_defaults(kwargs)
encoding = kwargs.pop('encoding', None)
if encoding is not None:
fp = _wrap_writer_for_text(fp, encoding)
_json.dump(obj, fp, **kwargs)
def loads(s, **kwargs):
"""Unserialize a JSON object from a string ``s`` by using the application's
configured decoder (:attr:`~flask.Flask.json_decoder`) if there is an
application on the stack.
"""
_load_arg_defaults(kwargs)
if isinstance(s, bytes):
s = s.decode(kwargs.pop('encoding', None) or 'utf-8')
return _json.loads(s, **kwargs)
def load(fp, **kwargs):
"""Like :func:`loads` but reads from a file object.
"""
_load_arg_defaults(kwargs)
if not PY2:
fp = _wrap_reader_for_text(fp, kwargs.pop('encoding', None) or 'utf-8')
return _json.load(fp, **kwargs)
def htmlsafe_dumps(obj, **kwargs):
"""Works exactly like :func:`dumps` but is safe for use in ``<script>``
tags. It accepts the same arguments and returns a JSON string. Note that
this is available in templates through the ``|tojson`` filter which will
also mark the result as safe. Due to how this function escapes certain
characters this is safe even if used outside of ``<script>`` tags.
The following characters are escaped in strings:
- ``<``
- ``>``
- ``&``
- ``'``
This makes it safe to embed such strings in any place in HTML with the
notable exception of double quoted attributes. In that case single
quote your attributes or HTML escape it in addition.
.. versionchanged:: 0.10
This function's return value is now always safe for HTML usage, even
if outside of script tags or if used in XHTML. This rule does not
hold true when using this function in HTML attributes that are double
quoted. Always single quote attributes if you use the ``|tojson``
filter. Alternatively use ``|tojson|forceescape``.
"""
rv = dumps(obj, **kwargs) \
.replace(u'<', u'\\u003c') \
.replace(u'>', u'\\u003e') \
.replace(u'&', u'\\u0026') \
.replace(u"'", u'\\u0027')
if not _slash_escape:
rv = rv.replace('\\/', '/')
return rv
def htmlsafe_dump(obj, fp, **kwargs):
"""Like :func:`htmlsafe_dumps` but writes into a file object."""
fp.write(text_type(htmlsafe_dumps(obj, **kwargs)))
def jsonify(*args, **kwargs):
"""This function wraps :func:`dumps` to add a few enhancements that make
life easier. It turns the JSON output into a :class:`~flask.Response`
object with the :mimetype:`application/json` mimetype. For convenience, it
also converts multiple arguments into an array or multiple keyword arguments
into a dict. This means that both ``jsonify(1,2,3)`` and
``jsonify([1,2,3])`` serialize to ``[1,2,3]``.
For clarity, the JSON serialization behavior has the following differences
from :func:`dumps`:
1. Single argument: Passed straight through to :func:`dumps`.
2. Multiple arguments: Converted to an array before being passed to
:func:`dumps`.
3. Multiple keyword arguments: Converted to a dict before being passed to
:func:`dumps`.
4. Both args and kwargs: Behavior undefined and will throw an exception.
Example usage::
from flask import jsonify
@app.route('/_get_current_user')
def get_current_user():
return jsonify(username=g.user.username,
email=g.user.email,
id=g.user.id)
This will send a JSON response like this to the browser::
{
"username": "admin",
"email": "admin@localhost",
"id": 42
}
.. versionchanged:: 0.11
Added support for serializing top-level arrays. This introduces a
security risk in ancient browsers. See :ref:`json-security` for details.
This function's response will be pretty printed if it was not requested
with ``X-Requested-With: XMLHttpRequest`` to simplify debugging unless
the ``JSONIFY_PRETTYPRINT_REGULAR`` config parameter is set to false.
Compressed (not pretty) formatting currently means no indents and no
spaces after separators.
.. versionadded:: 0.2
"""
indent = None
separators = (',', ':')
if current_app.config['JSONIFY_PRETTYPRINT_REGULAR'] and not request.is_xhr:
indent = 2
separators = (', ', ': ')
if args and kwargs:
raise TypeError('jsonify() behavior undefined when passed both args and kwargs')
elif len(args) == 1: # single args are passed directly to dumps()
data = args[0]
else:
data = args or kwargs
return current_app.response_class(
(dumps(data, indent=indent, separators=separators), '\n'),
mimetype=current_app.config['JSONIFY_MIMETYPE']
)
def tojson_filter(obj, **kwargs):
return Markup(htmlsafe_dumps(obj, **kwargs))
| mit |
hamptus/mftpy | mftpy/mft.2.7/tools/extract.py | 1 | 2716 | """
This tool is used to extract MFT entries
"""
from mft.meta.boot import BootFile
# FIXME pass an argument to function so it knows if you are extracting from disk or partition
def extract_meta(partition=None):
"""
Extract the system meta files from a partition
"""
bootdata = open(partition, 'rb').read(1024)
# FIXME: Validate partition to ensure we are pulling files from an NTFS partition
bootfile = BootFile(bootdata)
filenames = [
'0_$MFT.txt',
'1_$MFTMirr.txt',
'2_$LogFile.txt',
'3_$Volume.txt',
"4_$AttrDef.txt",
"5_$dot.txt",
"6_$Bitmap.txt",
"7_$Boot.txt",
"8_$BadClus.txt",
"9_$Secure.txt",
"10_$Upcase.txt",
"11_$Extend.txt",
]
with open(partition, 'rb+') as partition:
partition.seek(bootfile.get_mft_start_offset())
for filename in filenames:
mftentry = open(filename, "w")
mftentry.write(partition.read(1024))
mftentry.close()
def extract_mft_files(partition=None, count=1, start=0):
"""
partition = Which partition to extract from
count = How many files to extract. Default 1.
start = Where to start extracting files. Default is 0.
"""
if not partition:
partition = raw_input("Which partition should I extract from? ")
bootdata = open(partition, 'rb').read(1024)
bootfile = BootFile(bootdata)
with open(partition, 'rb+') as partition:
offset = bootfile.get_mft_start_offset() + (1024 * start)
partition.seek(offset)
for i in xrange(count):
with open("%s_mft.txt" % i, "w") as mft:
mft.write(partition.read(1024))
# def list_filenames(partition=r'/dev/sda1', count=1, start=0):
"""
Returns a list of filenames parsed from the mft entry
"""
#bootdata = open(partition, 'rb').read(1024)
#bootfile = BootFile(bootdata)
#with open(partition, 'rb+') as partition:
#offset = bootfile.get_mft_start_offset() + (1024 * start)
#partition.seek(offset)
#for i in xrange(count):
#mft = MftEntry(partition.read(1024))
#for attr in mft.attributes:
#try:
#yield attr.name
#except AttributeError:
#pass
# for i in list_filenames(partition="/dev/sda2", start=150, count=50):
# print i
# To run, uncomment the line below
# extract_meta_from_partition()
if __name__ == "__main__":
# partition = raw_input("Which partition would you like to extract from? ")
# extract_meta(partition=partition)
extract_mft_files('/dev/sda1', count=10, start=500) | gpl-3.0 |
iblis17/ultisnips | pythonx/UltiSnips/snippet_manager.py | 1 | 31801 | #!/usr/bin/env python
# encoding: utf-8
"""Contains the SnippetManager facade used by all Vim Functions."""
from collections import defaultdict
from functools import wraps
import os
import platform
import traceback
import sys
import vim
import re
from contextlib import contextmanager
from UltiSnips import _vim
from UltiSnips._diff import diff, guess_edit
from UltiSnips.compatibility import as_unicode
from UltiSnips.position import Position
from UltiSnips.snippet.definition import UltiSnipsSnippetDefinition
from UltiSnips.snippet.source import UltiSnipsFileSource, SnipMateFileSource, \
find_all_snippet_files, find_snippet_files, AddedSnippetsSource
from UltiSnips.text import escape
from UltiSnips.vim_state import VimState, VisualContentPreserver
from UltiSnips.buffer_proxy import use_proxy_buffer, suspend_proxy_edits
def _ask_user(a, formatted):
"""Asks the user using inputlist() and returns the selected element or
None."""
try:
rv = _vim.eval('inputlist(%s)' % _vim.escape(formatted))
if rv is None or rv == '0':
return None
rv = int(rv)
if rv > len(a):
rv = len(a)
return a[rv - 1]
except _vim.error:
# Likely "invalid expression", but might be translated. We have no way
# of knowing the exact error, therefore, we ignore all errors silently.
return None
except KeyboardInterrupt:
return None
def _ask_snippets(snippets):
"""Given a list of snippets, ask the user which one they want to use, and
return it."""
display = [as_unicode('%i: %s (%s)') % (i + 1, escape(s.description, '\\'),
escape(s.location, '\\')) for i, s in enumerate(snippets)]
return _ask_user(snippets, display)
def err_to_scratch_buffer(func):
"""Decorator that will catch any Exception that 'func' throws and displays
it in a new Vim scratch buffer."""
@wraps(func)
def wrapper(self, *args, **kwds):
try:
return func(self, *args, **kwds)
except Exception as e: # pylint: disable=bare-except
msg = \
"""An error occured. This is either a bug in UltiSnips or a bug in a
snippet definition. If you think this is a bug, please report it to
https://github.com/SirVer/ultisnips/issues/new.
Following is the full stack trace:
"""
msg += traceback.format_exc()
if hasattr(e, 'snippet_info'):
msg += "\nSnippet, caused error:\n"
msg += re.sub(
'^(?=\S)', ' ', e.snippet_info, flags=re.MULTILINE
)
# snippet_code comes from _python_code.py, it's set manually for
# providing error message with stacktrace of failed python code
# inside of the snippet.
if hasattr(e, 'snippet_code'):
_, _, tb = sys.exc_info()
tb_top = traceback.extract_tb(tb)[-1]
msg += "\nExecuted snippet code:\n"
lines = e.snippet_code.split("\n")
for number, line in enumerate(lines, 1):
msg += str(number).rjust(3)
prefix = " " if line else ""
if tb_top[1] == number:
prefix = " > "
msg += prefix + line + "\n"
# Vim sends no WinLeave msg here.
self._leaving_buffer() # pylint:disable=protected-access
_vim.new_scratch_buffer(msg)
return wrapper
# TODO(sirver): This class is still too long. It should only contain public
# facing methods, most of the private methods should be moved outside of it.
class SnippetManager(object):
"""The main entry point for all UltiSnips functionality.
All Vim functions call methods in this class.
"""
def __init__(self, expand_trigger, forward_trigger, backward_trigger):
self.expand_trigger = expand_trigger
self.forward_trigger = forward_trigger
self.backward_trigger = backward_trigger
self._inner_state_up = False
self._supertab_keys = None
self._csnippets = []
self._buffer_filetypes = defaultdict(lambda: ['all'])
self._vstate = VimState()
self._visual_content = VisualContentPreserver()
self._snippet_sources = []
self._snip_expanded_in_action = False
self._inside_action = False
self._last_inserted_char = ''
self._added_snippets_source = AddedSnippetsSource()
self.register_snippet_source('ultisnips_files', UltiSnipsFileSource())
self.register_snippet_source('added', self._added_snippets_source)
enable_snipmate = '1'
if _vim.eval("exists('g:UltiSnipsEnableSnipMate')") == '1':
enable_snipmate = _vim.eval('g:UltiSnipsEnableSnipMate')
if enable_snipmate == '1':
self.register_snippet_source('snipmate_files',
SnipMateFileSource())
self._reinit()
@err_to_scratch_buffer
def jump_forwards(self):
"""Jumps to the next tabstop."""
_vim.command('let g:ulti_jump_forwards_res = 1')
_vim.command('let &undolevels = &undolevels')
if not self._jump():
_vim.command('let g:ulti_jump_forwards_res = 0')
return self._handle_failure(self.forward_trigger)
@err_to_scratch_buffer
def jump_backwards(self):
"""Jumps to the previous tabstop."""
_vim.command('let g:ulti_jump_backwards_res = 1')
_vim.command('let &undolevels = &undolevels')
if not self._jump(True):
_vim.command('let g:ulti_jump_backwards_res = 0')
return self._handle_failure(self.backward_trigger)
@err_to_scratch_buffer
def expand(self):
"""Try to expand a snippet at the current position."""
_vim.command('let g:ulti_expand_res = 1')
if not self._try_expand():
_vim.command('let g:ulti_expand_res = 0')
self._handle_failure(self.expand_trigger)
@err_to_scratch_buffer
def expand_or_jump(self):
"""This function is used for people who wants to have the same trigger
for expansion and forward jumping.
It first tries to expand a snippet, if this fails, it tries to
jump forward.
"""
_vim.command('let g:ulti_expand_or_jump_res = 1')
rv = self._try_expand()
if not rv:
_vim.command('let g:ulti_expand_or_jump_res = 2')
rv = self._jump()
if not rv:
_vim.command('let g:ulti_expand_or_jump_res = 0')
self._handle_failure(self.expand_trigger)
@err_to_scratch_buffer
def snippets_in_current_scope(self):
"""Returns the snippets that could be expanded to Vim as a global
variable."""
before = _vim.buf.line_till_cursor
snippets = self._snips(before, True)
# Sort snippets alphabetically
snippets.sort(key=lambda x: x.trigger)
for snip in snippets:
description = snip.description[snip.description.find(snip.trigger) +
len(snip.trigger) + 2:]
key = as_unicode(snip.trigger)
description = as_unicode(description)
# remove surrounding "" or '' in snippet description if it exists
if len(description) > 2:
if (description[0] == description[-1] and
description[0] in "'\""):
description = description[1:-1]
_vim.command(as_unicode(
"let g:current_ulti_dict['{key}'] = '{val}'").format(
key=key.replace("'", "''"),
val=description.replace("'", "''")))
@err_to_scratch_buffer
def list_snippets(self):
"""Shows the snippets that could be expanded to the User and let her
select one."""
before = _vim.buf.line_till_cursor
snippets = self._snips(before, True)
if len(snippets) == 0:
self._handle_failure(self.backward_trigger)
return True
# Sort snippets alphabetically
snippets.sort(key=lambda x: x.trigger)
if not snippets:
return True
snippet = _ask_snippets(snippets)
if not snippet:
return True
self._do_snippet(snippet, before)
return True
@err_to_scratch_buffer
def add_snippet(self, trigger, value, description,
options, ft='all', priority=0, context=None, actions={}):
"""Add a snippet to the list of known snippets of the given 'ft'."""
self._added_snippets_source.add_snippet(ft,
UltiSnipsSnippetDefinition(priority, trigger, value,
description, options, {}, 'added',
context, actions))
@err_to_scratch_buffer
def expand_anon(
self, value, trigger='', description='', options='',
context=None, actions={}
):
"""Expand an anonymous snippet right here."""
before = _vim.buf.line_till_cursor
snip = UltiSnipsSnippetDefinition(0, trigger, value, description,
options, {}, '', context, actions)
if not trigger or snip.matches(before):
self._do_snippet(snip, before)
return True
else:
return False
def register_snippet_source(self, name, snippet_source):
"""Registers a new 'snippet_source' with the given 'name'.
The given class must be an instance of SnippetSource. This
source will be queried for snippets.
"""
self._snippet_sources.append((name, snippet_source))
def unregister_snippet_source(self, name):
"""Unregister the source with the given 'name'.
Does nothing if it is not registered.
"""
for index, (source_name, _) in enumerate(self._snippet_sources):
if name == source_name:
self._snippet_sources = self._snippet_sources[:index] + \
self._snippet_sources[index + 1:]
break
def reset_buffer_filetypes(self):
"""Reset the filetypes for the current buffer."""
if _vim.buf.number in self._buffer_filetypes:
del self._buffer_filetypes[_vim.buf.number]
def add_buffer_filetypes(self, ft):
"""Checks for changes in the list of snippet files or the contents of
the snippet files and reloads them if necessary."""
buf_fts = self._buffer_filetypes[_vim.buf.number]
idx = -1
for ft in ft.split('.'):
ft = ft.strip()
if not ft:
continue
try:
idx = buf_fts.index(ft)
except ValueError:
self._buffer_filetypes[_vim.buf.number].insert(idx + 1, ft)
idx += 1
@err_to_scratch_buffer
def _cursor_moved(self):
"""Called whenever the cursor moved."""
if not self._csnippets and self._inner_state_up:
self._teardown_inner_state()
self._vstate.remember_position()
if _vim.eval('mode()') not in 'in':
return
if self._ignore_movements:
self._ignore_movements = False
return
if self._csnippets:
cstart = self._csnippets[0].start.line
cend = self._csnippets[0].end.line + \
self._vstate.diff_in_buffer_length
ct = _vim.buf[cstart:cend + 1]
lt = self._vstate.remembered_buffer
pos = _vim.buf.cursor
lt_span = [0, len(lt)]
ct_span = [0, len(ct)]
initial_line = cstart
# Cut down on lines searched for changes. Start from behind and
# remove all equal lines. Then do the same from the front.
if lt and ct:
while (lt[lt_span[1] - 1] == ct[ct_span[1] - 1] and
self._vstate.ppos.line < initial_line + lt_span[1] - 1 and
pos.line < initial_line + ct_span[1] - 1 and
(lt_span[0] < lt_span[1]) and
(ct_span[0] < ct_span[1])):
ct_span[1] -= 1
lt_span[1] -= 1
while (lt_span[0] < lt_span[1] and
ct_span[0] < ct_span[1] and
lt[lt_span[0]] == ct[ct_span[0]] and
self._vstate.ppos.line >= initial_line and
pos.line >= initial_line):
ct_span[0] += 1
lt_span[0] += 1
initial_line += 1
ct_span[0] = max(0, ct_span[0] - 1)
lt_span[0] = max(0, lt_span[0] - 1)
initial_line = max(cstart, initial_line - 1)
lt = lt[lt_span[0]:lt_span[1]]
ct = ct[ct_span[0]:ct_span[1]]
try:
rv, es = guess_edit(initial_line, lt, ct, self._vstate)
if not rv:
lt = '\n'.join(lt)
ct = '\n'.join(ct)
es = diff(lt, ct, initial_line)
self._csnippets[0].replay_user_edits(es, self._ctab)
except IndexError:
# Rather do nothing than throwing an error. It will be correct
# most of the time
pass
self._check_if_still_inside_snippet()
if self._csnippets:
self._csnippets[0].update_textobjects()
self._vstate.remember_buffer(self._csnippets[0])
def _setup_inner_state(self):
"""Map keys and create autocommands that should only be defined when a
snippet is active."""
if self._inner_state_up:
return
if self.expand_trigger != self.forward_trigger:
_vim.command('inoremap <buffer> <silent> ' + self.forward_trigger +
' <C-R>=UltiSnips#JumpForwards()<cr>')
_vim.command('snoremap <buffer> <silent> ' + self.forward_trigger +
' <Esc>:call UltiSnips#JumpForwards()<cr>')
_vim.command('inoremap <buffer> <silent> ' + self.backward_trigger +
' <C-R>=UltiSnips#JumpBackwards()<cr>')
_vim.command('snoremap <buffer> <silent> ' + self.backward_trigger +
' <Esc>:call UltiSnips#JumpBackwards()<cr>')
# Setup the autogroups.
_vim.command('augroup UltiSnips')
_vim.command('autocmd!')
_vim.command('autocmd CursorMovedI * call UltiSnips#CursorMoved()')
_vim.command('autocmd CursorMoved * call UltiSnips#CursorMoved()')
_vim.command(
'autocmd InsertLeave * call UltiSnips#LeavingInsertMode()')
_vim.command('autocmd BufLeave * call UltiSnips#LeavingBuffer()')
_vim.command(
'autocmd CmdwinEnter * call UltiSnips#LeavingBuffer()')
_vim.command(
'autocmd CmdwinLeave * call UltiSnips#LeavingBuffer()')
# Also exit the snippet when we enter a unite complete buffer.
_vim.command('autocmd Filetype unite call UltiSnips#LeavingBuffer()')
_vim.command('augroup END')
_vim.command('silent doautocmd <nomodeline> User UltiSnipsEnterFirstSnippet')
self._inner_state_up = True
def _teardown_inner_state(self):
"""Reverse _setup_inner_state."""
if not self._inner_state_up:
return
try:
_vim.command('silent doautocmd <nomodeline> User UltiSnipsExitLastSnippet')
if self.expand_trigger != self.forward_trigger:
_vim.command('iunmap <buffer> %s' % self.forward_trigger)
_vim.command('sunmap <buffer> %s' % self.forward_trigger)
_vim.command('iunmap <buffer> %s' % self.backward_trigger)
_vim.command('sunmap <buffer> %s' % self.backward_trigger)
_vim.command('augroup UltiSnips')
_vim.command('autocmd!')
_vim.command('augroup END')
self._inner_state_up = False
except _vim.error:
# This happens when a preview window was opened. This issues
# CursorMoved, but not BufLeave. We have no way to unmap, until we
# are back in our buffer
pass
@err_to_scratch_buffer
def _save_last_visual_selection(self):
"""This is called when the expand trigger is pressed in visual mode.
Our job is to remember everything between '< and '> and pass it on to.
${VISUAL} in case it will be needed.
"""
self._visual_content.conserve()
def _leaving_buffer(self):
"""Called when the user switches tabs/windows/buffers.
It basically means that all snippets must be properly
terminated.
"""
while len(self._csnippets):
self._current_snippet_is_done()
self._reinit()
def _reinit(self):
"""Resets transient state."""
self._ctab = None
self._ignore_movements = False
def _check_if_still_inside_snippet(self):
"""Checks if the cursor is outside of the current snippet."""
if self._cs and (
not self._cs.start <= _vim.buf.cursor <= self._cs.end
):
self._current_snippet_is_done()
self._reinit()
self._check_if_still_inside_snippet()
def _current_snippet_is_done(self):
"""The current snippet should be terminated."""
self._csnippets.pop()
if not self._csnippets:
self._teardown_inner_state()
def _jump(self, backwards=False):
# we need to set 'onemore' there, because of limitations of the vim
# API regarding cursor movements; without that test
# 'CanExpandAnonSnippetInJumpActionWhileSelected' will fail
with _vim.toggle_opt('ve', 'onemore'):
"""Helper method that does the actual jump."""
jumped = False
# We need to remember current snippets stack here because of
# post-jump action on the last tabstop should be able to access
# snippet instance which is ended just now.
stack_for_post_jump = self._csnippets[:]
# If next tab has length 1 and the distance between itself and
# self._ctab is 1 then there is 1 less CursorMove events. We
# cannot ignore next movement in such case.
ntab_short_and_near = False
if self._cs:
snippet_for_action = self._cs
elif stack_for_post_jump:
snippet_for_action = stack_for_post_jump[-1]
else:
snippet_for_action = None
if self._cs:
ntab = self._cs.select_next_tab(backwards)
if ntab:
if self._cs.snippet.has_option('s'):
lineno = _vim.buf.cursor.line
_vim.buf[lineno] = _vim.buf[lineno].rstrip()
_vim.select(ntab.start, ntab.end)
jumped = True
if (self._ctab is not None
and ntab.start - self._ctab.end == Position(0, 1)
and ntab.end - ntab.start == Position(0, 1)):
ntab_short_and_near = True
if ntab.number == 0:
self._current_snippet_is_done()
self._ctab = ntab
else:
# This really shouldn't happen, because a snippet should
# have been popped when its final tabstop was used.
# Cleanup by removing current snippet and recursing.
self._current_snippet_is_done()
jumped = self._jump(backwards)
if jumped:
self._vstate.remember_position()
self._vstate.remember_unnamed_register(self._ctab.current_text)
if not ntab_short_and_near:
self._ignore_movements = True
if len(stack_for_post_jump) > 0 and ntab is not None:
with use_proxy_buffer(stack_for_post_jump, self._vstate):
snippet_for_action.snippet.do_post_jump(
ntab.number,
-1 if backwards else 1,
stack_for_post_jump,
snippet_for_action
)
return jumped
def _leaving_insert_mode(self):
"""Called whenever we leave the insert mode."""
self._vstate.restore_unnamed_register()
def _handle_failure(self, trigger):
"""Mainly make sure that we play well with SuperTab."""
if trigger.lower() == '<tab>':
feedkey = '\\' + trigger
elif trigger.lower() == '<s-tab>':
feedkey = '\\' + trigger
else:
feedkey = None
mode = 'n'
if not self._supertab_keys:
if _vim.eval("exists('g:SuperTabMappingForward')") != '0':
self._supertab_keys = (
_vim.eval('g:SuperTabMappingForward'),
_vim.eval('g:SuperTabMappingBackward'),
)
else:
self._supertab_keys = ['', '']
for idx, sttrig in enumerate(self._supertab_keys):
if trigger.lower() == sttrig.lower():
if idx == 0:
feedkey = r"\<Plug>SuperTabForward"
mode = 'n'
elif idx == 1:
feedkey = r"\<Plug>SuperTabBackward"
mode = 'p'
# Use remap mode so SuperTab mappings will be invoked.
break
if (feedkey == r"\<Plug>SuperTabForward" or
feedkey == r"\<Plug>SuperTabBackward"):
_vim.command('return SuperTab(%s)' % _vim.escape(mode))
elif feedkey:
_vim.command('return %s' % _vim.escape(feedkey))
def _snips(self, before, partial, autotrigger_only=False):
"""Returns all the snippets for the given text before the cursor.
If partial is True, then get also return partial matches.
"""
filetypes = self._buffer_filetypes[_vim.buf.number][::-1]
matching_snippets = defaultdict(list)
clear_priority = None
cleared = {}
for _, source in self._snippet_sources:
source.ensure(filetypes, cached=autotrigger_only)
# Collect cleared information from sources.
for _, source in self._snippet_sources:
sclear_priority = source.get_clear_priority(filetypes)
if sclear_priority is not None and (clear_priority is None
or sclear_priority > clear_priority):
clear_priority = sclear_priority
for key, value in source.get_cleared(filetypes).items():
if key not in cleared or value > cleared[key]:
cleared[key] = value
for _, source in self._snippet_sources:
possible_snippets = source.get_snippets(
filetypes,
before,
partial,
autotrigger_only
)
for snippet in possible_snippets:
if ((clear_priority is None or snippet.priority > clear_priority)
and (snippet.trigger not in cleared or
snippet.priority > cleared[snippet.trigger])):
matching_snippets[snippet.trigger].append(snippet)
if not matching_snippets:
return []
# Now filter duplicates and only keep the one with the highest
# priority.
snippets = []
for snippets_with_trigger in matching_snippets.values():
highest_priority = max(s.priority for s in snippets_with_trigger)
snippets.extend(s for s in snippets_with_trigger
if s.priority == highest_priority)
# For partial matches we are done, but if we want to expand a snippet,
# we have to go over them again and only keep those with the maximum
# priority.
if partial:
return snippets
highest_priority = max(s.priority for s in snippets)
return [s for s in snippets if s.priority == highest_priority]
def _do_snippet(self, snippet, before):
"""Expands the given snippet, and handles everything that needs to be
done with it."""
self._setup_inner_state()
self._snip_expanded_in_action = False
# Adjust before, maybe the trigger is not the complete word
text_before = before
if snippet.matched:
text_before = before[:-len(snippet.matched)]
with use_proxy_buffer(self._csnippets, self._vstate):
with self._action_context():
cursor_set_in_action = snippet.do_pre_expand(
self._visual_content.text,
self._csnippets
)
if cursor_set_in_action:
text_before = _vim.buf.line_till_cursor
before = _vim.buf.line_till_cursor
with suspend_proxy_edits():
if self._cs:
start = Position(_vim.buf.cursor.line, len(text_before))
end = Position(_vim.buf.cursor.line, len(before))
# If cursor is set in pre-action, then action was modified
# cursor line, in that case we do not need to do any edits, it
# can break snippet
if not cursor_set_in_action:
# It could be that our trigger contains the content of
# TextObjects in our containing snippet. If this is indeed
# the case, we have to make sure that those are properly
# killed. We do this by pretending that the user deleted
# and retyped the text that our trigger matched.
edit_actions = [
('D', start.line, start.col, snippet.matched),
('I', start.line, start.col, snippet.matched),
]
self._csnippets[0].replay_user_edits(edit_actions)
si = snippet.launch(text_before, self._visual_content,
self._cs.find_parent_for_new_to(start),
start, end
)
else:
start = Position(_vim.buf.cursor.line, len(text_before))
end = Position(_vim.buf.cursor.line, len(before))
si = snippet.launch(text_before, self._visual_content,
None, start, end)
self._visual_content.reset()
self._csnippets.append(si)
si.update_textobjects()
with use_proxy_buffer(self._csnippets, self._vstate):
with self._action_context():
snippet.do_post_expand(
si._start, si._end, self._csnippets
)
self._vstate.remember_buffer(self._csnippets[0])
if not self._snip_expanded_in_action:
self._jump()
elif self._cs.current_text != '':
self._jump()
else:
self._current_snippet_is_done()
if self._inside_action:
self._snip_expanded_in_action = True
def _try_expand(self, autotrigger_only=False):
"""Try to expand a snippet in the current place."""
before = _vim.buf.line_till_cursor
snippets = self._snips(before, False, autotrigger_only)
if snippets:
# prefer snippets with context if any
snippets_with_context = [s for s in snippets if s.context]
if snippets_with_context:
snippets = snippets_with_context
if not snippets:
# No snippet found
return False
_vim.command('let &undolevels = &undolevels')
if len(snippets) == 1:
snippet = snippets[0]
else:
snippet = _ask_snippets(snippets)
if not snippet:
return True
self._do_snippet(snippet, before)
_vim.command('let &undolevels = &undolevels')
return True
@property
def _cs(self):
"""The current snippet or None."""
if not len(self._csnippets):
return None
return self._csnippets[-1]
def _file_to_edit(self, requested_ft, bang): # pylint: disable=no-self-use
"""Returns a file to be edited for the given requested_ft.
If 'bang' is
empty only private files in g:UltiSnipsSnippetsDir are considered,
otherwise all files are considered and the user gets to choose.
"""
# This method is not using self, but is called by UltiSnips.vim and is
# therefore in this class because it is the facade to Vim.
potentials = set()
if _vim.eval("exists('g:UltiSnipsSnippetsDir')") == '1':
snippet_dir = _vim.eval('g:UltiSnipsSnippetsDir')
else:
home = _vim.eval('$HOME')
if platform.system() == 'Windows':
snippet_dir = os.path.join(home, 'vimfiles', 'UltiSnips')
elif _vim.eval("has('nvim')") == '1':
xdg_home_config = _vim.eval('$XDG_CONFIG_HOME') or os.path.join(home, ".config")
snippet_dir = os.path.join(xdg_home_config, 'nvim', 'UltiSnips')
else:
snippet_dir = os.path.join(home, '.vim', 'UltiSnips')
filetypes = []
if requested_ft:
filetypes.append(requested_ft)
else:
if bang:
filetypes.extend(self._buffer_filetypes[_vim.buf.number])
else:
filetypes.append(self._buffer_filetypes[_vim.buf.number][0])
for ft in filetypes:
potentials.update(find_snippet_files(ft, snippet_dir))
potentials.add(os.path.join(snippet_dir,
ft + '.snippets'))
if bang:
potentials.update(find_all_snippet_files(ft))
potentials = set(os.path.realpath(os.path.expanduser(p))
for p in potentials)
if len(potentials) > 1:
files = sorted(potentials)
formatted = [as_unicode('%i: %s') % (i, escape(fn, '\\')) for
i, fn in enumerate(files, 1)]
file_to_edit = _ask_user(files, formatted)
if file_to_edit is None:
return ''
else:
file_to_edit = potentials.pop()
dirname = os.path.dirname(file_to_edit)
if not os.path.exists(dirname):
os.makedirs(dirname)
return file_to_edit
@contextmanager
def _action_context(self):
try:
old_flag = self._inside_action
self._inside_action = True
yield
finally:
self._inside_action = old_flag
    @err_to_scratch_buffer
    def _track_change(self):
        """Record the character reported by Vim's ``v:char`` and, when it is
        empty and the character before the cursor matches the previously
        recorded one, attempt an autotrigger-only snippet expansion.
        """
        inserted_char = _vim.eval('v:char')
        try:
            if inserted_char == '':
                before = _vim.buf.line_till_cursor
                if before and before[-1] == self._last_inserted_char:
                    self._try_expand(autotrigger_only=True)
        finally:
            # Always remember the latest character, even if expansion failed.
            self._last_inserted_char = inserted_char
# Module-level singleton used by the UltiSnips Vim plugin; the trigger keys
# are read from the user's g:UltiSnips* Vim variables at import time.
UltiSnips_Manager = SnippetManager( # pylint:disable=invalid-name
    vim.eval('g:UltiSnipsExpandTrigger'),
    vim.eval('g:UltiSnipsJumpForwardTrigger'),
    vim.eval('g:UltiSnipsJumpBackwardTrigger'))
| gpl-3.0 |
atuljain/odoo | addons/account_bank_statement_extensions/wizard/__init__.py | 442 | 1125 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import confirm_statement_line
import cancel_statement_line
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
MichalKononenko/python-qinfer | src/qinfer/derived_models.py | 2 | 35703 | #!/usr/bin/python
# -*- coding: utf-8 -*-
##
# derived_models.py: Models that decorate and extend other models.
##
# © 2012 Chris Ferrie (csferrie@gmail.com) and
# Christopher E. Granade (cgranade@gmail.com)
#
# This file is a part of the Qinfer project.
# Licensed under the AGPL version 3.
##
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##
## FEATURES ###################################################################
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division # Ensures that a/b is always a float.
## ALL ########################################################################
# We use __all__ to restrict what globals are visible to external modules.
__all__ = [
'DerivedModel',
'PoisonedModel',
'BinomialModel',
'MultinomialModel',
'MLEModel',
'RandomWalkModel',
'GaussianRandomWalkModel'
]
## IMPORTS ####################################################################
from builtins import range
from functools import reduce
from past.builtins import basestring
import numpy as np
from scipy.stats import binom, multivariate_normal
from itertools import combinations_with_replacement as tri_comb
from qinfer.utils import binomial_pdf, multinomial_pdf, sample_multinomial
from qinfer.abstract_model import Model, DifferentiableModel
from qinfer._lib import enum # <- TODO: replace with flufl.enum!
from qinfer.utils import binom_est_error
from qinfer.domains import IntegerDomain, MultinomialDomain
## FUNCTIONS ###################################################################
def rmfield(a, *fieldnames_to_remove):
    """Return a copy of the structured array *a* without the named fields.

    :param numpy.ndarray a: Structured (record) array.
    :param fieldnames_to_remove: Names of the fields to drop; unknown
        names are silently ignored.
    """
    kept_fields = [name for name in a.dtype.names
                   if name not in fieldnames_to_remove]
    return a[kept_fields]
## CLASSES #####################################################################
class DerivedModel(Model):
    """
    Base class for any model that decorates another model.
    Provides passthroughs for modelparam_names, n_modelparams, etc.
    Many of these passthroughs can and should be overridden by
    specific subclasses, but it is rare that something will
    override all of them.
    """
    # The decorated model instance; assigned in __init__.
    _underlying_model = None
    def __init__(self, underlying_model):
        self._underlying_model = underlying_model
        super(DerivedModel, self).__init__()
    @property
    def underlying_model(self):
        """The model instance directly decorated by this model."""
        return self._underlying_model
    @property
    def base_model(self):
        """Passthrough to the underlying model's ``base_model``."""
        return self._underlying_model.base_model
    @property
    def model_chain(self):
        """The underlying model's chain, extended with the underlying model."""
        return self._underlying_model.model_chain + (self._underlying_model, )
    @property
    def n_modelparams(self):
        # We have as many modelparameters as the underlying model.
        return self.underlying_model.n_modelparams
    @property
    def expparams_dtype(self):
        """Passthrough: experiment parameter dtype of the underlying model."""
        return self.underlying_model.expparams_dtype
    @property
    def modelparam_names(self):
        """Passthrough: model parameter names of the underlying model."""
        return self.underlying_model.modelparam_names
    @property
    def Q(self):
        """Passthrough to the underlying model's ``Q``."""
        return self.underlying_model.Q
    def clear_cache(self):
        """Clear any cached data held by the underlying model."""
        self.underlying_model.clear_cache()
    def n_outcomes(self, expparams):
        """Passthrough: number of outcomes for each given experiment."""
        return self.underlying_model.n_outcomes(expparams)
    def are_models_valid(self, modelparams):
        """Passthrough: validity mask for the given model parameter vectors."""
        return self.underlying_model.are_models_valid(modelparams)
    def domain(self, expparams):
        """Passthrough: outcome domain(s) for the given experiments."""
        return self.underlying_model.domain(expparams)
    def are_expparam_dtypes_consistent(self, expparams):
        """Passthrough: whether all given experiments share an outcome dtype."""
        return self.underlying_model.are_expparam_dtypes_consistent(expparams)
    def update_timestep(self, modelparams, expparams):
        """Passthrough: post-experiment update of the model parameters."""
        return self.underlying_model.update_timestep(modelparams, expparams)
    def canonicalize(self, modelparams):
        """Passthrough: canonical form of the given model parameters."""
        return self.underlying_model.canonicalize(modelparams)
# Enumeration of the two supported likelihood-poisoning modes used by
# PoisonedModel: ALE (fixed error tolerance) and MLE (binomial sampling error).
PoisonModes = enum.enum("ALE", "MLE")
class PoisonedModel(DerivedModel):
    r"""
    Model that simulates sampling error incurred by the MLE or ALE methods of
    reconstructing likelihoods from sample data. The true likelihood given by an
    underlying model is perturbed by a normally distributed random variable
    :math:`\epsilon`, and then truncated to the interval :math:`[0, 1]`.
    The variance of :math:`\epsilon` can be specified either as a constant,
    to simulate ALE (in which samples are collected until a given threshold is
    met), or as proportional to the variance of a possibly-hedged binomial
    estimator, to simulate MLE.
    :param Model underlying_model: The "true" model to be poisoned.
    :param float tol: For ALE, specifies the given error tolerance to simulate.
    :param int n_samples: For MLE, specifies the number of samples collected.
    :param float hedge: For MLE, specifies the hedging used in estimating the
        true likelihood.
    :raises ValueError: If both or neither of ``tol`` and ``n_samples``
        are specified.
    """
    def __init__(self, underlying_model,
            tol=None, n_samples=None, hedge=None
    ):
        super(PoisonedModel, self).__init__(underlying_model)
        # BUGFIX: the original condition ``tol is None != n_samples is None``
        # parsed as a chained comparison,
        # ``(tol is None) and (None != n_samples) and (n_samples is None)``,
        # which only fired when *both* arguments were None and silently
        # accepted the both-specified case. The intent is an exclusive-or.
        if (tol is None) == (n_samples is None):
            raise ValueError(
                "Exactly one of tol and n_samples must be specified"
            )
        if tol is not None:
            # ALE mode: the noise scale is the constant tolerance.
            self._mode = PoisonModes.ALE
            self._tol = tol
        else:
            # MLE mode: the noise scale tracks the binomial estimator error.
            self._mode = PoisonModes.MLE
            self._n_samples = n_samples
            self._hedge = hedge if hedge is not None else 0.0
    ## METHODS ##
    def likelihood(self, outcomes, modelparams, expparams):
        """
        Returns the true likelihood of the underlying model, perturbed by
        additive Gaussian noise and clipped to :math:`[0, 1]`.
        """
        # By calling the superclass implementation, we can consolidate
        # call counting there.
        # Get the original, undisturbed likelihoods.
        super(PoisonedModel, self).likelihood(outcomes, modelparams, expparams)
        L = self.underlying_model.likelihood(
            outcomes, modelparams, expparams)
        # Now get the random variates from a standard normal [N(0, 1)]
        # distribution; we'll rescale them soon.
        epsilon = np.random.normal(size=L.shape)
        # If ALE, rescale by a constant tolerance.
        if self._mode == PoisonModes.ALE:
            epsilon *= self._tol
        # Otherwise, rescale by the estimated error in the binomial estimator.
        elif self._mode == PoisonModes.MLE:
            epsilon *= binom_est_error(p=L, N=self._n_samples, hedge=self._hedge)
        # Now we truncate and return.
        np.clip(L + epsilon, 0, 1, out=L)
        return L
    def simulate_experiment(self, modelparams, expparams, repeat=1):
        """
        Simulates experimental data according to the original (unpoisoned)
        model. Note that this explicitly causes the simulated data and the
        likelihood function to disagree. This is, strictly speaking, a violation
        of the assumptions made about `~qinfer.abstract_model.Model` subclasses.
        This violation is by intention, and allows for testing the robustness
        of inference algorithms against errors in that assumption.
        """
        super(PoisonedModel, self).simulate_experiment(modelparams, expparams, repeat)
        return self.underlying_model.simulate_experiment(modelparams, expparams, repeat)
class BinomialModel(DerivedModel):
    """
    Model representing finite numbers of iid samples from another model,
    using the binomial distribution to calculate the new likelihood function.
    :param qinfer.abstract_model.Model underlying_model: An instance of a two-
        outcome model to be decorated by the binomial distribution.
    Note that a new experimental parameter field ``n_meas`` is added by this
    model. This parameter field represents how many times a measurement should
    be made at a given set of experimental parameters. To ensure the correct
    operation of this model, it is important that the decorated model does not
    also admit a field with the name ``n_meas``.
    """
    def __init__(self, underlying_model):
        super(BinomialModel, self).__init__(underlying_model)
        # Binomial aggregation only makes sense over a fixed two-outcome
        # (Bernoulli) experiment.
        if not (underlying_model.is_n_outcomes_constant and underlying_model.n_outcomes(None) == 2):
            raise ValueError("Decorated model must be a two-outcome model.")
        if isinstance(underlying_model.expparams_dtype, str):
            # We default to calling the original experiment parameters "x".
            self._expparams_scalar = True
            self._expparams_dtype = [('x', underlying_model.expparams_dtype), ('n_meas', 'uint')]
        else:
            self._expparams_scalar = False
            self._expparams_dtype = underlying_model.expparams_dtype + [('n_meas', 'uint')]
    ## PROPERTIES ##
    @property
    def decorated_model(self):
        # Provided for backcompat only.
        return self.underlying_model
    @property
    def expparams_dtype(self):
        """Underlying dtype extended with the ``n_meas`` repetition count."""
        return self._expparams_dtype
    @property
    def is_n_outcomes_constant(self):
        """
        Returns ``True`` if and only if the number of outcomes for each
        experiment is independent of the experiment being performed.
        This property is assumed by inference engines to be constant for
        the lifetime of a Model instance.
        """
        # n_meas varies per experiment, so the outcome count does too.
        return False
    ## METHODS ##
    def n_outcomes(self, expparams):
        """
        Returns an array of dtype ``uint`` describing the number of outcomes
        for each experiment specified by ``expparams``.
        :param numpy.ndarray expparams: Array of experimental parameters. This
            array must be of dtype agreeing with the ``expparams_dtype``
            property.
        """
        # Possible outcomes are the success counts 0, 1, ..., n_meas.
        return expparams['n_meas'] + 1
    def domain(self, expparams):
        """
        Returns a list of ``Domain``s, one for each input expparam.
        :param numpy.ndarray expparams: Array of experimental parameters. This
            array must be of dtype agreeing with the ``expparams_dtype``
            property, or, in the case where ``n_outcomes_constant`` is ``True``,
            ``None`` should be a valid input.
        :rtype: list of ``Domain``
        """
        return [IntegerDomain(min=0,max=n_o-1) for n_o in self.n_outcomes(expparams)]
    def are_expparam_dtypes_consistent(self, expparams):
        """
        Returns `True` iff all of the given expparams
        correspond to outcome domains with the same dtype.
        For efficiency, concrete subclasses should override this method
        if the result is always `True`.
        :param np.ndarray expparams: Array of expparamms
             of type `expparams_dtype`
        :rtype: `bool`
        """
        # The output type is always the same, even though the domain is not.
        return True
    def likelihood(self, outcomes, modelparams, expparams):
        """
        Returns the binomial likelihood of observing each success count in
        ``outcomes`` over ``n_meas`` repetitions of the underlying
        two-outcome experiment.
        """
        # By calling the superclass implementation, we can consolidate
        # call counting there.
        super(BinomialModel, self).likelihood(outcomes, modelparams, expparams)
        # Single-shot probability of outcome 1 from the decorated model.
        pr1 = self.underlying_model.likelihood(
            np.array([1], dtype='uint'),
            modelparams,
            expparams['x'] if self._expparams_scalar else expparams)
        # Now we concatenate over outcomes.
        L = np.concatenate([
            binomial_pdf(expparams['n_meas'][np.newaxis, :], outcomes[idx], pr1)
            for idx in range(outcomes.shape[0])
        ])
        assert not np.any(np.isnan(L))
        return L
    def simulate_experiment(self, modelparams, expparams, repeat=1):
        """
        Draws binomial success counts for each model/experiment pair.
        """
        # FIXME: uncommenting causes a slowdown, but we need to call
        #        to track sim counts.
        #super(BinomialModel, self).simulate_experiment(modelparams, expparams)
        # Start by getting the pr(1) for the underlying model.
        pr1 = self.underlying_model.likelihood(
            np.array([1], dtype='uint'),
            modelparams,
            expparams['x'] if self._expparams_scalar else expparams)
        dist = binom(
            expparams['n_meas'].astype('int'), # ← binom requires int, not uint
            pr1[0, :, :]
        )
        # scipy collapses size-1 draws to scalars, so special-case them.
        sample = (
            (lambda: dist.rvs()[np.newaxis, :, :])
            if pr1.size != 1 else
            (lambda: np.array([[[dist.rvs()]]]))
        )
        os = np.concatenate([
            sample()
            for idx in range(repeat)
        ], axis=0)
        # Return a bare scalar for a single draw, matching Model convention.
        return os[0,0,0] if os.size == 1 else os
    def update_timestep(self, modelparams, expparams):
        """Delegates to the underlying model, stripping the ``n_meas`` field."""
        return self.underlying_model.update_timestep(modelparams,
            expparams['x'] if self._expparams_scalar else expparams
        )
class DifferentiableBinomialModel(BinomialModel, DifferentiableModel):
    """
    Extends :class:`BinomialModel` to take advantage of differentiable
    two-outcome models.

    :param DifferentiableModel underlying_model: Two-outcome model whose
        score and Fisher information are available.
    """

    def __init__(self, underlying_model):
        if not isinstance(underlying_model, DifferentiableModel):
            raise TypeError("Decorated model must also be differentiable.")
        BinomialModel.__init__(self, underlying_model)

    def score(self, outcomes, modelparams, expparams):
        raise NotImplementedError("Not yet implemented.")

    def fisher_information(self, modelparams, expparams):
        """
        Fisher information of ``n_meas`` iid repetitions of the underlying
        experiment. Because Fisher information is additive over independent
        observations, this is just the single-shot information scaled by
        the repetition count.
        """
        single_shot_fi = self.underlying_model.fisher_information(
            modelparams, expparams
        )
        return expparams['n_meas'] * single_shot_fi
class MultinomialModel(DerivedModel):
    """
    Model representing finite numbers of iid samples from another model with
    a fixed and finite number of outcomes,
    using the multinomial distribution to calculate the new likelihood function.
    :param qinfer.abstract_model.FiniteOutcomeModel underlying_model: An instance
        of a D-outcome model to be decorated by the multinomial distribution.
        This underlying model must have ``is_n_outcomes_constant`` as ``True``.
    Note that a new experimental parameter field ``n_meas`` is added by this
    model. This parameter field represents how many times a measurement should
    be made at a given set of experimental parameters. To ensure the correct
    operation of this model, it is important that the decorated model does not
    also admit a field with the name ``n_meas``.
    """
    ## INITIALIZER ##
    def __init__(self, underlying_model):
        super(MultinomialModel, self).__init__(underlying_model)
        if isinstance(underlying_model.expparams_dtype, str):
            # We default to calling the original experiment parameters "x".
            self._expparams_scalar = True
            self._expparams_dtype = [('x', underlying_model.expparams_dtype), ('n_meas', 'uint')]
        else:
            self._expparams_scalar = False
            self._expparams_dtype = underlying_model.expparams_dtype + [('n_meas', 'uint')]
        # Demand that the underlying model always has the same number of outcomes
        # This assumption could in principle be generalized, but not worth the effort now.
        assert(self.underlying_model.is_n_outcomes_constant)
        self._underlying_domain = self.underlying_model.domain(None)
        self._n_sides = self._underlying_domain.n_members
        # Useful for getting the right type, etc.
        self._example_domain = MultinomialDomain(n_elements=self.n_sides, n_meas=3)
    ## PROPERTIES ##
    @property
    def decorated_model(self):
        # Provided for backcompat only.
        return self.underlying_model
    @property
    def expparams_dtype(self):
        """Underlying dtype extended with the ``n_meas`` repetition count."""
        return self._expparams_dtype
    @property
    def is_n_outcomes_constant(self):
        """
        Returns ``True`` if and only if the number of outcomes for each
        experiment is independent of the experiment being performed.
        This property is assumed by inference engines to be constant for
        the lifetime of a Model instance.
        """
        # Different values of n_meas result in different numbers of outcomes
        return False
    @property
    def n_sides(self):
        """
        Returns the number of possible outcomes of the underlying model.
        """
        return self._n_sides
    @property
    def underlying_domain(self):
        """
        Returns the `Domain` of the underlying model.
        """
        return self._underlying_domain
    ## METHODS ##
    def n_outcomes(self, expparams):
        """
        Returns an array describing the number of outcomes
        for each experiment specified by ``expparams``.
        :param numpy.ndarray expparams: Array of experimental parameters. This
            array must be of dtype agreeing with the ``expparams_dtype``
            property.
        """
        # Stars-and-bars: the number of non-negative integer k-tuples that
        # sum to n_meas is C(n_meas + k - 1, k - 1).
        # BUGFIX: the original referenced ``scipy.special.binom`` but this
        # module only imports from ``scipy.stats``, so the call raised
        # NameError. Import the coefficient function locally instead.
        from scipy.special import binom as binom_coefficient
        n = expparams['n_meas']
        k = self.n_sides
        return binom_coefficient(n + k - 1, k - 1)
    def domain(self, expparams):
        """
        Returns a list of :class:`Domain` objects, one for each input expparam.
        :param numpy.ndarray expparams: Array of experimental parameters. This
            array must be of dtype agreeing with the ``expparams_dtype``
            property.
        :rtype: list of ``Domain``
        """
        return [
            MultinomialDomain(n_elements=self.n_sides, n_meas=ep['n_meas'])
            for ep in expparams
        ]
    def are_expparam_dtypes_consistent(self, expparams):
        """
        Returns `True` iff all of the given expparams
        correspond to outcome domains with the same dtype.
        For efficiency, concrete subclasses should override this method
        if the result is always `True`.
        :param np.ndarray expparams: Array of expparamms
             of type `expparams_dtype`
        :rtype: `bool`
        """
        # The output type is always the same, even though the domain is not.
        return True
    def likelihood(self, outcomes, modelparams, expparams):
        """
        Returns the multinomial likelihood of each tuple of outcome counts
        in ``outcomes`` under the underlying model's outcome probabilities.
        """
        # By calling the superclass implementation, we can consolidate
        # call counting there.
        super(MultinomialModel, self).likelihood(outcomes, modelparams, expparams)
        # Save a wee bit of time by only calculating the likelihoods of outcomes 0,...,d-2
        prs = self.underlying_model.likelihood(
            self.underlying_domain.values[:-1],
            modelparams,
            expparams['x'] if self._expparams_scalar else expparams)
            # shape (sides-1, n_mps, n_eps)
        prs = np.tile(prs, (outcomes.shape[0],1,1,1)).transpose((1,0,2,3))
        # shape (n_outcomes, sides-1, n_mps, n_eps)
        os = self._example_domain.to_regular_array(outcomes)
        # shape (n_outcomes, sides)
        os = np.tile(os, (modelparams.shape[0],expparams.shape[0],1,1)).transpose((3,2,0,1))
        # shape (n_outcomes, sides, n_mps, n_eps)
        L = multinomial_pdf(os, prs)
        assert not np.any(np.isnan(L))
        return L
    def simulate_experiment(self, modelparams, expparams, repeat=1):
        """
        Draws multinomial outcome-count tuples for each model/experiment pair.
        """
        # CONSISTENCY FIX: forward ``repeat`` to the superclass so simulation
        # counting sees the actual number of repetitions, matching the other
        # derived models in this module.
        super(MultinomialModel, self).simulate_experiment(modelparams, expparams, repeat)
        n_sides = self.n_sides
        n_mps = modelparams.shape[0]
        n_eps = expparams.shape[0]
        # Save a wee bit of time by only calculating the likelihoods of outcomes 0,...,d-2
        prs = np.empty((n_sides,n_mps,n_eps))
        prs[:-1,...] = self.underlying_model.likelihood(
            self.underlying_domain.values[:-1],
            modelparams,
            expparams['x'] if self._expparams_scalar else expparams)
            # shape (sides, n_mps, n_eps)
        os = np.concatenate([
            sample_multinomial(n_meas, prs[:,:,idx_n_meas], size=repeat)[np.newaxis,...]
            for idx_n_meas, n_meas in enumerate(expparams['n_meas'].astype('int'))
        ]).transpose((3,2,0,1))
        # convert to fancy data type
        os = self._example_domain.from_regular_array(os)
        # Return a bare scalar for a single draw, matching Model convention.
        return os[0,0,0] if os.size == 1 else os
class MLEModel(DerivedModel):
    r"""
    Uses the method of [JDD08]_ to approximate the maximum likelihood
    estimator as the mean of a fictional posterior formed by amplifying the
    Bayes update by a given power :math:`\gamma`. As :math:`\gamma \to
    \infty`, this approximation to the MLE improves, but at the cost of
    numerical stability.

    :param float likelihood_power: Power to which the likelihood calls
        should be raised in order to amplify the Bayes update.
    """

    def __init__(self, underlying_model, likelihood_power):
        super(MLEModel, self).__init__(underlying_model)
        self._likelihood_power = likelihood_power

    def simulate_experiment(self, modelparams, expparams, repeat=1):
        """Simulate data from the unamplified underlying model."""
        super(MLEModel, self).simulate_experiment(modelparams, expparams, repeat)
        return self.underlying_model.simulate_experiment(modelparams, expparams, repeat)

    def likelihood(self, outcomes, modelparams, expparams):
        """Return the underlying likelihood raised to the amplification power."""
        raw_likelihood = self.underlying_model.likelihood(outcomes, modelparams, expparams)
        return raw_likelihood ** self._likelihood_power
class RandomWalkModel(DerivedModel):
    r"""
    Model such that after each time step, a random perturbation is added to
    each model parameter vector according to a given distribution.

    :param Model underlying_model: Model representing the likelihood with no
        random walk added.
    :param Distribution step_distribution: Distribution over step vectors.
    """

    def __init__(self, underlying_model, step_distribution):
        self._step_dist = step_distribution
        super(RandomWalkModel, self).__init__(underlying_model)
        if self.underlying_model.n_modelparams != self._step_dist.n_rvs:
            raise TypeError("Step distribution does not match model dimension.")

    ## METHODS ##

    def likelihood(self, outcomes, modelparams, expparams):
        """Passthrough to the underlying model's likelihood."""
        super(RandomWalkModel, self).likelihood(outcomes, modelparams, expparams)
        return self.underlying_model.likelihood(outcomes, modelparams, expparams)

    def simulate_experiment(self, modelparams, expparams, repeat=1):
        """Passthrough to the underlying model's simulator."""
        super(RandomWalkModel, self).simulate_experiment(modelparams, expparams, repeat)
        return self.underlying_model.simulate_experiment(modelparams, expparams, repeat)

    def update_timestep(self, modelparams, expparams):
        """Add one sampled step vector per (model, experiment) pair."""
        # The timestep update is presumed to be independent of which
        # experiment was performed.
        n_models = modelparams.shape[0]
        n_exps = expparams.shape[0]
        perturbations = self._step_dist.sample(n=n_models * n_exps)
        # Break apart the first two axes and move the experiment axis last.
        perturbations = perturbations.reshape(
            (n_models, n_exps, self.n_modelparams)
        ).transpose((0, 2, 1))
        return modelparams[:, :, np.newaxis] + perturbations
class GaussianRandomWalkModel(DerivedModel):
    r"""
    Model such that after each time step, a random perturbation is
    added to each model parameter vector according to a
    zero-mean gaussian distribution.
    The :math:`n\times n` covariance matrix of this distribution is
    either fixed and known, or its entries are treated as unknown,
    being appended to the model parameters.
    For diagonal covariance matrices, :math:`n` parameters are added to the model
    storing the square roots of the diagonal entries of the covariance matrix.
    For dense covariance matrices, :math:`n(n+1)/2` parameters are added to
    the model, storing the entries of the lower triangular portion of the
    Cholesky factorization of the covariance matrix.
    :param Model underlying_model: Model representing the likelihood with no
        random walk added.
    :param random_walk_idxs: A list or ``np.slice`` of
        ``underlying_model`` model parameter indices to add the random walk to.
        Indices larger than ``underlying_model.n_modelparams`` should not
        be touched.
    :param fixed_covariance: An ``np.ndarray`` specifying the fixed covariance
        matrix (or diagonal thereof if ``diagonal`` is ``True``) of the
        gaussian distribution. If set to ``None`` (default), this matrix is
        presumed unknown and parameters are appended to the model describing
        it.
    :param boolean diagonal: Whether the gaussian distribution covariance matrix
        is diagonal, or densely populated. Default is ``True``.
    :param scale_mult: A function which takes an array of expparams and
        outputs a real number for each one, representing the scale of the
        given experiment. This is useful if different experiments have
        different time lengths and therefore incur different dispersion amounts.
        If a string is given instead of a function,
        the scale multiplier is the ``exparam`` with that name.
    :param model_transformation: Either ``None`` or a pair of functions
        ``(transform, inv_transform)`` specifying a transformation of
        ``modelparams`` (of the underlying model) before gaussian noise is
        added, and the inverse operation after the gaussian noise has been
        added.
    """
    def __init__(
            self, underlying_model, random_walk_idxs='all',
            fixed_covariance=None, diagonal=True,
            scale_mult=None, model_transformation=None
        ):
        self._diagonal = diagonal
        self._rw_idxs = np.s_[:underlying_model.n_modelparams] \
            if random_walk_idxs == 'all' else random_walk_idxs
        explicit_idxs = np.arange(underlying_model.n_modelparams)[self._rw_idxs]
        if explicit_idxs.size == 0:
            raise IndexError('At least one model parameter must take a random walk.')
        self._rw_names = [
                underlying_model.modelparam_names[idx]
                for idx in explicit_idxs
            ]
        self._n_rw = len(explicit_idxs)
        self._srw_names = []
        if fixed_covariance is None:
            # In this case we need to learn the covariance parameters too,
            # therefore, we need to add modelparams
            self._has_fixed_covariance = False
            if self._diagonal:
                self._srw_names = ["\sigma_{{{}}}".format(name) for name in self._rw_names]
                # BUGFIX: ``np.int`` was removed from NumPy; use builtin int.
                self._srw_idxs = (underlying_model.n_modelparams + \
                    np.arange(self._n_rw)).astype(int)
            else:
                # BUGFIX: use floor division so np.arange gets an integer
                # (true division yielded a float under __future__.division),
                # and builtin int instead of the removed np.int alias.
                self._srw_idxs = (underlying_model.n_modelparams +
                    np.arange(self._n_rw * (self._n_rw + 1) // 2)).astype(int)
                # the following list of indices tells us how to populate
                # a cholesky matrix with a 1D list of values
                self._srw_tri_idxs = np.tril_indices(self._n_rw)
                for idx1, name1 in enumerate(self._rw_names):
                    for name2 in self._rw_names[:idx1+1]:
                        if name1 == name2:
                            self._srw_names.append("\sigma_{{{}}}".format(name1))
                        else:
                            self._srw_names.append("\sigma_{{{},{}}}".format(name2,name1))
        else:
            # In this case the covariance matrix is fixed and fully specified
            self._has_fixed_covariance = True
            if self._diagonal:
                if fixed_covariance.ndim != 1:
                    raise ValueError('Diagonal covariance requested, but fixed_covariance has {} dimensions.'.format(fixed_covariance.ndim))
                if fixed_covariance.size != self._n_rw:
                    # BUGFIX: the original referenced the non-existent
                    # attribute ``self.n_rw`` here, turning this ValueError
                    # into an AttributeError.
                    raise ValueError('fixed_covariance dimension, {}, inconsistent with number of parameters, {}'.format(fixed_covariance.size, self._n_rw))
                self._fixed_scale = np.sqrt(fixed_covariance)
            else:
                if fixed_covariance.ndim != 2:
                    raise ValueError('Dense covariance requested, but fixed_covariance has {} dimensions.'.format(fixed_covariance.ndim))
                if fixed_covariance.size != self._n_rw **2 or fixed_covariance.shape[-2] != fixed_covariance.shape[-1]:
                    raise ValueError('fixed_covariance expected to be square with width {}'.format(self._n_rw))
                self._fixed_chol = np.linalg.cholesky(fixed_covariance)
                self._fixed_distribution = multivariate_normal(
                        np.zeros(self._n_rw),
                        np.dot(self._fixed_chol, self._fixed_chol.T)
                    )
        super(GaussianRandomWalkModel, self).__init__(underlying_model)
        if np.max(np.arange(self.n_modelparams)[self._rw_idxs]) > np.max(explicit_idxs):
            raise IndexError('random_walk_idxs out of bounds; must index (a subset of ) underlying_model modelparams.')
        if scale_mult is None:
            self._scale_mult_fcn = (lambda expparams: 1)
        elif isinstance(scale_mult, basestring):
            self._scale_mult_fcn = lambda x: x[scale_mult]
        else:
            self._scale_mult_fcn = scale_mult
        self._has_transformation = model_transformation is not None
        if self._has_transformation:
            self._transform = model_transformation[0]
            self._inv_transform = model_transformation[1]
    ## PROPERTIES ##
    @property
    def modelparam_names(self):
        """Underlying names plus any appended covariance-parameter names."""
        return self.underlying_model.modelparam_names + self._srw_names
    @property
    def n_modelparams(self):
        return len(self.modelparam_names)
    @property
    def is_n_outcomes_constant(self):
        return False
    ## METHODS ##
    def are_models_valid(self, modelparams):
        """Underlying validity, plus non-negative scale parameters when the
        diagonal covariance is being learned."""
        ud_valid = self.underlying_model.are_models_valid(modelparams[...,:self.underlying_model.n_modelparams])
        if self._has_fixed_covariance:
            return ud_valid
        elif self._diagonal:
            pos_std = np.greater_equal(modelparams[...,self._srw_idxs], 0).all(axis=-1)
            return np.logical_and(ud_valid, pos_std)
        else:
            return ud_valid
    def likelihood(self, outcomes, modelparams, expparams):
        """Passthrough: the appended covariance parameters do not affect
        the likelihood."""
        super(GaussianRandomWalkModel, self).likelihood(outcomes, modelparams, expparams)
        return self.underlying_model.likelihood(outcomes, modelparams[...,:self.underlying_model.n_modelparams], expparams)
    def simulate_experiment(self, modelparams, expparams, repeat=1):
        """Passthrough: simulate with only the underlying model parameters."""
        super(GaussianRandomWalkModel, self).simulate_experiment(modelparams, expparams, repeat)
        return self.underlying_model.simulate_experiment(modelparams[...,:self.underlying_model.n_modelparams], expparams, repeat)
    def est_update_covariance(self, modelparams):
        """
        Returns the covariance of the gaussian noise process for one
        unit step. In the case where the covariance is being learned,
        the expected covariance matrix is returned.
        :param modelparams: Shape `(n_models, n_modelparams)` shape array
            of model parameters.
        """
        if self._diagonal:
            cov = (self._fixed_scale ** 2 if self._has_fixed_covariance \
                else np.mean(modelparams[:, self._srw_idxs] ** 2, axis=0))
            cov = np.diag(cov)
        else:
            if self._has_fixed_covariance:
                cov = np.dot(self._fixed_chol, self._fixed_chol.T)
            else:
                chol = np.zeros((modelparams.shape[0], self._n_rw, self._n_rw))
                chol[(np.s_[:],) + self._srw_tri_idxs] = modelparams[:, self._srw_idxs]
                cov = np.mean(np.einsum('ijk,ilk->ijl', chol, chol), axis=0)
        return cov
    def update_timestep(self, modelparams, expparams):
        """Disperse the walked parameters with zero-mean gaussian steps,
        optionally in a transformed parameterization."""
        n_mps = modelparams.shape[0]
        n_eps = expparams.shape[0]
        if self._diagonal:
            scale = self._fixed_scale if self._has_fixed_covariance else modelparams[:, self._srw_idxs]
            # the following works when _fixed_scale has shape (n_rw) or (n_mps,n_rw)
            # in the latter, each particle gets dispersed by its own belief of the scale
            steps = scale * np.random.normal(size = (n_eps, n_mps, self._n_rw))
            steps = steps.transpose((1,2,0))
        else:
            if self._has_fixed_covariance:
                steps = np.dot(
                    self._fixed_chol,
                    np.random.normal(size = (self._n_rw, n_mps * n_eps))
                ).reshape(self._n_rw, n_mps, n_eps).transpose((1,0,2))
            else:
                chol = np.zeros((n_mps, self._n_rw, self._n_rw))
                chol[(np.s_[:],) + self._srw_tri_idxs] = modelparams[:, self._srw_idxs]
                # each particle gets dispersed by its own belief of the cholesky
                steps = np.einsum('kij,kjl->kil', chol, np.random.normal(size = (n_mps, self._n_rw, n_eps)))
        # multiply by the scales of the current experiments
        steps = self._scale_mult_fcn(expparams) * steps
        if self._has_transformation:
            # repeat model params for every expparam
            new_mps = np.repeat(modelparams[np.newaxis,:,:], n_eps, axis=0).reshape((n_eps * n_mps, -1))
            # run transformation on underlying slice
            new_mps[:, :self.underlying_model.n_modelparams] = self._transform(
                    new_mps[:, :self.underlying_model.n_modelparams]
                )
            # add on the random steps to the relevant indices
            new_mps[:, self._rw_idxs] += steps.transpose((2,0,1)).reshape((n_eps * n_mps, -1))
            # back to regular parameterization
            new_mps[:, :self.underlying_model.n_modelparams] = self._inv_transform(
                new_mps[:, :self.underlying_model.n_modelparams]
            )
            new_mps = new_mps.reshape((n_eps, n_mps, -1)).transpose((1,2,0))
        else:
            new_mps = np.repeat(modelparams[:,:,np.newaxis], n_eps, axis=2)
            new_mps[:, self._rw_idxs, :] += steps
        return new_mps
## TESTING CODE ###############################################################
if __name__ == "__main__":
    # Smoke test: decorate a simple precession model with BinomialModel and
    # check the likelihood's shape and the consolidated call counting.
    # NOTE(review): the relative import below only resolves when this module
    # is run as part of its package (e.g. via ``python -m``); as a standalone
    # script it raises ImportError — confirm the intended invocation.
    import operator as op
    from .test_models import SimplePrecessionModel
    m = BinomialModel(SimplePrecessionModel())
    os = np.array([6, 7, 8, 9, 10])
    mps = np.array([[0.1], [0.35], [0.77]])
    eps = np.array([(0.5 * np.pi, 10), (0.51 * np.pi, 10)], dtype=m.expparams_dtype)
    L = m.likelihood(
        os, mps, eps
    )
    print(L)
    assert m.call_count == reduce(op.mul, [os.shape[0], mps.shape[0], eps.shape[0]]), "Call count inaccurate."
    assert L.shape == (os.shape[0], mps.shape[0], eps.shape[0]), "Shape mismatch."
| agpl-3.0 |
GeekTrainer/Flask | Work/Trivia - Module 5/env/Lib/site-packages/werkzeug/testsuite/contrib/cache.py | 145 | 7212 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.cache
~~~~~~~~~~~~~~~~~~~~~~~~
Tests the cache system
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import time
import unittest
import tempfile
import shutil
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.contrib import cache
# Optional backend discovery. Probe for a redis client AND a reachable redis
# server; if either is missing, set the module reference to None so the redis
# test cases are skipped.
try:
    import redis
    try:
        from redis.exceptions import ConnectionError as RedisConnectionError
        cache.RedisCache(key_prefix='werkzeug-test-case:')._client.set('test','connection')
    except RedisConnectionError:
        redis = None
except ImportError:
    redis = None
# Memcached client discovery: prefer pylibmc, then the App Engine API, then
# the pure-Python memcache module; None disables the memcached test cases.
try:
    import pylibmc as memcache
except ImportError:
    try:
        from google.appengine.api import memcache
    except ImportError:
        try:
            import memcache
        except ImportError:
            memcache = None
class SimpleCacheTestCase(WerkzeugTestCase):
    """Tests for the in-memory SimpleCache backend."""

    def test_get_dict(self):
        """get_dict() returns every requested key with its stored value."""
        c = cache.SimpleCache()
        for key in ('a', 'b'):
            c.set(key, key)
        d = c.get_dict('a', 'b')
        assert d.get('a') == 'a'
        assert d.get('b') == 'b'

    def test_set_many(self):
        """set_many() accepts both mappings and iterables of pairs."""
        c = cache.SimpleCache()
        c.set_many({0: 0, 1: 1, 2: 4})
        assert c.get(2) == 4
        c.set_many((i, i * i) for i in range(3))
        assert c.get(2) == 4
class FileSystemCacheTestCase(WerkzeugTestCase):
    """Tests for the FileSystemCache backend.

    FIX: every test now removes its temporary directory in a ``finally``
    block, so a failing assertion can no longer leak temp dirs (the prune
    and clear tests previously cleaned up only on success).
    """

    def test_set_get(self):
        tmp_dir = tempfile.mkdtemp()
        try:
            c = cache.FileSystemCache(cache_dir=tmp_dir)
            for i in range(3):
                c.set(str(i), i * i)
            for i in range(3):
                result = c.get(str(i))
                assert result == i * i
        finally:
            shutil.rmtree(tmp_dir)

    def test_filesystemcache_prune(self):
        # Writing past the threshold must prune the cache back down to at
        # most THRESHOLD entries.
        THRESHOLD = 13
        tmp_dir = tempfile.mkdtemp()
        try:
            c = cache.FileSystemCache(cache_dir=tmp_dir, threshold=THRESHOLD)
            for i in range(2 * THRESHOLD):
                c.set(str(i), i)
            cache_files = os.listdir(tmp_dir)
            assert len(cache_files) <= THRESHOLD
        finally:
            shutil.rmtree(tmp_dir)

    def test_filesystemcache_clear(self):
        tmp_dir = tempfile.mkdtemp()
        try:
            c = cache.FileSystemCache(cache_dir=tmp_dir)
            c.set('foo', 'bar')
            assert len(os.listdir(tmp_dir)) == 1
            c.clear()
            assert len(os.listdir(tmp_dir)) == 0
        finally:
            shutil.rmtree(tmp_dir)
class RedisCacheTestCase(WerkzeugTestCase):
    """Tests for the Redis-backed cache (only run when a server is up)."""

    def make_cache(self):
        # Prefix every key so teardown can wipe this test run without
        # touching other data in the same Redis database.
        return cache.RedisCache(key_prefix='werkzeug-test-case:')

    def teardown(self):
        self.make_cache().clear()

    def test_compat(self):
        # Values written through the raw client must be readable through
        # the cache API: plain strings pass through, numeric strings are
        # coerced to ints.
        c = self.make_cache()
        c._client.set(c.key_prefix + 'foo', b'Awesome')
        self.assert_equal(c.get('foo'), b'Awesome')
        c._client.set(c.key_prefix + 'foo', b'42')
        self.assert_equal(c.get('foo'), 42)

    def test_get_set(self):
        c = self.make_cache()
        c.set('foo', ['bar'])
        assert c.get('foo') == ['bar']

    def test_get_many(self):
        c = self.make_cache()
        c.set('foo', ['bar'])
        c.set('spam', 'eggs')
        assert c.get_many('foo', 'spam') == [['bar'], 'eggs']

    def test_set_many(self):
        c = self.make_cache()
        c.set_many({'foo': 'bar', 'spam': ['eggs']})
        assert c.get('foo') == 'bar'
        assert c.get('spam') == ['eggs']

    def test_expire(self):
        # A 1-second timeout must have expired after sleeping 2 seconds.
        c = self.make_cache()
        c.set('foo', 'bar', 1)
        time.sleep(2)
        assert c.get('foo') is None

    def test_add(self):
        c = self.make_cache()
        # sanity check that add() works like set()
        c.add('foo', 'bar')
        assert c.get('foo') == 'bar'
        # add() must NOT overwrite an existing key
        c.add('foo', 'qux')
        assert c.get('foo') == 'bar'

    def test_delete(self):
        c = self.make_cache()
        c.add('foo', 'bar')
        assert c.get('foo') == 'bar'
        c.delete('foo')
        assert c.get('foo') is None

    def test_delete_many(self):
        c = self.make_cache()
        c.add('foo', 'bar')
        c.add('spam', 'eggs')
        c.delete_many('foo', 'spam')
        assert c.get('foo') is None
        assert c.get('spam') is None

    def test_inc_dec(self):
        c = self.make_cache()
        c.set('foo', 1)
        self.assert_equal(c.inc('foo'), 2)
        self.assert_equal(c.dec('foo'), 1)
        c.delete('foo')

    def test_true_false(self):
        # Booleans must round-trip as booleans, not be degraded to ints.
        c = self.make_cache()
        c.set('foo', True)
        assert c.get('foo') == True
        c.set('bar', False)
        assert c.get('bar') == False
class MemcachedCacheTestCase(WerkzeugTestCase):
    """Tests for the memcached backend (pylibmc / GAE / python-memcached)."""

    def make_cache(self):
        # Prefix every key so teardown can clear only this test's data.
        return cache.MemcachedCache(key_prefix='werkzeug-test-case:')

    def teardown(self):
        self.make_cache().clear()

    def test_compat(self):
        # A value stored through the raw client must be readable through
        # the cache API.
        c = self.make_cache()
        c._client.set(c.key_prefix + b'foo', 'bar')
        self.assert_equal(c.get('foo'), 'bar')

    def test_get_set(self):
        c = self.make_cache()
        c.set('foo', 'bar')
        self.assert_equal(c.get('foo'), 'bar')

    def test_get_many(self):
        c = self.make_cache()
        c.set('foo', 'bar')
        c.set('spam', 'eggs')
        self.assert_equal(c.get_many('foo', 'spam'), ['bar', 'eggs'])

    def test_set_many(self):
        c = self.make_cache()
        c.set_many({'foo': 'bar', 'spam': 'eggs'})
        self.assert_equal(c.get('foo'), 'bar')
        self.assert_equal(c.get('spam'), 'eggs')

    def test_expire(self):
        # A 1-second timeout must have expired after sleeping 2 seconds.
        c = self.make_cache()
        c.set('foo', 'bar', 1)
        time.sleep(2)
        self.assert_is_none(c.get('foo'))

    def test_add(self):
        c = self.make_cache()
        c.add('foo', 'bar')
        self.assert_equal(c.get('foo'), 'bar')
        # add() must NOT overwrite an existing key
        c.add('foo', 'baz')
        self.assert_equal(c.get('foo'), 'bar')

    def test_delete(self):
        c = self.make_cache()
        c.add('foo', 'bar')
        self.assert_equal(c.get('foo'), 'bar')
        c.delete('foo')
        self.assert_is_none(c.get('foo'))

    def test_delete_many(self):
        c = self.make_cache()
        c.add('foo', 'bar')
        c.add('spam', 'eggs')
        c.delete_many('foo', 'spam')
        self.assert_is_none(c.get('foo'))
        self.assert_is_none(c.get('spam'))

    def test_inc_dec(self):
        # Unlike RedisCache, inc()/dec() here do not return the new value.
        c = self.make_cache()
        c.set('foo', 1)
        # XXX: Is this an intended difference?
        c.inc('foo')
        self.assert_equal(c.get('foo'), 2)
        c.dec('foo')
        self.assert_equal(c.get('foo'), 1)

    def test_true_false(self):
        c = self.make_cache()
        c.set('foo', True)
        self.assert_equal(c.get('foo'), True)
        c.set('bar', False)
        self.assert_equal(c.get('bar'), False)
def suite():
    """Build the test suite, skipping backends whose client is missing."""
    cases = [SimpleCacheTestCase, FileSystemCacheTestCase]
    if redis is not None:
        cases.append(RedisCacheTestCase)
    if memcache is not None:
        cases.append(MemcachedCacheTestCase)
    result = unittest.TestSuite()
    for case in cases:
        result.addTest(unittest.makeSuite(case))
    return result
| apache-2.0 |
squirrelo/qiita | qiita_db/support_files/patches/python_patches/36.py | 1 | 4367 | from random import SystemRandom
from string import ascii_letters, digits
from os.path import exists, join, basename
from tarfile import open as taropen
from qiita_db.sql_connection import TRN
from qiita_db.artifact import Artifact
from qiita_db.util import (insert_filepaths, convert_to_id, get_mountpoint,
get_mountpoint_path_by_id)
# Character pool for the randomly generated OAuth credentials.
pool = ascii_letters + digits
# Id of the "tgz" filepath type and mount point of the analysis directory.
tgz_id = convert_to_id("tgz", "filepath_type")
_id, analysis_mp = get_mountpoint('analysis')[0]

with TRN:
    # 2 and 3 are the ids of the 2 new software rows, the BIOM and
    # target gene type plugins
    for i in [2, 3]:
        # Random client id (50 chars) and secret (255 chars) drawn from a
        # cryptographically secure RNG.
        client_id = ''.join([SystemRandom().choice(pool) for _ in range(50)])
        client_secret = ''.join(
            [SystemRandom().choice(pool) for _ in range(255)])

        sql = """INSERT INTO qiita.oauth_identifiers (client_id, client_secret)
                 VALUES (%s, %s)"""
        TRN.add(sql, [client_id, client_secret])

        sql = """INSERT INTO qiita.oauth_software (software_id, client_id)
                 VALUES (%s, %s)"""
        TRN.add(sql, [i, client_id])

        TRN.execute()

    #
    # Generating compressed files for picking failures -- artifact_type = BIOM
    #

    sql = """SELECT artifact_id FROM qiita.artifact
                JOIN qiita.artifact_type USING (artifact_type_id)
                WHERE artifact_type = 'BIOM'"""
    TRN.add(sql)
    for r in TRN.execute_fetchindex():
        to_tgz = None
        a = Artifact(r[0])
        # Only the artifact's "directory" filepath gets compressed.
        for _, fp, fp_type in a.filepaths:
            if fp_type == 'directory':
                # removing / from the path if it exists
                to_tgz = fp[:-1] if fp[-1] == '/' else fp
                break

        if to_tgz is None:
            continue

        tgz = to_tgz + '.tgz'
        # Idempotent: skip artifacts whose tgz was already generated.
        if not exists(tgz):
            with taropen(tgz, "w:gz") as tar:
                tar.add(to_tgz, arcname=basename(to_tgz))

        a_id = a.id
        # Add the new tgz file to the artifact.
        fp_ids = insert_filepaths([(tgz, tgz_id)], a_id, a.artifact_type,
                                  "filepath", move_files=False)
        sql = """INSERT INTO qiita.artifact_filepath
                    (artifact_id, filepath_id)
                 VALUES (%s, %s)"""
        sql_args = [[a_id, fp_id] for fp_id in fp_ids]
        TRN.add(sql, sql_args, many=True)
        TRN.execute()

    #
    # Generating compressed files for analysis
    #

    TRN.add("SELECT analysis_id FROM qiita.analysis")
    for result in TRN.execute_fetchindex():
        analysis_id = result[0]
        # retrieving all analysis filepaths, we could have used
        # Analysis.all_associated_filepath_ids but we could run into the
        # analysis not belonging to the current portal, thus using SQL
        sql = """SELECT filepath, data_directory_id
                 FROM qiita.filepath
                    JOIN qiita.analysis_filepath USING (filepath_id)
                 WHERE analysis_id = %s"""
        TRN.add(sql, [analysis_id])
        fps = set([tuple(r) for r in TRN.execute_fetchindex()])
        sql = """SELECT filepath, data_directory_id
                 FROM qiita.analysis_job
                    JOIN qiita.job USING (job_id)
                    JOIN qiita.job_results_filepath USING (job_id)
                    JOIN qiita.filepath USING (filepath_id)
                 WHERE analysis_id = %s"""
        TRN.add(sql, [analysis_id])
        fps = fps.union([tuple(r) for r in TRN.execute_fetchindex()])

        # no filepaths in the analysis
        if not fps:
            continue

        tgz = join(analysis_mp, '%d_files.tgz' % analysis_id)
        if not exists(tgz):
            # Resolve each (filepath, mount id) pair to an absolute path.
            full_fps = [join(get_mountpoint_path_by_id(mid), f)
                        for f, mid in fps]
            with taropen(tgz, "w:gz") as tar:
                for f in full_fps:
                    tar.add(f, arcname=basename(f))

        # Add the new tgz file to the analysis.
        fp_ids = insert_filepaths([(tgz, tgz_id)], analysis_id, 'analysis',
                                  "filepath", move_files=False)
        sql = """INSERT INTO qiita.analysis_filepath
                    (analysis_id, filepath_id)
                 VALUES (%s, %s)"""
        sql_args = [[analysis_id, fp_id] for fp_id in fp_ids]
        TRN.add(sql, sql_args, many=True)
        TRN.execute()
| bsd-3-clause |
noba3/KoTos | addons/script.module.beautifulsoup4/lib/bs4/builder/_htmlparser.py | 412 | 8839 | """Use the HTMLParser library to parse HTML files that aren't too bad."""
__all__ = [
'HTMLParserTreeBuilder',
]
from HTMLParser import (
HTMLParser,
HTMLParseError,
)
import sys
import warnings
# Starting in Python 3.2, the HTMLParser constructor takes a 'strict'
# argument, which we'd like to set to False. Unfortunately,
# http://bugs.python.org/issue13273 makes strict=True a better bet
# before Python 3.2.3.
#
# At the end of this file, we monkeypatch HTMLParser so that
# strict=True works well on Python 3.2.2.
major, minor, release = sys.version_info[:3]
# strict=False is only safe on 3.2.3+ (see http://bugs.python.org/issue13273
# referenced above); any later minor or major release is also fine.
CONSTRUCTOR_TAKES_STRICT = (
    major > 3
    or (major == 3 and minor > 2)
    or (major == 3 and minor == 2 and release >= 3))
from bs4.element import (
CData,
Comment,
Declaration,
Doctype,
ProcessingInstruction,
)
from bs4.dammit import EntitySubstitution, UnicodeDammit
from bs4.builder import (
HTML,
HTMLTreeBuilder,
STRICT,
)
HTMLPARSER = 'html.parser'
class BeautifulSoupHTMLParser(HTMLParser):
    """HTMLParser subclass that forwards parse events to a BeautifulSoup
    object.

    The driving tree builder assigns the target soup to ``self.soup``
    before feeding any markup.
    """

    def handle_starttag(self, name, attrs):
        # XXX namespace
        attr_dict = {}
        for key, value in attrs:
            # Change None attribute values to the empty string
            # for consistency with the other tree builders.
            if value is None:
                value = ''
            attr_dict[key] = value
            attrvalue = '""'  # NOTE(review): unused assignment, kept as-is
        self.soup.handle_starttag(name, None, None, attr_dict)

    def handle_endtag(self, name):
        self.soup.handle_endtag(name)

    def handle_data(self, data):
        self.soup.handle_data(data)

    def handle_charref(self, name):
        # XXX workaround for a bug in HTMLParser. Remove this once
        # it's fixed.
        if name.startswith('x'):
            real_name = int(name.lstrip('x'), 16)
        elif name.startswith('X'):
            real_name = int(name.lstrip('X'), 16)
        else:
            real_name = int(name)

        try:
            data = unichr(real_name)
        except (ValueError, OverflowError), e:
            # Out-of-range code point: substitute U+FFFD.
            data = u"\N{REPLACEMENT CHARACTER}"

        self.handle_data(data)

    def handle_entityref(self, name):
        character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
        if character is not None:
            data = character
        else:
            # Unknown entity: pass it through verbatim.
            data = "&%s;" % name
        self.handle_data(data)

    def handle_comment(self, data):
        self.soup.endData()
        self.soup.handle_data(data)
        self.soup.endData(Comment)

    def handle_decl(self, data):
        self.soup.endData()
        if data.startswith("DOCTYPE "):
            data = data[len("DOCTYPE "):]
        elif data == 'DOCTYPE':
            # i.e. "<!DOCTYPE>"
            data = ''
        self.soup.handle_data(data)
        self.soup.endData(Doctype)

    def unknown_decl(self, data):
        if data.upper().startswith('CDATA['):
            cls = CData
            data = data[len('CDATA['):]
        else:
            cls = Declaration
        self.soup.endData()
        self.soup.handle_data(data)
        self.soup.endData(cls)

    def handle_pi(self, data):
        self.soup.endData()
        if data.endswith("?") and data.lower().startswith("xml"):
            # "An XHTML processing instruction using the trailing '?'
            # will cause the '?' to be included in data." - HTMLParser
            # docs.
            #
            # Strip the question mark so we don't end up with two
            # question marks.
            data = data[:-1]
        self.soup.handle_data(data)
        self.soup.endData(ProcessingInstruction)
class HTMLParserTreeBuilder(HTMLTreeBuilder):
    """Tree builder driven by Python's built-in HTMLParser."""

    is_xml = False
    features = [HTML, STRICT, HTMLPARSER]

    def __init__(self, *args, **kwargs):
        # Prefer lenient parsing where the interpreter supports it; the
        # parser itself is created lazily in feed().
        if CONSTRUCTOR_TAKES_STRICT:
            kwargs['strict'] = False
        self.parser_args = (args, kwargs)

    def prepare_markup(self, markup, user_specified_encoding=None,
                       document_declared_encoding=None):
        """
        :return: A 4-tuple (markup, original encoding, encoding
        declared within markup, whether any characters had to be
        replaced with REPLACEMENT CHARACTER).
        """
        if isinstance(markup, unicode):
            # Already decoded: nothing to detect or convert.
            yield (markup, None, None, False)
            return

        try_encodings = [user_specified_encoding, document_declared_encoding]
        dammit = UnicodeDammit(markup, try_encodings, is_html=True)
        yield (dammit.markup, dammit.original_encoding,
               dammit.declared_html_encoding,
               dammit.contains_replacement_characters)

    def feed(self, markup):
        args, kwargs = self.parser_args
        parser = BeautifulSoupHTMLParser(*args, **kwargs)
        parser.soup = self.soup
        try:
            parser.feed(markup)
        except HTMLParseError, e:
            warnings.warn(RuntimeWarning(
                "Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help."))
            raise e
# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some
# 3.2.3 code. This ensures they don't treat markup like <p></p> as a
# string.
#
# XXX This code can be removed once most Python 3 users are on 3.2.3.
if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT:
    import re
    # Tolerant attribute regex backported from 3.2.3.
    attrfind_tolerant = re.compile(
        r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'
        r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?')
    HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant

    locatestarttagend = re.compile(r"""
  <[a-zA-Z][-.a-zA-Z0-9:_]*          # tag name
  (?:\s+                             # whitespace before attribute name
    (?:[a-zA-Z_][-.:a-zA-Z0-9_]*     # attribute name
      (?:\s*=\s*                     # value indicator
        (?:'[^']*'                   # LITA-enclosed value
          |\"[^\"]*\"                # LIT-enclosed value
          |[^'\">\s]+                # bare value
         )
       )?
     )
   )*
  \s*                                # trailing whitespace
""", re.VERBOSE)
    BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend

    from html.parser import tagfind, attrfind

    def parse_starttag(self, i):
        # Backport of HTMLParser.parse_starttag from 3.2.3; installed on
        # BeautifulSoupHTMLParser below.
        self.__starttag_text = None
        endpos = self.check_for_whole_start_tag(i)
        if endpos < 0:
            return endpos
        rawdata = self.rawdata
        self.__starttag_text = rawdata[i:endpos]

        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        match = tagfind.match(rawdata, i+1)
        assert match, 'unexpected call to parse_starttag()'
        k = match.end()
        self.lasttag = tag = rawdata[i+1:k].lower()
        while k < endpos:
            if self.strict:
                m = attrfind.match(rawdata, k)
            else:
                m = attrfind_tolerant.match(rawdata, k)
            if not m:
                break
            attrname, rest, attrvalue = m.group(1, 2, 3)
            if not rest:
                attrvalue = None
            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                attrvalue = attrvalue[1:-1]
            if attrvalue:
                attrvalue = self.unescape(attrvalue)
            attrs.append((attrname.lower(), attrvalue))
            k = m.end()

        end = rawdata[k:endpos].strip()
        if end not in (">", "/>"):
            # Malformed start tag: report position-accurate junk.
            lineno, offset = self.getpos()
            if "\n" in self.__starttag_text:
                lineno = lineno + self.__starttag_text.count("\n")
                offset = len(self.__starttag_text) \
                         - self.__starttag_text.rfind("\n")
            else:
                offset = offset + len(self.__starttag_text)
            if self.strict:
                self.error("junk characters in start tag: %r"
                           % (rawdata[k:endpos][:20],))
            self.handle_data(rawdata[i:endpos])
            return endpos
        if end.endswith('/>'):
            # XHTML-style empty tag: <span attr="value" />
            self.handle_startendtag(tag, attrs)
        else:
            self.handle_starttag(tag, attrs)
            if tag in self.CDATA_CONTENT_ELEMENTS:
                self.set_cdata_mode(tag)
        return endpos

    def set_cdata_mode(self, elem):
        # Compare closing tags case-insensitively (3.2.3 behaviour).
        self.cdata_elem = elem.lower()
        self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)

    BeautifulSoupHTMLParser.parse_starttag = parse_starttag
    BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode

    CONSTRUCTOR_TAKES_STRICT = True
| gpl-2.0 |
xxd3vin/spp-sdk | opt/Python27/Lib/encodings/cp1006.py | 593 | 13824 | """ Python Character Mapping Codec cp1006 generated from 'MAPPINGS/VENDORS/MISC/CP1006.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless codec delegating to the module-level charmap tables."""

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # `final` is ignored: charmap encoding carries no state.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # `final` is ignored: charmap decoding carries no state.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream support comes entirely from the mixin bases.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream support comes entirely from the mixin bases.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry registered for the cp1006 encoding."""
    # Codec is stateless, so one shared instance can supply both the
    # encode and decode entry points.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp1006',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u06f0' # 0xA1 -> EXTENDED ARABIC-INDIC DIGIT ZERO
u'\u06f1' # 0xA2 -> EXTENDED ARABIC-INDIC DIGIT ONE
u'\u06f2' # 0xA3 -> EXTENDED ARABIC-INDIC DIGIT TWO
u'\u06f3' # 0xA4 -> EXTENDED ARABIC-INDIC DIGIT THREE
u'\u06f4' # 0xA5 -> EXTENDED ARABIC-INDIC DIGIT FOUR
u'\u06f5' # 0xA6 -> EXTENDED ARABIC-INDIC DIGIT FIVE
u'\u06f6' # 0xA7 -> EXTENDED ARABIC-INDIC DIGIT SIX
u'\u06f7' # 0xA8 -> EXTENDED ARABIC-INDIC DIGIT SEVEN
u'\u06f8' # 0xA9 -> EXTENDED ARABIC-INDIC DIGIT EIGHT
u'\u06f9' # 0xAA -> EXTENDED ARABIC-INDIC DIGIT NINE
u'\u060c' # 0xAB -> ARABIC COMMA
u'\u061b' # 0xAC -> ARABIC SEMICOLON
u'\xad' # 0xAD -> SOFT HYPHEN
u'\u061f' # 0xAE -> ARABIC QUESTION MARK
u'\ufe81' # 0xAF -> ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
u'\ufe8d' # 0xB0 -> ARABIC LETTER ALEF ISOLATED FORM
u'\ufe8e' # 0xB1 -> ARABIC LETTER ALEF FINAL FORM
u'\ufe8e' # 0xB2 -> ARABIC LETTER ALEF FINAL FORM
u'\ufe8f' # 0xB3 -> ARABIC LETTER BEH ISOLATED FORM
u'\ufe91' # 0xB4 -> ARABIC LETTER BEH INITIAL FORM
u'\ufb56' # 0xB5 -> ARABIC LETTER PEH ISOLATED FORM
u'\ufb58' # 0xB6 -> ARABIC LETTER PEH INITIAL FORM
u'\ufe93' # 0xB7 -> ARABIC LETTER TEH MARBUTA ISOLATED FORM
u'\ufe95' # 0xB8 -> ARABIC LETTER TEH ISOLATED FORM
u'\ufe97' # 0xB9 -> ARABIC LETTER TEH INITIAL FORM
u'\ufb66' # 0xBA -> ARABIC LETTER TTEH ISOLATED FORM
u'\ufb68' # 0xBB -> ARABIC LETTER TTEH INITIAL FORM
u'\ufe99' # 0xBC -> ARABIC LETTER THEH ISOLATED FORM
u'\ufe9b' # 0xBD -> ARABIC LETTER THEH INITIAL FORM
u'\ufe9d' # 0xBE -> ARABIC LETTER JEEM ISOLATED FORM
u'\ufe9f' # 0xBF -> ARABIC LETTER JEEM INITIAL FORM
u'\ufb7a' # 0xC0 -> ARABIC LETTER TCHEH ISOLATED FORM
u'\ufb7c' # 0xC1 -> ARABIC LETTER TCHEH INITIAL FORM
u'\ufea1' # 0xC2 -> ARABIC LETTER HAH ISOLATED FORM
u'\ufea3' # 0xC3 -> ARABIC LETTER HAH INITIAL FORM
u'\ufea5' # 0xC4 -> ARABIC LETTER KHAH ISOLATED FORM
u'\ufea7' # 0xC5 -> ARABIC LETTER KHAH INITIAL FORM
u'\ufea9' # 0xC6 -> ARABIC LETTER DAL ISOLATED FORM
u'\ufb84' # 0xC7 -> ARABIC LETTER DAHAL ISOLATED FORMN
u'\ufeab' # 0xC8 -> ARABIC LETTER THAL ISOLATED FORM
u'\ufead' # 0xC9 -> ARABIC LETTER REH ISOLATED FORM
u'\ufb8c' # 0xCA -> ARABIC LETTER RREH ISOLATED FORM
u'\ufeaf' # 0xCB -> ARABIC LETTER ZAIN ISOLATED FORM
u'\ufb8a' # 0xCC -> ARABIC LETTER JEH ISOLATED FORM
u'\ufeb1' # 0xCD -> ARABIC LETTER SEEN ISOLATED FORM
u'\ufeb3' # 0xCE -> ARABIC LETTER SEEN INITIAL FORM
u'\ufeb5' # 0xCF -> ARABIC LETTER SHEEN ISOLATED FORM
u'\ufeb7' # 0xD0 -> ARABIC LETTER SHEEN INITIAL FORM
u'\ufeb9' # 0xD1 -> ARABIC LETTER SAD ISOLATED FORM
u'\ufebb' # 0xD2 -> ARABIC LETTER SAD INITIAL FORM
u'\ufebd' # 0xD3 -> ARABIC LETTER DAD ISOLATED FORM
u'\ufebf' # 0xD4 -> ARABIC LETTER DAD INITIAL FORM
u'\ufec1' # 0xD5 -> ARABIC LETTER TAH ISOLATED FORM
u'\ufec5' # 0xD6 -> ARABIC LETTER ZAH ISOLATED FORM
u'\ufec9' # 0xD7 -> ARABIC LETTER AIN ISOLATED FORM
u'\ufeca' # 0xD8 -> ARABIC LETTER AIN FINAL FORM
u'\ufecb' # 0xD9 -> ARABIC LETTER AIN INITIAL FORM
u'\ufecc' # 0xDA -> ARABIC LETTER AIN MEDIAL FORM
u'\ufecd' # 0xDB -> ARABIC LETTER GHAIN ISOLATED FORM
u'\ufece' # 0xDC -> ARABIC LETTER GHAIN FINAL FORM
u'\ufecf' # 0xDD -> ARABIC LETTER GHAIN INITIAL FORM
u'\ufed0' # 0xDE -> ARABIC LETTER GHAIN MEDIAL FORM
u'\ufed1' # 0xDF -> ARABIC LETTER FEH ISOLATED FORM
u'\ufed3' # 0xE0 -> ARABIC LETTER FEH INITIAL FORM
u'\ufed5' # 0xE1 -> ARABIC LETTER QAF ISOLATED FORM
u'\ufed7' # 0xE2 -> ARABIC LETTER QAF INITIAL FORM
u'\ufed9' # 0xE3 -> ARABIC LETTER KAF ISOLATED FORM
u'\ufedb' # 0xE4 -> ARABIC LETTER KAF INITIAL FORM
u'\ufb92' # 0xE5 -> ARABIC LETTER GAF ISOLATED FORM
u'\ufb94' # 0xE6 -> ARABIC LETTER GAF INITIAL FORM
u'\ufedd' # 0xE7 -> ARABIC LETTER LAM ISOLATED FORM
u'\ufedf' # 0xE8 -> ARABIC LETTER LAM INITIAL FORM
u'\ufee0' # 0xE9 -> ARABIC LETTER LAM MEDIAL FORM
u'\ufee1' # 0xEA -> ARABIC LETTER MEEM ISOLATED FORM
u'\ufee3' # 0xEB -> ARABIC LETTER MEEM INITIAL FORM
u'\ufb9e' # 0xEC -> ARABIC LETTER NOON GHUNNA ISOLATED FORM
u'\ufee5' # 0xED -> ARABIC LETTER NOON ISOLATED FORM
u'\ufee7' # 0xEE -> ARABIC LETTER NOON INITIAL FORM
u'\ufe85' # 0xEF -> ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
u'\ufeed' # 0xF0 -> ARABIC LETTER WAW ISOLATED FORM
u'\ufba6' # 0xF1 -> ARABIC LETTER HEH GOAL ISOLATED FORM
u'\ufba8' # 0xF2 -> ARABIC LETTER HEH GOAL INITIAL FORM
u'\ufba9' # 0xF3 -> ARABIC LETTER HEH GOAL MEDIAL FORM
u'\ufbaa' # 0xF4 -> ARABIC LETTER HEH DOACHASHMEE ISOLATED FORM
u'\ufe80' # 0xF5 -> ARABIC LETTER HAMZA ISOLATED FORM
u'\ufe89' # 0xF6 -> ARABIC LETTER YEH WITH HAMZA ABOVE ISOLATED FORM
u'\ufe8a' # 0xF7 -> ARABIC LETTER YEH WITH HAMZA ABOVE FINAL FORM
u'\ufe8b' # 0xF8 -> ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
u'\ufef1' # 0xF9 -> ARABIC LETTER YEH ISOLATED FORM
u'\ufef2' # 0xFA -> ARABIC LETTER YEH FINAL FORM
u'\ufef3' # 0xFB -> ARABIC LETTER YEH INITIAL FORM
u'\ufbb0' # 0xFC -> ARABIC LETTER YEH BARREE WITH HAMZA ABOVE ISOLATED FORM
u'\ufbae' # 0xFD -> ARABIC LETTER YEH BARREE ISOLATED FORM
u'\ufe7c' # 0xFE -> ARABIC SHADDA ISOLATED FORM
u'\ufe7d' # 0xFF -> ARABIC SHADDA MEDIAL FORM
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| mit |
liamgh/liamgreenhughes-sl4a-tf101 | python/src/Lib/test/test_sha.py | 58 | 1694 | # Testing sha module (NIST's Secure Hash Algorithm)
# use the three examples from Federal Information Processing Standards
# Publication 180-1, Secure Hash Standard, 1995 April 17
# http://www.itl.nist.gov/div897/pubs/fip180-1.htm
import warnings
# Importing `sha` raises a DeprecationWarning (it is superseded by
# hashlib); silence it so the test run stays clean.
warnings.filterwarnings("ignore", "the sha module is deprecated.*",
                        DeprecationWarning)

import sha
import unittest
from test import test_support
class SHATestCase(unittest.TestCase):
    """Check sha.new() against the FIPS 180-1 reference digests."""

    def check(self, data, digest):
        # Check digest matches the expected value
        obj = sha.new(data)
        computed = obj.hexdigest()
        self.assert_(computed == digest)

        # Verify that the value doesn't change between two consecutive
        # digest operations.
        computed_again = obj.hexdigest()
        self.assert_(computed == computed_again)

        # Check hexdigest() output matches digest()'s output
        digest = obj.digest()
        hexd = ""
        for c in digest:
            hexd += '%02x' % ord(c)
        self.assert_(computed == hexd)

    def test_case_1(self):
        self.check("abc",
                   "a9993e364706816aba3e25717850c26c9cd0d89d")

    def test_case_2(self):
        self.check("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
                   "84983e441c3bd26ebaae4aa1f95129e5e54670f1")

    def test_case_3(self):
        # One million 'a' characters (FIPS 180-1 example 3).
        self.check("a" * 1000000,
                   "34aa973cd4c4daa4f61eeb2bdbad27316534016f")

    def test_case_4(self):
        # 80 bytes of 0xAA; exercises non-ASCII input.
        self.check(chr(0xAA) * 80,
                   '4ca0ef38f1794b28a8f8ee110ee79d48ce13be25')
def test_main():
    # Entry point used by regrtest and by direct execution below.
    test_support.run_unittest(SHATestCase)


if __name__ == "__main__":
    test_main()
| apache-2.0 |
jbrt/Array-XRay | arrays/vmax/vmax_filters.py | 1 | 8430 | #!/usr/bin/env python3
# coding: utf-8
"""
These objects are going to filter the input data and only keep the interesting
attributes. Convert of capacity units may be occur to stay more consistent
between arrays.
"""
import abc
from collections import OrderedDict
class VMAXFilter(object, metaclass=abc.ABCMeta):
    """Abstract base class for all filter objects.

    Subclasses implement :meth:`_clean`, which reads ``self._data`` and
    fills ``self._f_data`` with the filtered entries.  The instance is
    then iterable, yielding one filtered entry at a time; iteration
    consumes the filtered data (a second pass yields nothing).
    """

    def __init__(self, data: list):
        """
        Constructor

        :param data: list of data to filter
        """
        self._data = data
        self._f_data = []  # filtered entries, populated by _clean()
        self._clean()

    def __iter__(self):
        return self

    def __next__(self):
        # Historical behaviour: pop from the front, so iterating empties
        # the filtered list.
        try:
            one_element = self._f_data.pop(0)
        except IndexError:
            raise StopIteration
        return one_element

    @staticmethod
    def _bytes_to_gb(value):
        """Convert a byte count (int or numeric string) to whole GiB.

        FIX: use floor division instead of ``int(int(value)/GiB)`` -- the
        true division went through a float, which loses precision for
        very large capacities; ``//`` is exact for any size (capacities
        are non-negative, so truncation and flooring agree).
        """
        return int(value) // 1073741824

    @abc.abstractmethod
    def _clean(self):
        """Populate self._f_data from self._data (subclass-specific)."""
        raise NotImplementedError
class VMAXFastPolicy(VMAXFilter):
    """Keep only the interesting attributes of a FAST policy entry."""

    # Attributes retained; missing ones are filled with an empty string.
    _KEYS = ('fastPolicyId', 'tier_1_id', 'tier_2_id', 'tier_3_id',
             'tier_4_id', 'tier_1_capacity', 'tier_2_capacity',
             'tier_3_capacity', 'tier_4_capacity', 'storage_group')

    def __init__(self, data):
        super(VMAXFastPolicy, self).__init__(data)

    def _clean(self):
        for raw in self._data:
            self._f_data.append(
                OrderedDict((key, raw.get(key, '')) for key in self._KEYS))
class VMAXHost(VMAXFilter):
    """Keep only the interesting attributes of a host entry."""

    def _clean(self):
        # Missing attributes are filled with an empty string so every
        # filtered entry exposes the same columns.
        keys = ('hostId', 'num_of_initiators', 'num_of_host_groups',
                'num_of_masking_views', 'consistent_lun', 'hostgroup',
                'initiator', 'maskingview')
        for raw in self._data:
            self._f_data.append(
                OrderedDict((key, raw.get(key, '')) for key in keys))
class VMAXHostGroup(VMAXFilter):
    """Keep only the interesting attributes of a host-group entry."""

    def _clean(self):
        keys = ('hostGroupId', 'num_of_hosts', 'num_of_initiators',
                'num_of_masking_views', 'consistent_lun', 'maskingview')
        for raw in self._data:
            # NOTE(review): unlike the other filters, missing keys are NOT
            # filled with '' here -- confirm whether that is intentional.
            clean = OrderedDict(
                (key, raw[key]) for key in keys if key in raw)
            # Flatten the host sub-documents down to their ids.
            clean['host'] = [host['hostId'] for host in raw['host']]
            self._f_data.append(clean)
class VMAXInitiator(VMAXFilter):
    """Keep only the interesting attributes of an initiator entry."""

    def _clean(self):
        keys = ('initiatorId', 'host', 'alias', 'hostGroup', 'on_fabric',
                'logged_in', 'num_of_masking_views', 'maskingview',
                'port_flags_override', 'num_of_host_groups',
                'flags_in_effect')
        for raw in self._data:
            clean = OrderedDict((key, raw.get(key, '')) for key in keys)
            # Collapse director/port pairs into "director:port" strings.
            if 'symmetrixPortKey' in raw:
                clean['symmetrixPortKey'] = [
                    '%s:%s' % (pk['directorId'], pk['portId'])
                    for pk in raw['symmetrixPortKey']]
            else:
                clean['symmetrixPortKey'] = ''
            self._f_data.append(clean)
class VMAXMaskingView(VMAXFilter):
    """Keep only the interesting attributes of a masking-view entry."""

    def _clean(self):
        keys = ('maskingViewId', 'hostId', 'hostGroupId', 'portGroupId',
                'storageGroupId')
        for raw in self._data:
            self._f_data.append(
                OrderedDict((key, raw.get(key, '')) for key in keys))
class VMAXPortGroup(VMAXFilter):
    """Keep only the interesting attributes of a port-group entry."""

    def _clean(self):
        keys = ('portGroupId', 'num_of_ports', 'num_of_masking_views')
        for raw in self._data:
            # FIX: use .get() with an empty default, consistent with the
            # other filters, instead of raising KeyError when a port group
            # is missing one of the expected attributes.
            clean = OrderedDict((key, raw.get(key, '')) for key in keys)
            # Collapse director/port pairs into "director:port" strings.
            if 'symmetrixPortKey' in raw:
                clean['symmetrixPortKey'] = [
                    '%s:%s' % (pk['directorId'], pk['portId'])
                    for pk in raw['symmetrixPortKey']]
            else:
                clean['symmetrixPortKey'] = ''
            self._f_data.append(clean)
class VMAXSRPool(VMAXFilter):
    """Filter that reduces raw SRP records to a fixed set of keys."""

    def __init__(self, data):
        super(VMAXSRPool, self).__init__(data)

    def _clean(self):
        """Copy the interesting keys, filling missing ones with ''."""
        for to_clean in self._data:
            clean = OrderedDict()
            keys = ['srpId', 'emulation', 'total_usable_cap_gb',
                    'total_subscribed_cap_gb', 'total_allocated_cap_gb',
                    'total_snapshot_allocated_cap_gb',
                    'total_srdf_dse_allocated_cap_gb', 'reserved_cap_percent']
            for key in keys:
                # Fill absent keys with '' instead of raising KeyError,
                # consistent with the other filter classes in this module.
                clean[key] = to_clean.get(key, '')
            self._f_data.append(clean)
class VMAXStorageGroup(VMAXFilter):
    """Filter that reduces raw storage-group records to a fixed set of keys."""

    def __init__(self, data):
        super(VMAXStorageGroup, self).__init__(data)

    def _clean(self):
        """Copy the interesting keys, filling missing ones with ''."""
        wanted = ('storageGroupId', 'num_of_masking_views', 'type',
                  'num_of_child_sgs', 'num_of_vols', 'cap_gb',
                  'fast_policy_name', 'parent_storage_groups',
                  'child_storage_groups', 'maskingview')
        for record in self._data:
            # .get(name, '') is equivalent to the create-and-fill-empty
            # branch used elsewhere in this module.
            self._f_data.append(
                OrderedDict((name, record.get(name, '')) for name in wanted))
class VMAXSystem(object):
    """Summarises one raw symmetrix record into a flat ordered mapping."""

    def __init__(self, data):
        self._data = data
        self._response = None
        self._clean()

    def _clean(self):
        """Build the flattened summary once and cache it in _response."""
        summary = OrderedDict()
        for name in ('symmetrixId', 'model', 'ucode', 'device_count'):
            summary[name] = self._data[name]
        # Prefix the virtual-capacity figures so they cannot collide
        # with the top-level keys.
        for name, value in self._data['virtualCapacity'].items():
            summary['virtual_' + name] = value
        self._response = summary

    def clean(self):
        """Return the cached summary built by _clean()."""
        return self._response
class VMAXThinDevice(VMAXFilter):
    """Filter that reduces raw thin-device records to a fixed set of keys."""

    def __init__(self, data):
        super(VMAXThinDevice, self).__init__(data)

    def _clean(self):
        """Copy the interesting keys, filling missing ones with ''."""
        wanted = ('volumeId', 'wwn', 'cap_gb', 'cap_mb', 'cap_cyl',
                  'volume_identifier', 'status', 'type', 'allocated_percent',
                  'num_of_front_end_paths', 'num_of_storage_groups',
                  'storageGroupId')
        for record in self._data:
            # .get(name, '') is equivalent to the create-and-fill-empty
            # branch used elsewhere in this module.
            self._f_data.append(
                OrderedDict((name, record.get(name, '')) for name in wanted))
class VMAXThinPool(VMAXFilter):
    """Filter that reduces raw thin-pool records to a fixed set of keys."""

    def __init__(self, data):
        super(VMAXThinPool, self).__init__(data)

    def _clean(self):
        """Copy the interesting keys, filling missing ones with ''."""
        for to_clean in self._data:
            clean = OrderedDict()
            keys = ['poolId', 'raid', 'diskTechnology', 'emulation',
                    'percent_allocated', 'percent_subscription',
                    'total_gb', 'enabled_gb', 'used_gb', 'free_gb']
            for key in keys:
                # Fill absent keys with '' instead of raising KeyError,
                # consistent with the other filter classes in this module.
                clean[key] = to_clean.get(key, '')
            self._f_data.append(clean)
| lgpl-3.0 |
techsd/namebench | nb_third_party/jinja2/meta.py | 406 | 4144 | # -*- coding: utf-8 -*-
"""
jinja2.meta
~~~~~~~~~~~
This module implements various functions that exposes information about
templates that might be interesting for various kinds of applications.
:copyright: (c) 2010 by the Jinja Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.compiler import CodeGenerator
class TrackingCodeGenerator(CodeGenerator):
    """We abuse the code generator for introspection."""

    def __init__(self, environment):
        CodeGenerator.__init__(self, environment, '<introspection>',
                               '<introspection>')
        # Collects every name the template will look up in the context.
        self.undeclared_identifiers = set()

    def write(self, x):
        """Don't write."""
        # Deliberately a no-op: we only want the traversal side effects,
        # not the generated source.

    def pull_locals(self, frame):
        """Remember all undeclared identifiers."""
        self.undeclared_identifiers.update(frame.identifiers.undeclared)
def find_undeclared_variables(ast):
    """Returns a set of all variables in the AST that will be looked up from
    the context at runtime.  Because at compile time it's not known which
    variables will be used depending on the path the execution takes at
    runtime, all variables are returned.

    >>> from jinja2 import Environment, meta
    >>> env = Environment()
    >>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
    >>> meta.find_undeclared_variables(ast)
    set(['bar'])

    .. admonition:: Implementation

       Internally the code generator is used for finding undeclared
       variables.  This is good to know because the code generator might
       raise a :exc:`TemplateAssertionError` during compilation and as a
       matter of fact this function can currently raise that exception as
       well.
    """
    # Walk the whole AST with the tracking generator; it records every
    # context lookup as a side effect of code generation.
    tracker = TrackingCodeGenerator(ast.environment)
    tracker.visit(ast)
    return tracker.undeclared_identifiers
def find_referenced_templates(ast):
    """Finds all the referenced templates from the AST.  This will return an
    iterator over all the hardcoded template extensions, inclusions and
    imports.  If dynamic inheritance or inclusion is used, `None` will be
    yielded.

    >>> from jinja2 import Environment, meta
    >>> env = Environment()
    >>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
    >>> list(meta.find_referenced_templates(ast))
    ['layout.html', None]

    This function is useful for dependency tracking.  For example if you want
    to rebuild parts of the website after a layout template has changed.
    """
    # NOTE(review): ``basestring`` below is Python 2 only; this vendored
    # module predates any Python 3 port.
    for node in ast.find_all((nodes.Extends, nodes.FromImport, nodes.Import,
                              nodes.Include)):
        if not isinstance(node.template, nodes.Const):
            # a tuple with some non consts in there
            if isinstance(node.template, (nodes.Tuple, nodes.List)):
                for template_name in node.template.items:
                    # something const, only yield the strings and ignore
                    # non-string consts that really just make no sense
                    if isinstance(template_name, nodes.Const):
                        if isinstance(template_name.value, basestring):
                            yield template_name.value
                    # something dynamic in there
                    else:
                        yield None
            # something dynamic we don't know about here
            else:
                yield None
            continue
        # constant is a basestring, direct template name
        if isinstance(node.template.value, basestring):
            yield node.template.value
        # a tuple or list (latter *should* not happen) made of consts,
        # yield the consts that are strings.  We could warn here for
        # non string values
        elif isinstance(node, nodes.Include) and \
             isinstance(node.template.value, (tuple, list)):
            for template_name in node.template.value:
                if isinstance(template_name, basestring):
                    yield template_name
        # something else we don't care about, we could warn here
        else:
            yield None
| apache-2.0 |
MER-GROUP/intellij-community | plugins/hg4idea/testData/bin/mercurial/hook.py | 93 | 7881 | # hook.py - hook support for mercurial
#
# Copyright 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from i18n import _
import os, sys, time, types
import extensions, util, demandimport
def _pythonhook(ui, repo, name, hname, funcname, args, throw):
    '''call python hook. hook is callable object, looked up as
    name in python module. if callable returns "true", hook
    fails, else passes. if hook raises exception, treated as
    hook failure. exception propagates if throw is "true".

    reason for "true" meaning "hook failed" is so that
    unmodified commands (e.g. mercurial.commands.update) can
    be run as hooks without wrappers to convert return values.'''

    ui.note(_("calling hook %s: %s\n") % (hname, funcname))
    starttime = time.time()

    obj = funcname
    if not util.safehasattr(obj, '__call__'):
        # funcname is a dotted "module.attr" string rather than a
        # callable: import the module, then resolve the attribute chain.
        d = funcname.rfind('.')
        if d == -1:
            raise util.Abort(_('%s hook is invalid ("%s" not in '
                               'a module)') % (hname, funcname))
        modname = funcname[:d]
        oldpaths = sys.path
        if util.mainfrozen():
            # binary installs require sys.path manipulation
            modpath, modfile = os.path.split(modname)
            if modpath and modfile:
                sys.path = sys.path[:] + [modpath]
                modname = modfile
        try:
            demandimport.disable()
            obj = __import__(modname)
            demandimport.enable()
        except ImportError:
            # NOTE(review): sys.exc_type/exc_value/exc_traceback and the
            # "except E, v" syntax below are Python 2 only.
            e1 = sys.exc_type, sys.exc_value, sys.exc_traceback
            try:
                # extensions are loaded with hgext_ prefix
                obj = __import__("hgext_%s" % modname)
                demandimport.enable()
            except ImportError:
                demandimport.enable()
                e2 = sys.exc_type, sys.exc_value, sys.exc_traceback
                if ui.tracebackflag:
                    ui.warn(_('exception from first failed import attempt:\n'))
                ui.traceback(e1)
                if ui.tracebackflag:
                    ui.warn(_('exception from second failed import attempt:\n'))
                ui.traceback(e2)
                raise util.Abort(_('%s hook is invalid '
                                   '(import of "%s" failed)') %
                                 (hname, modname))
        sys.path = oldpaths
        try:
            for p in funcname.split('.')[1:]:
                obj = getattr(obj, p)
        except AttributeError:
            raise util.Abort(_('%s hook is invalid '
                               '("%s" is not defined)') %
                             (hname, funcname))
        if not util.safehasattr(obj, '__call__'):
            raise util.Abort(_('%s hook is invalid '
                               '("%s" is not callable)') %
                             (hname, funcname))
    try:
        try:
            # redirect IO descriptors to the ui descriptors so hooks
            # that write directly to these don't mess up the command
            # protocol when running through the command server
            old = sys.stdout, sys.stderr, sys.stdin
            sys.stdout, sys.stderr, sys.stdin = ui.fout, ui.ferr, ui.fin

            r = obj(ui=ui, repo=repo, hooktype=name, **args)
        except KeyboardInterrupt:
            raise
        except Exception, exc:
            if isinstance(exc, util.Abort):
                ui.warn(_('error: %s hook failed: %s\n') %
                        (hname, exc.args[0]))
            else:
                ui.warn(_('error: %s hook raised an exception: '
                          '%s\n') % (hname, exc))
            if throw:
                raise
            ui.traceback()
            return True
    finally:
        # always restore the real streams, and log timing even on failure
        sys.stdout, sys.stderr, sys.stdin = old
        duration = time.time() - starttime
        readablefunc = funcname
        if isinstance(funcname, types.FunctionType):
            readablefunc = funcname.__module__ + "." + funcname.__name__
        ui.log('pythonhook', 'pythonhook-%s: %s finished in %0.2f seconds\n',
               name, readablefunc, duration)
    if r:
        if throw:
            raise util.Abort(_('%s hook failed') % hname)
        ui.warn(_('warning: %s hook failed\n') % hname)
    return r
def _exthook(ui, repo, name, cmd, args, throw):
    """Run an external (shell) hook command; return its failure flag."""
    ui.note(_("running hook %s: %s\n") % (name, cmd))
    starttime = time.time()
    env = {}
    # expose the hook arguments to the child process as HG_* env vars;
    # callables are invoked lazily, dicts are rendered deterministically
    for k, v in args.iteritems():
        if util.safehasattr(v, '__call__'):
            v = v()
        if isinstance(v, dict):
            # make the dictionary element order stable across Python
            # implementations
            v = ('{' +
                 ', '.join('%r: %r' % i for i in sorted(v.iteritems())) +
                 '}')
        env['HG_' + k.upper()] = v

    if repo:
        cwd = repo.root
    else:
        cwd = os.getcwd()
    # remote http pushes route hook output through the ui object itself
    if 'HG_URL' in env and env['HG_URL'].startswith('remote:http'):
        r = util.system(cmd, environ=env, cwd=cwd, out=ui)
    else:
        r = util.system(cmd, environ=env, cwd=cwd, out=ui.fout)

    duration = time.time() - starttime
    ui.log('exthook', 'exthook-%s: %s finished in %0.2f seconds\n',
           name, cmd, duration)
    if r:
        desc, r = util.explainexit(r)
        if throw:
            raise util.Abort(_('%s hook %s') % (name, desc))
        ui.warn(_('warning: %s hook %s\n') % (name, desc))
    return r
def _allhooks(ui):
    """Return (hook name, command) pairs ordered by descending priority."""
    entries = []
    for hookname, cmd in ui.configitems('hooks'):
        if hookname.startswith('priority'):
            continue
        weight = ui.configint('hooks', 'priority.%s' % hookname, 0)
        # Negating the weight sorts high priorities first; the running
        # index keeps configuration order stable for equal priorities.
        entries.append((-weight, len(entries), hookname, cmd))
    entries.sort()
    return [(hookname, cmd) for _, _, hookname, cmd in entries]
# Module-level flag: when set, hooks' stdout is redirected to stderr so
# their output cannot corrupt the command-server protocol (see hook()).
_redirect = False
def redirect(state):
    """Enable or disable the stdout-to-stderr redirection for hooks."""
    global _redirect
    _redirect = state
def hook(ui, repo, name, throw=False, **args):
    """Run every configured hook matching *name*; return the combined
    failure flag (True if any hook failed)."""
    if not ui.callhooks:
        return False

    r = False
    oldstdout = -1
    try:
        for hname, cmd in _allhooks(ui):
            # only run hooks whose name (before any '.suffix') matches
            if hname.split('.')[0] != name or not cmd:
                continue

            # lazily duplicate/redirect stdout the first time it's needed
            if oldstdout == -1 and _redirect:
                try:
                    stdoutno = sys.__stdout__.fileno()
                    stderrno = sys.__stderr__.fileno()
                    # temporarily redirect stdout to stderr, if possible
                    if stdoutno >= 0 and stderrno >= 0:
                        sys.__stdout__.flush()
                        oldstdout = os.dup(stdoutno)
                        os.dup2(stderrno, stdoutno)
                except (OSError, AttributeError):
                    # files seem to be bogus, give up on redirecting (WSGI, etc)
                    pass

            # dispatch on the hook kind: callable, "python:mod.fn", or shell
            if util.safehasattr(cmd, '__call__'):
                r = _pythonhook(ui, repo, name, hname, cmd, args, throw) or r
            elif cmd.startswith('python:'):
                if cmd.count(':') >= 2:
                    # "python:/path/to/file.py:funcname" form
                    path, cmd = cmd[7:].rsplit(':', 1)
                    path = util.expandpath(path)
                    if repo:
                        path = os.path.join(repo.root, path)
                    try:
                        mod = extensions.loadpath(path, 'hghook.%s' % hname)
                    except Exception:
                        ui.write(_("loading %s hook failed:\n") % hname)
                        raise
                    hookfn = getattr(mod, cmd)
                else:
                    hookfn = cmd[7:].strip()
                r = _pythonhook(ui, repo, name, hname, hookfn, args, throw) or r
            else:
                r = _exthook(ui, repo, hname, cmd, args, throw) or r
    finally:
        # undo the fd-level redirection if it was installed
        if _redirect and oldstdout >= 0:
            os.dup2(oldstdout, stdoutno)
            os.close(oldstdout)

    return r
| apache-2.0 |
fdvarela/odoo8 | addons/board/__init__.py | 439 | 1144 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import board
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
simontakite/sysadmin | pythonscripts/programmingpython/Ai/TicTacToe/tictactoe_lists.py | 2 | 19386 | # this file has been updated for Python 3.X
# at least enough to run--I'd probably change more given time and need
import random, sys, time
from tkinter import *
from tkinter.messagebox import showinfo, askyesno
from PP4E.Gui.Tools.guimaker import GuiMakerWindowMenu
User, Machine = 'user', 'machine' # players
X, O, Empty = 'X', 'O', ' ' # board cell states
Fontsz = 50 # defaults if no constructor args
Degree = 3 # default=3 rows/cols=tic-tac-toe
Mode = 'Expert2' # default machine move strategy
Debug = True
trace = print
def traceif(*args):
    """Print *args* via trace() only when global debugging is enabled."""
    if not Debug:
        return
    trace(*args)
def pp(board):
    """Format *board* for debug tracing, one indented row per line.

    NOTE(review): returns None when Debug is False -- presumably only
    ever called inside debug-only trace paths; confirm before using the
    result elsewhere.
    """
    if Debug:
        rows = (('\n\t' + str(row)) for row in board) # 3.x: was map/lambda in prior
        return ''.join(rows)
helptext = """PyToe 1.1
Programming Python 4E
A Tic-tac-toe board game
written in Python with tkinter\n
Version 1.1: April 2010, Python 3.X port
Version 1.0: July 1999, developed for 2E\n
Click in cells to move.
Command-line arguments:\n
-degree N sets board size
N=number rows/columns\n
-mode M sets machine skill
M=Minimax, Expert1|2,...\n
-fg F, -bg B
F,B=color name\n
-fontsz N
N=marks size\n
-goesFirst user|machine
-userMark X|O"""
class Record:
    """Win/loss/draw tally for the current PyToe session."""

    def __init__(self):
        self.win = 0    # games the user won
        self.loss = 0   # games the machine won
        self.draw = 0   # games that ended in a draw
class TicTacToeBase(GuiMakerWindowMenu):               # a kind of Frame
    """Core PyToe board widget: draws the grid, tracks turns, and checks
    for wins and draws.  Strategy subclasses supply pickMove()."""

    def __init__(self, parent=None,                    # with a menu bar
                 fg='black', bg='white', fontsz=Fontsz,
                 goesFirst=User, userMark=X,
                 degree=Degree):
        self.nextMove = goesFirst
        self.userMark = userMark
        self.machineMark = (userMark==X and O) or X    # or if/else expr
        self.degree = degree
        self.record = Record()
        # bound as a lambda so drawBoard sees the constructor's options
        self.makeWidgets = lambda: self.drawBoard(fg, bg, fontsz)  # no defaults
        GuiMakerWindowMenu.__init__(self, parent=parent)
        self.master.title('PyToe 1.1')
        if goesFirst == Machine: self.machineMove()    # else wait for click

    def start(self):
        """GuiMaker hook: declare the menu layout before widgets build."""
        self.helpButton = None
        self.toolBar = None
        self.menuBar = [ ('File', 0, [('Stats', 0, self.onStats),
                                      ('Quit', 0, self.quit)]),
                         ('Help', 0, [('About', 0, self.onAbout)]) ]

    def drawBoard(self, fg, bg, fontsz):
        """Build one Label per cell, mapping widget<->(row, col) both ways."""
        self.coord = {}                # widget -> (row, col)
        self.label = {}                # (row, col) -> widget
        self.board = []                # degree x degree matrix of marks
        for i in range(self.degree):
            self.board.append([0] * self.degree)
            frm = Frame(self)          # one row frame per board row
            frm.pack(expand=YES, fill=BOTH)
            for j in range(self.degree):
                widget = Label(frm, fg=fg, bg=bg,
                               text=' ', font=('courier', fontsz, 'bold'),
                               relief=SUNKEN, bd=4, padx=10, pady=10)
                widget.pack(side=LEFT, expand=YES, fill=BOTH)
                widget.bind('<Button-1>', self.onLeftClick)
                self.coord[widget] = (i, j)
                self.label[(i, j)] = widget
                self.board[i][j] = Empty

    def onLeftClick(self, event):
        """Place the user's mark if it's their turn and the cell is free."""
        label = event.widget
        row, col = self.coord[label]
        if self.nextMove == User and self.board[row][col] == Empty:
            label.config(text=self.userMark)
            self.board[row][col] = self.userMark
            self.nextMove = Machine
            self.checkFinish()
            self.machineMove()

    def machineMove(self):
        """Ask the strategy subclass for a move and play it."""
        row, col = self.pickMove()
        self.board[row][col] = self.machineMark
        label = self.label[(row, col)]
        label.config(text=self.machineMark)
        self.checkFinish()
        self.nextMove = User           # wait for next left click or quit

    def clearBoard(self):
        """Reset every cell to Empty for a new game."""
        for row, col in self.label.keys():
            self.label[(row, col)].config(text=' ')
            self.board[row][col] = Empty

    #
    # end test
    #
    def checkDraw(self, board=None):
        """Return true when no cell is Empty (board completely full)."""
        board = board or self.board
        for row in board:
            if Empty in row:
                return 0               # 3.x: True/False better
        return 1                       # none empty = draw or win

    def checkWin(self, mark, board=None):
        """Return true if *mark* fills any row, column, or diagonal;
        falls through (returning None) when there is no win."""
        board = board or self.board
        for row in board:
            if row.count(mark) == self.degree:     # check across
                return 1                           # row=all mark?
        for col in range(self.degree):
            for row in board:                      # check down
                if row[col] != mark:               # break to next col
                    break
            else:
                return 1
        for row in range(self.degree):             # check diag1
            col = row                              # row == col
            if board[row][col] != mark: break
        else:
            return 1
        for row in range(self.degree):             # check diag2
            col = (self.degree-1) - row            # row+col = degree-1
            if board[row][col] != mark: break
        else:
            return 1

    def checkFinish(self):
        """Announce win/loss/draw, update the tally, offer a rematch."""
        outcome = None
        if self.checkWin(self.userMark):
            outcome = "You've won!"
            self.record.win += 1                   # 3.x: changed to use += globally
        elif self.checkWin(self.machineMark):      # for both style and performance
            outcome = 'I win again :-)'
            self.record.loss += 1
        elif self.checkDraw():
            outcome = 'Looks like a draw'
            self.record.draw += 1
        if outcome:
            result = 'Game Over: ' + outcome
            if not askyesno('PyToe', result + '\n\nPlay another game?'):
                self.onStats()
                self.quit()
                sys.exit()                         # don't return to caller
            else:
                self.clearBoard()                  # return and make move or wait for click
                # player who moved last moves second next

    #
    # miscellaneous
    #
    def onAbout(self):
        """Show the About dialog."""
        showinfo('PyToe 1.0', helptext)

    def onStats(self):
        """Show the session win/loss/draw tally."""
        showinfo('PyToe Stats',
                 'Your results:\n'
                 'wins: %(win)d, losses: %(loss)d, draws: %(draw)d'
                 % self.record.__dict__)
######################################
# subclass to customize move selection
######################################
#
# pick empty slot at random
#
class TicTacToeRandom(TicTacToeBase):
    """Machine strategy: choose any empty cell at random."""

    def pickMove(self):
        """Return a (row, col) tuple chosen uniformly from empty cells.

        Bug fix: the original iterated ``self.degree`` (an int) directly,
        which raises TypeError; it must iterate ``range(self.degree)``.
        """
        empties = []
        for row in range(self.degree):
            for col in range(self.degree):
                if self.board[row][col] == Empty:
                    empties.append((row, col))
        return random.choice(empties)
#
# pick imminent win or loss, else static score
#
class TicTacToeSmart(TicTacToeBase):
    """Machine strategy: play an immediate win or block, otherwise the
    empty cell with the best static score."""

    def pickMove(self):
        """Return the machine's (row, col) choice."""
        self.update(); time.sleep(1)                       # too fast!
        countMarks = self.countAcrossDown(), self.countDiagonal()
        # 1) take an immediate winning move if one exists
        for row in range(self.degree):
            for col in range(self.degree):
                move = (row, col)
                if self.board[row][col] == Empty:
                    if self.isWin(move, countMarks):
                        return move
        # 2) otherwise block the user's imminent win
        for row in range(self.degree):
            for col in range(self.degree):
                move = (row, col)
                if self.board[row][col] == Empty:
                    if self.isBlock(move, countMarks):
                        return move
        # 3) otherwise take the best statically-scored empty cell
        best = 0
        for row in range(self.degree):
            for col in range(self.degree):
                move = (row, col)
                if self.board[row][col] == Empty:
                    score = self.scoreMove(move, countMarks)
                    if score >= best:
                        pick = move
                        best = score
        trace('Picked', pick, 'score', best)
        return pick

    def countAcrossDown(self):
        """Count marks per (row, mark) and (col, mark).

        Sparse data structure: zero counts aren't added, so callers must
        use .get() with a default.
        """
        countRows = {}
        countCols = {}
        for row in range(self.degree):
            for col in range(self.degree):
                mark = self.board[row][col]
                try:
                    countRows[(row, mark)] += 1
                except KeyError:
                    countRows[(row, mark)] = 1
                try:
                    countCols[(col, mark)] += 1
                except KeyError:
                    countCols[(col, mark)] = 1
        return countRows, countCols

    def countDiagonal(self):
        """Count marks on both diagonals as {'X': n, 'O': n, ' ': n}."""
        tally = {'X':0, 'O':0, ' ':0}
        countDiag1 = tally.copy()
        for row in range(self.degree):
            col = row                            # main diagonal: row == col
            mark = self.board[row][col]
            countDiag1[mark] += 1
        countDiag2 = tally.copy()
        for row in range(self.degree):
            col = (self.degree-1) - row          # anti-diagonal
            mark = self.board[row][col]
            countDiag2[mark] += 1
        return countDiag1, countDiag2

    def isWin(self, T, countMarks):
        """True if playing the machine's mark at T wins immediately."""
        (row, col) = T
        self.board[row][col] = self.machineMark
        isWin = self.checkWin(self.machineMark)
        self.board[row][col] = Empty             # undo the trial move
        return isWin

    def isBlock(self, T, countMarks):
        """True if the user would win by playing at T (so block it)."""
        (row, col) = T
        self.board[row][col] = self.userMark
        isLoss = self.checkWin(self.userMark)
        self.board[row][col] = Empty             # undo the trial move
        return isLoss

    def scoreMove(self, T1, T2):
        """Static score for an empty cell, weighting machine marks, user
        marks and empties on its row, column and diagonals.

        Bug fix: the original summed countDiag1 twice in every group and
        never used the countDiag2 it computed, so the anti-diagonal was
        ignored in scoring.
        """
        (row, col) = T1
        ((countRows, countCols), (countDiag1, countDiag2)) = T2
        return (
            countCols.get((col, self.machineMark), 0) * 11 +
            countRows.get((row, self.machineMark), 0) * 11 +
            countDiag1[self.machineMark] * 11 +
            countDiag2[self.machineMark] * 11
            +
            countCols.get((col, self.userMark), 0) * 10 +
            countRows.get((row, self.userMark), 0) * 10 +
            countDiag1[self.userMark] * 10 +
            countDiag2[self.userMark] * 10
            +
            countCols.get((col, Empty), 0) * 11 +
            countRows.get((row, Empty), 0) * 11 +
            countDiag1[Empty] * 11 +
            countDiag2[Empty] * 11)
#
# static score based on 1 or 2 move lookahead
#
class TicTacToeExpert1(TicTacToeSmart):
    """Machine strategy: static score with 1- and 2-move-ahead patterns."""

    def pickMove(self):
        """Return the empty cell with the highest static score."""
        self.update(); time.sleep(1)
        countMarks = self.countAcrossDown(), self.countDiagonal()
        best = 0
        for row in range(self.degree):
            for col in range(self.degree):
                move = (row, col)
                if self.board[row][col] == Empty:
                    score = self.scoreMove(move, countMarks)
                    if score > best:
                        pick = move
                        best = score
        trace('Picked', pick, 'score', best)
        return pick

    def countAcrossDown(self):
        """Count marks per row and per column as full tally dicts.

        Unlike the sparse superclass version this is uniform with the
        diagonal tallies: no entries are missing.  Note tally * degree
        would fail here -- the copies must be independent dicts.
        """
        tally = {'X':0, 'O':0, ' ':0}
        countRows = []
        countCols = []
        for row in range(self.degree):
            countRows.append(tally.copy())
            countCols.append(tally.copy())
        for row in range(self.degree):
            for col in range(self.degree):
                mark = self.board[row][col]
                countRows[row][mark] += 1          # 3.x: += 1
                countCols[col][mark] += 1
        return countRows, countCols

    def scoreMove(self, T1, T2):                   # 3.x: no arg tuples
        """Score an empty cell by near-win/near-loss patterns on each
        line through it; fall back to a weighted mark count."""
        (row, col) = T1
        ((countRows, countCols), (countDiag1, countDiag2)) = T2
        score = 0
        mine = self.machineMark
        user = self.userMark
        # for empty slot (r,c):
        partof = [countRows[row], countCols[col]]  # check move row and col
        if row == col:                             # plus diagonals, if any
            partof.append(countDiag1)
        if row+col == self.degree-1:
            partof.append(countDiag2)
        for line in partof:
            if line[mine] == self.degree-1 and line[Empty] == 1:
                score += 51                        # 1 move to win
        for line in partof:
            if line[user] == self.degree-1 and line[Empty] == 1:
                score += 25                        # 1 move to loss
        for line in partof:
            if line[mine] == self.degree-2 and line[Empty] == 2:
                score += 10                        # 2 moves to win
        for line in partof:
            if line[user] == self.degree-2 and line[Empty] == 2:
                score += 8                         # 2 moves to loss
        for line in partof:
            if line[Empty] == self.degree:         # prefer openness
                score += 1
        if score:
            return score                           # detected pattern here?
        else:                                      # else use weighted scoring
            for line in partof:
                score += line[mine] * 3 + line[user] + line[Empty] * 2
            return score / float(self.degree)      # 3.x: float not really needed for /
#
# static score based on win or loss N moves ahead
#
class TicTacToeExpert2(TicTacToeExpert1):
    """Machine strategy: static score on win or loss up to N moves ahead."""

    def scoreMove(self, T1, T2):                   # 3.x: no arg tuples
        """Score an empty cell, weighting near-term wins most heavily.

        The weight decays by 9 per extra move needed; a win N moves out
        is worth three times a loss N moves out.
        """
        (row, col) = T1
        ((countRows, countCols), (countDiag1, countDiag2)) = T2
        score = 0
        mine = self.machineMark
        user = self.userMark
        # for empty slot (r,c):
        partof = [countRows[row], countCols[col]]  # check move row and col
        if row == col:                             # plus diagonals, if any
            partof.append(countDiag1)
        if row+col == self.degree-1:
            partof.append(countDiag2)
        weight = 3 ** (self.degree * 2)            # 3.x: not 3L, int does long
        for ahead in range(1, self.degree):
            for line in partof:
                if line[mine] == self.degree - ahead and line[Empty] == ahead:
                    score += weight
                if line[user] == self.degree - ahead and line[Empty] == ahead:
                    score += weight // 3
            weight = weight // 9                   # 3.x: need // for int div
        if score:
            return score                           # detected pattern here?
        else:                                      # else use weighted scoring
            for line in partof:
                score += line[mine] * 3 + line[user] + line[Empty] * 2
            return score / float(self.degree)      # 3.x: float() not really needed
#
# search ahead through moves and countermoves
#
class TicTacToeMinimax(TicTacToeExpert2):
    """Machine strategy: depth-limited minimax over moves and countermoves."""

    def pickMove(self):
        """Return the machine's move found by depth-limited minimax."""
        self.update()
        numMarks = self.degree ** 2
        for row in self.board:
            numMarks -= row.count(Empty)
        if numMarks == 0:
            # empty board: just take the centre, no search needed
            return (self.degree // 2, self.degree // 2)  # 3.x: // for int div
        else:
            # Bug fix: time.clock() was deprecated in 3.3 and removed in
            # Python 3.8; perf_counter() is the documented replacement.
            t1 = time.perf_counter()
            maxdepth = numMarks + 4
            score, pick = self.findMax(self.board, maxdepth)
            trace('Time to move:', time.perf_counter() - t1)
            if score == -1:
                # lookahead can be too pessimistic:
                # if best is a loss, use static score instead
                pick = TicTacToeExpert2.pickMove(self)
            return pick

    def checkLeaf(self, board):
        """Score a finished board from the machine's view: +1 win, -1
        loss, 0 draw, None if the game is still open."""
        if self.checkWin(self.machineMark, board):
            return +1
        elif self.checkWin(self.userMark, board):
            return -1
        elif self.checkDraw(board):
            return 0
        else:
            return None

    def findMax(self, board, depth):
        """Machine-move level: return (best score, move) maximising the
        minimum the user can force below."""
        if depth == 0:
            return 0, None          # could return static score here???
        term = self.checkLeaf(board)
        if term != None:            # depth cutoff or endgame detected
            return term, None
        best = -2
        pick = None                 # filled on first empty cell (always exists here)
        for row in range(self.degree):
            for col in range(self.degree):
                if board[row][col] == Empty:
                    board[row][col] = self.machineMark
                    below, m = self.findMin(board, depth-1)
                    board[row][col] = Empty          # undo trial move
                    if below >= best:
                        best = below
                        pick = (row, col)
        return best, pick

    def findMin(self, board, depth):
        """User-move level: assume the user plays their best (worst for
        the machine) and return (worst score, move)."""
        if depth == 0:
            return 0, None
        term = self.checkLeaf(board)
        if term != None:            # depth cutoff or endgame detected
            return term, None
        best = +2
        pick = None
        for row in range(self.degree):
            for col in range(self.degree):
                if board[row][col] == Empty:
                    board[row][col] = self.userMark
                    below, m = self.findMax(board, depth-1)
                    board[row][col] = Empty          # undo trial move
                    if below < best:
                        best = below
                        pick = (row, col)
        return best, pick
# moved to tictactoe.py:
# game object generator - external interface
# command-line logic
| gpl-2.0 |
motion2015/a3 | common/djangoapps/terrain/stubs/video_source.py | 181 | 1368 | """
Serve HTML5 video sources for acceptance tests
"""
from SimpleHTTPServer import SimpleHTTPRequestHandler
from .http import StubHttpService
from contextlib import contextmanager
import os
from logging import getLogger
LOGGER = getLogger(__name__)
class VideoSourceRequestHandler(SimpleHTTPRequestHandler):
    """
    Request handler for serving video sources locally.
    """
    def translate_path(self, path):
        """
        Map the request path onto the configured root directory and
        strip any extra query parameters.

        For example /gizmo.mp4?1397160769634
        becomes <root_dir>/gizmo.mp4
        """
        base = self.server.config.get('root_dir')
        full_path, _, _ = '{}{}'.format(base, path).partition('?')
        return full_path
class VideoSourceHttpService(StubHttpService):
    """
    Simple HTTP server for serving HTML5 Video sources locally for tests
    """
    # Handler that resolves request paths under the configured root_dir.
    HANDLER_CLASS = VideoSourceRequestHandler

    def __init__(self, port_num=0):
        @contextmanager
        def _remember_cwd():
            """
            Files are automatically served from the current directory
            so we need to change it, start the server, then set it back.
            """
            curdir = os.getcwd()
            try:
                yield
            finally:
                # always restore the original working directory, even if
                # the base constructor raises
                os.chdir(curdir)
        # NOTE(review): the base constructor presumably chdirs while
        # starting the server -- confirm against StubHttpService.
        with _remember_cwd():
            StubHttpService.__init__(self, port_num=port_num)
| agpl-3.0 |
crmccreary/openerp_server | openerp/addons/account/wizard/account_unreconcile.py | 9 | 2131 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv
class account_unreconcile(osv.osv_memory):
    _name = "account.unreconcile"
    _description = "Account Unreconcile"

    def trans_unrec(self, cr, uid, ids, context=None):
        """Drop the reconciliation from the move lines selected in the
        client context ('active_ids')."""
        move_line_obj = self.pool.get('account.move.line')
        if context is None:
            context = {}
        active_ids = context.get('active_ids', False)
        if active_ids:
            move_line_obj._remove_move_reconcile(cr, uid, active_ids,
                                                 context=context)
        return {'type': 'ir.actions.act_window_close'}
account_unreconcile()
class account_unreconcile_reconcile(osv.osv_memory):
    _name = "account.unreconcile.reconcile"
    _description = "Account Unreconcile Reconcile"

    def trans_unrec_reconcile(self, cr, uid, ids, context=None):
        """Unlink the account.move.reconcile records selected in the
        client context ('active_ids')."""
        obj_move_reconcile = self.pool.get('account.move.reconcile')
        if context is None:
            context = {}
        # Use .get() so a missing 'active_ids' behaves like an empty
        # selection instead of raising KeyError (consistent with
        # account_unreconcile.trans_unrec).
        rec_ids = context.get('active_ids')
        if rec_ids:
            obj_move_reconcile.unlink(cr, uid, rec_ids, context=context)
        return {'type': 'ir.actions.act_window_close'}
account_unreconcile_reconcile()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
rodo/django-perf | foo/loader/management/commands/delete_all.py | 1 | 1510 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013,2014 Rodolphe Quiédeville <rodolphe@quiedeville.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import time
from django.core.management.base import BaseCommand
from foo.loader.models import Item
from optparse import make_option
from faker import Faker
class Command(BaseCommand):
    """Management command that deletes every Item row."""
    help = 'Import datas'

    option_list = BaseCommand.option_list + (
        make_option("-n",
                    "--nbvalues",
                    dest="nbvalues",
                    help="number of values to input",
                    default=10),
    )

    def handle(self, *args, **options):
        """
        Delete all Item rows, printing the row count before and after.
        """
        # Fix: the original used Python-2-only print statements; the
        # print() call form works under both Python 2 and 3.
        # (The unused 'nbvalues' local was dropped; the option itself is
        # kept for interface compatibility.)
        print(Item.objects.all().count())
        Item.objects.all().delete()
        print(Item.objects.all().count())
| gpl-3.0 |
Snesi/spindl | spindl_lib/charter.py | 1 | 14820 | #!/usr/bin/python
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2013 <Zane Swafford> <zane@zaneswafford.com>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
### END LICENSE
from pygal import Pie, Bar
from pygal.style import Style
from timeFormat import time_in_span, tuple_time, unformat_time
from os import system
from operator import itemgetter
from datetime import timedelta
from math import ceil
from gi.repository import GLib
CONST_MAX_DATA_ENTRIES = 15
CONST_MAX_VERTICAL_ENTRIES = 20
CONST_COLOR_LIST = ('#729fcf', '#ef2929', '#fce94f', '#8ae234', '#ad7fa8',
'#fcaf3e', '#3465a4', '#cc0000', '#edd400', '#73d216',
'#75507b', '#f57900', '#204a87', '#a40000', '#c4a000',
'#4e9a06', '#5c3566', '#ce5c00', '#d3d7cf')
class Charter:
    """Render pie/bar charts of time-tracking data to an SVG file with pygal
    and display the result in a GTK WebKit webview.

    ``self.data`` holds tuples: totals as ``(label, time, color)`` and raw
    logs as ``(activity, start, end, color)``; colors are indexes into
    ``CONST_COLOR_LIST``.  This module targets Python 2 (``xrange``,
    integer division).
    """

    def __init__(self, font, filepath, webview, webview_window,
                 loading_spinner):
        # Font substituted into the rendered SVG (see fix_font()).
        self.font = font
        # Path the rendered SVG chart is written to.
        self.filepath = filepath
        self.webview = webview
        # Turn off the right click menu for the webview
        self.webview.props.settings.props.enable_default_context_menu = False
        self.webview_window = webview_window
        self.loading_spinner = loading_spinner
        self.loading_spinner.set_visible(False)
        # Chart data entries; see the class docstring for the tuple shapes.
        self.data = []
        # Chart type: 'pie', 'bar' or None (not yet chosen).
        self.type = None
        # Size is a tuple of (width, height)
        self.size = (450, 350)
        self.chart = None
        # Mutable working copy of CONST_COLOR_LIST, reordered per-chart by
        # sort_colorlist().
        self.colorlist = ['#729fcf', '#ef2929', '#fce94f', '#8ae234', '#ad7fa8',
                          '#fcaf3e', '#3465a4', '#cc0000', '#edd400', '#73d216',
                          '#75507b', '#f57900', '#204a87', '#a40000', '#c4a000',
                          '#4e9a06', '#5c3566', '#ce5c00', '#d3d7cf']
        #self.sort_colorlist()
        # The custom pygal style used for the pie graph.
        self.style = Style(background='#F7F6F6',
                           plot_background='#F7F6F6',
                           foreground='#888a85',
                           foreground_light='#888a85',
                           foreground_dark='#555753',
                           opacity='.6',
                           opacity_hover='.9',
                           transition='200ms ease-in',
                           colors=(self.colorlist))
        self.visible = True

    def add_entry(self, label, time, color):
        """Adds an entry to data and gives it a label, time, and color"""
        # If the color is not set
        if color == None:
            # Set the color to light grey (last slot in the colorlist)
            color = len(self.colorlist)-1
        # If color is specified
        else:
            # Make sure it is a valid color index by wrapping it back into
            # the usable range (the last slot is reserved for 'Other ')
            while color >= len(self.colorlist)-1:
                color -= len(self.colorlist)-1
        # add the entry to the data
        self.data.append((label, time, color))

    def compound_other_data(self, data):
        """Compounds smallest data entries into a single 'Other ' entry.

        Necessary to keep the legend from growing larger than the widget it
        is contained in.  Entries worth less than 1% of the total, and any
        overflow beyond CONST_MAX_DATA_ENTRIES, are folded together.
        """
        # Get the sum of all values (the [1] index in the entries)
        sum_of_values = 0
        for entry in data:
            sum_of_values += entry[1]
        # Set the minimum amount to one percent of the total amount
        minimum_amount = 0.01 * sum_of_values
        # Create a list item 'other' and give it a value of 0 and the last
        # color in the CONST_COLOR_LIST.
        other = ['Other ', 0, len(CONST_COLOR_LIST)-1]
        entries_to_compound = []
        entries_compounded = False
        for entry in data:
            if entry[1] <= minimum_amount:
                other[1] += entry[1]
                entries_to_compound.append(entry)
                entries_compounded = True
        # Only fold when more than one entry would be compounded (folding a
        # single entry would not shrink the legend).
        if len(entries_to_compound) > 1:
            for entry in entries_to_compound:
                del data[data.index(entry)]
        # If the data still has too many entries, compound the smallest into
        # the 'Other' entry
        if len(data) > CONST_MAX_DATA_ENTRIES:
            self.sort_data_by_size(data)
            entries_to_compound = []
            for entry in xrange((len(data) - CONST_MAX_DATA_ENTRIES)):
                other[1] += data[entry][1]
                entries_to_compound.append(data[entry])
                entries_compounded = True
            for entry in entries_to_compound:
                del data[data.index(entry)]
        if entries_compounded:
            data.append(other)

    def create_chart(self, chart_type=None, span=None):
        """Creates a chart of the given type based on the data.

        chart_type, when given, also becomes the new default self.type.
        """
        if not chart_type == None:
            self.type = chart_type
        if self.type == 'pie':
            self.create_pie_chart(self.data, span)
        elif self.type == 'bar':
            self.create_bar_chart(self.data, span)

    def create_pie_chart(self, data=None, span='all', no=None):
        """Creates a pie chart from the data.

        When span is given (not None/'all'), self.data is treated as raw
        logs and clipped to the span; otherwise *data* is treated as
        pre-computed totals.  ``no`` is unused but kept for callers.
        """
        # Create the list of objects to be added to the chart
        chart_list = []
        # If the span has been specified, then get the logs only for that time
        if not span == None and not span == 'all':
            # Iterate through the log data.
            for log in self.data:
                # Get and format the information we need from the log.
                activity = log[0]
                log_start = unformat_time(tuple_time(log[1]))
                log_end = unformat_time(tuple_time(log[2]))
                color = log[3]
                minimum = unformat_time(span[1])
                maximum = unformat_time(span[2])
                # Add the time and activity to the chart_list.
                log_time = time_in_span(log_start, log_end, minimum, maximum)
                # Check if the activity has already been added to chart_list.
                in_chart_list = False
                for entry in chart_list:
                    # If the activity is in the chart_list, make a note and
                    # add its time to the existing list item.
                    if entry[0] == activity:
                        entry[1] += log_time
                        in_chart_list = True
                # If the log is not in the chart_list and it is in the span,
                # add it to the chart_list.
                if not in_chart_list and log_time > 0:
                    chart_list.append([activity, log_time, color])
        else:
            # If span is not specified then the data are totals.
            # Set the chart_list equal to the total data.
            for total in data:
                chart_list.append((total[0], total[2], total[3]))
        # Data must be organized (compounded/sorted/colored) before charting
        self.sort(chart_list)
        # If size has been specified
        if not self.size == (None, None):
            self.chart = Pie(style=self.style,
                             print_values=False,
                             fill=True,
                             human_readable=True,
                             include_x_axis=True,
                             width=self.size[0],
                             height=self.size[1])
        # If size has not already been specified
        else:
            # Let the graph dynamically resize within webview
            self.chart = Pie(style=self.style, print_values=False, fill=True,
                             human_readable=True, include_x_axis=True)
        # Add each entry in the chart_list to the chart
        if not chart_list == []:
            for entry in chart_list:
                self.chart.add(entry[0], entry[1])

    def create_bar_chart(self, data, span):
        """Creates a bar chart from raw log *data* clipped to *span*."""
        # Initialize the chart_list
        chart_list = []
        for log in data:
            activity_time = 0
            activity = log[0]
            log_start = unformat_time(tuple_time(log[1]))
            log_end = unformat_time(tuple_time(log[2]))
            color = log[3]
            minimum = span[1]
            maximum = span[2]
            minimum = unformat_time(minimum)
            maximum = unformat_time(maximum)
            activity_time += time_in_span(log_start, log_end, minimum, maximum)
            in_chart_list = False
            for entry in chart_list:
                if entry[0] == activity:
                    entry[1] += activity_time
                    in_chart_list = True
            if not in_chart_list and activity_time > 0:
                chart_list.append([activity, activity_time, color])
        self.sort(chart_list)
        # If size has been specified
        if not self.size == (None, None):
            self.chart = Bar(style=self.style, y_scale=60.0,
                             print_values=False, include_x_axis=True,
                             width=self.size[0], height=self.size[1])
        # If size has not already been specified
        else:
            # Let the graph dynamically resize within webview
            self.chart = Bar(style=self.style, print_values=False,
                             include_x_axis=True, y_scale=60.0)
        self.set_y_labels(chart_list)
        # Add each entry in the chart_list to the chart, labelling each bar
        # with its duration formatted as H:MM:SS
        if not chart_list == []:
            for entry in chart_list:
                time = str(timedelta(seconds=entry[1]))
                # timedelta renders "H:MM:SS"; zero-pad single-digit hours
                if time[1] == ':':
                    time = '0' + time
                self.chart.add(entry[0], [{'value':entry[1], 'label':time}])
        else:
            # No data in span: fall back to an empty pie as a placeholder
            self.chart = Pie(style=self.style, width=self.size[0],
                             height=self.size[1])

    def set_y_labels(self, chart_list):
        """Sets the y labels on a bar chart.

        Picks a tick interval (1 to 60 minutes) scaled to the longest bar;
        values are in seconds and converted to time strings later by
        convert_y_axis_to_time().
        """
        maximum_time_in_seconds = 0
        for entry in chart_list:
            if entry[1] > maximum_time_in_seconds:
                maximum_time_in_seconds = entry[1]
        max_number_of_minutes = int(ceil(maximum_time_in_seconds/60))+2
        y_labels = []
        if max_number_of_minutes > 2:
            if max_number_of_minutes < 30:
                for minute in xrange(max_number_of_minutes+1):
                    y_labels.append(minute*60)
            elif max_number_of_minutes >= 30 and max_number_of_minutes < 60:
                for minute in xrange((max_number_of_minutes/5)+1):
                    y_labels.append(minute*60*5)
            elif max_number_of_minutes >= 60 and max_number_of_minutes < 120:
                for minute in xrange((max_number_of_minutes/10)+2):
                    y_labels.append(minute*60*10)
            elif max_number_of_minutes >= 120 and max_number_of_minutes < 240:
                for minute in xrange((max_number_of_minutes/15)+1):
                    y_labels.append(minute*60*15)
            elif max_number_of_minutes >= 240 and max_number_of_minutes < 480:
                for minute in xrange((max_number_of_minutes/20)+1):
                    y_labels.append(minute*60*20)
            elif max_number_of_minutes >= 480 and max_number_of_minutes < 960:
                for minute in xrange((max_number_of_minutes/30)+1):
                    y_labels.append(minute*60*30)
            elif max_number_of_minutes >= 960:
                for minute in xrange((max_number_of_minutes/60)+1):
                    y_labels.append(minute*3600)
        else:
            # Under two minutes total: label every second
            for second in xrange((maximum_time_in_seconds)+1):
                y_labels.append(second)
        self.chart.y_labels = y_labels

    def convert_y_axis_to_time(self, label):
        """Converts y axis labels in the SVG from seconds to H:MM:SS by
        rewriting the rendered file in place with sed."""
        y_value_in_time = ''
        y_value_in_time = str(timedelta(seconds=int(label)))
        if y_value_in_time[1] == ':':
            y_value_in_time = '0' + y_value_in_time
        if not self.filepath == None:
            convert_y_axis = ("sed -i 's/class=\\\"\\\">%s.0/class=\\\"\\\"" +
                              ">%s/g' " + self.filepath) % (str(label),
                                                            y_value_in_time)
            system(convert_y_axis)
            # Then convert the major y axises (The zeroeth and first amounts)
            # to a formatted time if the label is a major axis.
            convert_major_y_axis = ("sed -i 's/class=\\\"major\\\">%s.0/" +
                                    "class=\\\"major\\\">%s/g' " +
                                    self.filepath) % (str(label),
                                                      y_value_in_time)
            system(convert_major_y_axis)

    def fix_tooltip(self):
        """Changes the SVG file's default mouseover tooltip to no longer
        contain the value for time in seconds"""
        if not self.filepath == None:
            system(("sed -i 's/<desc class=\"value\">[0-9]*<\/desc>//g' " +
                    self.filepath))

    def clear(self):
        """Resets the data and chart"""
        self.data = []
        self.chart = None

    def sort_data_by_size(self, data):
        """Sorts *data* in place by time, smallest first, keeping any
        'Other ' entry at the front of the list."""
        data.sort(key=itemgetter(1))
        # Make sure that the Other entry leads the list if it exists
        for entry in data:
            if entry[0] == 'Other ':
                data.insert(0, data.pop(data.index(entry)))

    def sort_colorlist(self, data):
        """Used to make the order of the color_list match the order of the
        pie_list's activity colors"""
        # Create an empty list to put the sorted colors in
        sorted_colorlist = []
        # Iterate through the chart data
        for entry in data:
            # Get the specified color from the chart data
            color = int(entry[2])
            # Arrange the colorlist so that the given datum receives that
            # color; out-of-range indexes wrap around ('Other ' keeps grey)
            if color < (len(CONST_COLOR_LIST)-1) or entry[0] == 'Other ':
                sorted_colorlist.append(CONST_COLOR_LIST[color])
            else:
                sorted_colorlist.append(CONST_COLOR_LIST[(color-(len(CONST_COLOR_LIST)-1))])
        # Set the colorlist to the sorted_colorlist
        self.colorlist = sorted_colorlist
        if not self.colorlist == []:
            # Rebuild the style so pygal picks up the reordered colors
            self.style = Style(background='#F7F6F6',
                               plot_background='#F7F6F6',
                               foreground='#888a85',
                               foreground_light='#888a85',
                               foreground_dark='#555753',
                               opacity='.6',
                               opacity_hover='.9',
                               transition='200ms ease-in',
                               colors=(self.colorlist))

    def sort(self, data):
        """Compound, sort and color the chart data in place"""
        if not data == []:
            self.compound_other_data(data)
            self.sort_data_by_size(data)
            self.sort_colorlist(data)

    def send_to_svg(self):
        """Send the prepared chart to an SVG file"""
        self.chart.render_to_file(self.filepath)
        # Set the font in the svg file to the font specified during __init__
        self.fix_font()
        # Bar charts (the only charts given y_labels) also need their axis
        # labels rewritten as times and their tooltips cleaned up
        if hasattr(self.chart, 'y_labels'):
            self.fix_tooltip()
            for label in self.chart.y_labels:
                self.convert_y_axis_to_time(label)

    def fix_font(self):
        """Changes the SVG file's default font (monospace) to the font
        specified when the charter was initialized"""
        if not self.font == None:
            system(("sed -i 's/font-family:monospace/font-family:" + self.font
                    + "/g' " + self.filepath))

    def start_loading_animation(self):
        """Callback to start loading animation"""
        GLib.timeout_add(400, self.get_loading_animation)

    def get_loading_animation(self):
        """Checks to see whether or not we should continue loading animation.

        Returning True keeps the GLib timeout alive; False cancels it.
        """
        if self.visible:
            chart_loading = not (str(self.webview.get_load_status()) == '<enum WEBKIT_LOAD_FAILED of type WebKitLoadStatus>'
                or str(self.webview.get_load_status()) == '<enum WEBKIT_LOAD_FINISHED of type WebKitLoadStatus>')
            if not chart_loading:
                self.loading_spinner.stop()
                self.loading_spinner.set_visible(False)
                self.webview_window.set_visible(True)
            return chart_loading
        else:
            return False

    def load_into_webview(self, initial=False):
        """Load the SVG file for the chart into the webview"""
        self.send_to_svg()
        if initial:
            self.webview.open(self.filepath)
        else:
            self.webview.reload()
        if self.visible:
            # Hide the chart and spin until the webview finishes loading
            self.webview_window.set_visible(False)
            self.loading_spinner.set_visible(True)
            self.loading_spinner.start()
            self.start_loading_animation()

    def set_visible(self, visible=True):
        """Show/hide the chart window, hiding the spinner when invisible"""
        self.visible = visible
        self.webview_window.set_visible(visible)
        if not visible:
            self.loading_spinner.set_visible(False)
jenshnielsen/basemap | examples/maskoceans.py | 4 | 1922 | from mpl_toolkits.basemap import Basemap, shiftgrid, maskoceans, interp
import numpy as np
import matplotlib.pyplot as plt
# example showing how to mask out 'wet' areas on a contour or pcolor plot.
topodatin = np.loadtxt('etopo20data.gz')
lonsin = np.loadtxt('etopo20lons.gz')
latsin = np.loadtxt('etopo20lats.gz')
# shift data so lons go from -180 to 180 instead of 20 to 380.
topoin,lons1 = shiftgrid(180.,topodatin,lonsin,start=False)
lats1 = latsin
fig=plt.figure()
# setup basemap
m=Basemap(resolution='l',projection='lcc',lon_0=-100,lat_0=40,width=8.e6,height=6.e6)
lons, lats = np.meshgrid(lons1,lats1)
x, y = m(lons, lats)
# interpolate land/sea mask to topo grid, mask ocean values.
# output may look 'blocky' near coastlines, since data is at much
# lower resolution than land/sea mask.
topo = maskoceans(lons, lats, topoin)
# make contour plot (ocean values will be masked)
CS=m.contourf(x,y,topo,np.arange(-300,3001,50),cmap=plt.cm.jet,extend='both')
#im=m.pcolormesh(x,y,topo,cmap=plt.cm.jet,vmin=-300,vmax=3000)
# draw coastlines.
m.drawcoastlines()
plt.title('ETOPO data with marine areas masked (original grid)')
fig=plt.figure()
# interpolate topo data to higher resolution grid (to better match
# the land/sea mask). Output looks less 'blocky' near coastlines.
nlats = 3*topoin.shape[0]
nlons = 3*topoin.shape[1]
lons = np.linspace(-180,180,nlons)
lats = np.linspace(-90,90,nlats)
lons, lats = np.meshgrid(lons, lats)
x, y = m(lons, lats)
topo = interp(topoin,lons1,lats1,lons,lats,order=1)
# interpolate land/sea mask to topo grid, mask ocean values.
topo = maskoceans(lons, lats, topo)
# make contour plot (ocean values will be masked)
CS=m.contourf(x,y,topo,np.arange(-300,3001,50),cmap=plt.cm.jet,extend='both')
#im=m.pcolormesh(x,y,topo,cmap=plt.cm.jet,vmin=-300,vmax=3000)
# draw coastlines.
m.drawcoastlines()
plt.title('ETOPO data with marine areas masked (data on finer grid)')
plt.show()
# license: gpl-2.0
# repo: Dexus/ubuntu-trusty-postfix  path: debian/tests/test-postfix.py
#!/usr/bin/python
#
# test-postfix.py quality assurance test script for postfix
# Copyright (C) 2008-2012 Canonical Ltd.
# Author: Kees Cook <kees@ubuntu.com>
# Author: Marc Deslauriers <marc.deslauriers@canonical.com>
# Author: Jamie Strandboge <jamie@canonical.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3,
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# QRT-Packages: postfix sasl2-bin procmail python-pexpect
# QRT-Privilege: root
# QRT-Conflicts: exim4
'''
Note: When installing postfix, select "Internet Site". This script will
not work if "Local Only" was selected.
How to run against a clean schroot named 'hardy':
schroot -c hardy -u root -- sh -c 'apt-get -y install procmail python-unit postfix sasl2-bin python-pexpect lsb-release && ./test-postfix.py -v'
Tests:
00: setup
10: basic plain auth setup
11: above, but with CVE reproducers
20: sasl non-PLAIN setup
21: 20, but with CVE reproducers
99: restore configs
'''
import unittest, subprocess, re, pexpect, smtplib, socket, os, time, tempfile
import testlib
class PostfixTest(testlib.TestlibCase):
'''Test Postfix MTA.'''
def _setUp(self):
'''Create server configs.'''
# Move listener to localhost:2525
conf_file = '/etc/postfix/master.cf'
lines = open(conf_file)
contents = ''
for cfline in lines:
if cfline.startswith('smtp') and 'smtpd' in cfline and 'inet' in cfline:
contents += '127.0.0.1:2525 inet n - - - - smtpd\n'
else:
contents += "%s\n" % cfline
testlib.config_replace(conf_file, contents, append=False)
conf_file = '/etc/postfix/main.cf'
# Use mbox only
testlib.config_comment(conf_file,'home_mailbox')
testlib.config_set(conf_file,'mailbox_command','procmail -a "$EXTENSION"')
# Turn on sasl
self._setup_sasl("PLAIN")
reply = self._check_auth("PLAIN")
def setUp(self):
'''Set up prior to each test_* function'''
# list of files that we update
self.conf_files = [ '/etc/postfix/master.cf', '/etc/postfix/main.cf', '/etc/default/saslauthd', '/etc/postfix/sasl/smtpd.conf', '/etc/sasldb2']
self.user = testlib.TestUser(lower=True)
self.s = None
# Silently allow for this connection to fail, to handle the
# initial setup of the postfix server.
try:
self.s = smtplib.SMTP('localhost', port=2525)
except:
pass
def _tearDown(self):
'''Restore server configs'''
for f in self.conf_files:
testlib.config_restore(f)
# put saslauthd back
for f in ['/var/spool/postfix/var/run/saslauthd', '/var/run/saslauthd']:
if os.path.isfile(f) or os.path.islink(f):
os.unlink(f)
elif os.path.exists(f):
testlib.recursive_rm(f)
subprocess.call(['mkdir','-p','/var/run/saslauthd'])
subprocess.call(['/etc/init.d/saslauthd', 'stop'], stdout=subprocess.PIPE)
subprocess.call(['/etc/init.d/saslauthd', 'start'], stdout=subprocess.PIPE)
def tearDown(self):
'''Clean up after each test_* function'''
try:
self.s.quit()
except:
pass
self.user = None
def _restart_server(self):
'''Restart server'''
subprocess.call(['/etc/init.d/postfix', 'stop'], stdout=subprocess.PIPE)
assert subprocess.call(['/etc/init.d/postfix', 'start'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) == 0
# Postfix exits its init script before the master listener has started
time.sleep(2)
def _setup_sasl(self, mech, other_mech="", force_sasldb=False):
'''Setup sasl for mech'''
conf_file = '/etc/postfix/main.cf'
for field in ['smtpd_sasl_type','smtpd_sasl_local_domain','smtpd_tls_auth_only']:
testlib.config_comment(conf_file,field)
testlib.config_set(conf_file,'smtpd_sasl_path','smtpd')
testlib.config_set(conf_file,'smtpd_sasl_auth_enable','yes')
#testlib.config_set(conf_file,'broken_sasl_auth_clients','yes')
testlib.config_set(conf_file,'smtpd_sasl_authenticated_header','yes')
testlib.config_set(conf_file,'smtpd_tls_loglevel','2')
# setup smtpd.conf and the sasl users
contents = ''
self.assertTrue(mech in ['LOGIN', 'PLAIN', 'CRAM-MD5', 'DIGEST-MD5'], "Invalid mech: %s" % mech)
if not force_sasldb and (mech == "PLAIN" or mech == "LOGIN"):
conf_file = '/etc/default/saslauthd'
testlib.config_set(conf_file, 'START', 'yes', spaces=False)
contents = '''
pwcheck_method: saslauthd
allowanonymouslogin: 0
allowplaintext: 1
mech_list: %s %s
''' % (mech, other_mech)
# attach SASL to postfix chroot
subprocess.call(['mkdir','-p','/var/spool/postfix/var/run/saslauthd'])
subprocess.call(['rm','-rf','/var/run/saslauthd'])
subprocess.call(['ln','-s','/var/spool/postfix/var/run/saslauthd','/var/run/saslauthd'])
subprocess.call(['/etc/init.d/saslauthd', 'stop'], stdout=subprocess.PIPE)
assert subprocess.call(['/etc/init.d/saslauthd', 'start'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) == 0
# Force crackful perms so chroot'd postfix can talk to saslauthd
subprocess.call(['chmod','o+x','/var/spool/postfix/var/run/saslauthd'])
else:
plaintext = "1"
if mech == "LOGIN" or mech == "PLAIN":
plaintext = "0"
contents = '''
pwcheck_method: auxprop
allowanonymouslogin: 0
allowplaintext: %s
mech_list: %s %s
''' % (plaintext, mech, other_mech)
# Add user to sasldb2
testlib.config_replace("/etc/sasldb2", '', append=False)
rc, report = testlib.cmd(['postconf', '-h', 'myhostname'])
expected = 0
result = 'Got exit code %d, expected %d\n' % (rc, expected)
self.assertEquals(expected, rc, result + report)
child = pexpect.spawn('saslpasswd2 -c -u %s %s' % (report.strip(), self.user.login))
time.sleep(0.2)
child.expect(r'(?i)password', timeout=5)
time.sleep(0.2)
child.sendline(self.user.password)
time.sleep(0.2)
child.expect(r'.*(for verification)', timeout=5)
time.sleep(0.2)
child.sendline(self.user.password)
time.sleep(0.2)
rc = child.expect('\n', timeout=5)
time.sleep(0.2)
self.assertEquals(rc, expected, "passwd returned %d" %(rc))
child.kill(0)
os.chmod("/etc/sasldb2", 0640)
rc, report = testlib.cmd(['chgrp', 'postfix', '/etc/sasldb2'])
expected = 0
result = 'Got exit code %d, expected %d\n' % (rc, expected)
self.assertEquals(expected, rc, result + report)
# Force crackful perms so chroot'd postfix can talk to saslauthd
subprocess.call(['mv', '-f', '/etc/sasldb2', '/var/spool/postfix/etc'])
subprocess.call(['ln', '-s', '/var/spool/postfix/etc/sasldb2', '/etc/sasldb2'])
conf_file = '/etc/postfix/sasl/smtpd.conf'
testlib.config_replace(conf_file, contents, append=False)
# Restart server
self._restart_server()
def _is_listening(self):
'''Is the server listening'''
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
s.connect(('localhost',2525))
greeting = s.recv(1024)
# 220 gorgon.outflux.net ESMTP Postfix (Ubuntu)
self.assertTrue(greeting.startswith('220 '),greeting)
self.assertTrue('ESMTP' in greeting,greeting)
self.assertTrue('Postfix' in greeting,greeting)
self.assertFalse('MTA' in greeting,greeting)
s.close()
def test_00_listening(self):
'''Postfix is listening'''
# Get the main instance running
self._setUp()
self._is_listening()
def _vrfy(self, address, valid = True):
self.s.putcmd("vrfy",address)
code, msg = self.s.getreply()
reply = '%d %s' % (code, msg)
if valid:
self.assertEquals(code, 252, reply)
self.assertTrue(address in msg, reply)
else:
self.assertEquals(code, 550, reply)
self.assertTrue('Recipient address rejected' in msg, reply)
self.assertTrue('<%s>' % (address) in msg, reply)
def test_10_commands(self):
'''Basic SMTP commands'''
#s = smtplib.SMTP('localhost', port=2525)
# EHLO
code, msg = self.s.ehlo()
reply = '%d %s' % (code, msg)
self.assertEquals(code, 250, reply)
self.assertEquals(self.s.does_esmtp, 1, reply)
self.assertTrue('8BITMIME' in self.s.ehlo_resp, reply)
# No help available
self.s.putcmd("help")
code, msg = self.s.getreply()
reply = '%d %s' % (code, msg)
self.assertEquals(code, 502, reply)
self.assertTrue('Error' in msg, reply)
# VRFY addresses
self._vrfy('address@example.com', valid=True)
self._vrfy('does-not-exist', valid=False)
self._vrfy(self.user.login, valid=True)
def _test_deliver_mail(self, user_sent_to, auth_user=None, auth_pass=None, use_tls=False):
'''Perform mail delivery'''
if auth_user and auth_pass:
self.s.login(auth_user, auth_pass)
if use_tls:
self.s.starttls()
failed = self.s.sendmail('root',[user_sent_to.login,'does-not-exist'],'''From: Rooty <root>
To: "%s" <%s>
Subject: This is test 1
Hello, nice to meet you.
''' % (user_sent_to.gecos, user_sent_to.login))
#for addr in failed.keys():
# print '%s %d %s' % (addr, failed[addr][0], failed[addr][1])
self.assertEquals(len(failed),1,failed)
self.assertTrue(failed.has_key('does-not-exist'),failed)
self.assertEquals(failed['does-not-exist'][0],550,failed)
# Frighteningly, postfix seems to accept email before confirming
# a successful write to disk for the recipient!
time.sleep(2)
def _test_mail_in_spool(self, user_directed_to, target_spool_user=None, spool_file=None, auth_user=None, use_tls=False):
'''Check that mail arrived in the spool'''
# Handle the case of forwarded emails
if target_spool_user == None:
target_spool_user = user_directed_to
# Read delivered email
if spool_file == None:
spool_file = '/var/mail/%s' % (target_spool_user.login)
time.sleep(1)
contents = open(spool_file).read()
# Server-side added headers...
self.assertTrue('\nReceived: ' in contents, contents)
if use_tls and self.lsb_release['Release'] > 6.06:
expected = ' (Postfix) with ESMTPS id '
else:
expected = ' (Postfix) with ESMTP id '
if auth_user:
if self.lsb_release['Release'] < 8.04:
self._skipped("Received header portion")
else:
expected = ' (Postfix) with ESMTPA id '
self.assertTrue('(Authenticated sender: %s)' % (auth_user))
self.assertTrue(expected in contents, 'Looking for "%s" in email:\n%s' % (expected, contents))
self.assertTrue('\nMessage-Id: ' in contents, contents)
self.assertTrue('\nDate: ' in contents, contents)
# client-side headers/body...
self.assertTrue('\nSubject: This is test 1' in contents, contents)
self.assertTrue('\nFrom: Rooty' in contents, contents)
self.assertTrue('\nTo: "Buddy %s" <%s@' % (user_directed_to.login, user_directed_to.login) in contents, contents)
self.assertTrue('\nHello, nice to meet you.' in contents, contents)
def _test_roundtrip_mail(self, user_sent_to, user_to_check=None, spool_file=None, auth_user=None, auth_pass=None, use_tls=False):
'''Send and check email delivery'''
self._test_deliver_mail(user_sent_to, auth_user, auth_pass, use_tls=use_tls)
self._test_mail_in_spool(user_sent_to, user_to_check, spool_file, auth_user=auth_user, use_tls=use_tls)
def test_10_sending_mail_direct(self):
'''Mail delivered normally'''
self._test_roundtrip_mail(self.user)
def test_10_sending_mail_direct_with_tls(self):
'''Mail delivered normally with TLS'''
self._test_roundtrip_mail(self.user, use_tls=True)
def test_10_sending_mail_direct_auth(self):
'''Mail authentication'''
# Verify rejected bad password and user
self.assertRaises(smtplib.SMTPAuthenticationError, self.s.login, 'root', 'crapcrapcrap')
self.assertRaises(smtplib.SMTPAuthenticationError, self.s.login, self.user.login, 'crapcrapcrap')
self.s.login(self.user.login, self.user.password)
def test_10_sending_mail_direct_auth_full(self):
'''Mail delivered with authentication'''
# Perform end-to-end authentication test
self._test_roundtrip_mail(self.user, auth_user=self.user.login, auth_pass=self.user.password)
def _write_forward(self, user, contents):
forward_filename = '/home/%s/.forward' % (user.login)
open(forward_filename,'w').write(contents)
os.chown(forward_filename, user.uid, user.gid)
def test_10_sending_mail_forward_normal(self):
'''Mail delivered via .forward'''
forward_user = testlib.TestUser(lower=True)
self._write_forward(forward_user, self.user.login+'\n')
self._test_roundtrip_mail(forward_user, self.user)
def test_10_sending_mail_forward_xternal(self):
'''Mail processed by commands in .forward'''
# Create user-writable redirected mbox destination
mbox, mbox_name = testlib.mkstemp_fill('',prefix='test-postfix.mbox-')
mbox.close()
os.chown(mbox_name, self.user.uid, self.user.gid)
# Create a script to run in the .forward
redir, redir_name = testlib.mkstemp_fill('''#!/bin/bash
/bin/cat > "%s"
''' % (mbox_name),prefix='test-postfix.redir-')
redir.close()
os.chmod(redir_name,0755)
self._write_forward(self.user,'|%s\n' % (redir_name))
# SKIP TESTING, FAILS IN TESTBED
#self._test_roundtrip_mail(self.user, spool_file=mbox_name)
os.unlink(redir_name)
os.unlink(mbox_name)
def test_11_security_CVE_2008_2936(self):
'''CVE-2008-2936 fixed'''
# First, create our "target" file
secret = '/root/secret.txt'
open(secret,'w').write('Secret information\n')
os.chmod(secret, 0700)
# Now, create a symlink to the target (we're going to use /var/tmp
# since we're assuming it, /root, /var/mail are on the same filesystem.
# For most chroot testing, /tmp is mounted from the real machine.
if os.path.exists('/var/tmp/secret.link'):
os.unlink('/var/tmp/secret.link')
self.assertEquals(subprocess.call(['su','-c','ln -s /root/secret.txt /var/tmp/secret.link',self.user.login]),0,"Symlink creation")
# Now, the hardlink, which in ubuntu's case needs to be done by root.
os.link('/var/tmp/secret.link','/var/mail/%s' % (self.user.login))
# Email delivered to this user will be written to the root-owned
# file now if the CVE is unfixed.
failed = self.s.sendmail('root',[self.user.login],'''From: Evil <root>
To: "%s" <%s>
Subject: This is an overwrite test
Hello, nice to pwn you.
''' % (self.user.gecos, self.user.login))
self.assertEquals(len(failed),0,failed)
# Pause for delivery
time.sleep(2)
contents = open(secret).read()
# Clean up before possible failures
os.unlink('/var/mail/%s' % (self.user.login))
os.unlink('/var/tmp/secret.link')
os.unlink(secret)
# Check results
self.assertTrue('Secret information' in contents, contents)
self.assertFalse('nice to pwn you' in contents, contents)
def _check_auth(self, mech):
'''Check AUTH: side effect-- self.s is set'''
try:
self.s.quit()
except:
pass
self.s = smtplib.SMTP('localhost', port=2525)
self._is_listening()
# has mech
code, msg = self.s.ehlo()
reply = '%d %s' % (code, msg)
self.assertEquals(code, 250, reply)
self.assertEquals(self.s.does_esmtp, 1, reply)
self.assertTrue('%s' % mech in self.s.ehlo_resp, reply)
return reply
def test_20_sasldb_cram_md5(self):
'''Test sasldb CRAM-MD5'''
# Quit the setUp() connection, restart the server and reconnect
self.s.quit()
self._setup_sasl("CRAM-MD5")
reply = self._check_auth("CRAM-MD5")
self.assertTrue('PLAIN' not in reply, reply)
# Verify rejected bad password and user
self.assertRaises(smtplib.SMTPAuthenticationError, self.s.login, 'root', 'crapcrapcrap')
self.assertRaises(smtplib.SMTPAuthenticationError, self.s.login, self.user.login, 'crapcrapcrap')
# Perform end-to-end authentication test
self._test_roundtrip_mail(self.user, auth_user=self.user.login, auth_pass=self.user.password)
def test_20_sasldb_digest_md5(self):
'''Test sasldb DIGEST-MD5 is supported'''
# Quit the setUp() connection, restart the server and reconnect
self.s.quit()
self._setup_sasl("DIGEST-MD5")
reply = self._check_auth("DIGEST-MD5")
self.assertTrue('PLAIN' not in reply, reply)
# TODO: Perform end-to-end authentication test (need alternative to smtplib)
#self.assertRaises(smtplib.SMTPAuthenticationError, self.s.login, 'root', 'crapcrapcrap')
#self.assertRaises(smtplib.SMTPAuthenticationError, self.s.login, self.user.login, 'crapcrapcrap')
#self._test_roundtrip_mail(self.user, auth_user=self.user.login, auth_pass=self.user.password)
def test_20_sasldb_login(self):
'''Test sasldb LOGIN is supported'''
# Quit the setUp() connection, restart the server and reconnect
self.s.quit()
self._setup_sasl("LOGIN", force_sasldb=True)
reply = self._check_auth("LOGIN")
self.assertTrue('PLAIN' not in reply, reply)
# TODO: Perform end-to-end authentication test (need alternative to smtplib)
#self.assertRaises(smtplib.SMTPAuthenticationError, self.s.login, 'root', 'crapcrapcrap')
#self.assertRaises(smtplib.SMTPAuthenticationError, self.s.login, self.user.login, 'crapcrapcrap')
#self._test_roundtrip_mail(self.user, auth_user=self.user.login, auth_pass=self.user.password)
def test_20_sasldb_plain(self):
'''Test sasldb PLAIN'''
# Quit the setUp() connection, restart the server and reconnect
self.s.quit()
self._setup_sasl("PLAIN", force_sasldb=True)
reply = self._check_auth("PLAIN")
# Verify rejected bad password and user
self.assertRaises(smtplib.SMTPAuthenticationError, self.s.login, 'root', 'crapcrapcrap')
self.assertRaises(smtplib.SMTPAuthenticationError, self.s.login, self.user.login, 'crapcrapcrap')
# TODO: Perform end-to-end authentication test (need alternative to smtplib)
self._test_roundtrip_mail(self.user, auth_user=self.user.login, auth_pass=self.user.password)
def test_21_security_CVE_2011_1720(self):
'''CVE-2011-1720 fixed'''
# http://www.postfix.org/CVE-2011-1720.html
# setup sasl and connect
self.s.quit()
self._setup_sasl("CRAM-MD5", "DIGEST-MD5")
# verify sasl support
rc, report = testlib.cmd(['postconf', 'smtpd_sasl_auth_enable'])
expected = 0
result = 'Got exit code %d, expected %d\n' % (rc, expected)
self.assertEquals(expected, rc, result + report)
self.assertTrue('yes' in report, "Could not find 'yes' in report:\n%s" % report)
if self.lsb_release['Release'] > 6.06:
rc, report = testlib.cmd(['postconf', 'smtpd_sasl_type'])
expected = 0
result = 'Got exit code %d, expected %d\n' % (rc, expected)
self.assertEquals(expected, rc, result + report)
self.assertTrue('cyrus' in report, "Could not find 'cyrus' in report:\n%s" % report)
# ehlo
reply = self._check_auth("CRAM-MD5")
self.assertTrue('DIGEST-MD5' in reply, reply)
code, msg = self.s.docmd("AUTH", "CRAM-MD5")
reply = '%d %s' % (code, msg)
self.assertEquals(code, 334, reply)
code, msg = self.s.docmd("*")
reply = '%d %s' % (code, msg)
self.assertEquals(code, 501, reply)
error = False
try:
code, msg = self.s.docmd("AUTH", "DIGEST-MD5")
except:
error = True
self.assertFalse(error, "server disconnected")
reply = '%d %s' % (code, msg)
self.assertEquals(code, 334, reply)
    def test_99_restore(self):
        '''Restore configuration'''
        # Named test_99 so it runs last and leaves the system in its
        # original state.
        self._tearDown()
# Run the suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| epl-1.0 |
abstract-open-solutions/OCB | addons/website_report/report.py | 257 | 2124 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.web.http import request
from openerp.osv import osv
class Report(osv.Model):
    """Extend the base report model to record which QWeb views were used
    while rendering a report during a website request.

    The collected metadata is stored in the session under
    ``report_view_ids`` and is consumed by the website editor to offer
    editing/translation of the rendered templates.
    """
    _inherit = 'report'

    def translate_doc(self, cr, uid, doc_id, model, lang_field, template, values, context=None):
        """Record the template used for this document, then delegate to the
        standard translation logic.

        Only active inside a website request.
        """
        if request and hasattr(request, 'website'):
            if request.website is not None:
                v = request.website.get_template(template)
                # Create the session list lazily: translate_doc() can be
                # reached without render() having seeded the key, which
                # previously raised a KeyError.
                request.session.setdefault('report_view_ids', []).append({
                    'name': v.name,
                    'id': v.id,
                    'xml_id': v.xml_id,
                    'inherit_id': v.inherit_id.id,
                    'header': False,
                    'active': v.active,
                })
        return super(Report, self).translate_doc(cr, uid, doc_id, model, lang_field, template, values, context=context)

    def render(self, cr, uid, ids, template, values=None, context=None):
        """Reset the per-session list of used report views, then render."""
        if request and hasattr(request, 'website'):
            if request.website is not None:
                request.session['report_view_ids'] = []
        return super(Report, self).render(cr, uid, ids, template, values=values, context=context)
| agpl-3.0 |
jdinuncio/ansible-modules-extras | cloud/centurylink/clc_publicip.py | 23 | 12571 | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
module: clc_publicip
short_description: Add and Delete public ips on servers in CenturyLink Cloud.
description:
- An Ansible module to add or delete public ip addresses on an existing server or servers in CenturyLink Cloud.
version_added: "2.0"
options:
protocol:
description:
- The protocol that the public IP will listen for.
default: TCP
choices: ['TCP', 'UDP', 'ICMP']
required: False
ports:
description:
- A list of ports to expose. This is required when state is 'present'
required: False
default: None
server_ids:
description:
- A list of servers to create public ips on.
required: True
state:
description:
- Determine whether to create or delete public IPs. If present module will not create a second public ip if one
already exists.
default: present
choices: ['present', 'absent']
required: False
wait:
description:
- Whether to wait for the tasks to finish before returning.
choices: [ True, False ]
default: True
required: False
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enables access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
- name: Add Public IP to Server
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Create Public IP For Servers
clc_publicip:
protocol: TCP
ports:
- 80
server_ids:
- UC1TEST-SVR01
- UC1TEST-SVR02
state: present
register: clc
- name: debug
debug:
var: clc
- name: Delete Public IP from Server
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Create Public IP For Servers
clc_publicip:
server_ids:
- UC1TEST-SVR01
- UC1TEST-SVR02
state: absent
register: clc
- name: debug
debug:
var: clc
'''
RETURN = '''
server_ids:
description: The list of server ids that are changed
returned: success
type: list
sample:
[
"UC1TEST-SVR01",
"UC1TEST-SVR02"
]
'''
__version__ = '${version}'
import os
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
import clc as clc_sdk
from clc import CLCException
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule
class ClcPublicIp(object):
    """Implementation of the clc_publicip Ansible module: adds or removes
    public IP addresses on CenturyLink Cloud servers via the clc-sdk."""
    # Class-level handle to the clc sdk; module is set per instance.
    clc = clc_sdk
    module = None
    def __init__(self, module):
        """
        Construct module
        """
        self.module = module
        # Fail fast if required third-party libraries are missing or too
        # old; these checks run before any API traffic is attempted.
        if not CLC_FOUND:
            self.module.fail_json(
                msg='clc-python-sdk required for this module')
        if not REQUESTS_FOUND:
            self.module.fail_json(
                msg='requests library is required for this module')
        if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
            self.module.fail_json(
                msg='requests library version should be >= 2.5.0')
        self._set_user_agent(self.clc)
    def process_request(self):
        """
        Process the request - Main Code Path
        :return: Returns with either an exit_json or fail_json
        """
        self._set_clc_credentials_from_env()
        params = self.module.params
        server_ids = params['server_ids']
        ports = params['ports']
        protocol = params['protocol']
        state = params['state']
        if state == 'present':
            # NOTE: the local name 'requests' shadows the imported
            # 'requests' library within this method (harmless here).
            changed, changed_server_ids, requests = self.ensure_public_ip_present(
                server_ids=server_ids, protocol=protocol, ports=ports)
        elif state == 'absent':
            changed, changed_server_ids, requests = self.ensure_public_ip_absent(
                server_ids=server_ids)
        else:
            return self.module.fail_json(msg="Unknown State: " + state)
        self._wait_for_requests_to_complete(requests)
        return self.module.exit_json(changed=changed,
                                     server_ids=changed_server_ids)
    @staticmethod
    def _define_module_argument_spec():
        """
        Define the argument spec for the ansible module
        :return: argument spec dictionary
        """
        argument_spec = dict(
            server_ids=dict(type='list', required=True),
            protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']),
            ports=dict(type='list'),
            wait=dict(type='bool', default=True),
            state=dict(default='present', choices=['present', 'absent']),
        )
        return argument_spec
    def ensure_public_ip_present(self, server_ids, protocol, ports):
        """
        Ensures the given server ids having the public ip available
        :param server_ids: the list of server ids
        :param protocol: the ip protocol
        :param ports: the list of ports to expose
        :return: (changed, changed_server_ids, results)
                  changed: A flag indicating if there is any change
                  changed_server_ids : the list of server ids that are changed
                  results: The result list from clc public ip call
        """
        changed = False
        results = []
        changed_server_ids = []
        servers = self._get_servers_from_clc(
            server_ids,
            'Failed to obtain server list from the CLC API')
        # Only touch servers without an existing public IP so the
        # operation is idempotent.
        servers_to_change = [
            server for server in servers if len(
                server.PublicIPs().public_ips) == 0]
        ports_to_expose = [{'protocol': protocol, 'port': port}
                           for port in ports]
        for server in servers_to_change:
            if not self.module.check_mode:
                result = self._add_publicip_to_server(server, ports_to_expose)
                results.append(result)
            changed_server_ids.append(server.id)
            changed = True
        return changed, changed_server_ids, results
    def _add_publicip_to_server(self, server, ports_to_expose):
        """Request a public IP on *server* exposing *ports_to_expose*;
        fails the module on an API error, otherwise returns the request."""
        result = None
        try:
            result = server.PublicIPs().Add(ports_to_expose)
        except CLCException as ex:
            self.module.fail_json(msg='Failed to add public ip to the server : {0}. {1}'.format(
                server.id, ex.response_text
            ))
        return result
    def ensure_public_ip_absent(self, server_ids):
        """
        Ensures the given server ids having the public ip removed if there is any
        :param server_ids: the list of server ids
        :return: (changed, changed_server_ids, results)
                  changed: A flag indicating if there is any change
                  changed_server_ids : the list of server ids that are changed
                  results: The result list from clc public ip call
        """
        changed = False
        results = []
        changed_server_ids = []
        servers = self._get_servers_from_clc(
            server_ids,
            'Failed to obtain server list from the CLC API')
        # Only touch servers that actually hold at least one public IP.
        servers_to_change = [
            server for server in servers if len(
                server.PublicIPs().public_ips) > 0]
        for server in servers_to_change:
            if not self.module.check_mode:
                result = self._remove_publicip_from_server(server)
                results.append(result)
            changed_server_ids.append(server.id)
            changed = True
        return changed, changed_server_ids, results
    def _remove_publicip_from_server(self, server):
        """Delete every public IP on *server*; returns the last request
        object, or fails the module on an API error."""
        result = None
        try:
            # A server may hold several public IPs; delete all of them.
            for ip_address in server.PublicIPs().public_ips:
                result = ip_address.Delete()
        except CLCException as ex:
            self.module.fail_json(msg='Failed to remove public ip from the server : {0}. {1}'.format(
                server.id, ex.response_text
            ))
        return result
    def _wait_for_requests_to_complete(self, requests_lst):
        """
        Waits until the CLC requests are complete if the wait argument is True
        :param requests_lst: The list of CLC request objects
        :return: none
        """
        if not self.module.params['wait']:
            return
        for request in requests_lst:
            request.WaitUntilComplete()
            for request_details in request.requests:
                if request_details.Status() != 'succeeded':
                    self.module.fail_json(
                        msg='Unable to process public ip request')
    def _set_clc_credentials_from_env(self):
        """
        Set the CLC Credentials on the sdk by reading environment variables
        :return: none
        """
        env = os.environ
        v2_api_token = env.get('CLC_V2_API_TOKEN', False)
        v2_api_username = env.get('CLC_V2_API_USERNAME', False)
        v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
        clc_alias = env.get('CLC_ACCT_ALIAS', False)
        api_url = env.get('CLC_V2_API_URL', False)
        if api_url:
            self.clc.defaults.ENDPOINT_URL_V2 = api_url
        # A pre-generated API token (plus account alias) takes precedence
        # over username/password credentials.
        if v2_api_token and clc_alias:
            self.clc._LOGIN_TOKEN_V2 = v2_api_token
            self.clc._V2_ENABLED = True
            self.clc.ALIAS = clc_alias
        elif v2_api_username and v2_api_passwd:
            self.clc.v2.SetCredentials(
                api_username=v2_api_username,
                api_passwd=v2_api_passwd)
        else:
            return self.module.fail_json(
                msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
                    "environment variables")
    def _get_servers_from_clc(self, server_ids, message):
        """
        Gets list of servers form CLC api
        """
        try:
            return self.clc.v2.Servers(server_ids).servers
        except CLCException as exception:
            self.module.fail_json(msg=message + ': %s' % exception)
    @staticmethod
    def _set_user_agent(clc):
        """Tag the sdk's HTTP session so CLC API calls identify this module."""
        if hasattr(clc, 'SetRequestsSession'):
            agent_string = "ClcAnsibleModule/" + __version__
            ses = requests.Session()
            ses.headers.update({"Api-Client": agent_string})
            ses.headers['User-Agent'] += " " + agent_string
            clc.SetRequestsSession(ses)
def main():
    """
    The main function. Instantiates the module and calls process_request.
    :return: none
    """
    module = AnsibleModule(
        argument_spec=ClcPublicIp._define_module_argument_spec(),
        supports_check_mode=True
    )
    clc_public_ip = ClcPublicIp(module)
    clc_public_ip.process_request()
# Entry point when invoked directly by Ansible.
if __name__ == '__main__':
    main()
| gpl-3.0 |
grevutiu-gabriel/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/net/failuremap.py | 134 | 4062 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# FIXME: This probably belongs in the buildbot module.
class FailureMap(object):
    """Maps failing revisions to the builders and tests they broke.

    Each recorded failure pairs a builder with the regression window (a
    range of suspect revisions plus the tests that failed) observed on
    that builder.
    """

    def __init__(self):
        self._failures = []

    def add_regression_window(self, builder, regression_window):
        """Record that *builder* regressed somewhere inside *regression_window*."""
        self._failures.append({
            'builder': builder,
            'regression_window': regression_window,
        })

    def is_empty(self):
        return not self._failures

    def failing_revisions(self):
        """Return the sorted, de-duplicated list of suspect revisions."""
        # Accumulate into a set instead of sum(lists, []) (quadratic).
        revisions = set()
        for failure_info in self._failures:
            revisions.update(failure_info['regression_window'].revisions())
        return sorted(revisions)

    def builders_failing_for(self, revision):
        """Return the builders whose regression window contains *revision*."""
        return self._builders_failing_because_of([revision])

    def tests_failing_for(self, revision):
        """Return the sorted names of tests failing because of *revision*."""
        result = set()
        for failure_info in self._failures:
            window = failure_info['regression_window']
            if revision in window.revisions() and window.failing_tests():
                result.update(window.failing_tests())
        return sorted(result)

    def failing_tests(self):
        """Return the set of all tests failing for any suspect revision."""
        tests = set()
        for revision in self.failing_revisions():
            tests.update(self.tests_failing_for(revision))
        return tests

    def _old_failures(self, is_old_failure):
        # Comprehension instead of filter() wrapping a redundant lambda.
        return [revision for revision in self.failing_revisions()
                if is_old_failure(revision)]

    def _builders_failing_because_of(self, revisions):
        revision_set = set(revisions)
        return [failure_info['builder'] for failure_info in self._failures
                if revision_set.intersection(
                    failure_info['regression_window'].revisions())]

    # FIXME: We should re-process old failures after some time delay.
    # https://bugs.webkit.org/show_bug.cgi?id=36581
    def filter_out_old_failures(self, is_old_failure):
        """Drop failures attributable to revisions flagged as old.

        We filter out all the failing builders that could have been caused
        by old failures.  We could miss some new failures this way, but
        empirically this reduces the amount of spam we generate.
        """
        old_failures = self._old_failures(is_old_failure)
        old_failing_builder_names = set(
            builder.name()
            for builder in self._builders_failing_because_of(old_failures))
        self._failures = [failure_info for failure_info in self._failures
                          if failure_info['builder'].name() not in old_failing_builder_names]
        self._cache = {}
| bsd-3-clause |
alphageek-xyz/site | landing/models.py | 1 | 1448 | import re
from django.db import models
from django.utils.functional import cached_property
from landing.utils import markup_markdown
class ServiceManager(models.Manager):
    """Custom manager exposing aggregate helpers for Service objects."""

    def last_modified(self):
        """Return the ``modified`` timestamp of the most recently changed service."""
        newest = self.latest('modified')
        return newest.modified
class Service(models.Model):
    """A service offering shown on the landing page, ordered by `order`."""
    objects = ServiceManager()
    class Meta:
        ordering = ('order',)
        get_latest_by = 'modified'
    # Human-readable, unique name of the service.
    name = models.CharField(
        verbose_name='Service Name',
        max_length=100,
        unique=True,
    )
    # Markdown source describing the service; rendered via the `html` property.
    description = models.TextField(
        verbose_name='Description',
        blank=True
    )
    # Display position; auto-assigned in save() when left empty.
    order = models.IntegerField(
        null=True,
    )
    # Timestamp of the last change (auto-updated on every save).
    modified = models.DateTimeField(
        auto_now=True,
    )
    @cached_property
    def html(self):
        """Return the description rendered from Markdown to HTML (cached)."""
        return markup_markdown(
            self.description
        )
    @cached_property
    def anchor_id(self):
        """Return a URL-fragment-safe anchor derived from the name (max 30 chars)."""
        return re.sub(
            " ?[&/\\@ ]+ ?", '_', self.name
        )[:30]
    def get_absolute_url(self):
        """Return the services page URL with this service's anchor fragment."""
        from django.urls import reverse
        return '%s#%s' % (reverse('services'), self.anchor_id)
    def save(self, *args, **kwargs):
        """Assign the next free display order when none was given, then save."""
        if not self.order:
            self.order = 1 + (
                Service.objects.aggregate(
                    n=models.Max('order')
                )['n'] or 0
            )
        return super(Service, self).save(*args, **kwargs)
    def __str__(self):
        return str(self.name)
| bsd-3-clause |
adamchainz/ansible | lib/ansible/modules/packaging/language/pear.py | 70 | 7380 | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2012, Afterburn <http://github.com/afterburn>
# (c) 2013, Aaron Bull Schaefer <aaron@elasticdog.com>
# (c) 2015, Jonathan Lestrelin <jonathan.lestrelin@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pear
short_description: Manage pear/pecl packages
description:
- Manage PHP packages with the pear package manager.
version_added: 2.0
author:
- "'jonathan.lestrelin' <jonathan.lestrelin@gmail.com>"
options:
name:
description:
- Name of the package to install, upgrade, or remove.
required: true
state:
description:
- Desired state of the package.
required: false
default: "present"
choices: ["present", "absent", "latest"]
'''
EXAMPLES = '''
# Install pear package
- pear:
name: Net_URL2
state: present
# Install pecl package
- pear:
name: pecl/json_post
state: present
# Upgrade package
- pear:
name: Net_URL2
state: latest
# Remove packages
- pear:
name: Net_URL2,pecl/json_post
state: absent
'''
import os
def get_local_version(pear_output):
    """Extract the locally installed version from `pear remote-info` output.

    Returns None when the package is not installed (no 'Installed' line,
    or the version column shows '-').
    """
    for line in pear_output.split('\n'):
        if 'Installed ' not in line:
            continue
        version = line.rsplit(None, 1)[-1].strip()
        if version != '-':
            return version
    return None
def get_repository_version(pear_output):
    """Extract the latest available version from `pear remote-info` output.

    Returns None when no 'Latest' line is present.
    """
    for line in pear_output.split('\n'):
        if 'Latest ' in line:
            return line.rsplit(None, 1)[-1].strip()
    return None
def query_package(module, name, state="present"):
    """Query the package status in both the local system and the repository.

    Returns a pair of booleans: (is_installed, is_up_to_date).
    Only meaningful for state == "present"; other states yield None,
    matching the original implicit fall-through.
    """
    if state != "present":
        return None
    rc, _stdout, _stderr = module.run_command(
        "pear info %s" % (name,), check_rc=False)
    if rc != 0:
        # Package is not installed locally.
        return False, False
    remote_rc, remote_stdout, _stderr = module.run_command(
        "pear remote-info %s" % (name,), check_rc=False)
    # Compare the locally installed version against the repository version.
    local_version = get_local_version(remote_stdout)
    repo_version = get_repository_version(remote_stdout)
    if remote_rc == 0:
        return True, (local_version == repo_version)
    return False, False
def remove_packages(module, packages):
    """Uninstall every installed package in *packages*, then exit the module.

    A per-package loop (instead of one pear invocation) lets us report
    exactly which package failed to uninstall.
    """
    removed_count = 0
    for package in packages:
        # Skip packages that are not installed in the first place.
        installed, _updated = query_package(module, package)
        if not installed:
            continue
        rc, _stdout, _stderr = module.run_command(
            "pear uninstall %s" % (package,), check_rc=False)
        if rc != 0:
            module.fail_json(msg="failed to remove %s" % (package))
        removed_count += 1
    if removed_count > 0:
        module.exit_json(changed=True, msg="removed %s package(s)" % removed_count)
    module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, state, packages):
    """Install or upgrade each package, then exit the module.

    :param module: the AnsibleModule instance (used for commands and exit)
    :param state: 'present' to install, 'latest' to upgrade
    :param packages: list of package names
    """
    install_c = 0
    for package in packages:  # fixed: index from enumerate() was unused
        # Skip when already installed (state=present) or installed and
        # up-to-date (state=latest).
        installed, updated = query_package(module, package)
        if installed and (state == 'present' or (state == 'latest' and updated)):
            continue
        if state == 'present':
            command = 'install'
        elif state == 'latest':
            command = 'upgrade'
        cmd = "pear %s %s" % (command, package)
        rc, stdout, stderr = module.run_command(cmd, check_rc=False)
        if rc != 0:
            module.fail_json(msg="failed to install %s" % (package))
        install_c += 1
    if install_c > 0:
        module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
    module.exit_json(changed=False, msg="package(s) already installed")
def check_packages(module, packages, state):
    """Report (in check mode) which packages would change for *state*.

    Exits via module.exit_json; never returns normally.
    """
    would_be_changed = []
    for package in packages:
        installed, updated = query_package(module, package)
        if ((state in ["present", "latest"] and not installed) or
                (state == "absent" and installed) or
                (state == "latest" and not updated)):
            would_be_changed.append(package)
    if would_be_changed:
        if state == "absent":
            state = "removed"
        module.exit_json(changed=True, msg="%s package(s) would be %s" % (
            len(would_be_changed), state))
    else:
        # BUG FIX: was 'change=False', which left the standard 'changed'
        # key unset in the module result.
        module.exit_json(changed=False, msg="package(s) already %s" % state)
def exe_exists(program):
    """Return True if *program* is an executable file on the PATH."""
    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(directory.strip('"'), program)
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return True
    return False
def main():
    """Ansible entry point: parse arguments and dispatch to the
    install/upgrade/remove helpers (each of which exits the module)."""
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(aliases=['pkg']),
            state = dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed'])),
        required_one_of = [['name']],
        supports_check_mode = True)
    if not exe_exists("pear"):
        module.fail_json(msg="cannot find pear executable in PATH")
    p = module.params
    # normalize the state parameter
    if p['state'] in ['present', 'installed']:
        p['state'] = 'present'
    elif p['state'] in ['absent', 'removed']:
        p['state'] = 'absent'
    if p['name']:
        # Comma-separated list of packages.
        pkgs = p['name'].split(',')
        # Placeholder list kept in step with pkgs; currently unused.
        pkg_files = []
        for i, pkg in enumerate(pkgs):
            pkg_files.append(None)
        if module.check_mode:
            # check_packages() exits the module itself.
            check_packages(module, pkgs, p['state'])
        if p['state'] in ['present', 'latest']:
            install_packages(module, p['state'], pkgs)
        elif p['state'] == 'absent':
            remove_packages(module, pkgs)
# import module snippets
from ansible.module_utils.basic import *
# Entry point when executed directly by Ansible.
if __name__ == '__main__':
    main()
| gpl-3.0 |
captainpete/rethinkdb | external/gtest_1.6.0/test/gtest_output_test.py | 1733 | 12005 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
IS_WINDOWS = os.name == 'nt'
# TODO(vladl@google.com): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')
# At least one command we exercise must not have the
# --gtest_internal_skip_environment_and_ad_hoc_tests flag.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
'--gtest_print_time',
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
{}, [PROGRAM_PATH,
'--gtest_also_run_disabled_tests',
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
{'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
[PROGRAM_PATH,
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=PassingTest.*'])
GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
    """Return *s* with every Windows/Mac line ending replaced by '\\n'."""
    # Replace CRLF first so the lone-CR pass cannot split a CRLF pair.
    unix = s.replace('\r\n', '\n')
    return unix.replace('\r', '\n')
def RemoveLocations(test_output):
    """Strip file location info from a Google Test program's output.

    Both 'DIRECTORY/FILE_NAME:LINE_NUMBER: ' and
    'DIRECTORY\\FILE_NAME(LINE_NUMBER): ' become 'FILE_NAME:#: '.
    """
    location_re = re.compile(r'.*[/\\](.+)(\:\d+|\(\d+\))\: ')
    return location_re.sub(r'\1:#: ', test_output)
def RemoveStackTraceDetails(output):
    """Replace each stack trace body with an '(omitted)' placeholder."""
    # Non-greedy (*?) so each trace ends at its first blank line.
    trace_re = re.compile(r'Stack trace:(.|\n)*?\n\n')
    return trace_re.sub('Stack trace: (omitted)\n\n', output)
def RemoveStackTraces(output):
    """Delete stack traces (header and body) entirely from the output."""
    # Non-greedy (*?) so each trace ends at its first blank line.
    trace_re = re.compile(r'Stack trace:(.|\n)*?\n\n')
    return trace_re.sub('', output)
def RemoveTime(output):
    """Mask elapsed-time figures so timings do not affect comparisons."""
    time_re = re.compile(r'\(\d+ ms')
    return time_re.sub('(? ms', output)
def RemoveTypeInfoDetails(test_output):
    """Normalize compiler-specific type names to canonical form.

    Some compilers print the type 'unsigned int' as 'unsigned'; collapse
    the former to the latter so outputs compare equal.
    """
    return test_output.replace('unsigned int', 'unsigned')
def NormalizeToCurrentPlatform(test_output):
    """Rewrite platform-specific output details for easier comparison."""
    if not IS_WINDOWS:
        return test_output
    # Removes the color information that is not present on Windows.
    test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
    # Changes failure message headers into the Windows format.
    test_output = re.sub(r': Failure\n', r': error: ', test_output)
    # Changes file(line_number) to file:line_number.
    test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
    return test_output
def RemoveTestCounts(output):
    """Replace all numeric test counts with '?' placeholders."""
    # Order matters: the more specific patterns must run first.
    replacements = [
        (r'\d+ tests?, listed below', '? tests, listed below'),
        (r'\d+ FAILED TESTS', '? FAILED TESTS'),
        (r'\d+ tests? from \d+ test cases?', '? tests from ? test cases'),
        (r'\d+ tests? from ([a-zA-Z_])', r'? tests from \1'),
        (r'\d+ tests?\.', '? tests.'),
    ]
    for pattern, replacement in replacements:
        output = re.sub(pattern, replacement, output)
    return output
def RemoveMatchingTests(test_output, pattern):
    """Remove every test whose name matches *pattern* from *test_output*.

    First strips each whole '[ RUN ] ... [ OK/FAILED ]' span for a
    matching test (including everything in between), then removes any
    remaining single lines that mention the pattern.
    """
    span_re = (r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n'
               % (pattern, pattern))
    test_output = re.sub(span_re, '', test_output)
    return re.sub(r'.*%s.*\n' % pattern, '', test_output)
def NormalizeOutput(output):
    """Normalize gtest_output_test_ output for golden-file comparison."""
    # Apply each normalization pass in order; RemoveLocations must run
    # after line endings are unified.
    for transform in (ToUnixLineEnding,
                      RemoveLocations,
                      RemoveStackTraceDetails,
                      RemoveTime):
        output = transform(output)
    return output
def GetShellCommandOutput(env_cmd):
    """Runs a command in a sub-process, and returns its output in a string.

    Args:
      env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
               environment variables to set, and element 1 is a string with
               the command and any flags.

    Returns:
      A string with the command's combined standard and diagnostic output.
    """
    # Spawns cmd in a sub-process, and gets its standard I/O file objects.
    # Set and save the environment properly: the extra variables are
    # merged on top of a copy of the current environment.
    environ = os.environ.copy()
    environ.update(env_cmd[0])
    p = gtest_test_utils.Subprocess(env_cmd[1], env=environ)
    return p.output
def GetCommandOutput(env_cmd):
    """Runs a command and returns its output with all file location
    info stripped off.

    Args:
      env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
               environment variables to set, and element 1 is a string with
               the command and any flags.
    """
    # Disables exception pop-ups on Windows.
    environ, cmdline = env_cmd
    environ = dict(environ) # Ensures we are modifying a copy.
    environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
    return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))
def GetOutputOfAllCommands():
    """Returns concatenated output from several representative commands."""
    # Covers colored output, timing, disabled tests and sharding so the
    # golden file exercises each formatting path.
    return (GetCommandOutput(COMMAND_WITH_COLOR) +
            GetCommandOutput(COMMAND_WITH_TIME) +
            GetCommandOutput(COMMAND_WITH_DISABLED) +
            GetCommandOutput(COMMAND_WITH_SHARDING))
# Probe the test binary's test list once so we can detect which optional
# features this particular build supports.
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
# Hard-coded off: stack-trace bodies are stripped before comparison.
SUPPORTS_STACK_TRACES = False
# The golden file can only be regenerated when every feature it covers
# is available in this build.
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
                            SUPPORTS_TYPED_TESTS and
                            SUPPORTS_THREADS)
class GTestOutputTest(gtest_test_utils.TestCase):
  """Compares the normalized output of gtest_output_test_ to a golden file."""

  def RemoveUnsupportedTests(self, test_output):
    """Strips golden-file sections for features this test binary lacks."""
    if not SUPPORTS_DEATH_TESTS:
      test_output = RemoveMatchingTests(test_output, 'DeathTest')
    if not SUPPORTS_TYPED_TESTS:
      test_output = RemoveMatchingTests(test_output, 'TypedTest')
      test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
      test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
    if not SUPPORTS_THREADS:
      test_output = RemoveMatchingTests(test_output,
                                        'ExpectFailureWithThreadsTest')
      test_output = RemoveMatchingTests(test_output,
                                        'ScopedFakeTestPartResultReporterTest')
      test_output = RemoveMatchingTests(test_output,
                                        'WorksConcurrently')
    if not SUPPORTS_STACK_TRACES:
      test_output = RemoveStackTraces(test_output)
    return test_output

  def testOutput(self):
    output = GetOutputOfAllCommands()

    # A mis-configured source control system can cause \r appear in EOL
    # sequences when we read the golden file irrespective of an operating
    # system used. Therefore, we need to strip those \r's from newlines
    # unconditionally.
    # 'with' guarantees the handle is closed even if read() raises.
    with open(GOLDEN_PATH, 'rb') as golden_file:
      golden = ToUnixLineEnding(golden_file.read())

    # We want the test to pass regardless of certain features being
    # supported or not.

    # We still have to remove type name specifics in all cases.
    normalized_actual = RemoveTypeInfoDetails(output)
    normalized_golden = RemoveTypeInfoDetails(golden)

    if CAN_GENERATE_GOLDEN_FILE:
      self.assertEqual(normalized_golden, normalized_actual)
    else:
      normalized_actual = NormalizeToCurrentPlatform(
          RemoveTestCounts(normalized_actual))
      normalized_golden = NormalizeToCurrentPlatform(
          RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))

      # This code is very handy when debugging golden file differences:
      if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
        # BUGFIX: these debug files were opened without ever being closed
        # (leaked handles); 'with' closes them deterministically.
        with open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_actual.txt'), 'wb') as f:
          f.write(normalized_actual)
        with open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_golden.txt'), 'wb') as f:
          f.write(normalized_golden)

      self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
  # With the --gen_golden flag, regenerate the golden file instead of
  # running the comparison test; otherwise run the normal test driver.
  if sys.argv[1:] == [GENGOLDEN_FLAG]:
    if CAN_GENERATE_GOLDEN_FILE:
      output = GetOutputOfAllCommands()
      golden_file = open(GOLDEN_PATH, 'wb')
      golden_file.write(output)
      golden_file.close()
    else:
      message = (
          """Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads). Please generate the golden file using a binary built
with those features enabled.""")

      sys.stderr.write(message)
      sys.exit(1)
  else:
    gtest_test_utils.Main()
| agpl-3.0 |
kailIII/emaresa | trunk.pe/reportes_tributarios/report/libro_compra.py | 2 | 14001 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: OpenDrive Ltda
# Copyright (c) 2013 Opendrive Ltda
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import time
from openerp.report import report_sxw
class Parser( report_sxw.rml_parse ):
    def __init__(self,cr,uid,name,context):
        """Register the helper callables this RML report template can use."""
        super(Parser,self).__init__(cr,uid,name,context=context)
        # Names exposed to the RML template's expression context.
        self.localcontext.update({
            'formatRut':self.formatRut,
            'time': time,
            '_periodos_v': self._periodos_v,
            'corto_dat_v': self.corto_dat_v,
            'get__v': self._get__v,
            'nuevo':self.nuevo,
            'detalle':self.detalle,
            'subtotales':self.subtotales,
            'totales':self.totales,
        })
def formatRut(self, rut):
if rut:
return rut[2:-7]+'.'+rut[-7:-4]+'.'+rut[-4:-1]+'-'+rut[-1:]
return ''
def _periodos_v(self, period_list):
aux_=0
feci=0
fecf=0
for period_id in period_list:
if aux_==0:
self.cr.execute("select name from account_period where id=" + str(period_id) + "")
for record in self.cr.fetchall():
feci= record[0]
aux_=aux_+1
self.cr.execute("select name from account_period where id=" + str(period_id) + "")
for record in self.cr.fetchall():
fecf=record[0]
return 'Desde ' + feci + ' Hasta ' + fecf
def corto_dat_v(self,arg1,largo):
if len(arg1)>largo:
descripcion=arg1[:largo-1]
else:
descripcion=arg1
return descripcion
def _get__v(self,co,pe,si,ty):
d = []
Lds=''
Lds_=''
cc=0
#cl=0
tpOo= 0
aeOo= 0
aeS=0
txS=0
unS=0
toS=0
cl=0
aeT=0
txT=0
unT=0
toT=0
d.append({'auxiliar':'t',})
for p in pe:
Lds = Lds + str(p) + ","
while cc<len(Lds)-1:
Lds_= Lds_ + Lds[cc]
cc=cc + 1
sql = "SELECT ai.reference,date_invoice,rp.vat, rp.name, aj.code, ai.amount_untaxed, ai.amount_tax, ai.amount_total, ai.fiscal_position, (select CASE WHEN sum(ait.base_amount) is null then 0 else sum(ait.base_amount) end as a from account_invoice_tax ait where UPPER(ait.name) like UPPER('%exento%') and ait.invoice_id = ai.id) base_amount FROM public.account_invoice ai, public.account_journal aj, public.res_partner rp WHERE ai.state not in ('draft', 'cancel') and ai.partner_id = rp.id AND aj.id = ai.journal_id and aj.code between '100' and '142' and ai.period_id in ("+"".join(map(str, Lds_))+") and ai.company_id = "+ str(co[0]) + " order by aj.cod"
self.cr.execute(sql)
for record in self.cr.fetchall():
nmOo = record[0]
dtOo = record[1]
rtOo = record[2]
clOo = record[3]
tpOo = ""
aeOo = record[9]
if record[4]=="101":
tpOo= "FN"
elif record[4]=="102":
tpOo= "FE"
elif record[4]=="103":
tpOo= "FI"
elif record[4]=="":
tpOo= "SC"
txOo = record[6] #tax
#if record[8]==1:
#aeOo="Afecto"
#elif record[8]==2:
#txOo= '0'
#aeOo="Exento"
unOo = record[5] #untaxed
toOo = record[7] #total
if cl==56:
#OoO={'auxiliar':'tT'}
#d.append(OoO)
OoO={
'number': '',
'x_tipo_doc': '',
'date_invoice': '',
'rut': '',
'proveedor': 'SUB TOTAL',
'afe_exe':self.formatLang(aeS, digits=0),
'iva': self.formatLang(txS, digits=0),
'neto_': self.formatLang(unS, digits=0),
'total_': self.formatLang(toS, digits=0),
'auxiliar':'dT'
}
d.append(OoO)
aeS=0
txS=0
unS=0
toS=0
cl=0
d.append({'auxiliar':'t',})
OoO={
'number': nmOo,
'x_tipo_doc': tpOo,
'date_invoice': dtOo,
'rut': rtOo,
'proveedor': clOo,
'afe_exe':self.formatLang(aeOo, digits=0),
'iva': self.formatLang(txOo, digits=0),
'neto_': self.formatLang(unOo, digits=0),
'total_': self.formatLang(toOo, digits=0),
'auxiliar':'d'
}
#sub total
aeS+=aeOo
txS+=txOo
unS+=unOo
toS+=toOo
d.append(OoO)
#total final
aeT+=aeOo
txT+=txOo
unT+=unOo
toT+=toOo
cl=cl+1
#preguntar k onda
OoO={
'number': '',
'x_tipo_doc': '',
'date_invoice': '',
'rut': '',
'proveedor': 'SUB TOTAL',
'afe_exe':self.formatLang(aeS, digits=0),
'iva': self.formatLang(txS, digits=0),
'neto_': self.formatLang(unS, digits=0),
'total_': self.formatLang(toS, digits=0),
'auxiliar':'dT'
}
d.append(OoO)
OoO={
'number': '',
'x_tipo_doc': '',
'date_invoice': '',
'rut': '',
'proveedor': 'TOTAL',
'afe_exe':self.formatLang(aeT, digits=0),
'iva': self.formatLang(txT, digits=0),
'neto_': self.formatLang(unT, digits=0),
'total_': self.formatLang(toT, digits=0),
'auxiliar':'dT'
}
d.append(OoO)
aeS=0
txS=0
unS=0
toS=0
return d
    def nuevo(self,co,pe,si,ty):
        """Return [{'id': .., 'name': ..}] for the purchase journals (codes
        '100'..'142') that have posted invoices in the selected periods.

        si and ty are unused, kept for template compatibility.
        """
        data=[]
        periodos = ",".join(map(str,pe))
        # NOTE(review): ids are interpolated into the SQL; safe only while
        # they come from the report wizard, never from end-user input.
        sql="""
            select id, name from account_journal aj where id in (
                select journal_id
                from account_invoice ai
                where ai.state not in ('draft', 'cancel')
                and ai.period_id in ({0})
                and ai.company_id = {1}
                )
            and aj.code between '100' and '142'
            order by aj.code
        """.format(periodos,str(co[0]))
        self.cr.execute(sql)
        for record in self.cr.fetchall():
            data.insert(len(data)+1,{'id':record[0],
                                     'name':record[1],
                                     } )
        return data
'''def detalle(self,journal_id,co,pe,si,ty):
data = []
periodos = ",".join(map(str,pe))
sql="""select
ai.reference
,date_invoice
,rp.vat
, rp.name
, ai.amount_untaxed
, ai.amount_tax
, ai.amount_total
, ai.fiscal_position
, (select CASE WHEN sum(ait.base_amount) is null then 0
else sum(ait.base_amount) end as a
from account_invoice_tax ait
where UPPER(ait.name) like UPPER('%iva%')
and ait.invoice_id = ai.id) base_amount
FROM public.account_invoice ai
, public.res_partner rp
WHERE ai.state not in ('draft', 'cancel')
and ai.partner_id = rp.id
AND ai.journal_id = {0}
and ai.period_id in ({1})
and ai.company_id = {2}
order by date_invoice;
""".format(journal_id, periodos, str(co[0]))
self.cr.execute(sql)
for record in self.cr.fetchall():
data.insert(len(data)+1,
{
'number': record[0],
'x_tipo_doc': "",
'date_invoice': record[1],
'rut': record[2],
'proveedor': record[3],
'afe_exe':self.formatLang(record[8], digits=0),
'cc_amount_untaxed': self.formatLang(record[5], digits=0),
'cc_amount_tax': self.formatLang(record[4], digits=0),
'cc_amount_total': self.formatLang(record[6], digits=0),
'auxiliar':'d'
})
# 'number': record[0],
# 'x_tipo_doc': "",
# 'date_invoice': record[1],
# 'rut': record[2],
# 'cliente': record[3],
# 'afe_exe':self.formatLang(record[7], digits=0),
# 'cc_amount_tax': self.formatLang(record[5], digits=0),
# 'cc_amount_untaxed': self.formatLang(record[4], digits=0),
# 'cc_amount_total': self.formatLang(record[6], digits=0),
# 'auxiliar':'d'
return data'''
#la palabra exento cambia la columna de exento#
def detalle(self,journal_id,co,pe,si,ty):
data = []
periodos = ",".join(map(str,pe))
sql="""select
ai.reference
,date_invoice
,rp.vat
, rp.name
, ai.amount_untaxed
, ai.amount_tax
, ai.amount_total
, ai.fiscal_position
, (select CASE WHEN sum(ait.base_amount) is null then 0
else sum(ait.base_amount) end as a
from account_invoice_tax ait
where UPPER(ait.name) like UPPER('%exento%')
and ait.invoice_id = ai.id) base_amount
FROM public.account_invoice ai
, public.res_partner rp
WHERE ai.state not in ('draft', 'cancel')
and ai.partner_id = rp.id
AND ai.journal_id = {0}
and ai.period_id in ({1})
and ai.company_id = {2}
order by date_invoice;
""".format(journal_id, periodos, str(co[0]))
self.cr.execute(sql)
for record in self.cr.fetchall():
data.insert(len(data)+1,
{
'number': record[0],
'x_tipo_doc': "",
'date_invoice': record[1],
'rut': record[2],
'proveedor': record[3],
'afe_exe':self.formatLang(record[8], digits=0),
'cc_amount_untaxed': self.formatLang(record[5], digits=0),
'cc_amount_tax': self.formatLang(record[4]-record[8], digits=0),
'cc_amount_total': self.formatLang(record[6], digits=0),
'auxiliar':'d'
})
# 'number': record[0],
# 'x_tipo_doc': "",
# 'date_invoice': record[1],
# 'rut': record[2],
# 'cliente': record[3],
# 'afe_exe':self.formatLang(record[7], digits=0),
# 'cc_amount_tax': self.formatLang(record[5], digits=0),
# 'cc_amount_untaxed': self.formatLang(record[4], digits=0),
# 'cc_amount_total': self.formatLang(record[6], digits=0),
# 'auxiliar':'d'
return data
    def subtotales(self,journal_id,co,pe):
        """Return invoice count and amount sub-totals for one journal over
        the selected periods (single-row result list).

        'amount_untaxed' is reported net of the exempt base so the exempt
        column and the net column do not double count.
        """
        periodos = ",".join(map(str,pe))
        data=[]
        # NOTE(review): ids are interpolated into the SQL; safe only while
        # they come from the report wizard.
        sql="""SELECT
            count(*) as cantidad
            , sum(ai.amount_untaxed) amount_untaxed
            , sum(ai.amount_tax) amount_tax
            , sum(ai.amount_total) amount_total
            , sum((
                select
                    CASE WHEN sum(ait.base_amount) is
                    null
                    then 0 else sum(ait.base_amount)
                    end as a
                from account_invoice_tax ait
                where UPPER(ait.name) like UPPER('%exento%')
                and ait.invoice_id = ai.id
                )) base_amount
            FROM public.account_invoice ai
            , public.res_partner rp
            WHERE ai.state not in ('draft', 'cancel')
            and ai.partner_id = rp.id
            AND ai.journal_id = {0}
            and ai.period_id in ({1})
            and ai.company_id = {2}
        """.format(journal_id, periodos, str(co[0]))
        self.cr.execute(sql)
        for record in self.cr.fetchall():
            data.insert(len(data)+1,
                {
                'cantidad':self.formatLang(record[0], digits=0)
                ,'base_amount':self.formatLang(record[4], digits=0)
                ,'amount_untaxed':self.formatLang(record[1]-record[4], digits=0)
                ,'amount_tax':self.formatLang(record[2], digits=0)
                ,'amount_total':self.formatLang(record[3], digits=0)
                })
        return data
    def totales(self,co,pe):
        """Return grand totals across all purchase journals for the
        selected periods: regular journals are added, credit-note journals
        (name containing 'nota' and 'credito') are subtracted via *-1.

        NOTE(review): the credit-note branch filters its subquery on
        '%iva%' while the first branch uses '%exento%'; confirm this
        asymmetry is intentional before touching the SQL.
        """
        periodos = ",".join(map(str,pe))
        data=[]
        sql="""select sum(cantidad) cantidad, sum(amount_untaxed) amount_untaxed, sum(amount_tax) amount_tax,sum(amount_total) amount_total, sum(base_amount) base_amount
        from (
        select count(*) as cantidad
        , coalesce(sum(ai.amount_untaxed),0) amount_untaxed
        , coalesce(sum(ai.amount_tax),0) amount_tax
        , coalesce(sum(ai.amount_total),0) amount_total
        , coalesce(sum((
                select
                    CASE WHEN sum(ait.base_amount) is
                    null
                    then 0 else sum(ait.base_amount)
                    end as a
                from account_invoice_tax ait
                where UPPER(ait.name) like UPPER('%exento%')
                and ait.invoice_id = ai.id
            )),0) base_amount
        FROM public.account_invoice ai
            , public.res_partner rp
        WHERE ai.state not in ('draft', 'cancel')
        and ai.partner_id = rp.id
        AND ai.journal_id in (
            select id from account_journal aj where aj.code between '100' and '199' and not
            UPPER(name) like UPPER('%nota%') and not UPPER(name) like UPPER('%credito%') and ai.company_id = {1}
        )
        and ai.period_id in ({0})
        and ai.company_id = {1}
        union
        select count(*) as cantidad
        , coalesce(sum(ai.amount_untaxed),0)*-1 amount_untaxed
        , coalesce(sum(ai.amount_tax),0)*-1 amount_tax
        , coalesce(sum(ai.amount_total),0)*-1 amount_total
        , coalesce(sum((
                select
                    CASE WHEN sum(ait.base_amount) is
                    null
                    then 0 else sum(ait.base_amount)
                    end as a
                from account_invoice_tax ait
                where UPPER(ait.name) like UPPER('%iva%')
                and ait.invoice_id = ai.id
            )),0)*-1 base_amount
        FROM public.account_invoice ai
            , public.res_partner rp
        WHERE ai.state not in ('draft', 'cancel')
        and ai.partner_id = rp.id
        AND ai.journal_id in (
            select id from account_journal aj where aj.code between '100' and '199' and
            UPPER(name) like UPPER('%nota%') and UPPER(name) like UPPER('%credito%') and ai.company_id = {1}
        )
        and ai.period_id in ({0})
        and ai.company_id = {1}
        ) as a
        """.format( periodos, str(co[0]))
        self.cr.execute(sql)
        for record in self.cr.fetchall():
            data.insert(len(data)+1,
                {
                'cantidad':self.formatLang(record[0], digits=0)
                ,'base_amount':self.formatLang(record[4], digits=0)
                ,'amount_untaxed':self.formatLang(record[1]-record[4], digits=0)
                ,'amount_tax':self.formatLang(record[2], digits=0)
                ,'amount_total':self.formatLang(record[3], digits=0)
                })
        return data
# Register this parser as the 'libro_compra_rml' report, rendered through
# the RML template shipped with the module.
report_sxw.report_sxw('report.libro_compra_rml', 'reportes.tributarios',
    'reportes_tributarios/report/libro_compra.rml', parser=Parser, header=False)
| agpl-3.0 |
Brazelton-Lab/bio_utils | bio_utils/blast_tools/retrieve_query_sequences.py | 1 | 6139 | #! /usr/bin/env python3
"""Returns query sequence from BLAST hits below a specified E-Value
Usage:
retrieve_query_sequences.py --fastqq <FASTA or FASTQ file>
--b6 <B6 or M8 file> --e_value <max E-Value>
--output <output file> [--fastq]
Copyright:
retrieve_query_sequences.py recover query sequence from BLAST alignment
Copyright (C) 2015 William Brazelton, Alex Hyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
from bio_utils.blast_tools import b6_evalue_filter
from bio_utils.iterators import fasta_iter
from bio_utils.iterators import fastq_iter
from collections import defaultdict
import sys
__author__ = 'William Brazelton, Alex Hyer'
__email__ = 'theonehyer@gmail.com'
__license__ = 'GPLv3'
__maintainer__ = 'Alex Hyer'
__status__ = 'Production'
__version__ = '2.0.0'
def query_sequence_retriever(fastaq_handle, b6_handle, e_value,
                             fastaq='fasta', *args, **kwargs):
    """Returns FASTA/Q entries for aligned query regions of BLAST hits

    Stores B6/M8 entries with E-Values below the e_value cutoff. Then
    iterates through the FASTA/Q file and, for each entry matching the
    query of a stored B6/M8 entry, yields the entry with its sequence
    (and quality, for FASTQ) cut down to the aligned region and the
    E-Value appended to the description.

    Args:
        fastaq_handle (file): FASTA or FASTQ file handle, can technically
            be any iterable that returns FASTA/Q "lines"
        b6_handle (file): B6/M8 file handle, can technically
            be any iterable that returns B6/M8 "lines"
        e_value (float): Max E-Value of entry to return
        fastaq (str): ['fasta', 'fastq'] whether file handle is a FASTA or
            FASTQ file
        *args: Variable length argument list for b6_iter
        **kwargs: Arbitrary keyword arguments for b6_iter

    Yields:
        FastaEntry: class containing all FASTA data
        FastqEntry if fastaq='fastq'.  NOTE: the same entry object is
        mutated and re-yielded for each alignment of a query.
    """

    filtered_b6 = defaultdict(list)
    for entry in b6_evalue_filter(b6_handle, e_value, *args, **kwargs):
        filtered_b6[entry.query].append(
            (entry.query_start, entry.query_end, entry._evalue_str))

    fastaq_iter = fasta_iter if fastaq == 'fasta' else fastq_iter
    for fastaqEntry in fastaq_iter(fastaq_handle):
        if fastaqEntry.id not in filtered_b6:
            continue

        # Keep pristine copies: the entry is mutated before each yield, so
        # later alignments of the same query must slice the *original*
        # data, not the output of the previous iteration (old bug).
        whole_sequence = fastaqEntry.sequence
        whole_quality = fastaqEntry.quality if fastaq == 'fastq' else None
        base_description = fastaqEntry.description

        for query_start, query_end, evalue_str in \
                filtered_b6[fastaqEntry.id]:
            # B6/M8 coordinates are one-based and end-inclusive; convert
            # to zero-based, then slice *through* the inclusive end.
            # BUGFIX: the previous [start:end] slice silently dropped the
            # final aligned residue (and the first one on reversed hits).
            start = query_start - 1
            end = query_end - 1

            # Get query sequence
            if start < end:
                fastaqEntry.sequence = whole_sequence[start:end + 1]
            elif start > end:  # reversed alignment: flip the subsequence
                fastaqEntry.sequence = whole_sequence[end:start + 1][::-1]
            else:  # single-residue alignment
                fastaqEntry.sequence = whole_sequence[start]

            # Get query quality
            if fastaq == 'fastq':
                if start < end:
                    fastaqEntry.quality = whole_quality[start:end + 1]
                elif start > end:
                    fastaqEntry.quality = whole_quality[end:start + 1][::-1]
                else:
                    fastaqEntry.quality = whole_quality[start]

            # Add E-value to FASTA/Q header
            if base_description == '':
                fastaqEntry.description = 'E-value: ' + evalue_str
            else:
                fastaqEntry.description = base_description + \
                    ' E-value: ' + evalue_str

            yield fastaqEntry
def main():
    """Command-line wrapper: parse arguments and write every aligned query
    subsequence (with its E-Value) to the output stream."""
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.
                                     RawDescriptionHelpFormatter)
    parser.add_argument('-f', '--fastaq',
                        type=argparse.FileType('rU'),
                        help='query FASTAQ file')
    parser.add_argument('-b', '--b6',
                        type=argparse.FileType('rU'),
                        help='B6/M8 file with alignment data')
    parser.add_argument('-e', '--e_value',
                        type=float,
                        help='upper E-Value cutoff')
    parser.add_argument('--fastq',
                        action='store_true',
                        help='specifies that input is FASTQ')
    parser.add_argument('-o', '--output',
                        type=argparse.FileType('w'),
                        default=sys.stdout,
                        nargs='?',
                        help=' optional output file [Default: STDOUT]')
    args = parser.parse_args()

    fastaq = 'fastq' if args.fastq else 'fasta'

    for fastaEntry in query_sequence_retriever(args.fastaq,
                                               args.b6,
                                               args.e_value,
                                               fastaq=fastaq):
        args.output.write(fastaEntry.write())

if __name__ == '__main__':
    main()
    sys.exit(0)
| gpl-3.0 |
ktriponis/ansible-modules-core | system/sysctl.py | 29 | 11846 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, David "DaviXX" CHANIAL <david.chanial@gmail.com>
# (c) 2014, James Tanner <tanner.jc@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: sysctl
short_description: Manage entries in sysctl.conf.
description:
- This module manipulates sysctl entries and optionally performs a C(/sbin/sysctl -p) after changing them.
version_added: "1.0"
options:
name:
description:
- The dot-separated path (aka I(key)) specifying the sysctl variable.
required: true
default: null
aliases: [ 'key' ]
value:
description:
- Desired value of the sysctl key.
required: false
default: null
aliases: [ 'val' ]
state:
description:
- Whether the entry should be present or absent in the sysctl file.
choices: [ "present", "absent" ]
default: present
ignoreerrors:
description:
- Use this option to ignore errors about unknown keys.
choices: [ "yes", "no" ]
default: no
reload:
description:
- If C(yes), performs a I(/sbin/sysctl -p) if the C(sysctl_file) is
updated. If C(no), does not reload I(sysctl) even if the
C(sysctl_file) is updated.
choices: [ "yes", "no" ]
default: "yes"
sysctl_file:
description:
- Specifies the absolute path to C(sysctl.conf), if not C(/etc/sysctl.conf).
required: false
default: /etc/sysctl.conf
sysctl_set:
description:
- Verify token value with the sysctl command and set with -w if necessary
choices: [ "yes", "no" ]
required: false
version_added: 1.5
default: False
notes: []
requirements: []
author: David "DaviXX" CHANIAL <david.chanial@gmail.com>
'''
EXAMPLES = '''
# Set vm.swappiness to 5 in /etc/sysctl.conf
- sysctl: name=vm.swappiness value=5 state=present
# Remove kernel.panic entry from /etc/sysctl.conf
- sysctl: name=kernel.panic state=absent sysctl_file=/etc/sysctl.conf
# Set kernel.panic to 3 in /tmp/test_sysctl.conf
- sysctl: name=kernel.panic value=3 sysctl_file=/tmp/test_sysctl.conf reload=no
# Set ip forwarding on in /proc and do not reload the sysctl file
- sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes
# Set ip forwarding on in /proc and in the sysctl file and reload if necessary
- sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes state=present reload=yes
'''
# ==============================================================
import os
import tempfile
import re
class SysctlModule(object):
    def __init__(self, module):
        """Capture module arguments, initialise state flags, then run the
        whole reconciliation immediately via process()."""
        self.module = module
        self.args = self.module.params

        self.sysctl_cmd = self.module.get_bin_path('sysctl', required=True)
        self.sysctl_file = self.args['sysctl_file']

        self.proc_value = None  # current token value in proc fs
        self.file_value = None  # current token value in file
        self.file_lines = []    # all lines in the file
        self.file_values = {}   # dict of token values

        self.changed = False    # will change occur
        self.set_proc = False   # does sysctl need to set value
        self.write_file = False # does the sysctl file need to be reloaded

        self.process()
# ==============================================================
# LOGIC
# ==============================================================
    def process(self):
        """Compare desired state against /proc and the sysctl file, decide
        what needs changing, and (outside check mode) apply it."""
        # Whitespace is bad
        self.args['name'] = self.args['name'].strip()
        self.args['value'] = self._parse_value(self.args['value'])

        thisname = self.args['name']

        # get the current proc fs value
        self.proc_value = self.get_token_curr_value(thisname)

        # get the current sysctl file value
        self.read_sysctl_file()
        if thisname not in self.file_values:
            self.file_values[thisname] = None

        # update file contents with desired token/value
        self.fix_lines()

        # what do we need to do now?
        if self.file_values[thisname] is None and self.args['state'] == "present":
            self.changed = True
            self.write_file = True
        elif self.file_values[thisname] is None and self.args['state'] == "absent":
            # token already absent from the file: nothing to do
            self.changed = False
        elif self.file_values[thisname] != self.args['value']:
            self.changed = True
            self.write_file = True

        # use the sysctl command or not?
        if self.args['sysctl_set']:
            if self.proc_value is None:
                self.changed = True
            elif not self._values_is_equal(self.proc_value, self.args['value']):
                self.changed = True
                self.set_proc = True

        # Do the work
        if not self.module.check_mode:
            if self.write_file:
                self.write_sysctl()
            if self.write_file and self.args['reload']:
                self.reload_sysctl()
            if self.set_proc:
                self.set_token_value(self.args['name'], self.args['value'])
def _values_is_equal(self, a, b):
"""Expects two string values. It will split the string by whitespace
and compare each value. It will return True if both lists are the same,
contain the same elements and the same order."""
if a is None or b is None:
return False
a = a.split()
b = b.split()
if len(a) != len(b):
return False
return len([i for i, j in zip(a, b) if i == j]) == len(a)
    def _parse_value(self, value):
        # Normalise the desired value to the canonical string form sysctl
        # uses: None -> '', booleans and boolean-like strings -> '1'/'0',
        # other strings are stripped, anything else passes through.
        if value is None:
            return ''
        elif isinstance(value, bool):
            if value:
                return '1'
            else:
                return '0'
        elif isinstance(value, basestring):
            # BOOLEANS_TRUE/BOOLEANS_FALSE come from the star import of
            # ansible.module_utils.basic at the bottom of this module.
            if value.lower() in BOOLEANS_TRUE:
                return '1'
            elif value.lower() in BOOLEANS_FALSE:
                return '0'
            else:
                return value.strip()
        else:
            return value
# ==============================================================
# SYSCTL COMMAND MANAGEMENT
# ==============================================================
    # Use the sysctl command to find the current value
    def get_token_curr_value(self, token):
        # -e ignores unknown keys, -n prints the value only; a non-zero
        # exit status is treated as "no current value".
        thiscmd = "%s -e -n %s" % (self.sysctl_cmd, token)
        rc,out,err = self.module.run_command(thiscmd)
        if rc != 0:
            return None
        else:
            return out
    # Use the sysctl command to set the current value
    def set_token_value(self, token, value):
        # Multi-token values (e.g. port ranges) must be quoted so the shell
        # passes them to sysctl as one argument.
        if len(value.split()) > 0:
            value = '"' + value + '"'
        thiscmd = "%s -w %s=%s" % (self.sysctl_cmd, token, value)
        rc,out,err = self.module.run_command(thiscmd)
        if rc != 0:
            self.module.fail_json(msg='setting %s failed: %s' % (token, out + err))
        else:
            return rc
    # Run sysctl -p
    def reload_sysctl(self):
        """Make the kernel re-read the sysctl file; fails the module run on
        a non-zero exit."""
        # do it
        if get_platform().lower() == 'freebsd':
            # freebsd doesn't support -p, so reload the sysctl service
            rc,out,err = self.module.run_command('/etc/rc.d/sysctl reload')
        else:
            # system supports reloading via the -p flag to sysctl, so we'll use that
            sysctl_args = [self.sysctl_cmd, '-p', self.sysctl_file]
            if self.args['ignoreerrors']:
                sysctl_args.insert(1, '-e')

            rc,out,err = self.module.run_command(sysctl_args)

        if rc != 0:
            self.module.fail_json(msg="Failed to reload sysctl: %s" % str(out) + str(err))
# ==============================================================
# SYSCTL FILE MANAGEMENT
# ==============================================================
    # Get the token value from the sysctl file
    def read_sysctl_file(self):
        """Populate self.file_lines (stripped raw lines) and
        self.file_values (key -> value) from the sysctl file, if present."""

        lines = []
        if os.path.isfile(self.sysctl_file):
            try:
                f = open(self.sysctl_file, "r")
                lines = f.readlines()
                f.close()
            except IOError, e:
                self.module.fail_json(msg="Failed to open %s: %s" % (self.sysctl_file, str(e)))

        for line in lines:
            line = line.strip()
            self.file_lines.append(line)

            # don't split empty lines or comments
            if not line or line.startswith("#"):
                continue

            # split on the first '=' only; values may themselves contain '='
            k, v = line.split('=',1)
            k = k.strip()
            v = v.strip()
            self.file_values[k] = v.strip()
# Fix the value in the sysctl file content
def fix_lines(self):
checked = []
self.fixed_lines = []
for line in self.file_lines:
if not line.strip() or line.strip().startswith("#"):
self.fixed_lines.append(line)
continue
tmpline = line.strip()
k, v = line.split('=',1)
k = k.strip()
v = v.strip()
if k not in checked:
checked.append(k)
if k == self.args['name']:
if self.args['state'] == "present":
new_line = "%s=%s\n" % (k, self.args['value'])
self.fixed_lines.append(new_line)
else:
new_line = "%s=%s\n" % (k, v)
self.fixed_lines.append(new_line)
if self.args['name'] not in checked and self.args['state'] == "present":
new_line = "%s=%s\n" % (self.args['name'], self.args['value'])
self.fixed_lines.append(new_line)
    # Completely rewrite the sysctl file
    def write_sysctl(self):
        """Atomically replace the sysctl file with self.fixed_lines."""
        # open a tmp file in the same directory so the final move cannot
        # cross filesystems (atomic rename)
        fd, tmp_path = tempfile.mkstemp('.conf', '.ansible_m_sysctl_', os.path.dirname(self.sysctl_file))
        f = open(tmp_path,"w")
        try:
            for l in self.fixed_lines:
                f.write(l.strip() + "\n")
        except IOError, e:
            self.module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, str(e)))
        f.flush()
        f.close()

        # replace the real one
        self.module.atomic_move(tmp_path, self.sysctl_file)
# ==============================================================
# main
def main():
    """Module entry point: declare the argument spec, run the sysctl
    reconciliation, and report whether anything changed."""
    # defining module
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(aliases=['key'], required=True),
            value = dict(aliases=['val'], required=False),
            state = dict(default='present', choices=['present', 'absent']),
            reload = dict(default=True, type='bool'),
            sysctl_set = dict(default=False, type='bool'),
            ignoreerrors = dict(default=False, type='bool'),
            sysctl_file = dict(default='/etc/sysctl.conf')
        ),
        supports_check_mode=True
    )

    # SysctlModule does all the work in its constructor (see process()).
    result = SysctlModule(module)

    module.exit_json(changed=result.changed)
    sys.exit(0)

# import module snippets
# (the star import supplies AnsibleModule, BOOLEANS_*, get_platform, sys, ...)
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
sebrandon1/nova | nova/tests/unit/cert/test_rpcapi.py | 18 | 2926 | # Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for nova.cert.rpcapi
"""
import mock
from oslo_config import cfg
from nova.cert import rpcapi as cert_rpcapi
from nova import context
from nova import test
CONF = cfg.CONF


class CertRpcAPITestCase(test.NoDBTestCase):
    """Exercises every CertAPI RPC method against a mocked oslo.messaging
    client, asserting each call is dispatched with the expected method
    name and keyword arguments."""

    def _test_cert_api(self, method, **kwargs):
        ctxt = context.RequestContext('fake_user', 'fake_project')
        rpcapi = cert_rpcapi.CertAPI()
        self.assertIsNotNone(rpcapi.client)
        self.assertEqual(CONF.cert_topic, rpcapi.client.target.topic)

        # Saved before patching so version checks can still reach the real
        # client below.
        orig_prepare = rpcapi.client.prepare

        with test.nested(
            mock.patch.object(rpcapi.client, 'call'),
            mock.patch.object(rpcapi.client, 'prepare'),
            mock.patch.object(rpcapi.client, 'can_send_version'),
        ) as (
            rpc_mock, prepare_mock, csv_mock
        ):
            prepare_mock.return_value = rpcapi.client
            rpc_mock.return_value = 'foo'
            # Delegate version negotiation to the un-mocked client so the
            # API's real compatibility logic still runs.
            csv_mock.side_effect = (
                lambda v: orig_prepare().can_send_version())

            retval = getattr(rpcapi, method)(ctxt, **kwargs)
            self.assertEqual(rpc_mock.return_value, retval)

            prepare_mock.assert_called_once_with()
            rpc_mock.assert_called_once_with(ctxt, method, **kwargs)

    def test_revoke_certs_by_user(self):
        self._test_cert_api('revoke_certs_by_user', user_id='fake_user_id')

    def test_revoke_certs_by_project(self):
        self._test_cert_api('revoke_certs_by_project',
                            project_id='fake_project_id')

    def test_revoke_certs_by_user_and_project(self):
        self._test_cert_api('revoke_certs_by_user_and_project',
                            user_id='fake_user_id',
                            project_id='fake_project_id')

    def test_generate_x509_cert(self):
        self._test_cert_api('generate_x509_cert',
                            user_id='fake_user_id',
                            project_id='fake_project_id')

    def test_fetch_ca(self):
        self._test_cert_api('fetch_ca', project_id='fake_project_id')

    def test_fetch_crl(self):
        self._test_cert_api('fetch_crl', project_id='fake_project_id')

    def test_decrypt_text(self):
        self._test_cert_api('decrypt_text',
                            project_id='fake_project_id', text='blah')
| apache-2.0 |
solent-eng/solent | solent/ext/windows_form_grid_console/ext.py | 2 | 1387 | # // license
# Copyright 2016, Free Software Foundation.
#
# This file is part of Solent.
#
# Solent is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# Solent is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# Solent. If not, see <http://www.gnu.org/licenses/>.
from .impl_grid_console import ImplGridConsole
from solent import log
def init_ext(zero_h, cb_grid_console_splat, cb_grid_console_kevent, cb_grid_console_mevent, cb_grid_console_closed, engine, width, height):
    """Extension entry point: build an ImplGridConsole, zero it with the
    supplied handle, callbacks, engine and dimensions, and return its form
    (the object callers interact with)."""
    impl_grid_console = ImplGridConsole()
    impl_grid_console.zero(
        zero_h=zero_h,
        cb_grid_console_splat=cb_grid_console_splat,
        cb_grid_console_kevent=cb_grid_console_kevent,
        cb_grid_console_mevent=cb_grid_console_mevent,
        cb_grid_console_closed=cb_grid_console_closed,
        engine=engine,
        width=width,
        height=height)
    #
    form_grid_console = impl_grid_console.form
    return form_grid_console
| lgpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.