code
stringlengths
1
199k
import math  # NOTE(review): unused here; kept in case other code in this file relies on it

# Default modulus used by the original contest-style script.
MOD = 20132013


def PowerMod(a, n, ret=1, mod=MOD):
    """Return (ret * a**n) % mod using recursive exponentiation by squaring.

    @param a: base
    @param n: non-negative integer exponent
    @param ret: accumulator / initial remainder (default 1, as in the script)
    @param mod: modulus (default 20132013, generalized from the hard-coded value)

    Depth is O(log2 n), so even n = 10**7 recurses only ~24 levels.
    """
    if n == 0:
        return ret
    if n % 2:  # odd exponent: fold one factor of a into the accumulator
        ret = ret * a % mod
    # a^n % m == ((a^2 % m)^(n//2)) % m  -- halve the exponent each step.
    # BUG FIX: the original used n/2, which is float division on Python 3
    # and makes the recursion produce garbage; integer division is required.
    return PowerMod(a * a % mod, n // 2, ret, mod)


if __name__ == "__main__":
    # Original script behavior: print 10000^10000000 mod 20132013.
    print(PowerMod(10000, 10000000, 1))
# Unit tests for the ctauto parser layer: the generic state-machine Parser
# base class and the concrete TemplateParser that splits template text into
# plain Block and MetaBlock (<% ... %>) objects with typed tokens.
# NOTE(review): this file was recovered from a whitespace-collapsed dump;
# the exact run lengths of spaces inside string literals could not be
# recovered and were reconstructed self-consistently -- confirm against VCS.
import unittest

from ctauto.exceptions import CTAutoMissingEndOfMetablockError, \
                              CTAutoBrokenEndOfMetablockError, \
                              CTAutoInvalidMetablockError, \
                              CTAutoInvalidIdError, \
                              CTAutoMissingEndOfStringError, \
                              CTAutoInvalidStringError, \
                              CTAutoIncompleteEscapeSequence, \
                              CTAutoInvalidEscapeSequence, \
                              CTAutoTrailingCharacterAfterQuotedText, \
                              CTAutoInvalidNumberError

from ctauto.blocks import Block, MetaBlock
from ctauto.tokens import SimpleTextToken, QuotedTextToken, NumericToken, \
                          DotToken, LeftSquareBracketToken, RightSquareBracketToken

from ctauto.parser import EndOfFileCharacter, Parser, TemplateParser

# A small C template containing four metablocks interleaved with plain text.
_TEST_CONTENT = "<% metacode 1 %>\n" \
                "#include <stdio.h>\n" \
                "\n" \
                "int main(void)\n" \
                "{\n" \
                "    <% metacode 2 %>\n" \
                "    // <% metacode 3 %>\n" \
                "    return 0;\n" \
                "    <% metacode 4 . [ 1 ] %>\n" \
                "}\n"


class TestParser(unittest.TestCase):
    def test_parse(self):
        # A minimal Parser subclass that records the state-machine callbacks:
        # first/second fire once each, third runs until end of input.
        class TestParser(Parser):
            def reset(self, content, source):
                self.source = source
                self.content = content
                self.indexes = []
                self.characters = []
                return self.first

            def finalize(self):
                return self.indexes, self.characters

            def first(self, index, character):
                self.indexes.append(index)
                self.characters.append(character)
                return self.second

            def second(self, index, character):
                self.indexes.append(index)
                self.characters.append(character)
                return self.third

            def third(self, index, character):
                # The sentinel EndOfFileCharacter terminates the scan.
                if character is EndOfFileCharacter:
                    self.indexes.append(index)
                    self.characters.append(character)
                    return

                # Overwrite the last slot so only the final character survives.
                self.indexes[-1] = index
                self.characters[-1] = character
                return self.third

        parser = TestParser()
        indexes, characters = parser.parse(_TEST_CONTENT, "test")

        self.assertEqual(parser.source, "test")
        self.assertEqual(parser.content, _TEST_CONTENT)

        length = len(_TEST_CONTENT)
        # first char, last real char, then the EOF sentinel one past the end.
        self.assertEqual(indexes, [0, length-1, length])
        self.assertEqual(characters, ['<', '\n', EndOfFileCharacter])


class TestTemplateParser(unittest.TestCase):
    def test_template_parse(self):
        parser = TemplateParser()
        blocks = parser.parse(_TEST_CONTENT, "test")

        self.assertEqual(parser.source, "test")
        self.assertEqual(parser.content, _TEST_CONTENT)
        # 4 metablocks interleaved with 4 plain-text blocks.
        self.assertEqual(len(blocks), 8)

        block = blocks[0]
        self.assertIsInstance(block, MetaBlock)
        self.assertEqual(block.content, " metacode 1 ")
        self.assertEqual(block.tokens, [SimpleTextToken(1, "metacode"), NumericToken(1, "1")])

        block = blocks[1]
        self.assertIsInstance(block, Block)
        self.assertEqual(block.content, "\n"
                                        "#include <stdio.h>\n"
                                        "\n"
                                        "int main(void)\n"
                                        "{\n"
                                        "    ")

        block = blocks[2]
        self.assertIsInstance(block, MetaBlock)
        self.assertEqual(block.content, " metacode 2 ")
        self.assertEqual(block.tokens, [SimpleTextToken(6, "metacode"), NumericToken(6, "2")])

        block = blocks[3]
        self.assertIsInstance(block, Block)
        self.assertEqual(block.content, "\n"
                                        "    // ")

        block = blocks[4]
        self.assertIsInstance(block, MetaBlock)
        self.assertEqual(block.content, " metacode 3 ")
        self.assertEqual(block.tokens, [SimpleTextToken(7, "metacode"), NumericToken(7, "3")])

        block = blocks[5]
        self.assertIsInstance(block, Block)
        self.assertEqual(block.content, "\n"
                                        "    return 0;\n"
                                        "    ")

        block = blocks[6]
        self.assertIsInstance(block, MetaBlock)
        self.assertEqual(block.content, " metacode 4 . [ 1 ] ")
        self.assertEqual(block.tokens, [SimpleTextToken(9, "metacode"),
                                        NumericToken(9, "4"),
                                        DotToken(9),
                                        LeftSquareBracketToken(9),
                                        NumericToken(9, "1"),
                                        RightSquareBracketToken(9)])

        block = blocks[7]
        self.assertIsInstance(block, Block)
        self.assertEqual(block.content, "\n"
                                        "}\n")

    def test_invalid_ends_of_metablock(self):
        parser = TemplateParser()

        # "%" never followed by ">" before EOF.
        with self.assertRaises(CTAutoMissingEndOfMetablockError):
            parser.parse("<% %", "test")

        # "%" followed by something other than ">".
        with self.assertRaises(CTAutoBrokenEndOfMetablockError):
            parser.parse("<% %!", "test")

    def test_invalid_metablock(self):
        parser = TemplateParser()

        with self.assertRaises(CTAutoInvalidMetablockError):
            parser.parse("<% ! %>", "test")

    def test_end_of_metablock_while_skipping_whitespaces(self):
        parser = TemplateParser()

        with self.assertRaises(CTAutoMissingEndOfMetablockError):
            parser.parse(" <% ", "test")

    def test_multiline_metablock(self):
        parser = TemplateParser()

        # Each token records the 1-based line it appeared on.
        blocks = parser.parse("<%\tx\n\ty\n\tz\n\tt%>", "test")
        self.assertEqual(blocks[0].tokens, [SimpleTextToken(1, "x"),
                                            SimpleTextToken(2, "y"),
                                            SimpleTextToken(3, "z"),
                                            SimpleTextToken(4, "t")])

    def test_simple_text_token(self):
        parser = TemplateParser()

        blocks = parser.parse("<%test%>", "test")
        self.assertEqual(blocks[0].tokens, [SimpleTextToken(1, "test")])

        blocks = parser.parse("<% test %>", "test")
        self.assertEqual(blocks[0].tokens, [SimpleTextToken(1, "test")])

        with self.assertRaises(CTAutoMissingEndOfMetablockError):
            parser.parse("<%s test", "test")

        with self.assertRaises(CTAutoInvalidIdError):
            parser.parse("<%s test! %>", "test")

    def test_quoted_text_token(self):
        parser = TemplateParser()

        blocks = parser.parse("<%\"test\"%>", "test")
        self.assertEqual(blocks[0].tokens, [QuotedTextToken(1, "test")])

        # Escape sequences \\ \" \n \t \r are decoded; unknown \a passes through.
        blocks = parser.parse("<% \"test \\\\ \\\"test\\\" \\n \\t \\r \\a\" %>", "test")
        self.assertEqual(blocks[0].tokens, [QuotedTextToken(1, "test \\ \"test\" \n \t \r \\a")])

        with self.assertRaises(CTAutoMissingEndOfStringError):
            parser.parse("<%\"test%>", "test")

        with self.assertRaises(CTAutoInvalidStringError):
            parser.parse("<%\"test\n%>", "test")

        with self.assertRaises(CTAutoIncompleteEscapeSequence):
            parser.parse("<% \"test \\", "test")

        with self.assertRaises(CTAutoInvalidEscapeSequence):
            parser.parse("<% \"test \\\n test\" %>", "test")

        with self.assertRaises(CTAutoMissingEndOfMetablockError):
            parser.parse("<% \"test\"", "test")

        with self.assertRaises(CTAutoTrailingCharacterAfterQuotedText):
            parser.parse("<% \"test\"test %>", "test")

    def test_numeric_token(self):
        parser = TemplateParser()

        blocks = parser.parse("<% 1234567890 %>", "test")
        self.assertEqual(blocks[0].tokens, [NumericToken(1, "1234567890")])

        blocks = parser.parse("<%1234567890%>", "test")
        self.assertEqual(blocks[0].tokens, [NumericToken(1, "1234567890")])

        with self.assertRaises(CTAutoMissingEndOfMetablockError):
            parser.parse("<%1234567890", "test")

        with self.assertRaises(CTAutoInvalidNumberError):
            parser.parse("<% 1234567890test %>", "test")

    def test_simple_token_as_terminator(self):
        parser = TemplateParser()

        # Punctuation tokens (. [ ]) terminate the preceding token in place.
        blocks = parser.parse("<% test.test %>", "test")
        self.assertEqual(blocks[0].tokens, [SimpleTextToken(1, "test"),
                                            DotToken(1),
                                            SimpleTextToken(1, "test")])

        blocks = parser.parse("<% 1234567890[test %>", "test")
        self.assertEqual(blocks[0].tokens, [NumericToken(1, "1234567890"),
                                            LeftSquareBracketToken(1),
                                            SimpleTextToken(1, "test")])

        blocks = parser.parse("<% \"test\"]test %>", "test")
        self.assertEqual(blocks[0].tokens, [QuotedTextToken(1, "test"),
                                            RightSquareBracketToken(1),
                                            SimpleTextToken(1, "test")])


# Aggregate suite used by the project's test runner.
test_suite = unittest.TestSuite([unittest.defaultTestLoader.loadTestsFromTestCase(TestParser),
                                 unittest.defaultTestLoader.loadTestsFromTestCase(TestTemplateParser)])

if __name__ == '__main__':
    unittest.main()
"""Basic tests for the CherryPy core: request handling.""" from cherrypy.test import test test.prefer_parent_path() import os localDir = os.path.dirname(__file__) import cherrypy access_log = os.path.join(localDir, "access.log") error_log = os.path.join(localDir, "error.log") tartaros = u'\u03a4\u1f71\u03c1\u03c4\u03b1\u03c1\u03bf\u03c2' erebos = u'\u0388\u03c1\u03b5\u03b2\u03bf\u03c2.com' def setup_server(): class Root: def index(self): return "hello" index.exposed = True def uni_code(self): cherrypy.request.login = tartaros cherrypy.request.remote.name = erebos uni_code.exposed = True def slashes(self): cherrypy.request.request_line = r'GET /slashed\path HTTP/1.1' slashes.exposed = True def whitespace(self): # User-Agent = "User-Agent" ":" 1*( product | comment ) # comment = "(" *( ctext | quoted-pair | comment ) ")" # ctext = <any TEXT excluding "(" and ")"> # TEXT = <any OCTET except CTLs, but including LWS> # LWS = [CRLF] 1*( SP | HT ) cherrypy.request.headers['User-Agent'] = 'Browzuh (1.0\r\n\t\t.3)' whitespace.exposed = True def as_string(self): return "content" as_string.exposed = True def as_yield(self): yield "content" as_yield.exposed = True def error(self): raise ValueError() error.exposed = True error._cp_config = {'tools.log_tracebacks.on': True} root = Root() cherrypy.config.update({'log.error_file': error_log, 'log.access_file': access_log, }) cherrypy.tree.mount(root) from cherrypy.test import helper, logtest class AccessLogTests(helper.CPWebCase, logtest.LogCase): logfile = access_log def testNormalReturn(self): self.markLog() self.getPage("/as_string", headers=[('Referer', 'http://www.cherrypy.org/'), ('User-Agent', 'Mozilla/5.0')]) self.assertBody('content') self.assertStatus(200) intro = '%s - - [' % self.interface() self.assertLog(-1, intro) if [k for k, v in self.headers if k.lower() == 'content-length']: self.assertLog(-1, '] "GET %s/as_string HTTP/1.1" 200 7 ' '"http://www.cherrypy.org/" "Mozilla/5.0"' % self.prefix()) else: 
self.assertLog(-1, '] "GET %s/as_string HTTP/1.1" 200 - ' '"http://www.cherrypy.org/" "Mozilla/5.0"' % self.prefix()) def testNormalYield(self): self.markLog() self.getPage("/as_yield") self.assertBody('content') self.assertStatus(200) intro = '%s - - [' % self.interface() self.assertLog(-1, intro) if [k for k, v in self.headers if k.lower() == 'content-length']: self.assertLog(-1, '] "GET %s/as_yield HTTP/1.1" 200 7 "" ""' % self.prefix()) else: self.assertLog(-1, '] "GET %s/as_yield HTTP/1.1" 200 - "" ""' % self.prefix()) def testEscapedOutput(self): # Test unicode in access log pieces. self.markLog() self.getPage("/uni_code") self.assertStatus(200) self.assertLog(-1, repr(tartaros.encode('utf8'))[1:-1]) # Test the erebos value. Included inline for your enlightenment. # Note the 'r' prefix--those backslashes are literals. self.assertLog(-1, r'\xce\x88\xcf\x81\xce\xb5\xce\xb2\xce\xbf\xcf\x82') # Test backslashes in output. self.markLog() self.getPage("/slashes") self.assertStatus(200) self.assertLog(-1, r'"GET /slashed\\path HTTP/1.1"') # Test whitespace in output. self.markLog() self.getPage("/whitespace") self.assertStatus(200) # Again, note the 'r' prefix. self.assertLog(-1, r'"Browzuh (1.0\r\n\t\t.3)"') class ErrorLogTests(helper.CPWebCase, logtest.LogCase): logfile = error_log def testTracebacks(self): # Test that tracebacks get written to the error log. self.markLog() ignore = helper.webtest.ignored_exceptions ignore.append(ValueError) try: self.getPage("/error") self.assertInBody("raise ValueError()") self.assertLog(0, 'HTTP Traceback (most recent call last):') self.assertLog(-3, 'raise ValueError()') finally: ignore.pop() if __name__ == '__main__': helper.testmain()
from typing import Optional

from django.contrib.auth.models import User

from jba_core import exceptions


def get_user_by_credentials(username: str, password: str) -> Optional[User]:
    """Return the User matching *username* if *password* verifies.

    @param username: login name to look up
    @param password: plaintext password to check against the stored hash
    @return: the matching User instance
    @raise exceptions.UserNotFound: no user with that username exists
    @raise exceptions.IncorrectCredentials: the password does not match
    @raise exceptions.SomethingWrong: any unexpected failure during lookup
    """
    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        raise exceptions.UserNotFound
    except Exception:
        # BUG FIX: the original bare `except:` sat after the password check
        # inside the same try-block, so the IncorrectCredentials raised there
        # was caught and re-reported as SomethingWrong (and KeyboardInterrupt/
        # SystemExit were swallowed too). The password check now lives outside
        # the try so the intended exception propagates unchanged.
        raise exceptions.SomethingWrong
    if not user.check_password(password):
        raise exceptions.IncorrectCredentials
    return user
from vsg.rules import token_indent
from vsg import token

# The single token whose indentation this rule validates: the *generic*
# keyword of a generic clause.
lTokens = [token.generic_clause.generic_keyword]


class rule_002(token_indent):
    '''
    This rule checks the indent of the **generic** keyword.

    **Violation**

    .. code-block:: vhdl

       entity fifo is
       generic (

       entity fifo is
           generic (

    **Fix**

    .. code-block:: vhdl

       entity fifo is
         generic (

       entity fifo is
         generic (
    '''

    def __init__(self):
        # Rule identity: group "generic", number "002", applied to lTokens.
        token_indent.__init__(self, 'generic', '002', lTokens)
import os
import pathlib
import sysconfig
import compileall
import subprocess

# Meson post-install hook: refresh the desktop environment caches and
# byte-compile the installed 'eidisi' package.
prefix = pathlib.Path(os.environ.get('MESON_INSTALL_PREFIX', '/usr/local'))
datadir = prefix / 'share'
destdir = os.environ.get('DESTDIR', '')

# Cache updates only make sense for a real install, not a staged
# packaging build (DESTDIR set).
if not destdir:
    cache_steps = (
        ('Compiling gsettings schemas...',
         ['glib-compile-schemas', str(datadir / 'glib-2.0' / 'schemas')]),
        ('Updating icon cache...',
         ['gtk-update-icon-cache', '-qtf', str(datadir / 'icons' / 'hicolor')]),
        ('Updating desktop database...',
         ['update-desktop-database', '-q', str(datadir / 'applications')]),
    )
    for message, command in cache_steps:
        print(message)
        subprocess.call(command)

print('Compiling python bytecode...')
moduledir = sysconfig.get_path('purelib', vars={'base': str(prefix)})
compileall.compile_dir(destdir + os.path.join(moduledir, 'eidisi'), optimize=2)
""" Module implementing a dialog to enter the data for a copy or rename operation. """ from __future__ import unicode_literals import os.path from PyQt5.QtCore import pyqtSlot from PyQt5.QtWidgets import QDialog, QDialogButtonBox from E5Gui.E5PathPicker import E5PathPickerModes from .Ui_HgCopyDialog import Ui_HgCopyDialog class HgCopyDialog(QDialog, Ui_HgCopyDialog): """ Class implementing a dialog to enter the data for a copy or rename operation. """ def __init__(self, source, parent=None, move=False): """ Constructor @param source name of the source file/directory (string) @param parent parent widget (QWidget) @param move flag indicating a move operation (boolean) """ super(HgCopyDialog, self).__init__(parent) self.setupUi(self) self.source = source if os.path.isdir(self.source): self.targetPicker.setMode(E5PathPickerModes.DirectoryMode) else: self.targetPicker.setMode(E5PathPickerModes.SaveFileMode) if move: self.setWindowTitle(self.tr('Mercurial Move')) else: self.forceCheckBox.setEnabled(False) self.sourceEdit.setText(source) self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False) msh = self.minimumSizeHint() self.resize(max(self.width(), msh.width()), msh.height()) def getData(self): """ Public method to retrieve the copy data. @return the target name (string) and a flag indicating the operation should be enforced (boolean) """ target = self.targetPicker.text() if not os.path.isabs(target): sourceDir = os.path.dirname(self.sourceEdit.text()) target = os.path.join(sourceDir, target) return target, self.forceCheckBox.isChecked() @pyqtSlot(str) def on_targetPicker_textChanged(self, txt): """ Private slot to handle changes of the target. @param txt contents of the target edit (string) """ self.buttonBox.button(QDialogButtonBox.Ok).setEnabled( os.path.isabs(txt) or os.path.dirname(txt) == "")
# Download utilities adapted from keras: a py2 urlretrieve shim, a console
# progress bar (Progbar), and get_file() which downloads and optionally
# untars a dataset into a cache directory.
from __future__ import print_function, division, absolute_import

import os
import sys
import time
import shutil
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import URLError, HTTPError
import tarfile
import platform

import numpy as np

if sys.version_info[0] == 2:
    # Python 2 lacks a reporthook-friendly urlretrieve with streaming reads,
    # so provide one built on urlopen.
    def urlretrieve(url, filename, reporthook=None, data=None):
        '''
        This function is adpated from: https://github.com/fchollet/keras
        Original work Copyright (c) 2014-2015 keras contributors
        '''
        def chunk_read(response, chunk_size=8192, reporthook=None):
            # NOTE(review): assumes the server sends Content-Length --
            # .get(...) returning None would crash on .strip(); confirm
            # acceptable for the URLs this is used with.
            total_size = response.info().get('Content-Length').strip()
            total_size = int(total_size)
            count = 0
            while 1:
                chunk = response.read(chunk_size)
                if not chunk:
                    break
                count += 1
                if reporthook:
                    reporthook(count, chunk_size, total_size)
                yield chunk

        response = urlopen(url, data)
        with open(filename, 'wb') as fd:
            for chunk in chunk_read(response, reporthook=reporthook):
                fd.write(chunk)
else:
    from six.moves.urllib.request import urlretrieve


class Progbar(object):
    '''
    Console progress bar.

    This function is adpated from: https://github.com/fchollet/keras
    Original work Copyright (c) 2014-2015 keras contributors
    Modified work Copyright 2016-2017 TrungNT
    '''

    def __init__(self, target, title=''):
        '''
        @param target: total number of steps expected
        '''
        self.width = 39                # width of the '=' bar portion
        self.target = target
        self.sum_values = {}           # name -> [weighted sum, weight]
        self.unique_values = []        # names in first-seen order
        self.start = time.time()
        self.total_width = 0           # chars written on the current line
        self.seen_so_far = 0
        self.title = title

    def update(self, current, values=[]):
        '''
        @param current: index of current step
        @param values: list of tuples (name, value_for_last_step).
            The progress bar will display averages for these values.

        NOTE(review): the mutable default `values=[]` is never mutated here,
        so it is harmless, but it is a classic hazard -- consider `values=None`.
        '''
        # Accumulate a step-weighted running average for each named value.
        for k, v in values:
            if k not in self.sum_values:
                self.sum_values[k] = [v * (current - self.seen_so_far),
                                      current - self.seen_so_far]
                self.unique_values.append(k)
            else:
                self.sum_values[k][0] += v * (current - self.seen_so_far)
                self.sum_values[k][1] += (current - self.seen_so_far)
        self.seen_so_far = current

        now = time.time()
        prev_total_width = self.total_width
        # Rewind the cursor over the previously drawn line.
        sys.stdout.write("\b" * prev_total_width)
        sys.stdout.write("\r")

        # NOTE(review): log10 fails for target <= 0 -- assumes a positive
        # total step count; confirm callers guarantee this.
        numdigits = int(np.floor(np.log10(self.target))) + 1
        barstr = '%s %%%dd/%%%dd [' % (self.title, numdigits, numdigits)
        bar = barstr % (current, self.target)
        prog = float(current) / self.target
        prog_width = int(self.width * prog)
        if prog_width > 0:
            bar += ('=' * (prog_width - 1))
            # '>' marks an in-progress bar; a final '=' closes a finished one.
            if current < self.target:
                bar += '>'
            else:
                bar += '='
        bar += ('.' * (self.width - prog_width))
        bar += ']'
        sys.stdout.write(bar)
        self.total_width = len(bar)

        if current:
            time_per_unit = (now - self.start) / current
        else:
            time_per_unit = 0
        eta = time_per_unit * (self.target - current)
        info = ''
        if current < self.target:
            info += ' - ETA: %ds' % eta
        else:
            info += ' - %ds' % (now - self.start)
        for k in self.unique_values:
            info += ' - %s:' % k
            if type(self.sum_values[k]) is list:
                avg = self.sum_values[k][0] / max(1, self.sum_values[k][1])
                # Small magnitudes switch to scientific notation.
                if abs(avg) > 1e-3:
                    info += ' %.4f' % avg
                else:
                    info += ' %.4e' % avg
            else:
                info += ' %s' % self.sum_values[k]

        self.total_width += len(info)
        # Pad with spaces so remnants of a longer previous line are erased.
        if prev_total_width > self.total_width:
            info += ((prev_total_width - self.total_width) * " ")

        sys.stdout.write(info)
        if current >= self.target:
            if "Linux" in platform.platform():
                sys.stdout.write("\n\n")
            else:
                sys.stdout.write("\n")
        sys.stdout.flush()

    def add(self, n, values=[]):
        # Advance the bar by n steps.
        self.update(self.seen_so_far + n, values)


def get_file(fname, origin, untar=False, datadir=None):
    '''
    Download `origin` into the cache directory unless already present,
    optionally extracting a .tar.gz archive.

    This function is adpated from: https://github.com/fchollet/keras
    Original work Copyright (c) 2014-2015 keras contributors
    Modified work Copyright 2016-2017 TrungNT

    Return
    ------
    file path of the downloaded file
    '''
    # ====== check valid datadir ====== #
    if datadir is None:
        datadir = os.path.join(os.path.expanduser('~'), '.bay2')
        if not os.path.exists(datadir):
            os.mkdir(datadir)
    elif not os.path.exists(datadir):
        raise ValueError('Cannot find folder at path:' + str(datadir))
    # ====== download the file ====== #
    if untar:
        untar_fpath = os.path.join(datadir, fname)
        fpath = untar_fpath + '.tar.gz'
    else:
        fpath = os.path.join(datadir, fname)

    if not os.path.exists(fpath):
        print('Downloading data from', origin)
        # Module-level progress bar shared with the dl_progress callback.
        global _progbar
        _progbar = None

        def dl_progress(count, block_size, total_size):
            global _progbar
            if _progbar is None:
                _progbar = Progbar(total_size)
            else:
                _progbar.update(count * block_size)

        error_msg = 'URL fetch failure on {}: {} -- {}'
        try:
            try:
                urlretrieve(origin, fpath, dl_progress)
            except URLError as e:
                raise Exception(error_msg.format(origin, e.errno, e.reason))
            except HTTPError as e:
                raise Exception(error_msg.format(origin, e.code, e.msg))
        except (Exception, KeyboardInterrupt) as e:
            # Never leave a partial download behind.
            if os.path.exists(fpath):
                os.remove(fpath)
            raise
        _progbar = None

    if untar:
        if not os.path.exists(untar_fpath):
            print('Untaring file...')
            tfile = tarfile.open(fpath, 'r:gz')
            try:
                tfile.extractall(path=datadir)
            except (Exception, KeyboardInterrupt) as e:
                # Remove a partially extracted file/tree before re-raising.
                if os.path.exists(untar_fpath):
                    if os.path.isfile(untar_fpath):
                        os.remove(untar_fpath)
                    else:
                        shutil.rmtree(untar_fpath)
                raise
            tfile.close()
        return untar_fpath
    return fpath
# Entry point for the dunces-and-dungeons game: handles update checks,
# version bookkeeping, the intro banner, and starting a new game in either
# console or web mode.  Python 2 code (print statements).
# NOTE(review): recovered from a whitespace-collapsed dump; the ASCII-art
# banner's spacing below is reconstructed approximately -- confirm against VCS.
from dungeon.dungeon import Dungeon, Hub
from entity.player.players import Player, Party
import entity.item.items as items
import sys, os
import base
import web.server

# dill is optional; without it, save/load support is disabled.
try:
    import dill
except:
    dill = None

# The single shared party for the whole session.
PARTY = Party()


class Manager:
    def __init__(self):
        # True once update_check() has run this session.
        self.checked = False

    def get_current_release(self):
        # Query GitHub for the latest release tag; returns None on any failure.
        latest = None
        try:
            import requests
            latest = requests.get('https://api.github.com/repos/microwaveabletoaster/dunces-and-dungeons/releases/latest').json()['tag_name']
        except:
            base.put("could not reach the update service :'(")
        return latest

    def update_check(self):
        # Compare the installed RELEASE_ID against the latest GitHub release.
        base.put('checking for update...')
        latest = self.get_current_release()
        if latest:
            if latest == self.RELEASE_ID:
                base.put('you\'re up to date!')
            else:
                base.put("---------------=====UPDATE!!=====-----------\nan update to dunces and dungeons has been released! \ngo download it now from here: https://github.com/microwaveabletoaster/dunces-and-dungeons/releases \nit probably contains super important bugfixes and or more neat features, so don't dawdle!! \n\n<3 the team\n")
        self.checked = True

    def main(self, webbed=False):
        # @param webbed: True to run behind the bundled web server.
        self.webbed = webbed
        if webbed:  # ha amphibian joke
            base.IS_WEB_VERSION = True
            base.SERVER = web.server
            web.server.party = PARTY
            print 'MOVED ON'
        base.BASE_DIR = os.path.dirname(os.path.abspath(__file__))
        ver = []
        # Read (or seed) the locally stored version number.
        with open('%s/version.dunce' % base.BASE_DIR, 'r+') as f:
            contents = f.read()
            # NOTE(review): `is ''` is identity comparison -- works only by
            # CPython interning accident; should be `== ''`.
            if contents is '':
                base.put('writing')
                f.write(self.get_current_release().replace('.', ' ').replace('v', ''))
            ver = contents.split(' ')
        self.RELEASE_ID = ('v%s.%s.%s' % (ver[0], ver[1], ver[2])).strip()
        if not self.checked:
            self.update_check()
        go = True
        # Banner reconstructed approximately (see module note above).
        intro = """
 ______   |\     /|  _______   _______   _______
( __  \   | )   ( | ( (    /| ( ____ \ ( ____ \ ( ____ \\
| (  \ )  | |   | | |  \  ( | | (    \/ | (    \/ | (    \/
| |   ) | | |   | | |   \ | | | |       | (__     | (_____
| |   | | | |   | | | (\ \) | | |       |  __)    (_____  )
| |   ) | | |   | | | | \   | | |       | (             ) |
| (__/  ) | (___) | | )  \  | | (____/\ | (____/\ /\____) |
(______/  (_______) |/    )_) (_______/ (_______/ \_______)

                  _______   _        ______
                 (  ___  ) ( (    /| (  __  \\
                 | (   ) | |  \  ( | | (  \  )
                 | (___) | |   \ | | | |   ) |
                 |  ___  | | (\ \) | | |   | |
                 | (   ) | | | \   | | |   ) |
                 | )   ( | | )  \  | | (__/  )
                 |/     \| |/    )_) (______/

 ______   |\     /|  _______   _______   _______   _______   _        _______
( __  \   | )   ( | ( (    /| ( ____ \ ( ____ \ ( ___  )  ( (    /| ( ____ \\
| (  \ )  | |   | | |  \  ( | | (    \/ | (    \/ | (   ) | |  \  ( | | (    \/
| |   ) | | |   | | |   \ | | | |       | (__     | |   | | |   \ | | | (_____
| |   | | | |   | | | (\ \) | | | ____  |  __)    | |   | | | (\ \) | (_____  )
| |   ) | | |   | | | | \   | | | \_  ) | (       | |   | | | | \   |       ) |
| (__/  ) | (___) | | )  \  | | (___) | | (____/\ | (___) | | )  \  | /\____) |
(______/  (_______) |/    )_) (_______) (_______/ (_______) |/    )_) \_______)

 copyleft (c) 2016 John Dikeman and Cameron Egger
"""
        base.put(intro)
        cho = 0
        # most of this code is super redundant cause cho is hardcoded but do i care? nope lol.
        # NOTE(review): `cho is 0` / `cho is 1` rely on small-int interning;
        # should be `==`.  cho is hardcoded to 0, so only new_game() runs.
        if cho is not None:
            if cho is 0:
                self.new_game()
            if cho is 1:
                # Load a saved session (dead branch while cho == 0).
                li = []
                if os.path.exists('%s/saves/' % base.BASE_DIR):
                    for dirpath, dirname, filename in os.walk('%s/saves/' % base.BASE_DIR):
                        for fi in filename:
                            if '.dunce' in fi:
                                li.append(fi)
                else:
                    base.put('no saves to choose from!')
                op = base.make_choice(li, "savefile")
                if dill:
                    if op is not None:
                        go = False
                        base.put('loading session')
                        dill.load_session('%s/saves/%s' % (base.BASE_DIR, li[op]))
                else:
                    base.put('save/load support is disabled because you haven\'t installed dill!')

    def new_game(self):
        # PARTY.current_dungeon.start()
        # NOTE(review): both branches below are identical; the webbed split
        # looks vestigial.
        if self.webbed:
            party_size = base.get_input('enter the size of your party: ')
            if int(party_size) is 0:
                base.put("you can't play with zero people, dingus")
                sys.exit()
            # creating all the players in the party
            for a in range(int(party_size)):
                name = base.get_input('enter the name of player %d: ' % a)
                PARTY.add_player(Player(name))
            base.put('Game Start')
            base.put(PARTY.to_str())
            dungeon = Hub(PARTY)
            PARTY.hub = dungeon
            PARTY.current_dungeon = dungeon
            PARTY.current_dungeon.start()
            # Main turn loop: players act, then monsters, until PARTY.end flips.
            while(PARTY.end):
                PARTY.handle_player_turn()
                if(PARTY.end):
                    PARTY.current_dungeon.handle_monster_turn()
            base.put("\n\n------------=========GAME OVER=========------------")
        else:
            party_size = base.get_input('enter the size of your party: ')
            if int(party_size) is 0:
                base.put("you can't play with zero people, dingus")
                sys.exit()
            # creating all the players in the party
            for a in range(int(party_size)):
                name = base.get_input('enter the name of player %d: ' % a)
                PARTY.add_player(Player(name))
            base.put('Game Start')
            base.put(PARTY.to_str())
            dungeon = Hub(PARTY)
            PARTY.hub = dungeon
            PARTY.current_dungeon = dungeon
            PARTY.current_dungeon.start()
            while(PARTY.end):
                PARTY.handle_player_turn()
                if(PARTY.end):
                    PARTY.current_dungeon.handle_monster_turn()
            base.put("\n\n------------=========GAME OVER=========------------")


if __name__ == '__main__':
    game = Manager()
    try:
        # 'web' as the first CLI argument selects web mode; no argument
        # falls through to console mode via the IndexError handler.
        if sys.argv[1] == 'web':
            print 'initializing web server. point your browser to http://localhost:5000.'
            game.main(True)
        else:
            game.main()
    except IndexError:
        game.main()
import os import sys import shutil import straight.plugin import numpy as np import pkg_resources from os import path from core import utils from core import argparser from core import log from core import parser def main(): ## Parse arguments ap = argparser.init_arg_parser() options = ap.parse_args() ## Collect input gbks from folder input_files = [] if not path.isdir(options["input_folder"]): log.error("Specified folder didn't exist '%s'" % (options["input_folder"])) sys.exit(1) else: for filename in os.listdir(options["input_folder"]): filepath = path.join(options["input_folder"], filename) if not path.isdir(filepath): ext = path.splitext(filepath)[1][1:] if ext in ["gbk"]: input_files.append(filename) ## Initial check parameters metadata = {} if options["mode"] == "train": ## check and load metadata file if not path.exists(options["training_metadata"]): log.error("Specified file didn't exist '%s'" % (options["training_metadata"])) sys.exit(1) else: metadata = parser.parse_training_metadata(options["training_metadata"]) options["single_values"] = [[]] * len(input_files) options["train_set"] = [] options["test_set"] = [] # remove GBKs not listed in metadata input_files[:] = [bgc for bgc in input_files if utils.get_bgc_name(bgc) in metadata["bgc"]] # features if "features" not in options: if "features" not in metadata: options["features"] = [{"name": plugin.name, "params": [], "subs": [sub for sub in plugin.features]} for plugin in utils.load_plugins("feature_extraction")] else: options["features"] = metadata["features"] # algorithm mode (classification / regression) if metadata["mode"] == "CLASSIFICATION": options["algo_mode"] = "classification" if "algorithm" not in options: if "algorithm" not in metadata: options["algorithm"] = {"name": "svm", "params": []} else: options["algorithm"] = metadata["algorithm"] elif metadata["mode"] == "REGRESSION": options["algo_mode"] = "regression" if "algorithm" not in options: if "algorithm" not in metadata: 
options["algorithm"] = {"name": "linear_regression", "params": []} else: options["algorithm"] = metadata["algorithm"] else: log.error("Incorrect metadata file format '%s'" % (options["training_metadata"])) sys.exit(1) # single values (from right hand side of data column) & train/test set distribution for i, fp in enumerate(input_files): bgc_id = utils.get_bgc_name(fp) if bgc_id in metadata["bgc"]: idx_meta = metadata["bgc"].index(bgc_id) options["single_values"][i] = metadata["single_values"][idx_meta] if idx_meta in metadata["train_set"]: options["train_set"].append(i) if idx_meta in metadata["test_set"]: options["test_set"].append(i) else: log.error("'%s' is not included in your metadata" % (bgc_id)) sys.exit(1) # pair values for training set (from its own table from the metadata) options["train_pair_values"] = [[None] * len(options["train_set"]) for _ in range(len(options["train_set"]))] for i, idx1 in enumerate(options["train_set"]): for j, idx2 in enumerate(options["train_set"]): if len(metadata["train_pair_values"]) > i and len(metadata["train_pair_values"][i]) > j: options["train_pair_values"][i][j] = metadata["train_pair_values"][i][j] # pair values for test set (from its own table from the metadata) options["test_pair_values"] = [[None] * len(options["test_set"]) for _ in range(len(options["test_set"]))] for i, idx1 in enumerate(options["test_set"]): for j, idx2 in enumerate(options["test_set"]): if len(metadata["test_pair_values"]) > i and len(metadata["test_pair_values"][i]) > j: options["test_pair_values"][i][j] = metadata["test_pair_values"][i][j] if options["mode"] == "predict": ## check and load model file print "..." ## further checks.. algo_type = utils.get_algo_type(options["algorithm"]["name"]) if algo_type not in ["classification", "regression"]: log.error("Selected algorithm '%s' did not exist" % (algo["name"])) sys.exit(1) if options["algo_mode"] != algo_type: log.error("Selected algorithm '%s' is for %s, but the provided data is for %s." 
% (options["algorithm"]["name"], algo_type, options["algo_mode"])) sys.exit(1) options["features_scope"] = "" for idx, feature in enumerate(options["features"]): for plugin in utils.load_plugins("feature_extraction"): if plugin.name == feature["name"]: if len(options["features_scope"]) > 0 and plugin.scope != options["features_scope"]: log.error("You selected features of different scope ('%s:%s', '%s:%s'). Please select only combination of features with the same scope." % (feature["name"], plugin.scope, options["features"][idx - 1]["name"], options["features_scope"])) sys.exit(1) options["features_scope"] = plugin.scope break if len(feature["subs"]) < 1: for plugin in utils.load_plugins("feature_extraction"): if plugin.name == feature["name"]: feature["subs"].extend(plugin.features) break for sub in feature["subs"]: for plugin in utils.load_plugins("feature_extraction"): if plugin.name == feature["name"]: if sub not in plugin.features: log.error("Feature unknown: '%s'" % sub) sys.exit(1) ## Check output folder if not options["output_folder"]: options["output_folder"] = path.join(os.getcwd(), path.basename(options["input_folder"])) if path.isdir(options["output_folder"]): # output folder exist, probable disrupted job if not options["continue"] and not options["overwrite"]: log.error("Output folder '%s' exist. Previous run? use --continue to continue, or --overwrite to start over." % options["output_folder"]) sys.exit(1) elif options["overwrite"]: shutil.rmtree(options["output_folder"]) os.makedirs(options["output_folder"]) elif options["reset_preprocesses"]: bgcjsonpath = path.join(options["output_folder"], "bgcjson") if path.exists(bgcjsonpath): shutil.rmtree(bgcjsonpath) else: os.makedirs(options["output_folder"]) ## Parse gbks ## TODO: multi-threading? 
log.info("Started preprocessing input files..") utils.print_progress(0, len(input_files), prefix='Preprocessing input GBKs..', suffix='', decimals=1) for i, filename in enumerate(input_files): filepath = path.join(options["input_folder"], filename) if not (path.exists(path.join(options["output_folder"], "bgcjson", "%s.bgcjson" % utils.get_bgc_name(filepath)))): bgc = parser.parse_gbk(filepath) if bgc is not None: utils.save_bgcjson(bgc, options["output_folder"]) utils.print_progress(i + 1, len(input_files), prefix='Preprocessing input GBKs..', suffix='', decimals=1, bar_length=100) log.info("Finished preprocessing input files..") ## Do feature extraction # step 1: make folder structure & index file feature_folder = utils.create_feature_folder(input_files, options["output_folder"]) # step 2: traverse FE modules and run algorithms, then save the results feature_extraction_plugins = [] for plugin in utils.load_plugins("feature_extraction"): if ("features" not in options) or (plugin.name in [feature["name"] for feature in options["features"]]): feature_extraction_plugins.append(plugin) # calculate features options["feature_values"] = {} if options["features_scope"] == "pair": log.info("Started feature extraction for all BGC pairs..") nrcomb = len(input_files) * (len(input_files) - 1) / 2 count = 0 utils.print_progress(0, nrcomb, prefix='Feature extraction..', suffix='', decimals=1) for i, fn1 in enumerate(input_files): for j, fn2 in enumerate(input_files): if i < j: bgc1 = parser.parse_bgcjson(path.join(options["output_folder"], "bgcjson", "%s.bgcjson" % utils.get_bgc_name(fn1))) bgc2 = parser.parse_bgcjson(path.join(options["output_folder"], "bgcjson", "%s.bgcjson" % utils.get_bgc_name(fn2))) for plugin in feature_extraction_plugins: if plugin.name not in options["feature_values"]: options["feature_values"][plugin.name] = {} results = plugin.calculate(bgc1, bgc2) options["feature_values"][plugin.name]["%d+%d" % (i, j)] = [float(result) for result in results] count += 
1 utils.print_progress(count, nrcomb, prefix='Feature extraction..', suffix='', decimals=1) elif options["features_scope"] == "single": log.info("Started feature extraction for all BGCs..") count = 0 utils.print_progress(0, len(input_files), prefix='Feature extraction..', suffix='', decimals=1) for i, fn in enumerate(input_files): bgc = parser.parse_bgcjson(path.join(options["output_folder"], "bgcjson", "%s.bgcjson" % utils.get_bgc_name(fn))) for plugin in feature_extraction_plugins: if plugin.name not in options["feature_values"]: options["feature_values"][plugin.name] = {} results = plugin.calculate(bgc) options["feature_values"][plugin.name]["%d" % (i)] = [float(result) for result in results] count += 1 utils.print_progress(count, len(input_files), prefix='Feature extraction..', suffix='', decimals=1) else: log.error("Invalid features scope: '%s'" % options["features_scope"]) sys.exit(1) ## Load features & value matrix features_rows = [] if options["features_scope"] == "pair": for i, fn1 in enumerate(input_files): for j, fn2 in enumerate(input_files): if i < j: features_rows.append([i, j]) elif options["features_scope"] == "single": for i in xrange(0, len(input_files)): features_rows.append([i]) else: log.error("Invalid features scope: '%s'" % options["features_scope"]) sys.exit(1) if "features_columns" not in options: options["features_columns"] = [] for feature in options["features"]: for sub in feature["subs"]: options["features_columns"].append("%s.%s" % (feature["name"], sub)) features_matrix = {} for row_ids in ["+".join([str(row_id) for row_id in row_ids]) for row_ids in features_rows]: row = [None] * len(options["features_columns"]) for plugin in feature_extraction_plugins: plugin_folder = path.join(feature_folder, plugin.name) values = options["feature_values"][plugin.name][row_ids] if (len(values) != len(plugin.features)): # technically impossible to reach this, unless output from calculate != #of results expected log.error("...") sys.exit(1) else: for 
n, col in enumerate(plugin.features): colname = ("%s.%s" % (plugin.name, col)) if colname in options["features_columns"]: row[options["features_columns"].index(colname)] = values[n] features_matrix[row_ids] = row ## Execute algorithms & save results if options["mode"] == "train": ## Fetch feature & values training matrix training_matrix = [] training_target = [] training_rownames = [] if options["features_scope"] == "pair": for i, idx1 in enumerate(options["train_set"]): for j, idx2 in enumerate(options["train_set"]): if idx1 < idx2: training_matrix.append(features_matrix["%d+%d" % (idx1, idx2)]) training_rownames.append("%s+%s" % (utils.get_bgc_name(input_files[idx1]), utils.get_bgc_name(input_files[idx2]))) if options["algo_mode"] == "classification": class1 = options["single_values"][idx1].split(",") class2 = options["single_values"][idx2].split(",") training_target.append(int(len(set(class1) & set(class2)) > 0)) elif options["algo_mode"] == "regression": training_target.append(float(options["train_pair_values"][i][j])) elif options["features_scope"] == "single": for idx in options["train_set"]: training_matrix.append(features_matrix["%d" % (idx)]) training_rownames.append("%s" % (utils.get_bgc_name(input_files[idx1]))) training_target.append(options["single_values"][idx]) training_matrix = np.array(training_matrix) training_target = np.array(training_target) ## Fetch feature & values testing matrix testing_matrix = [] testing_target = [] testing_rownames = [] if options["features_scope"] == "pair": for i, idx1 in enumerate(options["test_set"]): for j, idx2 in enumerate(options["test_set"]): if idx1 < idx2: testing_matrix.append(features_matrix["%d+%d" % (idx1, idx2)]) testing_rownames.append("%s+%s" % (utils.get_bgc_name(input_files[idx1]), utils.get_bgc_name(input_files[idx2]))) if options["algo_mode"] == "classification": class1 = options["single_values"][idx1].split(",") class2 = options["single_values"][idx2].split(",") 
testing_target.append(int(len(set(class1) & set(class2)) > 0)) elif options["algo_mode"] == "regression": testing_target.append(float(options["test_pair_values"][i][j])) elif options["features_scope"] == "single": for idx in options["test_set"]: testing_matrix.append(features_matrix["%d" % (idx)]) testing_rownames.append("%s" % (utils.get_bgc_name(input_files[idx1]))) testing_target.append(options["single_values"][idx]) testing_matrix = np.array(testing_matrix) testing_target = np.array(testing_target) ## Load the training model module = None for plugin in utils.load_plugins(options["algo_mode"]): if plugin.name == options["algorithm"]["name"]: module = plugin break if module == None: log.error("Failed to load module: '%s.%s'" % (options["algo_mode"], options["algorithm"]["name"])) sys.exit(1) else: log.info("Training model...") classifier = module.train(training_matrix, training_target, options["algorithm"]["params"]) # save model & its metadata to file model_metadata = { "mode": options["algo_mode"], "algorithm": options["algorithm"], "features": options["features"], "columns": options["features_columns"], "training_data_count": len(training_matrix), "environment": { "bgc-learn": utils.get_version(), "scikit-learn": pkg_resources.get_distribution("scikit-learn").version, "numpy": pkg_resources.get_distribution("numpy").version, "scipy": pkg_resources.get_distribution("scipy").version, } } save_name = utils.save_result_model(classifier, model_metadata, options["output_folder"]) # calculate accuracies & save summaries result_training = ({}, []) if len(training_matrix) > 0: result_training = module.test(training_matrix, training_target, classifier) utils.save_result_testing("training-%s" % (save_name), training_rownames, options["features_columns"], training_matrix, training_target, result_training, options["output_folder"]) result_testing = ({}, []) if len(testing_matrix) > 0: result_testing = module.test(testing_matrix, testing_target, classifier) 
utils.save_result_testing("testing-%s" % (save_name), testing_rownames, options["features_columns"], testing_matrix, testing_target, result_testing, options["output_folder"]) elif options["mode"] == "predict": print "..." ## Cleanup log.info("Cleaning up..") shutil.rmtree(feature_folder) # remove feature folder ## done log.info("Analysis done. your result is available inside the folder '%s'." % options["output_folder"]) if __name__ == "__main__": main()
from SimpleLexicon import SimpleLexicon
from LOTlib.Evaluation.EvaluationException import RecursionDepthException


class RecursiveLexicon(SimpleLexicon):
    """A lexicon whose word meanings may invoke one another.

    The lexicon analogue of a RecursiveLOTHypothesis: every hypothesis stored
    in self.value must accept a "recurse" callable as its first argument.
    __call__ supplies self.recursive_call for that slot; it counts nesting
    depth and raises RecursionDepthException once the bound is exceeded.
    See Examples.EvenOdd.
    """

    def __init__(self, recursive_depth_bound=10, *args, **kwargs):
        # Maximum nesting of word-to-word calls before evaluation is aborted.
        self.recursive_depth_bound = recursive_depth_bound
        SimpleLexicon.__init__(self, *args, **kwargs)

    def __call__(self, word, *args):
        """Evaluate *word* on *args*, resetting the recursion counter first."""
        self.recursive_call_depth = 0
        hypothesis = self.value[word]
        # Pass the depth-tracking hook as the hypothesis' "recurse" argument,
        # so nested word calls go through recursive_call below.
        return hypothesis(self.recursive_call, *args)

    def recursive_call(self, word, *args):
        """Depth-tracked evaluation used for word-internal recursive calls.

        Raises RecursionDepthException when the call depth exceeds
        self.recursive_depth_bound, letting callers escape runaway recursion.
        """
        self.recursive_call_depth += 1
        if self.recursive_call_depth > self.recursive_depth_bound:
            raise RecursionDepthException
        # print ">>>", self.value[word]
        return self.value[word](self.recursive_call, *args)
# UI definition for the media player's main window, in the layout produced by
# pyuic4 (_fromUtf8/_translate shims, setupUi/retranslateUi).
# NOTE(review): if this module is regenerated from a Designer .ui file, hand
# edits here will be lost — confirm before modifying.
from PyQt4 import QtCore, QtGui

# Unicode helper: PyQt4 with the old QString API exposes QString.fromUtf8;
# newer API levels use native str, so fall back to the identity function.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

# Translation helper: older PyQt4 translate() requires an encoding argument;
# newer PyQt4 drops it, so pick the right signature at import time.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)


class Ui_MainWindow(object):
    """Widget tree for the player window: video frame, subtitle label, seek and
    volume sliders, transport buttons, and a menu bar with its actions."""

    def setupUi(self, MainWindow):
        """Create and lay out every widget/action on *MainWindow*."""
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(764, 593)
        MainWindow.setMinimumSize(QtCore.QSize(650, 500))
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        # Video display surface.
        self.mediaView = QtGui.QFrame(self.centralwidget)
        self.mediaView.setGeometry(QtCore.QRect(0, 0, 461, 231))
        self.mediaView.setStyleSheet(_fromUtf8(""))
        self.mediaView.setFrameShape(QtGui.QFrame.StyledPanel)
        self.mediaView.setFrameShadow(QtGui.QFrame.Raised)
        self.mediaView.setObjectName(_fromUtf8("mediaView"))
        # Subtitle overlay label (white text under the video frame).
        self.subtitle = QtGui.QLabel(self.centralwidget)
        self.subtitle.setGeometry(QtCore.QRect(250, 240, 261, 17))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.subtitle.setFont(font)
        self.subtitle.setStyleSheet(_fromUtf8("color:white;"))
        self.subtitle.setText(_fromUtf8(""))
        self.subtitle.setObjectName(_fromUtf8("subtitle"))
        # Control strip container: seek row (gridLayout_8) above buttons row
        # (gridLayout_4).
        self.controlView = QtGui.QWidget(self.centralwidget)
        self.controlView.setGeometry(QtCore.QRect(30, 270, 661, 130))
        self.controlView.setMinimumSize(QtCore.QSize(510, 130))
        self.controlView.setMaximumSize(QtCore.QSize(16777215, 130))
        self.controlView.setObjectName(_fromUtf8("controlView"))
        self.verticalLayout = QtGui.QVBoxLayout(self.controlView)
        self.verticalLayout.setMargin(0)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.gridLayout_8 = QtGui.QGridLayout()
        self.gridLayout_8.setMargin(1)
        self.gridLayout_8.setObjectName(_fromUtf8("gridLayout_8"))
        # Seek row: elapsed time | seek slider | remaining time.
        self.timeDone = QtGui.QLabel(self.controlView)
        self.timeDone.setMinimumSize(QtCore.QSize(60, 0))
        self.timeDone.setMaximumSize(QtCore.QSize(60, 16777215))
        self.timeDone.setAlignment(QtCore.Qt.AlignCenter)
        self.timeDone.setObjectName(_fromUtf8("timeDone"))
        self.gridLayout_8.addWidget(self.timeDone, 0, 0, 1, 1)
        self.seekBar = QtGui.QSlider(self.controlView)
        self.seekBar.setMinimumSize(QtCore.QSize(365, 18))
        self.seekBar.setMaximumSize(QtCore.QSize(16777215, 18))
        self.seekBar.setOrientation(QtCore.Qt.Horizontal)
        self.seekBar.setObjectName(_fromUtf8("seekBar"))
        self.gridLayout_8.addWidget(self.seekBar, 0, 1, 1, 1)
        self.timeLeft = QtGui.QLabel(self.controlView)
        self.timeLeft.setMinimumSize(QtCore.QSize(60, 18))
        self.timeLeft.setMaximumSize(QtCore.QSize(60, 18))
        self.timeLeft.setAlignment(QtCore.Qt.AlignCenter)
        self.timeLeft.setObjectName(_fromUtf8("timeLeft"))
        self.gridLayout_8.addWidget(self.timeLeft, 0, 2, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout_8)
        # Buttons row: settings cluster | spacer | transport cluster | spacer |
        # mute + volume.
        self.gridLayout_4 = QtGui.QGridLayout()
        self.gridLayout_4.setMargin(1)
        self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
        self.muteButton = QtGui.QPushButton(self.controlView)
        self.muteButton.setMinimumSize(QtCore.QSize(30, 30))
        self.muteButton.setMaximumSize(QtCore.QSize(30, 30))
        self.muteButton.setText(_fromUtf8(""))
        self.muteButton.setObjectName(_fromUtf8("muteButton"))
        self.gridLayout_4.addWidget(self.muteButton, 0, 4, 1, 1)
        # Expanding spacer widget between the button clusters.
        self.expansionWidget_3 = QtGui.QWidget(self.controlView)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.expansionWidget_3.sizePolicy().hasHeightForWidth())
        self.expansionWidget_3.setSizePolicy(sizePolicy)
        self.expansionWidget_3.setObjectName(_fromUtf8("expansionWidget_3"))
        self.gridLayout_7 = QtGui.QGridLayout(self.expansionWidget_3)
        self.gridLayout_7.setMargin(0)
        self.gridLayout_7.setObjectName(_fromUtf8("gridLayout_7"))
        self.gridLayout_4.addWidget(self.expansionWidget_3, 0, 1, 1, 1)
        self.volumeBar = QtGui.QSlider(self.controlView)
        self.volumeBar.setMinimumSize(QtCore.QSize(175, 0))
        self.volumeBar.setMaximumSize(QtCore.QSize(100, 16777215))
        self.volumeBar.setOrientation(QtCore.Qt.Horizontal)
        self.volumeBar.setObjectName(_fromUtf8("volumeBar"))
        self.gridLayout_4.addWidget(self.volumeBar, 0, 5, 1, 1)
        # Settings cluster: fullscreen / playlist / stop.
        self.mediaSettingsWidget = QtGui.QWidget(self.controlView)
        self.mediaSettingsWidget.setMinimumSize(QtCore.QSize(140, 60))
        self.mediaSettingsWidget.setMaximumSize(QtCore.QSize(140, 60))
        self.mediaSettingsWidget.setObjectName(_fromUtf8("mediaSettingsWidget"))
        self.horizontalLayout_6 = QtGui.QHBoxLayout(self.mediaSettingsWidget)
        self.horizontalLayout_6.setMargin(0)
        self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
        self.fullscreenButton = QtGui.QPushButton(self.mediaSettingsWidget)
        self.fullscreenButton.setMinimumSize(QtCore.QSize(30, 30))
        self.fullscreenButton.setMaximumSize(QtCore.QSize(30, 30))
        self.fullscreenButton.setText(_fromUtf8(""))
        self.fullscreenButton.setObjectName(_fromUtf8("fullscreenButton"))
        self.horizontalLayout_6.addWidget(self.fullscreenButton)
        self.playlistButton = QtGui.QPushButton(self.mediaSettingsWidget)
        self.playlistButton.setMinimumSize(QtCore.QSize(30, 30))
        self.playlistButton.setMaximumSize(QtCore.QSize(30, 30))
        self.playlistButton.setText(_fromUtf8(""))
        self.playlistButton.setObjectName(_fromUtf8("playlistButton"))
        self.horizontalLayout_6.addWidget(self.playlistButton)
        self.stopButton = QtGui.QPushButton(self.mediaSettingsWidget)
        self.stopButton.setMinimumSize(QtCore.QSize(30, 30))
        self.stopButton.setMaximumSize(QtCore.QSize(30, 30))
        self.stopButton.setText(_fromUtf8(""))
        self.stopButton.setObjectName(_fromUtf8("stopButton"))
        self.horizontalLayout_6.addWidget(self.stopButton)
        self.gridLayout_4.addWidget(self.mediaSettingsWidget, 0, 0, 1, 1)
        # Transport cluster: previous / play-pause / next.
        self.mediaControlWidget = QtGui.QWidget(self.controlView)
        self.mediaControlWidget.setMinimumSize(QtCore.QSize(225, 70))
        self.mediaControlWidget.setMaximumSize(QtCore.QSize(225, 70))
        self.mediaControlWidget.setObjectName(_fromUtf8("mediaControlWidget"))
        self.horizontalLayout_7 = QtGui.QHBoxLayout(self.mediaControlWidget)
        self.horizontalLayout_7.setMargin(0)
        self.horizontalLayout_7.setObjectName(_fromUtf8("horizontalLayout_7"))
        self.previous = QtGui.QPushButton(self.mediaControlWidget)
        self.previous.setMinimumSize(QtCore.QSize(40, 40))
        self.previous.setMaximumSize(QtCore.QSize(40, 40))
        self.previous.setText(_fromUtf8(""))
        self.previous.setObjectName(_fromUtf8("previous"))
        self.horizontalLayout_7.addWidget(self.previous)
        self.playState = QtGui.QPushButton(self.mediaControlWidget)
        self.playState.setMinimumSize(QtCore.QSize(50, 50))
        self.playState.setMaximumSize(QtCore.QSize(50, 50))
        self.playState.setText(_fromUtf8(""))
        # Icon comes from the current icon theme; falls back to no icon if the
        # theme does not provide "play-2.svg".
        icon = QtGui.QIcon.fromTheme(_fromUtf8("play-2.svg"))
        self.playState.setIcon(icon)
        self.playState.setObjectName(_fromUtf8("playState"))
        self.horizontalLayout_7.addWidget(self.playState)
        self.next = QtGui.QPushButton(self.mediaControlWidget)
        self.next.setMinimumSize(QtCore.QSize(40, 40))
        self.next.setMaximumSize(QtCore.QSize(40, 40))
        self.next.setText(_fromUtf8(""))
        self.next.setObjectName(_fromUtf8("next"))
        self.horizontalLayout_7.addWidget(self.next)
        self.gridLayout_4.addWidget(self.mediaControlWidget, 0, 2, 1, 1)
        self.expansionWidget_4 = QtGui.QWidget(self.controlView)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.expansionWidget_4.sizePolicy().hasHeightForWidth())
        self.expansionWidget_4.setSizePolicy(sizePolicy)
        self.expansionWidget_4.setObjectName(_fromUtf8("expansionWidget_4"))
        self.gridLayout_4.addWidget(self.expansionWidget_4, 0, 3, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout_4)
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar: Media / Playback (with Speed submenu) / Subtitles / Audio /
        # Video.
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 764, 29))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        self.menuFile = QtGui.QMenu(self.menubar)
        self.menuFile.setObjectName(_fromUtf8("menuFile"))
        self.menuPlayback = QtGui.QMenu(self.menubar)
        self.menuPlayback.setObjectName(_fromUtf8("menuPlayback"))
        self.menuSpeed = QtGui.QMenu(self.menuPlayback)
        self.menuSpeed.setObjectName(_fromUtf8("menuSpeed"))
        self.menu_Subtitles = QtGui.QMenu(self.menubar)
        self.menu_Subtitles.setObjectName(_fromUtf8("menu_Subtitles"))
        self.menu_Audio = QtGui.QMenu(self.menubar)
        self.menu_Audio.setObjectName(_fromUtf8("menu_Audio"))
        self.menu_Video = QtGui.QMenu(self.menubar)
        self.menu_Video.setObjectName(_fromUtf8("menu_Video"))
        MainWindow.setMenuBar(self.menubar)
        # Actions (text and shortcuts are assigned in retranslateUi).
        self.actionOpen_File = QtGui.QAction(MainWindow)
        self.actionOpen_File.setShortcutContext(QtCore.Qt.WindowShortcut)
        self.actionOpen_File.setObjectName(_fromUtf8("actionOpen_File"))
        self.actionExit = QtGui.QAction(MainWindow)
        self.actionExit.setObjectName(_fromUtf8("actionExit"))
        self.actionOpen_Multiple_Files = QtGui.QAction(MainWindow)
        self.actionOpen_Multiple_Files.setObjectName(_fromUtf8("actionOpen_Multiple_Files"))
        self.actionAdd_Subtitle_File = QtGui.QAction(MainWindow)
        self.actionAdd_Subtitle_File.setObjectName(_fromUtf8("actionAdd_Subtitle_File"))
        self.actionJump_Forward = QtGui.QAction(MainWindow)
        self.actionJump_Forward.setObjectName(_fromUtf8("actionJump_Forward"))
        self.actionJump_Backward = QtGui.QAction(MainWindow)
        self.actionJump_Backward.setObjectName(_fromUtf8("actionJump_Backward"))
        self.actionX0_5 = QtGui.QAction(MainWindow)
        self.actionX0_5.setObjectName(_fromUtf8("actionX0_5"))
        self.actionX_1 = QtGui.QAction(MainWindow)
        self.actionX_1.setObjectName(_fromUtf8("actionX_1"))
        self.actionX_2 = QtGui.QAction(MainWindow)
        self.actionX_2.setObjectName(_fromUtf8("actionX_2"))
        self.actionX_4 = QtGui.QAction(MainWindow)
        self.actionX_4.setObjectName(_fromUtf8("actionX_4"))
        self.actionX_8 = QtGui.QAction(MainWindow)
        self.actionX_8.setObjectName(_fromUtf8("actionX_8"))
        self.actionAdd_Subtitle_Track = QtGui.QAction(MainWindow)
        self.actionAdd_Subtitle_Track.setObjectName(_fromUtf8("actionAdd_Subtitle_Track"))
        self.actionPlay = QtGui.QAction(MainWindow)
        self.actionPlay.setObjectName(_fromUtf8("actionPlay"))
        self.actionPause = QtGui.QAction(MainWindow)
        self.actionPause.setObjectName(_fromUtf8("actionPause"))
        self.actionStop = QtGui.QAction(MainWindow)
        self.actionStop.setObjectName(_fromUtf8("actionStop"))
        self.actionPrevious = QtGui.QAction(MainWindow)
        self.actionPrevious.setObjectName(_fromUtf8("actionPrevious"))
        self.actionNext = QtGui.QAction(MainWindow)
        self.actionNext.setObjectName(_fromUtf8("actionNext"))
        self.actionJump_to_specific_time = QtGui.QAction(MainWindow)
        self.actionJump_to_specific_time.setObjectName(_fromUtf8("actionJump_to_specific_time"))
        self.actionIncrease_Volume = QtGui.QAction(MainWindow)
        self.actionIncrease_Volume.setObjectName(_fromUtf8("actionIncrease_Volume"))
        self.actionDecrease_Volume = QtGui.QAction(MainWindow)
        self.actionDecrease_Volume.setObjectName(_fromUtf8("actionDecrease_Volume"))
        self.actionMute = QtGui.QAction(MainWindow)
        self.actionMute.setObjectName(_fromUtf8("actionMute"))
        self.actionFullscreen = QtGui.QAction(MainWindow)
        self.actionFullscreen.setCheckable(False)
        self.actionFullscreen.setObjectName(_fromUtf8("actionFullscreen"))
        self.actionShift_forward_by_1_second = QtGui.QAction(MainWindow)
        self.actionShift_forward_by_1_second.setObjectName(_fromUtf8("actionShift_forward_by_1_second"))
        self.actionShift_backward_by_1_second = QtGui.QAction(MainWindow)
        self.actionShift_backward_by_1_second.setObjectName(_fromUtf8("actionShift_backward_by_1_second"))
        # Assemble the menus.
        self.menuFile.addAction(self.actionOpen_File)
        self.menuFile.addAction(self.actionOpen_Multiple_Files)
        self.menuFile.addSeparator()
        self.menuFile.addAction(self.actionExit)
        self.menuSpeed.addAction(self.actionX0_5)
        self.menuSpeed.addAction(self.actionX_1)
        self.menuSpeed.addAction(self.actionX_2)
        self.menuSpeed.addAction(self.actionX_4)
        self.menuSpeed.addAction(self.actionX_8)
        self.menuPlayback.addAction(self.actionJump_Forward)
        self.menuPlayback.addAction(self.actionJump_Backward)
        self.menuPlayback.addAction(self.menuSpeed.menuAction())
        self.menuPlayback.addSeparator()
        self.menuPlayback.addAction(self.actionPlay)
        self.menuPlayback.addAction(self.actionStop)
        self.menuPlayback.addSeparator()
        self.menuPlayback.addAction(self.actionPrevious)
        self.menuPlayback.addAction(self.actionNext)
        self.menuPlayback.addSeparator()
        self.menuPlayback.addAction(self.actionJump_to_specific_time)
        self.menu_Subtitles.addAction(self.actionAdd_Subtitle_Track)
        self.menu_Subtitles.addSeparator()
        self.menu_Subtitles.addAction(self.actionShift_forward_by_1_second)
        self.menu_Subtitles.addAction(self.actionShift_backward_by_1_second)
        self.menu_Audio.addAction(self.actionIncrease_Volume)
        self.menu_Audio.addAction(self.actionDecrease_Volume)
        self.menu_Audio.addAction(self.actionMute)
        self.menu_Audio.addSeparator()
        self.menu_Video.addAction(self.actionFullscreen)
        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addAction(self.menuPlayback.menuAction())
        self.menubar.addAction(self.menu_Subtitles.menuAction())
        self.menubar.addAction(self.menu_Audio.menuAction())
        self.menubar.addAction(self.menu_Video.menuAction())

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Assign all user-visible strings, tooltips and keyboard shortcuts."""
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
        self.timeDone.setText(_translate("MainWindow", "00:00:00", None))
        self.timeLeft.setText(_translate("MainWindow", "00:00:00", None))
        self.muteButton.setToolTip(_translate("MainWindow", "volume", None))
        self.fullscreenButton.setToolTip(_translate("MainWindow", "Fullscreen", None))
        self.playlistButton.setToolTip(_translate("MainWindow", "Playlist", None))
        self.stopButton.setToolTip(_translate("MainWindow", "Stop", None))
        self.previous.setToolTip(_translate("MainWindow", "Previous", None))
        self.playState.setToolTip(_translate("MainWindow", "Play/Pause", None))
        self.next.setToolTip(_translate("MainWindow", "Next", None))
        self.menuFile.setTitle(_translate("MainWindow", "&Media", None))
        self.menuPlayback.setTitle(_translate("MainWindow", "P&layback", None))
        self.menuSpeed.setTitle(_translate("MainWindow", "&Speed", None))
        self.menu_Subtitles.setTitle(_translate("MainWindow", "&Subtitles", None))
        self.menu_Audio.setTitle(_translate("MainWindow", "&Audio ", None))
        self.menu_Video.setTitle(_translate("MainWindow", "&Video", None))
        self.actionOpen_File.setText(_translate("MainWindow", "&Open File", None))
        self.actionOpen_File.setShortcut(_translate("MainWindow", "Ctrl+O", None))
        self.actionExit.setText(_translate("MainWindow", "&Exit", None))
        self.actionExit.setShortcut(_translate("MainWindow", "Ctrl+Q", None))
        self.actionOpen_Multiple_Files.setText(_translate("MainWindow", "Open &Multiple Files", None))
        self.actionOpen_Multiple_Files.setShortcut(_translate("MainWindow", "Ctrl+Shift+O", None))
        self.actionAdd_Subtitle_File.setText(_translate("MainWindow", "&Add Subtitle File", None))
        self.actionJump_Forward.setText(_translate("MainWindow", "&Jump Forward", None))
        self.actionJump_Forward.setShortcut(_translate("MainWindow", "Ctrl+Shift++", None))
        self.actionJump_Backward.setText(_translate("MainWindow", "Jump &Backward", None))
        self.actionJump_Backward.setShortcut(_translate("MainWindow", "Ctrl+Shift+-", None))
        self.actionX0_5.setText(_translate("MainWindow", "&x 0.5", None))
        self.actionX_1.setText(_translate("MainWindow", "&Normal Speed", None))
        self.actionX_2.setText(_translate("MainWindow", "x &2", None))
        self.actionX_4.setText(_translate("MainWindow", "x &4", None))
        self.actionX_8.setText(_translate("MainWindow", "x &8", None))
        self.actionAdd_Subtitle_Track.setText(_translate("MainWindow", "&Add Subtitle Track", None))
        self.actionPlay.setText(_translate("MainWindow", "&Play/Pause", None))
        self.actionPlay.setShortcut(_translate("MainWindow", "Space", None))
        self.actionPause.setText(_translate("MainWindow", "Pause", None))
        self.actionPause.setShortcut(_translate("MainWindow", "Space", None))
        self.actionStop.setText(_translate("MainWindow", "St&op", None))
        self.actionStop.setShortcut(_translate("MainWindow", "Ctrl+Shift+S", None))
        self.actionPrevious.setText(_translate("MainWindow", "P&revious", None))
        self.actionPrevious.setShortcut(_translate("MainWindow", "Ctrl+Shift+Left", None))
        self.actionNext.setText(_translate("MainWindow", "&Next", None))
        self.actionNext.setShortcut(_translate("MainWindow", "Ctrl+Shift+Right", None))
        self.actionJump_to_specific_time.setText(_translate("MainWindow", "J&ump to specific time", None))
        self.actionJump_to_specific_time.setShortcut(_translate("MainWindow", "Ctrl+T", None))
        self.actionIncrease_Volume.setText(_translate("MainWindow", "&Increase Volume", None))
        self.actionIncrease_Volume.setShortcut(_translate("MainWindow", "Ctrl+Up", None))
        self.actionDecrease_Volume.setText(_translate("MainWindow", "&Decrease Volume", None))
        self.actionDecrease_Volume.setShortcut(_translate("MainWindow", "Ctrl+Down", None))
        self.actionMute.setText(_translate("MainWindow", "&Mute", None))
        self.actionMute.setShortcut(_translate("MainWindow", "M", None))
        self.actionFullscreen.setText(_translate("MainWindow", "&Fullscreen", None))
        self.actionFullscreen.setShortcut(_translate("MainWindow", "F", None))
        self.actionShift_forward_by_1_second.setText(_translate("MainWindow", "&Shift Forward By 1 Second", None))
        self.actionShift_forward_by_1_second.setShortcut(_translate("MainWindow", "H", None))
        self.actionShift_backward_by_1_second.setText(_translate("MainWindow", "Shift &Backward By 1 Second", None))
        self.actionShift_backward_by_1_second.setShortcut(_translate("MainWindow", "G", None))
"""A few constants which do not depend on other project files""" DEFAULT_ENCODING = 'utf-8' SETTINGS_ENCODING = 'utf-8' CONFIG_DIR = '.codimension3'
from __future__ import unicode_literals

import datetime

from django.db import migrations, models


class Migration(migrations.Migration):
    """Auto-generated migration: updates the default on Evenement.dateheure."""

    dependencies = [
        ('tableaubord', '0041_auto_20170507_2344'),
    ]

    operations = [
        migrations.AlterField(
            model_name='evenement',
            name='dateheure',
            # NOTE(review): the default is a naive datetime frozen at the
            # moment `makemigrations` ran (2017-05-07 23:52:53). This is only
            # the migration-time snapshot of the model's default — confirm the
            # model itself uses a dynamic default (e.g. timezone.now) before
            # relying on this value.
            field=models.DateTimeField(default=datetime.datetime(2017, 5, 7, 23, 52, 53, 961581), verbose_name='Date/heure evenement '),
        ),
    ]
import os from ecmwfapi import ECMWFDataServer server = ECMWFDataServer() time=["06"] year=["2013"] param=["129.128","130.128","131.128","132.128","157.128","151.128"] nam=["hgt","air","uwnd","vwnd","rhum","psl"] for y in year: #for m in month: for p in range(len(param)): for t in time: date=y+"-01-01/to/"+y+"-12-31" print date print nam[p]+"."+y+"."+t+".nc" os.system('echo "############################################################# ^_^"') server.retrieve({ 'dataset' : "interim", 'levelist' : "1/2/3/5/7/10/20/30/50/70/100/125/150/175/200/225/250/300/350/400/450/500/550/600/650/700/750/775/800/825/850/875/900/925/950/975/1000", 'step' : "0", 'number' : "all", 'levtype' : "pl", # set to "sl" for surface level 'date' : date, 'time' : t , 'origin' : "all", 'type' : "an", 'param' : "129.128/130.128/131.128/132.128/157.128", 'param' : param[p], 'area' : "0/0/-40/100", # Four values as North/West/South/East 'grid' : "1.5/1.5", # Two values: West-East/North-South increments 'format' : "netcdf", # if grib, just comment this line 'target' : nam[p]+"."+y+"."+t+".nc" })
from _commandbase import RadianceCommand
from ..datatype import RadiancePath, RadianceTuple
from ..parameters.gensky import GenskyParameters

import os


class Gensky(RadianceCommand):
    u"""gensky - Generate a RADIANCE description of a standard CIE sky.

    (DOC FIX: the previous docstring described gendaymtx — "an annual Perez
    sky matrix from a weather tape" — which is a different program; gensky
    produces a single CIE sky distribution for a given month/day/hour.)

    The attributes for this class and their data descriptors are given below.
    Please note that the first two inputs for each descriptor are for internal
    naming purposes only.

    Attributes:
        outputName: An optional name for output file name (Default: 'untitled').
        monthDayHour: A tuple containing inputs for month, day and hour.
        genskyParameters: Radiance parameters for gensky. If None Default
            parameters will be set. You can use self.genskyParameters to view,
            add or remove the parameters before executing the command.

    Usage:

        from honeybee.radiance.parameters.gensky import GenSkyParameters
        from honeybee.radiance.command.gensky import GenSky

        # create and modify genskyParameters. In this case a sunny with no sun
        # will be generated.
        gnskyParam = GenSkyParameters()
        gnskyParam.sunnySkyNoSun = True

        # create the gensky Command.
        gnsky = GenSky(monthDayHour=(1,1,11), genskyParameters=gnskyParam,
                       outputName = r'd:/sunnyWSun_010111.sky' )

        # run gensky
        gnsky.execute()
    """

    # Descriptor validating the (month, day, hour) triple.
    monthDayHour = RadianceTuple('monthDayHour', 'month day hour', tupleSize=3,
                                 testType=False)

    # Descriptor holding the path of the generated .sky file.
    outputFile = RadiancePath('outputFile', descriptiveName='output sky file',
                              relativePath=None, checkExists=False)

    def __init__(self, outputName='untitled', monthDayHour=None,
                 genskyParameters=None):
        """Init command."""
        RadianceCommand.__init__(self)

        # Results file for sky; ensure a ".sky" extension (Default: untitled.sky).
        self.outputFile = outputName if outputName.lower().endswith(".sky") \
            else outputName + ".sky"

        self.monthDayHour = monthDayHour
        self.genskyParameters = genskyParameters

    @classmethod
    def fromSkyType(cls, outputName='untitled', monthDayHour=(1, 21, 12),
                    skyType=0, latitude=None, longitude=None, meridian=None):
        """Create a sky by sky type.

        Args:
            outputName: An optional name for output file name
                (Default: 'untitled').
            monthDayHour: A tuple containing inputs for month, day and hour.
            skyType: An integer between 0-5 for CIE sky type.
                0: [+s] Sunny with sun, 1: [-s] Sunny without sun,
                2: [+i] Intermediate with sun, 3: [-i] Intermediate with no sun,
                4: [-c] Cloudy overcast sky, 5: [-u] Uniform cloudy sky
            latitude: [-a] A float number to indicate site latitude. Negative
                angle indicates south latitude.
            longitude: [-o] A float number to indicate site longitude. Negative
                angle indicates east longitude.
            meridian: [-m] A float number to indicate site meridian west of
                Greenwich.

        Raises:
            ValueError: If skyType cannot be converted to an integer.
            AssertionError: If skyType is outside the 0-5 range.
        """
        _skyParameters = GenskyParameters(latitude=latitude,
                                          longitude=longitude,
                                          meridian=meridian)

        # modify parameters based on sky type
        try:
            skyType = int(skyType)
        except (TypeError, ValueError):
            # BUG FIX: the original handler was `except TypeError:` followed by
            # a bare string literal — a no-op — so an invalid skyType slipped
            # through silently (and int("bad") raises ValueError, which was
            # never caught at all). Fail loudly instead.
            raise ValueError("skyType should be an integer between 0-5.")

        assert 0 <= skyType <= 5, "Sky type should be an integer between 0-5."

        if skyType == 0:
            _skyParameters.sunnySky = True
        elif skyType == 1:
            _skyParameters.sunnySky = False
        elif skyType == 2:
            _skyParameters.intermSky = True
        elif skyType == 3:
            _skyParameters.intermSky = False
        elif skyType == 4:
            _skyParameters.cloudySky = True
        elif skyType == 5:
            _skyParameters.uniformCloudySky = True

        return cls(outputName=outputName, monthDayHour=monthDayHour,
                   genskyParameters=_skyParameters)

    @classmethod
    def createUniformSkyfromIlluminanceValue(cls, outputName="untitled",
                                             illuminanceValue=10000):
        """Uniform CIE sky based on illuminance value.

        Attributes:
            outputName: An optional name for output file name
                (Default: 'untitled').
            illuminanceValue: Desired illuminance value in lux
        """
        assert float(illuminanceValue) >= 0, "Illuminace value can't be negative."

        # 179 lm/W is the luminous efficacy constant Radiance uses to convert
        # between photometric and radiometric quantities.
        _skyParameters = GenskyParameters(
            zenithBrightHorzDiff=illuminanceValue / 179.0)

        return cls(outputName=outputName, genskyParameters=_skyParameters)

    @classmethod
    def fromRadiationValues(cls):
        """Create a sky based on sky radiation values."""
        raise NotImplementedError()

    @property
    def genskyParameters(self):
        """Get and set genskyParameters."""
        return self.__genskyParameters

    @genskyParameters.setter
    def genskyParameters(self, genskyParam):
        # Fall back to default parameters when None is supplied.
        self.__genskyParameters = genskyParam if genskyParam is not None \
            else GenskyParameters()

        assert hasattr(self.genskyParameters, "isRadianceParameters"), \
            "input genskyParameters is not a valid parameters type."

    def toRadString(self, relativePath=False):
        """Return full command as a string.

        NOTE(review): assumes monthDayHour was set — a None value will make
        toRadString() fail here; confirm callers always provide it.
        """
        radString = "%s %s %s > %s" % (
            self.normspace(os.path.join(self.radbinPath, 'gensky')),
            self.monthDayHour.toRadString().replace("-monthDayHour ", ""),
            self.genskyParameters.toRadString(),
            self.normspace(self.outputFile.toRadString())
        )
        return radString

    @property
    def inputFiles(self):
        """Input files for this command."""
        return None
from nose.tools import * from DeckMaker.notetranslator import NoteTranslator def setup(): print "SETUP!" def teardown(): print "TEAR DOWN!" def test_basic(): t = NoteTranslator() assert_equal(t.GetMidiCodeForHumans("E5"),64) assert_equal(t.GetMidiCodeForHumans("C1"),12) assert_equal(t.GetMidiCodeForHumans("Ab6"),80) assert_equal(t.GetMidiCodeForHumans("Gb7"),90) assert_equal(t.GetMidiCodeForHumans("D#2"),27) pass def test_hex(): t = NoteTranslator() assert_equal(t.GetHexString(t.GetMidiCodeForHumans("E5")),"40") assert_equal(t.GetHexString(t.GetMidiCodeForHumans("C1")),"c") assert_equal(t.GetHexString(t.GetMidiCodeForHumans("Ab6")),"50") assert_equal(t.GetHexString(t.GetMidiCodeForHumans("Gb7")),"5a") assert_equal(t.GetHexString(t.GetMidiCodeForHumans("D#2")),"1b") pass def test_GetTriadCodes(): t = NoteTranslator() assert_equal(t.GetTriadCodes( t.GetMidiCodeForHumans("C4"), "minor", 3),[48, 53, 56]) assert_equal(t.GetTriadCodes( t.GetMidiCodeForHumans("Ab2"), "major", 2),[32, 40, 35]) assert_equal(t.GetTriadCodes( t.GetMidiCodeForHumans("G#6"), "minor", 1),[80, 83, 87]) def test_GetTriadHexCodeStrings(): t = NoteTranslator() assert_equal(t.GetTriadHexCodeStrings( t.GetMidiCodeForHumans("C4"), "major", 1),['30', '34', '37']) assert_equal(t.GetTriadHexCodeStrings( t.GetMidiCodeForHumans("Ab2"), "major", 2),['20', '28', '23']) assert_equal(t.GetTriadHexCodeStrings( t.GetMidiCodeForHumans("G#6"), "minor", 1),['50', '53', '57'])
import sys
import os
import logging
import random

import PyQt4
from PyQt4.QtCore import *

import constants


class Model(QAbstractTableModel):
    """Qt table model exposing ``self.album.rows`` (a list of dict-like rows).

    ``keys`` lists the row keys shown as columns, in column order.
    ``modelType`` (see ``constants.ModelType``) controls editability via
    ``flags``.  Subclasses are expected to attach an ``album`` object that
    carries a ``rows`` list.
    """

    keys = list()
    modelType = None

    def __init__(self, parent = None):
        '''Create the model and its logger.'''
        self.log = logging.getLogger('Model')
        # BUG FIX: super() must name this class, not the base class --
        # super(QAbstractTableModel, self) skipped QAbstractTableModel
        # itself in the MRO.
        super(Model, self).__init__(parent)

    def rowCount(self, parent = None):
        '''Number of album rows; 0 when no album is attached yet.'''
        if hasattr(self, 'album') and self.album:
            if hasattr(self.album, 'rows'):
                return len(self.album.rows)
        return 0

    def columnCount(self, parent = None):
        '''One column per entry in ``keys``.'''
        return len(self.keys)

    def data(self, index, role = None):
        '''Return the cell value for display, tooltip and edit roles.

        BUG FIX: the original guard used ``index.row() >= 0 or ...`` (always
        true for non-negative rows) and referenced the non-existent
        ``self.rows``; replaced with a proper bounds check.
        '''
        if index.isValid():
            if 0 <= index.row() < self.rowCount():
                if role == Qt.DisplayRole or role == Qt.ToolTipRole or role == Qt.EditRole:
                    return self.album.rows[index.row()][self.keys[index.column()]]
        return QVariant()

    def setData(self, index, value, role):
        '''Write an edited cell back into the album row.'''
        if index.isValid() and role == Qt.EditRole:
            key = self.keys[index.column()]
            value = unicode(value.toString())
            self.album.rows[index.row()][key] = value
            # BUG FIX: emit the real QAbstractItemModel signal so attached
            # views repaint; SIGNAL('dataChanged') without a signature
            # created an unrelated short-circuit signal no view listens to.
            self.dataChanged.emit(index, index)
        return True

    def headerData(self, section, orientation, role):
        '''Horizontal headers come from ``keys``; no vertical headers.'''
        if section >= 0 and section < len(self.keys):
            if orientation == Qt.Horizontal and role == Qt.DisplayRole:
                return self.keys[section]
        return QVariant()

    def flags(self, index):
        '''Cells are editable only for the "final" model type.'''
        if self.modelType == constants.ModelType.ModelTypeFinal:
            return super(Model, self).flags(index) | Qt.ItemIsEditable
        return super(Model, self).flags(index)

    def getModelType(self):
        '''Return the model type constant set by the subclass.'''
        return self.modelType
class System(object):
    """Abstract base class ("father class") for all systems.

    Concrete systems must override :meth:`update`; construction accepts and
    ignores arbitrary arguments so subclasses can define their own
    signatures.
    """

    def __init__(self, *args, **kwargs):
        # Intentionally a no-op: subclasses own their initialisation.
        pass

    def update(self, dt):
        """Advance the system by ``dt``; subclasses must override."""
        raise NotImplementedError
import socket
import argparse
import sys
import magic_ping
import os
import settings
import signal
import logging
import struct

# File-transfer client: streams a file to a server inside ICMP echo
# payloads via the magic_ping helper module.
logging.basicConfig(format=u'%(levelname)-8s [%(asctime)s] %(message)s',
                    level=logging.DEBUG,
                    filename=u'client.log')


def signal_handler(signal, frame):
    """SIGINT handler: log and terminate the client immediately."""
    print("\nSTOP CLIENT.")
    logging.info("STOP CLIENT.")
    exit(0)


def create_cmd_parser():
    """Build the CLI parser.

    -f/--file    required; opened in binary-read mode by argparse
    -a/--address required; destination host
    -c/--cypher  optional flag; XOR-encrypt payloads when present
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--file', required=True,
                        type=argparse.FileType(mode='rb'))
    parser.add_argument('-a', '--address', required=True)
    parser.add_argument('-c', '--cypher', action='store_const', const=True)
    return parser


signal.signal(signal.SIGINT, signal_handler)

if __name__ == '__main__':
    p = create_cmd_parser()
    arguments = p.parse_args(sys.argv[1:])
    file = arguments.file
    file_name = file.name
    file_size = os.stat(file_name).st_size
    address = arguments.address
    ID = 1  # ICMP echo identifier shared by all packets of this transfer
    # raw ICMP socket (requires elevated privileges)
    s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
    packet_number = 1
    data = file_name.encode()
    # first packet: one cypher-flag byte followed by the file name
    if arguments.cypher:
        data = struct.pack('b', 1) + data
    else:
        data = struct.pack('b', 0) + data
    logging.debug("Start sending file to %s" % address)
    magic_ping.send_ping(s, address, ID, data, packet_number)
    print('start sending')
    already_sent = 0  # size of the part already sent
    while True:
        data = file.read(settings.DATA_SIZE)
        if arguments.cypher:
            # XOR-encrypt the chunk with the shared key.
            # NOTE(review): zip() truncates to the shorter sequence -- if
            # settings.KEY is shorter than the chunk, trailing bytes are
            # silently dropped; confirm len(KEY) >= DATA_SIZE.
            data = [a ^ b for (a, b) in zip(data, settings.KEY)]
            data = bytes(data)
        if not data:
            break
        already_sent += len(data)
        packet_number += 1
        magic_ping.send_ping(s, address, ID, data, packet_number)
        logging.info('Отправлено: %.2f %%' % (already_sent / file_size * 100))
        print('Отправлено: %.2f %%' % (already_sent / file_size * 100))
    # packet_number=0 with an empty payload marks end-of-transfer
    magic_ping.send_ping(s, address, ID, bytes(0), packet_number=0)
    logging.debug("Packets sent: %d" % packet_number)
    print("send:", packet_number)
    file.close()
    client_address, packet_number, checksum = magic_ping.receive_ping(s, ID, {})
    # verify the transfer against the server-reported md5
    if checksum and settings.md5_checksum(file_name) != checksum.decode():
        logging.warning("Файл передался с ошибками!!!")
        print("Файл передался с ошибками!!!")
    s.close()
import logging
import traceback

import flask

from tribble.api import application
from tribble.api import utils
from tribble.common.db import db_proc
from tribble.common.db import zone_status
from tribble.common import rpc
from tribble.common import system_config

# Flask blueprint for the /v1/schematics/<sid>/zones API endpoints.
mod = flask.Blueprint('zones', __name__)

LOG = logging.getLogger('tribble-api')
CONFIG = system_config.ConfigurationSetup()
DEFAULT = CONFIG.config_args()
DB = application.DB


@mod.route('/v1/schematics/<sid>/zones', methods=['GET'])
def zones_get(sid):
    """Return a list of zones.

    Method is accessible with GET /v1/schematics/<sid>/zones

    :param sid: ``str``  # schematic ID
    :return json, status: ``tuple``
    """
    parsed_data = utils.zone_basic_handler(sid=sid)
    if parsed_data[0] is False:
        # handler failed: parsed_data is (False, message, status)
        return utils.return_msg(msg=parsed_data[1], status=parsed_data[2])
    else:
        _success, schematic, zones, user_id = parsed_data

    LOG.debug('%s %s %s %s', _success, schematic, zones, user_id)
    try:
        return_zones = []
        for zone in zones:
            # strip timestamp fields before returning the row dict
            dzone = utils.pop_ts(zone.__dict__)
            instances = db_proc.get_instances(zon=zone)
            if instances:
                dzone['instance_quantity'] = len(instances)
            return_zones.append(dzone)
    except Exception:
        LOG.error(traceback.format_exc())
        return utils.return_msg(msg='Unexpected Error', status=500)
    else:
        return utils.return_msg(msg=return_zones, status=200)


@mod.route('/v1/schematics/<sid>/zones/<zid>', methods=['GET'])
def zone_get(sid, zid):
    """Return a zone.

    Method is accessible with GET /v1/schematics/<sid>/zones/<zid>

    :param sid: ``str``  # schematic ID
    :param zid: ``str``  # Zone ID
    :return json, status: ``tuple``
    """
    parsed_data = utils.zone_basic_handler(sid=sid, zid=zid)
    if parsed_data[0] is False:
        return utils.return_msg(msg=parsed_data[1], status=parsed_data[2])
    else:
        _success, schematic, zone, user_id = parsed_data

    _zone = utils.pop_ts(temp=zone.__dict__)
    instances = db_proc.get_instances(zon=zone)
    if instances:
        # inline the zone's instances, timestamps stripped
        _zone['instances'] = [
            utils.pop_ts(temp=instance.__dict__)
            for instance in instances
        ]

    LOG.debug('%s %s %s %s', _success, schematic, zone, user_id)
    return utils.return_msg(msg=_zone, status=200)


@mod.route('/v1/schematics/<sid>/zones/<zid>', methods=['DELETE'])
def zone_delete(sid=None, zid=None):
    """Delete a Zone.

    Method is accessible with DELETE /v1/schematics/<sid>/zones/<zid>

    :param sid: ``str``  # schematic ID
    :param zid: ``str``  # Zone ID
    :return json, status: ``tuple``
    """
    parsed_data = utils.zone_basic_handler(sid=sid, zid=zid)
    if parsed_data[0] is False:
        return utils.return_msg(msg=parsed_data[1], status=parsed_data[2])
    else:
        _success, schematic, zone, user_id = parsed_data

    # Refuse deletes while the zone is still building.
    if zone.zone_state == 'BUILDING':
        build_response = (
            'Zone Delete can not be performed because Zone "%s" has a'
            ' Pending Status' % zone.id
        )
        return utils.return_msg(msg=build_response, status=200)

    LOG.debug('%s %s %s %s', _success, schematic, zone, user_id)
    try:
        config = db_proc.get_configmanager(skm=schematic)
        instances = db_proc.get_instances(zon=zone)
        # publish a delete job carrying the instance UUIDs to tear down
        packet = utils.build_cell(
            job='zone_delete',
            schematic=schematic,
            zone=zone,
            config=config
        )
        packet['uuids'] = [instance.instance_id for instance in instances]
        rpc.default_publisher(message=packet)
        sess = DB.session
        zone_status.ZoneState(cell=packet).delete()
    except Exception:
        LOG.error(traceback.format_exc())
        return utils.return_msg(msg='unexpected error', status=500)
    else:
        db_proc.commit_session(session=sess)
        return utils.return_msg(msg='deletes received', status=203)
@mod.route('/v1/schematics/<sid>/zones/<zid>/purge', methods=['DELETE'])
def zone_purge(sid=None, zid=None):
    """purge a Zone.

    This is used to remove all indication of a zone without attempting to
    disconnect or otherwise clean up the zone or any of its may be attached
    instances.

    Method is accessible with DELETE /v1/schematics/<sid>/zones/<zid>/purge

    :param sid: ``str``  # schematic ID
    :param zid: ``str``  # Zone ID
    :return json, status: ``tuple``
    """
    parsed_data = utils.zone_basic_handler(sid=sid, zid=zid)
    if parsed_data[0] is False:
        return utils.return_msg(msg=parsed_data[1], status=parsed_data[2])
    else:
        _success, schematic, zone, user_id = parsed_data

    LOG.debug('%s %s %s %s', _success, schematic, zone, user_id)
    try:
        sess = DB.session
        # hard delete of the DB row only; no cloud-side cleanup is attempted
        db_proc.delete_item(session=sess, item=zone)
    except Exception:
        LOG.error(traceback.format_exc())
        return utils.return_msg(msg='unexpected error', status=500)
    else:
        db_proc.commit_session(session=sess)
        return utils.return_msg(
            msg='zone %s was purged' % zone.id, status=203
        )


@mod.route('/v1/schematics/<sid>/zones/<zid>', methods=['PUT'])
def zone_put(sid=None, zid=None):
    """Update a Zone.

    Method is accessible with PUT /v1/schematics/<sid>/zones/<zid>

    :param sid: ``str``  # schematic ID
    :param zid: ``str``  # Zone ID
    :return json, status: ``tuple``
    """
    parsed_data = utils.zone_data_handler(sid=sid)
    if parsed_data[0] is False:
        return utils.return_msg(msg=parsed_data[1], status=parsed_data[2])
    else:
        _success, schematic, payload, user_id = parsed_data

    LOG.debug('%s %s %s %s', _success, schematic, payload, user_id)
    zone = db_proc.get_zones_by_id(skm=schematic, zid=zid)
    if not zone:
        return utils.return_msg(msg='no zones found', status=404)

    try:
        sess = DB.session
        sess = db_proc.put_zone(
            session=sess, zon=zone, put=payload
        )
    except Exception:
        LOG.error(traceback.format_exc())
        return utils.return_msg(msg='unexpected error', status=500)
    else:
        db_proc.commit_session(session=sess)
        return utils.return_msg(msg='updates received', status=201)


@mod.route('/v1/schematics/<sid>/zones', methods=['POST'])
def zone_post(sid=None):
    """Post a Zone.

    Method is accessible with POST /v1/schematics/<sid>/zones

    :param sid: ``str``  # schematic ID
    :return json, status: ``tuple``
    """
    parsed_data = utils.zone_data_handler(sid=sid, check_for_zone=True)
    if parsed_data[0] is False:
        return utils.return_msg(msg=parsed_data[1], status=parsed_data[2])
    else:
        _success, schematic, payload, user_id = parsed_data

    LOG.debug('%s %s %s %s', _success, schematic, payload, user_id)
    config = db_proc.get_configmanager(skm=schematic)
    try:
        sess = DB.session
        for _zn in payload['zones']:
            # persist the per-zone ssh credentials first
            ssh_user = _zn.get('ssh_user')
            pub = _zn.get('ssh_key_pub')
            pri = _zn.get('ssh_key_pri')
            key_name = _zn.get('key_name')
            ssh_key = db_proc.post_instanceskeys(
                pub=pub, pri=pri, sshu=ssh_user, key_name=key_name
            )
            db_proc.add_item(session=sess, item=ssh_key)

            zone = db_proc.post_zones(
                skm=schematic, zon=_zn, ssh=ssh_key
            )
            db_proc.add_item(session=sess, item=zone)

            # queue the build job for this zone
            packet = utils.build_cell(
                job='build',
                schematic=schematic,
                zone=zone,
                sshkey=ssh_key,
                config=config
            )
            rpc.default_publisher(message=packet)
    except Exception:
        LOG.error(traceback.format_exc())
        return utils.return_msg(msg='Unexpected Error', status=500)
    else:
        db_proc.commit_session(session=sess)
        # NOTE(review): "recieved" typo left as-is -- it is a user-facing
        # message that API clients may already match on.
        msg = 'Application requests have been recieved for Schematic %s' % sid
        return utils.return_msg(msg=msg, status=200)


@mod.route('/v1/schematics/<sid>/zones/<zid>/redeploy', methods=['POST'])
def redeploy_zone(sid=None, zid=None):
    """Redploy a zone.

    This method will interate over an existing zone and ensure that all
    things known in the zone are built and in an active state.

    Method is accessible with POST /v1/schematics/<sid>/zones

    :param sid: ``str``  # schematic ID
    :param zid: ``str``  # Zone ID
    :return json, status: ``tuple``
    """
    parsed_data = utils.zone_basic_handler(sid=sid, zid=zid)
    if parsed_data[0] is False:
        return utils.return_msg(msg=parsed_data[1], status=parsed_data[2])
    else:
        _success, schematic, zone, user_id = parsed_data

    LOG.debug('%s %s %s %s', _success, schematic, zone, user_id)
    config = db_proc.get_configmanager(skm=schematic)
    key = db_proc.get_instanceskeys(zon=zone)
    ints = db_proc.get_instances(zon=zone)
    base_qty = int(zone.quantity)  # desired instance count
    numr_qty = len(ints)           # actual instance count
    if base_qty > numr_qty:
        # under-provisioned: queue a build for the missing instances
        difference = base_qty - numr_qty
        packet = utils.build_cell(
            job='redeploy_build',
            schematic=schematic,
            zone=zone,
            sshkey=key,
            config=config
        )
        packet['quantity'] = difference
        LOG.debug(packet)
        rpc.default_publisher(message=packet)
        msg = 'Building %s Instances for Zone %s' % (difference, zone.id)
        return utils.return_msg(msg=msg, status=200)
    elif base_qty < numr_qty:
        # over-provisioned: delete the surplus instances
        difference = numr_qty - base_qty
        packet = utils.build_cell(
            job='redeploy_delete',
            schematic=schematic,
            zone=zone,
            sshkey=key,
            config=config
        )
        instances = [ins.instance_id for ins in ints]
        remove_instances = instances[:difference]
        packet['uuids'] = remove_instances
        LOG.debug(packet)
        remove_ids = [
            ins for ins in ints
            if ins.instance_id in remove_instances
        ]
        try:
            sess = DB.session
            for instance_id in remove_ids:
                db_proc.delete_item(session=sess, item=instance_id)
        except Exception:
            LOG.error(traceback.format_exc())
            return utils.return_msg(msg='Unexpected Error', status=500)
        else:
            # publish only after the DB deletes succeeded
            rpc.default_publisher(message=packet)
            db_proc.commit_session(session=sess)
            msg = 'Removing %s Instances for Zone %s' % (difference, zone.id)
            return utils.return_msg(msg=msg, status=200)
    else:
        return utils.return_msg(msg='nothing to do', status=200)


@mod.route('/v1/schematics/<sid>/zones/<zid>/resetstate', methods=['POST'])
def reset_zone_state(sid=None, zid=None):
    r"""Reset the state of a zone to active.

    This method will reset the state of an existing zone no matter the
    current state. The new state after invoking this method will be set
    to "ACTIVE RESET"

    Method is accessible with POST /v1/schematics/<sid>/zones

    :param sid: ``str``  # schematic ID
    :param zid: ``str``  # Zone ID
    :return json, status: ``tuple``
    """
    parsed_data = utils.zone_basic_handler(sid=sid, zid=zid)
    if parsed_data[0] is False:
        return utils.return_msg(msg=parsed_data[1], status=parsed_data[2])
    else:
        _success, schematic, zone, user_id = parsed_data

    LOG.debug('%s %s %s %s', _success, schematic, zone, user_id)
    cell = {'zone_state': 'ACTIVE RESET'}
    try:
        sess = DB.session
        db_proc.put_zone(session=sess, zon=zone, put=cell)
    except Exception:
        LOG.error(traceback.format_exc())
        return utils.return_msg(msg='unexpected error', status=500)
    else:
        db_proc.commit_session(session=sess)
        return utils.return_msg(
            msg='Zone State for %s has been Reset' % zid, status=200
        )
import time import logging logger = logging.getLogger(__name__) import re import urllib2 import urlparse from bs4.element import Comment from ..htmlcleanup import stripHTML from .. import exceptions as exceptions from base_adapter import BaseSiteAdapter, makeDate class LiteroticaSiteAdapter(BaseSiteAdapter): def __init__(self, config, url): BaseSiteAdapter.__init__(self, config, url) logger.debug("LiteroticaComAdapter:__init__ - url='%s'" % url) self.decode = ["utf8", "Windows-1252"] # 1252 is a superset of iso-8859-1. # Most sites that claim to be # iso-8859-1 (and some that claim to be # utf8) are really windows-1252. # Each adapter needs to have a unique site abbreviation. self.story.setMetadata('siteabbrev','litero') # normalize to first chapter. Not sure if they ever have more than 2 digits. storyId = self.parsedUrl.path.split('/',)[2] # replace later chapters with first chapter but don't remove numbers # from the URL that disambiguate stories with the same title. storyId = re.sub("-ch-?\d\d", "", storyId) self.story.setMetadata('storyId', storyId) ## accept m(mobile)url, but use www. url = re.sub("^(www|german|spanish|french|dutch|italian|romanian|portuguese|other)\.i", "\1", url) ## strip ?page=... url = re.sub("\?page=.*$", "", url) ## set url self._setURL(url) # The date format will vary from site to site. 
# http://docs.python.org/library/datetime.html#strftime-strptime-behavior self.dateformat = "%m/%d/%y" @staticmethod def getSiteDomain(): return 'literotica.com' @classmethod def getAcceptDomains(cls): return ['www.literotica.com', 'www.i.literotica.com', 'german.literotica.com', 'german.i.literotica.com', 'spanish.literotica.com', 'spanish.i.literotica.com', 'french.literotica.com', 'french.i.literotica.com', 'dutch.literotica.com', 'dutch.i.literotica.com', 'italian.literotica.com', 'italian.i.literotica.com', 'romanian.literotica.com', 'romanian.i.literotica.com', 'portuguese.literotica.com', 'portuguese.i.literotica.com', 'other.literotica.com', 'other.i.literotica.com'] @classmethod def getSiteExampleURLs(cls): return "http://www.literotica.com/s/story-title https://www.literotica.com/s/story-title http://portuguese.literotica.com/s/story-title http://german.literotica.com/s/story-title" def getSiteURLPattern(self): return r"https?://(www|german|spanish|french|dutch|italian|romanian|portuguese|other)(\.i)?\.literotica\.com/s/([a-zA-Z0-9_-]+)" def getCategories(self, soup): if self.getConfig("use_meta_keywords"): categories = soup.find("meta", {"name":"keywords"})['content'].split(', ') categories = [c for c in categories if not self.story.getMetadata('title') in c] if self.story.getMetadata('author') in categories: categories.remove(self.story.getMetadata('author')) logger.debug("Meta = %s" % categories) for category in categories: # logger.debug("\tCategory=%s" % category) self.story.addToList('eroticatags', category.title()) def extractChapterUrlsAndMetadata(self): """ NOTE: Some stories can have versions, e.g. 
/my-story-ch-05-version-10 NOTE: If two stories share the same title, a running index is added, e.g.: /my-story-ch-02-1 Strategy: * Go to author's page, search for the current story link, * If it's in a tr.root-story => One-part story * , get metadata and be done * If it's in a tr.sl => Chapter in series * Search up from there until we find a tr.ser-ttl (this is the story) * Gather metadata * Search down from there for all tr.sl until the next tr.ser-ttl, foreach * Chapter link is there """ if not (self.is_adult or self.getConfig("is_adult")): raise exceptions.AdultCheckRequired(self.url) logger.debug("Chapter/Story URL: <%s> " % self.url) try: data1 = self._fetchUrl(self.url) soup1 = self.make_soup(data1) #strip comments from soup [comment.extract() for comment in soup1.findAll(text=lambda text:isinstance(text, Comment))] except urllib2.HTTPError, e: if e.code == 404: raise exceptions.StoryDoesNotExist(self.url) else: raise e if "This submission is awaiting moderator's approval" in data1: raise exceptions.StoryDoesNotExist("This submission is awaiting moderator's approval. %s"%self.url) # author a = soup1.find("span", "b-story-user-y") self.story.setMetadata('authorId', urlparse.parse_qs(a.a['href'].split('?')[1])['uid'][0]) authorurl = a.a['href'] if authorurl.startswith('//'): authorurl = self.parsedUrl.scheme+':'+authorurl self.story.setMetadata('authorUrl', authorurl) self.story.setMetadata('author', a.text) # get the author page try: dataAuth = self._fetchUrl(authorurl) soupAuth = self.make_soup(dataAuth) #strip comments from soup [comment.extract() for comment in soupAuth.findAll(text=lambda text:isinstance(text, Comment))] except urllib2.HTTPError, e: if e.code == 404: raise exceptions.StoryDoesNotExist(authorurl) else: raise e ## Find link to url in author's page ## site has started using //domain.name/asdf urls remove https?: from front ## site has started putting https back on again. 
storyLink = soupAuth.find('a', href=re.compile(r'(https?:)?'+re.escape(self.url[self.url.index(':')+1:]))) if storyLink is not None: # pull the published date from the author page # default values from single link. Updated below if multiple chapter. logger.debug("Found story on the author page.") date = storyLink.parent.parent.findAll('td')[-1].text self.story.setMetadata('datePublished', makeDate(date, self.dateformat)) self.story.setMetadata('dateUpdated',makeDate(date, self.dateformat)) if storyLink is not None: urlTr = storyLink.parent.parent if "sl" in urlTr['class']: isSingleStory = False else: isSingleStory = True else: raise exceptions.FailedToDownload("Couldn't find story <%s> on author's page <%s>" % (self.url, authorurl)) if isSingleStory: self.story.setMetadata('title', storyLink.text.strip('/')) logger.debug('Title: "%s"' % storyLink.text.strip('/')) self.story.setMetadata('description', urlTr.findAll("td")[1].text) self.story.addToList('category', urlTr.findAll("td")[2].text) date = urlTr.findAll('td')[-1].text self.story.setMetadata('datePublished', makeDate(date, self.dateformat)) self.story.setMetadata('dateUpdated',makeDate(date, self.dateformat)) self.chapterUrls = [(storyLink.text, self.url)] averrating = stripHTML(storyLink.parent) ## title (0.00) averrating = averrating[averrating.rfind('(')+1:averrating.rfind(')')] try: self.story.setMetadata('averrating', float(averrating)) except: pass # parse out the list of chapters else: seriesTr = urlTr.previousSibling while 'ser-ttl' not in seriesTr['class']: seriesTr = seriesTr.previousSibling m = re.match("^(?P<title>.*?):\s(?P<numChapters>\d+)\sPart\sSeries$", seriesTr.find("strong").text) self.story.setMetadata('title', m.group('title')) seriesTitle = m.group('title') ## Walk the chapters chapterTr = seriesTr.nextSibling self.chapterUrls = [] dates = [] descriptions = [] ratings = [] chapters = [] while chapterTr is not None and 'sl' in chapterTr['class']: description = "%d. 
%s" % (len(descriptions)+1,stripHTML(chapterTr.findAll("td")[1])) description = stripHTML(chapterTr.findAll("td")[1]) chapterLink = chapterTr.find("td", "fc").find("a") self.story.addToList('eroticatags', chapterTr.findAll("td")[2].text) pub_date = makeDate(chapterTr.findAll('td')[-1].text, self.dateformat) dates.append(pub_date) chapterTr = chapterTr.nextSibling chapter_title = chapterLink.text if self.getConfig("clean_chapter_titles"): logger.debug('\tChapter Name: "%s"' % chapterLink.string) logger.debug('\tChapter Name: "%s"' % chapterLink.text) if chapterLink.text.lower().startswith(seriesTitle.lower()): chapter = chapterLink.text[len(seriesTitle):].strip() logger.debug('\tChapter: "%s"' % chapter) if chapter == '': chapter_title = 'Chapter %d' % (len(self.chapterUrls) + 1) else: separater_char = chapter[0] logger.debug('\tseparater_char: "%s"' % separater_char) chapter = chapter[1:].strip() if separater_char in [":", "-"] else chapter logger.debug('\tChapter: "%s"' % chapter) if chapter.lower().startswith('ch.'): chapter = chapter[len('ch.'):] try: chapter_title = 'Chapter %d' % int(chapter) except: chapter_title = 'Chapter %s' % chapter elif chapter.lower().startswith('pt.'): chapter = chapter[len('pt.'):] try: chapter_title = 'Part %d' % int(chapter) except: chapter_title = 'Part %s' % chapter elif separater_char in [":", "-"]: chapter_title = chapter # if chapter_title == '': # chapter_title = chapterLink.string # pages include full URLs. 
chapurl = chapterLink['href'] if chapurl.startswith('//'): chapurl = self.parsedUrl.scheme + ':' + chapurl logger.debug("Chapter URL: " + chapurl) logger.debug("Chapter Title: " + chapter_title) logger.debug("Chapter description: " + description) chapters.append((chapter_title, chapurl, description, pub_date)) numrating = stripHTML(chapterLink.parent) ## title (0.00) numrating = numrating[numrating.rfind('(')+1:numrating.rfind(')')] try: ratings.append(float(numrating)) except: pass chapters = sorted(chapters, key=lambda chapter: chapter[3]) for i, chapter in enumerate(chapters): self.chapterUrls.append((chapter[0], chapter[1])) descriptions.append("%d. %s" % (i + 1, chapter[2])) ## Set the oldest date as publication date, the newest as update date dates.sort() self.story.setMetadata('datePublished', dates[0]) self.story.setMetadata('dateUpdated', dates[-1]) self.story.setMetadata('datePublished', chapters[0][3]) self.story.setMetadata('dateUpdated', chapters[-1][3]) ## Set description to joint chapter descriptions self.setDescription(authorurl,"<p>"+"</p>\n<p>".join(descriptions)+"</p>") if len(ratings) > 0: self.story.setMetadata('averrating','%4.2f' % (sum(ratings) / float(len(ratings)))) # normalize on first chapter URL. self._setURL(self.chapterUrls[0][1]) # reset storyId to first chapter. 
self.story.setMetadata('storyId',self.parsedUrl.path.split('/',)[2]) self.story.setMetadata('numChapters', len(self.chapterUrls)) self.story.setMetadata('category', soup1.find('div', 'b-breadcrumbs').findAll('a')[1].string) self.getCategories(soup1) return def getPageText(self, raw_page, url): logger.debug('Getting page text') raw_page = raw_page.replace('<div class="b-story-body-x x-r15"><div><p>','<div class="b-story-body-x x-r15"><div>') page_soup = self.make_soup(raw_page) [comment.extract() for comment in page_soup.findAll(text=lambda text:isinstance(text, Comment))] story2 = page_soup.find('div', 'b-story-body-x').div div = self.utf8FromSoup(url, story2) fullhtml = unicode(div) fullhtml = re.sub(r'<br />\s*<br />', r'</p><p>', fullhtml) fullhtml = re.sub(r'^<div>', r'', fullhtml) fullhtml = re.sub(r'</div>$', r'', fullhtml) fullhtml = re.sub(r'(<p><br/></p>\s+)+$', r'', fullhtml) return fullhtml def getChapterText(self, url): logger.debug('Getting chapter text from: %s' % url) raw_page = self._fetchUrl(url) page_soup = self.make_soup(raw_page) pages = page_soup.find('select', {'name' : 'page'}) page_nums = [page.text for page in pages.findAll('option')] if pages else 0 fullhtml = "" self.getCategories(page_soup) if self.getConfig("description_in_chapter"): chapter_description = page_soup.find("meta", {"name" : "description"})['content'] logger.debug("\tChapter description: %s" % chapter_description) fullhtml += '<p><b>Description:</b> %s</p><hr />' % chapter_description fullhtml += self.getPageText(raw_page, url) if pages: for page_no in xrange(2, len(page_nums) + 1): page_url = url + "?page=%s" % page_no logger.debug("page_url= %s" % page_url) raw_page = self._fetchUrl(page_url) fullhtml += self.getPageText(raw_page, url) return fullhtml def getClass(): return LiteroticaSiteAdapter
from copy import deepcopy

from base import Base


class Container(Base):
    """
    Component that allows an entity to contain one or more child entities.

    Fields (declared via Base): ``children`` -- fixed-size slot list where
    ``None`` marks an empty slot; ``max_bulk`` -- total bulk the container
    can hold.
    """
    def __init__(self):
        Base.__init__(self, children=list, max_bulk=int)

    @property
    def saveable_fields(self):
        # Children are persisted separately, so exclude them from the
        # saveable field list.
        fields = self.fields.keys()
        fields.remove("children")
        return fields


class BulkLimitError(Exception):
    """Error that gets raised when the item would exceed the bulk limit of
    the container."""
    def __init__(self, bulk, max_bulk):
        self.bulk = bulk          # bulk the container would have held
        self.max_bulk = max_bulk  # the container's limit

    def __str__(self):
        return "Item would exceed the bulk limit of the container."


class NoFreeSlotError(Exception):
    """Error that gets raised when the container has no free slots."""
    def __str__(self):
        return "Container can't hold any more items."


def get_free_slot(container):
    """Returns the first slot of the container that is not occupied.

    Raises NoFreeSlotError when every slot holds an item.
    """
    index = 0
    for child in container.children:
        if not child:
            return index
        index += 1
    raise NoFreeSlotError


def get_total_bulk(container):
    """Returns the bulk of all items in the container."""
    total_bulk = 0
    for child in container.children:
        if child:
            total_bulk += child.bulk
    return total_bulk


def get_total_weight(container):
    """Returns the weight of all items in the container."""
    total_weight = 0
    for child in container.children:
        if child:
            total_weight += child.weight
    return total_weight


def get_item(container, slot_or_type):
    """Returns the item that is in the slot, or has the given type.

    An int argument is treated as a slot index; anything else is matched
    against each child's ``item_type``.  Returns None when nothing matches.
    """
    if type(slot_or_type) == int:
        if len(container.children) >= (slot_or_type + 1):
            return container.children[slot_or_type]
    else:
        for child in container.children:
            if child and child.item_type == slot_or_type:
                return child
    return None


def remove_item(container, slot_or_type):
    """Removes the item at the given slot, or with the given type.

    NOTE(review): when called with a type this clears EVERY matching child
    (the loop has no break), while get_item/take_item only consider the
    first match -- confirm whether multi-remove is intended.
    """
    if type(slot_or_type) == int:
        item = get_item(container, slot_or_type)
        if item:
            container.children[slot_or_type] = None
            item.container = None
            item.slot = -1
    else:
        for child in container.children:
            if child and child.item_type == slot_or_type:
                container.children[child.slot] = None
                child.container = None
                child.slot = -1


def take_item(container, slot_or_type):
    """Moves the item at the given slot, or with the given type, out of
    the container and returns it."""
    item = get_item(container, slot_or_type)
    if item:
        remove_item(container, slot_or_type)
        return item


def put_item(container, item, slot=-1):
    """Puts the item at the given slot in the container.

    Returns the item previously at the slot.

    Raises BulkLimitError when adding the item (net of the displaced item's
    bulk) would exceed ``max_bulk``, and NoFreeSlotError when ``slot`` is -1
    and no slot is free.
    """
    if slot == -1:
        slot = get_free_slot(container)
    total_bulk = get_total_bulk(container)
    total_bulk += item.bulk

    old_item = get_item(container, slot)
    if old_item:
        # the displaced item's bulk is freed by this operation
        total_bulk -= old_item.bulk

    if total_bulk > container.max_bulk:
        raise BulkLimitError(total_bulk, container.max_bulk)

    remove_item(container, slot)
    container.children[slot] = item

    if item.container:
        # detach from the previous container before taking ownership
        remove_item(item.container, item.slot)
    item.container = container
    item.slot = slot
    return old_item
"""Flask web app tracking EVE Online characters' skill queues and
skill-extractor ("skill farm") profitability via the ESI API."""
from flask import Flask, render_template, request, jsonify, session, redirect, escape, url_for
import MySQLdb
import bcrypt
from esipy import App
from esipy import EsiClient
from esipy import EsiSecurity
from esipy.exceptions import APIException
import time
import json
import requests
import datetime
import math

app = Flask(__name__)


class ServerError(Exception):pass


class DB:
    """Thin MySQLdb wrapper that reconnects automatically on dropped
    connections."""
    conn = None

    def connect(self):
        # NOTE(review): execfile() means this is Python 2 only; config.conf
        # is executed as Python code into the config dict.
        config = {}
        execfile("config.conf",config)
        self.conn = MySQLdb.connect(
            host=config['dbHost'],
            user=config['dbUser'],
            passwd=config['dbPass'],
            db=config['dbBase']
        )
        self.conn.autocommit(True)
        self.conn.set_character_set('utf8')

    def query(self, sql, args=None):
        """Execute a parameterized query, reconnecting once if the
        connection is missing or has gone away; returns the cursor."""
        try:
            cursor = self.conn.cursor()
            cursor.execute(sql,args)
        except (AttributeError, MySQLdb.OperationalError):
            # AttributeError: first use (conn is None); OperationalError:
            # server closed the connection. Reconnect and retry once.
            self.connect()
            cursor = self.conn.cursor()
            cursor.execute(sql,args)
        return cursor


if __name__ == '__main__':
    # NOTE(review): config, security, client, db etc. are only created when
    # the module is run directly; the route handlers below reference them,
    # so running under a WSGI server would fail — confirm intended usage.
    config = {}
    execfile("config.conf",config)
    serverIP = config['serverIP']
    serverPort = config['serverPort']
    rounds = 10  # bcrypt work factor used by register()
    debug = config['debug']
    cer = config['ssl_cer']
    key = config['ssl_key']
    context = (cer,key)
    app.secret_key = config['appKey']
    esi_app = App.create('https://esi.tech.ccp.is/latest/swagger.json?datasource=tranquility')
    security = EsiSecurity(
        app=esi_app,
        redirect_uri=config['callbackURL'],
        client_id=config['clientID'],
        secret_key=config['secretKey']
    )
    client = EsiClient(security=security)
    scopes = ['esi-location.read_location.v1','esi-skills.read_skillqueue.v1','esi-skills.read_skills.v1','esi-clones.read_clones.v1']
    db = DB()


def profit():
    """Return an HTML snippet with projected monthly skill-farm profit
    (min/max), based on Jita (region 10000002) extractor/injector/PLEX
    prices from eve-central. Falls back to zeros on any failure."""
    extractorID = "40519"
    injectorID = "40520"
    plexID = "44992"
    priceList = []
    url = "http://api.eve-central.com/api/marketstat/json?regionlimit=10000002&typeid="
    # NOTE(review): bare except below silently hides all errors (network,
    # JSON shape changes) — consider narrowing.
    try:
        prices = requests.get(url+extractorID).json()[0]
        extractorPrice = prices['buy']['fivePercent']
        extractorPricen= prices['sell']['fivePercent']
        prices = requests.get(url+injectorID).json()[0]
        injectorPrice = prices['sell']['fivePercent']
        injectorPricen= prices['buy']['fivePercent']
        prices = requests.get(url+plexID).json()[0]
        plexPrice = prices['buy']['fivePercent']
        plexPricen= prices['sell']['fivePercent']
        injectorsMonth = 3.888  # injectors a max-SP/hour character fills per month
        profit = round(((injectorsMonth * (injectorPrice - extractorPrice)) - (plexPrice * 500))/1000000,2)
        nonoptimal = round(((injectorsMonth * (injectorPricen - extractorPricen)) - (plexPricen * 500))/1000000,2)
        return "<a href='https://market.madpilot.nl/static/graph/farm-month.png'>Projected profits: (min)"+str(nonoptimal)+"mil - (max)"+str(profit)+"mil </a>"
    except:
        return "<a href='https://market.madpilot.nl/static/graph/farm-month.png'>Projected profits: (min)"+str(0)+"mil - (max)"+str(0)+"mil </a>"


def isk(extractors):
    """Return [maxProfit, minProfit] in millions of ISK for selling the
    given number of filled extractors, using current market prices.
    Returns [0, 0] if price lookup fails."""
    extractorID = "40519"
    injectorID = "40520"
    plexID = "44992"
    priceList = []
    url = "http://api.eve-central.com/api/marketstat/json?regionlimit=10000002&typeid="
    try:
        prices = requests.get(url+extractorID).json()[0]
        extractorPrice = prices['buy']['fivePercent']
        extractorPricen= prices['sell']['fivePercent']
        prices = requests.get(url+injectorID).json()[0]
        injectorPrice = prices['sell']['fivePercent']
        injectorPricen= prices['buy']['fivePercent']
        prices = requests.get(url+plexID).json()[0]
        plexPrice = prices['buy']['fivePercent']
        plexPricen= prices['sell']['fivePercent']
        maxProfit = round(((injectorPrice - extractorPrice) * extractors)/1000000,2)
        minProfit = round(((injectorPricen - extractorPricen) * extractors)/1000000,2)
    except:
        maxProfit = 0
        minProfit = 0
    return [maxProfit, minProfit]


def isReady(char_id):
    """Return True when the cached ESI data for char_id is older than the
    check delay (or missing) and should be refreshed."""
    checkDelay = 1800  # seconds between ESI refreshes per character
    cur = db.query("SELECT UNIX_TIMESTAMP(updated) FROM cache_table WHERE character_id = %s",[char_id])
    lastChecked = cur.fetchone()
    curTime = int(time.time())
    if lastChecked:
        lastCheckedEpoch = lastChecked[0]
        if (curTime - lastCheckedEpoch) < checkDelay:
            print("Checktime is less than "+str(checkDelay)+" Seconds (current: "+str(curTime - lastCheckedEpoch)+"). Skipping")
            return False
        return True
    return True


@app.route('/')
def index():
    """Dashboard: for every character of the logged-in user, show name,
    location, skill queue, total/extractable SP and projected profits.
    Uses live ESI data when isReady() allows, otherwise the DB cache."""
    error = None
    if 'username' not in session:
        error = "Not logged in"
        return redirect(url_for('login', error=error))
    secure = security.get_auth_uri(scopes=scopes)
    cur = db.query("SELECT id FROM users WHERE user = %s;", [session['username']])
    for row in cur.fetchall():
        userID = row[0]
    characters = []
    cur = db.query("SELECT character_id, access_token, refresh_token, expires, expires_in, added, updated FROM characters WHERE owner_id = %s;", [userID])
    allSP = 0
    extractableSP = 0
    numExtract = 0
    for row in cur.fetchall():
        epoch = round(time.time())
        # Remaining token lifetime; clamp at 0 so esipy refreshes it.
        expires = row[3] - row[4] - epoch
        if expires < 0:
            expires = 0
        refresh = {u'access_token': row[1], u'refresh_token': row[2], u'expires_in': expires}
        security.update_token(refresh)
        ready = isReady(row[0])
        if not ready:
            # Serve this character from the cache_table row instead of ESI.
            cur = db.query("SELECT * FROM cache_table WHERE character_id=%s",[row[0]])
            cache = cur.fetchall()[0]
        #Get character name
        charName = esi_app.op['get_characters_names'](character_ids=[row[0]])
        result = client.request(charName)
        charName = json.loads(result.raw)[0].get('character_name')
        print "Character "+charName
        #Get character location
        if ready:
            charLocation = esi_app.op['get_characters_character_id_location'](character_id=row[0])
            result = client.request(charLocation)
            location = json.loads(result.raw)
            sol = esi_app.op['get_universe_systems_system_id'](system_id=location.get('solar_system_id'))
            sol = json.loads(client.request(sol).raw).get('name')
            cur = db.query("INSERT INTO cache_table (character_id,char_location) VALUES (%s,%s) ON DUPLICATE KEY UPDATE char_location=%s",[row[0],result.raw,result.raw])
        else:
            # cache[3] holds the cached char_location JSON — assumed column
            # order of cache_table; TODO confirm schema.
            location = json.loads(cache[3])
            sol = esi_app.op['get_universe_systems_system_id'](system_id=location.get('solar_system_id'))
            sol = json.loads(client.request(sol).raw).get('name')
        #Get current training skill + queue
        if ready:
            charTrain = esi_app.op['get_characters_character_id_skillqueue'](character_id=row[0])
            result = client.request(charTrain)
            training = json.loads(result.raw)
            cur = db.query("INSERT INTO cache_table (character_id,char_queue) VALUES (%s,%s) ON DUPLICATE KEY UPDATE char_queue=%s",[row[0],result.raw,result.raw])
        else:
            training = json.loads(cache[4])
        currentlyTrainingStart = training[0].get('start_date')
        currentlyTrainingEnd = training[0].get('finish_date')
        startTrainEpoch = int(time.mktime(time.strptime(currentlyTrainingStart, "%Y-%m-%dT%H:%M:%SZ")))
        endTrainEpoch = int(time.mktime(time.strptime(currentlyTrainingEnd, "%Y-%m-%dT%H:%M:%SZ")))
        # Drop queue entries that have already finished (stale cache).
        if endTrainEpoch < epoch:
            while endTrainEpoch < epoch and len(training)>1:
                del training[0]
                currentlyTrainingStart = training[0].get('start_date')
                currentlyTrainingEnd = training[0].get('finish_date')
                startTrainEpoch = int(time.mktime(time.strptime(currentlyTrainingStart, "%Y-%m-%dT%H:%M:%SZ")))
                endTrainEpoch = int(time.mktime(time.strptime(currentlyTrainingEnd, "%Y-%m-%dT%H:%M:%SZ")))
        trainedSpCur = training[0].get('training_start_sp') - training[0].get('level_start_sp')
        endQueue = training[-1].get('finish_date')
        currentlyTraining = training[0].get('skill_id')
        currentlyTrainingLevel = training[0].get('finished_level')
        curSkillStartSP = training[0].get('level_start_sp')
        curSkillEndSP = training[0].get('level_end_sp')
        curSkillSP = curSkillEndSP - curSkillStartSP
        #Get currently training name
        skillName = esi_app.op['get_universe_types_type_id'](type_id=currentlyTraining)
        result = client.request(skillName)
        skillName = json.loads(result.raw).get('name')
        #Get character total sp
        if ready:
            charSkill = esi_app.op['get_characters_character_id_skills'](character_id=row[0])
            result = client.request(charSkill)
            sp = json.loads(result.raw)
            totalSp = sp.get('total_sp')
            cur = db.query("INSERT INTO cache_table (character_id,char_skills) VALUES (%s,%s) ON DUPLICATE KEY UPDATE char_skills=%s",[row[0],result.raw,result.raw])
        else:
            sp = json.loads(cache[5])
            totalSp = sp.get('total_sp')
        #Get current training skill rank
        skillRank = esi_app.op['universe_types_type_id'](type_id=currentlyTraining)
        result = client.request(skillRank)
        skillDogma = json.loads(result.raw).get('dogma_attributes')
        print skillDogma
        # Rank hard-coded to 5; the dogma lookup below is disabled.
        skillRank = 5
        # for skill in skillDogma:
        #     if skill.get('attribute_id') == 275:
        #         skillRank = skill.get('value')
        #         break;
        startTrainEpoch = int(time.mktime(time.strptime(currentlyTrainingStart, "%Y-%m-%dT%H:%M:%SZ")))
        endTrainEpoch = int(time.mktime(time.strptime(currentlyTrainingEnd, "%Y-%m-%dT%H:%M:%SZ")))
        totalTrainTime = endTrainEpoch - startTrainEpoch
        trainedTime = epoch - startTrainEpoch
        # skillStartSP = (250 * math.pow(5.65685,currentlyTrainingLevel-1))
        # skillEndSP = (250 * math.pow(5.65685,currentlyTrainingLevel))
        # Interpolate SP gained so far in the current skill from elapsed time.
        spPerSec = float(curSkillSP) / float(totalTrainTime)
        trainedSP = int(spPerSec * trainedTime)
        totalSp += trainedSP
        allSP += totalSp
        #Prettify dates
        timeLeftCurrent = datetime.datetime.strptime(currentlyTrainingEnd, "%Y-%m-%dT%H:%M:%SZ").replace(microsecond=0) - datetime.datetime.now().replace(microsecond=0)
        endQueueLeft = datetime.datetime.strptime(endQueue, "%Y-%m-%dT%H:%M:%SZ").replace(microsecond=0) - datetime.datetime.now().replace(microsecond=0)
        currentlyTrainingEnd = time.strftime("%Y-%m-%d %H:%M",time.gmtime(int(time.mktime(time.strptime(currentlyTrainingEnd, "%Y-%m-%dT%H:%M:%SZ")))))
        endQueue = time.strftime("%Y-%m-%d %H:%M",time.gmtime(int(time.mktime(time.strptime(endQueue, "%Y-%m-%dT%H:%M:%SZ")))))
        #Get Cybernetics skill
        for skill in sp.get('skills'):
            if skill.get('skill_id') == 3411:
                cyberLevel = skill.get('current_skill_level')
                break;
        #Get character attributes
        #Assume 2700(max) for now, until attributes are added to ESI
        startTime = time.mktime(time.strptime(currentlyTrainingStart, "%Y-%m-%dT%H:%M:%SZ"))
        timeDone = epoch - startTime
        spAdded = int(timeDone / 60 / 60 * 2700)
        # Extractable SP: everything above the 5M floor, 500k per extractor.
        if totalSp > 5500000:
            exSP = totalSp - 5000000
            extractableSP += exSP
            exSP = int(exSP / 500000)
            numExtract += exSP
        totalSp = format(totalSp, "8,d")
        queueStatus = None
        if endTrainEpoch < epoch:
            queueStatus = "Queue empty!"
        characters.append( {
            "characterName": charName,
            "characterID": row[0],
            "characterImage": "https://image.eveonline.com/Character/"+str(row[0])+"_64.jpg",
            "totalSP": totalSp,
            "characterLocation": sol,
            "currentEnd":currentlyTrainingEnd,
            "queueEnd": endQueue,
            "currentlyTraining": currentlyTraining,
            "timeLeftCurrent": timeLeftCurrent,
            "endQueueLeft": endQueueLeft,
            "currentlyTrainingLevel": currentlyTrainingLevel,
            "currentlyTrainingName": skillName,
            "cyberneticsLevel": cyberLevel,
            "queueStatus": queueStatus
        })
        print "----------"
    allSP = format(allSP, "8,d")
    extractableSP = format(extractableSP, "8,d")
    stats = [{
        "allSP": allSP,
        "exSP": extractableSP,
        "numEx": numExtract
    }]
    profits = isk(numExtract)
    return render_template('index.html',secUrl=secure, characters=characters, stats=stats, profit=profit(), profits=profits)


@app.route('/login', methods=['GET', 'POST'])
def login():
    """Username/password login with bcrypt verification."""
    error = None
    # NOTE(review): request.args['error'] raises a 400 when the query
    # parameter is absent — request.args.get('error') seems intended.
    error = request.args['error']
    if 'username' in session:
        return redirect(url_for('index'))
    try:
        if request.method == 'POST':
            username = request.form['username']
            cur = db.query("SELECT COUNT(1) FROM users WHERE user = %s", [username])
            if not cur.fetchone()[0]:
                raise ServerError('Incorrect username / password')
            password = request.form['password']
            cur = db.query("SELECT pass FROM users WHERE user = %s;", [username])
            for row in cur.fetchall():
                # bcrypt.hashpw with the stored hash as salt reproduces the
                # stored hash iff the password matches.
                if bcrypt.hashpw(password.encode('utf-8'), row[0]) == row[0]:
                    session['username'] = request.form['username']
                    return redirect(url_for('index'))
            raise ServerError('Incorrect username / password')
    except ServerError as e:
        error = str(e)
    return render_template('login.html', error=error)


@app.route('/logout')
def logout():
    """Clear the session and go back to the index (which redirects to login)."""
    session.pop('username', None)
    return redirect(url_for('index'))


@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new user account. Open to anonymous visitors only when
    registerEnabled is set in the config; logged-in admins can always
    register new users."""
    error = None
    if 'username' not in session:
        try:
            if request.method == 'POST':
                username = request.form['username']
                password = request.form['password']
                email = request.form['email']
                if not username or not password or not email:
                    raise ServerError('Fill in all fields please')
                password = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt(rounds))
                cur = db.query("INSERT INTO users (`user`, `pass`, `email`) VALUES (%s,%s,%s)", [username, password, email])
        except ServerError as e:
            error = str(e)
            return render_template('register.html', error=error)
        if config['registerEnabled']:
            return render_template('register.html')
        error = "Registration is disabled by the admin"
        return redirect(url_for('login', error=error))
    if session['username'] == 'admin':
        return render_template('register.html')
    error = "Only available to admins"
    return redirect(url_for('login', error=error))


@app.route('/userPage', methods=['GET','POST'])
def userPage():
    """Placeholder user profile page."""
    return "User! :-)"


@app.route('/oauth')
def oauth():
    """EVE SSO callback: exchange the auth code for tokens and store the
    character under the logged-in user."""
    code = request.args.get('code')
    if not code:
        return redirect(url_for('index'))
    token = security.auth(code)
    access_token = token['access_token']
    refresh_token = token['refresh_token']
    expires_in = token['expires_in']
    cur = db.query("SELECT id FROM users WHERE user = %s;", [session['username']])
    for row in cur.fetchall():
        userID = row[0]
    verify = security.verify()
    charID = verify.get('CharacterID')
    # NOTE(review): the prints below leak tokens to stdout — debug leftovers.
    print userID
    print charID
    print token
    print token['access_token']
    print token['refresh_token']
    print token['expires_in']
    epoch = round(time.time())
    expires = epoch + int(expires_in)
    cur = db.query("INSERT INTO characters (owner_id, character_id, access_token, refresh_token, expires, expires_in) VALUES (%s,%s,%s,%s,%s,%s) ON DUPLICATE KEY UPDATE access_token=%s, refresh_token=%s, expires=%s, expires_in=%s",[userID,charID,access_token,refresh_token,expires,int(expires_in),access_token,refresh_token,expires,int(expires_in)])
    return redirect(url_for('index'))


if __name__ == '__main__':
    if config['ssl']:
        app.run(
            host=serverIP,
            port=serverPort,
            ssl_context=context,
            debug=debug
        )
    else:
        app.run(
            host=serverIP,
            port=serverPort,
            debug=debug
        )
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QDialog

from .details_table import DetailsModel

class DetailsDialog(QDialog):
    """Tool-style dialog hosting the details table for the app's current
    selection. Subclasses build the concrete UI (including tableView) in
    _setupUi(); geometry is persisted through app.prefs."""

    def __init__(self, parent, app, **kwargs):
        super().__init__(parent, Qt.Tool, **kwargs)
        self.app = app
        self.model = app.model.details_panel
        self._setupUi()
        # To avoid saving uninitialized geometry on appWillSavePrefs, we track whether our dialog
        # has been shown. If it has, we know that our geometry should be saved.
        self._shown_once = False
        self.app.prefs.restoreGeometry('DetailsWindowRect', self)
        self.tableModel = DetailsModel(self.model)
        # tableView is defined in subclasses
        self.tableView.setModel(self.tableModel)
        self.model.view = self
        self.app.willSavePrefs.connect(self.appWillSavePrefs)

    def _setupUi(self): # Virtual
        pass

    def show(self):
        # Mark that real geometry now exists so it may be saved later.
        self._shown_once = True
        super().show()

    #--- Events
    def appWillSavePrefs(self):
        if self._shown_once:
            self.app.prefs.saveGeometry('DetailsWindowRect', self)

    #--- model --> view
    def refresh(self):
        # Full model reset: tells the view to re-query every cell.
        self.tableModel.beginResetModel()
        self.tableModel.endResetModel()
"""Line-following demo for the e-puck robot (Python 2): follows a floor
line with a proportional controller and lights LEDs facing obstacles."""
from ePuck import ePuck

import time
import sys
import re

# Known robots: ePuck ID -> Bluetooth MAC address.
epucks = {
    '1797' : '10:00:E8:6C:A2:B6',
    '1903' : '10:00:E8:6C:A1:C7'
}

def log(text):
    """	Show @text in standart output with colors	"""
    blue = '\033[1;34m'
    off = '\033[1;m'
    print(''.join((blue, '[Log] ', off, str(text))))

def error(text):
    # Same as log() but with a red [Error] prefix.
    red = '\033[1;31m'
    off = '\033[1;m'
    print(''.join((red, '[Error] ', off, str(text))))

def main(mac):
    """Connect to the e-puck at the given MAC and run the control loop
    until CTRL+C."""
    global_speed = 180   # base wheel speed
    fs_speed = 0.6       # proportional gain on the floor-sensor delta
    threshold = 1000     # proximity value above which an LED switches on
    log('Conecting with ePuck')
    try:
        # First, create an ePuck object.
        # If you want debug information:
        #~ robot = ePuck(mac, debug = True)
        # else:
        robot = ePuck(mac)

        # Second, connect to it
        robot.connect()

        # You can enable various sensors at the same time. Take a look to
        # to DIC_SENSORS for know the name of the sensors
        robot.enable('floor', 'proximity')
        leds_on = [0] * 8
        log('Conection complete. CTRL+C to stop')
        log('Library version: ' + robot.version)
        times_got = []
    except Exception, e:
        error(e)
        sys.exit(1)
    try:
        while True:
            # Important: when you execute 'step()', al sensors
            # and actuators are updated. All changes you do on the ePuck
            # will be effectives after this method, not before
            robot.step()

            # Now, we can get updated information from the sensors
            floor_sensors = robot.get_floor_sensors()
            prox_sensors = robot.get_proximity()

            # line_follower
            # Steer toward the line: positive delta turns one way,
            # negative the other.
            delta = floor_sensors[2] - floor_sensors[0]
            l_speed = global_speed - fs_speed * delta
            r_speed = global_speed + fs_speed * delta

            # Now, we set the motor speed. Remember that we need to execute 'step()'
            # for make this command effective
            robot.set_motors_speed(l_speed, r_speed)

            # leds on/off
            for index, s in enumerate(prox_sensors):
                if int(s) > threshold and leds_on[index] == 0:
                    # Switch On
                    robot.set_led(index, 1)
                    leds_on[index] = 1
                elif int(s) < threshold and leds_on[index] == 1:
                    # Switch Off
                    robot.set_led(index, 0)
                    leds_on[index] = 0
    except KeyboardInterrupt:
        log('Stoping the robot. Bye!')
        robot.close()
        sys.exit()
    except Exception, e:
        error(e)
    return 0

if __name__ == '__main__':
    # Regex for a Bluetooth MAC address.
    X = '([a-fA-F0-9]{2}[:|\-]?){6}'
    if len(sys.argv) < 2:
        error("Usage: " + sys.argv[0] + " ePuck_ID | MAC Address")
        sys.exit()
    robot_id = sys.argv[1]
    if epucks.has_key(robot_id):
        main(epucks[robot_id])
    # NOTE(review): re.match returns a match object or None, never 0,
    # so this comparison is always true and the else branch is dead.
    elif re.match(X, robot_id) != 0:
        main(robot_id)
    else:
        error('You have to indicate the MAC direction of the robot')
from django.contrib import admin

from iitem_database.models import Item, ItemClass, Area, Creature, Drops, Found, UserItems, ItemReview

# Expose every item-database model in the default Django admin, in the
# same order the names are imported above.
for model in (Item, ItemClass, Area, Creature, Drops, Found, UserItems, ItemReview):
    admin.site.register(model)
import sys
sys.path.append("..")
from ucnacore.PyxUtils import *
from math import *
from ucnacore.LinFitter import *
from bisect import bisect
from calib.FieldMapGen import *

def clip_function(y,rho,h,R):
    """Antiderivative used by survival_fraction; integrates the clipped
    chord area of a circle of radius rho against the aperture geometry."""
    sqd = sqrt(rho**2-y**2)
    if sqd==0:
        # Avoid division by zero in atan at the circle edge.
        sqd = 1e-10
    return h*rho**2/R*atan(y/sqd)+2*sqd/(3*R)*(3*h*y/2+rho**2-y**2)

def survival_fraction(h,rho,R):
    """Fraction of a disk source (radius rho, offset h) whose emission
    survives clipping by an aperture of radius R. Returns 0..1."""
    d = R-h
    if d < -rho:
        return 1
    if h <= -rho:
        return 0
    c1 = 0
    if d < rho:
        # Circular-segment area cut off on the far side.
        sqd = sqrt(rho**2-d**2)
        c1 = pi/2*rho**2-d*sqd-rho**2*atan(d/sqd)
    return ( c1 + clip_function(min(h,rho),rho,h,R) - clip_function(max(h-R,-rho),rho,h,R))/(pi*rho**2)

def radial_clip_function(r,rho,h,R):
    # Antiderivative for the radially-symmetric clipping variant below.
    return r**2*(3*h-2*r)/(6*R**2)

def radial_survival_fraction(h,rho,R):
    """Survival fraction for a centered disk source of radius rho against
    a clipping radius R at offset h (radial geometry variant)."""
    d = h-R
    if d > rho:
        return 1
    if h <= 0:
        return 0
    c1 = 0
    if d > 0:
        c1 = (h-R)**2
    return ( c1 + radial_clip_function(min(h,rho),rho,h,R) - radial_clip_function(max(d,0),rho,h,R) )/(rho**2)

class rot3:
    """3D rotation (Euler angles t1,t2,t3 about z-, x- and y-like axis
    pairs) plus uniform scale s, applied to (x,y,z) tuples.
    Note: Python 2 only — uses tuple-parameter unpacking."""
    def __init__(self,t1,t2,t3,s=1.0):
        self.c1,self.s1 = cos(t1),sin(t1)
        self.c2,self.s2 = cos(t2),sin(t2)
        self.c3,self.s3 = cos(t3),sin(t3)
        self.s = s
    def __call__(self,(x,y,z)):
        # Three successive planar rotations, then scaling.
        x,y = self.c1*x+self.s1*y,self.c1*y-self.s1*x
        y,z = self.c2*y+self.s2*z,self.c2*z-self.s2*y
        z,x = self.c3*z+self.s3*x,self.c3*x-self.s3*z
        return self.s*x,self.s*y,self.s*z

class path3d:
    """A 3D polyline drawn as its 2D (x,y) projection with PyX; the z
    coordinate is kept so that crossing curves can be patched in depth
    order (see interleave/drawInterleaved)."""
    def __init__(self):
        self.pts = []            # list of (x,y,z) points
        self.sty = []            # PyX stroke styles
        self.endsty = []         # extra styles for the final stroke (e.g. arrow)
        self.breakunder = False  # gap width where this path passes under another
        self.nopatch = False     # if set, never draw over-patches for this path
    def addpt(self,(x,y,z),s=1):
        self.pts.append((x*s,y*s,z*s))
    def apply(self,transf):
        # Apply a coordinate transform (e.g. rot3) to every point.
        self.pts = [transf(xyz) for xyz in self.pts]
    def finish(self):
        """Build the PyX 2D path from the projected points."""
        self.p = path.path()
        self.p.append(path.moveto(self.pts[0][0],self.pts[0][1]))
        for g in self.pts[1:]:
            self.p.append(path.lineto(g[0],g[1]))
        self.patchpts = []   # (arclength, z) where this path crosses on top
        self.underpts = []   # arclengths where this path passes underneath
    def nearestpt(self,(x,y)):
        # Index of the stored point closest to the 2D location (x,y).
        d0 = 1e20
        n = None
        for i in range(len(self.pts)):
            d1 = (self.pts[i][0]-x)**2+(self.pts[i][1]-y)**2
            if d1 < d0:
                d0 = d1
                n = i
        return n
    def znear(self,(x,y)):
        # Depth of the path at the point nearest to (x,y).
        return self.pts[self.nearestpt((x,y))][2]
    def znearc(self,c):
        # Depth at arclength parameter c along the PyX path.
        x,y = self.p.at(c)
        x,y = 100*x.t,100*y.t
        return self.znear((x,y))
    def addPatch(self,c,z):
        self.patchpts.append((c,z))
    def drawto(self,cnvs):
        cnvs.stroke(self.p,self.sty)

def interleave(p3d1,p3d2):
    """Find all 2D intersections of two path3d projections and record,
    for each crossing, which path is on top (patch) and which is under."""
    print "Finding intersection points..."
    is1,is2 = p3d1.p.intersect(p3d2.p)
    print "determining patch z..."
    assert len(is1)==len(is2)
    for i in range(len(is1)):
        z1 = p3d1.znearc(is1[i])
        z2 = p3d2.znearc(is2[i])
        if z1>z2:
            p3d1.addPatch(is1[i],z1)
            p3d2.underpts.append(is2[i])
        else:
            p3d2.addPatch(is2[i],z2)
            p3d1.underpts.append(is1[i])
    print "done."

def drawInterleaved(c,ps):
    """Draw the paths in ps on canvas c with proper over/under appearance:
    base curves first (optionally broken where they pass under), then
    short re-stroked segments sorted by depth at each crossing."""
    print "Drawing base curves..."
    for p in ps:
        p.p = p.p.normpath()
        if p.breakunder:
            # Cut a gap of width breakunder around every under-crossing.
            splits = []
            for s in p.underpts:
                splits += [s-p.breakunder*0.5,s+p.breakunder*0.5]
            psplit = p.p.split(splits)
            # Even-indexed segments are the visible pieces between gaps.
            for seg in psplit[0::2]:
                c.stroke(seg,p.sty)
        else:
            c.stroke(p.p,p.sty+p.endsty)
    print "Preparing patches..."
    patches = []
    for (pn,p) in enumerate(ps):
        if p.nopatch:
            continue
        p.patchpts.sort()
        splits = []
        for s in p.patchpts:
            splits += [s[0]-0.05,s[0]+0.05]
        psplit = p.p.split(splits)
        # Odd-indexed segments are the short pieces centered on crossings;
        # sort key is the crossing depth so deeper patches draw first.
        patches += [ (patch[1],pn,psplit[2*n+1]) for n,patch in enumerate(p.patchpts) ]
    patches.sort()
    print "Patching intersections..."
    for p in patches:
        c.stroke(p[2],ps[p[1]].sty)
    print "Done."
def fieldPath(fmap,z0,z1,c,cmax,npts=50):
    """Trace one magnetic field line of fmap from z0 to z1 as a path3d.
    c selects the line (flux coordinate); points with transverse offset
    beyond cmax are dropped so dense lines stay inside the picture."""
    pfield = path3d()
    for z in unifrange(z0,z1,npts):
        # Flux conservation: line spacing scales as 1/sqrt(B).
        Bdens = c/sqrt(fmap(z)+0.0001)
        if abs(Bdens) < cmax:
            pfield.addpt((0,Bdens,z))
    return pfield

def larmor_unif(fT,theta,KE,t):
    """Electron position at time t for a uniform field fT [T], pitch
    angle theta and kinetic energy KE [keV]; returns (x,y,z) in meters."""
    b = electron_beta(KE)
    z = t*b*cos(theta)*3e8            # m
    r = 3.3e-6*b*(KE+511)*sin(theta)/fT # m
    f = 2.8e10*fT                     # Hz
    return r*cos(2*pi*f*t),r*sin(2*pi*f*t),z

def larmor_step(p,pt2_per_B,fT):
    """Given total momentum p [keV] and the adiabatic invariant
    pt^2/B, return (vz, omega) in the local field fT: longitudinal
    velocity [m/s] and angular cyclotron frequency [rad/s]."""
    nu = 2.8e10*fT*2*pi       # angular frequency, Hz
    pt = sqrt(fT*pt2_per_B)   # transverse momentum component, keV
    if p<=pt:
        # Mirror point: all momentum is transverse.
        return 0,nu
    pl = sqrt(p**2-pt**2)     # longitudinal momentum, keV
    vz = pl/sqrt(p*p+511*511)*3e8; # z velocity, m/s
    return vz,nu

def larmorPath(fmap,p,pt2_per_B,z0,z1,dt,theta=0):
    """Integrate an electron's larmor spiral through field map fmap from
    z0 toward z1 with time step dt; stops at a mirror point (vz<=0)."""
    lpath = path3d()
    z = z0
    vz = 1
    while z0 <= z <= z1 and vz>0:
        fT = fmap(z)                        # magnetic field, T
        r = 3.3e-6*sqrt(pt2_per_B/fT)       # larmor radius, m
        lpath.addpt((r*cos(theta),r*sin(theta),z))
        # step to next point
        vz,nu = larmor_step(p,pt2_per_B,fmap(z))
        theta += nu*dt
        z += vz*dt
    return lpath

def plot_larmor_trajectory():
    """Render a larmor spiral threaded through field lines, with correct
    over/under interleaving, to Bfield.pdf and larmor_spiral.pdf."""
    fmap = fieldMap()
    fmap.addFlat(-1.0,0.01,1.0)
    fmap.addFlat(0.015,1.0,0.6)
    #fmap.addFlat(-1.0,0.01,0.6)
    #fmap.addFlat(0.08,1.0,1.0)
    fT = fmap(0)
    theta = 1.4
    KE = 511.
    #rot = rot3(0,0.0,-pi/2-0.2,500)
    rot = rot3(0,0.0,-pi/2+0.2,500)
    tm = 1e-9
    doFinal = True  # False skips the (slow) intersection interleaving

    plarmor = larmorPath(fmap,500,495**2/fmap(0),0,0.02,5e-13,3*pi/4)
    plarmor.apply(rot)
    #plarmor.sty = [style.linewidth.thick,rgb.red]
    plarmor.sty = [style.linewidth.thick]
    plarmor.endsty = [deco.earrow()]
    plarmor.finish()
    x0,y0 = plarmor.p.at(plarmor.p.begin())

    fieldlines = []
    w = 0.0025
    cmagf = canvas.canvas()
    for o in unifrange(-w,w,20):
        pf = fieldPath(fmap,-0.002,0.022,o,1.02*w)
        if len(pf.pts) < 10:
            continue
        pf.apply(rot)
        pf.finish()
        pf.breakunder = 0.07
        pf.nopatch = True
        #pf.sty=[style.linewidth.thin,rgb.blue]
        pf.sty=[style.linewidth.thin] # field line color/style
        fieldlines.append(pf)
        pf.drawto(cmagf)
        if doFinal:
            interleave(plarmor,pf)
    #cmagf.stroke(path.circle(x0,y0,0.07),[deco.filled([rgb.green])])
    cmagf.stroke(path.circle(x0,y0,0.07),[deco.filled([rgb.white]),style.linewidth.Thick])
    cmagf.writetofile("/Users/michael/Desktop/Bfield.pdf")

    c = canvas.canvas()
    if doFinal:
        drawInterleaved(c,[plarmor,]+fieldlines)
    else:
        plarmor.drawto(c)
        for pf in fieldlines:
            pf.drawto(c)
    #c.stroke(path.circle(x0,y0,0.07),[deco.filled([rgb.green])])
    c.stroke(path.circle(x0,y0,0.07),[deco.filled([rgb.white]),style.linewidth.Thick])
    c.writetofile("/Users/michael/Desktop/larmor_spiral.pdf")

def plot_spectrometer_field():
    """Draw the field-line picture for the full spectrometer field map."""
    fmap = fieldMap()
    fmap.addFlat(-3,-2.8,0.01)
    fmap.addFlat(-2.3,-2.1,0.6)
    fmap.addFlat(-1.6,1.6,1.0)
    fmap.addFlat(2.1,2.3,0.6)
    fmap.addFlat(2.8,3,0.01)
    rot = rot3(0.0,0.0,-pi/2.,10.)
    w = 0.25
    cmagf = canvas.canvas()
    for o in unifrange(-w,w,20):
        pf = fieldPath(fmap,-2.6,2.6,o,w,400)
        pf.apply(rot)
        #if len(pf.pts) < 10:
        #    continue
        pf.finish()
        #pf.sty=[style.linewidth.thin,rgb.blue]
        pf.sty=[style.linewidth.thin] # field line color/style
        pf.drawto(cmagf)
    cmagf.writetofile("/Users/michael/Desktop/Bfield.pdf")

def larmor_clipping_plot():
    """Plot survival fractions vs source offset for two clipping radii,
    plus their ratio, using survival_fraction()."""
    gSurv=graph.graphxy(width=20,height=10,
            x=graph.axis.lin(title="Source offset [mm]"),
            y=graph.axis.lin(title="",min=0,max=1),
            key = graph.key.key(pos="bl"))
    gSurv.texrunner.set(lfs='foils17pt')

    rho = 1.5
    h0 = 9.5
    gdat = [ [h0-h,survival_fraction(h,rho,2*3.3),survival_fraction(h,rho,2*3.3/2)] for h in unifrange(h0-10,h0,100) ]
    # Fourth column: ratio of the two survival curves (guarded near zero).
    gdat = [ g+[0.5*(g[2]<=1e-3)+(g[2]>1e-3)*(g[1]/(g[2]+1e-6)),] for g in gdat]

    gSurv.plot(graph.data.points(gdat,x=1,y=3,title="500keV line survival"),[graph.style.line([style.linewidth.Thick,rgb.blue])])
    gSurv.plot(graph.data.points(gdat,x=1,y=2,title="1MeV line survival"),[graph.style.line([style.linewidth.Thick,rgb.red])])
    gSurv.plot(graph.data.points(gdat,x=1,y=4,title="1MeV:500keV survival ratio"),[graph.style.line([style.linewidth.Thick])])

    gSurv.writetofile("/Users/michael/Desktop/survival_%g.pdf"%rho)

def radial_clipping_plot():
    """Same as larmor_clipping_plot but for the radial geometry, sweeping
    the source spot radius."""
    gSurv=graph.graphxy(width=20,height=10,
            x=graph.axis.lin(title="Source spot radius [mm]",min=0,max=9.5),
            y=graph.axis.lin(title="",min=0,max=1),
            key = graph.key.key(pos="bl"))
    gSurv.texrunner.set(lfs='foils17pt')

    h = 9.5
    gdat = [ [rho,radial_survival_fraction(h,rho,3.3),radial_survival_fraction(h,rho,3.3/2.0)] for rho in unifrange(0.,9.5,200) ]
    gdat = [ g+[0.5*(g[2]<=1e-3)+(g[2]>1e-3)*(g[1]/(g[2]+1e-6)),] for g in gdat]

    gSurv.plot(graph.data.points(gdat,x=1,y=3,title="500keV line survival"),[graph.style.line([style.linewidth.Thick,rgb.blue])])
    gSurv.plot(graph.data.points(gdat,x=1,y=2,title="1MeV line survival"),[graph.style.line([style.linewidth.Thick,rgb.red])])
    gSurv.plot(graph.data.points(gdat,x=1,y=4,title="1MeV:500keV survival ratio"),[graph.style.line([style.linewidth.Thick])])

    gSurv.writetofile("/Users/michael/Desktop/survival_radial.pdf")

if __name__ == "__main__":
    #larmor_clipping_plot()
    #radial_clipping_plot()
    #plot_larmor_trajectory()
    plot_spectrometer_field()
"""
Usage: from PBSQuery import PBSQuery

This class gets the info from the pbs_server via the pbs.py module
for the several batch objects. All get..() functions return an dictionary
with id as key and batch object as value

There are four batch objects:
 - server
 - queue
 - job
 - node

Each object can be handled as an dictionary and has several member
functions. The second parameter is an python list and can be used if you
are only interested in certain resources, see example

There are the following functions for PBSQuery:
  job -
	getjob(job_id, attributes=<default is all>)
	getjobs(attributes=<default is all>)

  node -
	getnode(node_id, attributes=<default is all>)
	getnodes(attributes=<default is all>)

  queue -
	getqueue(queue_id, attributes=<default is all>)
	getqueues(attributes=<default is all>)

  server -
	get_serverinfo(attributes=<default is all>)

Here is an example how to use the module:
	from PBSQuery import PBSQuery
	p = PBSQuery()
	nodes = p.getnodes()
	for name,node in nodes.items():
	    print name
	    if node.is_free():
	       print node, node['state']

	l = [ 'state', 'np' ]
	nodes = p.getnodes(l)
	for name,node in nodes.items():
	    print node, node['state']

The parameter 'attributes' is an python list of resources that
you are interested in, eg: only show state of nodes
        l = list()
	l.append('state')
	nodes = p.getnodes(l)
"""
import pbs
import UserDict
import string
import sys
import re
import types

class PBSError(Exception):
    """Module-level error type; carries the message from the pbs layer."""
    def __init__(self, msg=''):
        self.msg = msg
        Exception.__init__(self, msg)

    def __repr__(self):
        return self.msg

    __str__ = __repr__


class PBSQuery:
    """Queries a PBS/Torque server and converts the C-level stat results
    into dictionaries of job/node/queue/server objects."""
    # a[key] = value, key and value are data type string
    #
    OLD_DATA_STRUCTURE = False

    def __init__(self, server=None):
        if not server:
            self.server = pbs.pbs_default()
        else:
            self.server = server

        self._connect()
        ## this is needed for getjob a jobid is made off:
        # sequence_number.server (is not self.server)
        #
        self.job_server_id = list(self.get_serverinfo())[0]
        self._disconnect()

    def _connect(self):
        """Connect to the PBS/Torque server"""
        self.con = pbs.pbs_connect(self.server)
        if self.con < 0:
            # NOTE(review): 'str' shadows the builtin here.
            str = "Could not make a connection with %s\n" %(self.server)
            raise PBSError(str)

    def _disconnect(self):
        """Close the PBS/Torque connection"""
        pbs.pbs_disconnect(self.con)
        self.attribs = 'NULL'

    def _list_2_attrib(self, list):
        """Convert a python list to an attrib list suitable for pbs"""
        self.attribs = pbs.new_attrl( len(list) )
        i = 0
        for attrib in list:
            # So we can user Resource
            attrib = attrib.split('.')
            self.attribs[i].name = attrib[0]
            i = i + 1

    def _pbsstr_2_list(self, str, delimiter):
        """Convert a string to a python list and use delimiter as spit char"""
        # NOTE(review): 'sting' is a typo for 'string' — this method raises
        # NameError if ever called; appears to be dead code.
        l = sting.splitfields(str, delimiter)
        if len(l) > 1:
            return l

    def _list_2_dict(self, l, class_func):
        """
        Convert a pbsstat function list to a class dictionary, The
        data structure depends on the function new_data_structure().

        Default data structure is:
            class[key] = value, Where key and value are of type string

        Future release, can be set by new_data_structure():
        - class[key] = value where value can be:
          1. a list of values of type string
          2. a dictionary with as list of values of type string. If
             values contain a '=' character

          eg:
            print node['np']
            >> [ '2' ]
            print node['status']['arch']
            >> [ 'x86_64' ]
        """
        self.d = {}
        for item in l:
            new = class_func()
            self.d[item.name] = new
            new.name = item.name
            for a in item.attribs:
                if self.OLD_DATA_STRUCTURE:
                    # Flat "name.resource" -> string value mapping.
                    if a.resource:
                        key = '%s.%s' %(a.name, a.resource)
                    else:
                        key = '%s' %(a.name)
                    new[key] = a.value
                else:
                    values = string.split(a.value, ',')
                    sub_dict = string.split(a.value, '=')
                    # We must creat sub dicts, only for specified
                    # key values
                    #
                    if a.name in ['status', 'Variable_List']:
                        for v in values:
                            tmp_l = v.split('=')
                            ## Support for multiple EVENT mesages in format [key=value:]+
                            # format eg: message=EVENT:sample.time=1288864220.003,EVENT:kernel=upgrade,cputotals.user=0
                            # message=ERROR <text>
                            #
                            if tmp_l[0] in ['message']:
                                if tmp_l[1].startswith('EVENT:'):
                                    tmp_d = dict()
                                    new['event'] = class_func(tmp_d)
                                    message_list = v.split(':')
                                    for event_type in message_list[1:]:
                                        tmp_l = event_type.split('=')
                                        new['event'][ tmp_l[0] ] = tmp_l[1:]
                                else:
                                    ## ERROR message
                                    #
                                    new['error'] = tmp_l [1:]
                            elif tmp_l[0].startswith('EVENT:'):
                                # Continuation EVENT entries within the same value.
                                message_list = v.split(':')
                                for event_type in message_list[1:]:
                                    tmp_l = event_type.split('=')
                                    new['event'][ tmp_l[0] ] = tmp_l[1:]
                            else:
                                ## Check if we already added the key
                                #
                                if new.has_key(a.name):
                                    new[a.name][ tmp_l[0] ] = tmp_l[1:]
                                else:
                                    tmp_d = dict()
                                    tmp_d[ tmp_l[0] ] = tmp_l[1:]
                                    new[a.name] = class_func(tmp_d)
                    else:
                        ## Check if it is a resource type variable, eg:
                        # - Resource_List.(nodes, walltime, ..)
                        #
                        if a.resource:
                            if new.has_key(a.name):
                                new[a.name][a.resource] = values
                            else:
                                tmp_d = dict()
                                tmp_d[a.resource] = values
                                new[a.name] = class_func(tmp_d)
                        else:
                            # Simple value
                            #
                            new[a.name] = values
        self._free(l)

    def _free(self, memory):
        """
        freeing up used memmory
        """
        pbs.pbs_statfree(memory)

    def _statserver(self, attrib_list=None):
        """Get the server config from the pbs server"""
        if attrib_list:
            self._list_2_attrib(attrib_list)
        else:
            self.attribs = 'NULL'

        self._connect()
        serverinfo = pbs.pbs_statserver(self.con, self.attribs, 'NULL')
        self._disconnect()

        self._list_2_dict(serverinfo, server)

    def get_serverinfo(self, attrib_list=None):
        self._statserver(attrib_list)
        return self.d

    def _statqueue(self, queue_name='', attrib_list=None):
        """Get the queue config from the pbs server"""
        if attrib_list:
            self._list_2_attrib(attrib_list)
        else:
            self.attribs = 'NULL'

        self._connect()
        queues = pbs.pbs_statque(self.con, queue_name, self.attribs, 'NULL')
        self._disconnect()

        self._list_2_dict(queues, queue)

    def getqueue(self, name, attrib_list=None):
        self._statqueue(name, attrib_list)
        try:
            return self.d[name]
        except KeyError, detail:
            # Unknown queue: fall back to the (empty) full result dict.
            return self.d

    def getqueues(self, attrib_list=None):
        self._statqueue('', attrib_list)
        return self.d

    def _statnode(self, select='', attrib_list=None, property=None):
        """Get the node config from the pbs server"""
        if attrib_list:
            self._list_2_attrib(attrib_list)
        else:
            self.attribs = 'NULL'

        if property:
            # Node property selection uses the ":<property>" syntax.
            select = ':%s' %(property)

        self._connect()
        nodes = pbs.pbs_statnode(self.con, select, self.attribs, 'NULL')
        self._disconnect()

        self._list_2_dict(nodes, node)

    def getnode(self, name, attrib_list=None):
        self._statnode(name, attrib_list)
        try:
            return self.d[name]
        except KeyError, detail:
            return self.d

    def getnodes(self, attrib_list=None):
        self._statnode('', attrib_list)
        return self.d

    def getnodes_with_property(self, property, attrib_list=None):
        self._statnode('', attrib_list, property)
        return self.d

    def _statjob(self, job_name='', attrib_list=None):
        """Get the job config from the pbs server"""
        if attrib_list:
            self._list_2_attrib(attrib_list)
        else:
            self.attribs = 'NULL'

        self._connect()
        jobs = pbs.pbs_statjob(self.con, job_name, self.attribs, 'NULL')
        self._disconnect()

        self._list_2_dict(jobs, job)

    def getjob(self, name, attrib_list=None):
        ## To make sure we use the full name of a job; Changes a name
        # like 1234567 into 1234567.job_server_id
        #
        if len(name.split('.')) == 1 :
            name = name.split('.')[0] + "." + self.job_server_id
        self._statjob(name, attrib_list)
        try:
            return self.d[name]
        except KeyError, detail:
            return self.d

    def getjobs(self, attrib_list=None):
        self._statjob('', attrib_list)
        return self.d

    def get_server_name(self):
        return self.server

    def new_data_structure(self):
        """
        Use the new data structure. Is now the default
        """
        self.OLD_DATA_STRUCTURE = False

    def old_data_structure(self):
        """
        Use the old data structure. This function is obselete and
        will be removed in a future release
        """
        self.OLD_DATA_STRUCTURE = True


class _PBSobject(UserDict.UserDict):
    """Base class for job/node/queue/server result objects: a dictionary
    of attributes with convenience accessors."""
    TRUE  = 1
    FALSE = 0

    def __init__(self, dictin = None):
        UserDict.UserDict.__init__(self)
        self.name = None
        if dictin:
            if dictin.has_key('name'):
                self.name = dictin['name']
                del dictin['name']
            self.data = dictin

    def get_value(self, key):
        # Like dict.get(key) — returns None for missing keys.
        if self.has_key(key):
            return self[key]
        else:
            return None

    def __repr__(self):
        return repr(self.data)

    def __str__(self):
        return str(self.data)

    def __getattr__(self, name):
        """
        override the class attribute get method. Return the value
        from the Userdict
        """
        try:
            return self.data[name]
        except KeyError:
            error = 'Attribute key error: %s' %(name)
            raise PBSError(error)

    ## Disabled for this moment, BvdV 16 July 2010
    #
    #def __setattr__(self, name, value):
    #	"""
    #	override the class attribute set method only when the UserDict
    #	has set its class attribute
    #	"""
    #	if self.__dict__.has_key('data'):
    #		self.data[name] = value
    #	else:
    #		self.__dict__[name] = value

    def __iter__(self):
        return iter(self.data.keys())

    def uniq(self, list):
        """Filter out unique items of a list"""
        uniq_items = {}
        for item in list:
            uniq_items[item] = 1
        return uniq_items.keys()

    def return_value(self, key):
        """Function that returns a value independent of new or old data structure"""
        if isinstance(self[key], types.ListType):
            return self[key][0]
        else:
            return self[key]


class job(_PBSobject):
    """PBS job class"""
    def is_running(self):
        # NOTE(review): compares against 'Q' (queued), although the name
        # suggests the running state 'R' — confirm intended semantics.
        value = self.return_value('job_state')
        if value == 'Q':
            return self.TRUE
        else:
            return self.FALSE

    def get_nodes(self, unique=None):
        """
        Returns a list of the nodes which run this job
        format:
          * exec_host: gb-r10n14/5+gb-r10n14/4+gb-r10n14/3+gb-r10n14/2+gb-r10n14/1+gb-r10n14/0
          * split on '+' and if uniq is set split on '/'
        """
        nodes = self.get_value('exec_host')
        if isinstance(nodes, str):
            if nodes:
                nodelist = string.split(nodes,'+')
                if not unique:
                    return nodelist
                else:
                    l = list()
                    for n in nodelist:
                        t = string.split(n,'/')
                        if t[0] not in l:
                            l.append(t[0])
                    return l
            else:
                return list()
        else:
            # New data structure: exec_host is already a list of strings.
            l = list()
            for n in nodes:
                nlist = string.split(n,'+')
                if unique:
                    for entry in nlist:
                        t = string.split(entry,'/')
                        if t[0] not in l:
                            l.append(t[0])
                else:
                    l += nlist
            return l


class node(_PBSobject):
    """PBS node class"""
    def is_free(self):
        """Check if node is free"""
        value = self.return_value('state')
        if value == 'free':
            return self.TRUE
        else:
            return self.FALSE

    def has_job(self):
        """Does the node run a job"""
        try:
            a = self['jobs']
            return self.TRUE
        except KeyError, detail:
            return self.FALSE

    def get_jobs(self,
unique=None): """Returns a list of the currently running job-id('s) on the node""" jobs = self.get_value('jobs') if jobs: if isinstance(jobs, str): jlist = re.compile('[^\\ /]\\d+[^/.]').findall( jobs ) if not unique: return jlist else: return self.uniq(jlist) else: job_re = re.compile('^(?:\d+/)?(.+)') l = list() if unique: for j in jobs: jobstr = job_re.findall(j.strip())[0] if jobstr not in l: l.append(jobstr) return l else: return jobs return list() class queue(_PBSobject): """PBS queue class""" def is_enabled(self): value = self.return_value('enabled') if value == 'True': return self.TRUE else: return self.FALSE def is_execution(self): value = self.return_value('queue_type') if value == 'Execution': return self.TRUE else: return self.FALSE class server(_PBSobject): """PBS server class""" def get_version(self): return self.get_value('pbs_version') def main(): p = PBSQuery() serverinfo = p.get_serverinfo() for server in serverinfo.keys(): print server, ' version: ', serverinfo[server].get_version() for resource in serverinfo[server].keys(): print '\t ', resource, ' = ', serverinfo[server][resource] queues = p.getqueues() for queue in queues.keys(): print queue if queues[queue].is_execution(): print '\t ', queues[queue] if queues[queue].has_key('acl_groups'): print '\t acl_groups: yes' else: print '\t acl_groups: no' jobs = p.getjobs() for name,job in jobs.items(): if job.is_running(): print job l = ['state'] nodes = p.getnodes(l) for name,node in nodes.items(): if node.is_free(): print node if __name__ == "__main__": main()
from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: _fromUtf8 = lambda s: s class Ui_Form(object): def setupUi(self, Form): Form.setObjectName(_fromUtf8("Form")) Form.resize(820, 519) Form.setWindowTitle(_("Form")) self.gridLayout_2 = QtGui.QGridLayout(Form) self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2")) self.tabWidget = QtGui.QTabWidget(Form) self.tabWidget.setObjectName(_fromUtf8("tabWidget")) self.tab = QtGui.QWidget() self.tab.setObjectName(_fromUtf8("tab")) self.gridLayout_9 = QtGui.QGridLayout(self.tab) self.gridLayout_9.setObjectName(_fromUtf8("gridLayout_9")) self.label_7 = QtGui.QLabel(self.tab) self.label_7.setText(_("Choose &language (requires restart):")) self.label_7.setObjectName(_fromUtf8("label_7")) self.gridLayout_9.addWidget(self.label_7, 2, 0, 1, 1) self.opt_language = QtGui.QComboBox(self.tab) self.opt_language.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToMinimumContentsLengthWithIcon) self.opt_language.setMinimumContentsLength(20) self.opt_language.setObjectName(_fromUtf8("opt_language")) self.gridLayout_9.addWidget(self.opt_language, 2, 1, 1, 1) self.opt_systray_icon = QtGui.QCheckBox(self.tab) self.opt_systray_icon.setText(_("Enable system &tray icon (needs restart)")) self.opt_systray_icon.setObjectName(_fromUtf8("opt_systray_icon")) self.gridLayout_9.addWidget(self.opt_systray_icon, 3, 0, 1, 1) self.label_17 = QtGui.QLabel(self.tab) self.label_17.setText(_("User Interface &layout (needs restart):")) self.label_17.setObjectName(_fromUtf8("label_17")) self.gridLayout_9.addWidget(self.label_17, 1, 0, 1, 1) self.opt_gui_layout = QtGui.QComboBox(self.tab) self.opt_gui_layout.setMaximumSize(QtCore.QSize(250, 16777215)) self.opt_gui_layout.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToMinimumContentsLengthWithIcon) self.opt_gui_layout.setMinimumContentsLength(20) self.opt_gui_layout.setObjectName(_fromUtf8("opt_gui_layout")) self.gridLayout_9.addWidget(self.opt_gui_layout, 1, 1, 1, 1) 
self.opt_disable_animations = QtGui.QCheckBox(self.tab) self.opt_disable_animations.setToolTip(_("Disable all animations. Useful if you have a slow/old computer.")) self.opt_disable_animations.setText(_("Disable &animations")) self.opt_disable_animations.setObjectName(_fromUtf8("opt_disable_animations")) self.gridLayout_9.addWidget(self.opt_disable_animations, 3, 1, 1, 1) self.opt_disable_tray_notification = QtGui.QCheckBox(self.tab) self.opt_disable_tray_notification.setText(_("Disable &notifications in system tray")) self.opt_disable_tray_notification.setObjectName(_fromUtf8("opt_disable_tray_notification")) self.gridLayout_9.addWidget(self.opt_disable_tray_notification, 4, 0, 1, 1) self.opt_show_splash_screen = QtGui.QCheckBox(self.tab) self.opt_show_splash_screen.setText(_("Show &splash screen at startup")) self.opt_show_splash_screen.setObjectName(_fromUtf8("opt_show_splash_screen")) self.gridLayout_9.addWidget(self.opt_show_splash_screen, 4, 1, 1, 1) self.groupBox_2 = QtGui.QGroupBox(self.tab) self.groupBox_2.setTitle(_("&Toolbar")) self.groupBox_2.setObjectName(_fromUtf8("groupBox_2")) self.gridLayout_8 = QtGui.QGridLayout(self.groupBox_2) self.gridLayout_8.setObjectName(_fromUtf8("gridLayout_8")) self.opt_toolbar_icon_size = QtGui.QComboBox(self.groupBox_2) self.opt_toolbar_icon_size.setObjectName(_fromUtf8("opt_toolbar_icon_size")) self.gridLayout_8.addWidget(self.opt_toolbar_icon_size, 0, 1, 1, 1) self.label_5 = QtGui.QLabel(self.groupBox_2) self.label_5.setText(_("&Icon size:")) self.label_5.setObjectName(_fromUtf8("label_5")) self.gridLayout_8.addWidget(self.label_5, 0, 0, 1, 1) self.opt_toolbar_text = QtGui.QComboBox(self.groupBox_2) self.opt_toolbar_text.setObjectName(_fromUtf8("opt_toolbar_text")) self.gridLayout_8.addWidget(self.opt_toolbar_text, 1, 1, 1, 1) self.label_8 = QtGui.QLabel(self.groupBox_2) self.label_8.setText(_("Show &text under icons:")) self.label_8.setObjectName(_fromUtf8("label_8")) self.gridLayout_8.addWidget(self.label_8, 1, 0, 
1, 1) self.gridLayout_9.addWidget(self.groupBox_2, 7, 0, 1, 2) spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding) self.gridLayout_9.addItem(spacerItem, 8, 0, 1, 1) self.horizontalLayout = QtGui.QHBoxLayout() self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout")) self.label_2 = QtGui.QLabel(self.tab) self.label_2.setText(_("Interface font:")) self.label_2.setObjectName(_fromUtf8("label_2")) self.horizontalLayout.addWidget(self.label_2) self.font_display = QtGui.QLineEdit(self.tab) self.font_display.setReadOnly(True) self.font_display.setObjectName(_fromUtf8("font_display")) self.horizontalLayout.addWidget(self.font_display) self.gridLayout_9.addLayout(self.horizontalLayout, 6, 0, 1, 1) self.change_font_button = QtGui.QPushButton(self.tab) self.change_font_button.setText(_("Change &font (needs restart)")) self.change_font_button.setObjectName(_fromUtf8("change_font_button")) self.gridLayout_9.addWidget(self.change_font_button, 6, 1, 1, 1) self.label_widget_style = QtGui.QLabel(self.tab) self.label_widget_style.setText(_("User interface &style (needs restart):")) self.label_widget_style.setObjectName(_fromUtf8("label_widget_style")) self.gridLayout_9.addWidget(self.label_widget_style, 0, 0, 1, 1) self.opt_ui_style = QtGui.QComboBox(self.tab) self.opt_ui_style.setObjectName(_fromUtf8("opt_ui_style")) self.gridLayout_9.addWidget(self.opt_ui_style, 0, 1, 1, 1) self.opt_book_list_tooltips = QtGui.QCheckBox(self.tab) self.opt_book_list_tooltips.setText(_("Show &tooltips in the book list")) self.opt_book_list_tooltips.setObjectName(_fromUtf8("opt_book_list_tooltips")) self.gridLayout_9.addWidget(self.opt_book_list_tooltips, 5, 0, 1, 1) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(_fromUtf8(I("lt.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.tabWidget.addTab(self.tab, icon, _fromUtf8("")) self.tab_4 = QtGui.QWidget() self.tab_4.setObjectName(_fromUtf8("tab_4")) self.gridLayout_12 = 
QtGui.QGridLayout(self.tab_4) self.gridLayout_12.setObjectName(_fromUtf8("gridLayout_12")) self.label_3 = QtGui.QLabel(self.tab_4) self.label_3.setText(_("Note that <b>comments</b> will always be displayed at the end, regardless of the position you assign here.")) self.label_3.setWordWrap(True) self.label_3.setObjectName(_fromUtf8("label_3")) self.gridLayout_12.addWidget(self.label_3, 2, 1, 1, 1) self.opt_use_roman_numerals_for_series_number = QtGui.QCheckBox(self.tab_4) self.opt_use_roman_numerals_for_series_number.setText(_("Use &Roman numerals for series")) self.opt_use_roman_numerals_for_series_number.setChecked(True) self.opt_use_roman_numerals_for_series_number.setObjectName(_fromUtf8("opt_use_roman_numerals_for_series_number")) self.gridLayout_12.addWidget(self.opt_use_roman_numerals_for_series_number, 0, 1, 1, 1) self.groupBox = QtGui.QGroupBox(self.tab_4) self.groupBox.setTitle(_("Select displayed metadata")) self.groupBox.setObjectName(_fromUtf8("groupBox")) self.gridLayout_3 = QtGui.QGridLayout(self.groupBox) self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3")) self.df_up_button = QtGui.QToolButton(self.groupBox) self.df_up_button.setToolTip(_("Move up")) icon1 = QtGui.QIcon() icon1.addPixmap(QtGui.QPixmap(_fromUtf8(I("arrow-up.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.df_up_button.setIcon(icon1) self.df_up_button.setObjectName(_fromUtf8("df_up_button")) self.gridLayout_3.addWidget(self.df_up_button, 0, 1, 1, 1) self.df_down_button = QtGui.QToolButton(self.groupBox) self.df_down_button.setToolTip(_("Move down")) icon2 = QtGui.QIcon() icon2.addPixmap(QtGui.QPixmap(_fromUtf8(I("arrow-down.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.df_down_button.setIcon(icon2) self.df_down_button.setObjectName(_fromUtf8("df_down_button")) self.gridLayout_3.addWidget(self.df_down_button, 2, 1, 1, 1) self.field_display_order = QtGui.QListView(self.groupBox) self.field_display_order.setAlternatingRowColors(True) 
self.field_display_order.setObjectName(_fromUtf8("field_display_order")) self.gridLayout_3.addWidget(self.field_display_order, 0, 0, 3, 1) spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding) self.gridLayout_3.addItem(spacerItem1, 1, 1, 1, 1) self.gridLayout_12.addWidget(self.groupBox, 2, 0, 2, 1) self.hboxlayout = QtGui.QHBoxLayout() self.hboxlayout.setObjectName(_fromUtf8("hboxlayout")) self.label = QtGui.QLabel(self.tab_4) self.label.setText(_("Default author link template:")) self.label.setObjectName(_fromUtf8("label")) self.hboxlayout.addWidget(self.label) self.opt_default_author_link = QtGui.QLineEdit(self.tab_4) self.opt_default_author_link.setToolTip(_("<p>Enter a template to be used to create a link for\n" "an author in the books information dialog. This template will\n" "be used when no link has been provided for the author using\n" "Manage Authors. You can use the values {author} and\n" "{author_sort}, and any template function.")) self.opt_default_author_link.setObjectName(_fromUtf8("opt_default_author_link")) self.hboxlayout.addWidget(self.opt_default_author_link) self.gridLayout_12.addLayout(self.hboxlayout, 0, 0, 1, 1) self.opt_bd_show_cover = QtGui.QCheckBox(self.tab_4) self.opt_bd_show_cover.setText(_("Show &cover in the book details panel")) self.opt_bd_show_cover.setObjectName(_fromUtf8("opt_bd_show_cover")) self.gridLayout_12.addWidget(self.opt_bd_show_cover, 1, 0, 1, 2) icon3 = QtGui.QIcon() icon3.addPixmap(QtGui.QPixmap(_fromUtf8(I("book.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.tabWidget.addTab(self.tab_4, icon3, _fromUtf8("")) self.tab_2 = QtGui.QWidget() self.tab_2.setObjectName(_fromUtf8("tab_2")) self.gridLayout_10 = QtGui.QGridLayout(self.tab_2) self.gridLayout_10.setObjectName(_fromUtf8("gridLayout_10")) self.opt_categories_using_hierarchy = EditWithComplete(self.tab_2) self.opt_categories_using_hierarchy.setToolTip(_("A comma-separated list of categories in which items containing\n" 
"periods are displayed in the tag browser trees. For example, if\n" "this box contains \'tags\' then tags of the form \'Mystery.English\'\n" "and \'Mystery.Thriller\' will be displayed with English and Thriller\n" "both under \'Mystery\'. If \'tags\' is not in this box,\n" "then the tags will be displayed each on their own line.")) self.opt_categories_using_hierarchy.setObjectName(_fromUtf8("opt_categories_using_hierarchy")) self.gridLayout_10.addWidget(self.opt_categories_using_hierarchy, 3, 2, 1, 3) self.label_9 = QtGui.QLabel(self.tab_2) self.label_9.setText(_("Tags browser category &partitioning method:")) self.label_9.setObjectName(_fromUtf8("label_9")) self.gridLayout_10.addWidget(self.label_9, 0, 0, 1, 2) self.opt_tags_browser_partition_method = QtGui.QComboBox(self.tab_2) self.opt_tags_browser_partition_method.setToolTip(_("Choose how tag browser subcategories are displayed when\n" "there are more items than the limit. Select by first\n" "letter to see an A, B, C list. Choose partitioned to\n" "have a list of fixed-sized groups. Set to disabled\n" "if you never want subcategories")) self.opt_tags_browser_partition_method.setObjectName(_fromUtf8("opt_tags_browser_partition_method")) self.gridLayout_10.addWidget(self.opt_tags_browser_partition_method, 0, 2, 1, 1) self.label_10 = QtGui.QLabel(self.tab_2) self.label_10.setText(_("&Collapse when more items than:")) self.label_10.setObjectName(_fromUtf8("label_10")) self.gridLayout_10.addWidget(self.label_10, 0, 3, 1, 1) self.opt_tags_browser_collapse_at = QtGui.QSpinBox(self.tab_2) self.opt_tags_browser_collapse_at.setToolTip(_("If a Tag Browser category has more than this number of items, it is divided\n" "up into subcategories. 
If the partition method is set to disable, this value is ignored.")) self.opt_tags_browser_collapse_at.setMaximum(10000) self.opt_tags_browser_collapse_at.setObjectName(_fromUtf8("opt_tags_browser_collapse_at")) self.gridLayout_10.addWidget(self.opt_tags_browser_collapse_at, 0, 4, 1, 1) spacerItem2 = QtGui.QSpacerItem(690, 252, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding) self.gridLayout_10.addItem(spacerItem2, 5, 0, 1, 5) self.label_8111 = QtGui.QLabel(self.tab_2) self.label_8111.setText(_("Categories not to partition:")) self.label_8111.setObjectName(_fromUtf8("label_8111")) self.gridLayout_10.addWidget(self.label_8111, 1, 2, 1, 1) self.opt_tag_browser_dont_collapse = EditWithComplete(self.tab_2) self.opt_tag_browser_dont_collapse.setToolTip(_("A comma-separated list of categories that are not to\n" "be partitioned even if the number of items is larger than\n" "the value shown above. This option can be used to\n" "avoid collapsing hierarchical categories that have only\n" "a few top-level elements.")) self.opt_tag_browser_dont_collapse.setObjectName(_fromUtf8("opt_tag_browser_dont_collapse")) self.gridLayout_10.addWidget(self.opt_tag_browser_dont_collapse, 1, 3, 1, 2) self.opt_show_avg_rating = QtGui.QCheckBox(self.tab_2) self.opt_show_avg_rating.setText(_("Show &average ratings in the tags browser")) self.opt_show_avg_rating.setChecked(True) self.opt_show_avg_rating.setObjectName(_fromUtf8("opt_show_avg_rating")) self.gridLayout_10.addWidget(self.opt_show_avg_rating, 2, 0, 1, 5) self.label_81 = QtGui.QLabel(self.tab_2) self.label_81.setText(_("Categories with &hierarchical items:")) self.label_81.setObjectName(_fromUtf8("label_81")) self.gridLayout_10.addWidget(self.label_81, 3, 0, 1, 1) self.opt_tag_browser_old_look = QtGui.QCheckBox(self.tab_2) self.opt_tag_browser_old_look.setText(_("Use &alternating row colors in the Tag Browser")) self.opt_tag_browser_old_look.setObjectName(_fromUtf8("opt_tag_browser_old_look")) 
self.gridLayout_10.addWidget(self.opt_tag_browser_old_look, 4, 0, 1, 5) icon4 = QtGui.QIcon() icon4.addPixmap(QtGui.QPixmap(_fromUtf8(I("tags.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.tabWidget.addTab(self.tab_2, icon4, _fromUtf8("")) self.tab_3 = QtGui.QWidget() self.tab_3.setObjectName(_fromUtf8("tab_3")) self.gridLayout_11 = QtGui.QGridLayout(self.tab_3) self.gridLayout_11.setObjectName(_fromUtf8("gridLayout_11")) self.opt_separate_cover_flow = QtGui.QCheckBox(self.tab_3) self.opt_separate_cover_flow.setText(_("Show cover &browser in a separate window (needs restart)")) self.opt_separate_cover_flow.setObjectName(_fromUtf8("opt_separate_cover_flow")) self.gridLayout_11.addWidget(self.opt_separate_cover_flow, 0, 0, 1, 2) self.label_6 = QtGui.QLabel(self.tab_3) self.label_6.setText(_("&Number of covers to show in browse mode (needs restart):")) self.label_6.setObjectName(_fromUtf8("label_6")) self.gridLayout_11.addWidget(self.label_6, 1, 0, 1, 1) self.opt_cover_flow_queue_length = QtGui.QSpinBox(self.tab_3) self.opt_cover_flow_queue_length.setObjectName(_fromUtf8("opt_cover_flow_queue_length")) self.gridLayout_11.addWidget(self.opt_cover_flow_queue_length, 1, 1, 1, 1) spacerItem3 = QtGui.QSpacerItem(690, 283, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding) self.gridLayout_11.addItem(spacerItem3, 4, 0, 1, 2) self.opt_cb_fullscreen = QtGui.QCheckBox(self.tab_3) self.opt_cb_fullscreen.setText(_("When showing cover browser in separate window, show it &fullscreen")) self.opt_cb_fullscreen.setObjectName(_fromUtf8("opt_cb_fullscreen")) self.gridLayout_11.addWidget(self.opt_cb_fullscreen, 2, 0, 1, 2) self.fs_help_msg = QtGui.QLabel(self.tab_3) self.fs_help_msg.setStyleSheet(_fromUtf8("margin-left: 1.5em")) self.fs_help_msg.setText(_("You can press the %s keys to toggle full screen mode.")) self.fs_help_msg.setWordWrap(True) self.fs_help_msg.setObjectName(_fromUtf8("fs_help_msg")) self.gridLayout_11.addWidget(self.fs_help_msg, 3, 0, 1, 2) icon5 = 
QtGui.QIcon() icon5.addPixmap(QtGui.QPixmap(_fromUtf8(I("cover_flow.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.tabWidget.addTab(self.tab_3, icon5, _fromUtf8("")) self.gridLayout_2.addWidget(self.tabWidget, 0, 0, 1, 1) self.label_7.setBuddy(self.opt_language) self.label_17.setBuddy(self.opt_gui_layout) self.label_5.setBuddy(self.opt_toolbar_icon_size) self.label_8.setBuddy(self.opt_toolbar_text) self.label_2.setBuddy(self.font_display) self.label_widget_style.setBuddy(self.opt_ui_style) self.label.setBuddy(self.opt_default_author_link) self.label_9.setBuddy(self.opt_tags_browser_partition_method) self.label_10.setBuddy(self.opt_tags_browser_collapse_at) self.label_8111.setBuddy(self.opt_tag_browser_dont_collapse) self.label_81.setBuddy(self.opt_categories_using_hierarchy) self.label_6.setBuddy(self.opt_cover_flow_queue_length) self.retranslateUi(Form) self.tabWidget.setCurrentIndex(0) QtCore.QMetaObject.connectSlotsByName(Form) def retranslateUi(self, Form): self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _("Main Interface")) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), _("Book Details")) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _("Tag Browser")) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _("Cover Browser")) from calibre.gui2.complete2 import EditWithComplete
""" Author : qbeenslee Created : 2014/12/12 """ import re CLIENT_ID = "TR5kVmYeMEh9M" ''' 传输令牌格式 加密方式$迭代次数$盐$结果串 举个栗子: ====start==== md5$23$YUXQ_-2GfwhzVpt5IQWp$3ebb6e78bf7d0c1938578855982e2b1c ====end==== ''' MATCH_PWD = r"md5\$(\d\d)\$([a-zA-Z0-9_\-]{20})\$([a-f0-9]{32})" REMATCH_PWD = re.compile(MATCH_PWD) SUPPORT_IMAGE_TYPE_LIST = ['image/gif', 'image/jpeg', 'image/png', 'image/bmp', 'image/x-png', 'application/octet-stream'] MAX_UPLOAD_FILE_SIZE = 10485760 # 10*1024*1024 =10M MIN_IMAGE_SIZE = {'w': 10, 'h': 10} MAX_IMAGE_SIZE = {'w': 4000, 'h': 4000} THUMB_SIZE_SMALL = {'w': 100, 'h': 100, 'thumb': 's'} THUMB_SIZE_NORMAL = {'w': 480, 'h': 480, 'thumb': 'n'} THUMB_SIZE_LARGE = {'w': 3000, 'h': 3000, 'thumb': 'l'} THUMB_SIZE_ORIGIN = {'w': 0, 'h': 0, 'thumb': 'r'} MAX_SHARE_DESCRIPTION_SIZE = 140 NOW_ANDROID_VERSION_CODE = 7 NOW_VERSION_DOWNLOAD_URL = "/static/download/nepenthes-beta0.9.3.apk" MAX_RAND_EMAIL_CODE = 99999 MIN_RAND_EMAIL_CODE = 10000 PRECISION = 12 LOACTION_PRECISION = 4 PAGE_SIZE = 10
# Catkin/ROS devel-space package shim: extends this package's __path__
# with the source-space directories and executes the source __init__
# files in this module's namespace.
from os import path as os_path
from sys import path as sys_path

from pkgutil import extend_path

# Source-space roots for this package (';'-separated list).
__extended_path = "/home/pi/Documents/desenvolvimentoRos/src/tf2_ros/src".split(";")
# Prepend each root to sys.path, preserving the listed order.
for p in reversed(__extended_path):
    sys_path.insert(0, p)
    del p
del sys_path

# Make this a namespace-style package spanning the source roots.
__path__ = extend_path(__path__, __name__)
del extend_path

# Collect the source __init__ file(s): either <root>/<name>.py or
# <root>/<name>/__init__.py, whichever exists.
__execfiles = []
for p in __extended_path:
    src_init_file = os_path.join(p, __name__ + '.py')
    if os_path.isfile(src_init_file):
        __execfiles.append(src_init_file)
    else:
        src_init_file = os_path.join(p, __name__, '__init__.py')
        if os_path.isfile(src_init_file):
            __execfiles.append(src_init_file)
    del src_init_file
    del p
del os_path
del __extended_path

# Execute the source package's __init__ code in this namespace so the
# devel-space package re-exports the source-space API.
# NOTE: exec on trusted, locally generated files only.
for __execfile in __execfiles:
    with open(__execfile, 'r') as __fh:
        exec(__fh.read())
    del __fh
    del __execfile
del __execfiles
from Motion import *
import sys

if __name__ == '__main__':
    # The single argument is the turn angle passed to Drive.turn().
    if len(sys.argv) < 2:
        exit("Must specify direction change")

    # Parse the argument first, so a bad number is reported as such and
    # Drive() failures are not silently relabelled (the original bare
    # `except:` swallowed every error, including Drive construction).
    try:
        angle = float(sys.argv[1])
    except ValueError:
        exit("Specify direction change as a number")

    drive = Drive()
    drive.turn(angle)
import xml.dom.minidom
from time import strftime, strptime
from sys import exit
from textwrap import wrap  # NOTE(review): imported but unused in this module
from os import path


def colorize(the_color='blue',entry='',new_line=0):
    """Wrap *entry* in ANSI escape codes for *the_color*; append a
    newline when new_line is 1."""
    color={'gray':30,'green':32,'red':31,'blue':34,'magenta':35,'cyan':36,'white':37,'highgreen':42,'highblue':44,'highred':41,'highgray':47}
    # Reuse the parameter as the suffix string ('' or '\n').
    if new_line==1:
        new_line='\n'
    else:
        new_line=''
    return_me='\033[1;'+str(color[the_color])+'m'+entry+'\033[1;m'+new_line
    return return_me


def getText(nodelist):
    """Concatenate the data of all TEXT_NODE children in *nodelist*."""
    rc = []
    for node in nodelist:
        if node.nodeType == node.TEXT_NODE:
            rc.append(node.data)
    return ''.join(rc)


def aws_print_error(error_obj):
    """Print the Code/Message from an AWS XML error response (element 2
    of *error_obj*) in red, then exit the process."""
    # NOTE(review): the XML body is parsed twice; could be parsed once.
    error_code=getText(xml.dom.minidom.parseString(error_obj[2]).documentElement.getElementsByTagName('Code')[0].childNodes)
    error_message=getText(xml.dom.minidom.parseString(error_obj[2]).documentElement.getElementsByTagName('Message')[0].childNodes)
    error_message=colorize('red',"ERROR",1)+colorize('red',"AWS Error Code: ")+error_code+colorize('red',"\nError Message: ")+error_message
    print error_message
    exit()
    # Unreachable after exit(); kept for interface compatibility.
    return True


def print_error(error_text):
    """Print *error_text* as a red ERROR banner, then exit the process."""
    error_message=colorize('red',"ERROR",1)+colorize('red',"\nError Message: ")+error_text
    print error_message
    exit()
    # Unreachable after exit(); kept for interface compatibility.
    return True


def makeover(entry,ismonochrome=False):
    """Format a journal *entry* (mapping with 'entry' and ISO 'date'
    fields plus a .name id) for display, colored unless monochrome."""
    if ismonochrome==False:
        output=colorize('gray','========================================',1)
        output+=colorize('cyan',entry['entry'],1)
        output+=colorize('cyan',strftime("%H:%M %m.%d.%Y", strptime(entry['date'],"%Y-%m-%dT%H:%M:%S+0000")),1)
        output+=colorize('gray','ID: '+entry.name,0)
    else:
        output="========================================\n"
        output+=entry['entry']+"\n"
        output+=strftime("%H:%M %m.%d.%Y", strptime(entry['date'],"%Y-%m-%dT%H:%M:%S+0000"))+"\n"
        output+='ID: '+entry.name
    return output


def print_help():
    """Print DOCUMENTATION.mkd (next to this script) and exit."""
    filepath = path.join(path.dirname(path.abspath(__file__)), 'DOCUMENTATION.mkd')
    f = open(filepath,'r')
    print f.read()
    f.close()
    exit()
from ert.cwrap import BaseCClass, CWrapper from ert.enkf import AnalysisConfig, EclConfig, EnkfObs, EnKFState, LocalConfig, ModelConfig, EnsembleConfig, PlotConfig, SiteConfig, ENKF_LIB, EnkfSimulationRunner, EnkfFsManager, ErtWorkflowList, PostSimulationHook from ert.enkf.enums import EnkfInitModeEnum from ert.util import SubstitutionList, Log class EnKFMain(BaseCClass): def __init__(self, model_config, strict=True): c_ptr = EnKFMain.cNamespace().bootstrap(model_config, strict, False) super(EnKFMain, self).__init__(c_ptr) self.__simulation_runner = EnkfSimulationRunner(self) self.__fs_manager = EnkfFsManager(self) @classmethod def createCReference(cls, c_pointer, parent=None): obj = super(EnKFMain, cls).createCReference(c_pointer, parent) obj.__simulation_runner = EnkfSimulationRunner(obj) obj.__fs_manager = EnkfFsManager(obj) return obj @staticmethod def createNewConfig(config_file, storage_path, case_name, dbase_type, num_realizations): EnKFMain.cNamespace().create_new_config(config_file, storage_path, case_name, dbase_type, num_realizations) def getRealisation(self , iens): """ @rtype: EnKFState """ if 0 <= iens < self.getEnsembleSize(): return EnKFMain.cNamespace().iget_state(self, iens).setParent(self) else: raise IndexError("iens value:%d invalid Valid range: [0,%d)" % (iens , len(self))) def set_eclbase(self, eclbase): EnKFMain.cNamespace().set_eclbase(self, eclbase) def umount(self): self.__fs_manager.umount() def free(self): self.umount() EnKFMain.cNamespace().free(self) def getEnsembleSize(self): """ @rtype: int """ return EnKFMain.cNamespace().get_ensemble_size(self) def resizeEnsemble(self, value): EnKFMain.cNamespace().resize_ensemble(self, value) def ensembleConfig(self): """ @rtype: EnsembleConfig """ return EnKFMain.cNamespace().get_ens_config(self).setParent(self) def analysisConfig(self): """ @rtype: AnalysisConfig """ return EnKFMain.cNamespace().get_analysis_config(self).setParent(self) def getModelConfig(self): """ @rtype: ModelConfig """ 
return EnKFMain.cNamespace().get_model_config(self).setParent(self) def logh(self): """ @rtype: Log """ return EnKFMain.cNamespace().get_logh(self).setParent(self) def local_config(self): """ @rtype: LocalConfig """ return EnKFMain.cNamespace().get_local_config(self).setParent(self) def siteConfig(self): """ @rtype: SiteConfig """ return EnKFMain.cNamespace().get_site_config(self).setParent(self) def eclConfig(self): """ @rtype: EclConfig """ return EnKFMain.cNamespace().get_ecl_config(self).setParent(self) def plotConfig(self): """ @rtype: PlotConfig """ return EnKFMain.cNamespace().get_plot_config(self).setParent(self) def set_datafile(self, datafile): EnKFMain.cNamespace().set_datafile(self, datafile) def get_schedule_prediction_file(self): schedule_prediction_file = EnKFMain.cNamespace().get_schedule_prediction_file(self) return schedule_prediction_file def set_schedule_prediction_file(self, file): EnKFMain.cNamespace().set_schedule_prediction_file(self, file) def getDataKW(self): """ @rtype: SubstitutionList """ return EnKFMain.cNamespace().get_data_kw(self) def clearDataKW(self): EnKFMain.cNamespace().clear_data_kw(self) def addDataKW(self, key, value): EnKFMain.cNamespace().add_data_kw(self, key, value) def getMountPoint(self): return EnKFMain.cNamespace().get_mount_point(self) def del_node(self, key): EnKFMain.cNamespace().del_node(self, key) def getObservations(self): """ @rtype: EnkfObs """ return EnKFMain.cNamespace().get_obs(self).setParent(self) def load_obs(self, obs_config_file): EnKFMain.cNamespace().load_obs(self, obs_config_file) def reload_obs(self): EnKFMain.cNamespace().reload_obs(self) def get_pre_clear_runpath(self): pre_clear = EnKFMain.cNamespace().get_pre_clear_runpath(self) return pre_clear def set_pre_clear_runpath(self, value): EnKFMain.cNamespace().set_pre_clear_runpath(self, value) def iget_keep_runpath(self, iens): ikeep = EnKFMain.cNamespace().iget_keep_runpath(self, iens) return ikeep def iset_keep_runpath(self, iens, 
keep_runpath): EnKFMain.cNamespace().iset_keep_runpath(self, iens, keep_runpath) def get_templates(self): return EnKFMain.cNamespace().get_templates(self).setParent(self) def get_site_config_file(self): site_conf_file = EnKFMain.cNamespace().get_site_config_file(self) return site_conf_file def getUserConfigFile(self): """ @rtype: str """ config_file = EnKFMain.cNamespace().get_user_config_file(self) return config_file def getHistoryLength(self): return EnKFMain.cNamespace().get_history_length(self) def getMemberRunningState(self, ensemble_member): """ @rtype: EnKFState """ return EnKFMain.cNamespace().iget_state(self, ensemble_member).setParent(self) def get_observations(self, user_key, obs_count, obs_x, obs_y, obs_std): EnKFMain.cNamespace().get_observations(self, user_key, obs_count, obs_x, obs_y, obs_std) def get_observation_count(self, user_key): return EnKFMain.cNamespace().get_observation_count(self, user_key) def getEnkfSimulationRunner(self): """ @rtype: EnkfSimulationRunner """ return self.__simulation_runner def getEnkfFsManager(self): """ @rtype: EnkfFsManager """ return self.__fs_manager def getWorkflowList(self): """ @rtype: ErtWorkflowList """ return EnKFMain.cNamespace().get_workflow_list(self).setParent(self) def getPostSimulationHook(self): """ @rtype: PostSimulationHook """ return EnKFMain.cNamespace().get_qc_module(self) def exportField(self, keyword, path, iactive, file_type, report_step, state, enkfFs): """ @type keyword: str @type path: str @type iactive: BoolVector @type file_type: EnkfFieldFileFormatEnum @type report_step: int @type state: EnkfStateType @type enkfFs: EnkfFs """ assert isinstance(keyword, str) return EnKFMain.cNamespace().export_field_with_fs(self, keyword, path, iactive, file_type, report_step, state, enkfFs) def loadFromForwardModel(self, realization, iteration, fs): EnKFMain.cNamespace().load_from_forward_model(self, iteration, realization, fs) def submitSimulation(self , run_arg): EnKFMain.cNamespace().submit_simulation( 
self , run_arg) def getRunContextENSEMPLE_EXPERIMENT(self , fs , iactive , init_mode = EnkfInitModeEnum.INIT_CONDITIONAL , iteration = 0): return EnKFMain.cNamespace().alloc_run_context_ENSEMBLE_EXPERIMENT( self , fs , iactive , init_mode , iteration ) cwrapper = CWrapper(ENKF_LIB) cwrapper.registerType("enkf_main", EnKFMain) cwrapper.registerType("enkf_main_ref", EnKFMain.createCReference) EnKFMain.cNamespace().bootstrap = cwrapper.prototype("c_void_p enkf_main_bootstrap(char*, bool, bool)") EnKFMain.cNamespace().free = cwrapper.prototype("void enkf_main_free(enkf_main)") EnKFMain.cNamespace().get_ensemble_size = cwrapper.prototype("int enkf_main_get_ensemble_size( enkf_main )") EnKFMain.cNamespace().get_ens_config = cwrapper.prototype("ens_config_ref enkf_main_get_ensemble_config( enkf_main )") EnKFMain.cNamespace().get_model_config = cwrapper.prototype("model_config_ref enkf_main_get_model_config( enkf_main )") EnKFMain.cNamespace().get_local_config = cwrapper.prototype("local_config_ref enkf_main_get_local_config( enkf_main )") EnKFMain.cNamespace().get_analysis_config = cwrapper.prototype("analysis_config_ref enkf_main_get_analysis_config( enkf_main)") EnKFMain.cNamespace().get_site_config = cwrapper.prototype("site_config_ref enkf_main_get_site_config( enkf_main)") EnKFMain.cNamespace().get_ecl_config = cwrapper.prototype("ecl_config_ref enkf_main_get_ecl_config( enkf_main)") EnKFMain.cNamespace().get_plot_config = cwrapper.prototype("plot_config_ref enkf_main_get_plot_config( enkf_main)") EnKFMain.cNamespace().set_eclbase = cwrapper.prototype("ui_return_obj enkf_main_set_eclbase( enkf_main, char*)") EnKFMain.cNamespace().set_datafile = cwrapper.prototype("void enkf_main_set_data_file( enkf_main, char*)") EnKFMain.cNamespace().get_schedule_prediction_file = cwrapper.prototype("char* enkf_main_get_schedule_prediction_file( enkf_main )") EnKFMain.cNamespace().set_schedule_prediction_file = cwrapper.prototype("void enkf_main_set_schedule_prediction_file( 
enkf_main , char*)") EnKFMain.cNamespace().get_data_kw = cwrapper.prototype("subst_list_ref enkf_main_get_data_kw(enkf_main)") EnKFMain.cNamespace().clear_data_kw = cwrapper.prototype("void enkf_main_clear_data_kw(enkf_main)") EnKFMain.cNamespace().add_data_kw = cwrapper.prototype("void enkf_main_add_data_kw(enkf_main, char*, char*)") EnKFMain.cNamespace().resize_ensemble = cwrapper.prototype("void enkf_main_resize_ensemble(enkf_main, int)") EnKFMain.cNamespace().del_node = cwrapper.prototype("void enkf_main_del_node(enkf_main, char*)") EnKFMain.cNamespace().get_obs = cwrapper.prototype("enkf_obs_ref enkf_main_get_obs(enkf_main)") EnKFMain.cNamespace().load_obs = cwrapper.prototype("void enkf_main_load_obs(enkf_main, char*)") EnKFMain.cNamespace().reload_obs = cwrapper.prototype("void enkf_main_reload_obs(enkf_main)") EnKFMain.cNamespace().get_pre_clear_runpath = cwrapper.prototype("bool enkf_main_get_pre_clear_runpath(enkf_main)") EnKFMain.cNamespace().set_pre_clear_runpath = cwrapper.prototype("void enkf_main_set_pre_clear_runpath(enkf_main, bool)") EnKFMain.cNamespace().iget_keep_runpath = cwrapper.prototype("int enkf_main_iget_keep_runpath(enkf_main, int)") EnKFMain.cNamespace().iset_keep_runpath = cwrapper.prototype("void enkf_main_iset_keep_runpath(enkf_main, int, int_vector)") EnKFMain.cNamespace().get_templates = cwrapper.prototype("ert_templates_ref enkf_main_get_templates(enkf_main)") EnKFMain.cNamespace().get_site_config_file = cwrapper.prototype("char* enkf_main_get_site_config_file(enkf_main)") EnKFMain.cNamespace().get_history_length = cwrapper.prototype("int enkf_main_get_history_length(enkf_main)") EnKFMain.cNamespace().get_observations = cwrapper.prototype("void enkf_main_get_observations(enkf_main, char*, int, long*, double*, double*)") EnKFMain.cNamespace().get_observation_count = cwrapper.prototype("int enkf_main_get_observation_count(enkf_main, char*)") EnKFMain.cNamespace().iget_state = cwrapper.prototype("enkf_state_ref 
enkf_main_iget_state(enkf_main, int)") EnKFMain.cNamespace().get_workflow_list = cwrapper.prototype("ert_workflow_list_ref enkf_main_get_workflow_list(enkf_main)") EnKFMain.cNamespace().get_qc_module = cwrapper.prototype("qc_module_ref enkf_main_get_qc_module(enkf_main)") EnKFMain.cNamespace().fprintf_config = cwrapper.prototype("void enkf_main_fprintf_config(enkf_main)") EnKFMain.cNamespace().create_new_config = cwrapper.prototype("void enkf_main_create_new_config(char* , char*, char* , char* , int)") EnKFMain.cNamespace().get_user_config_file = cwrapper.prototype("char* enkf_main_get_user_config_file(enkf_main)") EnKFMain.cNamespace().get_mount_point = cwrapper.prototype("char* enkf_main_get_mount_root( enkf_main )") EnKFMain.cNamespace().export_field = cwrapper.prototype("bool enkf_main_export_field(enkf_main, char*, char*, bool_vector, enkf_field_file_format_enum, int, enkf_state_type_enum)") EnKFMain.cNamespace().export_field_with_fs = cwrapper.prototype("bool enkf_main_export_field_with_fs(enkf_main, char*, char*, bool_vector, enkf_field_file_format_enum, int, enkf_state_type_enum, enkf_fs_manager)") EnKFMain.cNamespace().load_from_forward_model = cwrapper.prototype("void enkf_main_load_from_forward_model_from_gui(enkf_main, int, bool_vector, enkf_fs)") EnKFMain.cNamespace().submit_simulation = cwrapper.prototype("void enkf_main_isubmit_job(enkf_main , run_arg)") EnKFMain.cNamespace().alloc_run_context_ENSEMBLE_EXPERIMENT= cwrapper.prototype("ert_run_context_obj enkf_main_alloc_ert_run_context_ENSEMBLE_EXPERIMENT( enkf_main , enkf_fs , bool_vector , enkf_init_mode_enum , int)")
'''
This file is part of pyShop

pyShop is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

pyShop is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with pyShop. If not, see <http://www.gnu.org/licenses/>.

Copyright (c) Steve "Uru" West 2012 <uruwolf@gmail.com>
'''
from products.models import Catergory, Product
from django.contrib import admin

# Categories need no custom admin options; register with the defaults.
# (NB: "Catergory" is the model's actual — misspelled — name.)
admin.site.register(Catergory)


class ProductAdmin(admin.ModelAdmin):
    '''Admin panel configuration for Product objects.

    The change list shows name and category, can be filtered by
    category, and is searchable by name and description.'''

    list_display = ('name', 'catergory')
    list_filter = ['catergory']
    search_fields = ['name', 'description']


admin.site.register(Product, ProductAdmin)
import hashlib
import json
import logging
import os

LOG = logging.getLogger(__name__)

from oneconf.hosts import Hosts, HostError
from oneconf.distributor import get_distro
from oneconf.paths import ONECONF_CACHE_DIR, PACKAGE_LIST_PREFIX


class PackageSetHandler(object):
    """
    Direct access to database for getting and updating the list
    """

    def __init__(self, hosts=None):
        self.hosts = hosts
        if not hosts:
            self.hosts = Hosts()
        self.distro = get_distro()
        self.last_storage_sync = None
        # cache for storage package list, indexed by hostid
        self.package_list = {}

    def update(self):
        '''Update the store with the current local package list.'''
        hostid = self.hosts.current_host['hostid']
        LOG.debug("Updating package list")
        newpkg_list = self.distro.compute_local_packagelist()

        LOG.debug("Creating the checksum")
        # hashlib requires bytes on Python 3 (str raised TypeError here);
        # str(dict) is only a change-detection fingerprint, never parsed back.
        checksum = hashlib.sha224(str(newpkg_list).encode('utf-8')).hexdigest()

        LOG.debug("Package list need refresh")
        self.package_list[hostid] = {'valid': True,
                                     'package_list': newpkg_list}
        storage_path = os.path.join(self.hosts.get_currenthost_dir(),
                                    '%s_%s' % (PACKAGE_LIST_PREFIX, hostid))
        with open(storage_path, 'w') as f:
            json.dump(self.package_list[hostid]['package_list'], f)

        if self.hosts.current_host['packages_checksum'] != checksum:
            self.hosts.current_host['packages_checksum'] = checksum
            self.hosts.save_current_host()
        LOG.debug("Update done")

    def get_packages(self, hostid=None, hostname=None, only_manual=False):
        '''Get all installed packages from the storage.

        If only_manual is True, restrict the result to packages that
        were not automatically installed.'''
        hostid = self.hosts.get_hostid_from_context(hostid, hostname)
        LOG.debug("Request for package list for %s with only manual packages "
                  "reduced scope to: %s", hostid, only_manual)
        package_list = self._get_installed_packages(hostid)
        if only_manual:
            package_list = [pkg for pkg in package_list
                            if not package_list[pkg]["auto"]]
        return package_list

    def _get_installed_packages(self, hostid):
        '''Get installed packages from the storage or the in-memory cache.

        Return: up-to-date package_list'''
        need_reload = False
        try:
            if self.package_list[hostid]['valid']:
                LOG.debug("Hit cache for package list")
                package_list = self.package_list[hostid]['package_list']
            else:
                need_reload = True
        except KeyError:
            # host never seen in this session
            need_reload = True

        if need_reload:
            self.package_list[hostid] = {
                'valid': True,
                'package_list': self._get_packagelist_from_store(hostid)}
        return self.package_list[hostid]['package_list']

    def diff(self, distant_hostid=None, distant_hostname=None):
        '''Get a diff of the current package state against another host.

        This compares all packages installed on both computers.

        Return: (packages_to_install (packages in distant_hostid not in
                 local hostid),
                 packages_to_remove (packages in local hostid not in
                 distant_hostid))
        '''
        distant_hostid = self.hosts.get_hostid_from_context(distant_hostid,
                                                            distant_hostname)

        LOG.debug("Collecting all installed packages on this system")
        # was get_packages(hostid, False): the False landed on the
        # *hostname* parameter, not only_manual — pass it by keyword.
        local_package_list = set(self.get_packages(
            self.hosts.current_host['hostid'], only_manual=False))

        LOG.debug("Collecting all installed packages on the other system")
        distant_package_list = set(self.get_packages(distant_hostid,
                                                     only_manual=False))

        LOG.debug("Comparing")
        packages_to_install = [x for x in distant_package_list
                               if x not in local_package_list]
        packages_to_remove = [x for x in local_package_list
                              if x not in distant_package_list]

        # for Dbus which doesn't like empty list
        if not packages_to_install:
            packages_to_install = ''
        if not packages_to_remove:
            packages_to_remove = ''
        return (packages_to_install, packages_to_remove)

    def _get_packagelist_from_store(self, hostid):
        '''Load the package list for one computer from its on-disk cache.'''
        LOG.debug('get package list from store for hostid: %s' % hostid)
        try:
            with open(os.path.join(self.hosts.get_currenthost_dir(),
                                   '%s_%s' % (PACKAGE_LIST_PREFIX, hostid)),
                      'r') as f:
                # can be None in a corrupted null file
                pkg_list = json.load(f)
        except (IOError, ValueError):
            LOG.warning("no valid package list stored for hostid: %s" % hostid)
            pkg_list = None
        if pkg_list is None:
            pkg_list = {}
            # There is no way that no package is installed in the current
            # host: at least, there is oneconf ;) Ask for a refresh.
            if hostid == self.hosts.current_host['hostid']:
                LOG.debug("Processing first update for current host")
                self.update()
                pkg_list = self.package_list[hostid]['package_list']
        return pkg_list
from __future__ import unicode_literals import markdown from markdown.treeprocessors import Treeprocessor from markdown.blockprocessors import BlockProcessor import re from markdown import util import xml.etree.ElementTree as ET import copy from markdown.inlinepatterns import IMAGE_LINK_RE class InFigureParser(object): def transform(self, parent, element, legend, index, InP = False): if InP: lelems = list(element.iter()) oldImg = lelems[-1] element.remove(oldImg) else: oldImg = element nFig = util.etree.Element("figure") nFigCaption = util.etree.Element("figcaption") contentLegend = legend.items() for el in legend: legend.remove(el) nFigCaption.append(el) nFig.append(oldImg) nFig.append(nFigCaption) parent.remove(element) parent.remove(legend) parent.insert(index, nFig) class FigureParser(InFigureParser): def __init__(self, ignoringImg): InFigureParser.__init__(self) self.ignoringImg = ignoringImg self.ree = re.compile(r"^" + IMAGE_LINK_RE + r"(\n|$)") def detect(self, element, type): if element == None: return False lelems = list(element.iter()) #print repr(element.text) return (type == "unknown" or type == "Figure") \ and element.tag=="p" \ and( ( element.text is not None \ and self.ree.search(element.text)) \ or ( (element.text is None or element.text.strip() == "") \ and (len(lelems) == 1 or (len(lelems)==2 and lelems[0] is element)) \ and lelems[-1].tag == "img" \ and (lelems[-1].attrib["src"] not in self.ignoringImg))) def transform(self, parent, element, legend, index): InFigureParser.transform(self, parent, element, legend, index, True) class EquationParser(InFigureParser): def detect(self, element, type): if element == None: return False lelems = list(element.iter()) return (type == "unknown" or type == "Equation") \ and element.tag=="p" \ and (element.text is None or element.text.strip() == "") \ and (len(lelems) == 1 or (len(lelems)==2 and lelems[0] is element)) \ and lelems[-1].tag == "mathjax" def transform(self, parent, element, legend, index): 
InFigureParser.transform(self, parent, element, legend, index, True) class CodeParser(InFigureParser): def __init__(self, md): self.md = md def detect(self, element, type): if element == None: return False if (type == "unknown" or type == "Code") and element.tag=="p" : hs = self.md.htmlStash for i in range(hs.html_counter): if element.text == hs.get_placeholder(i) : Teste = ET.fromstring(hs.rawHtmlBlocks[i][0].encode('utf-8')) if Teste is not None and Teste.tag=="table" and "class" in Teste.attrib and Teste.attrib["class"] == "codehilitetable": return True else: return False return False class QuoteParser(InFigureParser): def detect(self, element, type): if element == None: return False return (type == "unknown" or type == "Source") and element.tag=="blockquote" class TableParser(object): def detect(self, element, type): if element == None: return False return (type == "unknown" or type == "Table") and element.tag=="table" def transform(self, parent, element, legend, index): parent.remove(legend) cap = util.etree.Element('caption') contentLegend = legend.items() for el in legend: legend.remove(el) cap.append(el) element.insert(0, cap) class VideoParser(InFigureParser): def detect(self, element, type): if element == None: return False lelems = list(element.iter()) return (type == "unknown" or type == "Video") \ and element.tag=="iframe" class SmartLegendProcessor(Treeprocessor): def __init__(self, parser, configs, md): Treeprocessor.__init__(self, parser) self.configs = configs self.processors = ( FigureParser(configs["IGNORING_IMG"]), EquationParser(), CodeParser(md), TableParser(), VideoParser(), QuoteParser()) def run(self, root): root = self.parse_legend(root) root = self.parse_autoimg(root) return root def parse_legend(self, root): elemsToInspect = [root] while len(elemsToInspect) > 0: elem = elemsToInspect.pop() Restart=True while Restart: Restart = False precedent = None i=0 for nelem in elem: if nelem.tag in self.configs["PARENTS"] and nelem not in 
elemsToInspect: elemsToInspect.append(nelem) if nelem.tag == "customlegend" and precedent is not None : # and len(list(nelem.itertext())) == 0 : proc = self.detectElement(precedent, nelem.attrib["type"]) if proc is not None: proc.transform(elem, precedent, nelem, i-1) Restart = True break precedent = nelem i+=1 return root def parse_autoimg(self, root): elemsToInspect = [root] while len(elemsToInspect) > 0: elem = elemsToInspect.pop() Restart=True while Restart: Restart = False i=0 for nelem in elem: if nelem.tag in self.configs["PARENTS"] and nelem not in elemsToInspect: elemsToInspect.append(nelem) #Auto Legend for image if nelem.tag == 'p' and len(list(nelem.itertext())) == 0 : lelems = list(nelem.iter()) if (len(lelems) == 1 or (len(lelems)==2 and lelems[0] is nelem)) \ and lelems[-1].tag == "img" \ and lelems[-1].attrib["alt"] != "" \ and not (lelems[-1].attrib["src"] in self.configs["IGNORING_IMG"]): oldImg = lelems[-1] nelem.remove(oldImg) nFig = util.etree.Element("figure") nFigCaption = util.etree.Element("figcaption") nFigCaption.text = oldImg.attrib["alt"] oldImg.attrib["alt"]="" nFig.append(oldImg) nFig.append(nFigCaption) nelem.insert(i-1, nFig) Restart = True break i+=1 return root def detectElement(self, elem, legend): for proc in self.processors: if proc.detect(elem, legend) : return proc return None class LegendProcessor(BlockProcessor): def __init__(self, parser, md, configs): BlockProcessor.__init__(self, parser) self.md = md self.configs = configs self.processors = ( FigureParser(configs["IGNORING_IMG"]), EquationParser(), CodeParser(md), TableParser(), VideoParser(), QuoteParser()) self.RE = re.compile(r'(^|(?<=\n))((?P<typelegend>Figure|Table|Code|Equation|Video|Source)\s?)*\:\s?(?P<txtlegend>.*?)(\n|$)') def detectElement(self, elem, legend): for proc in self.processors: if proc.detect(elem, legend) : return proc return None def test(self, parent, block): mLeg = self.RE.search(block) if not bool(mLeg): return False else: return True def 
test_complete(self, parent, block): mLeg = self.RE.search(block) gd = mLeg.groupdict() if gd["typelegend"] is None: type = "unknown" else: type = gd["typelegend"] sibling = self.lastChild(parent) return self.detectElement(sibling, type) is not None def run(self, parent, blocks): block = blocks.pop(0) mLeg = self.RE.search(block) before = block[:mLeg.start()] after = block[mLeg.end():] contentStart = block[mLeg.start():mLeg.start("txtlegend")] cpp = None if before: cpp = copy.copy(parent) self.parser.parseBlocks(cpp, [before]) else: cpp = parent if not self.test_complete(cpp, block): blocks.insert(0, block) return False elif before: self.parser.parseBlocks(parent, [before]) nLegend = util.etree.Element("customlegend") self.parser.parseChunk(nLegend, mLeg.group('txtlegend')) gd = mLeg.groupdict() if gd["typelegend"] is None: nLegend.set("type", "unknown") else: nLegend.set("type", gd["typelegend"]) nLegend.set("rawStart", contentStart) parent.append(nLegend) if after: blocks.insert(0,after) class SmartLegendExtension(markdown.extensions.Extension): def __init__(self, configs={}): self.configs = { "IGNORING_IMG" : [], "PARENTS" : [], } for key, value in configs.items(): self.configs[key] = value if "div" not in self.configs["PARENTS"]: self.configs["PARENTS"].append("div") pass def extendMarkdown(self, md, md_globals): md.registerExtension(self) md.treeprocessors.add('smart-legend', SmartLegendProcessor(md.parser,self.configs, md),"_end") md.parser.blockprocessors.add('legend-processor', LegendProcessor(md.parser,md, self.configs),"_begin") def makeExtension(configs={}): return SmartImgExtension(configs=configs)
import tkinter
import tkinter.ttk
import logging

from devparrot.core import session, userLogging


class StatusBar(tkinter.Frame, logging.Handler):
    """Bottom status bar.

    Shows user-log messages on the left (colored by severity) and the
    insert-cursor / selection position on the right. The widget doubles
    as a logging.Handler registered on the session userLogger."""

    def __init__(self, parent):
        tkinter.Frame.__init__(self, parent)
        logging.Handler.__init__(self)
        self.pack(side=tkinter.BOTTOM, fill=tkinter.X)
        self['relief'] = 'sunken'
        session.userLogger.addHandler(self)

        # message area (left)
        self.label = tkinter.Label(self)
        self.label.pack(side='left', fill=tkinter.BOTH, expand=True)
        self.defaultColor = self['background']
        self.label['anchor'] = 'nw'

        sep = tkinter.ttk.Separator(self, orient="vertical")
        sep.pack(side='left', fill='y')

        # cursor/selection indicator (right)
        self.insertLabel = tkinter.ttk.Label(self)
        self.insertLabel.pack(side='right', expand=False, fill="none")
        session.eventSystem.connect('mark_set', self.on_mark_set)

        self.currentLevel = 0
        self.callbackId = 0

    def flush(self):
        """overide logging.Handler.flush"""
        pass

    def clear(self):
        """Reset the message area to its idle state."""
        self.currentLevel = 0
        self.label['text'] = ""
        self.label['background'] = self.defaultColor
        self.callbackId = 0

    def emit(self, record):
        """overide logging.Handler.emit"""
        # only let a record replace the display if it is at least as severe
        if record.levelno < self.currentLevel:
            return
        self.currentLevel = record.levelno
        self.label['text'] = record.getMessage()
        color_options = {
            userLogging.INFO: 'ok_color',
            userLogging.ERROR: 'error_color',
            userLogging.INVALID: 'invalid_color',
        }
        option = color_options.get(self.currentLevel)
        if option is not None:
            self.label['background'] = session.config.get(option)
        # (re)arm the 5 s auto-clear timer
        if self.callbackId:
            self.after_cancel(self.callbackId)
        self.callbackId = self.after(5000, self.clear)

    def on_mark_set(self, model, name, index):
        """Track the insert mark and mirror the position on the right."""
        if name != "insert":
            return
        if model.sel_isSelection():
            first = model.index("sel.first")
            last = model.index("sel.last")
            self.insertLabel['text'] = "[%s:%s]" % (first, last)
        else:
            self.insertLabel['text'] = str(model.index("insert"))
from Sire.Base import *
from Sire.IO import *
from Sire.Mol import *

from glob import glob
from nose.tools import assert_equal, assert_almost_equal

# Probe whether this Sire build ships Mol2 support; catch only real
# errors (a bare except would also swallow KeyboardInterrupt).
has_mol2 = True
try:
    p = Mol2()
except Exception:
    # No Mol2 support.
    has_mol2 = False


def test_read_write(verbose=False):
    """Round-trip every example Mol2 file through a Sire System."""
    if not has_mol2:
        return

    # Glob all of the Mol2 files in the example file directory.
    mol2files = glob('../io/*mol2')

    # Loop over all test files ('filename', not 'file': avoid shadowing
    # the builtin).
    for filename in mol2files:

        # Test in parallel and serial mode.
        for use_par in [True, False]:

            if verbose:
                print("Reading Mol2 file: %s" % filename)
                print("Parallel = %s" % use_par)

            # Parse the file into a Mol2 object.
            p = Mol2(filename, {"parallel": wrap(use_par)})

            if verbose:
                print("Constructing molecular system...")

            # Construct a Sire molecular system.
            s = p.toSystem()

            if verbose:
                print("Reconstructing Mol2 data from molecular system...")

            # Now re-parse the molecular system.
            p = Mol2(s, {"parallel": wrap(use_par)})

            if verbose:
                print("Passed!\n")


def test_atom_coords(verbose=False):
    """Validate parsed atomic coordinates against known values."""
    if not has_mol2:
        return

    # Test atoms.
    atoms = ["N", "CA", "C", "O", "CB"]

    # Test coordinates.
    coords = [[-2.9880, -2.0590, -2.6220],
              [-3.8400, -2.0910, -7.4260],
              [-6.4250, -3.9190, -10.9580],
              [-6.1980, -6.0090, -14.2910],
              [-9.8700, -6.5500, -15.2480]]

    # Test in parallel and serial mode.
    for use_par in [True, False]:

        if verbose:
            print("Reading Mol2 file: ../io/complex.mol2")
            print("Parallel = %s" % use_par)

        # Parse the Mol2 file.
        p = Mol2('../io/complex.mol2', {"parallel": wrap(use_par)})

        if verbose:
            print("Constructing molecular system...")

        # Create a molecular system.
        s = p.toSystem()

        # Get the first molecule.
        m = s[MolIdx(0)]

        if verbose:
            print("Checking atomic coordinates...")

        # Atom i lives in residue number i + 1.
        for i, atom_name in enumerate(atoms):
            # Extract the atom from the residue "i + 1".
            a = m.atom(AtomName(atom_name) + ResNum(i + 1))

            # Extract the atom coordinates.
            c = a.property("coordinates")

            # Validate parsed coordinates against known values.
            assert_almost_equal(c[0], coords[i][0])
            assert_almost_equal(c[1], coords[i][1])
            assert_almost_equal(c[2], coords[i][2])

        if verbose:
            print("Passed!\n")


def test_residues(verbose=False):
    """Validate chain and residue counts and some residue names."""
    if not has_mol2:
        return

    # Test in parallel and serial mode.
    for use_par in [True, False]:

        if verbose:
            print("Reading Mol2 file: ../io/complex.mol2")
            print("Parallel = %s" % use_par)

        # Parse the Mol2 file.
        p = Mol2('../io/complex.mol2', {"parallel": wrap(use_par)})

        if verbose:
            print("Constructing molecular system...")

        # Create a molecular system.
        s = p.toSystem()

        # Get the two molecules.
        m1 = s[MolIdx(0)]
        m2 = s[MolIdx(1)]

        # Get the chains from the molecules.
        c1 = m1.chains()
        c2 = m2.chains()

        if verbose:
            print("Checking chain and residue data...")

        # Check the number of chains in each molecule.
        assert_equal(len(c1), 3)
        assert_equal(len(c2), 1)

        # Check the number of residues in each chain of the first molecule.
        assert_equal(len(c1[0].residues()), 118)
        assert_equal(len(c1[1].residues()), 114)
        assert_equal(len(c1[2].residues()), 118)

        # Check the number of residues in the single chain of the
        # second molecule.
        assert_equal(len(c2[0].residues()), 1)

        # Check some specific residue names.
        # NOTE(review): the original comment said "first chain" but two of
        # these index c1[1] — verify which chain was intended.
        assert_equal(c1[0].residues()[0].name().toString(), "ResName('PRO1')")
        assert_equal(c1[1].residues()[1].name().toString(), "ResName('MET2')")
        assert_equal(c1[1].residues()[2].name().toString(), "ResName('PHE3')")

        if verbose:
            print("Passed!\n")


if __name__ == "__main__":
    test_read_write(True)
    test_atom_coords(True)
    test_residues(True)
''' Dialogs and widgets Responsible for creation, restoration of accounts are defined here. Namely: CreateAccountDialog, CreateRestoreDialog, RestoreSeedDialog ''' from functools import partial from kivy.app import App from kivy.clock import Clock from kivy.lang import Builder from kivy.properties import ObjectProperty, StringProperty, OptionProperty from kivy.core.window import Window from electrum_gui.kivy.uix.dialogs import EventsDialog from electrum.i18n import _ Builder.load_string(''' <WizardTextInput@TextInput> border: 4, 4, 4, 4 font_size: '15sp' padding: '15dp', '15dp' background_color: (1, 1, 1, 1) if self.focus else (0.454, 0.698, 0.909, 1) foreground_color: (0.31, 0.31, 0.31, 1) if self.focus else (0.835, 0.909, 0.972, 1) hint_text_color: self.foreground_color background_active: 'atlas://gui/kivy/theming/light/create_act_text_active' background_normal: 'atlas://gui/kivy/theming/light/create_act_text_active' size_hint_y: None height: '48sp' <WizardButton@Button>: root: None size_hint: 1, None height: '48sp' on_press: if self.root: self.root.dispatch('on_press', self) on_release: if self.root: self.root.dispatch('on_release', self) <-WizardDialog> text_color: .854, .925, .984, 1 #auto_dismiss: False size_hint: None, None canvas.before: Color: rgba: 0, 0, 0, .9 Rectangle: size: Window.size Color: rgba: .239, .588, .882, 1 Rectangle: size: Window.size crcontent: crcontent # add electrum icon BoxLayout: orientation: 'vertical' if self.width < self.height else 'horizontal' padding: min(dp(42), self.width/8), min(dp(60), self.height/9.7),\ min(dp(42), self.width/8), min(dp(72), self.height/8) spacing: '27dp' GridLayout: id: grid_logo cols: 1 pos_hint: {'center_y': .5} size_hint: 1, .42 #height: self.minimum_height Image: id: logo_img mipmap: True allow_stretch: True size_hint: 1, None height: '110dp' source: 'atlas://gui/kivy/theming/light/electrum_icon640' Widget: size_hint: 1, None height: 0 if stepper.opacity else dp(15) Label: color: root.text_color 
opacity: 0 if stepper.opacity else 1 text: 'ELECTRUM' size_hint: 1, None height: self.texture_size[1] if self.opacity else 0 font_size: '33sp' font_name: 'data/fonts/tron/Tr2n.ttf' Image: id: stepper allow_stretch: True opacity: 0 source: 'atlas://gui/kivy/theming/light/stepper_left' size_hint: 1, None height: grid_logo.height/2.5 if self.opacity else 0 Widget: size_hint: None, None size: '5dp', '5dp' GridLayout: cols: 1 id: crcontent spacing: '13dp' <CreateRestoreDialog> Label: color: root.text_color size_hint: 1, None text_size: self.width, None height: self.texture_size[1] text: _("Wallet file not found!!")+"\\n\\n" +\ _("Do you want to create a new wallet ")+\ _("or restore an existing one?") Widget size_hint: 1, None height: dp(15) GridLayout: id: grid orientation: 'vertical' cols: 1 spacing: '14dp' size_hint: 1, None height: self.minimum_height WizardButton: id: create text: _('Create a new seed') root: root WizardButton: id: restore text: _('I already have a seed') root: root <RestoreSeedDialog> Label: color: root.text_color size_hint: 1, None text_size: self.width, None height: self.texture_size[1] text: "[b]ENTER YOUR SEED PHRASE[/b]" GridLayout cols: 1 padding: 0, '12dp' orientation: 'vertical' spacing: '12dp' size_hint: 1, None height: self.minimum_height WizardTextInput: id: text_input_seed size_hint: 1, None height: '110dp' hint_text: _('Enter your seedphrase') on_text: root._trigger_check_seed() Label: font_size: '12sp' text_size: self.width, None size_hint: 1, None height: self.texture_size[1] halign: 'justify' valign: 'middle' text: root.message on_ref_press: import webbrowser webbrowser.open('https://electrum.org/faq.html#seed') GridLayout: rows: 1 spacing: '12dp' size_hint: 1, None height: self.minimum_height WizardButton: id: back text: _('Back') root: root Button: id: scan text: _('QR') on_release: root.scan_seed() WizardButton: id: next text: _('Next') root: root <ShowSeedDialog> spacing: '12dp' Label: color: root.text_color size_hint: 1, None 
text_size: self.width, None height: self.texture_size[1] text: "[b]PLEASE WRITE DOWN YOUR SEED PHRASE[/b]" GridLayout: id: grid cols: 1 pos_hint: {'center_y': .5} size_hint_y: None height: dp(180) orientation: 'vertical' Button: border: 4, 4, 4, 4 halign: 'justify' valign: 'middle' font_size: self.width/15 text_size: self.width - dp(24), self.height - dp(12) #size_hint: 1, None #height: self.texture_size[1] + dp(24) color: .1, .1, .1, 1 background_normal: 'atlas://gui/kivy/theming/light/white_bg_round_top' background_down: self.background_normal text: root.seed_text Label: rows: 1 size_hint: 1, .7 id: but_seed border: 4, 4, 4, 4 halign: 'justify' valign: 'middle' font_size: self.width/21 text: root.message text_size: self.width - dp(24), self.height - dp(12) GridLayout: rows: 1 spacing: '12dp' size_hint: 1, None height: self.minimum_height WizardButton: id: back text: _('Back') root: root WizardButton: id: confirm text: _('Confirm') root: root ''') class WizardDialog(EventsDialog): ''' Abstract dialog to be used as the base for all Create Account Dialogs ''' crcontent = ObjectProperty(None) def __init__(self, **kwargs): super(WizardDialog, self).__init__(**kwargs) self.action = kwargs.get('action') _trigger_size_dialog = Clock.create_trigger(self._size_dialog) Window.bind(size=_trigger_size_dialog, rotation=_trigger_size_dialog) _trigger_size_dialog() Window.softinput_mode = 'pan' def _size_dialog(self, dt): app = App.get_running_app() if app.ui_mode[0] == 'p': self.size = Window.size else: #tablet if app.orientation[0] == 'p': #portrait self.size = Window.size[0]/1.67, Window.size[1]/1.4 else: self.size = Window.size[0]/2.5, Window.size[1] def add_widget(self, widget, index=0): if not self.crcontent: super(WizardDialog, self).add_widget(widget) else: self.crcontent.add_widget(widget, index=index) def on_dismiss(self): app = App.get_running_app() if app.wallet is None and self._on_release is not None: print "on dismiss: stopping app" app.stop() else: 
Window.softinput_mode = 'below_target' class CreateRestoreDialog(WizardDialog): ''' Initial Dialog for creating or restoring seed''' def on_parent(self, instance, value): if value: app = App.get_running_app() self._back = _back = partial(app.dispatch, 'on_back') class ShowSeedDialog(WizardDialog): seed_text = StringProperty('') message = StringProperty('') def on_parent(self, instance, value): if value: app = App.get_running_app() stepper = self.ids.stepper stepper.opacity = 1 stepper.source = 'atlas://gui/kivy/theming/light/stepper_full' self._back = _back = partial(self.ids.back.dispatch, 'on_release') class RestoreSeedDialog(WizardDialog): message = StringProperty('') def __init__(self, **kwargs): super(RestoreSeedDialog, self).__init__(**kwargs) self._test = kwargs['test'] self._trigger_check_seed = Clock.create_trigger(self.check_seed) def check_seed(self, dt): self.ids.next.disabled = not bool(self._test(self.get_seed_text())) def get_seed_text(self): ti = self.ids.text_input_seed text = unicode(ti.text).strip() text = ' '.join(text.split()) return text def scan_seed(self): def on_complete(text): self.ids.text_input_seed.text = text app = App.get_running_app() app.scan_qr(on_complete) def on_parent(self, instance, value): if value: tis = self.ids.text_input_seed tis.focus = True tis._keyboard.bind(on_key_down=self.on_key_down) stepper = self.ids.stepper stepper.opacity = 1 stepper.source = ('atlas://gui/kivy/theming' '/light/stepper_restore_seed') self._back = _back = partial(self.ids.back.dispatch, 'on_release') app = App.get_running_app() #app.navigation_higherarchy.append(_back) def on_key_down(self, keyboard, keycode, key, modifiers): if keycode[0] in (13, 271): self.on_enter() return True def on_enter(self): #self._remove_keyboard() # press next next = self.ids.next if not next.disabled: next.dispatch('on_release') def _remove_keyboard(self): tis = self.ids.text_input_seed if tis._keyboard: tis._keyboard.unbind(on_key_down=self.on_key_down) tis.focus = 
False def close(self): self._remove_keyboard() app = App.get_running_app() #if self._back in app.navigation_higherarchy: # app.navigation_higherarchy.pop() # self._back = None super(RestoreSeedDialog, self).close()
from ._pylib import ffi, lib
from . import tile
from cached_property import cached_property


def find(path):
    """Detect and create a core for the ROM at `path`.

    Returns a wrapped Core (or platform subclass) or None when no core
    recognizes the file.
    """
    core = lib.mCoreFind(path.encode('UTF-8'))
    if core == ffi.NULL:
        return None
    return Core._init(core)


def findVF(vf):
    """Like find(), but detection runs on an already-open VFile."""
    core = lib.mCoreFindVF(vf.handle)
    if core == ffi.NULL:
        return None
    return Core._init(core)


def loadPath(path):
    """Convenience: detect a core for `path` and load the file into it."""
    core = find(path)
    if not core or not core.loadFile(path):
        return None
    return core


def loadVF(vf):
    """Convenience: detect a core for the VFile and load it as a ROM."""
    core = findVF(vf)
    if not core or not core.loadROM(vf):
        return None
    return core


def needsReset(f):
    """Decorator guarding methods that require Core.reset() to have run."""
    def wrapper(self, *args, **kwargs):
        if not self._wasReset:
            raise RuntimeError("Core must be reset first")
        return f(self, *args, **kwargs)
    return wrapper


class Core(object):
    """Python wrapper around a native mCore emulator core."""

    # Platform constants exist only when the native library was built with
    # the corresponding platform support.
    if hasattr(lib, 'PLATFORM_GBA'):
        PLATFORM_GBA = lib.PLATFORM_GBA
    if hasattr(lib, 'PLATFORM_GB'):
        PLATFORM_GB = lib.PLATFORM_GB

    def __init__(self, native):
        # `native` is the cffi handle to the underlying mCore struct.
        self._core = native
        self._wasReset = False

    @cached_property
    def tiles(self):
        # Lazily built view over the core's tile memory.
        return tile.TileView(self)

    @classmethod
    def _init(cls, native):
        """Initialize a native core and wrap it in the right subclass.

        Ties the native core's lifetime to the returned wrapper via ffi.gc,
        so deinit runs when the Python object is collected.
        """
        core = ffi.gc(native, native.deinit)
        success = bool(core.init(core))
        if not success:
            raise RuntimeError("Failed to initialize core")
        # Dispatch to a platform-specific subclass when one is available.
        if hasattr(cls, 'PLATFORM_GBA') and core.platform(core) == cls.PLATFORM_GBA:
            from .gba import GBA
            return GBA(core)
        if hasattr(cls, 'PLATFORM_GB') and core.platform(core) == cls.PLATFORM_GB:
            from .gb import GB
            return GB(core)
        return Core(core)

    def _deinit(self):
        # Explicit teardown of the native core.
        self._core.deinit(self._core)

    def loadFile(self, path):
        """Load the file at `path` into the core; returns success."""
        return bool(lib.mCoreLoadFile(self._core, path.encode('UTF-8')))

    def isROM(self, vf):
        """Whether the VFile looks like a ROM this core accepts."""
        return bool(self._core.isROM(vf.handle))

    def loadROM(self, vf):
        return bool(self._core.loadROM(self._core, vf.handle))

    def loadSave(self, vf):
        return bool(self._core.loadSave(self._core, vf.handle))

    def loadTemporarySave(self, vf):
        return bool(self._core.loadTemporarySave(self._core, vf.handle))

    def loadPatch(self, vf):
        return bool(self._core.loadPatch(self._core, vf.handle))

    def autoloadSave(self):
        return bool(lib.mCoreAutoloadSave(self._core))

    def autoloadPatch(self):
        return bool(lib.mCoreAutoloadPatch(self._core))

    def platform(self):
        """Return the native platform identifier for this core."""
        return self._core.platform(self._core)

    def desiredVideoDimensions(self):
        """Return (width, height) the core wants for its video output."""
        width = ffi.new("unsigned*")
        height = ffi.new("unsigned*")
        self._core.desiredVideoDimensions(self._core, width, height)
        return width[0], height[0]

    def setVideoBuffer(self, image):
        # `image` must expose .buffer and .stride for the native call.
        self._core.setVideoBuffer(self._core, image.buffer, image.stride)

    def reset(self):
        """Reset the core; required before any @needsReset method."""
        self._core.reset(self._core)
        self._wasReset = True

    @needsReset
    def runFrame(self):
        """Emulate one full video frame."""
        self._core.runFrame(self._core)

    @needsReset
    def runLoop(self):
        """Run one iteration of the core's main loop."""
        self._core.runLoop(self._core)

    @needsReset
    def step(self):
        """Emulate a single instruction step."""
        self._core.step(self._core)

    @staticmethod
    def _keysToInt(*args, **kwargs):
        # Fold key indices into a bitmask; the optional 'raw' kwarg seeds
        # the mask with a precomputed value.
        keys = 0
        if 'raw' in kwargs:
            keys = kwargs['raw']
        for key in args:
            keys |= 1 << key
        return keys

    def setKeys(self, *args, **kwargs):
        """Replace the currently held keys with the given set."""
        self._core.setKeys(self._core, self._keysToInt(*args, **kwargs))

    def addKeys(self, *args, **kwargs):
        """Press the given keys in addition to those already held."""
        self._core.addKeys(self._core, self._keysToInt(*args, **kwargs))

    def clearKeys(self, *args, **kwargs):
        """Release the given keys."""
        self._core.clearKeys(self._core, self._keysToInt(*args, **kwargs))

    @needsReset
    def frameCounter(self):
        """Frames emulated since the last reset."""
        return self._core.frameCounter(self._core)

    def frameCycles(self):
        """Clock cycles per frame for this platform."""
        return self._core.frameCycles(self._core)

    def frequency(self):
        """Core clock frequency in Hz."""
        return self._core.frequency(self._core)

    def getGameTitle(self):
        """Title string from the loaded ROM header (ASCII, max 16 bytes)."""
        title = ffi.new("char[16]")
        self._core.getGameTitle(self._core, title)
        return ffi.string(title, 16).decode("ascii")

    def getGameCode(self):
        """Game code from the loaded ROM header (ASCII, max 12 bytes)."""
        code = ffi.new("char[12]")
        self._core.getGameCode(self._core, code)
        return ffi.string(code, 12).decode("ascii")
""" Define the Expansion Valve component. """ from scr.logic.components.component import Component as Cmp from scr.logic.components.component import ComponentInfo as CmpInfo from scr.logic.components.component import component, fundamental_equation def update_saved_data_to_last_version(orig_data, orig_version): return orig_data @component('theoretical_expansion_valve', CmpInfo.EXPANSION_VALVE, 1, update_saved_data_to_last_version) class Theoretical(Cmp): def __init__(self, id_, inlet_nodes_id, outlet_nodes_id, component_data): super().__init__(id_, inlet_nodes_id, outlet_nodes_id, component_data) """ Fundamental properties equations """ @fundamental_equation() # function name can be arbitrary. Return a single vector with each side of the equation evaluated. def _eval_intrinsic_equations(self): id_inlet_node = self.get_id_inlet_nodes()[0] inlet_node = self.get_inlet_node(id_inlet_node) id_outlet_node = self.get_id_outlet_nodes()[0] outlet_node = self.get_outlet_node(id_outlet_node) h_in = inlet_node.enthalpy() h_out = outlet_node.enthalpy() return [h_in / 1000.0, h_out / 1000.0]
import datetime

import pytest

from tests.test_utils import create_generic_job
from treeherder.model.models import Push


@pytest.fixture
def perf_push(test_repository):
    """A freshly created Push on the test repository."""
    return Push.objects.create(
        repository=test_repository,
        revision='1234abcd',
        author='foo@bar.com',
        time=datetime.datetime.now(),
    )


@pytest.fixture
def perf_job(perf_push, failure_classifications, generic_reference_data):
    """A generic job attached to the ``perf_push`` fixture."""
    return create_generic_job(
        'myfunguid',
        perf_push.repository,
        perf_push.id,
        generic_reference_data,
    )
import hashlib import json import sys import traceback from datetime import datetime, timedelta from functools import wraps from uuid import uuid4 import newrelic.agent import waffle from constance import config from django.apps import apps from django.conf import settings from django.core.exceptions import ValidationError from django.db import models, transaction from django.db.models import signals from django.utils.decorators import available_attrs from django.utils.functional import cached_property from django.utils.translation import ugettext, ugettext_lazy as _ from pyquery import PyQuery from taggit.managers import TaggableManager from taggit.models import ItemBase, TagBase from taggit.utils import edit_string_for_tags, parse_tags from tidings.models import NotificationsMixin from kuma.core.cache import memcache from kuma.core.exceptions import ProgrammingError from kuma.core.i18n import get_language_mapping from kuma.core.urlresolvers import reverse from kuma.search.decorators import register_live_index from kuma.spam.models import AkismetSubmission, SpamAttempt from . import kumascript from .constants import (DEKI_FILE_URL, DOCUMENT_LAST_MODIFIED_CACHE_KEY_TMPL, KUMA_FILE_URL, REDIRECT_CONTENT, REDIRECT_HTML, TEMPLATE_TITLE_PREFIX) from .content import parse as parse_content from .content import (Extractor, H2TOCFilter, H3TOCFilter, SectionTOCFilter, get_content_sections, get_seo_description) from .exceptions import (DocumentRenderedContentNotAvailable, DocumentRenderingInProgress, PageMoveError, SlugCollision, UniqueCollision) from .jobs import DocumentContributorsJob, DocumentZoneStackJob from .managers import (DeletedDocumentManager, DocumentAdminManager, DocumentManager, RevisionIPManager, TaggedDocumentManager, TransformManager) from .signals import render_done from .templatetags.jinja_helpers import absolutify from .utils import tidy_content def cache_with_field(field_name): """Decorator for generated content methods. 
If the backing model field is null, or kwarg force_fresh is True, call the decorated method to generate and return the content. Otherwise, just return the value in the backing model field. """ def decorator(fn): @wraps(fn, assigned=available_attrs(fn)) def wrapper(self, *args, **kwargs): force_fresh = kwargs.pop('force_fresh', False) # Try getting the value using the DB field. field_val = getattr(self, field_name) if field_val is not None and not force_fresh: return field_val # DB field is blank, or we're forced to generate it fresh. field_val = fn(self, force_fresh=force_fresh) setattr(self, field_name, field_val) return field_val return wrapper return decorator def _inherited(parent_attr, direct_attr): """Return a descriptor delegating to an attr of the original document. If `self` is a translation, the descriptor delegates to the attribute `parent_attr` from the original document. Otherwise, it delegates to the attribute `direct_attr` from `self`. Use this only on a reference to another object, like a ManyToMany or a ForeignKey. Using it on a normal field won't work well, as it'll preclude the use of that field in QuerySet field lookups. Also, ModelForms that are passed instance=this_obj won't see the inherited value. """ getter = lambda self: (getattr(self.parent, parent_attr) if self.parent and self.parent.id != self.id else getattr(self, direct_attr)) setter = lambda self, val: (setattr(self.parent, parent_attr, val) if self.parent and self.parent.id != self.id else setattr(self, direct_attr, val)) return property(getter, setter) def valid_slug_parent(slug, locale): slug_bits = slug.split('/') slug_bits.pop() parent = None if slug_bits: parent_slug = '/'.join(slug_bits) try: parent = Document.objects.get(locale=locale, slug=parent_slug) except Document.DoesNotExist: raise Exception( ugettext('Parent %s does not exist.' 
% ( '%s/%s' % (locale, parent_slug)))) return parent class DocumentTag(TagBase): """A tag indexing a document""" class Meta: verbose_name = _('Document Tag') verbose_name_plural = _('Document Tags') def tags_for(cls, model, instance=None, **extra_filters): """ Sadly copied from taggit to work around the issue of not being able to use the TaggedItemBase class that has tag field already defined. """ kwargs = extra_filters or {} if instance is not None: kwargs.update({ '%s__content_object' % cls.tag_relname(): instance }) return cls.tag_model().objects.filter(**kwargs) kwargs.update({ '%s__content_object__isnull' % cls.tag_relname(): False }) return cls.tag_model().objects.filter(**kwargs).distinct() class TaggedDocument(ItemBase): """Through model, for tags on Documents""" content_object = models.ForeignKey('Document') tag = models.ForeignKey(DocumentTag, related_name="%(app_label)s_%(class)s_items") objects = TaggedDocumentManager() @classmethod def tags_for(cls, *args, **kwargs): return tags_for(cls, *args, **kwargs) class DocumentAttachment(models.Model): """ Intermediary between Documents and Attachments. Allows storing the user who attached a file to a document, and a (unique for that document) name for referring to the file from the document. 
""" file = models.ForeignKey( 'attachments.Attachment', related_name='document_attachments', ) document = models.ForeignKey( 'wiki.Document', related_name='attached_files', ) attached_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True) name = models.TextField() # whether or not this attachment was uploaded for the document is_original = models.BooleanField( verbose_name=_('uploaded to the document'), default=False, ) # whether or not this attachment is linked in the document's content is_linked = models.BooleanField( verbose_name=_('linked in the document content'), default=False, ) class Meta: db_table = 'attachments_documentattachment' def __unicode__(self): return u'"%s" for document "%s"' % (self.file, self.document) def clean(self): if self.pk and (self.document.files.through.objects.exclude(pk=self.pk) .exists()): raise ValidationError( _("Attachment %(attachment_id)s can't be attached " "multiple times to document %(document_id)s") % {'attachment_id': self.pk, 'document_id': self.document.pk} ) @register_live_index class Document(NotificationsMixin, models.Model): """A localized knowledgebase document, not revision-specific.""" TOC_FILTERS = { 1: SectionTOCFilter, 2: H2TOCFilter, 3: H3TOCFilter, 4: SectionTOCFilter } title = models.CharField(max_length=255, db_index=True) slug = models.CharField(max_length=255, db_index=True) # NOTE: Documents are indexed by tags, but tags are edited in Revisions. # Also, using a custom through table to isolate Document tags from those # used in other models and apps. (Works better than namespaces, for # completion and such.) tags = TaggableManager(through=TaggedDocument) # Is this document a template or not? is_template = models.BooleanField(default=False, editable=False, db_index=True) # Is this a redirect or not? is_redirect = models.BooleanField(default=False, editable=False, db_index=True) # Is this document localizable or not? 
is_localizable = models.BooleanField(default=True, db_index=True) locale = models.CharField( max_length=7, choices=settings.LANGUAGES, default=settings.WIKI_DEFAULT_LANGUAGE, db_index=True, ) # Latest approved revision. L10n dashboard depends on this being so (rather # than being able to set it to earlier approved revisions). current_revision = models.ForeignKey( 'Revision', null=True, related_name='current_for+', ) # The Document I was translated from. NULL if this doc is in the default # locale or it is nonlocalizable. TODO: validate against # settings.WIKI_DEFAULT_LANGUAGE. parent = models.ForeignKey( 'self', related_name='translations', null=True, blank=True, ) parent_topic = models.ForeignKey( 'self', related_name='children', null=True, blank=True, ) # The files attached to the document, represented by a custom intermediate # model so we can store some metadata about the relation files = models.ManyToManyField( 'attachments.Attachment', through=DocumentAttachment, ) # JSON representation of Document for API results, built on save json = models.TextField(editable=False, blank=True, null=True) # Raw HTML of approved revision's wiki markup html = models.TextField(editable=False) # Cached result of kumascript and other offline processors (if any) rendered_html = models.TextField(editable=False, blank=True, null=True) # Errors (if any) from the last rendering run rendered_errors = models.TextField(editable=False, blank=True, null=True) # Whether or not to automatically defer rendering of this page to a queued # offline task. 
Generally used for complex pages that need time defer_rendering = models.BooleanField(default=False, db_index=True) # Timestamp when this document was last scheduled for a render render_scheduled_at = models.DateTimeField(null=True, db_index=True) # Timestamp when a render for this document was last started render_started_at = models.DateTimeField(null=True, db_index=True) # Timestamp when this document was last rendered last_rendered_at = models.DateTimeField(null=True, db_index=True) # Maximum age (in seconds) before this document needs re-rendering render_max_age = models.IntegerField(blank=True, null=True) # Time after which this document needs re-rendering render_expires = models.DateTimeField(blank=True, null=True, db_index=True) # Whether this page is deleted. deleted = models.BooleanField(default=False, db_index=True) # Last modified time for the document. Should be equal-to or greater than # the current revision's created field modified = models.DateTimeField(auto_now=True, null=True, db_index=True) body_html = models.TextField(editable=False, blank=True, null=True) quick_links_html = models.TextField(editable=False, blank=True, null=True) zone_subnav_local_html = models.TextField(editable=False, blank=True, null=True) toc_html = models.TextField(editable=False, blank=True, null=True) summary_html = models.TextField(editable=False, blank=True, null=True) summary_text = models.TextField(editable=False, blank=True, null=True) uuid = models.UUIDField(default=uuid4, editable=False) class Meta(object): unique_together = ( ('parent', 'locale'), ('slug', 'locale'), ) permissions = ( ('view_document', 'Can view document'), ('add_template_document', 'Can add Template:* document'), ('change_template_document', 'Can change Template:* document'), ('move_tree', 'Can move a tree of documents'), ('purge_document', 'Can permanently delete document'), ('restore_document', 'Can restore deleted document'), ) objects = DocumentManager() deleted_objects = 
DeletedDocumentManager() admin_objects = DocumentAdminManager() def __unicode__(self): return u'%s (%s)' % (self.get_absolute_url(), self.title) @cache_with_field('body_html') def get_body_html(self, *args, **kwargs): html = self.rendered_html and self.rendered_html or self.html sections_to_hide = ('Quick_Links', 'Subnav') doc = parse_content(html) for sid in sections_to_hide: doc = doc.replaceSection(sid, '<!-- -->') doc.injectSectionIDs() doc.annotateLinks(base_url=settings.SITE_URL) return doc.serialize() @cache_with_field('quick_links_html') def get_quick_links_html(self, *args, **kwargs): return self.get_section_content('Quick_Links') @cache_with_field('zone_subnav_local_html') def get_zone_subnav_local_html(self, *args, **kwargs): return self.get_section_content('Subnav') @cache_with_field('toc_html') def get_toc_html(self, *args, **kwargs): if not self.current_revision: return '' toc_depth = self.current_revision.toc_depth if not toc_depth: return '' html = self.rendered_html and self.rendered_html or self.html return (parse_content(html) .injectSectionIDs() .filter(self.TOC_FILTERS[toc_depth]) .serialize()) @cache_with_field('summary_html') def get_summary_html(self, *args, **kwargs): return self.get_summary(strip_markup=False) @cache_with_field('summary_text') def get_summary_text(self, *args, **kwargs): return self.get_summary(strip_markup=True) def regenerate_cache_with_fields(self): """Regenerate fresh content for all the cached fields""" # TODO: Maybe @cache_with_field can build a registry over which this # method can iterate? self.get_body_html(force_fresh=True) self.get_quick_links_html(force_fresh=True) self.get_zone_subnav_local_html(force_fresh=True) self.get_toc_html(force_fresh=True) self.get_summary_html(force_fresh=True) self.get_summary_text(force_fresh=True) def get_zone_subnav_html(self): """ Search from self up through DocumentZone stack, returning the first zone nav HTML found. 
""" src = self.get_zone_subnav_local_html() if src: return src for zone in DocumentZoneStackJob().get(self.pk): src = zone.document.get_zone_subnav_local_html() if src: return src def get_section_content(self, section_id, ignore_heading=True): """ Convenience method to extract the rendered content for a single section """ if self.rendered_html: content = self.rendered_html else: content = self.html return self.extract.section(content, section_id, ignore_heading) def calculate_etag(self, section_id=None): """Calculate an etag-suitable hash for document content or a section""" if not section_id: content = self.html else: content = self.extract.section(self.html, section_id) return '"%s"' % hashlib.sha1(content.encode('utf8')).hexdigest() def current_or_latest_revision(self): """Returns current revision if there is one, else the last created revision.""" rev = self.current_revision if not rev: revs = self.revisions.order_by('-created') if revs.exists(): rev = revs[0] return rev @property def is_rendering_scheduled(self): """Does this have a rendering scheduled?""" if not self.render_scheduled_at: return False # Check whether a scheduled rendering has waited for too long. Assume # failure, in this case, and allow another scheduling attempt. timeout = config.KUMA_DOCUMENT_RENDER_TIMEOUT max_duration = timedelta(seconds=timeout) duration = datetime.now() - self.render_scheduled_at if duration > max_duration: return False if not self.last_rendered_at: return True return self.render_scheduled_at > self.last_rendered_at @property def is_rendering_in_progress(self): """Does this have a rendering in progress?""" if not self.render_started_at: # No start time, so False. return False # Check whether an in-progress rendering has gone on for too long. # Assume failure, in this case, and allow another rendering attempt. 
timeout = config.KUMA_DOCUMENT_RENDER_TIMEOUT max_duration = timedelta(seconds=timeout) duration = datetime.now() - self.render_started_at if duration > max_duration: return False if not self.last_rendered_at: # No rendering ever, so in progress. return True # Finally, if the render start is more recent than last completed # render, then we have one in progress. return self.render_started_at > self.last_rendered_at @newrelic.agent.function_trace() def get_rendered(self, cache_control=None, base_url=None): """Attempt to get rendered content for this document""" # No rendered content yet, so schedule the first render. if not self.rendered_html: try: self.schedule_rendering(cache_control, base_url) except DocumentRenderingInProgress: # Unable to trigger a rendering right now, so we bail. raise DocumentRenderedContentNotAvailable # If we have a cache_control directive, try scheduling a render. if cache_control: try: self.schedule_rendering(cache_control, base_url) except DocumentRenderingInProgress: pass # Parse JSON errors, if available. errors = None try: errors = (self.rendered_errors and json.loads(self.rendered_errors) or None) except ValueError: pass # If the above resulted in an immediate render, we might have content. if not self.rendered_html: if errors: return ('', errors) else: # But, no such luck, so bail out. raise DocumentRenderedContentNotAvailable return (self.rendered_html, errors) def schedule_rendering(self, cache_control=None, base_url=None): """ Attempt to schedule rendering. Honor the deferred_rendering field to decide between an immediate or a queued render. """ # Avoid scheduling a rendering if already scheduled or in progress. if self.is_rendering_scheduled or self.is_rendering_in_progress: return False # Note when the rendering was scheduled. 
Kind of a hack, doing a quick # update and setting the local property rather than doing a save() now = datetime.now() Document.objects.filter(pk=self.pk).update(render_scheduled_at=now) self.render_scheduled_at = now if (waffle.switch_is_active('wiki_force_immediate_rendering') or not self.defer_rendering): # Attempt an immediate rendering. self.render(cache_control, base_url) else: # Attempt to queue a rendering. If celery.conf.ALWAYS_EAGER is # True, this is also an immediate rendering. from . import tasks tasks.render_document.delay(self.pk, cache_control, base_url) def render(self, cache_control=None, base_url=None, timeout=None): """ Render content using kumascript and any other services necessary. """ if not base_url: base_url = settings.SITE_URL # Disallow rendering while another is in progress. if self.is_rendering_in_progress: raise DocumentRenderingInProgress # Note when the rendering was started. Kind of a hack, doing a quick # update and setting the local property rather than doing a save() now = datetime.now() Document.objects.filter(pk=self.pk).update(render_started_at=now) self.render_started_at = now # Perform rendering and update document if not config.KUMASCRIPT_TIMEOUT: # A timeout of 0 should shortcircuit kumascript usage. self.rendered_html, self.rendered_errors = self.html, [] else: self.rendered_html, errors = kumascript.get(self, cache_control, base_url, timeout=timeout) self.rendered_errors = errors and json.dumps(errors) or None # Regenerate the cached content fields self.regenerate_cache_with_fields() # Finally, note the end time of rendering and update the document. self.last_rendered_at = datetime.now() # If this rendering took longer than we'd like, mark it for deferred # rendering in the future. 
timeout = config.KUMA_DOCUMENT_FORCE_DEFERRED_TIMEOUT max_duration = timedelta(seconds=timeout) duration = self.last_rendered_at - self.render_started_at if duration >= max_duration: self.defer_rendering = True # TODO: Automatically clear the defer_rendering flag if the rendering # time falls under the limit? Probably safer to require manual # intervention to free docs from deferred jail. if self.render_max_age: # If there's a render_max_age, automatically update render_expires self.render_expires = (datetime.now() + timedelta(seconds=self.render_max_age)) else: # Otherwise, just clear the expiration time as a one-shot self.render_expires = None self.save() render_done.send(sender=self.__class__, instance=self) def get_summary(self, strip_markup=True, use_rendered=True): """ Attempt to get the document summary from rendered content, with fallback to raw HTML """ if use_rendered and self.rendered_html: src = self.rendered_html else: src = self.html return get_seo_description(src, self.locale, strip_markup) def build_json_data(self): html = self.rendered_html and self.rendered_html or self.html content = parse_content(html).injectSectionIDs().serialize() sections = get_content_sections(content) translations = [] if self.pk: for translation in self.other_translations: revision = translation.current_revision if revision.summary: summary = revision.summary else: summary = translation.get_summary(strip_markup=False) translations.append({ 'last_edit': revision.created.isoformat(), 'locale': translation.locale, 'localization_tags': list(revision.localization_tags .names()), 'review_tags': list(revision.review_tags.names()), 'summary': summary, 'tags': list(translation.tags.names()), 'title': translation.title, 'url': translation.get_absolute_url(), 'uuid': str(translation.uuid) }) if self.current_revision: review_tags = list(self.current_revision.review_tags.names()) localization_tags = list(self.current_revision .localization_tags .names()) last_edit = 
self.current_revision.created.isoformat() if self.current_revision.summary: summary = self.current_revision.summary else: summary = self.get_summary(strip_markup=False) else: review_tags = [] localization_tags = [] last_edit = '' summary = '' if not self.pk: tags = [] else: tags = list(self.tags.names()) now_iso = datetime.now().isoformat() if self.modified: modified = self.modified.isoformat() else: modified = now_iso return { 'title': self.title, 'label': self.title, 'url': self.get_absolute_url(), 'id': self.id, 'uuid': str(self.uuid), 'slug': self.slug, 'tags': tags, 'review_tags': review_tags, 'localization_tags': localization_tags, 'sections': sections, 'locale': self.locale, 'summary': summary, 'translations': translations, 'modified': modified, 'json_modified': now_iso, 'last_edit': last_edit } def get_json_data(self, stale=True): """Returns a document in object format for output as JSON. The stale parameter, when True, accepts stale cached data even after the document has been modified.""" # Have parsed data & don't care about freshness? Here's a quick out.. curr_json_data = getattr(self, '_json_data', None) if curr_json_data and stale: return curr_json_data # Attempt to parse the current contents of self.json, taking care in # case it's empty or broken JSON. self._json_data = {} if self.json: try: self._json_data = json.loads(self.json) except (TypeError, ValueError): pass # Try to get ISO 8601 datestamps for the doc and the json json_lmod = self._json_data.get('json_modified', '') doc_lmod = self.modified.isoformat() # If there's no parsed data or the data is stale & we care, it's time # to rebuild the cached JSON data. 
if (not self._json_data) or (not stale and doc_lmod > json_lmod): self._json_data = self.build_json_data() self.json = json.dumps(self._json_data) Document.objects.filter(pk=self.pk).update(json=self.json) return self._json_data @cached_property def extract(self): return Extractor(self) def natural_key(self): return (self.locale, self.slug) @staticmethod def natural_key_hash(keys): natural_key = u'/'.join(keys) return hashlib.md5(natural_key.encode('utf8')).hexdigest() @cached_property def natural_cache_key(self): return self.natural_key_hash(self.natural_key()) def _existing(self, attr, value): """Return an existing doc (if any) in this locale whose `attr` attr is equal to mine.""" return Document.objects.filter(locale=self.locale, **{attr: value}) def _raise_if_collides(self, attr, exception): """Raise an exception if a page of this title/slug already exists.""" if self.id is None or hasattr(self, 'old_' + attr): # If I am new or my title/slug changed... existing = self._existing(attr, getattr(self, attr)) if existing.exists(): raise exception(existing[0]) def clean(self): """Translations can't be localizable.""" self._clean_is_localizable() def _clean_is_localizable(self): """is_localizable == allowed to have translations. Make sure that isn't violated. For default language (en-US), is_localizable means it can have translations. Enforce: * is_localizable=True if it has translations * if has translations, unable to make is_localizable=False For non-default langauges, is_localizable must be False. """ if self.locale != settings.WIKI_DEFAULT_LANGUAGE: self.is_localizable = False # Can't save this translation if parent not localizable if (self.parent and self.parent.id != self.id and not self.parent.is_localizable): raise ValidationError('"%s": parent "%s" is not localizable.' 
% ( unicode(self), unicode(self.parent))) # Can't make not localizable if it has translations # This only applies to documents that already exist, hence self.pk if self.pk and not self.is_localizable and self.translations.exists(): raise ValidationError('"%s": document has %s translations but is ' 'not localizable.' % (unicode(self), self.translations.count())) def _attr_for_redirect(self, attr, template): """Return the slug or title for a new redirect. `template` is a Python string template with "old" and "number" tokens used to create the variant. """ def unique_attr(): """Return a variant of getattr(self, attr) such that there is no Document of my locale with string attribute `attr` equal to it. Never returns the original attr value. """ # "My God, it's full of race conditions!" i = 1 while True: new_value = template % dict(old=getattr(self, attr), number=i) if not self._existing(attr, new_value).exists(): return new_value i += 1 old_attr = 'old_' + attr if hasattr(self, old_attr): # My slug (or title) is changing; we can reuse it for the redirect. return getattr(self, old_attr) else: # Come up with a unique slug (or title): return unique_attr() def revert(self, revision, user, comment=None): """ Reverts the given revision by creating a new one. 
- Sets its comment to the given value and points the new revision to the old revision - Keeps review tags - Make new revision the current one of the document """ # remember the current revision's primary key for later old_revision_pk = revision.pk # get a list of review tag names for later old_review_tags = list(revision.review_tags.names()) with transaction.atomic(): # reset primary key revision.pk = None # add a sensible comment revision.comment = ("Revert to revision of %s by %s" % (revision.created, revision.creator)) if comment: revision.comment = u'%s: "%s"' % (revision.comment, comment) revision.created = datetime.now() revision.creator = user if revision.document.original.pk == self.pk: revision.based_on_id = old_revision_pk revision.save() # set review tags if old_review_tags: revision.review_tags.set(*old_review_tags) # populate model instance with fresh data from database revision.refresh_from_db() # make this new revision the current one for the document revision.make_current() return revision def revise(self, user, data, section_id=None): """Given a dict of changes to make, build and save a new Revision to revise this document""" curr_rev = self.current_revision new_rev = Revision(creator=user, document=self, content=self.html) for n in ('title', 'slug', 'render_max_age'): setattr(new_rev, n, getattr(self, n)) if curr_rev: new_rev.toc_depth = curr_rev.toc_depth original_doc = curr_rev.document.original if original_doc == self: new_rev.based_on = curr_rev else: new_rev.based_on = original_doc.current_revision # Accept optional field edits... 
new_title = data.get('title', False) new_rev.title = new_title and new_title or self.title new_tags = data.get('tags', False) new_rev.tags = (new_tags and new_tags or edit_string_for_tags(self.tags.all())) new_review_tags = data.get('review_tags', False) if new_review_tags: review_tags = new_review_tags elif curr_rev: review_tags = edit_string_for_tags(curr_rev.review_tags.all()) else: review_tags = '' new_rev.summary = data.get('summary', '') # To add comment, when Technical/Editorial review completed new_rev.comment = data.get('comment', '') # Accept HTML edits, optionally by section new_html = data.get('content', data.get('html', False)) if new_html: if not section_id: new_rev.content = new_html else: content = parse_content(self.html) new_rev.content = (content.replaceSection(section_id, new_html) .serialize()) # Finally, commit the revision changes and return the new rev. new_rev.save() new_rev.review_tags.set(*parse_tags(review_tags)) return new_rev @cached_property def last_modified_cache_key(self): return DOCUMENT_LAST_MODIFIED_CACHE_KEY_TMPL % self.natural_cache_key def fill_last_modified_cache(self): """ Convert python datetime to Unix epoch seconds. This is more easily digested by the cache, and is more compatible with other services that might spy on Kuma's cache entries (eg. KumaScript) """ modified_epoch = self.modified.strftime('%s') memcache.set(self.last_modified_cache_key, modified_epoch) return modified_epoch def save(self, *args, **kwargs): self.is_template = self.slug.startswith(TEMPLATE_TITLE_PREFIX) self.is_redirect = bool(self.get_redirect_url()) try: # Check if the slug would collide with an existing doc self._raise_if_collides('slug', SlugCollision) except UniqueCollision as err: if err.existing.get_redirect_url() is not None: # If the existing doc is a redirect, delete it and clobber it. 
err.existing.delete() else: raise err # These are too important to leave to a (possibly omitted) is_valid # call: self._clean_is_localizable() if not self.parent_topic and self.parent: # If this is a translation without a topic parent, try to get one. self.acquire_translated_topic_parent() super(Document, self).save(*args, **kwargs) # Delete any cached last-modified timestamp. self.fill_last_modified_cache() def delete(self, *args, **kwargs): if waffle.switch_is_active('wiki_error_on_delete'): # bug 863692: Temporary while we investigate disappearing pages. raise Exception("Attempt to delete document %s: %s" % (self.id, self.title)) else: if self.is_redirect or 'purge' in kwargs: if 'purge' in kwargs: kwargs.pop('purge') return super(Document, self).delete(*args, **kwargs) signals.pre_delete.send(sender=self.__class__, instance=self) if not self.deleted: Document.objects.filter(pk=self.pk).update(deleted=True) memcache.delete(self.last_modified_cache_key) signals.post_delete.send(sender=self.__class__, instance=self) def purge(self): if waffle.switch_is_active('wiki_error_on_delete'): # bug 863692: Temporary while we investigate disappearing pages. raise Exception("Attempt to purge document %s: %s" % (self.id, self.title)) else: if not self.deleted: raise Exception("Attempt tp purge non-deleted document %s: %s" % (self.id, self.title)) self.delete(purge=True) def restore(self): """ Restores a logically deleted document by reverting the deleted boolean to False. Sends pre_save and post_save Django signals to follow ducktyping best practices. """ if not self.deleted: raise Exception("Document is not deleted, cannot be restored.") signals.pre_save.send(sender=self.__class__, instance=self) Document.deleted_objects.filter(pk=self.pk).update(deleted=False) signals.post_save.send(sender=self.__class__, instance=self) def _post_move_redirects(self, new_slug, user, title): """ Create and return a Document and a Revision to serve as redirects once this page has been moved. 
""" redirect_doc = Document(locale=self.locale, title=self.title, slug=self.slug, is_localizable=False) content = REDIRECT_CONTENT % { 'href': reverse('wiki.document', args=[new_slug], locale=self.locale), 'title': title, } redirect_rev = Revision(content=content, is_approved=True, toc_depth=self.current_revision.toc_depth, creator=user) return redirect_doc, redirect_rev def _moved_revision(self, new_slug, user, title=None): """ Create and return a Revision which is a copy of this Document's current Revision, as it will exist at a moved location. """ moved_rev = self.current_revision # Shortcut trick for getting an object with all the same # values, but making Django think it's new. moved_rev.id = None moved_rev.creator = user moved_rev.created = datetime.now() moved_rev.slug = new_slug if title: moved_rev.title = title return moved_rev def _get_new_parent(self, new_slug): """ Get this moved Document's parent doc if a Document exists at the appropriate slug and locale. """ return valid_slug_parent(new_slug, self.locale) def _move_conflicts(self, new_slug): """ Given a new slug to be assigned to this document, check whether there is an existing, non-redirect, Document at that slug in this locale. Any redirect existing there will be deleted. This is necessary since page moving is a background task, and a Document may come into existence at the target slug after the move is requested. """ existing = None try: existing = Document.objects.get(locale=self.locale, slug=new_slug) except Document.DoesNotExist: pass if existing is not None: if existing.is_redirect: existing.delete() else: raise Exception("Requested move would overwrite a non-redirect page.") def _tree_conflicts(self, new_slug): """ Given a new slug to be assigned to this document, return a list of documents (if any) which would be overwritten by moving this document or any of its children in that fashion. 
""" conflicts = [] try: existing = Document.objects.get(locale=self.locale, slug=new_slug) if not existing.is_redirect: conflicts.append(existing) except Document.DoesNotExist: pass for child in self.get_descendants(): child_title = child.slug.split('/')[-1] try: slug = '/'.join([new_slug, child_title]) existing = Document.objects.get(locale=self.locale, slug=slug) if not existing.get_redirect_url(): conflicts.append(existing) except Document.DoesNotExist: pass return conflicts def _move_tree(self, new_slug, user=None, title=None): """ Move this page and all its children. """ # Page move is a 10-step process. # # Step 1: Sanity check. Has a page been created at this slug # since the move was requested? If not, OK to go ahead and # change our slug. self._move_conflicts(new_slug) if user is None: user = self.current_revision.creator if title is None: title = self.title # Step 2: stash our current review tags, since we want to # preserve them. review_tags = list(self.current_revision.review_tags.names()) # Step 3: Create (but don't yet save) a Document and Revision # to leave behind as a redirect from old location to new. redirect_doc, redirect_rev = self._post_move_redirects(new_slug, user, title) # Step 4: Update our breadcrumbs. new_parent = self._get_new_parent(new_slug) # If we found a Document at what will be our parent slug, set # it as our parent_topic. If we didn't find one, then we no # longer have a parent_topic (since our original parent_topic # would already have moved if it were going to). self.parent_topic = new_parent # Step 5: Save this Document. self.slug = new_slug self.save() # Step 6: Create (but don't yet save) a copy of our current # revision, but with the new slug and title (if title is # changing too). moved_rev = self._moved_revision(new_slug, user, title) # Step 7: Save the Revision that actually moves us. moved_rev.save(force_insert=True) # Step 8: Save the review tags. moved_rev.review_tags.set(*review_tags) # Step 9: Save the redirect. 
redirect_doc.save() redirect_rev.document = redirect_doc redirect_rev.save() # Finally, step 10: recurse through all of our children. for child in self.children.filter(locale=self.locale): # Save the original slug and locale so we can use them in # the error message if something goes wrong. old_child_slug, old_child_locale = child.slug, child.locale child_title = child.slug.split('/')[-1] try: child._move_tree('/'.join([new_slug, child_title]), user) except PageMoveError: # A child move already caught this and created the # correct exception + error message, so just propagate # it up. raise except Exception as e: # One of the immediate children of this page failed to # move. exc_class, exc_message, exc_tb = sys.exc_info() message = """ Failure occurred while attempting to move document with id %(doc_id)s. That document can be viewed at: https://developer.mozilla.org/%(locale)s/docs/%(slug)s The exception raised was: Exception type: %(exc_class)s Exception message: %(exc_message)s Full traceback: %(traceback)s """ % {'doc_id': child.id, 'locale': old_child_locale, 'slug': old_child_slug, 'exc_class': exc_class, 'exc_message': exc_message, 'traceback': traceback.format_exc(e)} raise PageMoveError(message) def repair_breadcrumbs(self): """ Temporary method while we work out the real issue behind translation/breadcrumb mismatches (bug 900961). Basically just walks up the tree of topical parents, calling acquire_translated_topic_parent() for as long as there's a language mismatch. """ if (not self.parent_topic or self.parent_topic.locale != self.locale): self.acquire_translated_topic_parent() if self.parent_topic: self.parent_topic.repair_breadcrumbs() def acquire_translated_topic_parent(self): """ This normalizes topic breadcrumb paths between locales. Attempt to acquire a topic parent from a translation of our translation parent's topic parent, auto-creating a stub document if necessary. """ if not self.parent: # Bail, if this is not in fact a translation. 
return parent_topic = self.parent.parent_topic if not parent_topic: # Bail, if the translation parent has no topic parent return try: # Look for an existing translation of the topic parent new_parent = parent_topic.translations.get(locale=self.locale) except Document.DoesNotExist: try: # No luck. As a longshot, let's try looking for the same slug. new_parent = Document.objects.get(locale=self.locale, slug=parent_topic.slug) if not new_parent.parent: # HACK: This same-slug/different-locale doc should probably # be considered a translation. Let's correct that on the # spot. new_parent.parent = parent_topic new_parent.save() except Document.DoesNotExist: # Finally, let's create a translated stub for a topic parent new_parent = Document.objects.get(pk=parent_topic.pk) new_parent.pk = None new_parent.current_revision = None new_parent.parent_topic = None new_parent.parent = parent_topic new_parent.locale = self.locale new_parent.save() if parent_topic.current_revision: # Don't forget to clone a current revision new_rev = Revision.objects.get(pk=parent_topic.current_revision.pk) new_rev.pk = None new_rev.document = new_parent # HACK: Let's auto-add tags that flag this as a topic stub stub_tags = '"TopicStub","NeedsTranslation"' stub_l10n_tags = ['inprogress'] if new_rev.tags: new_rev.tags = '%s,%s' % (new_rev.tags, stub_tags) else: new_rev.tags = stub_tags new_rev.save() new_rev.localization_tags.add(*stub_l10n_tags) # Finally, assign the new default parent topic self.parent_topic = new_parent self.save() @property def content_parsed(self): if not self.current_revision: return None return self.current_revision.content_parsed def populate_attachments(self): """ File attachments are stored at the DB level and synced here with the document's HTML content. We find them by regex-searching over the HTML for URLs that match the file URL patterns. 
""" mt_files = DEKI_FILE_URL.findall(self.html) kuma_files = KUMA_FILE_URL.findall(self.html) params = None if mt_files: # We have at least some MindTouch files. params = models.Q(mindtouch_attachment_id__in=mt_files) if kuma_files: # We also have some kuma files. Use an OR query. params = params | models.Q(id__in=kuma_files) if kuma_files and not params: # We have only kuma files. params = models.Q(id__in=kuma_files) Attachment = apps.get_model('attachments', 'Attachment') if params: found_attachments = Attachment.objects.filter(params) else: # If no files found, return an empty Attachment queryset. found_attachments = Attachment.objects.none() # Delete all document-attachments-relations for attachments that # weren't originally uploaded for the document to populate the list # again below self.attached_files.filter(is_original=False).delete() # Reset the linked status for all attachments that are left self.attached_files.all().update(is_linked=False) # Go through the attachments discovered in the HTML and # create linked attachments """ three options of state: - linked in the document, but not originally uploaded - linked in the document and originally uploaded - not linked in the document, but originally uploaded """ populated = [] for attachment in (found_attachments.only('pk', 'current_revision') .iterator()): revision = attachment.current_revision relation, created = self.files.through.objects.update_or_create( file_id=attachment.pk, document_id=self.pk, defaults={ 'attached_by': revision.creator, 'name': revision.filename, 'is_linked': True, }, ) populated.append((relation, created)) return populated @property def show_toc(self): return self.current_revision and self.current_revision.toc_depth @cached_property def language(self): return get_language_mapping()[self.locale.lower()] def get_absolute_url(self, endpoint='wiki.document'): """ Build the absolute URL to this document from its full path """ return reverse(endpoint, locale=self.locale, 
args=[self.slug]) def get_edit_url(self): return self.get_absolute_url(endpoint='wiki.edit') def get_redirect_url(self): """ If I am a redirect, return the absolute URL to which I redirect. Otherwise, return None. """ # If a document starts with REDIRECT_HTML and contains any <a> tags # with hrefs, return the href of the first one. This trick saves us # from having to parse the HTML every time. if REDIRECT_HTML in self.html: anchors = PyQuery(self.html)('a[href].redirect') if anchors: url = anchors[0].get('href') # allow explicit domain and *not* '//' # i.e allow "https://developer...." and "/en-US/docs/blah" if len(url) > 1: if url.startswith(settings.SITE_URL): return url elif url[0] == '/' and url[1] != '/': return url elif len(url) == 1 and url[0] == '/': return url def get_topic_parents(self): """Build a list of parent topics from self to root""" curr, parents = self, [] while curr.parent_topic: curr = curr.parent_topic parents.append(curr) return parents def allows_revision_by(self, user): """ Return whether `user` is allowed to create new revisions of me. The motivation behind this method is that templates and other types of docs may have different permissions. """ if (self.slug.startswith(TEMPLATE_TITLE_PREFIX) and not user.has_perm('wiki.change_template_document')): return False return True def allows_editing_by(self, user): """ Return whether `user` is allowed to edit document-level metadata. If the Document doesn't have a current_revision (nothing approved) then all the Document fields are still editable. Once there is an approved Revision, the Document fields can only be edited by privileged users. """ if (self.slug.startswith(TEMPLATE_TITLE_PREFIX) and not user.has_perm('wiki.change_template_document')): return False return (not self.current_revision or user.has_perm('wiki.change_document')) def translated_to(self, locale): """ Return the translation of me to the given locale. If there is no such Document, return None. 
""" if self.locale != settings.WIKI_DEFAULT_LANGUAGE: raise NotImplementedError('translated_to() is implemented only on' 'Documents in the default language so' 'far.') try: return Document.objects.get(locale=locale, parent=self) except Document.DoesNotExist: return None @property def original(self): """ Return the document I was translated from or, if none, myself. """ return self.parent or self @cached_property def other_translations(self): """ Return a list of Documents - other translations of this Document """ if self.parent is None: return self.translations.all().order_by('locale') else: translations = (self.parent.translations.all() .exclude(id=self.id) .order_by('locale')) pks = list(translations.values_list('pk', flat=True)) return Document.objects.filter(pk__in=[self.parent.pk] + pks) @property def parents(self): """ Return the list of topical parent documents above this one, or an empty list if none exist. """ if self.parent_topic is None: return [] current_parent = self.parent_topic parents = [current_parent] while current_parent.parent_topic is not None: parents.insert(0, current_parent.parent_topic) current_parent = current_parent.parent_topic return parents def is_child_of(self, other): """ Circular dependency detection -- if someone tries to set this as a parent of a document it's a child of, they're gonna have a bad time. """ return other.id in (d.id for d in self.parents) # This is a method, not a property, because it can do a lot of DB # queries and so should look scarier. It's not just named # 'children' because that's taken already by the reverse relation # on parent_topic. def get_descendants(self, limit=None, levels=0): """ Return a list of all documents which are children (grandchildren, great-grandchildren, etc.) of this one. 
""" results = [] if (limit is None or levels < limit) and self.children.exists(): for child in self.children.all().filter(locale=self.locale): results.append(child) [results.append(grandchild) for grandchild in child.get_descendants(limit, levels + 1)] return results def is_watched_by(self, user): """ Return whether `user` is notified of edits to me. """ from .events import EditDocumentEvent return EditDocumentEvent.is_notifying(user, self) def tree_is_watched_by(self, user): """Return whether `user` is notified of edits to me AND sub-pages.""" from .events import EditDocumentInTreeEvent return EditDocumentInTreeEvent.is_notifying(user, self) def parent_trees_watched_by(self, user): """ Return any and all of this document's parents that are watched by the given user. """ return [doc for doc in self.parents if doc.tree_is_watched_by(user)] @cached_property def contributors(self): return DocumentContributorsJob().get(self.pk) @cached_property def zone_stack(self): return DocumentZoneStackJob().get(self.pk) def get_full_url(self): return absolutify(self.get_absolute_url()) class DocumentDeletionLog(models.Model): """ Log of who deleted a Document, when, and why. """ # We store the locale/slug because it's unique, and also because a # ForeignKey would delete this log when the Document gets purged. locale = models.CharField( max_length=7, choices=settings.LANGUAGES, default=settings.WIKI_DEFAULT_LANGUAGE, db_index=True, ) slug = models.CharField(max_length=255, db_index=True) user = models.ForeignKey(settings.AUTH_USER_MODEL) timestamp = models.DateTimeField(auto_now=True) reason = models.TextField() def __unicode__(self): return "/%(locale)s/%(slug)s deleted by %(user)s" % { 'locale': self.locale, 'slug': self.slug, 'user': self.user } class DocumentZone(models.Model): """ Model object declaring a content zone root at a given Document, provides attributes inherited by the topic hierarchy beneath it. 
""" document = models.OneToOneField(Document, related_name='zone') styles = models.TextField(null=True, blank=True) url_root = models.CharField( max_length=255, null=True, blank=True, db_index=True, help_text="alternative URL path root for documents under this zone") def __unicode__(self): return u'DocumentZone %s (%s)' % (self.document.get_absolute_url(), self.document.title) class ReviewTag(TagBase): """A tag indicating review status, mainly for revisions""" class Meta: verbose_name = _('Review Tag') verbose_name_plural = _('Review Tags') class LocalizationTag(TagBase): """A tag indicating localization status, mainly for revisions""" class Meta: verbose_name = _('Localization Tag') verbose_name_plural = _('Localization Tags') class ReviewTaggedRevision(ItemBase): """Through model, just for review tags on revisions""" content_object = models.ForeignKey('Revision') tag = models.ForeignKey(ReviewTag, related_name="%(app_label)s_%(class)s_items") @classmethod def tags_for(cls, *args, **kwargs): return tags_for(cls, *args, **kwargs) class LocalizationTaggedRevision(ItemBase): """Through model, just for localization tags on revisions""" content_object = models.ForeignKey('Revision') tag = models.ForeignKey(LocalizationTag, related_name="%(app_label)s_%(class)s_items") @classmethod def tags_for(cls, *args, **kwargs): return tags_for(cls, *args, **kwargs) class Revision(models.Model): """A revision of a localized knowledgebase document""" # Depth of table-of-contents in document display. TOC_DEPTH_NONE = 0 TOC_DEPTH_ALL = 1 TOC_DEPTH_H2 = 2 TOC_DEPTH_H3 = 3 TOC_DEPTH_H4 = 4 TOC_DEPTH_CHOICES = ( (TOC_DEPTH_NONE, _(u'No table of contents')), (TOC_DEPTH_ALL, _(u'All levels')), (TOC_DEPTH_H2, _(u'H2 and higher')), (TOC_DEPTH_H3, _(u'H3 and higher')), (TOC_DEPTH_H4, _('H4 and higher')), ) document = models.ForeignKey(Document, related_name='revisions') # Title and slug in document are primary, but they're kept here for # revision history. 
title = models.CharField(max_length=255, null=True, db_index=True) slug = models.CharField(max_length=255, null=True, db_index=True) summary = models.TextField() # wiki markup content = models.TextField() # wiki markup tidied_content = models.TextField(blank=True) # wiki markup tidied up # Keywords are used mostly to affect search rankings. Moderators may not # have the language expertise to translate keywords, so we put them in the # Revision so the translators can handle them: keywords = models.CharField(max_length=255, blank=True) # Tags are stored in a Revision as a plain CharField, because Revisions are # not indexed by tags. This data is retained for history tracking. tags = models.CharField(max_length=255, blank=True) # Tags are (ab)used as status flags and for searches, but the through model # should constrain things from getting expensive. review_tags = TaggableManager(through=ReviewTaggedRevision) localization_tags = TaggableManager(through=LocalizationTaggedRevision) toc_depth = models.IntegerField(choices=TOC_DEPTH_CHOICES, default=TOC_DEPTH_ALL) # Maximum age (in seconds) before this document needs re-rendering render_max_age = models.IntegerField(blank=True, null=True) created = models.DateTimeField(default=datetime.now, db_index=True) comment = models.CharField(max_length=255) creator = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='created_revisions') is_approved = models.BooleanField(default=True, db_index=True) # The default locale's rev that was current when the Edit button was hit to # create this revision. Used to determine whether localizations are out of # date. based_on = models.ForeignKey('self', null=True, blank=True) # TODO: limit_choices_to={'document__locale': # settings.WIKI_DEFAULT_LANGUAGE} is a start but not sufficient. 
is_mindtouch_migration = models.BooleanField(default=False, db_index=True, help_text="Did this revision come from MindTouch?") objects = TransformManager() def get_absolute_url(self): """Build the absolute URL to this revision""" return reverse('wiki.revision', locale=self.document.locale, args=[self.document.slug, self.pk]) def _based_on_is_clean(self): """Return a tuple: (the correct value of based_on, whether the old value was correct). based_on must be an approved revision of the English version of the document if there are any such revisions, any revision if no approved revision exists, and None otherwise. If based_on is not already set when this is called, the return value defaults to the current_revision of the English document. """ # TODO(james): This could probably be simplified down to "if # based_on is set, it must be a revision of the original document." original = self.document.original base = original.current_or_latest_revision() has_approved = original.revisions.filter(is_approved=True).exists() if (original.current_revision or not has_approved): if (self.based_on and self.based_on.document != original): # based_on is set and points to the wrong doc. return base, False # Else based_on is valid; leave it alone. elif self.based_on: return None, False return self.based_on, True def clean(self): """Ensure based_on is valid.""" # All of the cleaning herein should be unnecessary unless the user # messes with hidden form data. try: self.document and self.document.original except Document.DoesNotExist: # For clean()ing forms that don't have a document instance behind # them yet self.based_on = None else: based_on, is_clean = self._based_on_is_clean() if not is_clean: if self.document.parent: # Restoring translation source, so base on current_revision self.based_on = self.document.parent.current_revision else: old = self.based_on self.based_on = based_on # Guess a correct value. 
locale = settings.LOCALES[settings.WIKI_DEFAULT_LANGUAGE].native error = ugettext( 'A revision must be based on a revision of the ' '%(locale)s document. Revision ID %(id)s does ' 'not fit those criteria.') raise ValidationError(error % {'locale': locale, 'id': old.id}) def save(self, *args, **kwargs): _, is_clean = self._based_on_is_clean() if not is_clean: # No more Mister Nice Guy # TODO(erik): This error message ignores non-translations. raise ProgrammingError('Revision.based_on must be None or refer ' 'to a revision of the default-' 'language document. It was %s' % self.based_on) if not self.title: self.title = self.document.title if not self.slug: self.slug = self.document.slug super(Revision, self).save(*args, **kwargs) # When a revision is approved, update document metadata and re-cache # the document's html content if self.is_approved: self.make_current() def make_current(self): """ Make this revision the current one for the document """ self.document.title = self.title self.document.slug = self.slug self.document.html = self.content_cleaned self.document.render_max_age = self.render_max_age self.document.current_revision = self # Since Revision stores tags as a string, we need to parse them first # before setting on the Document. self.document.tags.set(*parse_tags(self.tags)) self.document.save() # Re-create all document-attachment relations since they are based # on the actual HTML content self.document.populate_attachments() def __unicode__(self): return u'[%s] %s #%s' % (self.document.locale, self.document.title, self.id) def get_section_content(self, section_id): """Convenience method to extract the content for a single section""" return self.document.extract.section(self.content, section_id) def get_tidied_content(self, allow_none=False): """ Return the revision content parsed and cleaned by tidy. First, check in denormalized db field. If it's not available, schedule an asynchronous task to store it. 
allow_none -- To prevent CPU-hogging calls, return None instead of calling tidy_content in-process. """ # we may be lucky and have the tidied content already denormalized # in the database, if so return it if self.tidied_content: tidied_content = self.tidied_content else: if allow_none: if self.pk: from .tasks import tidy_revision_content tidy_revision_content.delay(self.pk, refresh=False) tidied_content = None else: tidied_content, errors = tidy_content(self.content) if self.pk: Revision.objects.filter(pk=self.pk).update( tidied_content=tidied_content) self.tidied_content = tidied_content or '' return tidied_content @property def content_cleaned(self): if self.document.is_template: return self.content else: return Document.objects.clean_content(self.content) @cached_property def previous(self): return self.get_previous() def get_previous(self): """ Returns the previous approved revision or None. """ try: return self.document.revisions.filter( is_approved=True, created__lt=self.created, ).order_by('-created')[0] except IndexError: return None @cached_property def needs_editorial_review(self): return self.review_tags.filter(name='editorial').exists() @cached_property def needs_technical_review(self): return self.review_tags.filter(name='technical').exists() @cached_property def localization_in_progress(self): return self.localization_tags.filter(name='inprogress').exists() @property def translation_age(self): return abs((datetime.now() - self.created).days) class RevisionIP(models.Model): """ IP Address for a Revision including User-Agent string and Referrer URL. 
""" revision = models.ForeignKey( Revision ) ip = models.CharField( _('IP address'), max_length=40, editable=False, db_index=True, blank=True, null=True, ) user_agent = models.TextField( _('User-Agent'), editable=False, blank=True, ) referrer = models.TextField( _('HTTP Referrer'), editable=False, blank=True, ) data = models.TextField( editable=False, blank=True, null=True, verbose_name=_('Data submitted to Akismet') ) objects = RevisionIPManager() def __unicode__(self): return '%s (revision %d)' % (self.ip or 'No IP', self.revision.id) class RevisionAkismetSubmission(AkismetSubmission): """ The Akismet submission per wiki document revision. Stores only a reference to the submitted revision. """ revision = models.ForeignKey( Revision, related_name='akismet_submissions', null=True, blank=True, verbose_name=_('Revision'), # don't delete the akismet submission but set the revision to null on_delete=models.SET_NULL, ) class Meta: verbose_name = _('Akismet submission') verbose_name_plural = _('Akismet submissions') def __unicode__(self): if self.revision: return ( u'%(type)s submission by %(sender)s (Revision %(revision_id)d)' % { 'type': self.get_type_display(), 'sender': self.sender, 'revision_id': self.revision.id, } ) else: return ( u'%(type)s submission by %(sender)s (no revision)' % { 'type': self.get_type_display(), 'sender': self.sender, } ) class EditorToolbar(models.Model): creator = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='created_toolbars') default = models.BooleanField(default=False) name = models.CharField(max_length=100) code = models.TextField(max_length=2000) def __unicode__(self): return self.name class DocumentSpamAttempt(SpamAttempt): """ The wiki document specific spam attempt. Stores title, slug and locale of the documet revision to be able to see where it happens. Stores data sent to Akismet so that staff can review Akismet's spam detection for false positives. 
""" title = models.CharField( verbose_name=_('Title'), max_length=255, ) slug = models.CharField( verbose_name=_('Slug'), max_length=255, ) document = models.ForeignKey( Document, related_name='spam_attempts', null=True, blank=True, verbose_name=_('Document (optional)'), on_delete=models.SET_NULL, ) data = models.TextField( editable=False, blank=True, null=True, verbose_name=_('Data submitted to Akismet') ) reviewed = models.DateTimeField( _('reviewed'), blank=True, null=True, ) NEEDS_REVIEW = 0 HAM = 1 SPAM = 2 REVIEW_UNAVAILABLE = 3 AKISMET_ERROR = 4 REVIEW_CHOICES = ( (NEEDS_REVIEW, _('Needs Review')), (HAM, _('Ham / False Positive')), (SPAM, _('Confirmed as Spam')), (REVIEW_UNAVAILABLE, _('Review Unavailable')), (AKISMET_ERROR, _('Akismet Error')), ) review = models.IntegerField( choices=REVIEW_CHOICES, default=NEEDS_REVIEW, verbose_name=_("Review of Akismet's classification as spam"), ) reviewer = models.ForeignKey( settings.AUTH_USER_MODEL, related_name='documentspam_reviewed', blank=True, null=True, verbose_name=_('Staff reviewer'), ) def __unicode__(self): return u'%s (%s)' % (self.slug, self.title)
from django.db import models
from metronus_app.model.actor import Actor
from metronus_app.model.task import Task


class GoalEvolution(models.Model):
    """
    Each time the goal or the price per unit/hour from a task is changed,
    a new entry is created in the log

    Maybe should have been named TaskLog, but...
    """
    task_id = models.ForeignKey(Task)
    # auto_now=True: timestamp refreshed on every save of the log row
    registryDate = models.DateTimeField(auto_now=True)
    actor_id = models.ForeignKey(Actor)
    production_goal = models.FloatField(blank=True, null=True)
    goal_description = models.CharField(blank=True, max_length=100, default="")
    price_per_unit = models.FloatField(null=True, blank=True)
    price_per_hour = models.FloatField(null=True, blank=True)

    def __unicode__(self):
        # BUG FIX: __unicode__ must return a string; previously this
        # returned the raw FloatField value (possibly None), which raises
        # TypeError when Django coerces the model instance to unicode.
        return u'%s' % self.production_goal
import csv import json import sys import click def score(company, sexbiases): """ Given a company record with board of directors and executive names, return our guess of the % of governance that is male. Since names are not always unambiguous determinants of sex, we also return an error bound, with 0.0 being perfect and 1.0 being possibly 100% wrong. """ men = 0 error = 0.0 governors = company['board'] + company['executives'] # Get all governor names, de-duping since board/exec team may overlap names = set([governor.get('name', '') for governor in governors]) for name in names: first_name = name.split(' ')[0].strip().title() bias = sexbiases.get(first_name, 0.0) # Assume male if not known, with maximal error bound if bias <= 0.0: men += 1 error += 1.0 - abs(bias) count = len(names) return (men/count, error/count) @click.command() @click.option('--companies', type=click.File(mode='rt'), required=True, help="A companies data file, as created by symbol-to-company-details.") @click.option('--sexbiases', type=click.File(mode='rt'), required=True, help="A sex bias CSV datafile, as in first_name_sex_bias.csv") def corpscore(companies, sexbiases): sexbias_reader = csv.reader(sexbiases) sexbiases = dict([item[0], float(item[1])] for item in sexbias_reader) fieldnames = ['symbol', 'url', 'percent_men', 'error', 'description'] writer = csv.DictWriter(sys.stdout, fieldnames=fieldnames) writer.writeheader() for company_json in companies: company = json.loads(company_json) percent_men, error = score(company, sexbiases) writer.writerow({ 'symbol': company['symbol'], 'url': company.get('url'), 'percent_men': percent_men, 'error': error, 'description': company.get('description'), }) sys.stdout.flush() if __name__ == '__main__': corpscore()
""" Multi-gpu code for Keras/TF. From https://github.com/avolkov1/keras_experiments """ import sys from itertools import chain import warnings from .multi_gpu_utils import Capturing from keras import backend as KB from keras.layers.core import Lambda from keras.models import Model from keras.layers.merge import Concatenate # , Average) import keras.optimizers as KO if KB.backend() == 'tensorflow': # Monkey patch Keras back-end to use Function with enqueue. # import keras_exp._patch_tf_backend as tfbpatch # tfbpatch.patch() # from keras_exp._patch_tf_backend import patch as tfbpatch # tfbpatch() import tensorflow as tf from tensorflow.python.client import device_lib try: from tensorflow.contrib import nccl have_nccl = True print('NCCL support available', file=sys.stderr) except ImportError: have_nccl = False print('WARNING: NCCL support not available', file=sys.stderr) from tensorflow.python.ops import data_flow_ops _DEBUG = False __all__ = ('get_available_gpus', 'make_parallel', 'print_mgpu_modelsummary', 'ModelMGPU') def get_available_gpus(ngpus=-1): ''' :param int ngpus: GPUs max to use. Default -1 means all gpus. :returns: List of gpu devices. Ex.: ['/gpu:0', '/gpu:1', ...] ''' local_device_protos = device_lib.list_local_devices() gpus_list = [x.name for x in local_device_protos if x.device_type == 'GPU'] return gpus_list[:ngpus] if ngpus > -1 else gpus_list def print_mgpu_modelsummary(model): '''Prints the summary for a multi-GPU keras model. :param model: Keras model. 
:type model: Model ''' # print json.dumps(model.get_config(), indent=2) # DEBUG print('\nMULTI-GPU MODEL: {}'.format(model.name)) print(model.summary()) for layer in model.layers: # print 'layer:', layer, '\ttype:', type(layer) if isinstance(layer, Model): submodel = layer print('\n\tSUBMODEL SUMMARY: {}'.format(layer.name)) with Capturing() as msum: minfo = submodel.summary() print('\t{}\n\t{}\n'.format('\n\t'.join(msum), minfo)) def all_sync_params(tower_params, devices, usenccl=True): """Assigns the params from the first tower to all others""" if len(devices) == 1: return tf.no_op() sync_ops = [] if have_nccl and usenccl: for param_on_devices in zip(*tower_params): # print('PARAM_ON_DEVICES: {}'.format(param_on_devices)) # DEBUG # Note: param_on_devices is [paramX_gpu0, paramX_gpu1, ...] param0 = param_on_devices[0] send_op, received_tensors = nccl.broadcast(param0, devices[1:]) sync_ops.append(send_op) for device, param, received in zip(devices[1:], param_on_devices[1:], received_tensors): with tf.device(device): sync_op = param.assign(received) sync_ops.append(sync_op) else: params0 = tower_params[0] for device, params in zip(devices, tower_params): with tf.device(device): for param, param0 in zip(params, params0): sync_op = param.assign(param0.read_value()) sync_ops.append(sync_op) return tf.group(*sync_ops) class ModelMGPU(Model): '''Override load and save methods of the multi-gpu model. The load and save should correspond to the serial model's load and save. If there are other idiosyncracies to handle for multi-gpu model case then these can be handled in this subclass. A serial model should always be instantiated prior to wrapping it or converting it to a multi-GPU model. This multi-gpu implementation uses data-parallelism. A copy-constructor is not implemented so optionally pass any additional parameters besides inputs/outputs as args/kwargs to initialize the multi-gpu model the same way as the serial model. Typically not needed. 
Currently, it seems that using NCCL and synchronizing/averaging gradients slows multi-gpu processing down. .. seealso:: Refer to :func:`make_parallel` docstring for scenarios when out-of-memory errors might occur and workaround. Kwargs: :param Model serial_model: Serial i.e. non-multi GPU Keras model. REQUIRED. :param list gdev_list: List of gpu devices i.e. ['/gpu:0', '/gpu:1', ...] Use function get_available_gpus to get the list of available gpus. This can be a list of strings or list of instances of tf.DeviceSpec. REQUIRED. :param str ps_device: Parameter server device to use. :param bool usenccl: Use the contrib.nccl Tensorflow library for initial parameter synchronization and gradients averaging. Note, the models usenccl option overrides the optimizers usenccl option. Default: False Raises RuntimeError if specified True and a non-multi-gpu optimizer is passed during compile stage. :param bool initsync: Synchronize initial Variables i.e. weights, biases, etc. Default: True :param bool syncopt: Synchronize gradients. Requires a multi-gpu optimizer. Default: False :param bool enqueue: Use StagingArea in the multi-GPU model. Could potentially speed up Host-to-Device transfers. Produces a warning that kwargs are ignored for Tensorflow. The _patch_tf_backend module mokey patches the Function in tensorflow_backend to use the enqueue_ops option. Default: False ''' def __init__(self, *args, **kwargs): # :param model_creator: Callable that returns a serial i.e. non-multi # GPU Keras model i.e. a keras.models.Model model. REQUIRED. # Suggestion, use partial from functools to setup model_creator. 
# try: # model_creator = kwargs.pop('model_creator') # except KeyError: # raise RuntimeError('Keyword argument "model_creator" required ' # 'for ModelMGPU.') try: smodel = kwargs.pop('serial_model') except KeyError: raise RuntimeError('Keyword argument "serial_model" required ' 'for ModelMGPU.') # SET STATE: Instance of serial model for checkpointing self._smodel = smodel # model_creator() try: gdev_list = kwargs.pop('gdev_list') except KeyError: raise RuntimeError('Keyword argument "gdev_list" required ' 'for ModelMGPU.') self._gdev_list = gdev_list mname = kwargs.pop('name', self._smodel.name) kwargs['name'] = mname self._ps_device = kwargs.pop('ps_device', '/cpu:0') self._initsync = kwargs.pop('initsync', True) self._usenccl = kwargs.pop('usenccl', False) self._syncopt = kwargs.pop('syncopt', False) self._enqueue = kwargs.pop('enqueue', False) if self._enqueue: warnings.warn('Enqueue option to use StagingArea currenctly does ' 'not work.', UserWarning) # NOTE: To use staging have to patch keras tensorflow_backend.Function. # Function implementation in keras_exp.multigpu._patch_tf_backend self._enqueue_ops = [] self._tower_params = [] # For init/sync'ing of parameters. self._init_make_dataparallel(gdev_list, *args, **kwargs) def __getattribute__(self, attrname): '''Override load and save methods to be used from the serial-model. The serial-model holds references to the weights in the multi-gpu model. ''' # return Model.__getattribute__(self, attrname) if 'load' in attrname or 'save' in attrname: return getattr(self._smodel, attrname) return super(ModelMGPU, self).__getattribute__(attrname) # ref: https://github.com/fchollet/keras/issues/2436 def _init_make_dataparallel(self, gdev_list, *args, **kwargs): '''Uses data-parallelism to convert a serial model to multi-gpu. Refer to make_parallel doc. ''' gpucopy_ops = [] def slice_batch(x, ngpus, part, dev): '''Divide the input batch into [ngpus] slices, and obtain slice no. [part]. i.e. 
if len(x)=10, then slice_batch(x, 2, 1) will return x[5:]. ''' sh = KB.shape(x) L = sh[0] // ngpus if part == ngpus - 1: xslice = x[part * L:] else: xslice = x[part * L:(part + 1) * L] # tf.split fails if batch size is not divisible by ngpus. Error: # InvalidArgumentError (see above for traceback): Number of # ways to split should evenly divide the split dimension # xslice = tf.split(x, ngpus)[part] if not self._enqueue: return xslice # Did not see any benefit. with tf.device(dev): # if self._stager is None: stager = data_flow_ops.StagingArea( dtypes=[xslice.dtype], shapes=[xslice.shape]) stage = stager.put([xslice]) gpucopy_ops.append(stage) # xslice_stage = stager.get() return stager.get() ngpus = len(gdev_list) if ngpus < 2: raise RuntimeError('Number of gpus < 2. Require two or more GPUs ' 'for multi-gpu model parallelization.') model = self._smodel noutputs = len(self._smodel.outputs) global_scope = tf.get_variable_scope() towers = [[] for _ in range(noutputs)] for idev, dev in enumerate(gdev_list): # TODO: The last slice could cause a gradient calculation outlier # when averaging gradients. Maybe insure ahead of time that the # batch_size is evenly divisible by number of GPUs, or maybe don't # use the last slice. with tf.device(self._ps_device): slices = [] # multi-input case for ix, x in enumerate(model.inputs): slice_g = Lambda( slice_batch, # lambda shape: shape, # lambda shape: x.shape.as_list(), name='stage_cpuSliceIn{}_Dev{}'.format(ix, idev), arguments={'ngpus': ngpus, 'part': idev, 'dev': dev})(x) slices.append(slice_g) # print('SLICE_G: {}'.format(slice_g)) # DEBUG # print('SLICES: {}'.format(slices)) # DEBUG # with tf.variable_scope('GPU_%i' % idev), \ # tf.variable_scope(global_scope, reuse=idev > 0), \ # tf.variable_scope('GPU_{}'.format(idev), # reuse=idev > 0) as var_scope, \ with tf.device(dev), \ tf.variable_scope(global_scope, reuse=idev > 0), \ tf.name_scope('tower_%i' % idev): # NOTE: Currently not using model_creator. 
Did not observe # any benefit in such an implementation. # Instantiate model under device context. More complicated. # Need to use optimizer synchronization in this scenario. # model_ = model_creator() # If using NCCL without re-instantiating the model then must # set the colocate_gradients_with_ops to False in optimizer. # if idev == 0: # # SET STATE: Instance of serial model for checkpointing # self._smodel = model_ # for ability to checkpoint # Handle multi-output case modeltower = model(slices) if not isinstance(modeltower, list): modeltower = [modeltower] for imt, mt in enumerate(modeltower): towers[imt].append(mt) params = mt.graph._collections['trainable_variables'] # params = model_.trainable_weights # params = tf.get_collection( # tf.GraphKeys.TRAINABLE_VARIABLES, scope=var_scope.name) # params = modeltower.graph._collections['trainable_variables'] # print('PARAMS: {}'.format(params)) # DEBUG self._tower_params.append(params) with tf.device(self._ps_device): # merged = Concatenate(axis=0)(towers) merged = [Concatenate(axis=0)(tw) for tw in towers] # self._enqueue_ops.append(tf.group(*gpucopy_ops)) self._enqueue_ops += gpucopy_ops kwargs['inputs'] = model.inputs kwargs['outputs'] = merged super(ModelMGPU, self).__init__(*args, **kwargs) def compile(self, *args, **kwargs): '''Refer to Model.compile docstring for parameters. Override functionality is documented below. :override compile: Override Model.compile method to check for options that the optimizer is multi-gpu enabled, and synchronize initial variables. ''' initsync = self._initsync usenccl = self._usenccl opt = kwargs['optimizer'] # if isinstance(opt, str): if not isinstance(opt, KO.Optimizer): opt = KO.get(opt) kwargs['optimizer'] = opt if self._syncopt and not getattr(opt, 'ismgpu', False): raise RuntimeError( 'Multi-GPU synchronization model requires a multi-GPU ' 'optimizer. 
Instead got: {}'.format(opt)) opt.usenccl = usenccl if self._enqueue_ops: # Produces a warning that kwargs are ignored for Tensorflow. Patch # Function in tensorflow_backend to use the enqueue_ops option. kwargs['fetches'] = self._enqueue_ops super(ModelMGPU, self).compile(*args, **kwargs) if initsync: self._run_initsync() def _run_initsync(self): # tparams = [list(chain(*tp)) for tp in self._tower_params] tparams = self._tower_params # Check to prevent from unnecessarily re-initializing and # synchronizing, i.e. when the model loads the weights. for v in chain.from_iterable(tparams): if getattr(v, '_keras_initialized', False): return KB.manual_variable_initialization(True) sess = KB.get_session() KB.manual_variable_initialization(False) # glob_variables = tf.global_variables() # sess.run(tf.variables_initializer(glob_variables)) # Initialize on GPU0 and sync to other GPUs init_op = tf.variables_initializer(tparams[0]) # init_op = tf.variables_initializer(self._tower_params[0]) # init_op = tf.variables_initializer(self.trainable_weights) sess.run(init_op) # Important if using model_creator. Not necessary of model instance is # reused in which case the model layers are shared between slices # and are automatically sync'd. sync_op = all_sync_params(tparams, self._gdev_list, usenccl=self._usenccl) sess.run(sync_op) for v in chain.from_iterable(tparams): v._keras_initialized = True def make_parallel(serial_model, gdev_list, ps_device='/cpu:0', usenccl=False, initsync=True, syncopt=False, enqueue=False, model_class=ModelMGPU): '''Given a keras model, return an equivalent model which parallelizes the computation over multiple GPUs listed in the gdev_list. Data-Parallel: Each GPU gets a slice of the input batch, applies the model on that slice and later the outputs of the models are concatenated to a single tensor, hence the user sees a model that behaves the same as the original. 
If getting an out-of-memory (OOM) error when scaling the batch size by the number of GPUs, there might be input layer(s) in the serial model that runs additional special operations (such as tranformation of some sort) on the 1st GPU as enumerated by Tensorflow. This was an observed behavior for Embedding layers. The workaround is to pin such layers to the CPU, or simply pin the instantiation of the serial mode to CPU. The parallelization will move the operations to GPU. :Example: if mgpu_flag: with tf.device('/cpu:0'): # define the serial model. model_serial = get_model_serial() gdev_list = get_available_gpus() model = make_parallel(model_serial, gdev_list) else: model = def_model_serial() :param Model serial_model: Serial i.e. non-multi GPU Keras model. :param list gdev_list: List of gpu devices i.e. ['/gpu:0', '/gpu:1', ...] Use function get_available_gpus to get the list of available gpus. This can be a list of strings or list of instances of tf.DeviceSpec. :param str ps_device: Parameter server device to use. :param bool usenccl: Use the contrib.nccl Tensorflow library for initial parameter synchronization and gradients averaging. Note, the model's usenccl option overrides the optimizers usenccl option. Default: False :param bool initsync: Synchronize initial Variables i.e. weights, biases, etc. Default: True :param bool syncopt: Synchronize gradients. Requires a multi-gpu optimizer. Default: False :param bool enqueue: Use StagingArea in the multi-GPU model. Could potentially speed up Host-to-Device transfers. Produces a warning that kwargs are ignored for Tensorflow. The _patch_tf_backend module mokey patches the Function in tensorflow_backend to use the enqueue_ops option. Default: False :param model_class: Class object to instantiate for multi-gpu models. This is needed when the ModelMGPU is mixed-in with other classes. Default: ModelMGPU :returns: Multi-GPU parallelized model. If ngpus < 2 then do nothing and return the provided serial_model. 
:rtype: ModelMGPU ''' ngpus = len(gdev_list) if ngpus < 2: return serial_model # model_creator() return model_class( serial_model=serial_model, gdev_list=gdev_list, ps_device=ps_device, enqueue=enqueue, usenccl=usenccl, initsync=initsync, syncopt=syncopt)
from . import base_wizard_mixin from . import document_cancel_wizard from . import document_correction_wizard from . import document_status_wizard from . import invalidate_number_wizard
from south.db import db
from django.db import models
from cm.models import *


class Migration:
    # South data migration: backfills the (newly added) key/adminkey
    # columns on every existing TextVersion row.

    def forwards(self, orm):
        "Write your forwards migration here"
        # Generate a fresh key pair for each row using the manager's
        # private generators, then persist row by row.
        for tv in orm.TextVersion.objects.all():
            tv.key = orm.TextVersion.objects._gen_key()
            tv.adminkey = orm.TextVersion.objects._gen_adminkey()
            tv.save()

    def backwards(self, orm):
        # Intentionally a no-op: generated keys are harmless to keep.
        "Write your backwards migration here"

    # Frozen ORM snapshot captured by South at the time this migration was
    # created. Do NOT edit by hand -- it must match the historical schema.
    models = {
        'auth.group': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)"},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75'})
        },
        'cm.activity': {
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cm.Comment']", 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'default': 'None', 'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'originator_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'originator_activity'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
            'text': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cm.Text']", 'null': 'True', 'blank': 'True'}),
            'text_version': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cm.TextVersion']", 'null': 'True', 'blank': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'cm.attachment': {
            'adminkey': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
            'data': ('django.db.models.fields.files.FileField', [], {'max_length': '1000'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
            'text_version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.TextVersion']"})
        },
        'cm.comment': {
            'adminkey': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
            'content': ('django.db.models.fields.TextField', [], {}),
            'content_html': ('django.db.models.fields.TextField', [], {}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'end_offset': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'end_wrapper': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'format': ('django.db.models.fields.CharField', [], {'default': "'markdown'", 'max_length': '20'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.Comment']", 'null': 'True', 'blank': 'True'}),
            'start_offset': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'start_wrapper': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
            'tags': ('tagging.fields.TagField', [], {}),
            'text_version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.TextVersion']"}),
            'title': ('django.db.models.fields.TextField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'cm.configuration': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.TextField', [], {}),
            'raw_value': ('django.db.models.fields.TextField', [], {})
        },
        'cm.email': {
            'bcc': ('django.db.models.fields.TextField', [], {}),
            'body': ('django.db.models.fields.TextField', [], {}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'from_email': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'subject': ('django.db.models.fields.TextField', [], {}),
            'to': ('django.db.models.fields.TextField', [], {})
        },
        'cm.notification': {
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'adminkey': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'text': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.Text']", 'null': 'True', 'blank': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'cm.role': {
            'anon': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'global_scope': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']"})
        },
        'cm.text': {
            'adminkey': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
            'last_text_version': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'related_text'", 'null': 'True', 'to': "orm['cm.TextVersion']"}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'private_feed_key': ('django.db.models.fields.CharField', [], {'null': 'True', 'default': 'None', 'max_length': '20', 'blank': 'True', 'unique': 'True', 'db_index': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
            'title': ('django.db.models.fields.TextField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'cm.textversion': {
            'adminkey': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
            'content': ('django.db.models.fields.TextField', [], {}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'format': ('django.db.models.fields.CharField', [], {'default': "'markdown'", 'max_length': '20'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
            'mod_posteriori': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'note': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'tags': ('tagging.fields.TagField', [], {'max_length': '1000'}),
            'text': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.Text']"}),
            'title': ('django.db.models.fields.TextField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'cm.userprofile': {
            'adminkey': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
            'allow_contact': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_email_error': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_suspended': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_temp': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'preferred_language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '2'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'cm.userrole': {
            'Meta': {'unique_together': "(('role', 'user', 'text'),)"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.Role']", 'null': 'True', 'blank': 'True'}),
            'text': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.Text']", 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['cm']
from openerp.osv import orm, fields import decimal_precision as dp import netsvc from tools import ustr class sale_order_confirm(orm.TransientModel): _inherit = "sale.order.confirm" _columns = { 'cig': fields.char('CIG', size=64, help="Codice identificativo di gara"), 'cup': fields.char('CUP', size=64, help="Codice unico di Progetto") } # def default_get(self, cr, uid, fields, context=None): # sale_order_obj = self.pool['sale.order'] # if context is None: # context = {} # # res = super(sale_order_confirm, self).default_get(cr, uid, fields, context=context) # sale_order_data = sale_order_obj.browse(cr, uid, context['active_ids'][0], context) # # res['cup'] = sale_order_data.cig # res['cig'] = sale_order_data.cup # # return res def sale_order_confirmated(self, cr, uid, ids, context=None): sale_order_obj = self.pool['sale.order'] result = super(sale_order_confirm, self).sale_order_confirmated(cr, uid, ids, context=context) sale_order_confirm_data = self.browse(cr, uid, ids[0], context=context) if result.get('res_id'): sale_order_obj.write(cr, uid, result['res_id'], { 'cig': sale_order_confirm_data.cig, 'cup': sale_order_confirm_data.cup, }, context=context) else: sale_order_obj.write(cr, uid, context['active_ids'][0], { 'cig': sale_order_confirm_data.cig, 'cup': sale_order_confirm_data.cup, }, context=context) for order in sale_order_obj.browse(cr, uid, [result.get('res_id') or context['active_ids'][0]], context=context): # partner = self.pool['res.partner'].browse(cr, uid, order.partner_id.id) picking_obj = self.pool['stock.picking'] picking_ids = picking_obj.search(cr, uid, [('sale_id', '=', order.id)], context=context) for picking_id in picking_ids: picking_obj.write(cr, uid, picking_id, { 'cig': sale_order_confirm_data.cig or '', 'cup': sale_order_confirm_data.cup or '' }, context=context) return result
from docx import Document
import datetime
from schedule.Assignment import *

# Column indexes in the schedule table.
DATE = 0
SECTION_A_PARTICIPANTS = 1
SECTION_A_LESSON = 2
SECTION_B_PARTICIPANTS = 3
SECTION_B_LESSON = 4

# Header line of the generated CSV file.
HEADER = 'Date,Type,Assignee,Householder,Lesson,Classroom'


def getWeekDate(weekHeaderRow, year, month):
    '''Extract the date from a week-header table row.

    Returns a MySQL-style ``YYYY-MM-DD`` string built from ``year``,
    ``month`` and the (single) numeric token found in the date cell, or
    the empty string when the date cell is empty.
    '''
    raw_date = weekHeaderRow.cells[DATE].text.strip()
    if raw_date == '':
        return raw_date

    # Split the cell text and keep only numeric tokens; assume the one
    # and only numeric token is the day of the month.
    date_parts = raw_date.split()
    numericParts = [part for part in date_parts if part.isnumeric()]
    day = int(numericParts[0])

    # This is the format for MySQL dates.
    return '{:%Y-%m-%d}'.format(datetime.date(year, month, day))


def _parseSection(row, date, aType, participantsIdx, lessonIdx, section):
    '''Parse one classroom section of an assignment row.

    Returns a populated ``Assignment`` or ``None`` when the section has
    no participants.
    '''
    participants = row.cells[participantsIdx].text.strip()
    if participants == '':
        return None

    assgn = Assignment()
    assgn.date = date
    assgn.type = aType
    assgn.lesson = row.cells[lessonIdx].text.strip()
    assgn.section = section

    # The assignee comes first, the householder (if any) second; strip
    # each entry because split() can leave surrounding white space.  A
    # third entry (an additional helper) is deliberately ignored.
    students = participants.split('\n')
    assgn.assignee = students[0].strip()
    if len(students) > 1:
        assgn.hholder = students[1].strip()
    return assgn


def parseAssignmentRow(row, date, aType):
    '''Parse an assignment row from the table.

    Returns a list with the assignments found in sections A and B
    (zero, one or two entries).
    '''
    assignments = []
    for pIdx, lIdx, section in (
            (SECTION_A_PARTICIPANTS, SECTION_A_LESSON, SECTION_A),
            (SECTION_B_PARTICIPANTS, SECTION_B_LESSON, SECTION_B)):
        assgn = _parseSection(row, date, aType, pIdx, lIdx, section)
        if assgn is not None:
            assignments.append(assgn)
    return assignments


def to_csv(path, year, month):
    '''Convert the docx schedule at ``path`` into a CSV file.

    ``year`` and ``month`` are ints used to build the assignment dates.
    The output file is written to ``../csv/<year>-<month>.csv`` (the
    relative path only works when called from main.py).

    :raises ValueError: when the document does not contain exactly one
        table.
    '''
    docxsched = Document(path)

    tables = docxsched.tables
    if len(tables) != 1:
        # The original code only printed a warning here and then indexed
        # tables[0] anyway (crashing on a table-less document); fail
        # loudly instead, as the old TODO comment suggested.
        raise ValueError('uh oh, there should be exactly one table in the document')
    table = tables[0]

    # Assume everything else is as expected: there are 5 weeks for every
    # schedule.  Pick up the date, then the type; whenever participants
    # are found, emit a CSV line.
    csvSched = []  # one CSV line (with trailing newline) per assignment

    row_iter = iter(table.rows[1:])  # skip the header row
    row = next(row_iter)

    # The first week of every month is different (has only 1 assignment).
    date = getWeekDate(row, year, month)

    # Advance to the only participation for the first week (Reading).
    row = next(row_iter)
    assgnRow = parseAssignmentRow(row, date, READING)
    for assgn in assgnRow:
        csvSched.append(assgn.makeCSV() + '\n')

    # Now continue with the remaining 4 weeks.
    for week in range(4):
        row = next(row_iter)
        date = getWeekDate(row, year, month)
        if date == '':
            # No date means no assignments for this week.
            # NOTE(review): this assumes the per-type rows are also absent
            # for a dateless week — confirm against real documents.
            continue
        for assgnType in TYPES:
            row = next(row_iter)
            assgnRow = parseAssignmentRow(row, date, assgnType)
            for assgn in assgnRow:
                csvSched.append(assgn.makeCSV() + '\n')

    # Path will only work when called from main.py.
    csvfilename = '../csv/%d-%d.csv' % (year, month)
    with open(csvfilename, encoding='utf-8', mode='w') as parsed:
        parsed.write(HEADER + '\n')
        for line in csvSched:
            parsed.write(line)


if __name__ == '__main__':
    print('Running as main. Doing nothing.')
import win32file  # The base COM port and file IO functions.
import win32event # We use events and the WaitFor[Single|Multiple]Objects functions.
import win32con   # constants.
from serialutil import *

VERSION = "$Revision: 1527 $".split()[1]     #extract CVS version

# Modem-status bit masks as returned by GetCommModemStatus().
MS_CTS_ON  = 16
MS_DSR_ON  = 32
MS_RING_ON = 64
MS_RLSD_ON = 128


def device(portnum):
    """Turn a zero-based port number into a Windows device name."""
    #the "//./COMx" format is required for devices >= 9
    #not all versions of windows seem to support this properly
    #so that the first few ports are used with the DOS device name
    if portnum < 9:
        return 'COM%d' % (portnum+1) #numbers are transformed to a string
    else:
        return r'\\.\COM%d' % (portnum+1)


class Serial(SerialBase):
    """Serial port implementation for Win32 using overlapped I/O.
       This implementation requires a win32all installation."""

    BAUDRATES = (50,75,110,134,150,200,300,600,1200,1800,2400,4800,9600,
                 19200,38400,57600,115200)

    def open(self):
        """Open port with current settings. This may throw a SerialException
           if the port cannot be opened."""
        if self._port is None:
            raise SerialException("Port must be configured before it can be used.")
        self.hComPort = None
        try:
            # FILE_FLAG_OVERLAPPED: all reads/writes below use overlapped I/O.
            self.hComPort = win32file.CreateFile(self.portstr,
                   win32con.GENERIC_READ | win32con.GENERIC_WRITE,
                   0, # exclusive access
                   None, # no security
                   win32con.OPEN_EXISTING,
                   win32con.FILE_ATTRIBUTE_NORMAL | win32con.FILE_FLAG_OVERLAPPED,
                   None)
        except Exception, msg:
            self.hComPort = None    #'cause __del__ is called anyway
            raise SerialException("could not open port: %s" % msg)
        # Setup a 4k buffer
        win32file.SetupComm(self.hComPort, 4096, 4096)

        #Save original timeout values so close() can restore them:
        self._orgTimeouts = win32file.GetCommTimeouts(self.hComPort)

        self._reconfigurePort()

        # Clear buffers:
        # Remove anything that was there
        win32file.PurgeComm(self.hComPort,
                            win32file.PURGE_TXCLEAR | win32file.PURGE_TXABORT |
                            win32file.PURGE_RXCLEAR | win32file.PURGE_RXABORT)

        # Manual-reset event for reads, auto-reset for writes.
        self._overlappedRead = win32file.OVERLAPPED()
        self._overlappedRead.hEvent = win32event.CreateEvent(None, 1, 0, None)
        self._overlappedWrite = win32file.OVERLAPPED()
        self._overlappedWrite.hEvent = win32event.CreateEvent(None, 0, 0, None)
        self._isOpen = True

    def _reconfigurePort(self):
        """Set communication parameters on opened port."""
        if not self.hComPort:
            raise SerialException("Can only operate on a valid port handle")

        #Set Windows timeout values
        #timeouts is a tuple with the following items:
        #(ReadIntervalTimeout,ReadTotalTimeoutMultiplier,
        # ReadTotalTimeoutConstant,WriteTotalTimeoutMultiplier,
        # WriteTotalTimeoutConstant)
        if self._timeout is None:
            timeouts = (0, 0, 0, 0, 0)          # blocking reads
        elif self._timeout == 0:
            timeouts = (win32con.MAXDWORD, 0, 0, 0, 0)  # non-blocking reads
        else:
            timeouts = (0, 0, int(self._timeout*1000), 0, 0)
        win32file.SetCommTimeouts(self.hComPort, timeouts)

        win32file.SetCommMask(self.hComPort, win32file.EV_ERR)

        # Setup the connection info.
        # Get state and modify it:
        comDCB = win32file.GetCommState(self.hComPort)
        comDCB.BaudRate = self._baudrate

        if self._bytesize == FIVEBITS:
            comDCB.ByteSize = 5
        elif self._bytesize == SIXBITS:
            comDCB.ByteSize = 6
        elif self._bytesize == SEVENBITS:
            comDCB.ByteSize = 7
        elif self._bytesize == EIGHTBITS:
            comDCB.ByteSize = 8
        else:
            raise ValueError("Unsupported number of data bits: %r" % self._bytesize)

        if self._parity == PARITY_NONE:
            comDCB.Parity = win32file.NOPARITY
            comDCB.fParity = 0 # Dis/Enable Parity Check
        elif self._parity == PARITY_EVEN:
            comDCB.Parity = win32file.EVENPARITY
            comDCB.fParity = 1 # Dis/Enable Parity Check
        elif self._parity == PARITY_ODD:
            comDCB.Parity = win32file.ODDPARITY
            comDCB.fParity = 1 # Dis/Enable Parity Check
        else:
            raise ValueError("Unsupported parity mode: %r" % self._parity)

        if self._stopbits == STOPBITS_ONE:
            comDCB.StopBits = win32file.ONESTOPBIT
        elif self._stopbits == STOPBITS_TWO:
            comDCB.StopBits = win32file.TWOSTOPBITS
        else:
            raise ValueError("Unsupported number of stop bits: %r" % self._stopbits)

        comDCB.fBinary = 1 # Enable Binary Transmission
        # Char. w/ Parity-Err are replaced with 0xff (if fErrorChar is set to TRUE)
        if self._rtscts:
            comDCB.fRtsControl = win32file.RTS_CONTROL_HANDSHAKE
            comDCB.fDtrControl = win32file.DTR_CONTROL_HANDSHAKE
        else:
            comDCB.fRtsControl = win32file.RTS_CONTROL_ENABLE
            comDCB.fDtrControl = win32file.DTR_CONTROL_ENABLE
        comDCB.fOutxCtsFlow = self._rtscts
        comDCB.fOutxDsrFlow = self._rtscts
        comDCB.fOutX = self._xonxoff
        comDCB.fInX = self._xonxoff
        comDCB.fNull = 0
        comDCB.fErrorChar = 0
        comDCB.fAbortOnError = 0

        win32file.SetCommState(self.hComPort, comDCB)

    #~ def __del__(self):
    #~     self.close()

    def close(self):
        """Close port, restoring the timeouts saved in open()."""
        if self._isOpen:
            if self.hComPort:
                #Restore original timeout values:
                win32file.SetCommTimeouts(self.hComPort, self._orgTimeouts)
                #Close COM-Port:
                win32file.CloseHandle(self.hComPort)
                self.hComPort = None
            self._isOpen = False

    def makeDeviceName(self, port):
        # Delegate to the module-level helper.
        return device(port)

    # - - - - - - - - - - - - - - - - - - - - - - - -

    def inWaiting(self):
        """Return the number of characters currently in the input buffer."""
        flags, comstat = win32file.ClearCommError(self.hComPort)
        return comstat.cbInQue

    def read(self, size=1):
        """Read size bytes from the serial port. If a timeout is set it may
           return less characters as requested. With no timeout it will block
           until the requested number of bytes is read."""
        if not self.hComPort: raise portNotOpenError
        if size > 0:
            win32event.ResetEvent(self._overlappedRead.hEvent)
            flags, comstat = win32file.ClearCommError(self.hComPort)
            if self.timeout == 0:
                # Non-blocking: only take what is already queued.
                n = min(comstat.cbInQue, size)
                if n > 0:
                    rc, buf = win32file.ReadFile(self.hComPort, win32file.AllocateReadBuffer(n), self._overlappedRead)
                    win32event.WaitForSingleObject(self._overlappedRead.hEvent, win32event.INFINITE)
                    read = str(buf)
                else:
                    read = ''
            else:
                # Blocking (possibly with timeout): the comm timeouts set in
                # _reconfigurePort() bound how long GetOverlappedResult waits.
                rc, buf = win32file.ReadFile(self.hComPort, win32file.AllocateReadBuffer(size), self._overlappedRead)
                n = win32file.GetOverlappedResult(self.hComPort, self._overlappedRead, 1)
                read = str(buf[:n])
        else:
            read = ''
        return read

    def write(self, s):
        """Output the given string over the serial port."""
        if not self.hComPort: raise portNotOpenError
        #print repr(s),
        if s:
            err, n = win32file.WriteFile(self.hComPort, s, self._overlappedWrite)
            if err: #will be ERROR_IO_PENDING:
                # Wait for the write to complete.
                win32event.WaitForSingleObject(self._overlappedWrite.hEvent, win32event.INFINITE)

    def flushInput(self):
        """Clear input buffer, discarding all that is in the buffer."""
        if not self.hComPort: raise portNotOpenError
        win32file.PurgeComm(self.hComPort, win32file.PURGE_RXCLEAR | win32file.PURGE_RXABORT)

    def flushOutput(self):
        """Clear output buffer, aborting the current output and
        discarding all that is in the buffer."""
        if not self.hComPort: raise portNotOpenError
        win32file.PurgeComm(self.hComPort, win32file.PURGE_TXCLEAR | win32file.PURGE_TXABORT)

    def sendBreak(self):
        """Send break condition."""
        if not self.hComPort: raise portNotOpenError
        import time
        win32file.SetCommBreak(self.hComPort)
        #TODO: how to set the correct duration??
        time.sleep(0.020)
        win32file.ClearCommBreak(self.hComPort)

    def setRTS(self,level=1):
        """Set terminal status line: Request To Send"""
        if not self.hComPort: raise portNotOpenError
        if level:
            win32file.EscapeCommFunction(self.hComPort, win32file.SETRTS)
        else:
            win32file.EscapeCommFunction(self.hComPort, win32file.CLRRTS)

    def setDTR(self,level=1):
        """Set terminal status line: Data Terminal Ready"""
        if not self.hComPort: raise portNotOpenError
        if level:
            win32file.EscapeCommFunction(self.hComPort, win32file.SETDTR)
        else:
            win32file.EscapeCommFunction(self.hComPort, win32file.CLRDTR)

    def getCTS(self):
        """Read terminal status line: Clear To Send"""
        if not self.hComPort: raise portNotOpenError
        return MS_CTS_ON & win32file.GetCommModemStatus(self.hComPort) != 0

    def getDSR(self):
        """Read terminal status line: Data Set Ready"""
        if not self.hComPort: raise portNotOpenError
        return MS_DSR_ON & win32file.GetCommModemStatus(self.hComPort) != 0

    def getRI(self):
        """Read terminal status line: Ring Indicator"""
        if not self.hComPort: raise portNotOpenError
        return MS_RING_ON & win32file.GetCommModemStatus(self.hComPort) != 0

    def getCD(self):
        """Read terminal status line: Carrier Detect"""
        if not self.hComPort: raise portNotOpenError
        return MS_RLSD_ON & win32file.GetCommModemStatus(self.hComPort) != 0


if __name__ == '__main__':
    # Quick manual smoke test; requires real COM ports.
    print __name__
    s = Serial()
    print s

    s = Serial(0)
    print s

    s.baudrate = 19200
    s.databits = 7
    s.close()
    s.port = 3
    s.open()
    print s
import os

# Project root: two directory levels above this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

ADMINS = (
    ("David Barragán", "bameda@dbarragan.com"),
)

# NOTE(review): the secret key is committed to the repository — rotate it
# and load it from the environment before using these settings in
# production.
SECRET_KEY = '0q)_&-!hu%%en55a&cx!a2c^7aiw*7*+^zg%_&vk9&4&-4&qg#'

DEBUG = False

ALLOWED_HOSTS = ['*']

# Database: local SQLite file next to the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Internationalization.
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static and media files.
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',

    # Project apps.
    'monarch.base',
    'monarch.documents',
    'monarch.users',
]

MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'monarch.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'TEMPLATE_DEBUG': False,
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'wsgi.application'

# Password validation.
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
""" This module provides functions to parse a DEP. """ from builtins import int from builtins import range from .gettext_helper import _ import copy import ijson from math import ceil from six import string_types from . import utils from . import receipt class DEPException(utils.RKSVVerifyException): """ An exception that is thrown if something is wrong with a DEP. """ pass class DEPParseException(DEPException): """ Indicates that an error occurred while parsing the DEP. """ def __init__(self, msg): super(DEPParseException, self).__init__(msg) self._initargs = (msg,) class MalformedDEPException(DEPParseException): """ Indicates that the DEP is not properly formed. """ def __init__(self, msg=None, groupidx=None): if msg is None: super(MalformedDEPException, self).__init__(_("Malformed DEP")) else: if groupidx is None: super(MalformedDEPException, self).__init__( _('{}.').format(msg)) else: super(MalformedDEPException, self).__init__( _("In group {}: {}.").format(groupidx, msg)) self._initargs = (msg, groupidx) class MissingDEPElementException(MalformedDEPException): """ Indicates that an element in the DEP is missing. """ def __init__(self, elem, groupidx=None): super(MissingDEPElementException, self).__init__( _("Element \"{}\" missing").format(elem), groupidx) self._initargs = (elem, groupidx) class MalformedDEPElementException(MalformedDEPException): """ Indicates that an element in the DEP is malformed. """ def __init__(self, elem, detail=None, groupidx=None): if detail is None: super(MalformedDEPElementException, self).__init__( _("Element \"{}\" malformed").format(elem), groupidx) else: super(MalformedDEPElementException, self).__init__( _("Element \"{}\" malformed: {}").format(elem, detail), groupidx) self._initargs = (elem, detail, groupidx) class DuplicateDEPElementException(MalformedDEPException): """ Indicates that an element in the DEP is redundant. 
""" def __init__(self, elem, groupidx=None): super(DuplicateDEPElementException, self).__init__( _("Duplicate element \"{}\"").format(elem), groupidx) self._initargs = (elem, groupidx) class MalformedCertificateException(DEPParseException): """ Indicates that a certificate in the DEP is not properly formed. """ def __init__(self, cert): super(MalformedCertificateException, self).__init__( _("Certificate \"{}\" malformed.").format(cert)) self._initargs = (cert,) class DEPState(object): def __init__(self, upper = None): self.upper = upper def parse(self, prefix, event, value): raise NotImplementedError("Please implement this yourself.") def ready(self): return False def getChunk(self): raise NotImplementedError("Please implement this yourself.") def needCrt(self): return None def setCrt(self, cert, cert_chain): raise NotImplementedError("Please implement this yourself.") class DEPStateWithData(DEPState): def __init__(self, chunksize, upper = None): super(DEPStateWithData, self).__init__(upper) self.chunksize = chunksize if upper: self.chunk = self.upper.chunk else: self.chunk = list() def currentChunksize(self): return sum(len(recs) for recs, cert, cert_chain in self.chunk) def ready(self): if self.chunksize == 0: return False return self.currentChunksize() >= self.chunksize def getChunk(self): if self.currentChunksize() <= 0: return [] # Note that we only copy the groups (of which there are hopefully few) # FIXME: but still... 
ret = copy.copy(self.chunk) del self.chunk[:] return ret class DEPStateWithIncompleteData(DEPStateWithData): class WIPData(object): def __init__(self): self.cert = None self.cert_chain = None self.recs = list() def __init__(self, chunksize, upper, idx): super(DEPStateWithIncompleteData, self).__init__(chunksize, upper) if hasattr(upper, 'wip'): self.wip = upper.wip else: self.wip = DEPStateWithIncompleteData.WIPData() self.idx = idx def needCrt(self): if self.wip.cert is None or self.wip.cert_chain is None: return self.idx return None def setCrt(self, cert, cert_chain): self.wip.cert = cert self.wip.cert_chain = cert_chain def mergeIntoChunk(self): if len(self.wip.recs) > 0: clist = self.wip.cert_chain if clist is None: clist = list() self.chunk.append((self.wip.recs, self.wip.cert, clist)) self.wip.recs = list() def ready(self): if self.chunksize == 0: return False return self.currentChunksize() + len(self.wip.recs) >= self.chunksize def getChunk(self): self.mergeIntoChunk() return super(DEPStateWithIncompleteData, self).getChunk() class DEPStateRoot(DEPStateWithData): def __init__(self, chunksize): super(DEPStateRoot, self).__init__(chunksize) self.root_seen = False def parse(self, prefix, event, value): if prefix == '' and event == 'start_map' and value == None: if self.root_seen: raise MalformedDEPException(_('Duplicate DEP root')) self.root_seen = True return DEPStateRootMap(self.chunksize, self) raise MalformedDEPException(_('Malformed DEP root')) class DEPStateRootMap(DEPStateWithData): def __init__(self, chunksize, upper): super(DEPStateRootMap, self).__init__(chunksize, upper) self.groups_seen = False def parse(self, prefix, event, value): if prefix == '' and event == 'end_map': if not self.groups_seen: raise MissingDEPElementException('Belege-Gruppe') return self.upper if prefix == 'Belege-Gruppe': if event != 'start_array': raise MalformedDEPException(_('Malformed DEP root')) if self.groups_seen: raise MalformedDEPException(_('Duplicate DEP root')) 
self.groups_seen = True return DEPStateBGList(self.chunksize, self) # TODO: handle other elements return self class DEPStateBGList(DEPStateWithData): def __init__(self, chunksize, upper): super(DEPStateBGList, self).__init__(chunksize, upper) self.curIdx = 0 def parse(self, prefix, event, value): if prefix == 'Belege-Gruppe' and event == 'end_array': return self.upper if prefix == 'Belege-Gruppe.item' and event == 'start_map': nextState = DEPStateGroup(self.chunksize, self, self.curIdx) self.curIdx += 1 return nextState raise MalformedDEPElementException('Belege-Gruppe') class DEPStateGroup(DEPStateWithIncompleteData): def __init__(self, chunksize, upper, idx): super(DEPStateGroup, self).__init__(chunksize, upper, idx) self.recs_seen = False self.cert_seen = False self.cert_list_seen = False def parse(self, prefix, event, value): if prefix == 'Belege-Gruppe.item' and event == 'end_map': if not self.cert_seen: raise MissingDEPElementException('Signaturzertifikat', self.idx) if not self.cert_list_seen: raise MissingDEPElementException('Zertifizierungsstellen', self.idx) if not self.recs_seen: raise MissingDEPElementException('Belege-kompakt', self.idx) self.mergeIntoChunk() return self.upper if prefix == 'Belege-Gruppe.item.Signaturzertifikat': if self.cert_seen: raise DuplicateDEPElementException('Signaturzertifikat', self.idx) if event != 'string': raise MalformedDEPElementException('Signaturzertifikat', _('not a string'), self.idx) self.cert_seen = True self.wip.cert = parseDEPCert(value) if value != '' else None elif prefix == 'Belege-Gruppe.item.Zertifizierungsstellen': if self.cert_list_seen: raise DuplicateDEPElementException('Zertifizierungsstellen', self.idx) if event != 'start_array': raise MalformedDEPElementException('Zertifizierungsstellen', _('not a list'), self.idx) self.wip.cert_chain = list() self.cert_list_seen = True return DEPStateCertList(self.chunksize, self, self.idx) elif prefix == 'Belege-Gruppe.item.Belege-kompakt': if self.recs_seen: raise 
DuplicateDEPElementException('Belege-kompakt', self.idx) if event != 'start_array': raise MalformedDEPElementException('Belege-kompakt', _('not a list'), self.idx) self.recs_seen = True return DEPStateReceiptList(self.chunksize, self, self.idx) # TODO: handle other elements return self class DEPStateCertList(DEPStateWithIncompleteData): def parse(self, prefix, event, value): if prefix == 'Belege-Gruppe.item.Zertifizierungsstellen' and event == 'end_array': return self.upper if prefix == 'Belege-Gruppe.item.Zertifizierungsstellen.item' \ and event == 'string': self.wip.cert_chain.append(parseDEPCert(value)) return self raise MalformedDEPElementException('Zertifizierungsstellen', self.idx) class DEPStateReceiptList(DEPStateWithIncompleteData): def parse(self, prefix, event, value): if prefix == 'Belege-Gruppe.item.Belege-kompakt' and event == 'end_array': return self.upper if prefix == 'Belege-Gruppe.item.Belege-kompakt.item' \ and event == 'string': self.wip.recs.append(shrinkDEPReceipt(value)) return self raise MalformedDEPElementException('Belege-kompakt', self.idx) def shrinkDEPReceipt(rec, idx = None): """ Encode a JWS receipt string to a bytes representation. This takes up less memory. :param rec: The receipt JWS as a string. :param idx: The index of the group in the DEP to which the receipt belongs or None if it is unknown. This is only used to generate error messages. :return: The receipt JWS as a byte array. """ try: return rec.encode('utf-8') except TypeError: if idx is None: raise MalformedDEPElementException(_('Receipt \"{}\"').format(rec)) else: raise MalformedDEPElementException(_('Receipt \"{}\"').format(rec), idx) def expandDEPReceipt(rec, idx = None): """ Decodes a receipt JWS byte array to a regular string. :param rec: The receipt JWS as a byte array. :param idx: The index of the group in the DEP to which the receipt belongs or None if it is unknown. This is only used to generate error messages. :return: The receipt JWS as a string. 
""" try: return rec.decode('utf-8') except UnicodeDecodeError: if idx is None: raise MalformedDEPElementException(_('Receipt \"{}\"').format(rec)) else: raise MalformedDEPElementException(_('Receipt \"{}\"').format(rec), idx) def parseDEPCert(cert_str): """ Turns a certificate string as used in a DEP into a certificate object. :param cert_str: A certificate in PEM format without header and footer and on a single line. :return: A cryptography certificate object. :throws: MalformedCertificateException """ if not isinstance(cert_str, string_types): raise MalformedCertificateException(cert_str) try: return utils.loadCert(utils.addPEMCertHeaders(cert_str)) except ValueError: raise MalformedCertificateException(cert_str) class DEPParserI(object): """ The base class for DEP parsers. This interface allows reading a DEP in small chunks without having to store it in memory entirely. Do not use this directly, use one of the subclasses. """ def parse(self, chunksize = 0): """ This function parses a DEP and yields chunks of at most chunksize receipts. A chunk is a list of group tuples. Every group tuple consists of a list of receipt JWS as byte arrays, a certificate object containing the certificate used to sign the receipts (or None) and a list of certificate objects with the certificates used to sign the first certificate (or an empty list) in that order. If the chunksize is non-zero, every chunk is guaranteed to contain at most chunksize receipts in total (over all groups). Otherwise, the maximum number of receipts is implementation dependent. Every yielded chunk is guaranteed to contain at least one group tuple. :param chunksize: A positive number specifying the maximum number of receipts in one chunk or zero. :yield: One chunk at a time as described above. :throws: DEPParseException """ raise NotImplementedError("Please implement this yourself.") class IncrementalDEPParser(DEPParserI): """ A DEP parser that reads a DEP from a file descriptor. 
Do not use this directly, use one of the subclasses or the fromFd() method which will return an appropriate parser object. """ def __init__(self, fd): # skipBOM checks if we can seek, so no harm in doing it to a non-file self.startpos = utils.skipBOM(fd) self.fd = fd @staticmethod def fromFd(fd, need_certs=True): """ Returns a new IncrementalDEPParser object using the specified file descriptor. If chunks don't necessarily have to contain the DEP group certificates (because, for example, no signature verification is performed), the need_certs parameter can be set to False. In this case fromFd() will return a CertlessStreamDEPParser. If need_certs is True, it will return a FileDEPParser for a seekable file descriptor and a StreamDEPParser for a non-seekable one. :param fd: The file descriptor to use. :param need_certs: Whether chunks need to contain the group certificates. :return: An IncrementalDEPParser object using fd as data source. """ if not need_certs: return CertlessStreamDEPParser(fd) try: fd.tell() return FileDEPParser(fd) except IOError: return StreamDEPParser(fd) def _needCerts(self, state, chunksize, groupidx): raise NotImplementedError("Please implement this yourself.") def parse(self, chunksize = 0): parser = ijson.parse(self.fd) state = DEPStateRoot(chunksize) got_something = False try: for prefix, event, value in parser: nextState = state.parse(prefix, event, value) if state.ready(): needed = state.needCrt() if needed is not None: self._needCerts(state, chunksize, needed) yield state.getChunk() got_something = True state = nextState # The entire DEP is parsed, get the rest. # We should have found any certs here, so no check needed. 
last = state.getChunk() if len(last) > 0: yield last elif not got_something: raise MalformedDEPException(_('No receipts found')) except ijson.JSONError as e: raise DEPParseException(_('Malformed JSON: {}.').format(e)) class StreamDEPParser(IncrementalDEPParser): """ A DEP parser that reads a DEP from a stream type file descriptor. Such a file descriptor is not seekable. The parse() method will raise an exception if an element needed to construct a chunk was not read by the time the chunk has to be yielded. It will not perform any look-ahead operations because all receipts read until the missing elements are found would need to be stored in memory, thus defeating the purpose of the parser API. A chunksize of zero for the parse() method will cause all receipts in the DEP to be returned in a single chunk. """ def _needCerts(self, state, chunksize, groupidx): raise MalformedDEPException( _("Element \"Signaturzertifikat\" or \"Zertifizierungsstellen\" missing"), groupidx) def parse(self, chunksize = 0): return super(StreamDEPParser, self).parse(chunksize) class CertlessStreamDEPParser(StreamDEPParser): """ This DEP parser behaves identically to StreamDEPParser, except for the fact, that it will not raise an exception if a DEP element needed to construct the current chunk has not been read yet. Instead, the yielded chunk will have these elements set to None (for Signaturzertifikat) and the empty list (for Zertifizierungsstellen) respectively. Note that the parser will still not tolerate if the elements are missing altogether. """ def _needCerts(self, state, chunksize, groupidx): # Do nothing, we don't really care about certs. # The parser will still fail if they are outright missing, but we are ok # with returning chunks without certs even though the DEP contains some. pass class FileDEPParser(IncrementalDEPParser): """ A DEP parser that reads a DEP from a seekable file. 
If DEP elements needed to construct the current chunk are missing, this parser will perform an additional parsing pass to locate these elements before returning the chunk. If the total number of such elements is less than the given chunksize, they will be cached in memory to avoid having to do even more parsing passes. A chunksize of zero for the parse() method will cause all receipts in the DEP to be returned in a single chunk. """ def __getItems(self, prefix, chunksize): if prefix in self.cache: return self.cache[prefix] # cache miss, gotta parse the JSON again ofs = self.fd.tell() self.fd.seek(self.startpos) items = list(ijson.items(self.fd, prefix)) self.fd.seek(ofs) if chunksize == 0 or len(items) <= chunksize: self.cache[prefix] = items return items def _needCerts(self, state, chunksize, groupidx): cert_str = self.__getItems( 'Belege-Gruppe.item.Signaturzertifikat', chunksize)[groupidx] cert_str_list = self.__getItems( 'Belege-Gruppe.item.Zertifizierungsstellen', chunksize)[groupidx] cert = parseDEPCert(cert_str) if cert_str != '' else None cert_list = [ parseDEPCert(cs) for cs in cert_str_list ] state.setCrt(cert, cert_list) def parse(self, chunksize = 0): self.fd.seek(self.startpos) self.cache = dict() return super(FileDEPParser, self).parse(chunksize) def totalRecsInDictDEP(dep): def _nrecs(group): try: recs = group['Belege-kompakt'] if not isinstance(recs, list): return 0 return len(recs) except (TypeError, KeyError): return 0 bg = dep.get('Belege-Gruppe', []) if not isinstance(bg, list): return 0 return sum(_nrecs(g) for g in bg) class DictDEPParser(DEPParserI): """ A DEP parser that accepts an already parsed dictionary data structure and yields chunks of the requested size. This parser is intended to parse DEPs that are already completely in memory anyway but emulates the parser API for compatibility. If the chunksize is zero and the nparts parameter equals 1, the parse() method will return each group in the DEP in its own chunk. 
If the chunksize is zero and the nparts parameter is greater than 1, the parse() method will try to evenly distribute the receipts over nparts chunks. It will then yield at most nparts chunks. """ def __init__(self, dep, nparts = 1): self.dep = dep self.nparts = nparts pass def _parseDEPGroup(self, group, idx): if not isinstance(group, dict): raise MalformedDEPElementException('Belege-Gruppe', idx) if 'Belege-kompakt' not in group: raise MissingDEPElementException('Belege-kompakt', idx) if 'Signaturzertifikat' not in group: raise MissingDEPElementException('Signaturzertifikat', idx) if 'Zertifizierungsstellen' not in group: raise MissingDEPElementException('Zertifizierungsstellen', idx) cert_str = group['Signaturzertifikat'] cert_str_list = group['Zertifizierungsstellen'] receipts = (shrinkDEPReceipt(r) for r in group['Belege-kompakt']) if not isinstance(cert_str, string_types): raise MalformedDEPElementException('Signaturzertifikat', _('not a string'), idx) if not isinstance(cert_str_list, list): raise MalformedDEPElementException('Zertifizierungsstellen', _('not a list'), idx) try: iter(receipts) except TypeError: raise MalformedDEPElementException('Belege-kompakt', _('not a list'), idx) cert = parseDEPCert(cert_str) if cert_str != '' else None cert_list = [ parseDEPCert(cs) for cs in cert_str_list ] return receipts, cert, cert_list def _groupChunkGen(self, chunksize, groups): if chunksize == 0: groupidx = 0 for group in groups: recgen, cert, certs = self._parseDEPGroup(group, groupidx) recs = list(recgen) if len(recs) > 0: yield [(recs, cert, certs)] groupidx += 1 return chunk = list() chunklen = 0 groupidx = 0 for group in groups: recgen, cert, cert_list = self._parseDEPGroup(group, groupidx) nextrecs = list() for rec in recgen: nextrecs.append(rec) chunklen += 1 if chunklen >= chunksize: chunk.append((nextrecs, cert, cert_list)) yield chunk nextrecs = list() chunk = list() chunklen = 0 if len(nextrecs) > 0: chunk.append((nextrecs, cert, cert_list)) groupidx += 
1 if chunklen > 0: yield chunk def parse(self, chunksize = 0): if not isinstance(self.dep, dict): raise MalformedDEPException(_('Malformed DEP root')) if 'Belege-Gruppe' not in self.dep: raise MissingDEPElementException('Belege-Gruppe') bg = self.dep['Belege-Gruppe'] if not isinstance(bg, list) or not bg: raise MalformedDEPElementException('Belege-Gruppe') if self.nparts > 1 and not chunksize: nrecs = totalRecsInDictDEP(self.dep) chunksize = int(ceil(float(nrecs) / self.nparts)) got_something = False for chunk in self._groupChunkGen(chunksize, bg): yield chunk got_something = True if not got_something: raise MalformedDEPException(_('No receipts found')) class FullFileDEPParser(DEPParserI): """ This parser behaves like DictDEPParser but accepts a file descriptor from which to read the JSON instead of an already parsed dictionary structure. The file is read in its entirety on the first call to parse() and JSON parsed contents are kept in memory. Subsequent calls reuse these contents. """ def __init__(self, fd, nparts = 1): self.fd = fd self.nparts = nparts self.dictParser = None def parse(self, chunksize = 0): if not self.dictParser: try: dep = utils.readJsonStream(self.fd) except (IOError, UnicodeDecodeError, ValueError) as e: raise DEPParseException(_('Malformed JSON: {}.').format(e)) self.dictParser = DictDEPParser(dep, self.nparts) return self.dictParser.parse(chunksize) def receiptGroupAdapter(depgen): for chunk in depgen: for recs, cert, cert_list in chunk: rec_tuples = [ receipt.Receipt.fromJWSString(expandDEPReceipt(r)) for r in recs ] recs = None yield (rec_tuples, cert, cert_list) rec_tuples = None chunk = None
def migrate(cr, version): if not version: return # Replace ids of better_zip by ids of city_zip cr.execute(""" ALTER TABLE crm_event_compassion DROP CONSTRAINT crm_event_compassion_zip_id_fkey; UPDATE crm_event_compassion e SET zip_id = ( SELECT id FROM res_city_zip WHERE openupgrade_legacy_12_0_better_zip_id = e.zip_id) """)
from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('website', '0036_auto_20170813_0049'), ] operations = [ migrations.AlterField( model_name='userprofile', name='follows', field=models.ManyToManyField(blank=True, related_name='follower', to='website.UserProfile'), ), ]
""" A throwaway thread pool with thread-local storage. Throwaway Tasks =============== A throwaway task is one you'd *like* to get done, but it's not a big deal if it doesn't actually get done. Scary as it may be, throwaway tasks are quite common in the wild. They usually compute for a while and then *end* with *one* operation that permanently changes the state of the world: think of committing to a database or sending a lone network packet. Tasks that iterate while modifying the world, or tasks that need to apply more than one operation to change the world consistently, are *not* throwaway. You have been warned. This thread pool assumes that all tasks are throwaway. It doesn't care if they finish and it certainly doesn't care to tell anyone that a task is done. Also, while some pools go to great lengths to cope with blocked threads, this one assumes that your application is broken if no progress can be made for a certain amount of time; see add() below. Thread-local Storage ==================== Threads frequently require some local storage of their own, for example it may be necessary for each thread to hold its own database connection. This thread pool automatically equips each worker with local storage; see __init__() and add() below. 
""" import inspect as I import logging as L import Queue as Q import threading as T class _NullHandler(L.Handler): """Logging handler that does nothing.""" def emit(self, _record): pass L.getLogger("com.urbanban.threading.throwaway.pool").addHandler(_NullHandler()) class _Worker(T.Thread): """Worker thread, don't instantiate directly!""" def __init__(self, task_queue, init_local=None): """Initialize and start a new worker.""" super(_Worker, self).__init__() assert isinstance(task_queue, Q.Queue) assert init_local is None or callable(init_local) self.__task_queue = task_queue self.__init_local = init_local self.daemon = True self.start() def run(self): """Worker thread main loop.""" storage = self.__make_local() self.__run_forever(storage) def __make_local(self): """Create and initialize thread-local storage.""" storage = T.local() if self.__init_local is not None: self.__init_local(storage) return storage def __run_forever(self, storage): """Grab the next task and run it.""" while True: task = self.__task_queue.get() self.__run_task(task, storage) self.__task_queue.task_done() def __run_task(self, task, storage): """Run a single task.""" func, args, kwargs = task required_args, _, _, _ = I.getargspec(func) try: if '_tp_local' in required_args: func(_tp_local=storage, *args, **kwargs) else: func(*args, **kwargs) except Exception as exc: L.exception( "exception %s during %s ignored by thread pool", exc, func ) class ThreadPool(object): """The thread pool.""" def __init__(self, num_threads=4, max_tasks=16, timeout=32, init_local=None, stack_size=None): """ Initialize and start a new thread pool. Exactly num_threads will be spawned. At most max_tasks can be queued before add() blocks; add() blocks for at most timeout seconds before raising an exception. You can pass a callable with one argument as init_local to initialize thread-local storage for each thread; see add() below for how to access thread-local storage from your tasks. For example: import sqlite3 ... 
def init_local(local): local.connection = sqlite3.connect("some.db") ... pool = ThreadPool(init_local=init_local) """ assert num_threads > 0 assert max_tasks > 0 assert timeout > 0 # TODO: undocumented and probably a very bad idea assert stack_size is None or stack_size > 16*4096 if stack_size is not None: T.stack_size(stack_size) self.__queue = Q.Queue(max_tasks) self.__timeout = timeout for _ in range(num_threads): _Worker(self.__queue, init_local) def add(self, func, *args, **kwargs): """ Add a task. A task consists of a callable func and arguments for func. For example: def task(some, argu, ments=None): ... pool.add(task, act, ual, ments=parameters) You can access thread-local storage by requiring the special "_tp_local" argument for func. For example: def task(_tp_local, some, argu, ments=None): _tp_local.connection.rollback() ... _tp_local.connection.commit() ... pool.add(task, act, ual, ments=parameters) """ assert callable(func) self.__queue.put((func, args, kwargs), True, self.__timeout) def test(): """Simple example and test case.""" from random import uniform from time import sleep from signal import pause def init_local(local): """A silly local. :-D""" local.x = uniform(0, 1) local.y = 0 L.info("init_local local.x %s", local.x) def task(number, _tp_local): """A silly task. :-D""" L.info("task %s thread local.x %s", number, _tp_local.x) L.info("task %s started", number) sleep(uniform(1, 4)) L.info("task %s finished", number) _tp_local.y += 1 L.info("thread %s has finished %s tasks", _tp_local.x, _tp_local.y) pool = ThreadPool(init_local=init_local) L.info("starting to add tasks to pool") for i in range(32): pool.add(task, i) L.info("all tasks added, press CTRL-C to exit") pause() if __name__ == "__main__": L.basicConfig(level=L.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s") test()
"""Process one email message, read from stdin.""" import _pythonpath import sys from lp.services.config import config from lp.services.mail.helpers import save_mail_to_librarian from lp.services.mail.incoming import handle_one_mail from lp.services.mail.signedmessage import signed_message_from_string from lp.services.scripts.base import LaunchpadScript class ProcessMail(LaunchpadScript): usage = """%prog [options] [MAIL_FILE] Process one incoming email, read from the specified file or from stdin. Any mail generated in response is printed to stdout. """ + __doc__ def main(self): self.txn.begin() # NB: This somewhat duplicates handleMail, but there it's mixed in # with handling a mailbox, which we're avoiding here. if len(self.args) >= 1: from_file = file(self.args[0], 'rb') else: from_file = sys.stdin self.logger.debug("reading message from %r" % (from_file,)) raw_mail = from_file.read() self.logger.debug("got %d bytes" % len(raw_mail)) file_alias = save_mail_to_librarian(raw_mail) self.logger.debug("saved to librarian as %r" % (file_alias,)) parsed_mail = signed_message_from_string(raw_mail) # Kinda kludgey way to cause sendmail to just print it. config.sendmail_to_stdout = True handle_one_mail( self.logger, parsed_mail, file_alias, file_alias.http_url, signature_timestamp_checker=None) self.logger.debug("mail handling complete") self.txn.commit() if __name__ == '__main__': script = ProcessMail('process-one-mail', dbuser=config.processmail.dbuser) # No need to lock; you can run as many as you want as they use no global # resources (like a mailbox). script.run(use_web_security=True)
import flask mod = flask.Blueprint('api', __name__)
from odoo import models class SaleOrder(models.Model): _inherit = "sale.order" def action_confirm(self): res = super(SaleOrder, self).action_confirm() for order in self: order.procurement_group_id.stock_move_ids.created_production_id.write( {"analytic_account_id": order.analytic_account_id} ) return res
from django.conf.urls.defaults import * urlpatterns = patterns('', url(r'^$', 'blog.views.entry_list', name="entry-list"), url(r'^archive/(?P<year>\d{4})/$', 'blog.views.entry_archive_year', name="year-archive"), url(r'^archive/(?P<year>\d{4})/(?P<month>\d{1,2})/$', 'blog.views.entry_archive_month', name="month-archive"), url(r'^(?P<slug>[-\w]+)/$', 'blog.views.entry_detail', name="entry-detail"), )
from twisted.trial.unittest import TestCase from mock import Mock from twisted.web.test.test_web import DummyRequest from twisted.web.http import OK, NOT_FOUND from cryptosync.resources import make_site def make_request(uri='', method='GET', args={}): site = make_site(authenticator=Mock()) request = DummyRequest(uri.split('/')) request.method = method request.args = args resource = site.getResourceFor(request) request.render(resource) request.data = "".join(request.written) return request class RootResourceResponseCodesTestCase(TestCase): def test_root_resource_ok(self): request = make_request() self.assertEquals(request.responseCode, OK) def test_root_resource_not_found_url(self): request = make_request(uri='shouldneverfindthisthing') self.assertEquals(request.responseCode, NOT_FOUND) class AuthResourceTestCase(TestCase): def _try_auth(self, credentials, expected): request = make_request(uri='/auth/', method='POST', args=credentials) self.assertEquals(request.responseCode, OK) self.assertEquals(request.data, expected) def test_auth_success_with_good_parameters(self): credentials = {'username': 'myself', 'password': 'somethingawesome'} self._try_auth(credentials, '{"status": "success"}') def test_auth_failure_with_missing_parameters(self): credentials = {'username': 'myself', 'password': 'somethingawesome'} for (k, v) in credentials.items(): self._try_auth({k: v}, '{"status": "failure"}')
{ "name": "Purchase Order Approved", "summary": "Add a new state 'Approved' in purchase orders.", "version": "14.0.1.1.0", "category": "Purchases", "website": "https://github.com/OCA/purchase-workflow", "author": "ForgeFlow, Odoo Community Association (OCA)", "license": "AGPL-3", "application": False, "installable": True, "depends": ["purchase_stock"], "data": ["views/purchase_order_view.xml", "views/res_config_view.xml"], }
""" Copyright (C) 2008 by Steven Wallace snwallace@gmail.com This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. """ from __future__ import with_statement import struct import threading import sys, traceback, time def cascadeSetIn(a, b): a.setIn(b) return b class NetworkException(Exception): pass class Filter: def __init__(self, *args): self.input = None self.output = None self.server = False self.master = None self.initialized = threading.Event() self.wlock = threading.Lock() self.rlock = threading.Lock() self.init_lock = threading.Lock() self._init(*args) def _init(self, *args): pass def disconnect(self): if self.input: self.input.disconnect() def begin(self): with self.init_lock: if not self.initialized.isSet(): self._begin() if self.input: if not self.initialized.isSet(): self.initialized.wait() self.input.begin() def _begin(self): self.initialized.set() def end(self): if self.output: self.output.end() def setIn(self, input = None): self.input = input if input: input.setOut(self) def setOut(self, output = None): self.output = output def readIn(self, data): self.writeOut(data) def readOut(self, data): with self.rlock: self._readOut(data) def _readOut(self, data): self.writeIn(data) def writeIn(self, data): if self.input: self.input.readOut(data) def writeOut(self, data): self.initialized.wait() with self.wlock: self._writeOut(data) def _writeOut(self, 
data): if self.output: self.output.readIn(data) def error(self, error): raise NetworkException(error) class PacketizerFilter(Filter): def _init(self): self.received = "" def _readOut(self, data): self.received += data while len(self.received) > 3: length ,= struct.unpack("!i",self.received[:4]) if length + 4 <= len(self.received): self.writeIn(self.received[4:length+4]) self.received = self.received[length+4:] else: return def _writeOut(self, data): Filter._writeOut(self, struct.pack("!i",len(data))+data) class CompressionFilter(Filter): def _init(self): self.algorithms = {} self.otherAlgorithms = [] try: import zlib self.algorithms['z'] = zlib except: pass try: import bz2 self.algorithms['b'] = bz2 except: pass try: import noCompress self.algorithms['n'] = noCompress except: pass def _begin(self): if self.server: self._writeOut(''.join(self.algorithms.keys())) def _readOut(self, data): if not self.initialized.isSet(): if self.server: self.otherAlgorithms = [i for i in data] self.initialized.set() self.begin() else: self.otherAlgorithms = [i for i in data] self._writeOut(''.join(self.algorithms.keys())) self.initialized.set() self.begin() else: algorithm = data[0] if algorithm not in self.algorithms: self.error("UNKNOWN COMPRESSION ALGORITHM " + data) self.writeIn(self.algorithms[algorithm].decompress(data[1:])) def _writeOut(self, data): if not self.initialized: Filter._writeOut(self, data) else: algorithm = 'n' newData = data for i in self.otherAlgorithms: if i in self.algorithms: tmpData = self.algorithms[i].compress(data, 9) if len(tmpData) < len(newData): newData = tmpData algorithm = i Filter._writeOut(self, ''.join((algorithm, newData))) def EncryptionFilter(Filter): pass #TODO class TCPFilter(Filter): def _init(self, connection = None): self.connection = connection def _writeOut(self, data): if self.connection: try: self.connection.send(data) except: pass def poll(self): try: data = self.connection.recv(4096) if data: self.readOut(data) else: 
self.disconnect() except: print "bleh!" traceback.print_exc(file=sys.stdout) self.disconnect() def disconnect(self): self.master.remove(self.connection) if self.connection: self.connection.close() Filter.disconnect(self) def end(self): self.disconnect()
import logging

from odoo import api, fields, models, _
from odoo.exceptions import UserError, ValidationError
from odoo.tools.safe_eval import safe_eval

_logger = logging.getLogger(__name__)


class DeliveryCarrier(models.Model):
    _name = 'delivery.carrier'
    _inherits = {'product.product': 'product_id'}
    _description = "Carrier"
    _order = 'sequence, id'

    ''' A Shipping Provider

    In order to add your own external provider, follow these steps:

    1. Create your model MyProvider that _inherit 'delivery.carrier'
    2. Extend the selection of the field "delivery_type" with a pair
       ('<my_provider>', 'My Provider')
    3. Add your methods:
       <my_provider>_get_shipping_price_from_so
       <my_provider>_send_shipping
       <my_provider>_open_tracking_page
       <my_provider>_cancel_shipment
       (they are documented hereunder)
    '''

    # -------------------------------- #
    # Internals for shipping providers #
    # -------------------------------- #

    sequence = fields.Integer(help="Determine the display order", default=10)
    # This field will be overwritten by internal shipping providers by adding their own type (ex: 'fedex')
    delivery_type = fields.Selection([('fixed', 'Fixed Price'), ('base_on_rule', 'Based on Rules')], string='Provider', default='fixed', required=True)
    product_type = fields.Selection(related='product_id.type', default='service')
    product_sale_ok = fields.Boolean(related='product_id.sale_ok', default=False)
    product_id = fields.Many2one('product.product', string='Delivery Product', required=True, ondelete="cascade")

    # Non-stored computes; both are filled together by get_price().
    price = fields.Float(compute='get_price')
    available = fields.Boolean(compute='get_price')

    free_if_more_than = fields.Boolean('Free if Order total is more than', help="If the order is more expensive than a certain amount, the customer can benefit from a free shipping", default=False)
    amount = fields.Float(string='Amount', help="Amount of the order to benefit from a free shipping, expressed in the company currency")

    # Destination restrictions used by verify_carrier().
    country_ids = fields.Many2many('res.country', 'delivery_carrier_country_rel', 'carrier_id', 'country_id', 'Countries')
    state_ids = fields.Many2many('res.country.state', 'delivery_carrier_state_rel', 'carrier_id', 'state_id', 'States')
    zip_from = fields.Char('Zip From')
    zip_to = fields.Char('Zip To')

    price_rule_ids = fields.One2many('delivery.price.rule', 'carrier_id', 'Pricing Rules', copy=True)
    fixed_price = fields.Float(compute='_compute_fixed_price', inverse='_set_product_fixed_price', store=True, string='Fixed Price',help="Keep empty if the pricing depends on the advanced pricing per destination")

    integration_level = fields.Selection([('rate', 'Get Rate'), ('rate_and_ship', 'Get Rate and Create Shipment')], string="Integration Level", default='rate_and_ship', help="Action while validating Delivery Orders")
    prod_environment = fields.Boolean("Environment", help="Set to True if your credentials are certified for production.")
    margin = fields.Integer(help='This percentage will be added to the shipping price.')

    _sql_constraints = [
        ('margin_not_under_100_percent', 'CHECK (margin >= -100)', 'Margin cannot be lower than -100%'),
    ]

    @api.one
    def toggle_prod_environment(self):
        # Flip between test and production credentials.
        self.prod_environment = not self.prod_environment

    @api.multi
    def install_more_provider(self):
        # Open the module list filtered on delivery_* addons.
        return {
            'name': 'New Providers',
            'view_mode': 'kanban',
            'res_model': 'ir.module.module',
            'domain': [['name', 'ilike', 'delivery_']],
            'type': 'ir.actions.act_window',
            'help': _('''<p class="oe_view_nocontent">
                    Buy Odoo Enterprise now to get more providers.
                </p>'''),
        }

    @api.multi
    def name_get(self):
        # When called from a sale order context, append the computed price
        # (in the order's currency) to each carrier's display name.
        display_delivery = self.env.context.get('display_delivery', False)
        order_id = self.env.context.get('order_id', False)
        if display_delivery and order_id:
            order = self.env['sale.order'].browse(order_id)
            currency = order.pricelist_id.currency_id.name or ''
            res = []
            for carrier_id in self.ids:
                try:
                    r = self.read([carrier_id], ['name', 'price'])[0]
                    res.append((r['id'], r['name'] + ' (' + (str(r['price'])) + ' ' + currency + ')'))
                except ValidationError:
                    # Price computation failed: fall back to the bare name.
                    r = self.read([carrier_id], ['name'])[0]
                    res.append((r['id'], r['name']))
        else:
            res = super(DeliveryCarrier, self).name_get()
        return res

    @api.depends('product_id.list_price', 'product_id.product_tmpl_id.list_price')
    def _compute_fixed_price(self):
        # Mirror of the delivery product's list price.
        for carrier in self:
            carrier.fixed_price = carrier.product_id.list_price

    def _set_product_fixed_price(self):
        # Inverse: writing fixed_price updates the product's list price.
        for carrier in self:
            carrier.product_id.list_price = carrier.fixed_price

    @api.one
    def get_price(self):
        """Compute price/available for the order found in the context."""
        SaleOrder = self.env['sale.order']

        self.available = False
        self.price = False  # False is stored as 0.0 for a Float field

        order_id = self.env.context.get('order_id')
        if order_id:
            # FIXME: temporary hack until we refactor the delivery API in master
            order = SaleOrder.browse(order_id)
            if self.delivery_type not in ['fixed', 'base_on_rule']:
                # External provider: delegate to its rating method.
                try:
                    computed_price = self.get_shipping_price_from_so(order)[0]
                    self.available = True
                except ValidationError as e:
                    # No suitable delivery method found, probably configuration error
                    _logger.info("Carrier %s: %s, not found", self.name, e.name)
                    computed_price = 0.0
            else:
                # Internal pricing: check destination then apply the rules.
                carrier = self.verify_carrier(order.partner_shipping_id)
                if carrier:
                    try:
                        computed_price = carrier.get_price_available(order)
                        self.available = True
                    except UserError as e:
                        # No suitable delivery method found, probably configuration error
                        _logger.info("Carrier %s: %s", carrier.name, e.name)
                        computed_price = 0.0
                else:
                    computed_price = 0.0

            self.price = computed_price * (1.0 + (float(self.margin) / 100.0))

    # -------------------------- #
    # API for external providers #
    # -------------------------- #

    # TODO define and handle exceptions that could be thrown by providers

    def get_shipping_price_from_so(self, orders):
        ''' For every sale order, compute the price of the shipment

        :param orders: A recordset of sale orders
        :return list: A list of floats, containing the estimated price for the shipping of the sale order
        '''
        self.ensure_one()
        # Dispatch to <delivery_type>_get_shipping_price_from_so if defined.
        if hasattr(self, '%s_get_shipping_price_from_so' % self.delivery_type):
            return getattr(self, '%s_get_shipping_price_from_so' % self.delivery_type)(orders)

    def send_shipping(self, pickings):
        ''' Send the package to the service provider

        :param pickings: A recordset of pickings
        :return list: A list of dictionaries (one per picking) containing of the form::
                         { 'exact_price': price, 'tracking_number': number }
        '''
        self.ensure_one()
        if hasattr(self, '%s_send_shipping' % self.delivery_type):
            return getattr(self, '%s_send_shipping' % self.delivery_type)(pickings)

    def get_tracking_link(self, pickings):
        ''' Ask the tracking link to the service provider

        :param pickings: A recordset of pickings
        :return list: A list of string URLs, containing the tracking links for every picking
        '''
        self.ensure_one()
        if hasattr(self, '%s_get_tracking_link' % self.delivery_type):
            return getattr(self, '%s_get_tracking_link' % self.delivery_type)(pickings)

    def cancel_shipment(self, pickings):
        ''' Cancel a shipment

        :param pickings: A recordset of pickings
        '''
        self.ensure_one()
        if hasattr(self, '%s_cancel_shipment' % self.delivery_type):
            return getattr(self, '%s_cancel_shipment' % self.delivery_type)(pickings)

    @api.onchange('state_ids')
    def onchange_states(self):
        # Keep countries in sync with the selected states.
        self.country_ids = [(6, 0, self.country_ids.ids + self.state_ids.mapped('country_id.id'))]

    @api.onchange('country_ids')
    def onchange_countries(self):
        # Drop states whose country is no longer selected.
        self.state_ids = [(6, 0, self.state_ids.filtered(lambda state: state.id in self.country_ids.mapped('state_ids').ids).ids)]

    @api.multi
    def verify_carrier(self, contact):
        """Return self if *contact* matches the destination filters, else False."""
        self.ensure_one()
        if self.country_ids and contact.country_id not in self.country_ids:
            return False
        if self.state_ids and contact.state_id not in self.state_ids:
            return False
        # Zip bounds are compared as strings (lexicographic order).
        if self.zip_from and (contact.zip or '') < self.zip_from:
            return False
        if self.zip_to and (contact.zip or '') > self.zip_to:
            return False
        return self

    @api.multi
    def create_price_rules(self):
        """Regenerate price rules for fixed-price / free-above carriers."""
        PriceRule = self.env['delivery.price.rule']
        for record in self:
            # If using advanced pricing per destination: do not change
            if record.delivery_type == 'base_on_rule':
                continue

            # Not using advanced pricing per destination: override lines
            if record.delivery_type == 'base_on_rule' and not (record.fixed_price is not False or record.free_if_more_than):
                record.price_rule_ids.unlink()

            # Check that float, else 0.0 is False
            if not (record.fixed_price is not False or record.free_if_more_than):
                continue

            if record.delivery_type == 'fixed':
                PriceRule.search([('carrier_id', '=', record.id)]).unlink()

            line_data = {
                'carrier_id': record.id,
                'variable': 'price',
                'operator': '>=',
            }
            # Create the delivery price rules
            if record.free_if_more_than:
                line_data.update({
                    'max_value': record.amount,
                    'standard_price': 0.0,
                    'list_base_price': 0.0,
                })
                PriceRule.create(line_data)
            if record.fixed_price is not False:
                line_data.update({
                    'max_value': 0.0,
                    'standard_price': record.fixed_price,
                    'list_base_price': record.fixed_price,
                })
                PriceRule.create(line_data)
        return True

    @api.model
    def create(self, vals):
        res = super(DeliveryCarrier, self).create(vals)
        # Keep the generated price rules consistent with the new values.
        res.create_price_rules()
        return res

    @api.multi
    def write(self, vals):
        res = super(DeliveryCarrier, self).write(vals)
        self.create_price_rules()
        return res

    @api.multi
    def get_price_available(self, order):
        """Total up the order (excluding delivery lines) and rate it."""
        self.ensure_one()
        total = weight = volume = quantity = 0
        total_delivery = 0.0
        for line in order.order_line:
            if line.state == 'cancel':
                continue
            if line.is_delivery:
                total_delivery += line.price_total
            if not line.product_id or line.is_delivery:
                continue
            qty = line.product_uom._compute_quantity(line.product_uom_qty, line.product_id.uom_id)
            weight += (line.product_id.weight or 0.0) * qty
            volume += (line.product_id.volume or 0.0) * qty
            quantity += qty
        total = (order.amount_total or 0.0) - total_delivery

        # Convert to the company currency before evaluating the rules.
        total = order.currency_id.with_context(date=order.date_order).compute(total, order.company_id.currency_id)

        return self.get_price_from_picking(total, weight, volume, quantity)

    def get_price_from_picking(self, total, weight, volume, quantity):
        """Evaluate price rules in order; first matching rule wins.

        :raises UserError: when no rule criterion matches.
        """
        price = 0.0
        criteria_found = False
        price_dict = {'price': total, 'volume': volume, 'weight': weight, 'wv': volume * weight, 'quantity': quantity}
        for line in self.price_rule_ids:
            # Rule condition, e.g. "weight>=10", evaluated in a sandbox.
            test = safe_eval(line.variable + line.operator + str(line.max_value), price_dict)
            if test:
                price = line.list_base_price + line.list_price * price_dict[line.variable_factor]
                criteria_found = True
                break
        if not criteria_found:
            raise UserError(_("Selected product in the delivery method doesn't fulfill any of the delivery carrier(s) criteria."))

        return price
from __future__ import unicode_literals import json from rest_framework.test import APIClient from rest_framework import status from shuup.core.models import Order from shuup.testing.factories import ( create_order_with_product, get_default_product, get_default_shop, get_default_supplier ) def create_order(): shop = get_default_shop() product = get_default_product() supplier = get_default_supplier() order = create_order_with_product( product, shop=shop, supplier=supplier, quantity=1, taxless_base_unit_price=10, ) order.cache_prices() order.save() return order def get_client(admin_user): client = APIClient() client.force_authenticate(user=admin_user) return client def get_create_payment_url(order_pk): return "/api/shuup/order/%s/create_payment/" % order_pk def get_set_fully_paid_url(order_pk): return "/api/shuup/order/%s/set_fully_paid/" % order_pk def get_order_url(order_pk): return "/api/shuup/order/%s/" % order_pk def test_create_payment(admin_user): order = create_order() client = get_client(admin_user) payment_identifier = "some_identifier" data = { "amount_value": 1, "payment_identifier": payment_identifier, "description": "some_payment" } response = client.post( get_create_payment_url(order.pk), data, format="json" ) assert response.status_code == status.HTTP_201_CREATED assert order.get_total_paid_amount().value == 1 response = client.get( get_order_url(order.pk), format="json" ) assert response.status_code == status.HTTP_200_OK order_data = json.loads(response.content.decode("utf-8")) payments = order_data["payments"] assert len(payments) == 1 assert payments[0]["payment_identifier"] == payment_identifier def test_set_fully_paid(admin_user): order = create_order() client = get_client(admin_user) data = { "payment_identifier": 1, "description": "some_payment" } order_pk = order.pk response = client.post( get_set_fully_paid_url(order_pk), data, format="json" ) assert response.status_code == status.HTTP_201_CREATED order = Order.objects.get(pk=order_pk) assert 
bool(order.is_paid()) currently_paid_amount = order.get_total_paid_amount() # Make sure that api works with already fully paid orders response = client.post( "/api/shuup/order/%s/set_fully_paid/" % order_pk, data, format="json" ) assert response.status_code == status.HTTP_200_OK order = Order.objects.get(pk=order_pk) assert bool(order.is_paid()) assert currently_paid_amount == order.get_total_paid_amount() def test_set_paid_from_partially_paid_order(admin_user): order = create_order() client = get_client(admin_user) data = { "amount_value": 1, "payment_identifier": 1, "description": "some_payment" } response = client.post( get_create_payment_url(order.pk), data, format="json" ) assert response.status_code == status.HTTP_201_CREATED assert order.get_total_paid_amount().value == 1 data = { "payment_identifier": 2, "description": "some_payment" } order_pk = order.pk response = client.post( get_set_fully_paid_url(order_pk), data, format="json" ) assert response.status_code == status.HTTP_201_CREATED order = Order.objects.get(pk=order_pk) assert bool(order.is_paid()) assert bool(order.get_total_paid_amount() == order.taxful_total_price.amount)
'''
@since: 2015-01-07
@author: moschlar
'''

import sqlalchemy.types as sqlat
import tw2.core as twc
import tw2.bootstrap.forms as twb
import tw2.jqplugins.chosen.widgets as twjc
import sprox.widgets.tw2widgets.widgets as sw
from sprox.sa.widgetselector import SAWidgetSelector
from sprox.sa.validatorselector import SAValidatorSelector, Email

from sauce.widgets.widgets import (LargeMixin, SmallMixin, AdvancedWysihtml5,
    MediumTextField, SmallTextField, CalendarDateTimePicker)
from sauce.widgets.validators import AdvancedWysihtml5BleachValidator


class ChosenPropertyMultipleSelectField(LargeMixin, twjc.ChosenMultipleSelectField, sw.PropertyMultipleSelectField):
    """Chosen-based multi-select that enforces "required" itself."""

    # Match anywhere in the option text, not only at the start.
    search_contains = True

    def _validate(self, value, state=None):
        value = super(ChosenPropertyMultipleSelectField, self)._validate(value, state)
        # Required multi-selects must yield at least one selection.
        if self.required and not value:
            raise twc.ValidationError('Please select at least one value')
        else:
            return value


class ChosenPropertySingleSelectField(SmallMixin, twjc.ChosenSingleSelectField, sw.PropertySingleSelectField):
    """Chosen-based single select with contains-style searching."""

    search_contains = True


class MyWidgetSelector(SAWidgetSelector):
    '''Custom WidgetSelector for SAUCE

    Primarily uses fields from tw2.bootstrap.forms and tw2.jqplugins.chosen.
    '''

    # Columns shorter than this render as a single-line text field
    # instead of a textarea (see select() below).
    text_field_limit = 256

    default_multiple_select_field_widget_type = ChosenPropertyMultipleSelectField
    default_single_select_field_widget_type = ChosenPropertySingleSelectField

    # Column-name based overrides take precedence over type-based defaults.
    default_name_based_widgets = {
        'name': MediumTextField,
        'subject': MediumTextField,
        '_url': MediumTextField,
        'user_name': MediumTextField,
        'email_address': MediumTextField,
        '_display_name': MediumTextField,
        'description': AdvancedWysihtml5,
        'message': AdvancedWysihtml5,
    }

    def __init__(self, *args, **kwargs):
        # Map SQLAlchemy column types to the bootstrap/chosen widget set.
        self.default_widgets.update({
            sqlat.String: MediumTextField,
            sqlat.Integer: SmallTextField,
            sqlat.Numeric: SmallTextField,
            sqlat.DateTime: CalendarDateTimePicker,
            sqlat.Date: twb.CalendarDatePicker,
            sqlat.Time: twb.CalendarTimePicker,
            sqlat.Binary: twb.FileField,
            sqlat.BLOB: twb.FileField,
            sqlat.PickleType: MediumTextField,
            sqlat.Enum: twjc.ChosenSingleSelectField,
        })
        super(MyWidgetSelector, self).__init__(*args, **kwargs)

    def select(self, field):
        widget = super(MyWidgetSelector, self).select(field)
        # Downgrade textareas to a single-line field for short/unbounded
        # string columns (length is None or below text_field_limit).
        if (issubclass(widget, sw.TextArea)
                and hasattr(field.type, 'length')
                and (field.type.length is None or field.type.length < self.text_field_limit)):
            widget = MediumTextField
        return widget


class MyValidatorSelector(SAValidatorSelector):
    """Name-based validator overrides for SAUCE forms."""

    _name_based_validators = {
        'email_address': Email,
        'description': AdvancedWysihtml5BleachValidator,
        'message': AdvancedWysihtml5BleachValidator,
    }

    # def select(self, field):
    #     print 'MyValidatorSelector', 'select', field
    #     return super(MyValidatorSelector, self).select(field)
""" Code to allow module store to interface with courseware index """ from __future__ import absolute_import from abc import ABCMeta, abstractmethod from datetime import timedelta import logging import re from six import add_metaclass from django.conf import settings from django.utils.translation import ugettext_lazy, ugettext as _ from django.core.urlresolvers import resolve from contentstore.course_group_config import GroupConfiguration from course_modes.models import CourseMode from eventtracking import tracker from openedx.core.lib.courses import course_image_url from search.search_engine_base import SearchEngine from xmodule.annotator_mixin import html_to_text from xmodule.modulestore import ModuleStoreEnum from xmodule.library_tools import normalize_key_for_search REINDEX_AGE = timedelta(0, 60) # 60 seconds log = logging.getLogger('edx.modulestore') def strip_html_content_to_text(html_content): """ Gets only the textual part for html content - useful for building text to be searched """ # Removing HTML-encoded non-breaking space characters text_content = re.sub(r"(\s|&nbsp;|//)+", " ", html_to_text(html_content)) # Removing HTML CDATA text_content = re.sub(r"<!\[CDATA\[.*\]\]>", "", text_content) # Removing HTML comments text_content = re.sub(r"<!--.*-->", "", text_content) return text_content def indexing_is_enabled(): """ Checks to see if the indexing feature is enabled """ return settings.FEATURES.get('ENABLE_COURSEWARE_INDEX', False) class SearchIndexingError(Exception): """ Indicates some error(s) occured during indexing """ def __init__(self, message, error_list): super(SearchIndexingError, self).__init__(message) self.error_list = error_list @add_metaclass(ABCMeta) class SearchIndexerBase(object): """ Base class to perform indexing for courseware or library search from different modulestores """ __metaclass__ = ABCMeta INDEX_NAME = None DOCUMENT_TYPE = None ENABLE_INDEXING_KEY = None INDEX_EVENT = { 'name': None, 'category': None } @classmethod def 
indexing_is_enabled(cls): """ Checks to see if the indexing feature is enabled """ return settings.FEATURES.get(cls.ENABLE_INDEXING_KEY, False) @classmethod @abstractmethod def normalize_structure_key(cls, structure_key): """ Normalizes structure key for use in indexing """ @classmethod @abstractmethod def _fetch_top_level(cls, modulestore, structure_key): """ Fetch the item from the modulestore location """ @classmethod @abstractmethod def _get_location_info(cls, normalized_structure_key): """ Builds location info dictionary """ @classmethod def _id_modifier(cls, usage_id): """ Modifies usage_id to submit to index """ return usage_id @classmethod def remove_deleted_items(cls, searcher, structure_key, exclude_items): """ remove any item that is present in the search index that is not present in updated list of indexed items as we find items we can shorten the set of items to keep """ response = searcher.search( doc_type=cls.DOCUMENT_TYPE, field_dictionary=cls._get_location_info(structure_key), exclude_dictionary={"id": list(exclude_items)} ) result_ids = [result["data"]["id"] for result in response["results"]] searcher.remove(cls.DOCUMENT_TYPE, result_ids) @classmethod def index(cls, modulestore, structure_key, triggered_at=None, reindex_age=REINDEX_AGE): """ Process course for indexing Arguments: modulestore - modulestore object to use for operations structure_key (CourseKey|LibraryKey) - course or library identifier triggered_at (datetime) - provides time at which indexing was triggered; useful for index updates - only things changed recently from that date (within REINDEX_AGE above ^^) will have their index updated, others skip updating their index but are still walked through in order to identify which items may need to be removed from the index If None, then a full reindex takes place Returns: Number of items that have been added to the index """ error_list = [] searcher = SearchEngine.get_search_engine(cls.INDEX_NAME) if not searcher: return structure_key = 
cls.normalize_structure_key(structure_key) location_info = cls._get_location_info(structure_key) # Wrap counter in dictionary - otherwise we seem to lose scope inside the embedded function `prepare_item_index` indexed_count = { "count": 0 } # indexed_items is a list of all the items that we wish to remain in the # index, whether or not we are planning to actually update their index. # This is used in order to build a query to remove those items not in this # list - those are ready to be destroyed indexed_items = set() # items_index is a list of all the items index dictionaries. # it is used to collect all indexes and index them using bulk API, # instead of per item index API call. items_index = [] def get_item_location(item): """ Gets the version agnostic item location """ return item.location.version_agnostic().replace(branch=None) def prepare_item_index(item, skip_index=False, groups_usage_info=None): """ Add this item to the items_index and indexed_items list Arguments: item - item to add to index, its children will be processed recursively skip_index - simply walk the children in the tree, the content change is older than the REINDEX_AGE window and would have been already indexed. 
This should really only be passed from the recursive child calls when this method has determined that it is safe to do so Returns: item_content_groups - content groups assigned to indexed item """ is_indexable = hasattr(item, "index_dictionary") item_index_dictionary = item.index_dictionary() if is_indexable else None # if it's not indexable and it does not have children, then ignore if not item_index_dictionary and not item.has_children: return item_content_groups = None if item.category == "split_test": split_partition = item.get_selected_partition() for split_test_child in item.get_children(): if split_partition: for group in split_partition.groups: group_id = unicode(group.id) child_location = item.group_id_to_child.get(group_id, None) if child_location == split_test_child.location: groups_usage_info.update({ unicode(get_item_location(split_test_child)): [group_id], }) for component in split_test_child.get_children(): groups_usage_info.update({ unicode(get_item_location(component)): [group_id] }) if groups_usage_info: item_location = get_item_location(item) item_content_groups = groups_usage_info.get(unicode(item_location), None) item_id = unicode(cls._id_modifier(item.scope_ids.usage_id)) indexed_items.add(item_id) if item.has_children: # determine if it's okay to skip adding the children herein based upon how recently any may have changed skip_child_index = skip_index or \ (triggered_at is not None and (triggered_at - item.subtree_edited_on) > reindex_age) children_groups_usage = [] for child_item in item.get_children(): if modulestore.has_published_version(child_item): children_groups_usage.append( prepare_item_index( child_item, skip_index=skip_child_index, groups_usage_info=groups_usage_info ) ) if None in children_groups_usage: item_content_groups = None if skip_index or not item_index_dictionary: return item_index = {} # if it has something to add to the index, then add it try: item_index.update(location_info) item_index.update(item_index_dictionary) 
item_index['id'] = item_id if item.start: item_index['start_date'] = item.start item_index['content_groups'] = item_content_groups if item_content_groups else None item_index.update(cls.supplemental_fields(item)) items_index.append(item_index) indexed_count["count"] += 1 return item_content_groups except Exception as err: # pylint: disable=broad-except # broad exception so that index operation does not fail on one item of many log.warning('Could not index item: %s - %r', item.location, err) error_list.append(_('Could not index item: {}').format(item.location)) try: with modulestore.branch_setting(ModuleStoreEnum.RevisionOption.published_only): structure = cls._fetch_top_level(modulestore, structure_key) groups_usage_info = cls.fetch_group_usage(modulestore, structure) # First perform any additional indexing from the structure object cls.supplemental_index_information(modulestore, structure) # Now index the content for item in structure.get_children(): prepare_item_index(item, groups_usage_info=groups_usage_info) searcher.index(cls.DOCUMENT_TYPE, items_index) cls.remove_deleted_items(searcher, structure_key, indexed_items) except Exception as err: # pylint: disable=broad-except # broad exception so that index operation does not prevent the rest of the application from working log.exception( "Indexing error encountered, courseware index may be out of date %s - %r", structure_key, err ) error_list.append(_('General indexing error occurred')) if error_list: raise SearchIndexingError('Error(s) present during indexing', error_list) return indexed_count["count"] @classmethod def _do_reindex(cls, modulestore, structure_key): """ (Re)index all content within the given structure (course or library), tracking the fact that a full reindex has taken place """ indexed_count = cls.index(modulestore, structure_key) if indexed_count: cls._track_index_request(cls.INDEX_EVENT['name'], cls.INDEX_EVENT['category'], indexed_count) return indexed_count @classmethod def 
_track_index_request(cls, event_name, category, indexed_count): """Track content index requests. Arguments: event_name (str): Name of the event to be logged. category (str): category of indexed items indexed_count (int): number of indexed items Returns: None """ data = { "indexed_count": indexed_count, 'category': category, } tracker.emit( event_name, data ) @classmethod def fetch_group_usage(cls, modulestore, structure): # pylint: disable=unused-argument """ Base implementation of fetch group usage on course/library. """ return None @classmethod def supplemental_index_information(cls, modulestore, structure): """ Perform any supplemental indexing given that the structure object has already been loaded. Base implementation performs no operation. Arguments: modulestore - modulestore object used during the indexing operation structure - structure object loaded during the indexing job Returns: None """ pass @classmethod def supplemental_fields(cls, item): # pylint: disable=unused-argument """ Any supplemental fields that get added to the index for the specified item. 
Base implementation returns an empty dictionary """ return {} class CoursewareSearchIndexer(SearchIndexerBase): """ Class to perform indexing for courseware search from different modulestores """ INDEX_NAME = "courseware_index" DOCUMENT_TYPE = "courseware_content" ENABLE_INDEXING_KEY = 'ENABLE_COURSEWARE_INDEX' INDEX_EVENT = { 'name': 'edx.course.index.reindexed', 'category': 'courseware_index' } UNNAMED_MODULE_NAME = ugettext_lazy("(Unnamed)") @classmethod def normalize_structure_key(cls, structure_key): """ Normalizes structure key for use in indexing """ return structure_key @classmethod def _fetch_top_level(cls, modulestore, structure_key): """ Fetch the item from the modulestore location """ return modulestore.get_course(structure_key, depth=None) @classmethod def _get_location_info(cls, normalized_structure_key): """ Builds location info dictionary """ return {"course": unicode(normalized_structure_key), "org": normalized_structure_key.org} @classmethod def do_course_reindex(cls, modulestore, course_key): """ (Re)index all content within the given course, tracking the fact that a full reindex has taken place """ return cls._do_reindex(modulestore, course_key) @classmethod def fetch_group_usage(cls, modulestore, structure): groups_usage_dict = {} groups_usage_info = GroupConfiguration.get_content_groups_usage_info(modulestore, structure).items() groups_usage_info.extend( GroupConfiguration.get_content_groups_items_usage_info( modulestore, structure ).items() ) if groups_usage_info: for name, group in groups_usage_info: for module in group: view, args, kwargs = resolve(module['url']) # pylint: disable=unused-variable usage_key_string = unicode(kwargs['usage_key_string']) if groups_usage_dict.get(usage_key_string, None): groups_usage_dict[usage_key_string].append(name) else: groups_usage_dict[usage_key_string] = [name] return groups_usage_dict @classmethod def supplemental_index_information(cls, modulestore, structure): """ Perform additional indexing from 
loaded structure object """ CourseAboutSearchIndexer.index_about_information(modulestore, structure) @classmethod def supplemental_fields(cls, item): """ Add location path to the item object Once we've established the path of names, the first name is the course name, and the next 3 names are the navigable path within the edx application. Notice that we stop at that level because a full path to deep children would be confusing. """ location_path = [] parent = item while parent is not None: path_component_name = parent.display_name if not path_component_name: path_component_name = unicode(cls.UNNAMED_MODULE_NAME) location_path.append(path_component_name) parent = parent.get_parent() location_path.reverse() return { "course_name": location_path[0], "location": location_path[1:4] } class LibrarySearchIndexer(SearchIndexerBase): """ Base class to perform indexing for library search from different modulestores """ INDEX_NAME = "library_index" DOCUMENT_TYPE = "library_content" ENABLE_INDEXING_KEY = 'ENABLE_LIBRARY_INDEX' INDEX_EVENT = { 'name': 'edx.library.index.reindexed', 'category': 'library_index' } @classmethod def normalize_structure_key(cls, structure_key): """ Normalizes structure key for use in indexing """ return normalize_key_for_search(structure_key) @classmethod def _fetch_top_level(cls, modulestore, structure_key): """ Fetch the item from the modulestore location """ return modulestore.get_library(structure_key, depth=None) @classmethod def _get_location_info(cls, normalized_structure_key): """ Builds location info dictionary """ return {"library": unicode(normalized_structure_key)} @classmethod def _id_modifier(cls, usage_id): """ Modifies usage_id to submit to index """ return usage_id.replace(library_key=(usage_id.library_key.replace(version_guid=None, branch=None))) @classmethod def do_library_reindex(cls, modulestore, library_key): """ (Re)index all content within the given library, tracking the fact that a full reindex has taken place """ return 
cls._do_reindex(modulestore, library_key) class AboutInfo(object): """ About info structure to contain 1) Property name to use 2) Where to add in the index (using flags above) 3) Where to source the properties value """ # Bitwise Flags for where to index the information # # ANALYSE - states that the property text contains content that we wish to be able to find matched within # e.g. "joe" should yield a result for "I'd like to drink a cup of joe" # # PROPERTY - states that the property text should be a property of the indexed document, to be returned with the # results: search matches will only be made on exact string matches # e.g. "joe" will only match on "joe" # # We are using bitwise flags because one may want to add the property to EITHER or BOTH parts of the index # e.g. university name is desired to be analysed, so that a search on "Oxford" will match # property values "University of Oxford" and "Oxford Brookes University", # but it is also a useful property, because within a (future) filtered search a user # may have chosen to filter courses from "University of Oxford" # # see https://wiki.python.org/moin/BitwiseOperators for information about bitwise shift operator used below # ANALYSE = 1 << 0 # Add the information to the analysed content of the index PROPERTY = 1 << 1 # Add the information as a property of the object being indexed (not analysed) def __init__(self, property_name, index_flags, source_from): self.property_name = property_name self.index_flags = index_flags self.source_from = source_from def get_value(self, **kwargs): """ get the value for this piece of information, using the correct source """ return self.source_from(self, **kwargs) def from_about_dictionary(self, **kwargs): """ gets the value from the kwargs provided 'about_dictionary' """ about_dictionary = kwargs.get('about_dictionary', None) if not about_dictionary: raise ValueError("Context dictionary does not contain expected argument 'about_dictionary'") return 
about_dictionary.get(self.property_name, None) def from_course_property(self, **kwargs): """ gets the value from the kwargs provided 'course' """ course = kwargs.get('course', None) if not course: raise ValueError("Context dictionary does not contain expected argument 'course'") return getattr(course, self.property_name, None) def from_course_mode(self, **kwargs): """ fetches the available course modes from the CourseMode model """ course = kwargs.get('course', None) if not course: raise ValueError("Context dictionary does not contain expected argument 'course'") return [mode.slug for mode in CourseMode.modes_for_course(course.id)] # Source location options - either from the course or the about info FROM_ABOUT_INFO = from_about_dictionary FROM_COURSE_PROPERTY = from_course_property FROM_COURSE_MODE = from_course_mode class CourseAboutSearchIndexer(object): """ Class to perform indexing of about information from course object """ DISCOVERY_DOCUMENT_TYPE = "course_info" INDEX_NAME = CoursewareSearchIndexer.INDEX_NAME # List of properties to add to the index - each item in the list is an instance of AboutInfo object ABOUT_INFORMATION_TO_INCLUDE = [ AboutInfo("advertised_start", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY), AboutInfo("announcement", AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO), AboutInfo("start", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY), AboutInfo("end", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY), AboutInfo("effort", AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO), AboutInfo("display_name", AboutInfo.ANALYSE, AboutInfo.FROM_COURSE_PROPERTY), AboutInfo("overview", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO), AboutInfo("title", AboutInfo.ANALYSE | AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO), AboutInfo("university", AboutInfo.ANALYSE | AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO), AboutInfo("number", AboutInfo.ANALYSE | AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY), AboutInfo("short_description", AboutInfo.ANALYSE, 
AboutInfo.FROM_ABOUT_INFO), AboutInfo("description", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO), AboutInfo("key_dates", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO), AboutInfo("video", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO), AboutInfo("course_staff_short", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO), AboutInfo("course_staff_extended", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO), AboutInfo("requirements", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO), AboutInfo("syllabus", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO), AboutInfo("textbook", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO), AboutInfo("faq", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO), AboutInfo("more_info", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO), AboutInfo("ocw_links", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO), AboutInfo("enrollment_start", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY), AboutInfo("enrollment_end", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY), AboutInfo("org", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY), AboutInfo("modes", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_MODE), AboutInfo("language", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY), ] @classmethod def index_about_information(cls, modulestore, course): """ Add the given course to the course discovery index Arguments: modulestore - modulestore object to use for operations course - course object from which to take properties, locate about information """ searcher = SearchEngine.get_search_engine(cls.INDEX_NAME) if not searcher: return course_id = unicode(course.id) course_info = { 'id': course_id, 'course': course_id, 'content': {}, 'image_url': course_image_url(course), } # load data for all of the 'about' modules for this course into a dictionary about_dictionary = { item.location.name: item.data for item in modulestore.get_items(course.id, qualifiers={"category": "about"}) } about_context = { "course": course, "about_dictionary": about_dictionary, } for about_information in 
cls.ABOUT_INFORMATION_TO_INCLUDE: # Broad exception handler so that a single bad property does not scupper the collection of others try: section_content = about_information.get_value(**about_context) except: # pylint: disable=bare-except section_content = None log.warning( "Course discovery could not collect property %s for course %s", about_information.property_name, course_id, exc_info=True, ) if section_content: if about_information.index_flags & AboutInfo.ANALYSE: analyse_content = section_content if isinstance(section_content, basestring): analyse_content = strip_html_content_to_text(section_content) course_info['content'][about_information.property_name] = analyse_content if about_information.index_flags & AboutInfo.PROPERTY: course_info[about_information.property_name] = section_content # Broad exception handler to protect around and report problems with indexing try: searcher.index(cls.DISCOVERY_DOCUMENT_TYPE, [course_info]) except: # pylint: disable=bare-except log.exception( "Course discovery indexing error encountered, course discovery index may be out of date %s", course_id, ) raise log.debug( "Successfully added %s course to the course discovery index", course_id ) @classmethod def _get_location_info(cls, normalized_structure_key): """ Builds location info dictionary """ return {"course": unicode(normalized_structure_key), "org": normalized_structure_key.org} @classmethod def remove_deleted_items(cls, structure_key): """ Remove item from Course About Search_index """ searcher = SearchEngine.get_search_engine(cls.INDEX_NAME) if not searcher: return response = searcher.search( doc_type=cls.DISCOVERY_DOCUMENT_TYPE, field_dictionary=cls._get_location_info(structure_key) ) result_ids = [result["data"]["id"] for result in response["results"]] searcher.remove(cls.DISCOVERY_DOCUMENT_TYPE, result_ids)
import stock_production_lot_ext import stock_picking_ext import stock_move_ext import purchase_order_ext import stock_move_split_ext
""" Module exports :class:`AtkinsonWald2007`. """ from __future__ import division import numpy as np from openquake.hazardlib.gsim.base import IPE from openquake.hazardlib import const from openquake.hazardlib.imt import MMI class AtkinsonWald2007(IPE): """ Implements IPE developed by Atkinson and Wald (2007) California, USA MS! """ DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([ MMI ]) DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.AVERAGE_HORIZONTAL DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([ const.StdDev.TOTAL ]) # TODO ! REQUIRES_SITES_PARAMETERS = set(('vs30', )) REQUIRES_RUPTURE_PARAMETERS = set(('mag',)) REQUIRES_DISTANCES = set(('rrup', )) def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): """ See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. """ h = 14.0 R = np.sqrt(dists.rrup**2 + h**2) B = np.zeros_like(dists.rrup) B[R > 30.] = np.log10(R / 30.)[R > 30.] mean_mmi = 12.27 + 2.270 * (rup.mag - 6) + 0.1304 * (rup.mag - 6)**2 - 1.30 * np.log10(R) - 0.0007070 * R + 1.95 * B - 0.577 * rup.mag * np.log10(R) mean_mmi += self.compute_site_term(sites) mean_mmi = mean_mmi.clip(min=1, max=12) stddevs = np.zeros_like(dists.rrup) stddevs.fill(0.4) stddevs = stddevs.reshape(1, len(stddevs)) return mean_mmi, stddevs def compute_site_term(self, sites): # TODO ! return 0
""" A simple bot to gather some census data in IRC channels. It is intended to sit in a channel and collect the data for statistics. :author: tpltnt :license: AGPLv3 """ import irc.bot import irc.strings from irc.client import ip_numstr_to_quad, ip_quad_to_numstr class CensusBot(irc.bot.SingleServerIRCBot): """ The class implementing the census bot. """ def __init__(self, channel, nickname, server, port=6667): """ The constructor for the CensusBot class. :param channel: name of the channel to join :type channel: str :param nickname: nick of the bot (to use) :type nickname: str :param server: FQDN of the server to use :type server: str :param port: port to use when connecting to the server :type port: int """ if 0 != channel.find('#'): channel = '#' + channel irc.bot.SingleServerIRCBot.__init__(self, [(server, port)], nickname, nickname) self.channel = channel def on_nickname_in_use(self, connection, event): """ Change own nickname if already in use. :param connection: connection to the server :type connection: irc.client.ServerConnection :param event: event to react to :type event: :raises: TypeError """ if not isinstance(connection, ServerConnection): raise TypeError("'connection' is not of type 'ServerConnection'") connection.nick(connection.get_nickname() + "_") def main(): import sys if len(sys.argv) != 4: print("Usage: " + sys.argv[0] + " <server[:port]> <channel> <nickname>") sys.exit(1) server = sys.argv[1].split(":", 1) host = server[0] if len(server) == 2: try: port = int(server[1]) except ValueError: print("Error: Erroneous port.") sys.exit(1) else: port = 6667 channel = sys.argv[2] nickname = sys.argv[3] bot = CensusBot(channel, nickname, server, port) bot.start() if __name__ == "__main__": main()
import uuid

from django.conf import settings
from django.core.cache import cache
from django.urls import reverse

# One-time payloads live in the cache for at most this many seconds.
_TIMEOUT_SECONDS = 60


def _mk_key(token):
    """Build the cache key under which a token's payload is stored."""
    return "one-time-data-" + token


def set_one_time_data(data):
    """Stash *data* in the cache and return an absolute one-time URL for it."""
    token = str(uuid.uuid4())
    cache.set(_mk_key(token), data, _TIMEOUT_SECONDS)
    path = reverse("one_time_url", kwargs={"token": token})
    return '{}://{}{}'.format(settings.DEFAULT_PROTOCOL, settings.HOSTNAME, path)


def get_one_time_data(token):
    """Return the cached payload for *token*, or None if expired/unknown."""
    # It seems like Brightcove wants to hit it twice, so the entry is NOT
    # deleted on first read — it simply expires:
    # cache.delete(key)
    return cache.get(_mk_key(token))
from . import hr_certification from . import hr_training_participant
"""Make session:proposal 1:1. Revision ID: 3a6b2ab00e3e Revises: 4dbf686f4380 Create Date: 2013-11-09 13:51:58.343243 """ revision = '3a6b2ab00e3e' down_revision = '4dbf686f4380' from alembic import op def upgrade(): op.create_unique_constraint('session_proposal_id_key', 'session', ['proposal_id']) def downgrade(): op.drop_constraint('session_proposal_id_key', 'session', 'unique')
from __future__ import with_statement

import re

# weboob framework types; this module builds a generic, subclassable
# comic-reader backend on top of them.
from weboob.capabilities.gallery import ICapGallery, BaseGallery, BaseImage
from weboob.tools.backend import BaseBackend
from weboob.tools.browser import BaseBrowser, BasePage


__all__ = ['GenericComicReaderBackend']


class DisplayPage(BasePage):
    """A comic-viewer page; the XPath expressions used to scrape it are
    supplied by the owning browser's ``params`` dict."""

    def get_page(self, gallery):
        """Return a BaseImage for the comic image shown on this page."""
        # First node matching the configured image XPath is the image URL.
        src = self.document.xpath(self.browser.params['img_src_xpath'])[0]
        return BaseImage(src, gallery=gallery, url=src)

    def page_list(self):
        """Return the raw list of page links/identifiers found on this page."""
        return self.document.xpath(self.browser.params['page_list_xpath'])


class GenericComicReaderBrowser(BaseBrowser):
    """Browser parameterized by a dict of site-specific XPaths/URL patterns."""

    def __init__(self, browser_params, *args, **kwargs):
        # Site-specific settings injected by the backend subclass.
        self.params = browser_params
        BaseBrowser.__init__(self, *args, **kwargs)

    def iter_gallery_images(self, gallery):
        """Yield one BaseImage per comic page of *gallery*."""
        self.location(gallery.url)
        assert self.is_on_page(DisplayPage)
        for p in self.page.page_list():
            if 'page_to_location' in self.params:
                # Page entries must be interpolated into a URL pattern.
                self.location(self.params['page_to_location'] % p)
            else:
                # Page entries are directly usable as locations.
                self.location(p)
            assert self.is_on_page(DisplayPage)
            yield self.page.get_page(gallery)

    def fill_image(self, image, fields):
        # Only the raw image bytes are lazily fillable.
        if 'data' in fields:
            image.data = self.readurl(image.url)


class GenericComicReaderBackend(BaseBackend, ICapGallery):
    """Generic comic reader backend; subclasses set the class attributes
    below (regexps, URL template, browser params) for a specific site."""

    NAME = 'genericcomicreader'
    MAINTAINER = u'Noé Rubinstein'
    EMAIL = 'noe.rubinstein@gmail.com'
    VERSION = '0.f'
    DESCRIPTION = 'Generic comic reader backend; subclasses implement specific sites'
    LICENSE = 'AGPLv3+'
    BROWSER = GenericComicReaderBrowser
    # Subclasses override these with site-specific values.
    BROWSER_PARAMS = {}
    ID_REGEXP = None
    URL_REGEXP = None
    ID_TO_URL = None
    PAGES = {}

    def create_default_browser(self):
        """Build the browser, forwarding params/pages (and DOMAIN if set)."""
        b = self.create_browser(self.BROWSER_PARAMS)
        b.PAGES = self.PAGES
        try:
            b.DOMAIN = self.DOMAIN
        except AttributeError:
            # DOMAIN is optional on subclasses.
            pass
        return b

    def iter_gallery_images(self, gallery):
        """Iterate images of *gallery* under the browser lock."""
        with self.browser:
            return self.browser.iter_gallery_images(gallery)

    def get_gallery(self, _id):
        """Resolve *_id* (full URL or bare ID) into a BaseGallery,
        or return None when it matches neither pattern."""
        # Try the full-URL form first; its first group is the bare ID.
        match = re.match(r'^%s$' % self.URL_REGEXP, _id)
        if match:
            _id = match.group(1)
        else:
            # Fall back to the bare-ID form.
            match = re.match(r'^%s$' % self.ID_REGEXP, _id)
            if match:
                _id = match.group(0)
            else:
                return None

        gallery = BaseGallery(_id, url=(self.ID_TO_URL % _id))
        with self.browser:
            return gallery

    def fill_gallery(self, gallery, fields):
        # The gallery title is simply its ID for generic readers.
        gallery.title = gallery.id

    def fill_image(self, image, fields):
        with self.browser:
            self.browser.fill_image(image, fields)

    # Lazy-fill dispatch table used by weboob's fillobj machinery.
    OBJECTS = {
        BaseGallery: fill_gallery,
        BaseImage: fill_image}
import os
import random

import django

# Directory holding this settings module; used for paths below.
test_dir = os.path.dirname(__file__)

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(test_dir, 'db.sqlite3'),
    }
}

INSTALLED_APPS = [
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.auth',
    'django.contrib.messages',
    'django.contrib.admin',
    'django.contrib.sites',
    'django.contrib.staticfiles',
    'imperavi',
    'tinymce',
    'newsletter'
]

# 'imperavi' is only usable on older Django; drop it on newer versions.
if django.VERSION > (1, 8):
    INSTALLED_APPS.remove('imperavi')

MIDDLEWARE = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
]

ROOT_URLCONF = 'test_project.urls'

FIXTURE_DIRS = [os.path.join(test_dir, 'fixtures'), ]

SITE_ID = 1

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
        'DIRS': [os.path.join(test_dir, 'templates')],
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

USE_TZ = True
TIME_ZONE = 'UTC'

STATIC_URL = '/static/'

# Generate a throwaway secret key; adequate for a test project only.
key_chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
SECRET_KEY = ''.join(
    random.SystemRandom().choice(key_chars) for _ in range(50)
)

LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        'newsletter': {
            'handlers': ['console'],
            'propagate': True,
        },
    },
}

DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
class FieldRegistry(object):
    """Process-wide registry mapping a model to its registered fields.

    The mapping is stored on the class itself, so every instance (and
    any subclass sharing the attribute) sees the same registrations.
    """

    _registry = {}

    def add_field(self, model, field):
        """Append *field* to the list registered for *model*."""
        type(self)._registry.setdefault(model, []).append(field)

    def get_fields(self, model):
        """Return the fields registered for *model* (empty list if none)."""
        return type(self)._registry.get(model, [])

    def __contains__(self, model):
        """True when at least one field has been registered for *model*."""
        return model in type(self)._registry
__metaclass__ = type from zope.component import ( ComponentLookupError, getMultiAdapter, ) from zope.configuration import xmlconfig from zope.interface import ( implements, Interface, ) from zope.publisher.interfaces.browser import ( IBrowserPublisher, IDefaultBrowserLayer, ) from zope.testing.cleanup import cleanUp from lp.services.webapp import Navigation from lp.testing import TestCase class TestNavigationDirective(TestCase): def test_default_layer(self): # By default all navigation classes are registered for # IDefaultBrowserLayer. directive = """ <browser:navigation module="%(this)s" classes="ThingNavigation"/> """ % dict(this=this) xmlconfig.string(zcml_configure % directive) navigation = getMultiAdapter( (Thing(), DefaultBrowserLayer()), IBrowserPublisher, name='') self.assertIsInstance(navigation, ThingNavigation) def test_specific_layer(self): # If we specify a layer when registering a navigation class, it will # only be available on that layer. directive = """ <browser:navigation module="%(this)s" classes="OtherThingNavigation" layer="%(this)s.IOtherLayer" /> """ % dict(this=this) xmlconfig.string(zcml_configure % directive) self.assertRaises( ComponentLookupError, getMultiAdapter, (Thing(), DefaultBrowserLayer()), IBrowserPublisher, name='') navigation = getMultiAdapter( (Thing(), OtherLayer()), IBrowserPublisher, name='') self.assertIsInstance(navigation, OtherThingNavigation) def test_multiple_navigations_for_single_context(self): # It is possible to have multiple navigation classes for a given # context class as long as they are registered for different layers. 
directive = """ <browser:navigation module="%(this)s" classes="ThingNavigation"/> <browser:navigation module="%(this)s" classes="OtherThingNavigation" layer="%(this)s.IOtherLayer" /> """ % dict(this=this) xmlconfig.string(zcml_configure % directive) navigation = getMultiAdapter( (Thing(), DefaultBrowserLayer()), IBrowserPublisher, name='') other_navigation = getMultiAdapter( (Thing(), OtherLayer()), IBrowserPublisher, name='') self.assertNotEqual(navigation, other_navigation) def tearDown(self): TestCase.tearDown(self) cleanUp() class DefaultBrowserLayer: implements(IDefaultBrowserLayer) class IThing(Interface): pass class Thing(object): implements(IThing) class ThingNavigation(Navigation): usedfor = IThing class OtherThingNavigation(Navigation): usedfor = IThing class IOtherLayer(Interface): pass class OtherLayer: implements(IOtherLayer) this = "lp.services.webapp.tests.test_navigation" zcml_configure = """ <configure xmlns:browser="http://namespaces.zope.org/browser"> <include package="lp.services.webapp" file="meta.zcml" /> %s </configure> """
"""Collate a downloaded mail batch (letters and postcards) into combined PDFs
and submit them to Click2Mail as print jobs."""
import os
import sys
import glob
import json
import subprocess
from collections import defaultdict

from utils import UnicodeReader, slugify, count_pages, combine_pdfs, parser
import addresscleaner
from click2mail import Click2MailBatch

parser.add_argument("directory", help="Path to downloaded mail batch")
parser.add_argument("--skip-letters", action='store_true', default=False)
parser.add_argument("--skip-postcards", action='store_true', default=False)

def fix_lines(address):
    """
    Click2Mail screws up addresses with 3 lines.  If we have only one address
    line, put it in "address1".  If we have more, put the first in
    "organization", and subsequent ones in "addressN".
    """
    lines = [a for a in [
        address.get('organization', None),
        address.get('address1', None),
        address.get('address2', None),
        address.get('address3', None)] if a]
    if len(lines) == 1:
        # Single line: force it into address1, clear everything else.
        address['organization'] = ''
        address['address1'] = lines[0]
        address['address2'] = ''
        address['address3'] = ''
    elif len(lines) >= 2:
        # Two or more lines: first becomes the organization, the rest fill
        # address1..address3 in order; unused slots are cleared.
        padded = (lines + ['', '', ''])[:4]
        address['organization'] = padded[0]
        address['address1'] = padded[1]
        address['address2'] = padded[2]
        address['address3'] = padded[3]
    return address

def collate_letters(mailing_dir, letters, page=1):
    """Group letters by (recipient, sender), count pages per group, and build
    Click2Mail job dicts with contiguous page ranges.  Returns
    (files, jobs_sorted_by_startingPage, next_page)."""
    # Sort by recipient.
    recipient_letters = defaultdict(list)
    for letter in letters:
        recipient_letters[(letter['recipient'], letter['sender'])].append(letter)

    # Assemble list of files and jobs.
    files = []
    jobs = {}
    # NOTE: .items() (not py2-only .iteritems()) so the script runs on both
    # Python 2 and 3.
    for (recipient, sender), recipient_batch in recipient_letters.items():
        count = 0
        for letter in recipient_batch:
            filename = os.path.join(mailing_dir, letter["file"])
            files.append(filename)
            count += count_pages(filename)
        end = page + count
        jobs[recipient] = {
            "startingPage": page,
            "endingPage": end - 1,
            "recipients": [fix_lines(addresscleaner.parse_address(recipient))],
            "sender": addresscleaner.parse_address(sender),
            "type": "letter",
        }
        page = end

    # sorted() instead of list.sort(): dict.values() is a view on py3.
    vals = sorted(jobs.values(), key=lambda j: j['startingPage'])
    return files, vals, page

def collate_postcards(postcards, page=1):
    """Collate postcards into one job per (postcard type, sender); each job
    shares a single template PDF page.  Returns (files, jobs, next_page)."""
    # Collate postcards into a list per type and sender.
    type_sender_postcards = defaultdict(list)
    for letter in postcards:
        key = (letter['type'], letter['sender'])
        type_sender_postcards[key].append(letter)

    files = []
    jobs = []
    for (postcard_type, sender), letters in type_sender_postcards.items():
        files.append(os.path.join(
            os.path.dirname(__file__),
            "postcards",
            "{}.pdf".format(postcard_type)
        ))
        jobs.append({
            "startingPage": page + len(files) - 1,
            "endingPage": page + len(files) - 1,
            "recipients": [
                fix_lines(addresscleaner.parse_address(letter['recipient']))
                for letter in letters
            ],
            "sender": addresscleaner.parse_address(sender),
            "type": "postcard",
        })
    return files, jobs, page + len(files)

def run_batch(args, files, jobs):
    """Combine `files` into one PDF and submit it with `jobs` to Click2Mail.
    The combined PDF is deleted after a successful run."""
    filename = combine_pdfs(files)
    print("Building job with %s" % filename)
    batch = Click2MailBatch(
        username=args.username,
        password=args.password,
        filename=filename,
        jobs=jobs,
        staging=args.staging)
    if batch.run(args.dry_run):
        os.remove(filename)

def main():
    args = parser.parse_args()
    if args.directory.endswith(".zip"):
        # Unzip once; reuse the extracted directory on later runs.
        directory = os.path.abspath(args.directory[0:-len(".zip")])
        if not os.path.exists(directory):
            subprocess.check_call([
                "unzip", args.directory, "-d", os.path.dirname(args.directory)
            ])
    else:
        directory = args.directory

    with open(os.path.join(directory, "manifest.json")) as fh:
        manifest = json.load(fh)

    if manifest["letters"] and not args.skip_letters:
        lfiles, ljobs, lpage = collate_letters(directory, manifest["letters"], 1)
        print("Found %s letter jobs" % len(ljobs))
        if ljobs:
            run_batch(args, lfiles, ljobs)

    if manifest["postcards"] and not args.skip_postcards:
        pfiles, pjobs, ppage = collate_postcards(manifest["postcards"], 1)
        print("Found %s postcard jobs" % len(pjobs))
        if pjobs:
            run_batch(args, pfiles, pjobs)

if __name__ == "__main__":
    main()
from . import test_employee_display_own_info
# URLconf for the application app.
# NOTE(review): patterns() was deprecated in Django 1.8 and removed in 1.10;
# presumably this project pins an older Django — confirm before upgrading.
from django.conf.urls import patterns, url

from application import views

urlpatterns = patterns('',
    url(r'^$', views.index, name='index'),
    url(r'^(?P<application_id>\d+)/$', views.detail, name='detail'),
    # NOTE(review): klogin embeds a username AND password in the URL path,
    # which ends up in server/proxy/browser logs — security concern to review.
    url(r'^klogin/(?P<username>\w+)/(?P<password>\w+)/$', views.klogin, name='klogin'),
)
from disco.core import Disco, result_iterator
from disco.settings import DiscoSettings
from disco.func import chain_reader
from discodex.objects import DataSet

from freequery.document import docparse
from freequery.document.docset import Docset
from freequery.index.tf_idf import TfIdf


class IndexJob(object):
    """Runs a TF-IDF map/reduce job over a docset on Disco, then feeds the
    resulting partitions into a discodex inverted index."""

    def __init__(self, spec, discodex,
                 disco_addr="disco://localhost", profile=False):
        # TODO(sqs): refactoring potential with PagerankJob
        self.spec = spec
        self.discodex = discodex
        self.docset = Docset(spec.docset_name)
        self.disco = Disco(DiscoSettings()['DISCO_MASTER'])
        self.nr_partitions = 8
        self.profile = profile

    def start(self):
        """Run the indexing job and build the discodex index from its output."""
        job_results = self.__run_job(self.__index_job())
        self.__run_discodex_index(job_results)

    def __run_job(self, job):
        """Block until `job` finishes; optionally profile it afterwards."""
        outputs = job.wait()
        if self.profile:
            # NOTE(review): __profile_job is not defined on this class in this
            # file — presumably provided elsewhere; verify before enabling
            # profile=True.
            self.__profile_job(job)
        return outputs

    def __index_job(self):
        """Submit the TF-IDF map/reduce job over the docset's DDFS tag."""
        job_kwargs = dict(
            name="index_tfidf",
            input=['tag://' + self.docset.ddfs_tag],
            map_reader=docparse,
            map=TfIdf.map,
            reduce=TfIdf.reduce,
            sort=True,
            partitions=self.nr_partitions,
            partition=TfIdf.partition,
            merge_partitions=False,
            profile=self.profile,
            params=dict(doc_count=self.docset.doc_count),
        )
        return self.disco.new_job(**job_kwargs)

    def __run_discodex_index(self, results):
        """Index the job results with discodex, then clone the index to the
        spec's inverted-index name."""
        opts = {
            'parser': 'disco.func.chain_reader',
            'demuxer': 'freequery.index.tf_idf.TfIdf_demux',
            'nr_ichunks': 1,  # TODO(sqs): after disco#181 fixed, increase this
        }
        ds = DataSet(input=results, options=opts)
        origname = self.discodex.index(ds)
        # origname is also the disco job name
        self.disco.wait(origname)
        self.discodex.clone(origname, self.spec.invindex_name)
from __future__ import unicode_literals import sys from intelmq.lib import utils from intelmq.lib.bot import Bot from intelmq.lib.message import Event class MalwareGroupIPsParserBot(Bot): def process(self): report = self.receive_message() if not report: self.acknowledge_message() return if not report.contains("raw"): self.acknowledge_message() raw_report = utils.base64_decode(report.value("raw")) raw_report = raw_report.split("<tbody>")[1] raw_report = raw_report.split("</tbody>")[0] raw_report_splitted = raw_report.split("<tr>") for row in raw_report_splitted: row = row.strip() if row == "": continue row_splitted = row.split("<td>") ip = row_splitted[1].split('">')[1].split("<")[0].strip() time_source = row_splitted[6].replace("</td></tr>", "").strip() time_source = time_source + " 00:00:00 UTC" event = Event(report) event.add('time.source', time_source, sanitize=True) event.add('classification.type', u'malware') event.add('source.ip', ip, sanitize=True) event.add('raw', row, sanitize=True) self.send_message(event) self.acknowledge_message() if __name__ == "__main__": bot = MalwareGroupIPsParserBot(sys.argv[1]) bot.start()
"""Driver script: download sequences and/or titers for a chosen virus from
the vdb/tdb databases, shelling out to the per-virus download scripts."""
import os, subprocess
import sys
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--virus', default="flu", help="virus to download; default is flu")
parser.add_argument('--flu_lineages', default=["h3n2", "h1n1pdm", "vic", "yam"], nargs='+', type = str,  help ="seasonal flu lineages to download, options are h3n2, h1n1pdm, vic and yam")
parser.add_argument('--segments', type=str, default=['ha', 'na'], nargs='+', help="specify segment(s) to download")
parser.add_argument('--sequences', default=False, action="store_true", help="download sequences from vdb")
parser.add_argument('--titers', default=False, action="store_true", help="download titers from tdb")
parser.add_argument('--titers_sources', default=["base", "crick", "cdc", "niid", "vidrl"], nargs='+', type = str,  help ="titer sources to download, options are base, cdc, crick, niid and vidrl")
parser.add_argument('--titers_passages', default=["egg", "cell"], nargs='+', type = str,  help ="titer passage types to download, options are egg and cell")

def concatenate_titers(params, passage, assay):
    """Concatenate per-source titer TSVs into combined 'who' and 'public'
    files per lineage.  Missing source files are silently skipped."""
    for lineage in params.flu_lineages:
        out = 'data/%s_who_%s_%s_titers.tsv'%(lineage, assay, passage)
        hi_titers = []
        for source in params.titers_sources:
            hi_titers_file = 'data/%s_%s_%s_%s_titers.tsv'%(lineage, source, assay, passage)
            if os.path.isfile(hi_titers_file):
                hi_titers.append(hi_titers_file)
        if len(hi_titers) > 0:
            with open(out, 'w+') as f:
                call = ['cat'] + hi_titers
                print(call)
                subprocess.call(call, stdout=f)
    for lineage in params.flu_lineages:
        out = 'data/%s_public_%s_%s_titers.tsv'%(lineage, assay, passage)
        hi_titers = []
        for source in ["base", "cdc"]:
            hi_titers_file = 'data/%s_%s_%s_%s_titers.tsv'%(lineage, source, assay, passage)
            if os.path.isfile(hi_titers_file):
                hi_titers.append(hi_titers_file)
        if len(hi_titers) > 0:
            with open(out, 'w+') as f:
                call = ['cat'] + hi_titers
                print(call)
                subprocess.call(call, stdout=f)

if __name__=="__main__":
    params = parser.parse_args()

    if params.virus == "flu":
        # Download FASTAs from database
        if params.sequences:
            segments = params.segments
            for segment in segments:
                for lineage in params.flu_lineages:
                    call = "python vdb/flu_download.py -db vdb -v flu --select locus:%s lineage:seasonal_%s --fstem %s_%s --resolve_method split_passage"%(segment.upper(), lineage, lineage, segment)
                    print(call)
                    os.system(call)

        if params.titers:
            # download titers
            for source in params.titers_sources:
                if source == "base":
                    for lineage in params.flu_lineages:
                        call = "python tdb/download.py -db tdb -v flu --subtype %s --select assay_type:hi --fstem %s_base_hi_cell"%(lineage, lineage)
                        print(call)
                        os.system(call)
                if source in ["cdc", "crick", "niid", "vidrl"]:
                    for passage in params.titers_passages:
                        for lineage in params.flu_lineages:
                            call = "python tdb/download.py -db %s_tdb -v flu --subtype %s --select assay_type:hi serum_passage_category:%s --fstem %s_%s_hi_%s"%(source, lineage, passage, lineage, source, passage)
                            print(call)
                            os.system(call)
                        # FRA assay is h3n2-only.
                        lineage = 'h3n2'
                        call = "python tdb/download.py -db %s_tdb -v flu --subtype %s --select assay_type:fra serum_passage_category:%s --fstem %s_%s_fra_%s"%(source, lineage, passage, lineage, source, passage)
                        print(call)
                        os.system(call)
                if source == "cdc":
                    # NOTE(review): `passage` here is the leftover value from
                    # the loop above — presumably intentional, but verify.
                    for lineage in params.flu_lineages:
                        call = "python tdb/download.py -db %s_tdb -v flu --subtype %s --select assay_type:hi serum_host:human --fstem %s_%s_hi_%s_human"%(source, lineage, lineage, source, passage)
                        print(call)
                        os.system(call)
                    lineage = 'h3n2'
                    call = "python tdb/download.py -db %s_tdb -v flu --subtype %s --select assay_type:fra serum_host:human --fstem %s_%s_fra_%s_human"%(source, lineage, lineage, source, passage)
                    print(call)
                    os.system(call)

            # concatenate to create default HI strain TSVs for each subtype
            concatenate_titers(params, "cell", "hi")
            concatenate_titers(params, "cell", "fra")
            concatenate_titers(params, "egg", "hi")
            concatenate_titers(params, "egg", "fra")

    elif params.virus == "ebola":
        call = "python vdb/ebola_download.py -db vdb -v ebola --fstem ebola"
        print(call)
        os.system(call)

    elif params.virus == "dengue":
        # Download all serotypes together.
        call = "python vdb/dengue_download.py"
        print(call)
        os.system(call)

        # Download individual serotypes.
        serotypes = [1, 2, 3, 4]
        for serotype in serotypes:
            call = "python vdb/dengue_download.py --select serotype:%i" % serotype
            print(call)
            os.system(call)

        # Download titers.
        if params.titers:
            call = "python tdb/download.py -db tdb -v dengue --fstem dengue"
            print(call)
            os.system(call)

    elif params.virus == "zika":
        call = "python vdb/zika_download.py -db vdb -v zika --fstem zika"
        print(call)
        os.system(call)

    elif params.virus == "mumps":
        call = "python vdb/mumps_download.py -db vdb -v mumps --fstem mumps --resolve_method choose_genbank"
        print(call)
        os.system(call)

    elif params.virus == "h7n9" or params.virus == "avian":
        os.system("python vdb/h7n9_download.py -db vdb -v h7n9 --select locus:PB2 --fstem h7n9_pb2")
        os.system("python vdb/h7n9_download.py -db vdb -v h7n9 --select locus:PB1 --fstem h7n9_pb1")
        os.system("python vdb/h7n9_download.py -db vdb -v h7n9 --select locus:PA --fstem h7n9_pa")
        os.system("python vdb/h7n9_download.py -db vdb -v h7n9 --select locus:HA --fstem h7n9_ha")
        os.system("python vdb/h7n9_download.py -db vdb -v h7n9 --select locus:NP --fstem h7n9_np")
        os.system("python vdb/h7n9_download.py -db vdb -v h7n9 --select locus:NA --fstem h7n9_na")
        os.system("python vdb/h7n9_download.py -db vdb -v h7n9 --select locus:MP --fstem h7n9_mp")
        os.system("python vdb/h7n9_download.py -db vdb -v h7n9 --select locus:NS --fstem h7n9_ns")

    else:
        print("%s is an invalid virus type.\nValid viruses are flu, ebola, dengue, zika, mumps, h7n9, and avian."%(params.virus))
        # BUG FIX: sys was used here but never imported (NameError at runtime).
        sys.exit(2)
# Odoo addon manifest — declares metadata for the "Product Code Unique"
# module (adds a unique constraint to product default_code).
{
    "name": "Product Code Unique",
    "summary": "Add the unique property to default_code field",
    "version": "9.0.1.0.0",
    "category": "Product",
    "website": "https://odoo-community.org/",
    "author": "<Deysy Mascorro>, Odoo Community Association (OCA)",
    "license": "AGPL-3",
    "application": False,
    "installable": True,
    "external_dependencies": {
        "python": [],
        "bin": [],
    },
    "depends": [
        "base",
        "product",
    ],
    "data": [
        "views/product_view.xml"
    ],
    "demo": [
    ],
    "qweb": [
    ]
}
# Tests for the LTI tool-provider view layer: OAuth-signed LTI launches,
# anonymous vs. authenticated flows, and AbstractApplicationHookManager
# integration.
import ddt
from django.contrib.auth import login, authenticate
from importlib import import_module

from django_lti_tool_provider import AbstractApplicationHookManager
from mock import patch, Mock
from oauth2 import Request, Consumer, SignatureMethod_HMAC_SHA1

from django.contrib.auth.models import User
from django.test.utils import override_settings
from django.test import Client, TestCase, RequestFactory
from django.conf import settings

from django_lti_tool_provider.models import LtiUserData
from django_lti_tool_provider.views import LTIView


@override_settings(
    LTI_CLIENT_KEY='qertyuiop1234567890!@#$%^&*()_+[];',
    LTI_CLIENT_SECRET='1234567890!@#$%^&*()_+[];./,;qwertyuiop'
)
class LtiRequestsTestBase(TestCase):
    """Shared fixtures/helpers: builds OAuth-signed LTI payloads and asserts
    on sessions, redirects and persisted LtiUserData."""

    # Canonical LTI launch parameters used by every test.
    _data = {
        "lis_result_sourcedid": "lis_result_sourcedid",
        "context_id": "LTIX/LTI-101/now",
        "user_id": "1234567890",
        "roles": ["Student"],
        "lis_outcome_service_url": "lis_outcome_service_url",
        "resource_link_id": "resource_link_id",
        "lti_version": "LTI-1p0",
        'lis_person_sourcedid': 'username',
        'lis_person_contact_email_primary': 'username@email.com'
    }

    _url_base = 'http://testserver'

    DEFAULT_REDIRECT = '/home'

    def setUp(self):
        self.client = Client()
        # Mocked hook manager registered globally on LTIView for each test.
        self.hook_manager = Mock(spec=AbstractApplicationHookManager)
        self.hook_manager.vary_by_key = Mock(return_value=None)
        self.hook_manager.optional_lti_parameters = Mock(return_value={})
        LTIView.register_authentication_manager(self.hook_manager)

    @property
    def consumer(self):
        """OAuth consumer built from the overridden LTI settings."""
        return Consumer(settings.LTI_CLIENT_KEY, settings.LTI_CLIENT_SECRET)

    def _get_signed_oauth_request(self, path, method, data=None):
        """Build an HMAC-SHA1-signed oauth2 Request for the given path."""
        data = data if data is not None else self._data
        url = self._url_base + path
        method = method if method else 'GET'
        req = Request.from_consumer_and_token(self.consumer, {}, method, url, data)
        req.sign_request(SignatureMethod_HMAC_SHA1(), self.consumer, None)
        return req

    def get_correct_lti_payload(self, path='/lti/', method='POST', data=None):
        return self._get_signed_oauth_request(path, method, data).to_postdata()

    def get_incorrect_lti_payload(self, path='/lti/', method='POST', data=None):
        # Corrupt the signature after signing so validation must fail.
        req = self._get_signed_oauth_request(path, method, data)
        req['oauth_signature'] += '_broken'
        return req.to_postdata()

    def send_lti_request(self, payload, client=None):
        client = client or self.client
        return client.post('/lti/', payload, content_type='application/x-www-form-urlencoded')

    def _authenticate(self, username='test'):
        self.client = Client()
        user = User.objects.get(username=username)
        logged_in = self.client.login(username=username, password='test')
        self.assertTrue(logged_in)
        return user

    def _logout(self):
        self.client.logout()

    def _verify_redirected_to(self, response, expected_url):
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, expected_url)

    def _verify_session_lti_contents(self, session, expected):
        self.assertIn('lti_parameters', session)
        self._verify_lti_data(session['lti_parameters'], expected)

    def _verify_lti_data(self, actual, expected):
        # `expected` may be a subset of `actual` — only listed keys checked.
        for key, value in expected.items():
            self.assertEqual(value, actual[key])

    def _verify_lti_created(self, user, expected_lti_data, custom_key=None):
        key = custom_key if custom_key else ''
        lti_data = LtiUserData.objects.get(user=user, custom_key=key)
        self.assertIsNotNone(lti_data)
        self.assertEqual(lti_data.custom_key, key)
        for key, value in expected_lti_data.items():
            self.assertEqual(value, lti_data.edx_lti_parameters[key])


class AnonymousLtiRequestTests(LtiRequestsTestBase):
    """Launches from users that the hook manager does NOT authenticate."""

    def setUp(self):
        super(AnonymousLtiRequestTests, self).setUp()
        self.hook_manager.anonymous_redirect_to = Mock(return_value=self.DEFAULT_REDIRECT)

    def test_given_incorrect_payload_throws_bad_request(self):
        response = self.send_lti_request(self.get_incorrect_lti_payload())
        self.assertEqual(response.status_code, 400)
        self.assertIn("Invalid LTI Request", response.content)

    def test_given_correct_requests_sets_session_variable(self):
        response = self.send_lti_request(self.get_correct_lti_payload())
        self._verify_redirected_to(response, self.DEFAULT_REDIRECT)
        self._verify_session_lti_contents(self.client.session, self._data)


@ddt.ddt
@patch('django_lti_tool_provider.views.Signals.LTI.received.send')
class AuthenticatedLtiRequestTests(LtiRequestsTestBase):
    """Launches where the hook manager logs the user in during the request."""

    def _authentication_hook(self, request, user_id=None, username=None, email=None, **kwargs):
        user = User.objects.create_user(username or user_id, password='1234', email=email)
        user.save()
        authenticated_user = authenticate(request, username=user.username, password='1234')
        login(request, authenticated_user)
        return user

    def setUp(self):
        super(AuthenticatedLtiRequestTests, self).setUp()
        self.hook_manager.authenticated_redirect_to = Mock(return_value=self.DEFAULT_REDIRECT)
        self.hook_manager.authentication_hook = self._authentication_hook

    def _verify_lti_updated_signal_is_sent(self, patched_send_lti_received, expected_user):
        expected_lti_data = LtiUserData.objects.get(user=expected_user)
        patched_send_lti_received.assert_called_once_with(LTIView, user=expected_user, lti_data=expected_lti_data)

    def test_no_session_given_incorrect_payload_throws_bad_request(self, _):
        response = self.send_lti_request(self.get_incorrect_lti_payload())
        self.assertEqual(response.status_code, 400)
        self.assertIn("Invalid LTI Request", response.content)

    def test_no_session_correct_payload_processes_lti_request(self, patched_send_lti_received):
        # Precondition check
        self.assertFalse(LtiUserData.objects.all())

        response = self.send_lti_request(self.get_correct_lti_payload())

        # Should have been created.
        user = User.objects.all()[0]
        self._verify_lti_created(user, self._data)
        self._verify_redirected_to(response, self.DEFAULT_REDIRECT)
        self._verify_lti_updated_signal_is_sent(patched_send_lti_received, user)

    def test_given_session_and_lti_uses_lti(self, patched_send_lti_received):
        # Precondition check
        self.assertFalse(LtiUserData.objects.all())
        session = self.client.session
        session['lti_parameters'] = {}
        session.save()

        response = self.send_lti_request(self.get_correct_lti_payload())

        # Should have been created.
        user = User.objects.all()[0]
        self._verify_lti_created(user, self._data)
        self._verify_redirected_to(response, self.DEFAULT_REDIRECT)
        self._verify_lti_updated_signal_is_sent(patched_send_lti_received, user)

    def test_force_login_change(self, patched_send_lti_received):
        self.assertFalse(User.objects.exclude(id=1))
        payload = self.get_correct_lti_payload()
        # RequestFactory returns the raw request rather than a response, so
        # the view can be invoked manually with a hand-built session.
        request = self.send_lti_request(payload, client=RequestFactory())
        engine = import_module(settings.SESSION_ENGINE)
        request.session = engine.SessionStore()
        request.user = None
        user = self._authentication_hook(request, username='goober')
        request.session.save()
        self.assertEqual(request.user, user)
        LTIView.as_view()(request)
        # New user creation not actually available during tests.
        self.assertTrue(request.user)
        new_user = User.objects.exclude(username='goober')[0]
        self.assertEqual(request.user, new_user)
        # Verify a new user is not created with the same data if re-visiting.
        request = self.send_lti_request(payload, client=RequestFactory())
        request.session = engine.SessionStore()
        request.user = None
        authenticated_user = authenticate(request, username=new_user.username, password='1234')
        self.assertTrue(authenticated_user)
        login(request, authenticated_user)
        LTIView.as_view()(request)
        self.assertEqual(request.user, authenticated_user)
        self.assertEqual(authenticated_user, new_user)
        self.assertEqual(LtiUserData.objects.all().count(), 1)


@ddt.ddt
class AuthenticationManagerIntegrationTests(LtiRequestsTestBase):
    """End-to-end checks of the arguments the view passes to hook-manager
    callbacks (authentication_hook, redirect hooks, vary_by_key)."""

    TEST_URLS = "/some_url", "/some_other_url", "http://qwe.asd.zxc.com"

    def setUp(self):
        super(AuthenticationManagerIntegrationTests, self).setUp()

    def tearDown(self):
        # Unregister the manager so other test classes start clean.
        LTIView.authentication_manager = None
        self._logout()

    def _authenticate_user(self, request, user_id=None, username=None, email=None, **kwargs):
        if not username:
            username = "test_username"
        password = "test_password"
        user = User.objects.create_user(username=username, email=email, password=password)
        authenticated_user = authenticate(request, username=username, password=password)
        login(request, authenticated_user)
        self.addCleanup(lambda: user.delete())

    def test_authentication_hook_executed_if_not_authenticated(self):
        payload = self.get_correct_lti_payload()
        self.send_lti_request(payload)
        args, user_data = self.hook_manager.authentication_hook.call_args
        request = args[0]
        self.assertEqual(request.body, payload)
        self.assertFalse(request.user.is_authenticated)
        expected_user_data = {
            'username': self._data['lis_person_sourcedid'],
            'email': self._data['lis_person_contact_email_primary'],
            'user_id': self._data['user_id'],
            'extra_params': {}
        }
        self.assertEqual(user_data, expected_user_data)

    def test_authentication_hook_passes_optional_lti_data(self):
        payload = self.get_correct_lti_payload()
        self.hook_manager.optional_lti_parameters.return_value = {'resource_link_id': 'link_id', 'roles': 'roles'}
        self.send_lti_request(payload)
        args, user_data = self.hook_manager.authentication_hook.call_args
        request = args[0]
        self.assertEqual(request.body, payload)
        self.assertFalse(request.user.is_authenticated)
        expected_user_data = {
            'username': self._data['lis_person_sourcedid'],
            'email': self._data['lis_person_contact_email_primary'],
            'user_id': self._data['user_id'],
            'extra_params': {
                'roles': ['Student'],
                'link_id': 'resource_link_id',
            }
        }
        self.assertEqual(user_data, expected_user_data)

    @ddt.data(*TEST_URLS)
    def test_anonymous_lti_is_processed_if_hook_does_not_authenticate_user(self, expected_url):
        self.hook_manager.anonymous_redirect_to.return_value = expected_url
        response = self.send_lti_request(self.get_correct_lti_payload())
        self._verify_redirected_to(response, expected_url)
        self._verify_session_lti_contents(self.client.session, self._data)

        # verifying correct parameters were passed to auth manager hook
        request, lti_data = self.hook_manager.anonymous_redirect_to.call_args[0]
        self._verify_session_lti_contents(request.session, self._data)
        self._verify_lti_data(lti_data, self._data)

    @ddt.data(*TEST_URLS)
    def test_authenticated_lti_is_processed_if_hook_authenticates_user(self, expected_url):
        self.hook_manager.authentication_hook.side_effect = self._authenticate_user
        self.hook_manager.authenticated_redirect_to.return_value = expected_url
        response = self.send_lti_request(self.get_correct_lti_payload())
        self._verify_redirected_to(response, expected_url)

        # verifying correct parameters were passed to auth manager hook
        request, lti_data = self.hook_manager.authenticated_redirect_to.call_args[0]
        user = request.user
        self._verify_lti_created(user, self._data)
        self._verify_lti_data(lti_data, self._data)

    @ddt.data('custom', 'very custom', 'extremely custom')
    def test_authenticated_lti_saves_custom_key_if_specified(self, key):
        self.hook_manager.vary_by_key.return_value = key
        self.hook_manager.authentication_hook.side_effect = self._authenticate_user
        self.send_lti_request(self.get_correct_lti_payload())

        request, lti_data = self.hook_manager.authenticated_redirect_to.call_args[0]
        user = request.user
        self._verify_lti_created(user, self._data, key)
# Dashboard CRUD tests for the topicprio 'topic_edit' module component.
import pytest
from django.urls import reverse

from adhocracy4.dashboard import components
from adhocracy4.test.helpers import assert_template_response
from adhocracy4.test.helpers import redirect_target
from adhocracy4.test.helpers import setup_phase
from meinberlin.apps.topicprio.models import Topic
from meinberlin.apps.topicprio.phases import PrioritizePhase

component = components.modules.get('topic_edit')


@pytest.mark.django_db
def test_edit_view(client, phase_factory, topic_factory):
    # The component's base URL renders the topic list for project initiators.
    phase, module, project, item = setup_phase(
        phase_factory, topic_factory, PrioritizePhase)
    initiator = module.project.organisation.initiators.first()
    url = component.get_base_url(module)
    client.login(username=initiator.email, password='password')
    response = client.get(url)
    assert_template_response(response,
                             'meinberlin_topicprio/topic_dashboard_list.html')


@pytest.mark.django_db
def test_topic_create_view(client, phase_factory, category_factory):
    # Posting valid data creates a Topic and redirects back to the list.
    phase, module, project, item = setup_phase(
        phase_factory, None, PrioritizePhase)
    initiator = module.project.organisation.initiators.first()
    category = category_factory(module=module)
    url = reverse('a4dashboard:topic-create',
                  kwargs={'module_slug': module.slug})
    data = {
        'name': 'test',
        'description': 'test',
        'category': category.pk
    }
    client.login(username=initiator.email, password='password')
    response = client.post(url, data)
    assert redirect_target(response) == 'topic-list'
    topic = Topic.objects.get(name=data.get('name'))
    assert topic.description == data.get('description')
    assert topic.category.pk == data.get('category')


@pytest.mark.django_db
def test_topic_update_view(
        client, phase_factory, topic_factory, category_factory):
    # Posting valid data updates the existing Topic in place.
    phase, module, project, item = setup_phase(
        phase_factory, topic_factory, PrioritizePhase)
    initiator = module.project.organisation.initiators.first()
    category = category_factory(module=module)
    url = reverse('a4dashboard:topic-update',
                  kwargs={'pk': item.pk, 'year': item.created.year})
    data = {
        'name': 'test',
        'description': 'test',
        'category': category.pk
    }
    client.login(username=initiator.email, password='password')
    response = client.post(url, data)
    assert redirect_target(response) == 'topic-list'
    item.refresh_from_db()
    assert item.description == data.get('description')
    assert item.category.pk == data.get('category')


@pytest.mark.django_db
def test_topic_delete_view(client, phase_factory, topic_factory):
    # A DELETE request removes the Topic and redirects to the list.
    phase, module, project, item = setup_phase(
        phase_factory, topic_factory, PrioritizePhase)
    initiator = module.project.organisation.initiators.first()
    url = reverse('a4dashboard:topic-delete',
                  kwargs={'pk': item.pk, 'year': item.created.year})
    client.login(username=initiator.email, password='password')
    response = client.delete(url)
    assert redirect_target(response) == 'topic-list'
    assert not Topic.objects.exists()
"""Blueprint serving the documentation index page."""
from flask import Blueprint, render_template
# FIX: the flask.ext.* import loader was deprecated and removed in Flask 1.0;
# importing the extension package directly works on all Flask versions.
from flask_security import current_user

mod = Blueprint('documentation', __name__)


@mod.route('/documentation')
@mod.route('/documentation/index')
def doc_index():
    """Render the docs index; anonymous visitors get the placeholder
    'token' API key instead of a real one."""
    return render_template('documentation/index.html',
                           apikey='token' if current_user.is_anonymous else current_user.apikey)
from __future__ import absolute_import import unittest from gateway.dto import SensorDTO, SensorSourceDTO from gateway.api.serializers import SensorSerializer class SensorSerializerTest(unittest.TestCase): def test_serialize(self): # Valid room data = SensorSerializer.serialize(SensorDTO(id=1, name='foo', room=5), fields=['id', 'name', 'room']) self.assertEqual({'id': 1, 'name': 'foo', 'room': 5}, data) # Empty room data = SensorSerializer.serialize(SensorDTO(id=1, name='foo'), fields=['id', 'name', 'room']) self.assertEqual({'id': 1, 'name': 'foo', 'room': 255}, data) # No room data = SensorSerializer.serialize(SensorDTO(id=1, name='foo', room=5), fields=['id', 'name']) self.assertEqual({'id': 1, 'name': 'foo'}, data) def test_deserialize(self): # Valid room dto = SensorSerializer.deserialize({'id': 5, 'external_id': '0', 'source': {'type': 'master'}, 'physical_quantity': 'temperature', 'unit': 'celcius', 'name': 'bar', 'room': 10}) expected_dto = SensorDTO(id=5, external_id='0', source=SensorSourceDTO('master', name=None), physical_quantity='temperature', unit='celcius', name='bar', room=10) assert expected_dto == dto self.assertEqual(expected_dto, dto) self.assertEqual(['external_id', 'id', 'name', 'physical_quantity', 'room', 'source', 'unit'], sorted(dto.loaded_fields)) # Empty room dto = SensorSerializer.deserialize({'id': 5, 'name': 'bar', 'room': 255}) self.assertEqual(SensorDTO(id=5, name='bar'), dto) self.assertEqual(['id', 'name', 'room'], sorted(dto.loaded_fields)) # No room dto = SensorSerializer.deserialize({'id': 5, 'name': 'bar'}) self.assertEqual(SensorDTO(id=5, name='bar'), dto) self.assertEqual(['id', 'name'], sorted(dto.loaded_fields)) # Invalid physical_quantity with self.assertRaises(ValueError): _ = SensorSerializer.deserialize({'id': 5, 'physical_quantity': 'something', 'unit': 'celcius', 'name': 'bar'}) # Invalid unit with self.assertRaises(ValueError): _ = SensorSerializer.deserialize({'id': 5, 'physical_quantity': 'temperature', 'unit': 
'unicorns', 'name': 'bar'})
# Odoo (OpenERP) extension of stock.location: optionally materializes a
# per-location quantity column directly on product_product.
import logging
from datetime import datetime

from openerp import SUPERUSER_ID
from openerp.osv import orm, fields
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT

_logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)


class stock_location(orm.Model):
    _inherit = "stock.location"

    _columns = {
        # When set, a float column is created on product_product holding the
        # available qty of every product at this location.
        'update_product_bylocation': fields.boolean('Show Product location quantity on db', help='If check create a columns on product_product table for get product for this location'),
        # Technical name of the generated product_product column.
        'product_related_columns': fields.char('Columns Name on product_product')
    }

    def update_product_by_location(self, cr, uid, context=None):
        """Refresh the per-location quantity columns on product_product.

        For every location flagged update_product_bylocation, recompute
        qty_available of each non-service product in that location's context
        and write it (plus a refresh timestamp) only when the value changed.
        Intended to be run from a scheduled action (cron).
        """
        context = context or self.pool['res.users'].context_get(cr, uid)
        location_ids = self.search(cr, uid, [('update_product_bylocation', '=', True)], context=context)
        location_vals = {}
        start_time = datetime.now()
        date_product_by_location_update = start_time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        if location_ids:
            product_obj = self.pool['product.product']
            # Map location id -> name of its generated product column.
            for location in self.browse(cr, uid, location_ids, context):
                location_vals[location.id] = location.product_related_columns
            product_ids = product_obj.search(cr, uid, [('type', '!=', 'service')], context=context)
            product_context = context.copy()
            product_vals = {}
            for product_id in product_ids:
                product_vals[product_id] = {}
            for location_keys in location_vals.keys():
                # Setting 'location' in context makes qty_available be
                # computed for that specific location.
                product_context['location'] = location_keys
                for product in product_obj.browse(cr, uid, product_ids, product_context):
                    if location_vals[location_keys] and (product[location_vals[location_keys]] != product.qty_available):
                        product_vals[product.id][location_vals[location_keys]] = product.qty_available

            if product_vals:
                for product_id in product_vals.keys():
                    product_val = product_vals[product_id]
                    if product_val:
                        # Only touch rows whose quantity actually changed.
                        product_val['date_product_by_location_update'] = date_product_by_location_update
                        product_obj.write(cr, uid, product_id, product_val, context)
        end_time = datetime.now()
        duration_seconds = (end_time - start_time)
        duration = '{sec}'.format(sec=duration_seconds)
        _logger.info(u'update_product_by_location get in {duration}'.format(duration=duration))
        return True

    def create_product_by_location(self, cr, location_name, context):
        """Create (as superuser) a manual float field on product.product named
        after the location; returns (field_id, field_name).

        The 'manual': True key in the context is what makes the ORM actually
        add the column to the product_product table.
        """
        model_id = self.pool['ir.model.data'].get_object_reference(cr, SUPERUSER_ID, 'product', 'model_product_product')[1]
        fields_value = {
            'field_description': location_name,
            'groups': [[6, False, []]],
            'model_id': model_id,
            'name': 'x_{location_name}'.format(location_name=location_name).lower().replace(' ', '_'),
            'readonly': False,
            'required': False,
            'select_level': '0',
            'serialization_field_id': False,
            'translate': False,
            'ttype': 'float',
        }
        context_field = context.copy()
        context_field.update(
            {
                'department_id': False,
                'lang': 'it_IT',
                'manual': True,  # required for create columns on table
                'uid': 1
            }
        )
        fields_id = self.pool['ir.model.fields'].create(cr, SUPERUSER_ID, fields_value, context_field)
        return fields_id, fields_value['name']

    def write(self, cr, uid, ids, vals, context=None):
        """On enabling update_product_bylocation, create the backing column
        and store its name in product_related_columns before writing."""
        context = context or self.pool['res.users'].context_get(cr, uid)
        if vals.get('update_product_bylocation', False):
            for location in self.browse(cr, uid, ids, context):
                field_id, field_name = self.create_product_by_location(cr, location.name, context)
                vals['product_related_columns'] = field_name
        return super(stock_location, self).write(cr, uid, ids, vals, context)
from coriolis import utils
from coriolis.conductor.rpc import client as rpc_conductor_client
from coriolis.minion_manager.rpc import client as rpc_minion_manager_client


class API(object):
    """Thin facade over the conductor and minion-manager RPC clients.

    Every method forwards its arguments unchanged to the matching RPC
    call; no endpoint state is kept on this object itself.
    """

    def __init__(self):
        self._rpc_conductor_client = rpc_conductor_client.ConductorClient()
        self._rpc_minion_manager_client = (
            rpc_minion_manager_client.MinionManagerClient())

    def create(self, ctxt, name, endpoint_type, description, connection_info,
               mapped_regions):
        """Create a new endpoint through the conductor."""
        conductor = self._rpc_conductor_client
        return conductor.create_endpoint(
            ctxt, name, endpoint_type, description, connection_info,
            mapped_regions)

    def update(self, ctxt, endpoint_id, properties):
        """Update the given endpoint's properties."""
        conductor = self._rpc_conductor_client
        return conductor.update_endpoint(ctxt, endpoint_id, properties)

    def delete(self, ctxt, endpoint_id):
        """Delete the given endpoint (no return value)."""
        self._rpc_conductor_client.delete_endpoint(ctxt, endpoint_id)

    def get_endpoints(self, ctxt):
        """List all endpoints."""
        return self._rpc_conductor_client.get_endpoints(ctxt)

    def get_endpoint(self, ctxt, endpoint_id):
        """Fetch a single endpoint by id."""
        return self._rpc_conductor_client.get_endpoint(ctxt, endpoint_id)

    def validate_connection(self, ctxt, endpoint_id):
        """Check that the endpoint's connection info actually works."""
        conductor = self._rpc_conductor_client
        return conductor.validate_endpoint_connection(ctxt, endpoint_id)

    @utils.bad_request_on_error("Invalid destination environment: %s")
    def validate_target_environment(self, ctxt, endpoint_id, target_env):
        """Validate a destination environment against the endpoint."""
        conductor = self._rpc_conductor_client
        return conductor.validate_endpoint_target_environment(
            ctxt, endpoint_id, target_env)

    @utils.bad_request_on_error("Invalid source environment: %s")
    def validate_source_environment(self, ctxt, endpoint_id, source_env):
        """Validate a source environment against the endpoint."""
        conductor = self._rpc_conductor_client
        return conductor.validate_endpoint_source_environment(
            ctxt, endpoint_id, source_env)

    @utils.bad_request_on_error("Invalid source minion pool environment: %s")
    def validate_endpoint_source_minion_pool_options(
            self, ctxt, endpoint_id, pool_environment):
        """Validate source minion pool options via the minion manager."""
        manager = self._rpc_minion_manager_client
        return manager.validate_endpoint_source_minion_pool_options(
            ctxt, endpoint_id, pool_environment)

    @utils.bad_request_on_error(
        "Invalid destination minion pool environment: %s")
    def validate_endpoint_destination_minion_pool_options(
            self, ctxt, endpoint_id, pool_environment):
        """Validate destination minion pool options via the minion manager."""
        manager = self._rpc_minion_manager_client
        return manager.validate_endpoint_destination_minion_pool_options(
            ctxt, endpoint_id, pool_environment)
""" Global settings file. Everything in here is imported *before* everything in settings.py. This means that this file is used for default, fixed and global varibles, and then settings.py is used to overwrite anything here as well as adding settings particular to the install. Note that there are no tuples here, as they are immutable. Please use lists, so that in settings.py we can do list.append() """ import os from os.path import exists, join import sys sys.path.append('web') DEBUG = True TIME_ZONE = 'Europe/London' LANGUAGE_CODE = 'en_GB' SITE_ID = 1 USE_I18N = False HOME_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # the parent directory of SCRAPERWIKI_DIR SCRAPERWIKI_DIR = HOME_DIR + '/web/' MEDIA_DIR = SCRAPERWIKI_DIR + 'media' MEDIA_URL = 'http://media.scraperwiki.com/' MEDIA_ADMIN_DIR = SCRAPERWIKI_DIR + '/media-admin' LOGIN_URL = '/login/' HOME_DIR = "" OVERDUE_SQL = "(DATE_ADD(last_run, INTERVAL run_interval SECOND) < NOW() or last_run is null)" OVERDUE_SQL_PARAMS = [] URL_ROOT = "" MEDIA_ROOT = URL_ROOT + 'media/' ADMIN_MEDIA_PREFIX = URL_ROOT + '/media-admin/' SECRET_KEY = 'x*#sb54li2y_+b-ibgyl!lnd^*#=bzv7bj_ypr2jvon9mwii@z' TEMPLATE_LOADERS = ( ('django.template.loaders.cached.Loader', ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', )), ) MIDDLEWARE_CLASSES = [ 'middleware.exception_logging.ExceptionLoggingMiddleware', 'middleware.improved_gzip.ImprovedGZipMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django_notify.middleware.NotificationsMiddleware', 'pagination.middleware.PaginationMiddleware', 'middleware.csrfcookie.CsrfAlwaysSetCookieMiddleware', 'api.middleware.CORSMiddleware' ] AUTHENTICATION_BACKENDS = [ 
'frontend.email_auth.EmailOrUsernameModelBackend', 'django.contrib.auth.backends.ModelBackend' ] ROOT_URLCONF = 'urls' TEMPLATE_DIRS = [ join(SCRAPERWIKI_DIR, 'templates'), ] TEMPLATE_CONTEXT_PROCESSORS = [ 'django.contrib.auth.context_processors.auth', 'django.core.context_processors.debug', 'django.core.context_processors.i18n', 'django.core.context_processors.media', 'django.core.context_processors.request', 'django.contrib.messages.context_processors.messages', 'django_notify.context_processors.notifications', 'frontend.context_processors.site', 'frontend.context_processors.template_settings', 'frontend.context_processors.vault_info', # 'frontend.context_processors.site_messages', # disabled as not used since design revamp April 2011 ] SCRAPERWIKI_APPS = [ # the following are scraperwiki apps 'frontend', 'codewiki', 'api', 'cropper', 'kpi', 'documentation', #'devserver', ] INSTALLED_APPS = [ 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.admin', 'django.contrib.comments', 'django.contrib.markup', 'registration', 'south', 'profiles', 'django.contrib.humanize', 'django.contrib.messages', 'django_notify', 'tagging', 'contact_form', 'captcha', 'pagination', 'compressor', ] + SCRAPERWIKI_APPS TEST_RUNNER = 'scraperwiki_tests.run_tests' ACCOUNT_ACTIVATION_DAYS = 3650 # If you haven't activated in 10 years then tough luck! 
AUTH_PROFILE_MODULE = 'frontend.UserProfile' INTERNAL_IPS = ['127.0.0.1',] NOTIFICATIONS_STORAGE = 'session.SessionStorage' REGISTRATION_BACKEND = "frontend.backends.UserWithNameBackend" FORCE_LOWERCASE_TAGS = True SCRAPER_LIBS_DIR = join(HOME_DIR, "scraperlibs") SEND_BROKEN_LINK_EMAILS = DEBUG == False SCRAPERS_PER_PAGE = 50 MAX_API_ITEMS = 500 DEFAULT_API_ITEMS = 100 ABSOLUTE_URL_OVERRIDES = { 'auth.user': lambda o: o.get_profile().get_absolute_url() } TEMPLATE_SETTINGS = [ 'API_URL', 'ORBITED_URL', 'MAX_DATA_POINTS', 'MAX_MAP_POINTS', 'REVISION', 'VIEW_URL', 'CODEMIRROR_URL' ] try: REVISION = open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'revision.txt')).read()[:-1] except: REVISION = "" MAX_DATA_POINTS = 500 BLOG_FEED = 'http://blog.scraperwiki.com/feed/atom' DATA_TABLE_ROWS = 10 RSS_ITEMS = 50 VIEW_SCREENSHOT_SIZES = {'small': (110, 73), 'medium': (220, 145), 'large': (800, 600)} SCRAPER_SCREENSHOT_SIZES = {'small': (110, 73), 'medium': (220, 145) } CODEMIRROR_VERSION = "0.94" CODEMIRROR_URL = "CodeMirror-%s/" % CODEMIRROR_VERSION APPROXLENOUTPUTLIMIT = 3000 CONFIGFILE = "/var/www/scraperwiki/uml/uml.cfg" HTTPPROXYURL = "http://localhost:9005" DISPATCHERURL = "http://localhost:9000" PAGINATION_DEFAULT_PAGINATION=20 SOUTH_TESTS_MIGRATE = True SESSION_COOKIE_SECURE = False import logging from middleware import exception_logging logging.custom_handlers = exception_logging LOGGING = { 'version': 1, 'disable_existing_loggers': True, 'formatters': { 'simple': { 'format' : '%(asctime)s %(name)s %(filename)s:%(lineno)s %(levelname)s: %(message)s' } }, 'handlers': { # Include the default Django email handler for errors # This is what you'd get without configuring logging at all. 
'mail_admins': { 'class': 'django.utils.log.AdminEmailHandler', 'level': 'ERROR', # But the emails are plain text by default - HTML is nicer 'include_html': True, }, # Log to a text file that can be rotated by logrotate 'logfile': { 'class': 'logging.custom_handlers.WorldWriteRotatingFileHandler', 'filename': '/var/log/scraperwiki/django-www.log', 'mode': 'a', 'maxBytes': 100000, 'backupCount': 5, 'formatter': 'simple' }, }, 'loggers': { # Again, default Django configuration to email unhandled exceptions 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, # Might as well log any errors anywhere else in Django # (so use empty string for name here to catch anything) '': { 'handlers': ['logfile'], 'level': DEBUG and 'DEBUG' or 'ERROR', 'propagate': False, }, # Your own app - this assumes all your logger names start with "myapp." #'myapp': { # 'handlers': ['logfile'], # 'level': 'WARNING', # Or maybe INFO or DEBUG # 'propagate': False #}, }, } INSTALLED_APPS += ['icanhaz'] ICANHAZ_DIRS = [SCRAPERWIKI_DIR + 'templates/codewiki/js/']
import logging
import logging.handlers
import sys

# Logger that ships every record to a local HTTP endpoint as a GET request
# (logging.handlers.HTTPHandler encodes the record as query parameters).
logger = logging.getLogger('simple_example')
http_handler = logging.handlers.HTTPHandler('127.0.0.1:9999', '/httpevent',
                                            method='GET')
logger.addHandler(http_handler)

# BUGFIX: the original also built a second, blank HTTPHandler('','') and
# immediately discarded it -- dead code, removed.
#
# Read the first 10 lines of the file named on the command line, echo each
# to stdout and log it at CRITICAL level (so no level filtering drops it).
# The with-block guarantees the file is closed (the original leaked it).
with open(sys.argv[1]) as f:
    for _ in range(10):
        line = f.readline()
        print(line)
        logger.critical(line)
import attr

from osis_common.ddd import interface


@attr.s(frozen=True, slots=True)
class EntiteUclDTO(interface.DTO):
    """Immutable, slotted DTO for a UCL entity: its acronym and its title."""
    sigle = attr.ib(type=str)  # entity acronym ("sigle")
    intitule = attr.ib(type=str)  # entity title ("intitule")
"""Invoice/payment blueprint ("siema").

Creates bitcoin invoices through a pypayd JSON-RPC service, polls pypayd
for payment confirmations, and credits the user's account once a payment
has enough confirmations.
"""
from flask import (
    Blueprint,
    render_template,
    redirect,
    url_for,
    request,
    flash,
    current_app,
    g,
)
from flask_login import (
    current_user,
)
from flask_wtf import Form
from wtforms import (
    SubmitField,
    BooleanField,
    DecimalField,
)
from wtforms.validators import DataRequired
from flask_mail import Message
import requests
import json
from datetime import datetime
from datetime import timedelta
from topitup import db
from frontend import login_required
from nav import (
    nav,
    top_nav
)


class Payd(db.Model):
    """One invoice row; time_payment == epoch 0 marks "not paid yet"."""
    __bind_key__ = "topitup"
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer)
    time_creation = db.Column(db.DateTime)
    time_payment = db.Column(db.DateTime)
    order_id = db.Column(db.String(35), unique=True)
    native_price = db.Column(db.Integer)
    native_currency = db.Column(db.String(3))
    btc_price = db.Column(db.Integer)
    address = db.Column(db.String(35))
    txn = db.Column(db.Integer, default=0)

    def __init__(self, id, user_id, time_creation, time_payment, order_id,
                 native_price, native_currency, btc_price, address, txn):
        self.id = id
        self.user_id = user_id
        self.time_creation = time_creation
        self.time_payment = time_payment
        self.order_id = order_id
        self.native_price = native_price
        self.native_currency = native_currency
        self.btc_price = btc_price
        self.address = address
        self.txn = txn

    def __repr__(self):
        return '<Payd %r>' % self.id


# Best-effort table creation; ignore "already exists" style errors at import.
try:
    db.create_all(bind='topitup')
except:
    pass

siema = Blueprint('siema', __name__)


class LoginForm(Form):
    """Invoice creation form: amount of credits plus a mandatory TOC tick."""
    amount = DecimalField('Amount of Credits', validators=[DataRequired()])
    confirm_me = BooleanField('Please confirm you agree to TOC',
                              validators=[DataRequired()])
    submit = SubmitField("Buy Credits")


@siema.before_request
def before_request():
    """Stash the current user's identity and credit balance on flask.g."""
    try:
        g.user = current_user.username.decode('utf-8')
        g.email = current_user.email.decode('utf-8')
        # amount of Credits in user's account
        g.credits = current_user.neuro
        g.user_id = current_user.id
    except AttributeError:
        # Anonymous user: current_user lacks the attributes above.
        g.user = None
        g.credits = None
    nav.register_element('top_nav',
                         top_nav(g.user, g.credits))


@siema.route('/invoices/checkitup')
def checkitup():
    """Poll pypayd for every unpaid invoice and book confirmed payments."""
    # we collect all invoices which are not paid
    sql_query = Payd.query.filter_by(
        time_payment=datetime.fromtimestamp(0)).all()
    for invoice in sql_query:
        print(invoice)
        howold = current_app.config['WARRANTY_TIME']
        # ignore all invoices which are older than WARRANTY_TIME days
        if invoice.time_creation + timedelta(days=howold) > datetime.now():
            print(invoice.order_id)
            # initiate conversation with pypayd
            pypayd_headers = {'content-type': 'application/json'}
            pypayd_payload = {
                "method": "check_order_status",
                "params": {"order_id": invoice.order_id},
                "jsonrpc": "2.0",
                "id": 0,
            }
            # BUGFIX: this RPC call was commented out while pypayd_response
            # was still used below, which raised NameError at runtime.
            pypayd_response = requests.post(
                current_app.config['PYPAYD_URI'],
                data=json.dumps(pypayd_payload),
                headers=pypayd_headers).json()
            print(pypayd_response)
            howmanyconfirmations = current_app.config['CONFIRMATIONS']
            # NOTE(review): this reads the 'amount' field although it is
            # compared against a confirmation count -- verify against the
            # pypayd check_order_status response schema.
            confirmations = pypayd_response['result']['amount']
            # Huhu! We have a new payment!
            if invoice.txn == 0 and confirmations > howmanyconfirmations:
                # Send an email message if payment was registered
                # From: DEFAULT_MAIL_SENDER
                msg = Message()
                msg.add_recipient(current_user.email)
                msg.subject = "Payment confirmation"
                msg.body = ""
                # TODO(review): the Message is built but never passed to
                # Flask-Mail's send(); confirm sending is wired up elsewhere.
                # Register payment
                invoice.time_payment = datetime.now()
                # Register paid amount in the main database
                # NOTE(review): before_request reads current_user.neuro but
                # this writes current_user.credits -- confirm the field name.
                balance = current_user.credits
                current_user.credits = balance + pypayd_response['result']['amount']
                # Housekeeping
                invoice.txn = confirmations
    # register all transactions in databases
    db.session.commit()
    flash('Thank you.', 'info')
    return redirect(url_for('frontend.index'))


@siema.route('/invoices/id/<orderid>')
@login_required
def showinvoice(orderid):
    """Render a single invoice looked up by its pypayd order id."""
    sql_query = Payd.query.filter_by(
        order_id=orderid).first()
    return render_template('invoice-id.html',
                           invoice=sql_query,
                           )


@siema.route('/invoices/new', methods=('GET', 'POST'))
@login_required
def new():
    """Create a new invoice via pypayd and show the payment details."""
    form = LoginForm()
    if form.validate_on_submit():
        # NOTE(review): raw form value, a string -- stored into the Integer
        # native_price column below; confirm the driver coerces it.
        amount = request.form['amount']
        confirm_me = False
        if 'confirm_me' in request.form:
            confirm_me = True
        if confirm_me is False:
            # NOTE(review): no-op branch -- DataRequired on confirm_me
            # already forces the checkbox; left as found.
            pass
        # get a new transaction id
        sql_query = Payd.query.all()
        new_local_transaction_id = len(sql_query)
        # TODO: deal with an unlikely event of concurrency
        # initiate conversation with pypayd
        pypayd_headers = {'content-type': 'application/json'}
        pypayd_payload = {
            "method": "create_order",
            "params": {"amount": amount, "qr_code": True},
            "jsonrpc": "2.0",
            "id": new_local_transaction_id,
        }
        pypayd_response = requests.post(
            current_app.config['PYPAYD_URI'],
            data=json.dumps(pypayd_payload),
            headers=pypayd_headers).json()
        print(pypayd_response)
        # insert stuff into our transaction database
        to_db = Payd(
            None,
            g.user_id,
            datetime.utcnow(),
            datetime.fromtimestamp(0),  # this is not a paid invoice, yet
            pypayd_response['result']['order_id'],
            amount,
            "EUR",
            pypayd_response['result']['amount'],
            pypayd_response['result']['receiving_address'],
            0,
        )
        db.session.add(to_db)
        db.session.commit()
        payme = {
            'credits': amount,
            'btc': pypayd_response['result']['amount'],
            'address': pypayd_response['result']['receiving_address'],
            'image': pypayd_response['result']['qr_image'],
        }
        # generate approximate time to pay the invoice
        pay_time = datetime.now() + timedelta(minutes=45)
        # and finally show an invoice to the customer
        return render_template('invoice-payme.html', payme=payme,
                               pay_time=pay_time)
    return render_template('invoice-new.html', form=form)


@siema.route('/invoices/', defaults={'page': 1})
@siema.route('/invoices/page/<int:page>')
@login_required
def index(page):
    """Paginated list of the current user's own invoices."""
    # downloading all records related to user
    sql_query = Payd.query.filter_by(
        user_id=g.user_id).paginate(page,
                                    current_app.config['INVOICES_PER_PAGE'])
    return render_template('invoices.html',
                           invoices=sql_query,
                           )


@siema.route('/admin/', defaults={'page': 1})
@siema.route('/admin/page/<int:page>')
@login_required
def admin(page):
    """Paginated list of ALL invoices, restricted to the admin user."""
    # only user with id = 666 can enter this route
    if g.user_id == 666:
        sql_query = Payd.query.paginate(page, 50)
        return render_template('invoices.html',
                               invoices=sql_query,
                               )
    else:
        flash('You are not admin and you can see your own invoices only!',
              'warning')
        return redirect(url_for('siema.index'))
from django.dispatch import receiver
from assessments.business import scores_encodings_deadline
from base.signals import publisher


@receiver(publisher.compute_scores_encodings_deadlines)
def compute_scores_encodings_deadlines(sender, **kwargs):
    """Recompute deadlines for the signalled offer year calendar."""
    scores_encodings_deadline.compute_deadline(kwargs['offer_year_calendar'])


@receiver(publisher.compute_student_score_encoding_deadline)
def compute_student_score_encoding_deadline(sender, **kwargs):
    """Recompute the deadline for a single student's session exam."""
    scores_encodings_deadline.compute_deadline_by_student(kwargs['session_exam_deadline'])


@receiver(publisher.compute_all_scores_encodings_deadlines)
def compute_all_scores_encodings_deadlines(sender, **kwargs):
    """Recompute every deadline tied to the signalled academic calendar."""
    scores_encodings_deadline.recompute_all_deadlines(kwargs['academic_calendar'])
from django.contrib import auth
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls.base import reverse


class TestAccountRegistration(TestCase):
    """Registration flow: success redirects to login, failures back to register."""

    def setUp(self):
        # create one user for convenience
        response = self.client.post(
            reverse('account:register'),
            {
                'username': 'Alice',
                'email': 'alice@localhost',
                'password': 'supasecret',
                'password2': 'supasecret',
            },
            follow=True
        )
        self.assertEqual(response.redirect_chain[0][1], 302)
        self.assertEqual(response.redirect_chain[0][0], reverse('account:login'))
        self.assertEqual(response.status_code, 200)

    def test_registration(self):
        # setUp's user exists with the expected e-mail.
        self.assertEqual(len(User.objects.all()), 1)
        user = User.objects.get(username='Alice')
        self.assertEqual(user.email, 'alice@localhost')
        # A second, distinct registration also succeeds.
        response = self.client.post(
            reverse('account:register'),
            {
                'username': 'Bob',
                'email': 'bob@localhost',
                'password': 'foo',
                'password2': 'foo',
            },
            follow=True
        )
        self.assertEqual(response.redirect_chain[0][1], 302)
        self.assertEqual(response.redirect_chain[0][0], reverse('account:login'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(User.objects.all()), 2)

    def test_duplicate_username(self):
        # Same username, different e-mail: rejected, no new user created.
        response = self.client.post(
            reverse('account:register'),
            {
                'username': 'Alice',
                'email': 'alice2@localhost',
                'password': 'supasecret',
                'password2': 'supasecret',
            },
            follow=True
        )
        self.assertEqual(response.redirect_chain[0][1], 302)
        self.assertEqual(response.redirect_chain[0][0], reverse('account:register'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(User.objects.all()), 1)

    def test_duplicate_email(self):
        # Different username, same e-mail: rejected, no new user created.
        response = self.client.post(
            reverse('account:register'),
            {
                'username': 'Alice2000',
                'email': 'alice@localhost',
                'password': 'supasecret',
                'password2': 'supasecret',
            },
            follow=True
        )
        self.assertEqual(response.redirect_chain[0][1], 302)
        self.assertEqual(response.redirect_chain[0][0], reverse('account:register'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(User.objects.all()), 1)

    def test_non_matching_passwords(self):
        # Mismatched password/password2: rejected, no new user created.
        response = self.client.post(
            reverse('account:register'),
            {
                'username': 'Bob',
                'email': 'bob@localhost',
                'password': 'foo',
                'password2': 'bar',
            },
            follow=True
        )
        self.assertEqual(response.redirect_chain[0][1], 302)
        self.assertEqual(response.redirect_chain[0][0], reverse('account:register'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(User.objects.all()), 1)

    def test_form_view(self):
        # Plain GET of the registration form renders.
        response = self.client.get(reverse('account:register'))
        self.assertEqual(response.status_code, 200)


class TestLogin(TestCase):
    """Login flow: credentials checks, redirects, and view access rules."""

    def setUp(self):
        # create one user for convenience
        response = self.client.post(
            reverse('account:register'),
            {
                'username': 'Alice',
                'email': 'alice@localhost',
                'password': 'supasecret',
                'password2': 'supasecret',
            },
            follow=True
        )
        self.assertEqual(response.redirect_chain[0][1], 302)
        self.assertEqual(response.redirect_chain[0][0], reverse('account:login'))
        self.assertEqual(response.status_code, 200)

    def test_login(self):
        # Valid credentials redirect to the account home page.
        response = self.client.post(
            reverse('account:login'),
            {'username': 'Alice', 'password': 'supasecret'},
            follow=True
        )
        self.assertEqual(response.redirect_chain[0][1], 302)
        self.assertEqual(response.redirect_chain[0][0], reverse('account:home'))
        self.assertEqual(response.status_code, 200)

    def test_disabled_login(self):
        # Deactivated account bounces back to the login page.
        # (QuerySet.update() returns a row count; the binding is unused.)
        user = User.objects.all().update(is_active=False)
        response = self.client.post(
            reverse('account:login'),
            {'username': 'Alice', 'password': 'supasecret'},
            follow=True
        )
        self.assertEqual(response.redirect_chain[0][1], 302)
        self.assertEqual(response.redirect_chain[0][0], reverse('account:login'))
        self.assertEqual(response.status_code, 200)

    def test_wrong_credentials(self):
        # Wrong password bounces back to the login page.
        response = self.client.post(
            reverse('account:login'),
            {'username': 'Alice', 'password': 'wrong'},
            follow=True
        )
        self.assertEqual(response.redirect_chain[0][1], 302)
        self.assertEqual(response.redirect_chain[0][0], reverse('account:login'))
        self.assertEqual(response.status_code, 200)

    def test_wrong_user(self):
        # Unknown username bounces back to the login page.
        response = self.client.post(
            reverse('account:login'),
            {'username': 'Bob', 'password': 'supasecret'},
            follow=True
        )
        self.assertEqual(response.redirect_chain[0][1], 302)
        self.assertEqual(response.redirect_chain[0][0], reverse('account:login'))
        self.assertEqual(response.status_code, 200)

    def test_login_view(self):
        # Plain GET of the login form renders.
        response = self.client.get(reverse('account:login'))
        self.assertEqual(response.status_code, 200)

    def test_login_view_being_logged_in(self):
        # Once logged in, both GET and POST on the login view redirect home.
        response = self.client.post(
            reverse('account:login'),
            {'username': 'Alice', 'password': 'supasecret'},
            follow=True
        )
        response = self.client.get(
            reverse('account:login'),
            follow=True
        )
        self.assertEqual(response.redirect_chain[0][1], 302)
        self.assertEqual(response.redirect_chain[0][0], reverse('account:home'))
        self.assertEqual(response.status_code, 200)
        response = self.client.post(
            reverse('account:login'),
            {'username': 'Alice', 'password': 'supasecret'},
            follow=True
        )
        self.assertEqual(response.redirect_chain[0][1], 302)
        self.assertEqual(response.redirect_chain[0][0], reverse('account:home'))
        self.assertEqual(response.status_code, 200)

    def test_home_view_while_not_logged_in(self):
        # Anonymous access to home redirects to login (with ?next=...).
        response = self.client.get(reverse('account:home'), follow=True)
        self.assertEqual(response.redirect_chain[0][1], 302)
        self.assertTrue(response.redirect_chain[0][0].startswith(reverse('account:login')))
        self.assertEqual(response.status_code, 200)

    def test_home_view_while_logged_in(self):
        # Authenticated access to home renders directly.
        response = self.client.post(
            reverse('account:login'),
            {'username': 'Alice', 'password': 'supasecret'},
            follow=True
        )
        response = self.client.get(reverse('account:home'))
        self.assertEqual(response.status_code, 200)

    def test_register_view_while_logged_in(self):
        # Authenticated users are redirected away from the register view.
        response = self.client.post(
            reverse('account:login'),
            {'username': 'Alice', 'password': 'supasecret'},
            follow=True
        )
        response = self.client.get(reverse('account:register'), follow=True)
        self.assertEqual(response.redirect_chain[0][1], 302)
        self.assertTrue(response.redirect_chain[0][0].startswith(reverse('account:home')))
        self.assertEqual(response.status_code, 200)

    def test_logout(self):
        # Logout redirects to the site home and clears authentication.
        response = self.client.post(
            reverse('account:login'),
            {'username': 'Alice', 'password': 'supasecret'},
            follow=True
        )
        user = auth.get_user(self.client)
        self.assertTrue(user.is_authenticated)
        response = self.client.get(reverse('account:logout'), follow=True)
        self.assertEqual(response.redirect_chain[0][1], 302)
        self.assertTrue(response.redirect_chain[0][0].startswith(reverse('base:home')))
        self.assertEqual(response.status_code, 200)
        user = auth.get_user(self.client)
        self.assertFalse(user.is_authenticated)
import analytics
import anyjson
from channels import Group
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers

from lily.accounts.api.serializers import RelatedAccountSerializer
from lily.api.fields import SanitizedHtmlCharField
from lily.api.nested.mixins import RelatedSerializerMixin
from lily.api.nested.serializers import WritableNestedSerializer
from lily.api.serializers import ContentTypeSerializer
from lily.contacts.api.serializers import RelatedContactSerializer
from lily.contacts.models import Function
from lily.users.api.serializers import RelatedLilyUserSerializer, RelatedTeamSerializer
from lily.utils.api.serializers import RelatedTagSerializer
from lily.utils.request import is_external_referer

from ..models import Case, CaseStatus, CaseType


class CaseStatusSerializer(serializers.ModelSerializer):
    """
    Serializer for case status model.
    """
    class Meta:
        model = CaseStatus
        fields = (
            'id',
            'name',
        )


class RelatedCaseStatusSerializer(RelatedSerializerMixin, CaseStatusSerializer):
    pass


class CaseTypeSerializer(serializers.ModelSerializer):
    """
    Serializer for case type model.
    """
    class Meta:
        model = CaseType
        fields = (
            'id',
            'is_archived',
            'name',
            'use_as_filter',
        )


class RelatedCaseTypeSerializer(RelatedSerializerMixin, CaseTypeSerializer):
    pass


class CaseSerializer(WritableNestedSerializer):
    """
    Serializer for the case model.
    """
    # Set non mutable fields.
    created_by = RelatedLilyUserSerializer(read_only=True)
    content_type = ContentTypeSerializer(
        read_only=True,
        help_text='This is what the object is identified as in the back-end.',
    )

    # Related fields.
    account = RelatedAccountSerializer(
        required=False,
        allow_null=True,
        help_text='Account for which the case is being created.',
    )
    contact = RelatedContactSerializer(
        required=False,
        allow_null=True,
        help_text='Contact for which the case is being created.',
    )
    assigned_to = RelatedLilyUserSerializer(
        required=False,
        allow_null=True,
        assign_only=True,
        help_text='Person which the case is assigned to.',
    )
    assigned_to_teams = RelatedTeamSerializer(
        many=True,
        required=False,
        assign_only=True,
        help_text='List of teams the case is assigned to.',
    )
    type = RelatedCaseTypeSerializer(
        assign_only=True,
        help_text='The type of case.',
    )
    status = RelatedCaseStatusSerializer(
        assign_only=True,
        help_text='Status of the case.',
    )
    tags = RelatedTagSerializer(
        many=True,
        required=False,
        create_only=True,
        help_text='Any tags used to further categorize the case.',
    )
    description = SanitizedHtmlCharField(
        help_text='Any extra text to describe the case (supports Markdown).',
    )

    # Show string versions of fields.
    priority_display = serializers.CharField(
        source='get_priority_display',
        read_only=True,
        help_text='Human readable value of the case\'s priority.',
    )

    def validate(self, data):
        """Cross-field validation: contact must work at the account, and at
        least one of account/contact must be present (relaxed for PATCH)."""
        # Related fields may arrive as {'id': ...} dicts or as bare ids.
        contact_id = data.get('contact', {})
        if isinstance(contact_id, dict):
            contact_id = contact_id.get('id')

        account_id = data.get('account', {})
        if isinstance(account_id, dict):
            account_id = account_id.get('id')

        if contact_id and account_id:
            if not Function.objects.filter(contact_id=contact_id, account_id=account_id).exists():
                raise serializers.ValidationError({'contact': _('Given contact must work at the account.')})

        # Check if we are related and if we only passed in the id, which means user just wants new reference.
        errors = {
            'account': _('Please enter an account and/or contact.'),
            'contact': _('Please enter an account and/or contact.'),
        }

        if not self.partial:
            # For POST or PUT we always want to check if either is set.
            if not (account_id or contact_id):
                raise serializers.ValidationError(errors)
        else:
            # For PATCH only check the data if both account and contact are passed.
            if ('account' in data and 'contact' in data) and not (account_id or contact_id):
                raise serializers.ValidationError(errors)

        return super(CaseSerializer, self).validate(data)

    def create(self, validated_data):
        """Create a case, broadcast (un)assignment over channels, and track
        the creation in segment analytics (outside tests)."""
        user = self.context.get('request').user
        assigned_to = validated_data.get('assigned_to')

        validated_data.update({
            'created_by_id': user.pk,
        })

        if assigned_to:
            # Notify the tenant group so assignment lists refresh live.
            Group('tenant-%s' % user.tenant.id).send({
                'text': anyjson.dumps({
                    'event': 'case-assigned',
                }),
            })

            # Flag for the assignee's "new assignment" notification,
            # unless they assigned the case to themselves.
            if assigned_to.get('id') != user.pk:
                validated_data.update({
                    'newly_assigned': True,
                })
        else:
            Group('tenant-%s' % user.tenant.id).send({
                'text': anyjson.dumps({
                    'event': 'case-unassigned',
                }),
            })

        instance = super(CaseSerializer, self).create(validated_data)

        # Track newly created accounts in segment.
        if not settings.TESTING:
            analytics.track(
                user.id,
                'case-created', {
                    'expires': instance.expires,
                    'assigned_to_id': instance.assigned_to_id if instance.assigned_to else '',
                    'creation_type': 'automatic' if is_external_referer(self.context.get('request')) else 'manual',
                },
            )

        return instance

    def update(self, instance, validated_data):
        """Update a case: auto-archive on 'Closed', manage the
        newly_assigned flag, and broadcast (un)assignment events."""
        user = self.context.get('request').user
        status_id = validated_data.get('status', instance.status_id)
        assigned_to = validated_data.get('assigned_to')

        if assigned_to:
            assigned_to = assigned_to.get('id')

        if isinstance(status_id, dict):
            status_id = status_id.get('id')

        status = CaseStatus.objects.get(pk=status_id)

        # Automatically archive the case if the status is set to 'Closed'.
        if status.name == 'Closed' and 'is_archived' not in validated_data:
            validated_data.update({
                'is_archived': True
            })

        # Check if the case being reassigned. If so we want to notify that user.
        if assigned_to and assigned_to != user.pk:
            validated_data.update({
                'newly_assigned': True,
            })
        elif 'assigned_to' in validated_data and not assigned_to:
            # Case is unassigned, so clear newly assigned flag.
            validated_data.update({
                'newly_assigned': False,
            })

        if (('status' in validated_data and status.name == 'Open') or
                ('is_archived' in validated_data and not validated_data.get('is_archived'))):
            # Case is reopened or unarchived, so we want to notify the user again.
            validated_data.update({
                'newly_assigned': True,
            })

        # NOTE(review): create() uses anyjson.dumps while update() uses
        # anyjson.serialize -- presumably aliases in anyjson; confirm and
        # unify for consistency.
        if 'assigned_to' in validated_data or instance.assigned_to_id:
            Group('tenant-%s' % user.tenant.id).send({
                'text': anyjson.serialize({
                    'event': 'case-assigned',
                }),
            })

        if (not instance.assigned_to_id or
                instance.assigned_to_id and 'assigned_to' in validated_data and
                not validated_data.get('assigned_to')):
            Group('tenant-%s' % user.tenant.id).send({
                'text': anyjson.serialize({
                    'event': 'case-unassigned',
                }),
            })

        return super(CaseSerializer, self).update(instance, validated_data)

    class Meta:
        model = Case
        fields = (
            'id',
            'account',
            'assigned_to',
            'assigned_to_teams',
            'contact',
            'content_type',
            'created',
            'created_by',
            'description',
            'expires',
            'is_archived',
            'modified',
            'newly_assigned',
            'priority',
            'priority_display',
            'status',
            'tags',
            'subject',
            'type',
        )
        extra_kwargs = {
            'created': {
                'help_text': 'Shows the date and time when the deal was created.',
            },
            'expires': {
                'help_text': 'Shows the date and time for when the case should be completed.',
            },
            'modified': {
                'help_text': 'Shows the date and time when the case was last modified.',
            },
            'newly_assigned': {
                'help_text': 'True if the assignee was changed and that person hasn\'t accepted yet.',
            },
            'subject': {
                'help_text': 'A short description of the case.',
            },
        }


class RelatedCaseSerializer(RelatedSerializerMixin, CaseSerializer):
    """
    Serializer for the case model when used as a relation.
    """
    class Meta:
        model = Case
        # Override the fields because we don't want related fields in this serializer.
        fields = (
            'id',
            'assigned_to',
            'assigned_to_teams',
            'created',
            'created_by',
            'description',
            'expires',
            'is_archived',
            'modified',
            'priority',
            'priority_display',
            'subject',
        )
from openerp.osv import orm, fields
from openerp.tools.translate import _


class create_extra_documentation(orm.TransientModel):
    """Wizard that creates/refreshes module.doc records for modules and
    opens the 'Extra documentation' list view."""
    _name = 'module.doc.create'

    def create_documentation(self, cr, uid, ids, context=None):
        """Ensure each module in ``ids`` has a module.doc record.

        When a module has no documentation record yet, create one and set
        its default name (derived via onchange_module_id); otherwise flag
        the existing record(s) with has_info.  Either way the module's
        doc_id is (re)linked.  Returns the act_window dict that opens the
        documentation views.
        """
        doc_obj = self.pool.get('module.doc')
        mod_obj = self.pool.get('ir.module.module')
        for id in ids:
            search_ids = doc_obj.search(cr, uid, [('module_id', '=', id)],
                                        context=context)
            if not search_ids:
                created_id = doc_obj.create(cr, uid, {'module_id': id},
                                            context=context)
                # Derive the default documentation name from the module.
                name = doc_obj.onchange_module_id(
                    cr, uid, [created_id], id,
                    context=context)['value']['name']
                doc_obj.write(cr, uid, created_id, {'name': name},
                              context=context)
                mod_obj.write(cr, uid, id, {'doc_id': created_id},
                              context=context)
            else:
                for search_id in search_ids:
                    doc_obj.write(cr, uid, search_id, {'has_info': True},
                                  context=context)
                    mod_obj.write(cr, uid, id, {'doc_id': search_id},
                                  context=context)
        return {
            'name': _('Extra documentation'),
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'module.doc',
            'type': 'ir.actions.act_window',
        }

    # CONSISTENCY FIX: both convenience entry points below previously made
    # ``context`` a required positional argument, unlike their sibling
    # create_documentation; default it to None (backward compatible).
    def create_documentation_all(self, cr, uid, ids, context=None):
        """Create/refresh documentation records for every known module."""
        mod_obj = self.pool.get('ir.module.module')
        all_ids = mod_obj.search(cr, uid, [])
        return self.create_documentation(cr, uid, all_ids, context)

    def create_documentation_installed(self, cr, uid, ids, context=None):
        """Create/refresh documentation records for installed modules only."""
        mod_obj = self.pool.get('ir.module.module')
        installed_ids = mod_obj.search(cr, uid, [('state', '=', 'installed')])
        return self.create_documentation(cr, uid, installed_ids, context)