#!/usr/bin/env python
import os
import re
import sys

# from codecs import open
from setuptools import (setup, find_packages)
from setuptools.command.test import test as TestCommand


class PyTest(TestCommand):
    user_options = [('pytest-args=', 'a', "Arguments to pass into py.test")]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.pytest_args = []

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        import pytest
        errno = pytest.main(self.pytest_args)
        sys.exit(errno)


if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload -r pypi')
    sys.exit()

requires = []
test_requirements = ['pytest>=2.9.2', 'pytest-cov>=2.3.1', 'PyYAML>=3.11',
                     'boto', 'boto3', 'moto', 'mock']

with open('lib/creds/__init__.py', 'r') as fd:
    match = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
                      fd.read(), re.MULTILINE)
if not match:
    raise RuntimeError('Cannot find version information')
version = match.group(1)

readme = open('README.rst').read()
long_description = readme

setup(
    name='creds',
    version=version,
    description='Creds is a library for managing Linux, FreeBSD and OpenBSD user accounts and credentials.',
    long_description=long_description,
    author='Jon Hadfield',
    author_email='jon@lessknown.co.uk',
    url='http://github.com/jonhadfield/creds',
    packages=find_packages('lib'),
    package_dir={'': 'lib'},
    # package_data={'': ['LICENSE', 'NOTICE'], 'creds': ['*.pem']},
    include_package_data=True,
    install_requires=requires,
    license='MIT',
    zip_safe=False,
    classifiers=(
        'Development Status :: 4 - Beta',
        'Intended Audience :: System Administrators',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX :: Linux',
        'Operating System :: POSIX :: BSD :: FreeBSD',
        'Operating System :: POSIX :: BSD :: OpenBSD',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy'
    ),
    cmdclass={'test': PyTest},
    tests_require=test_requirements,
    # extras_require={
    #     'security': [],
    # },
)
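For illustration, the same __version__ lookup can be exercised on a made-up module body; the sample text below is hypothetical, but the regex is the one used in setup.py:

# Illustration only: how the __version__ regex in setup.py behaves.
import re

sample = '__author__ = "someone"\n__version__ = "2.1.0"\n'
match = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', sample, re.MULTILINE)
print(match.group(1) if match else None)  # -> 2.1.0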
# -*- coding: utf-8 -*-
# @Time    : 2017/7/13 5:13 PM
# @Author  : play4fun
# @File    : findHomography.py
# @Software: PyCharm

"""
findHomography.py: combine feature extraction with findHomography from the
calib3d module to locate a known object inside a cluttered image.
"""

import numpy as np
import cv2
from matplotlib import pyplot as plt

MIN_MATCH_COUNT = 10

img1 = cv2.imread('../data/box.png', 0)           # queryImage
img2 = cv2.imread('../data/box_in_scene.png', 0)  # trainImage

# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()

# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)

FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)

flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)

# store all the good matches as per Lowe's ratio test.
good = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:
        good.append(m)

'''
We only search for the object when more than MIN_MATCH_COUNT = 10 good
matches are present; otherwise a warning is shown that there are not enough
matches. If enough matches are found, we extract the coordinates of the
matched keypoints in both images and pass them to findHomography to compute
the perspective transform. Once we have the 3x3 transformation matrix, we can
use it to map the four corners of the query image onto the target image and
draw the outline.
'''
if len(good) > MIN_MATCH_COUNT:
    # coordinates of the matched keypoints
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

    # Third argument: method used to compute the homography matrix.
    # The following methods are possible:
    #   0         - a regular method using all the points
    #   CV_RANSAC - RANSAC-based robust method
    #   CV_LMEDS  - Least-Median robust method
    # Fourth argument (range 1 to 10): the reprojection threshold used to
    # reject a point pair. If the distance between a source point mapped
    # through the transform and its match in the target image exceeds this
    # threshold, the pair is treated as an outlier.
    # The returned M is the transformation matrix.
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    matchesMask = mask.ravel().tolist()

    # height and width of the query image
    h, w = img1.shape
    # map the four corners of the query image through the transform to get
    # the corresponding coordinates in the target image
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)

    # the query image is grayscale
    img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
else:
    print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
    matchesMask = None

# finally, draw the inliers (if the object was found) or the matching
# keypoints (if the search failed).
draw_params = dict(matchColor=(0, 255, 0),  # draw matches in green color
                   singlePointColor=None,
                   matchesMask=matchesMask,  # draw only inliers
                   flags=2)
img3 = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)
plt.imshow(img3, 'gray'), plt.show()
# the object found in the cluttered scene is outlined in white
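A small self-contained sketch of the corner-projection step above; the homography here is a made-up pure translation rather than one estimated from matches:

# Sketch: cv2.perspectiveTransform maps image corners through a 3x3 matrix.
import numpy as np
import cv2

h, w = 240, 320
# corners shaped (-1, 1, 2), as perspectiveTransform expects
pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
M = np.float32([[1, 0, 10], [0, 1, 20], [0, 0, 1]])  # hypothetical translation by (10, 20)
dst = cv2.perspectiveTransform(pts, M)
print(dst.reshape(-1, 2))  # each corner shifted by (10, 20)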
# Adrian deWynter, 2016
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline

F, N = tuple(map(int, raw_input().strip().split(' ')))

trainingData = []
testData = []
for _ in range(N):
    trainingData.append([float(i) for i in raw_input().strip().split(' ')])
T = input()
for _ in range(T):
    testData.append([float(i) for i in raw_input().strip().split(' ')])

trainingData = np.mat(trainingData)
testData = np.mat(testData)
X = trainingData[:, 0:F]
Y = trainingData[:, F]

ceiling = 99999999  # sentinel: any real residual variance will be far smaller
finalDegree = 2

# Polynomial regression -- the polynomial is of degree 4 at most.
for degree in range(2, 5):
    poly = make_pipeline(PolynomialFeatures(degree), LinearRegression())
    poly.fit(X, Y)
    prediction = poly.predict(X)
    if ceiling > np.var(prediction - Y):
        finalDegree = degree
        ceiling = np.var(prediction - Y)

# Refit with the best degree and predict on the test set.
poly = make_pipeline(PolynomialFeatures(finalDegree), LinearRegression())
poly.fit(X, Y)
result = poly.predict(testData)
for r in result:
    print r[0]
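A hedged sketch of the same degree-selection idea on synthetic data; the variable names and data here are illustrative, not from the original script:

# Sketch: pick a polynomial degree by comparing residual variance on the fit data.
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures

rng = np.random.RandomState(0)
X = rng.uniform(-1, 1, size=(50, 1))
y = 3 * X[:, 0] ** 3 - X[:, 0] + rng.normal(scale=0.05, size=50)

best_degree, best_err = 2, float('inf')
for degree in range(2, 5):
    model = make_pipeline(PolynomialFeatures(degree), LinearRegression())
    model.fit(X, y)
    err = np.var(model.predict(X) - y)
    if err < best_err:
        best_degree, best_err = degree, err
# Note: residual variance on the training data never increases with degree,
# so the highest degree tends to win -- mirroring the original script.
print(best_degree)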
#!/usr/bin/python
# data_sender.py
#
# Copyright (C) 2008-2018 Veselin Penev, https://bitdust.io
#
# This file (data_sender.py) is part of BitDust Software.
#
# BitDust is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BitDust Software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with BitDust Software.  If not, see <http://www.gnu.org/licenses/>.
#
# Please contact us if you have any questions at bitdust.io@gmail.com

"""
.. module:: data_sender.

.. raw:: html

    <a href="https://bitdust.io/automats/data_sender/data_sender.png" target="_blank">
    <img src="https://bitdust.io/automats/data_sender/data_sender.png" style="max-width:100%;">
    </a>

A state machine to manage the data sending process. It acts very simply:

1) when new local data is created it tries to send it to the correct supplier
2) waits while ``p2p.io_throttle`` is doing some data transmission to remote suppliers
3) calls ``p2p.backup_matrix.ScanBlocksToSend()`` to get a list of pieces that need to be sent
4) this machine is restarted every minute to check whether more data needs to be sent
5) it can also be restarted at any time when needed

EVENTS:
    * :red:`block-acked`
    * :red:`block-failed`
    * :red:`init`
    * :red:`new-data`
    * :red:`restart`
    * :red:`scan-done`
    * :red:`timer-1min`
    * :red:`timer-1sec`
"""

#------------------------------------------------------------------------------

from __future__ import absolute_import
from io import open

#------------------------------------------------------------------------------

_Debug = True
_DebugLevel = 12

#------------------------------------------------------------------------------

import os
import time

#------------------------------------------------------------------------------

from logs import lg

from automats import automat
from automats import global_state

from lib import misc
from lib import packetid

from contacts import contactsdb

from userid import my_id

from main import settings

from p2p import contact_status

from . import io_throttle

#------------------------------------------------------------------------------

_DataSender = None
_ShutdownFlag = False

#------------------------------------------------------------------------------


def A(event=None, arg=None):
    """
    Access method to interact with the state machine.
    """
    global _DataSender
    if _DataSender is None:
        _DataSender = DataSender(
            name='data_sender',
            state='READY',
            debug_level=_DebugLevel,
            log_events=_Debug,
            log_transitions=_Debug,
        )
    if event is not None:
        _DataSender.automat(event, arg)
    return _DataSender


def Destroy():
    """
    Destroy the state machine and remove the instance from memory.
    """
    global _DataSender
    if _DataSender is None:
        return
    _DataSender.destroy()
    del _DataSender
    _DataSender = None


class DataSender(automat.Automat):
    """
    A class to manage the process of sending data packets to remote suppliers.
    """
    timers = {
        'timer-1min': (60, ['READY']),
        'timer-1sec': (1.0, ['SENDING']),
    }
    statistic = {}

    def state_changed(self, oldstate, newstate, event, arg):
        global_state.set_global_state('DATASEND ' + newstate)

    def A(self, event, arg):
        #---READY---
        if self.state == 'READY':
            if event == 'new-data' or event == 'timer-1min' or event == 'restart':
                self.state = 'SCAN_BLOCKS'
                self.doScanAndQueue(arg)
            elif event == 'init':
                pass
        #---SCAN_BLOCKS---
        elif self.state == 'SCAN_BLOCKS':
            if event == 'scan-done' and self.isQueueEmpty(arg):
                self.state = 'READY'
                self.doRemoveUnusedFiles(arg)
            elif event == 'scan-done' and not self.isQueueEmpty(arg):
                self.state = 'SENDING'
        #---SENDING---
        elif self.state == 'SENDING':
            if event == 'restart' or (
                (event == 'timer-1sec' or event == 'block-acked' or
                 event == 'block-failed' or event == 'new-data') and
                    self.isQueueEmpty(arg)):
                self.state = 'SCAN_BLOCKS'
                self.doScanAndQueue(arg)
        return None

    def isQueueEmpty(self, arg):
        if not arg:
            return io_throttle.IsSendingQueueEmpty()
        remoteID, _ = arg
        return io_throttle.OkToSend(remoteID)

    def doScanAndQueue(self, arg):
        global _ShutdownFlag
        if _Debug:
            lg.out(_DebugLevel, 'data_sender.doScanAndQueue _ShutdownFlag=%r' % _ShutdownFlag)
        if _Debug:
            log = open(os.path.join(settings.LogsDir(), 'data_sender.log'), 'w')
            log.write(u'doScanAndQueue %s\n' % time.asctime())  # .decode('utf-8')
        if _ShutdownFlag:
            if _Debug:
                log.write(u'doScanAndQueue _ShutdownFlag is True\n')
            self.automat('scan-done')
            if _Debug:
                log.flush()
                log.close()
            return
        for customer_idurl in contactsdb.known_customers():
            if '' not in contactsdb.suppliers(customer_idurl):
                from storage import backup_matrix
                for backupID in misc.sorted_backup_ids(
                        list(backup_matrix.local_files().keys()), True):
                    this_customer_idurl = packetid.CustomerIDURL(backupID)
                    if this_customer_idurl != customer_idurl:
                        continue
                    packetsBySupplier = backup_matrix.ScanBlocksToSend(backupID)
                    if _Debug:
                        log.write(u'%s\n' % packetsBySupplier)
                    for supplierNum in packetsBySupplier.keys():
                        supplier_idurl = contactsdb.supplier(supplierNum, customer_idurl=customer_idurl)
                        if not supplier_idurl:
                            lg.warn('unknown supplier_idurl supplierNum=%s for %s, customer_idurl=%s' % (
                                supplierNum, backupID, customer_idurl))
                            continue
                        for packetID in packetsBySupplier[supplierNum]:
                            backupID_, _, supplierNum_, _ = packetid.BidBnSnDp(packetID)
                            if backupID_ != backupID:
                                lg.warn('unexpected backupID %s for %s, customer_idurl=%s' % (
                                    packetID, backupID, customer_idurl))
                                continue
                            if supplierNum_ != supplierNum:
                                lg.warn('unexpected supplierNum %s for %s, customer_idurl=%s' % (
                                    packetID, backupID, customer_idurl))
                                continue
                            if io_throttle.HasPacketInSendQueue(supplier_idurl, packetID):
                                if _Debug:
                                    log.write(u'%s already in sending queue for %s\n' % (packetID, supplier_idurl))
                                continue
                            if not io_throttle.OkToSend(supplier_idurl):
                                if _Debug:
                                    log.write(u'skip, not ok to send %s\n' % supplier_idurl)
                                continue
                            customerGlobalID, pathID = packetid.SplitPacketID(packetID)
                            # tranByID = gate.transfers_out_by_idurl().get(supplier_idurl, [])
                            # if len(tranByID) > 3:
                            #     log.write(u'transfers by %s: %d\n' % (supplier_idurl, len(tranByID)))
                            #     continue
                            filename = os.path.join(
                                settings.getLocalBackupsDir(),
                                customerGlobalID,
                                pathID,
                            )
                            if not os.path.isfile(filename):
                                if _Debug:
                                    log.write(u'%s is not a file\n' % filename)
                                continue
                            if io_throttle.QueueSendFile(
                                filename,
                                packetID,
                                supplier_idurl,
                                my_id.getLocalID(),
                                self._packetAcked,
                                self._packetFailed,
                            ):
                                if _Debug:
                                    log.write(u'io_throttle.QueueSendFile %s\n' % packetID)
                            else:
                                if _Debug:
                                    log.write(u'io_throttle.QueueSendFile FAILED %s\n' % packetID)
                            # lg.out(6, '  %s for %s' % (packetID, backupID))
                            # DEBUG
                            # break
        self.automat('scan-done')
        if _Debug:
            log.flush()
            log.close()

    # def doPrintStats(self, arg):
    #     """
    #     """
    #     if lg.is_debug(18):
    #         transfers = transport_control.current_transfers()
    #         bytes_stats = transport_control.current_bytes_transferred()
    #         s = ''
    #         for info in transfers:
    #             s += '%s ' % (diskspace.MakeStringFromBytes(bytes_stats[info.transfer_id]).replace(' ', '').replace('bytes', 'b'))
    #         lg.out(0, 'transfers: ' + s[:120])

    def doRemoveUnusedFiles(self, arg):
        # we want to remove files for this block
        # because we only need them during rebuilding
        if settings.getBackupsKeepLocalCopies() is True:
            # if the user set this in settings - he wants to keep the local files
            return
        # ... the user does not want to keep local backups
        if settings.getGeneralWaitSuppliers() is True:
            from customer import fire_hire
            # but he wants to be sure - all suppliers have been green for a long time
            if len(contact_status.listOfflineSuppliers()) > 0 or \
                    time.time() - fire_hire.GetLastFireTime() < 24 * 60 * 60:
                # some suppliers are offline or we do not have a stable team yet -
                # do not remove the files because we need them to rebuild
                return
        count = 0
        from storage import backup_matrix
        from storage import restore_monitor
        from storage import backup_rebuilder
        if _Debug:
            lg.out(_DebugLevel, 'data_sender.doRemoveUnusedFiles')
        for backupID in misc.sorted_backup_ids(
                list(backup_matrix.local_files().keys())):
            if restore_monitor.IsWorking(backupID):
                if _Debug:
                    lg.out(_DebugLevel, '        %s : SKIP, because restoring' % backupID)
                continue
            if backup_rebuilder.IsBackupNeedsWork(backupID):
                if _Debug:
                    lg.out(_DebugLevel, '        %s : SKIP, because needs rebuilding' % backupID)
                continue
            if not backup_rebuilder.ReadStoppedFlag():
                if backup_rebuilder.A().currentBackupID is not None:
                    if backup_rebuilder.A().currentBackupID == backupID:
                        if _Debug:
                            lg.out(_DebugLevel, '        %s : SKIP, because rebuilding is in process' % backupID)
                        continue
            packets = backup_matrix.ScanBlocksToRemove(
                backupID, settings.getGeneralWaitSuppliers())
            for packetID in packets:
                customer, pathID = packetid.SplitPacketID(packetID)
                filename = os.path.join(settings.getLocalBackupsDir(), customer, pathID)
                if os.path.isfile(filename):
                    try:
                        os.remove(filename)
                        # lg.out(6, '    ' + os.path.basename(filename))
                    except:
                        lg.exc()
                        continue
                    count += 1
        if _Debug:
            lg.out(_DebugLevel, '    %d files were removed' % count)
        backup_matrix.ReadLocalFiles()

    def _packetAcked(self, packet, ownerID, packetID):
        from storage import backup_matrix
        backupID, blockNum, supplierNum, dataORparity = packetid.BidBnSnDp(packetID)
        backup_matrix.RemoteFileReport(
            backupID, blockNum, supplierNum, dataORparity, True)
        if ownerID not in self.statistic:
            self.statistic[ownerID] = [0, 0]
        self.statistic[ownerID][0] += 1
        self.automat('block-acked', (ownerID, packetID))

    def _packetFailed(self, remoteID, packetID, why):
        from storage import backup_matrix
        backupID, blockNum, supplierNum, dataORparity = packetid.BidBnSnDp(
            packetID)
        backup_matrix.RemoteFileReport(
            backupID, blockNum, supplierNum, dataORparity, False)
        if remoteID not in self.statistic:
            self.statistic[remoteID] = [0, 0]
        self.statistic[remoteID][1] += 1
        self.automat('block-failed', (remoteID, packetID))


def statistic():
    """
    The ``data_sender()`` machine keeps track of sending results with every
    supplier.

    This is used by ``fire_hire()`` to decide how reliable a given
    supplier is.
    """
    global _DataSender
    if _DataSender is None:
        return {}
    return _DataSender.statistic


def SetShutdownFlag():
    """
    Set the flag to indicate that nothing needs to be sent anymore.
    """
    global _ShutdownFlag
    _ShutdownFlag = True
# -*- coding: utf-8 -*-
#
# Faker documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 11 11:25:48 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.todo',
    'faker.build_docs',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Faker'
copyright = u'2014, Daniele Faraglia'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.7.5'
# The full version, including alpha/beta/rc tags.
release = '0.7.5'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'Fakerdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'Faker.tex', u'Faker Documentation',
     u'Daniele Faraglia', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'faker', u'Faker Documentation',
     [u'Daniele Faraglia'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'Faker', u'Faker Documentation',
     u'Daniele Faraglia', 'Faker', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
###############################################################################
import b64_mod
###############################################################################


def decode(bstring, flag=0):
    """Decode a string.

    When flag is 0 (or not set), takes a Base64 string and converts it to
    English plain text. When flag is 1, the input string is already in hex
    format."""
    declist = []
    outstring = ''
    if flag == 0:
        declist = b64_mod.hextodec(b64_mod.basetohex(bstring))
    elif flag == 1:
        declist = b64_mod.hextodec(bstring)
    for x in declist:
        outstring += chr(x)
    return outstring

##############################################################################


def encode(ascstring, key=None):
    """Given an ASCII English string of text, with any quotes properly
    escaped, encode it into a Base64 string."""
    if key is not None:
        if len(key) < len(ascstring):
            key = keylen(b64_mod.ascitohex(ascstring), key)
    outlist = []
    for x in ascstring:
        outlist.append(ord(x))
    return b64_mod.hextobase(''.join(b64_mod.dec_to_hex(outlist)))

##############################################################################


def hexorsum(hexstring1, hexstring2):
    """Calculate the XOR sum of two equal-length hex strings."""
    binlist1 = []
    binlist2 = []
    binstring1 = b64_mod.hextobin(hexstring1)
    binstring2 = b64_mod.hextobin(hexstring2)
    for x in binstring1:
        binlist1.append(x)
    for x in binstring2:
        binlist2.append(x)
    sumlist = []
    sumstring = ''
    for x in range(len(binlist1)):
        if binlist1[x] == binlist2[x]:
            sumlist.append('0')
        else:
            sumlist.append('1')
    sumstring = ''.join(sumlist)
    return b64_mod.bintohex(sumstring)

##############################################################################


def keylen(hexstring, key, flag=0):
    """Repeat or trim key until it is exactly as long as hexstring."""
    if flag == 0:
        key = b64_mod.ascitohex(key)
    while len(hexstring) != len(key):
        if len(key) > len(hexstring):
            key = key[:len(key) - 1]
        if len(key) < len(hexstring):
            key += key
    return key

##############################################################################


def repkeyxor_encoder(text, key):
    """Encrypt text with repeating-key XOR, returning a hex string."""
    text = b64_mod.ascitohex(text)
    if len(key) < len(text):
        key = keylen(text, key)
    return hexorsum(text, key)
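For reference, the repeating-key XOR performed by repkeyxor_encoder can be written with the standard library alone; this sketch works on bytes instead of the b64_mod hex helpers used above:

# Sketch: repeating-key XOR over bytes, using itertools.cycle for the key.
from itertools import cycle

def rep_key_xor(data, key):
    return bytes(b ^ k for b, k in zip(data, cycle(key)))

ct = rep_key_xor(b"Burning 'em", b"ICE")
print(ct.hex())                 # hex ciphertext
print(rep_key_xor(ct, b"ICE"))  # XOR is self-inverse -> b"Burning 'em"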
from cloudinary.models import CloudinaryField
from community.models import Community
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models


class Resource(models.Model):
    LANGUAGE_TAGS = (
        ('PYTHON', 'Python'),
        ('RUBY', 'Ruby'),
        ('ANDROID', 'Android'),
        ('MARKUP', 'HTML/CSS'),
        ('JAVA', 'Java'),
        ('PHP', 'PHP'),
        ('IOS', 'IOS'),
        ('JAVASCRIPT', 'Javascript'),
        ('C', 'C')
    )
    author = models.ForeignKey(User)
    community = models.ForeignKey(Community, blank=True, null=True,
                                  related_name='resources')
    text = models.TextField(null=True, blank=False)
    language_tags = models.CharField(
        max_length=30, choices=LANGUAGE_TAGS, default='Untagged')
    resource_file = CloudinaryField(
        'resource_file', null=True, blank=True)
    resource_file_name = models.CharField(max_length=100, null=True)
    resource_file_size = models.IntegerField(default=0)
    snippet_text = models.TextField(null=True, blank=True)
    date_added = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.text

    def get_absolute_url(self):
        return reverse('detail', args=[str(self.id)])

    def upvotes(self):
        liked_ids = [
            vote.user.id for vote in self.votes.all() if vote.vote is True]
        return liked_ids

    def downvotes(self):
        unliked_ids = [
            vote.user.id for vote in self.votes.all() if vote.vote is False]
        return unliked_ids


class NotificationQueue(models.Model):
    user = models.ForeignKey(User)
    notification_type = models.CharField(max_length=20)
    first_interaction = models.CharField(max_length=20)
    count = models.IntegerField(default=0)
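A standalone sketch of the upvote/downvote partition used by Resource; FakeVote is a stand-in for the related vote model, which is not shown in this file:

# Sketch: split voters into up- and down-voters by the boolean vote flag.
class FakeVote(object):
    def __init__(self, user_id, vote):
        self.user_id = user_id
        self.vote = vote

votes = [FakeVote(1, True), FakeVote(2, False), FakeVote(3, True)]
print([v.user_id for v in votes if v.vote is True])   # upvoters   -> [1, 3]
print([v.user_id for v in votes if v.vote is False])  # downvoters -> [2]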
""" Dialog for building Tkinter accelerator key bindings """ from tkinter import * from tkinter.ttk import Scrollbar from tkinter import messagebox import string import sys class GetKeysDialog(Toplevel): # Dialog title for invalid key sequence keyerror_title = 'Key Sequence Error' def __init__(self, parent, title, action, currentKeySequences, *, _htest=False, _utest=False): """ action - string, the name of the virtual event these keys will be mapped to currentKeys - list, a list of all key sequence lists currently mapped to virtual events, for overlap checking _utest - bool, do not wait when running unittest _htest - bool, change box location when running htest """ Toplevel.__init__(self, parent) self.withdraw() #hide while setting geometry self.configure(borderwidth=5) self.resizable(height=FALSE, width=FALSE) self.title(title) self.transient(parent) self.grab_set() self.protocol("WM_DELETE_WINDOW", self.Cancel) self.parent = parent self.action=action self.currentKeySequences = currentKeySequences self.result = '' self.keyString = StringVar(self) self.keyString.set('') self.SetModifiersForPlatform() # set self.modifiers, self.modifier_label self.modifier_vars = [] for modifier in self.modifiers: variable = StringVar(self) variable.set('') self.modifier_vars.append(variable) self.advanced = False self.CreateWidgets() self.LoadFinalKeyList() self.update_idletasks() self.geometry( "+%d+%d" % ( parent.winfo_rootx() + (parent.winfo_width()/2 - self.winfo_reqwidth()/2), parent.winfo_rooty() + ((parent.winfo_height()/2 - self.winfo_reqheight()/2) if not _htest else 150) ) ) #centre dialog over parent (or below htest box) if not _utest: self.deiconify() #geometry set, unhide self.wait_window() def showerror(self, *args, **kwargs): # Make testing easier. Replace in #30751. 
messagebox.showerror(*args, **kwargs) def CreateWidgets(self): frameMain = Frame(self,borderwidth=2,relief=SUNKEN) frameMain.pack(side=TOP,expand=TRUE,fill=BOTH) frameButtons=Frame(self) frameButtons.pack(side=BOTTOM,fill=X) self.buttonOK = Button(frameButtons,text='OK', width=8,command=self.OK) self.buttonOK.grid(row=0,column=0,padx=5,pady=5) self.buttonCancel = Button(frameButtons,text='Cancel', width=8,command=self.Cancel) self.buttonCancel.grid(row=0,column=1,padx=5,pady=5) self.frameKeySeqBasic = Frame(frameMain) self.frameKeySeqAdvanced = Frame(frameMain) self.frameControlsBasic = Frame(frameMain) self.frameHelpAdvanced = Frame(frameMain) self.frameKeySeqAdvanced.grid(row=0,column=0,sticky=NSEW,padx=5,pady=5) self.frameKeySeqBasic.grid(row=0,column=0,sticky=NSEW,padx=5,pady=5) self.frameKeySeqBasic.lift() self.frameHelpAdvanced.grid(row=1,column=0,sticky=NSEW,padx=5) self.frameControlsBasic.grid(row=1,column=0,sticky=NSEW,padx=5) self.frameControlsBasic.lift() self.buttonLevel = Button(frameMain,command=self.ToggleLevel, text='Advanced Key Binding Entry >>') self.buttonLevel.grid(row=2,column=0,stick=EW,padx=5,pady=5) labelTitleBasic = Label(self.frameKeySeqBasic, text="New keys for '"+self.action+"' :") labelTitleBasic.pack(anchor=W) labelKeysBasic = Label(self.frameKeySeqBasic,justify=LEFT, textvariable=self.keyString,relief=GROOVE,borderwidth=2) labelKeysBasic.pack(ipadx=5,ipady=5,fill=X) self.modifier_checkbuttons = {} column = 0 for modifier, variable in zip(self.modifiers, self.modifier_vars): label = self.modifier_label.get(modifier, modifier) check=Checkbutton(self.frameControlsBasic, command=self.BuildKeyString, text=label,variable=variable,onvalue=modifier,offvalue='') check.grid(row=0,column=column,padx=2,sticky=W) self.modifier_checkbuttons[modifier] = check column += 1 labelFnAdvice=Label(self.frameControlsBasic,justify=LEFT, text=\ "Select the desired modifier keys\n"+ "above, and the final key from the\n"+ "list on the right.\n\n" + "Use upper case Symbols when using\n" + "the Shift modifier. 
(Letters will be\n" + "converted automatically.)") labelFnAdvice.grid(row=1,column=0,columnspan=4,padx=2,sticky=W) self.listKeysFinal=Listbox(self.frameControlsBasic,width=15,height=10, selectmode=SINGLE) self.listKeysFinal.bind('<ButtonRelease-1>',self.FinalKeySelected) self.listKeysFinal.grid(row=0,column=4,rowspan=4,sticky=NS) scrollKeysFinal=Scrollbar(self.frameControlsBasic,orient=VERTICAL, command=self.listKeysFinal.yview) self.listKeysFinal.config(yscrollcommand=scrollKeysFinal.set) scrollKeysFinal.grid(row=0,column=5,rowspan=4,sticky=NS) self.buttonClear=Button(self.frameControlsBasic, text='Clear Keys',command=self.ClearKeySeq) self.buttonClear.grid(row=2,column=0,columnspan=4) labelTitleAdvanced = Label(self.frameKeySeqAdvanced,justify=LEFT, text="Enter new binding(s) for '"+self.action+"' :\n"+ "(These bindings will not be checked for validity!)") labelTitleAdvanced.pack(anchor=W) self.entryKeysAdvanced=Entry(self.frameKeySeqAdvanced, textvariable=self.keyString) self.entryKeysAdvanced.pack(fill=X) labelHelpAdvanced=Label(self.frameHelpAdvanced,justify=LEFT, text="Key bindings are specified using Tkinter keysyms as\n"+ "in these samples: <Control-f>, <Shift-F2>, <F12>,\n" "<Control-space>, <Meta-less>, <Control-Alt-Shift-X>.\n" "Upper case is used when the Shift modifier is present!\n\n" + "'Emacs style' multi-keystroke bindings are specified as\n" + "follows: <Control-x><Control-y>, where the first key\n" + "is the 'do-nothing' keybinding.\n\n" + "Multiple separate bindings for one action should be\n"+ "separated by a space, eg., <Alt-v> <Meta-v>." ) labelHelpAdvanced.grid(row=0,column=0,sticky=NSEW) def SetModifiersForPlatform(self): """Determine list of names of key modifiers for this platform. The names are used to build Tk bindings -- it doesn't matter if the keyboard has these keys, it matters if Tk understands them. The order is also important: key binding equality depends on it, so config-keys.def must use the same ordering. 
""" if sys.platform == "darwin": self.modifiers = ['Shift', 'Control', 'Option', 'Command'] else: self.modifiers = ['Control', 'Alt', 'Shift'] self.modifier_label = {'Control': 'Ctrl'} # short name def ToggleLevel(self): if self.buttonLevel.cget('text')[:8]=='Advanced': self.ClearKeySeq() self.buttonLevel.config(text='<< Basic Key Binding Entry') self.frameKeySeqAdvanced.lift() self.frameHelpAdvanced.lift() self.entryKeysAdvanced.focus_set() self.advanced = True else: self.ClearKeySeq() self.buttonLevel.config(text='Advanced Key Binding Entry >>') self.frameKeySeqBasic.lift() self.frameControlsBasic.lift() self.advanced = False def FinalKeySelected(self,event): self.BuildKeyString() def BuildKeyString(self): keyList = modifiers = self.GetModifiers() finalKey = self.listKeysFinal.get(ANCHOR) if finalKey: finalKey = self.TranslateKey(finalKey, modifiers) keyList.append(finalKey) self.keyString.set('<' + '-'.join(keyList) + '>') def GetModifiers(self): modList = [variable.get() for variable in self.modifier_vars] return [mod for mod in modList if mod] def ClearKeySeq(self): self.listKeysFinal.select_clear(0,END) self.listKeysFinal.yview(MOVETO, '0.0') for variable in self.modifier_vars: variable.set('') self.keyString.set('') def LoadFinalKeyList(self): #these tuples are also available for use in validity checks self.functionKeys=('F1','F2','F3','F4','F5','F6','F7','F8','F9', 'F10','F11','F12') self.alphanumKeys=tuple(string.ascii_lowercase+string.digits) self.punctuationKeys=tuple('~!@#%^&*()_-+={}[]|;:,.<>/?') self.whitespaceKeys=('Tab','Space','Return') self.editKeys=('BackSpace','Delete','Insert') self.moveKeys=('Home','End','Page Up','Page Down','Left Arrow', 'Right Arrow','Up Arrow','Down Arrow') #make a tuple of most of the useful common 'final' keys keys=(self.alphanumKeys+self.punctuationKeys+self.functionKeys+ self.whitespaceKeys+self.editKeys+self.moveKeys) self.listKeysFinal.insert(END, *keys) def TranslateKey(self, key, modifiers): "Translate from keycap symbol to the Tkinter keysym" translateDict = {'Space':'space', '~':'asciitilde','!':'exclam','@':'at','#':'numbersign', '%':'percent','^':'asciicircum','&':'ampersand','*':'asterisk', '(':'parenleft',')':'parenright','_':'underscore','-':'minus', '+':'plus','=':'equal','{':'braceleft','}':'braceright', '[':'bracketleft',']':'bracketright','|':'bar',';':'semicolon', ':':'colon',',':'comma','.':'period','<':'less','>':'greater', '/':'slash','?':'question','Page Up':'Prior','Page Down':'Next', 'Left Arrow':'Left','Right Arrow':'Right','Up Arrow':'Up', 'Down Arrow': 'Down', 'Tab':'Tab'} if key in translateDict: key = translateDict[key] if 'Shift' in modifiers and key in string.ascii_lowercase: key = key.upper() key = 'Key-' + key return key def OK(self, event=None): keys = self.keyString.get().strip() if not keys: self.showerror(title=self.keyerror_title, parent=self, message="No key specified.") return if (self.advanced or self.KeysOK(keys)) and self.bind_ok(keys): self.result = keys self.grab_release() self.destroy() def Cancel(self, event=None): self.result='' self.grab_release() self.destroy() def KeysOK(self, keys): '''Validity check on user's 'basic' keybinding selection. Doesn't check the string produced by the advanced dialog because 'modifiers' isn't set. 
''' finalKey = self.listKeysFinal.get(ANCHOR) modifiers = self.GetModifiers() keysOK = False title = self.keyerror_title key_sequences = [key for keylist in self.currentKeySequences for key in keylist] if not keys.endswith('>'): self.showerror(title, parent=self, message='Missing the final Key') elif (not modifiers and finalKey not in self.functionKeys + self.moveKeys): self.showerror(title=title, parent=self, message='No modifier key(s) specified.') elif (modifiers == ['Shift']) \ and (finalKey not in self.functionKeys + self.moveKeys + ('Tab', 'Space')): msg = 'The shift modifier by itself may not be used with'\ ' this key symbol.' self.showerror(title=title, parent=self, message=msg) elif keys in key_sequences: msg = 'This key combination is already in use.' self.showerror(title=title, parent=self, message=msg) else: keysOK = True return keysOK def bind_ok(self, keys): "Return True if Tcl accepts the new keys else show message." try: binding = self.bind(keys, lambda: None) except TclError as err: self.showerror( title=self.keyerror_title, parent=self, message=(f'The entered key sequence is not accepted.\n\n' f'Error: {err}')) return False else: self.unbind(keys, binding) return True if __name__ == '__main__': from unittest import main main('idlelib.idle_test.test_config_key', verbosity=2, exit=False) from idlelib.idle_test.htest import run run(GetKeysDialog)
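A tiny standalone sketch of the keycap-to-keysym translation performed by TranslateKey; the dictionary here is abbreviated from the full one above:

# Sketch: map a keycap label plus modifiers to a Tk keysym fragment.
translate = {'Space': 'space', 'Page Up': 'Prior', '!': 'exclam'}  # abbreviated

def to_keysym(key, modifiers):
    key = translate.get(key, key)
    if 'Shift' in modifiers and len(key) == 1 and key.islower():
        key = key.upper()  # Shift implies the upper-case keysym for letters
    return 'Key-' + key

print(to_keysym('a', ['Shift']))  # -> Key-A
print(to_keysym('Page Up', []))   # -> Key-Prior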
from projection_methods.projectables.halfspace import Halfspace
from projection_methods.projectables.hyperplane import Hyperplane
from projection_methods.projectables.projectable import Projectable


class Polyhedron(Projectable):
    """A projectable polyhedron

    Defines a polyhedron via a set of hyperplanes and halfspaces. An
    unconventional Projectable in that it implements an optional maximum size
    (defined as a maximum number of halfspaces and a maximum number of
    hyperplanes) and implements policies by which halfspaces and hyperplanes
    are evicted when maximum capacity is met.

    Attributes:
        max_hyperplanes: maximum number of hyperplanes (default infinite)
        max_halfspaces: maximum number of halfspaces (default infinite)
        eviction_policy: policy by which to evict halfspaces
    """
    def __init__(self, x, information=[]):
        """
        Args:
            x (cvxpy.Variable): a symbolic representation of members
                of the set
            information (list of Halfspace and/or Hyperplane): halfspaces
                and hyperplanes defining polyhedron
        """
        self._hyperplanes = []
        self._halfspaces = []
        self._constr = []
        self.add(information)
        super(Polyhedron, self).__init__(x, self._constr)

    def halfspaces(self):
        return self._halfspaces

    def hyperplanes(self):
        return self._hyperplanes

    def add(self, information):
        """Adds hyperplanes and halfspaces to polyhedron

        Args:
            information (list of Hyperplane and/or Halfspace): halfspaces
                and hyperplanes to add to polyhedron

        Raises:
            ValueError: if information contains an object that is not a
                Hyperplane or a Halfspace
        """
        if type(information) is not list:
            information = [information]
        for info in information:
            if type(info) == Hyperplane:
                self._hyperplanes.append(info)
                self._constr += [c for c in info._constr]
            elif type(info) == Halfspace:
                self._halfspaces.append(info)
                self._constr += [c for c in info._constr]
            else:
                raise ValueError("Only Halfspaces or Hyperplanes can be added")
""" Revision ID: 0252_letter_branding_table Revises: 0251_another_letter_org Create Date: 2019-01-17 15:45:33.242955 """ from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql revision = '0252_letter_branding_table' down_revision = '0251_another_letter_org' def upgrade(): op.create_table('letter_branding', sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False), sa.Column('name', sa.String(length=255), nullable=False), sa.Column('filename', sa.String(length=255), nullable=False), sa.Column('domain', sa.Text(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('domain'), sa.UniqueConstraint('filename'), sa.UniqueConstraint('name') ) op.create_table('service_letter_branding', sa.Column('service_id', postgresql.UUID(as_uuid=True), nullable=False), sa.Column('letter_branding_id', postgresql.UUID(as_uuid=True), nullable=False), sa.ForeignKeyConstraint(['letter_branding_id'], ['letter_branding.id'], ), sa.ForeignKeyConstraint(['service_id'], ['services.id'], ), sa.PrimaryKeyConstraint('service_id') ) op.get_bind() def downgrade(): op.drop_table('service_letter_branding') op.drop_table('letter_branding')
VBA = \
    r"""
Function Base64ToText(ByVal vCode)
    Dim oXML, oNode
    Dim tempString As String
    tempString = "Msxm"
    tempString = tempString & "l2.DO"
    tempString = tempString & "MDoc"
    tempString = tempString & "ument.3.0"
    Set oXML = CreateObject(tempString)
    Set oNode = oXML.CreateElement("base64")
    oNode.DataType = "bin.base64"
    oNode.Text = vCode
    Base64ToText = Stream_BinaryToString(oNode.nodeTypedValue)
    Set oNode = Nothing
    Set oXML = Nothing
End Function

'Stream_BinaryToString Function
'2003 Antonin Foller, http://www.motobit.com
'Binary - VT_UI1 | VT_ARRAY data To convert To a string
Private Function Stream_BinaryToString(Binary)
    Const adTypeText = 2
    Const adTypeBinary = 1

    'Create Stream object
    Dim BinaryStream 'As New Stream
    Dim tmpString As String
    tmpString = "ADO"
    tmpString = tmpString & "DB.St"
    tmpString = tmpString & "ream"
    Set BinaryStream = CreateObject(tmpString)

    'Specify stream type - we want To save binary data.
    BinaryStream.Type = adTypeBinary

    'Open the stream And write binary data To the object
    BinaryStream.Open
    BinaryStream.Write Binary

    'Change stream type To text/string
    BinaryStream.Position = 0
    BinaryStream.Type = adTypeText

    'Specify charset For the output text (unicode) data.
    BinaryStream.Charset = "us-ascii"

    'Open the stream And get text/string data from the object
    Stream_BinaryToString = BinaryStream.ReadText
    Set BinaryStream = Nothing
End Function
"""
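For comparison, the Base64-to-text conversion this VBA routine performs is a one-liner in Python; the sample input below is made up:

# Sketch: decode Base64 to ASCII text with the standard library.
import base64
print(base64.b64decode('aGVsbG8gd29ybGQ=').decode('ascii'))  # -> hello world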
import os
import sys

from PyQt4.QtCore import *
from PyQt4.QtGui import *
from sqlalchemy import *
from sqlalchemy.orm import *

from databaseschema import *
from genericdelegates import *
from functions import *
import modelsandviews
import ui_forms.ui_findform
import receiveform
import batchform
import productionform
import inventoryadjform
import reporting

localTITLE = 'Find'

RECEIVE, BATCH, PRODUCTION, ADJUSTMENT, PREP = range(5)


class FilterList(object):

    def __init__(self, filter, criteria, setTo):
        self.filter = filter
        self.criteria = criteria
        self.setTo = setTo


class FilterModel(QAbstractTableModel):

    def __init__(self, parent=None):
        super(FilterModel, self).__init__(parent)
        self.records = []

    def rowCount(self, index=QModelIndex()):
        return len(self.records)

    def columnCount(self, index=QModelIndex()):
        return 3

    def data(self, index, role=Qt.DisplayRole):
        if not index.isValid() or not (0 <= index.row() < len(self.records)):
            return QVariant()
        record = self.records[index.row()]
        column = index.column()
        if role == Qt.DisplayRole:
            if column == 0:
                return QVariant(record.filter)
            elif column == 1:
                return QVariant(record.criteria)
            elif column == 2:
                return QVariant(record.setTo)
        return QVariant()

    def setData(self, index, value, role=Qt.EditRole):
        if index.isValid() and role == Qt.EditRole:
            record = self.records[index.row()]
            column = index.column()
            if column == 0:
                record.filter = value.toString()
            elif column == 1:
                record.criteria = value.toString()
            elif column == 2:
                record.setTo = value.toString()
            self.emit(SIGNAL("dataChanged(QModelIndex, QModelIndex)"), index, index)
            return True
        return False

    def insertRows(self, position, object, rows=1, index=QModelIndex()):
        self.beginInsertRows(QModelIndex(), position, position + rows - 1)
        for row in range(rows):
            self.records.insert(position + row + 1, object)
        self.endInsertRows()
        return True

    def removeRows(self, position, rows=1, index=QModelIndex()):
        self.beginRemoveRows(QModelIndex(), position, position + rows - 1)
        self.records = self.records[:position] + self.records[position + rows:]
        self.endRemoveRows()
        return True

    def getFilterCriteria(self):
        records_ = []
        for rec in self.records:
            crit = rec.filter
            records_ += [str(crit)]
        return records_

    def clear(self):
        self.beginResetModel()
        self.records = []
        self.endResetModel()

#==================================================================
### Form setup ==============


class FindForm(QDialog, ui_forms.ui_findform.Ui_FindForm):

    ### Initializer ==============
    def __init__(self, supplierModel, parent=None):
        super(FindForm, self).__init__(parent)
        self.setupUi(self)
        self.tabWidget.setCurrentIndex(0)
        self.session = Session()
        self.query = None
        self.model = None
        self.reportName = QString()
        self.fieldList = []
        self.columnsToTotal = []
        self.proxyModel = QSortFilterProxyModel()
        self.myParent = parent

        ## == Standard tab GUI setup ==
        journalList = QStringList()
        journalList << 'Receive' << 'Batch' << 'Production' << 'Adjustment' << 'Preparation'
        self.journal_combo.addItems(journalList)
        self.supCom.setVisible(False)
        self.supplierModel = supplierModel
        self.supplier_combo = modelsandviews.SupplierComboBox(self.supplierModel)
        self.supplier_combo.setMinimumSize(QSize(218, 25))
        self.supplier_combo.setMaximumSize(QSize(218, 25))
        self.gridLayout.addWidget(self.supplier_combo, 2, 1, 1, 2)
        self.batchDesc_lineEdit = QLineEdit()
        self.batchDesc_lineEdit.setMinimumSize(QSize(218, 25))
        self.batchDesc_lineEdit.setMaximumSize(QSize(218, 85))
        self.gridLayout.addWidget(self.batchDesc_lineEdit, 2, 1, 1, 2)
        self.batchDesc_lineEdit.setVisible(False)
        validator = QDoubleValidator()
        validator.setNotation(QDoubleValidator.StandardNotation)
        self.amountHi_lineEdit.setValidator(validator)
        self.amountLow_lineEdit.setValidator(validator)
        self.dtcom.setVisible(False)
        self.dateRange_combo = modelsandviews.DateRangeComboBox(self.layoutWidget)
        self.dateRange_combo.setCurrentIndex(0)
        self.dateRange_combo.setMinimumSize(QSize(96, 25))
        self.dateRange_combo.setMaximumSize(QSize(96, 25))
        self.gridLayout.addWidget(self.dateRange_combo, 4, 1, 1, 1)
        self.dateLow_dateEdit.setDate(QDate.currentDate())
        self.dateHi_dateEdit.setDate(QDate.currentDate())
        self.results_tableView.setSelectionMode(QTableView.SingleSelection)
        self.results_tableView.setSelectionBehavior(QTableView.SelectRows)

        ## == Detail tab GUI setup ==
        self.filter_stackedWidget.setCurrentIndex(0)
        crtList = QStringList()
        crtList << 'Journal Number' << 'Journal ID' << 'Supplier' << 'Items' \
                << 'Description' << 'Journal Type' << 'Item Type' << 'Date' \
                << 'Date Modified'
        crtView = self.criteriaList_listWidget
        crtView.addItems(crtList)
        crtView.setEditTriggers(QListView.NoEditTriggers)
        self.filterModel = FilterModel()
        fltView = self.criteria_tableView
        fltView.setModel(self.filterModel)
        fltView.hideColumn(0)
        fltView.horizontalHeader().setStretchLastSection(True)
        fltView.horizontalHeader().setVisible(False)
        fltView.verticalHeader().setVisible(False)
        fltView.setSelectionMode(QTableView.SingleSelection)
        fltView.setSelectionBehavior(QTableView.SelectRows)
        fltView.resizeColumnsToContents()
        self.dateLowFilter_dateEdit.setDate(QDate.currentDate())
        self.dateHiFilter_dateEdit.setDate(QDate.currentDate())
        self.modDateLowFilter_dateEdit.setDate(QDate.currentDate())
        self.modDateHiFilter_dateEdit.setDate(QDate.currentDate())

        ## == stackWidget items setup ==
        self.journalStart_lineEdit.setValidator(validator)
        self.journalEnd_lineEdit.setValidator(validator)
        self.supplier_list = modelsandviews.SupplierListModel()
        self.supplierFilter_tableView.setModel(self.supplier_list)
        supplier_view = self.supplierFilter_tableView
        supplier_view.hideColumn(1)
        supplier_view.setColumnWidth(0, 25)
        supplier_view.verticalHeader().setVisible(False)
        supplier_view.setSelectionMode(QTableView.SingleSelection)
        supplier_view.setSelectionBehavior(QTableView.SelectRows)
        self.ItemList = modelsandviews.UnionItemListModel()
        itemView = self.itemFilter_tableView
        itemView.setModel(self.ItemList)
        itemView.hideColumn(1)
        itemView.verticalHeader().setVisible(False)
        itemView.setSelectionMode(QTableView.SingleSelection)
        itemView.setSelectionBehavior(QTableView.SelectRows)
        itemView.resizeColumnsToContents()
        self.journalFilter_combo.addItems(journalList)
        self.journalFilter_combo.removeItem(1)
        self.journalFilter_combo.setCurrentIndex(-1)
        self.bothItemTypeFilter_checkBox.setChecked(True)
        self.dtfilcom.setVisible(False)
        self.dateFilter_combo = modelsandviews.DateRangeComboBox(self.layoutWidget6)
        self.dateFilter_combo.setMinimumSize(QSize(96, 25))
        self.dateFilter_combo.setMaximumSize(QSize(96, 25))
        self.gridLayout_7.addWidget(self.dateFilter_combo, 1, 1, 1, 1)
        self.dtfilcom_2.setVisible(False)
        self.modfiedDateFilter_combo = modelsandviews.DateRangeComboBox(self.layoutWidget_10)
        self.modfiedDateFilter_combo.setMinimumSize(QSize(96, 25))
        self.modfiedDateFilter_combo.setMaximumSize(QSize(96, 25))
        self.gridLayout_8.addWidget(self.modfiedDateFilter_combo, 1, 1, 1, 1)

        self.amountLow_lineEdit.editingFinished.connect(self.standardAmount)
        self.amountHi_lineEdit.editingFinished.connect(self.standardAmount)
        self.dateRange_combo.currentIndexChanged.connect(lambda:
                self.dateRangeSelection(self.dateRange_combo,
                                        self.dateLow_dateEdit, self.dateHi_dateEdit))
        self.dateFilter_combo.currentIndexChanged.connect(lambda:
                self.dateRangeSelection(self.dateFilter_combo,
                                        self.dateLowFilter_dateEdit, self.dateHiFilter_dateEdit))
        self.modfiedDateFilter_combo.currentIndexChanged.connect(lambda:
                self.dateRangeSelection(self.modfiedDateFilter_combo,
                                        self.modDateLowFilter_dateEdit, self.modDateHiFilter_dateEdit))
        self.connect(crtView, SIGNAL('currentRowChanged(int)'),
                     self.filter_stackedWidget, SLOT('setCurrentIndex(int)'))
        self.journal_combo.currentIndexChanged.connect(self.layoutChange)
        self.findButton.clicked.connect(self.find)
        self.editButton.clicked.connect(self.edit)
        self.results_tableView.doubleClicked.connect(self.edit)
        self.reportButton.clicked.connect(self.printReport)
        self.clearButton.clicked.connect(self.clear)
        self.closeButton.clicked.connect(self.reject)

        ## == Setup stackedWidget operations ==
        self.journalRef_lineEdit.editingFinished.connect(self.journalNum)
        self.journalStart_lineEdit.editingFinished.connect(self.journalIDRange)
        self.journalEnd_lineEdit.editingFinished.connect(self.journalIDRange)
        self.itemDesc_lineEdit.editingFinished.connect(self.itemDesc)
        self.journalFilter_combo.currentIndexChanged.connect(self.journalType)
        self.rmFilter_checkBox.stateChanged.connect(self.itemType)
        self.fgFilter_checkBox.stateChanged.connect(self.itemType)
        self.dateLowFilter_dateEdit.dateChanged.connect(self.dateRange)
        self.dateHiFilter_dateEdit.dateChanged.connect(self.dateRange)
        self.modDateLowFilter_dateEdit.dateChanged.connect(self.modDateRange)
        self.modDateHiFilter_dateEdit.dateChanged.connect(self.modDateRange)
        self.removeFilter_button.clicked.connect(self.removeFilter)
        itemView.clicked.connect(self.checkItem)
        supplier_view.clicked.connect(self.checkItem)
        self.setWindowTitle(localTITLE)

    def reject(self):
        QDialog.reject(self)
        self.myParent.formClosed()

    def standardAmount(self):
        amount_low = str(self.amountLow_lineEdit.text())
        amount_hi = str(self.amountHi_lineEdit.text())
        if not amount_low:
            return
        amount_low = float(amount_low)
        if not amount_hi:
            return
        amount_hi = float(amount_hi)
        if amount_hi < amount_low:
            self.amountLow_lineEdit.setText(str(amount_hi))
            self.amountHi_lineEdit.setText(str(amount_low))

    def standardDate(self):
        fromDate = self.dateLow_dateEdit.date()
        fromDate = fromDate.toPyDate()
        toDate = self.dateHi_dateEdit.date()
        toDate = toDate.toPyDate()
        if toDate < fromDate:
            self.dateLow_dateEdit.setDate(toDate)
            self.dateHi_dateEdit.setDate(fromDate)

    ## == setup detail filter function calls
    def checkItem(self, index):
        model = self.supplier_list
        if self.sender() == self.itemFilter_tableView:
            model = self.ItemList
        row = index.row()
        i = model.index(row, 0)
        if index.model().data(i, Qt.DisplayRole).toString() != 'P':
            model.setData(i, QVariant('P'), role=Qt.EditRole)
        else:
            model.setData(i, QVariant(), role=Qt.EditRole)

    def removeFilter(self):
        row = self.criteria_tableView.currentIndex().row()
        self.filterModel.removeRows(row)

    def dateRangeSelection(self, rangeCombo, dateFrom, dateTo):
        dateFrom.blockSignals(True)
        dateTo.blockSignals(True)
        selection = rangeCombo.currentText()
        date_from, date_to = dateRange(selection)
        dateFrom.setDate(date_from)
        dateTo.setDate(date_to)
        dateFrom.blockSignals(False)
        dateTo.blockSignals(False)
        dateFrom.emit(SIGNAL('dateChanged(QDate)'), date_from)
        dateTo.emit(SIGNAL('dateChanged(QDate)'), date_to)

    def journalNum(self):
        fType = 'Journal Num'
        start = str(self.journalRef_lineEdit.text())
        self.updateFilterModel(fType, 'JournalHeader.journal_no==%s' % start, start)

    def journalIDRange(self):
        fType = 'Journal ID'
        start = str(self.journalStart_lineEdit.text())
        if not start:
            return
        start = int(start)
        end = str(self.journalEnd_lineEdit.text())
        if not end:
            return
        end = int(end)
        if end < start:
            self.journalStart_lineEdit.setText(str(end))
            self.journalEnd_lineEdit.setText(str(start))
            self.journalIDRange()
            return
        self.updateFilterModel(fType,
                               'JournalHeader.journal_id.between(%i,%i)' % (start, end),
                               'Between(%i,%i)' % (start, end))

    def itemDesc(self):
        fType = 'Item Description'
        desc = str(self.itemDesc_lineEdit.text())
        self.updateFilterModel(fType,
                               'unionQuery.c.itemDesc.ilike("%%%s%%")' % desc,
                               desc)

    def journalType(self):
        fType = 'Journal Type'
        jType = str(self.journalFilter_combo.currentText())
        crit = 'JournalHeader.journal_type=="%s"' % jType
        if jType == 'Receive':
            crit = 'or_(JournalHeader.journal_type=="Bill", JournalHeader.journal_type=="Credit")'
        self.updateFilterModel(fType, crit, jType)

    def itemType(self):
        fType = 'Item Type'
        rmdType = self.rmFilter_checkBox.isChecked()
        fgdType = self.fgFilter_checkBox.isChecked()
        if rmdType == 1:
            self.updateFilterModel(fType, 'unionQuery.c.itemType=="RMD"', 'Raw Materials')
        elif fgdType == 1:
            self.updateFilterModel(fType, 'unionQuery.c.itemType=="FGD"', 'Finished Goods')

    def dateRange(self):
        fType = 'Date range'
        fromDate = self.dateLowFilter_dateEdit.date()
        fromDate = fromDate.toPyDate()
        toDate = self.dateHiFilter_dateEdit.date()
        toDate = toDate.toPyDate()
        if toDate < fromDate:
            self.dateLowFilter_dateEdit.setDate(toDate)
            self.dateHiFilter_dateEdit.setDate(fromDate)
            self.dateRange()
            return
        self.updateFilterModel(fType,
                               'JournalHeader.journal_date.between("%s", "%s")' % (fromDate, toDate),
                               'Between(%s, %s)' % (fromDate, toDate))

    def modDateRange(self):
        fType = 'Modified Range'
        fromDate = self.modDateLowFilter_dateEdit.date()
        fromDate = fromDate.toPyDate()
        toDate = self.modDateHiFilter_dateEdit.date()
        toDate = toDate.toPyDate()
        if toDate < fromDate:
            self.modDateLowFilter_dateEdit.setDate(toDate)
            self.modDateHiFilter_dateEdit.setDate(fromDate)
            self.modDateRange()
            return
        self.updateFilterModel(fType,
                               'JournalHeader.modified_date.between("%s", "%s")' % (fromDate, toDate),
                               'Between(%s, %s)' % (fromDate, toDate))

    def updateFilterModel(self, fType, filter, setTo):
        index = self.filterModel.index(0, 1)
        m = self.filterModel.match(index, Qt.DisplayRole, QVariant(fType), 1)
        if len(m) <= 0:
            position = self.ItemList.rowCount() + 1
            self.filterModel.insertRows(position,
                                        FilterList(QString(filter), QString(fType), QString(setTo)))
        else:
            for i in m:
                row = i.row()
                index = self.filterModel.index(row, 0)
                self.filterModel.setData(index, QVariant(filter), Qt.EditRole)
                index = self.filterModel.index(row, 2)
                self.filterModel.setData(index, QVariant(setTo), Qt.EditRole)
        self.criteria_tableView.resizeColumnsToContents()

    ## == Form layout setup
    def layoutChange(self):
        jType = self.journal_combo.currentIndex()
        if jType == RECEIVE:
            self.supplier_combo.setVisible(True)
            self.supplier_label.setVisible(True)
            self.amount_label.setVisible(True)
            self.amountLow_lineEdit.setVisible(True)
            self.amount_and_label.setVisible(True)
            self.amountHi_lineEdit.setVisible(True)
            self.batchDesc_lineEdit.setVisible(False)
            self.supplier_label.setText('Supplier')
        elif jType == BATCH:
            self.supplier_combo.setVisible(False)
            self.amount_label.setVisible(False)
            self.amountLow_lineEdit.setVisible(False)
            self.amount_and_label.setVisible(False)
            self.amountHi_lineEdit.setVisible(False)
            self.batchDesc_lineEdit.setVisible(True)
            self.supplier_label.setText('Description')
        elif jType in (PRODUCTION, ADJUSTMENT, PREP):
            self.supplier_combo.setVisible(False)
            self.amount_label.setVisible(False)
            self.amountLow_lineEdit.setVisible(False)
            self.amount_and_label.setVisible(False)
            self.amountHi_lineEdit.setVisible(False)
            self.batchDesc_lineEdit.setVisible(False)
            self.supplier_label.setVisible(False)

    def getDate(self):
        if self.dateRange_combo.currentText() == 'All':
            return ("", "")
        else:
            date_low = self.dateLow_dateEdit.date()
            date_low = date_low.toPyDate()
            date_hi = self.dateHi_dateEdit.date()
            date_hi = date_hi.toPyDate()
            dateTupple = (date_low, date_hi)
            return dateTupple

    ## == Form operations
    def find(self):
        if self.tabWidget.currentIndex() == 0:
            self.standardFind()
        elif self.tabWidget.currentIndex() == 1:
            self.detailFind()

    def standardFind(self):
        jType = self.journal_combo.currentIndex()
        journal_no = str(self.number_lineEdit.text())
        supplier_id = dLookup(Suppliers.supplier_id,
                              Suppliers.supplier_name == str(self.supplier_combo.currentText()))
        amount_low = str(self.amountLow_lineEdit.text())
        amount_hi = str(self.amountHi_lineEdit.text())
        batch_desc = str(self.batchDesc_lineEdit.text())
        date_low, date_hi = self.getDate()
        if jType == RECEIVE:
            journalNo_filter = ReceiveHeader.journal_no.ilike('%%%s%%' % journal_no) \
                if journal_no else ""
            supplierId_filter = ReceiveHeader.supplier_id == supplier_id \
                if supplier_id else ""
            if amount_low and amount_hi:
                amount_low = float(amount_low)
                amount_hi = float(amount_hi)
                amount_filter = ReceiveHeader.journal_total.between(amount_low, amount_hi)
            elif not amount_low or not amount_hi:
                amount_filter = ""
            if self.dateRange_combo.currentText() == 'All':
                date_filter = ""
            elif not self.dateRange_combo.currentText() == 'All':
                date_filter = ReceiveHeader.journal_date.between(date_low, date_hi)
            self.query = self.session.query(ReceiveHeader) \
                .filter(or_(ReceiveHeader.journal_type == 'Bill',
                            ReceiveHeader.journal_type == 'Credit')) \
                .filter(journalNo_filter).filter(supplierId_filter) \
                .filter(amount_filter).filter(date_filter)
            self.fieldList = [('ID', 'journal_id', 50, 'string'), ('Type', 'journal_type', 50, 'string'),
                              ('No', 'journal_no', 75, 'string'), ('Date', 'journal_date', 150, 'date'),
                              ('Supplier', 'supplier_name', 150, 'string'),
                              ('Amount', 'journal_total', 50, 'number'),
                              ('Modified', 'modified_date', 150, 'date'),
                              ('Memo', 'journal_memo', 150, 'string')]
            self.reportName = 'Receiving List'
            self.columnsToTotal = [(5,)]
        elif jType == BATCH:
            journalNo_filter = or_(BatchHeader.batch_id == journal_no,
                                   BatchHeader.base_no == journal_no) \
                if journal_no else ""
            batchDesc_filter = BatchHeader.base_desc == batch_desc \
                if batch_desc else ""
            if self.dateRange_combo.currentText() == 'All':
                date_filter = ""
            elif not self.dateRange_combo.currentText() == 'All':
                date_filter = BatchHeader.batch_date.between(date_low, date_hi)
            self.query = self.session.query(BatchHeader).filter(journalNo_filter) \
                .filter(batchDesc_filter).filter(date_filter)
            self.fieldList = [('ID', 'batch_id', 50, 'string'), ('Base No.', 'base_no', 50, 'string'),
                              ('Date', 'batch_date', 75, 'date'), ('Journal', 'journal_id', 50, 'string'),
                              ('Memo', 'batch_memo', 150, 'string')]
            self.reportName = 'Batch List'
            self.columnsToTotal = []
        elif jType == PRODUCTION:
            journalNo_filter = or_(ProductionHeader.journal_no.ilike('%%%s%%' % journal_no),
                                   ProductionHeader.journal_id == journal_no) \
                if journal_no else ""
            if self.dateRange_combo.currentText() == 'All':
                date_filter = ""
            elif not self.dateRange_combo.currentText() == 'All':
                date_filter = ProductionHeader.journal_date.between(date_low, date_hi)
            self.query = self.session.query(ProductionHeader).filter(journalNo_filter).filter(date_filter)
            self.fieldList = [('ID', 'journal_id', 50, 'string'),
                              ('Production No', 'journal_id', 50, 'string'),
                              ('Ref No', 'journal_no', 50, 'string'),
                              ('Date', 'journal_date', 75, 'date'),
                              ('Modified', 'modified_date', 150, 'date'),
                              ('Memo', 'journal_memo', 150, 'string')]
            self.reportName = 'Production List'
            self.columnsToTotal = []
        elif jType == ADJUSTMENT:
            journalNo_filter = or_(AdjustmentHeader.journal_no.ilike('%%%s%%' % journal_no),
                                   AdjustmentHeader.journal_id == journal_no) \
                if journal_no else ""
            if self.dateRange_combo.currentText() == 'All':
                date_filter = ""
            elif not self.dateRange_combo.currentText() == 'All':
                date_filter = AdjustmentHeader.journal_date.between(date_low, date_hi)
            self.query = self.session.query(AdjustmentHeader).filter(journalNo_filter).filter(date_filter)
            self.fieldList = [('ID', 'journal_id', 0, 'string'), ('No', 'journal_id', 50, 'string'),
                              ('Date', 'journal_date', 75, 'date'),
                              ('Modified', 'modified_date', 150, 'date'),
                              ('Memo', 'journal_memo', 150, 'string')]
            self.reportName = 'Adjustment List'
            self.columnsToTotal = []
        elif jType == PREP:
            journalNo_filter = PrepHeader.prep_id.ilike('%%%s%%' % journal_no) if journal_no else ""
            if self.dateRange_combo.currentText() == 'All':
                date_filter = ""
            elif not self.dateRange_combo.currentText() == 'All':
                date_filter = PrepHeader.prep_date.between(date_low, date_hi)
            self.query = self.session.query(PrepHeader).filter(journalNo_filter).filter(date_filter)
            self.fieldList = [('ID', 'prep_id', 0, 'string'), ('Date', 'prep_date', 75, 'date'),
                              ('Memo', 'prep_memo', 150, 'string')]
            self.reportName = 'Preparation List'
            self.columnsToTotal = []
        self.populateView()

    def detailFind(self):
        rmd_list = self.session.query(RMD.journal_id, (RMD.bom_id).label('itemID'),
                                      (RMD.total / RMD.qty).label('rmdCost'),
                                      (BOM.bom_no).label('itemNo'),
                                      (BOM.bom_desc).label('itemDesc'),
                                      BOM.supplier_id.label('supplierId'),
                                      JournalHeader.journal_id, JournalHeader.journal_no,
                                      JournalHeader.journal_date, JournalHeader.journal_type,
                                      literal_column('"RMD"').label('itemType')) \
            .join(BOM).join(JournalHeader)
        fgd_list = self.session.query(FGD.journal_id, (FGD.item_id).label('itemID'), FGD.cost,
                                      (Items.item_no).label('itemNo'),
                                      (Items.item_desc).label('itemDesc'),
                                      literal_column('"AW Products"').label('supplierId'),
                                      JournalHeader.journal_id, JournalHeader.journal_no,
                                      JournalHeader.journal_date, JournalHeader.journal_type,
                                      literal_column('"FGD"').label('itemType')) \
            .join(Items).join(JournalHeader)
        unionQuery = rmd_list.union(fgd_list).subquery()
        query = self.session.query(unionQuery).join(JournalHeader)

        itemCrit = self.ItemList.getList()
        itemLine = ''.join(i for i in itemCrit)[:-2]
        itemFilter = "or_(%s)" % itemLine
        query = query.filter(eval(itemFilter)) if itemCrit else query

        supCrit = self.supplier_list.getList()
        supLine = ''.join(i for i in supCrit)[:-2]
        supFilter = "or_(%s)" % supLine
        query = query.filter(eval(supFilter)) if supCrit else query

        critList = self.filterModel.getFilterCriteria()
        for crit in critList:
            query = query.filter(eval(crit))

        self.fieldList = [('ID', 'journal_id', 25, 'string'), ('Journal', 'journal_type', 70, 'string'),
                          ('No', 'journal_no', 75, 'string'), ('Date', 'journal_date', 75, 'date'),
                          ('Item', 'item_no', 50, 'string'), ('Description', 'item_desc', 200, 'string'),
                          ('Cost', 'item_cost', 50, 'number')]
        self.reportName = 'Detail Find List'
        self.columnsToTotal = []
        self.query = []
        for i
in query: journal_id = i[0] item_no = i[3] item_desc = i[4] item_cost = nonZero(i[2], 0) journal_no = i[7] journal_date = i[8] journal_type = i[9] self.query += [DetailFind(journal_id, item_no, item_desc, item_cost, journal_no, journal_date, journal_type)] self.populateView() def populateView(self): self.model = modelsandviews.FindResultModel(self.fieldList) self.model.load(self.query) self.proxyModel.setSourceModel(self.model) self.results_tableView.setModel(self.proxyModel) self.results_tableView.setSortingEnabled(True) self.v_results_label.setText('%s - Results' % len(self.model.results)) self.resizeView() def resizeView(self): self.results_tableView.resizeColumnsToContents() self.results_tableView.horizontalHeader().setStretchLastSection(True) # self.results_tableView.setColumnHidden(0, True) def edit(self): if not self.model: return jType = self.journal_combo.currentIndex() row = self.results_tableView.currentIndex().row() recordIndex = self.proxyModel.index(row, 0) recordID = self.proxyModel.data(recordIndex).toInt()[0] self.editTransaction(jType, recordID) def editTransaction(self, jType, recordID): if jType == RECEIVE: form = self.myParent.receiveForm() form.recall(recordID) elif jType == BATCH: form = self.myParent.batchForm() form.recall(1, recordID) elif jType == PRODUCTION: form = self.myParent.productionForm() form.recall(recordID) elif jType == ADJUSTMENT: form = self.myParent.invAdjustment() form.recall(recordID) elif jType == PREP: form = self.myParent.prodprepForm() form.recall(recordID) def clear(self): widgets = self.findChildren(QWidget) for widget in widgets: if isinstance(widget, (QLineEdit, QTextEdit)): widget.clear() elif isinstance(widget, QComboBox): widget.setCurrentIndex(-1) elif isinstance(widget, QCheckBox): widget.setChecked(False) elif isinstance(widget, QLabel): if widget.objectName()[:2] == 'v_': widget.clear() self.dateRange_combo.setCurrentIndex(0) self.dateFilter_combo.setCurrentIndex(0) if self.model is not None: self.model.clear() def printReport(self): if not self.model: return reportModel = reporting.ReportModel('Simple List') self.refreshReport(reportModel) report_type = 'trans_header_report' if self.tabWidget.currentIndex() == 0 else 'trans_detail_report' self.myParent.reportForm(reportModel, self, report_type) def refreshReport(self, model, report=None): fromDate, toDate = self.getDate() if fromDate and toDate: period = 'From %s To %s.' % (fromDate, toDate) elif toDate: period = 'As of %s.' % toDate else: period = 'All available dates.' model.load(self.reportName, period, self.query, self.fieldList, self.columnsToTotal) def formClosed(self): self.myParent.formClosed() if __name__ == '__main__': app = QApplication(sys.argv) setupDatabase("Production.sqlite") supModel = modelsandviews.SupplierModel() # itmModel = modelsandviews.ItemModel() # bsModel = modelsandviews.BaseListModel() form = FindForm(supModel) form.show() app.exec_()
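# --- Illustrative sketch (not part of the form above): standardFind() and
# detailFind() assemble their SQLAlchemy criteria as Python source strings and
# eval() them into the query. The same composition works without eval() by
# collecting real expression objects in a list and unpacking it into filter().
# The Header model and the values below are made-up stand-ins, not this app's
# actual schema.
from sqlalchemy import Column, Integer, String, create_engine, or_
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Header(Base):
    __tablename__ = 'header'
    journal_id = Column(Integer, primary_key=True)
    journal_no = Column(String)
    journal_type = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

criteria = []
journal_no = '100'
if journal_no:  # append each optional filter only when the user supplied it
    criteria.append(Header.journal_no.ilike('%%%s%%' % journal_no))
criteria.append(or_(Header.journal_type == 'Bill',
                    Header.journal_type == 'Credit'))
rows = session.query(Header).filter(*criteria).all()  # no eval() involved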
from PySide.QtCore import * from PySide.QtGui import * from construct import * from construct.adapters import * from math import * from binascii import * #from ..mmapslice import * def red(cons): cons.color = Qt.red return cons class TreeNode(object): def __init__(self, value, row, parent, cons, name="", root=None): self.value = value self.row = row self.parent = parent self.name = name self.cons = cons self.root = root or self try: self.offset = self.parent.offset + self.parent.size_so_far except: self.offset = 0 self.children = [] if isinstance(parent, TreeNode): # try: parent.size_so_far += len(self.cons.build(self.value)) # except: # parent.size_so_far += self.cons.size() self.size_so_far = 0 if isinstance(self.value, dict): # struct for i, con in enumerate(self.cons.subcons): if isinstance(con, ConstAdapter): self.children.append(TreeNode(con.value, i, self, con, con.name or "Magic", self.root)) else: self.children.append(TreeNode(self.value[con.name], i, self, con, con.name, self.root)) elif isinstance(self.value, list): for i, v in enumerate(self.value): self.children.append(TreeNode(v, i, self, self.cons.subcon, "{}[{}]".format(self.name, i), self.root)) def read_value(self, val): # if isinstance(self.value, Password): # assert len(val) < 16 # return Password(val, self.value.length) if isinstance(self.value, (int, float)): return eval(val, globals(), {self.parent.name: self.parent.value}) elif isinstance(self.value, str): return val else: raise Exception('dont know how to read for a value of %s', self.value) def editable(self): if isinstance(self.cons, ConstAdapter): return False if isinstance(self.value, (dict, list)): return False return True def size(self): return len(self.cons.build(self.value)) class ConstructModel(QAbstractItemModel): def __init__(self, *children): super(ConstructModel, self).__init__() self.root = TreeNode(None,0,None, None) for child in children: self.root.children.append(child) #self.setRootIndex def columnCount(self, parent): return 4 def rowCount(self, parent): if parent.row() == -1 and parent.column() == -1: return len(self.root.children) if parent.isValid(): item = parent.internalPointer() return len(item.children) def index(self, row, column, item): if item.isValid(): item = item.internalPointer() elif len(self.root.children) == 0: return QModelIndex() else: return self.createIndex(row,column,self.root.children[row]) return self.createIndex(row, column, item.children[row]) def flags(self, index): if not index.isValid(): return Qt.ItemIsEnabled if index.column() == 1 and index.internalPointer().editable(): return super(ConstructModel,self).flags(index) | Qt.ItemIsEditable return super(ConstructModel,self).flags(index) def setData(self, index, value, role): if index.isValid() and role == Qt.EditRole: try: item = index.internalPointer() parent = item.parent row = index.row() if isinstance(parent.value, dict): key = item.name print key elif isinstance(parent.value, list): key = row else: return False val = item.read_value(value) try: # build and reparse, else item.value might be float when the cons is int parent.value[key] = item.value = item.cons.parse(item.cons.build(val)) except: return False data = item.root.cons.build(item.root.value) self.item.buf[:len(data)] = data print self.buf[:len(data)].__repr__() self.dataChanged.emit(index, index) except Exception as e: raise return False def data(self, index, role): if role not in (Qt.DisplayRole, Qt.EditRole): return if not index.isValid(): return item = index.internalPointer() if role in (Qt.DisplayRole, 
Qt.EditRole): if index.column() == 0: return str(item.name) elif index.column() == 1: if isinstance(item.value, dict): return "<Struct>" elif isinstance(item.value, list): return "<Array>" else: return str(item.value) elif index.column() == 2: return hex(item.offset) else: return hex(item.size()) def headerData(self, section, orientation, role): if role != Qt.DisplayRole: return if orientation == Qt.Horizontal: return ['Name', 'Value','Offset','Size'][section] def parent(self, index): if not index.isValid(): return QModelIndex() item = index.internalPointer() if item == self.root: return QModelIndex() return self.createIndex(item.row,0,item.parent) def add_tree(self, tree): self.beginInsertRows(self.index(0,0, QModelIndex()), len(self.root.children), len(self.root.children)) self.root.children.append(tree) tree.parent = self.root self.endInsertRows() def rm_tree(self, int_index): self.beginRemoveRows(self.index(0,0, QModelIndex()), int_index, int_index) del self.root.children[int_index] self.endRemoveRows() def clear(self): self.root.children = [] self.reset() class StructExplorer(QWidget): def __init__(self, *roots): super(StructExplorer, self).__init__(None) self.tv = tv = QTreeView() self.roots = roots self.model = ConstructModel() tv.setItemsExpandable(True) tv.setModel(self.model) tv.expandAll() self.layout = l = QGridLayout() self.setLayout(l) self.setMinimumWidth(500) l.addWidget(tv, 0, 0, 1, 4) self.button = b = QPushButton("ok") self.button.clicked.connect(self.klick) l.addWidget(b, 1, 0) self.label = QLabel("") l.addWidget(self.label, 1, 1) self.b2 = QPushButton("clear") self.b2.clicked.connect(self.klock) l.addWidget(self.b2, 1, 2) self.sm = self.tv.selectionModel() self.sm.currentRowChanged.connect(self.updatelabel) self.model.dataChanged.connect(self.updatelabel) self.i = 0 def updatelabel(self, current, previous): item = current.internalPointer() if isinstance(item, TreeNode): self.label.setText(hexlify(item.cons.build(item.value))) #self.data[item.offset,item.offset+item.size()]) def klick(self): self.model.add_tree(self.roots[self.i % len(self.roots)]) self.i += 1 def klock(self): self.model.clear() if __name__ == '__main__': app = QApplication([]) import mmap content = "\x05hello\x08world!!!" data = mmap.mmap(-1, len(content)) data[:] = content cons = Struct("foo", PascalString("first"), PascalString("second")) root1 = TreeNode(cons.parse(data), 0, None, cons, cons.name) root2 = TreeNode(cons.parse(data), 0, None, cons, cons.name) root3 = TreeNode(cons.parse(data), 0, None, cons, cons.name) root4 = TreeNode(cons.parse(data), 0, None, cons, cons.name) w = StructExplorer(root1, root2, root3, root4) w.show() app.exec_()
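# --- Illustrative sketch: TreeNode's offset/size bookkeeping above leans on
# the fact that every construct declaration can both parse and build. A plain
# round trip through the same legacy-API Struct used in __main__ (assumed to
# be construct 2.5.x, matching the star import above):
demo = Struct("demo", PascalString("first"), PascalString("second"))
box = demo.parse("\x05hello\x08world!!!")
print box.first, box.second      # -> hello world!!!
raw = demo.build(box)            # rebuilding yields the original byte string
print len(raw)                   # the total size, as TreeNode.size() computes it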
""" Copyright (c) 2013, Triad National Security, LLC All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Triad National Security, LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import random from multiprocessing import Pool, Event from datetime import datetime from collections import namedtuple import copy # Note: We use a global multiprocessing.Event to deal with a KeyboardInterrupt. This idea comes from # http://stackoverflow.com/questions/14579474/multiprocessing-pool-spawning-new-childern-after-terminate-on-linux-python2-7. # This is not necessary when running under Python 3, but to keep 2.7 compatability, I'm leaving it in. terminating = Event() # HarmonySearchResults is a struct-like object that we'll use to attach the results of the search. # namedtuples are lightweight and trivial to extend should more results be desired in the future. Right now, we're just # keeping track of the total elapsed clock time, the best harmony found, the fitness for that harmony, and the harmony memory, # which allows you to see the top harmonies. HarmonySearchResults = namedtuple('HarmonySearchResults', ['elapsed_time', 'best_harmony', 'best_fitness', 'harmony_memories', 'harmony_histories']) def harmony_search(objective_function, num_processes, num_iterations, initial_harmonies=None): """ Here, we use multiprocessing.Pool to do multiple harmony searches simultaneously. Since HS is stochastic (unless random_seed is set), multiple runs can find different results. We run the specified number of iterations on the specified number of processes and return an instance of HarmonySearchResults. 
""" pool = Pool(num_processes) try: start = datetime.now() pool_results = [pool.apply_async(worker, args=(objective_function, initial_harmonies,)) for i in range(num_iterations)] pool.close() # no more tasks will be submitted to the pool pool.join() # wait for all tasks to finish before moving on end = datetime.now() elapsed_time = end - start # find best harmony from all iterations best_harmony = None best_fitness = float('-inf') if objective_function.maximize() else float('+inf') harmony_memories = list() harmony_histories = list() for result in pool_results: harmony, fitness, harmony_memory, harmony_history = result.get() # multiprocessing.pool.AsyncResult is returned for each process, so we need to call get() to pull out the value if (objective_function.maximize() and fitness > best_fitness) or (not objective_function.maximize() and fitness < best_fitness): best_harmony = harmony best_fitness = fitness harmony_memories.append(harmony_memory) harmony_histories.append(harmony_history) return HarmonySearchResults(elapsed_time=elapsed_time, best_harmony=best_harmony, best_fitness=best_fitness,\ harmony_memories=harmony_memories, harmony_histories=harmony_histories) except KeyboardInterrupt: pool.terminate() def worker(objective_function, initial_harmonies=None): """ This is just a dummy function to make multiprocessing work with a class. It also checks/sets the global multiprocessing.Event to prevent new processes from starting work on a KeyboardInterrupt. """ try: if not terminating.is_set(): hs = HarmonySearch(objective_function) return hs.run(initial_harmonies=initial_harmonies) except KeyboardInterrupt: terminating.set() # set the Event to true to prevent the other processes from doing any work class HarmonySearch(object): """ This class implements the harmony search (HS) global optimization algorithm. In general, what you'll do is this: 1. Implement an objective function that inherits from ObjectiveFunctionInterface. 2. Initialize HarmonySearch with this objective function (e.g., hs = HarmonySearch(objective_function)). 3. Run HarmonySearch (e.g., results = hs.run()). """ def __init__(self, objective_function): """ Initialize HS with the specified objective function. Note that this objective function must implement ObjectiveFunctionInterface. """ self._obj_fun = objective_function def run(self, initial_harmonies=None): """ This is the main HS loop. It initializes the harmony memory and then continually generates new harmonies until the stopping criterion (max_imp iterations) is reached. 
""" # set optional random seed if self._obj_fun.use_random_seed(): random.seed(self._obj_fun.get_random_seed()) # harmony_memory stores the best hms harmonies self._harmony_memory = list() # harmony_history stores all hms harmonies every nth improvisations (i.e., one 'generation') self._harmony_history = list() # fill harmony_memory using random parameter values by default, but with initial_harmonies if provided self._initialize(initial_harmonies) # create max_imp improvisations generation = 0 num_imp = 0 while(num_imp < self._obj_fun.get_max_imp()): # generate new harmony harmony = list() for i in range(0, self._obj_fun.get_num_parameters()): if random.random() < self._obj_fun.get_hmcr(): self._memory_consideration(harmony, i) if random.random() < self._obj_fun.get_par(): self._pitch_adjustment(harmony, i) else: self._random_selection(harmony, i) fitness = self._obj_fun.get_fitness(harmony) self._update_harmony_memory(harmony, fitness) num_imp += 1 # save harmonies every nth improvisations (i.e., one 'generation') if num_imp % self._obj_fun.get_hms() == 0: generation += 1 harmony_list = {'gen': generation, 'harmonies': copy.deepcopy(self._harmony_memory)} self._harmony_history.append(harmony_list) # return best harmony best_harmony = None best_fitness = float('-inf') if self._obj_fun.maximize() else float('+inf') for harmony, fitness in self._harmony_memory: if (self._obj_fun.maximize() and fitness > best_fitness) or (not self._obj_fun.maximize() and fitness < best_fitness): best_harmony = harmony best_fitness = fitness return best_harmony, best_fitness, self._harmony_memory, self._harmony_history def _initialize(self, initial_harmonies=None): """ Initialize harmony_memory, the matrix (list of lists) containing the various harmonies (solution vectors). Note that we aren't actually doing any matrix operations, so a library like NumPy isn't necessary here. The matrix merely stores previous harmonies. If harmonies are provided, then use them instead of randomly initializing them. Populate harmony_history with initial harmony memory. """ if initial_harmonies is not None: # verify that the initial harmonies are provided correctly if len(initial_harmonies) != self._obj_fun.get_hms(): raise ValueError('Number of initial harmonies does not equal to the harmony memory size.') num_parameters = self._obj_fun.get_num_parameters() for i in range(len(initial_harmonies)): num_parameters_initial_harmonies = len(initial_harmonies[i]) if num_parameters_initial_harmonies != num_parameters: raise ValueError('Number of parameters in initial harmonies does not match that defined.') else: initial_harmonies = list() for i in range(0, self._obj_fun.get_hms()): harmony = list() for j in range(0, self._obj_fun.get_num_parameters()): self._random_selection(harmony, j) initial_harmonies.append(harmony) for i in range(0, self._obj_fun.get_hms()): fitness = self._obj_fun.get_fitness(initial_harmonies[i]) self._harmony_memory.append((initial_harmonies[i], fitness)) harmony_list = {'gen': 0, 'harmonies': self._harmony_memory} self._harmony_history.append(harmony_list) def _random_selection(self, harmony, i): """ Choose a note according to get_value(). Remember that even if a note is not variable, get_value() must still return a valid value. """ harmony.append(self._obj_fun.get_value(i)) def _memory_consideration(self, harmony, i): """ Randomly choose a note previously played. 
""" memory_index = random.randint(0, self._obj_fun.get_hms() - 1) harmony.append(self._harmony_memory[memory_index][0][i]) def _pitch_adjustment(self, harmony, i): """ If variable, randomly adjust the pitch up or down by some amount. This is the only place in the algorithm where there is an explicit difference between continuous and discrete variables. The probability of adjusting the pitch either up or down is fixed at 0.5. The maximum pitch adjustment proportion (mpap) and maximum pitch adjustment index (mpai) determine the maximum amount the pitch may change for continuous and discrete variables, respectively. For example, suppose that it is decided via coin flip that the pitch will be adjusted down. Also suppose that mpap is set to 0.25. This means that the maximum value the pitch can be dropped will be 25% of the difference between the lower bound and the current pitch. mpai functions similarly, only it relies on indices of the possible values instead. """ if(self._obj_fun.is_variable(i)): if self._obj_fun.is_discrete(i): current_index = self._obj_fun.get_index(i, harmony[i]) # discrete variable if random.random() < 0.5: # adjust pitch down harmony[i] = self._obj_fun.get_value(i, current_index - random.randint(0, min(self._obj_fun.get_mpai(), current_index))) else: # adjust pitch up harmony[i] = self._obj_fun.get_value(i, current_index + random.randint(0, min(self._obj_fun.get_mpai(), self._obj_fun.get_num_discrete_values(i) - current_index - 1))) else: # continuous variable if random.random() < 0.5: # adjust pitch down harmony[i] -= (harmony[i] - self._obj_fun.get_lower_bound(i)) * random.random() * self._obj_fun.get_mpap() else: # adjust pitch up harmony[i] += (self._obj_fun.get_upper_bound(i) - harmony[i]) * random.random() * self._obj_fun.get_mpap() def _update_harmony_memory(self, considered_harmony, considered_fitness): """ Update the harmony memory if necessary with the given harmony. If the given harmony is better than the worst harmony in memory, replace it. This function doesn't allow duplicate harmonies in memory. """ if (considered_harmony, considered_fitness) not in self._harmony_memory: worst_index = None worst_fitness = float('+inf') if self._obj_fun.maximize() else float('-inf') for i, (harmony, fitness) in enumerate(self._harmony_memory): if (self._obj_fun.maximize() and fitness < worst_fitness) or (not self._obj_fun.maximize() and fitness > worst_fitness): worst_index = i worst_fitness = fitness if (self._obj_fun.maximize() and considered_fitness > worst_fitness) or (not self._obj_fun.maximize() and considered_fitness < worst_fitness): self._harmony_memory[worst_index] = (considered_harmony, considered_fitness)
import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt from matplotlib import animation from scipy import constants def rotation_transform(axis, angle, ax = None): if ax is None: ax = plt.gca() t_scale = ax.transData t_rotate = mpl.transforms.Affine2D().rotate_deg_around(axis[0], axis[1], angle*180/np.pi) return t_rotate + t_scale def animate(spheres, radius, T): r = radius*1e9 for sphere in spheres: sphere.x *= 1e9 sphere.y *= 1e9 xmin = np.inf; xmax = -np.inf ymin = np.inf; ymax = -np.inf fig,axes = plt.subplots(1,2) plt.subplots_adjust(hspace=0.3) plt.subplot(axes[0]) circles = [] lines = [] for i,sphere in enumerate(spheres): xmin = min(np.min(sphere.x), xmin) xmax = max(np.max(sphere.x), xmax) ymin = min(np.min(sphere.y), ymin) ymax = max(np.max(sphere.y), ymax) circles.append(plt.Circle([sphere.x[0], sphere.y[0]], r, animated=True, edgecolor='C{}'.format(i), fc='white', lw=2)) lines.append(plt.Line2D([sphere.x[0]-r, sphere.x[0]+r], [sphere.y[0], sphere.y[0]], lw=1.5, color='black', animated=True)) plt.gca().add_artist(circles[-1]) plt.gca().add_line(lines[-1]) plt.xlim([xmin-r, xmax+r]) plt.ylim([ymin-r, ymax+r]) plt.xlabel("x (nm)") plt.ylabel("y (nm)") plt.gca().set_aspect('equal') ax = plt.gca() title = ax.text(.97,0.03, r"{0:.2f} $\mu$s".format(0.0), transform=ax.transAxes, horizontalalignment='right', fontsize=13, animated=True) def update(t): for i,sphere in enumerate(spheres): circles[i].center = (sphere.x[t], sphere.y[t]) lines[i].set_data([sphere.x[t]-r, sphere.x[t]+r], [sphere.y[t], sphere.y[t]]) lines[i].set_transform(rotation_transform([sphere.x[t], sphere.y[t]], sphere.phi[t], ax=ax)) title.set_text(r"{0:.2f} $\mu$s".format(dt*t*1.e6)) return circles + lines + [title] anim = animation.FuncAnimation(plt.gcf(), update, frames=np.arange(0,len(spheres[0].x),1), interval=6, blit=True, repeat=True) # plt.figure() # plt.plot(time, phi) # Writer = animation.writers['ffmpeg'] # writer = Writer(fps=30, bitrate=7800) # anim.save('test.mp4', writer=writer, dpi=200) # plt.plot(x, y) # plt.figure() # rad_data = np.array([(sphere.x**2 + sphere.y**2)**0.5 for sphere in spheres]) # plt.hist(rad_data.flatten()) # plt.figure() # phi_data = np.array([sphere.phi[4000:] % 2*np.pi for sphere in spheres]) # plt.hist(phi_data.flatten()) plt.subplot(axes[1]) plt.plot(spheres[0].x, spheres[0].y) plt.plot(spheres[1].x, spheres[1].y) plt.xlabel("x (nm)") plt.ylabel("y (nm)") plt.xlim([xmin-r, xmax+r]) plt.ylim([ymin-r, ymax+r]) plt.gca().set_aspect('equal') plt.suptitle(r"time step: {0:.1f} ns, T = {1} K, $\mu$ = {2:.2f} mPa$\cdot$s".format(dt*1e9, T, mu*1e3)) plt.figure() kT = constants.k*T KE_x = 0.5*spheres[0].mass*spheres[0].vx**2/kT KE_y = 0.5*spheres[0].mass*spheres[0].vy**2/kT KE_r = 0.5*spheres[0].Iz*spheres[0].omega**2/kT plt.hist(KE_r[np.isfinite(KE_r)], color = 'C2', bins=np.linspace(0,2.5,80), alpha=0.5) plt.hist(KE_x[np.isfinite(KE_x)], color = 'C0', bins=np.linspace(0,2.5,80), alpha=0.5) plt.hist(KE_y[np.isfinite(KE_y)], color = 'C1', bins=np.linspace(0,2.5,80), alpha=0.5) plt.axvline(x = 0.5*constants.k*T/kT, color='black') plt.figtext(.85,.8, r"$\frac{{1}}{{2}}kT$", horizontalalignment='right', verticalalignment='top', fontsize=14) plt.figtext(.85,.70, r"$\left< \frac{{1}}{{2}}mv_x^2 \right>$: {0:.3f} $kT$".format(np.average(KE_x)) , horizontalalignment='right', verticalalignment='top', fontsize=14, color='C0') plt.figtext(.85,.6, r"$\left< \frac{{1}}{{2}}mv_y^2 \right>$: {0:.3f} $kT$".format(np.average(KE_y)) , horizontalalignment='right', 
verticalalignment='top', fontsize=14, color='C1') plt.figtext(.85,.5, r"$\left< \frac{{1}}{{2}}I_z \omega_z^2 \right>$: {0:.3f} $kT$".format(np.average(KE_r)) , horizontalalignment='right', verticalalignment='top', fontsize=14, color='C2') plt.xlim([0,3*0.5]) plt.xlabel("energy (kT)") plt.ylabel("count") plt.suptitle(r"time step: {0:.1f} ns, T = {1} K, $\mu$ = {2:.2f} mPa$\cdot$s".format(dt*1e9, T, mu*1e3)) plt.figure() plt.plot(time*1e3, spheres[0].phi*180/np.pi) plt.plot(time*1e3, spheres[1].phi*180/np.pi) plt.xlabel("time (ms)") plt.ylabel("angle (deg.)") plt.show() class rigid_body: def __init__(self, pos, mass, Iz): self.pos = pos self.angle = 0 self.mass = mass self.Iz = Iz self.velocity = np.zeros(2) self.angular_velocity = 0 self.F = np.zeros(2) self.prevF = np.zeros(2) self.T = 0 self.prevT = 0 self.predicted_velocity = np.zeros(2) self.predicted_angular_velocity = 0 self.x = [] self.y = [] self.vx = [] self.vy = [] self.omega = [] self.phi = [] def predict(self, dt): self.velocity += (self.prevF/self.mass)*dt/2 self.angular_velocity += (self.prevT/self.Iz)*dt/2 self.pos += self.velocity*dt self.angle += self.angular_velocity*dt def correct(self, F, T, dt): self.velocity += (F/self.mass)*dt/2 self.angular_velocity += (T/self.Iz)*dt/2 self.prevF = F self.prevT = T def push(self, F, dt): self.velocity += (F/self.mass)*dt self.pos += self.velocity*dt def twist(self, T, dt): self.angular_velocity += (T/self.Iz)*dt self.angle += self.angular_velocity*dt # final time and time step tf = 300e-6 dt = 172e-9 tf = 100000*dt time = np.arange(0,tf,dt) # sphere properties radius = 150e-9 density = 10490 mass = 4/3*np.pi*radius**3*density Iz = 2/5*mass*radius**2 # initial conditions spheres = [ rigid_body(np.array([-200e-9,0.0]), mass, Iz), rigid_body(np.array([200e-9,0.0]), mass, Iz) ] # spheres = [rigid_body(100e-9*np.array([x,0.0]), mass, Iz) for x in np.arange(-8,8,4)] # spheres = [rigid_body(100e-9*np.array([x,4.0]), mass, Iz) for x in np.arange(-8,8,4)] # spheres.extend([rigid_body(100e-9*np.array([x,-4.0]), mass, Iz) for x in np.arange(-8,8,4)]) # spheres.extend([rigid_body(100e-9*np.array([x,-8.0]), mass, Iz) for x in np.arange(-8,8,4)]) # fluid properties mu = 0.6e-3 # liquid viscosity temp = 320 # temperature alpha_T = 6*np.pi*mu*radius alpha_R = 8*np.pi*mu*radius**3 beta_T = (2*alpha_T*constants.k*temp/dt)**0.5 beta_R = (2*alpha_R*constants.k*temp/dt)**0.5 # Electrostatic repulsion estatic = 0 # beam properties r0 = 400e-9 # radius I = 0e-12 # intensity l = 0e-12 # angular drive center = np.array([0,-200e-9]) # beam center for t in time: # Fadd = np.zeros((len(spheres),2)) # for i in range(0,len(spheres)): # for j in range(i+1,len(spheres)): # d = spheres[i].pos - spheres[j].pos # r = np.linalg.norm(d) # Fadd[i] += estatic*d/r**3 # Fadd[j] += -estatic*d/r**3 # if r < 2*radius: # dv = spheres[i].velocity - spheres[j].velocity # spheres[i].velocity -= np.dot(dv, d)/r**2 * d # spheres[j].velocity += np.dot(dv, d)/r**2 * d for i,sphere in enumerate(spheres): sphere.predict(dt) F = -alpha_T*sphere.velocity + beta_T*np.random.normal(size=2) n = sphere.pos - center dist = np.linalg.norm(n) n /= dist that = np.array([n[1], -n[0]]) # F += I*1*dist*np.exp(dist**2/r0**2)*(1 - dist**2/r0**2)*n # F += l*that# *np.sin(t/59) # F += Fadd[i] T = -alpha_R*sphere.angular_velocity + beta_R*np.random.normal(size=1)[0] sphere.correct(F,T,dt) sphere.x.append(sphere.pos[0]) sphere.y.append(sphere.pos[1]) sphere.vx.append(sphere.velocity[0]) sphere.vy.append(sphere.velocity[1]) 
sphere.omega.append(sphere.angular_velocity) sphere.phi.append(sphere.angle) for sphere in spheres: sphere.x = np.asarray(sphere.x) sphere.y = np.asarray(sphere.y) sphere.vx = np.asarray(sphere.vx) sphere.vy = np.asarray(sphere.vy) sphere.phi = np.asarray(sphere.phi) sphere.omega = np.asarray(sphere.omega) animate(spheres, radius=radius, T=temp) # plt.plot(1e9*spheres[0].x, 1e9*spheres[0].y, '.-') # plt.plot(1e9*spheres[1].x, 1e9*spheres[1].y, '.-') plt.show()
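# --- Illustrative sketch: the noise amplitudes beta_T and beta_R above follow
# the fluctuation-dissipation relation beta = sqrt(2 * alpha * kT / dt). A
# quick 1-D check that drag plus that random kick reproduce equipartition,
# <m v^2 / 2> -> kT / 2. A plain Euler update is only accurate when
# alpha*dt/m << 1, so the step here is deliberately much smaller than the dt
# used in the simulation above; the step count is arbitrary.
dt_s = dt / 200.0
beta_s = (2 * alpha_T * constants.k * temp / dt_s) ** 0.5
v = 0.0
v_sq = 0.0
n_steps = 400000
for _ in range(n_steps):
    F = -alpha_T * v + beta_s * np.random.normal()
    v += (F / mass) * dt_s       # plain Euler step
    v_sq += v * v
mean_ke = 0.5 * mass * v_sq / n_steps
print(mean_ke / (0.5 * constants.k * temp))  # should come out close to 1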
class BadGrammar(Exception): """The rule definitions passed to Grammar contain syntax errors.""" class VisitationError(Exception): """Something went wrong while traversing a parse tree. This exception exists to augment an underlying exception with information about where in the parse tree the error occurred. Otherwise, it could be tiresome to figure out what went wrong; you'd have to play back the whole tree traversal in your head. """ # TODO: Make sure this is pickleable. Probably use @property pattern. Make # the original exc and node available on it if they don't cause a whole # raft of stack frames to be retained. def __init__(self, exc, exc_class, node): """Construct. :arg exc: What went wrong. We wrap this and add more info. :arg node: The node at which the error occurred """ self.original_class = exc_class super(VisitationError, self).__init__( '%s: %s\n\n' 'Parse tree:\n' '%s' % (exc_class.__name__, exc, node.prettily(error=node))) class UndefinedLabel(VisitationError): """A rule referenced in a grammar was never defined. Circular references and forward references are okay, but you have to define stuff at some point. """ def __init__(self, label): self.label = label def __unicode__(self): return u'The label "%s" was never defined.' % self.label __str__ = __unicode__
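# --- Illustrative sketch: how a tree visitor might wrap a low-level failure in
# VisitationError so the report names the offending parse-tree node. MiniNode
# is a stand-in for the real node type, which lives elsewhere in this package.
class MiniNode(object):
    def __init__(self, text):
        self.text = text

    def prettily(self, error=None):
        return '<Node matching %r>' % self.text


node = MiniNode('forty-two')
try:
    int(node.text)                 # the visitor's real work blows up here
except ValueError as exc:
    wrapped = VisitationError(exc, exc.__class__, node)
    print(wrapped)                 # class name, message, then the parse tree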
#!../bin/python # -*- coding: utf-8 -*- from __future__ import with_statement import csv import datetime import json import urllib2 import StringIO import logging import logging.handlers import re from pytz import timezone import contextlib import copy # Entity type AMBIENT_TYPE_NAME = 'AirQualityObserved' # List of known air quality stations station_dict = { } # Orion service that will store the data orion_service = 'http://localhost:1030' logger = None madrid_tz = timezone('CET') pollutant_dict = { '01': 'SO2', '06': 'CO', '07': 'NO', '08': 'NO2', '09': 'PM2.5', '10': 'PM10', '12': 'NOx', '14': 'O3', '20': 'TOL', '30': 'BEN', '35': 'EBE', '37': 'MXY', '38': 'PXY', '39': 'OXY', '42': 'TCH', '43': 'CH4', '44': 'NHMC' } pollutant_descriptions = { '01': 'Sulfur Dioxide', '06': 'Carbon Monoxide', '07': 'Nitrogen Monoxide', '08': 'Nitrogen Dioxide', '09': 'Particles lower than 2.5', '10': 'Particles lower than 10', '12': 'Nitrogen oxides', '14': 'Ozone', '20': 'Toluene', '30': 'Benzene', '35': 'Etilbenzene', '37': 'Metaxylene', '38': 'Paraxylene', '39': 'Orthoxylene', '42': 'Total Hydrocarbons', '43': 'Hydrocarbons - Methane', '44': 'Non-methane hydrocarbons - Hexane' } other_dict = { '80': 'ultravioletRadiation', '81': 'windSpeed', '82': 'windDirection', '83': 'temperature', '86': 'relativeHumidity', '87': 'barometricPressure', '88': 'solarRadiation', '89': 'precipitation', '92': 'acidRainLevel' } other_descriptions = { '80': 'Ultraviolet Radiation', '81': 'Wind Speed', '82': 'Wind Direction', '83': 'temperature', '86': 'Relative Humidity', '87': 'Barometric Pressure', '88': 'Solar Radiation', '89': 'Precipitation', '92': 'Acid Rain Level' } dataset_url = 'http://datos.madrid.es/egob/catalogo/212531-7916318-calidad-aire-tiempo-real.txt' # Statistics for tracking purposes persisted_entities = 0 in_error_entities = 0 MIME_JSON = 'application/json' FIWARE_SERVICE = 'AirQuality' FIWARE_SPATH = '/Spain_Madrid' # Sanitize string to avoid forbidden characters by Orion def sanitize(str_in): return re.sub(r"[<(>)\"\'=;]", "", str_in) # Obtains air quality data and harmonizes it, persisting to Orion def get_air_quality_madrid(): req = urllib2.Request(url=dataset_url) with contextlib.closing(urllib2.urlopen(req)) as f: csv_data = f.read() csv_file = StringIO.StringIO(csv_data) reader = csv.reader(csv_file, delimiter=',') # Dictionary with station data indexed by station code # An array per station code containing one element per hour stations = { } for row in reader: station_code = str(row[0]) + str(row[1]) + str(row[2]) station_num = row[2] if not station_dict[station_num]: continue if not station_code in stations: stations[station_code] = [] magnitude = row[3] if (not magnitude in pollutant_dict) and (not magnitude in other_dict): continue is_other = None if magnitude in pollutant_dict: property_name = pollutant_dict[magnitude] property_desc = pollutant_descriptions[magnitude] is_other = False if magnitude in other_dict: property_name = other_dict[magnitude] property_desc = other_descriptions[magnitude] is_other = True hour = 0 for x in xrange(9, 57, 2): value = row[x] value_control = row[x + 1] if value_control == 'V': # A new entity object is created if it does not exist yet if (len(stations[station_code]) < hour + 1): stations[station_code].append(build_station(station_num, station_code, hour, row)) elif (not 'id' in stations[station_code][hour]): stations[station_code][hour] = build_station(station_num, station_code, hour, row) param_value = float(value) if not is_other: unit_code = 'GQ' if 
property_name == 'CO': unit_code = 'GP' measurand_data = [property_name, str(param_value), unit_code, property_desc] stations[station_code][hour]['measurand']['value'].append(','.join(measurand_data)) else: if property_name == 'relativeHumidity': param_value = param_value / 100 stations[station_code][hour][property_name] = { 'value': param_value } else: # ensure there are no holes in the data if (len(stations[station_code]) < hour + 1): stations[station_code].append({}) hour += 1 print len(stations[station_code]) # Now persisting data to Orion Context Broker for station in stations: station_data = stations[station] data_array = [] for data in station_data: if 'id' in data: data_array.append(data) if len(data_array) > 0: logger.debug("Retrieved data for %s at %s (last hour)", station, data_array[-1]['dateObserved']['value']) # Last measurement is duplicated to have an entity with the latest measurement obtained last_measurement = data_array[-1] last_measurement['id'] = 'Madrid-AirQualityObserved-' + last_measurement['stationCode']['value'] + '-' + 'latest' else: logger.warn('No data retrieved for: %s', station) post_station_data(station, data_array) ############# # Builds a new entity of type AirQualityObserved def build_station(station_num, station_code, hour, row): station_data = { 'type': AMBIENT_TYPE_NAME, 'measurand': { 'type': 'List', 'value': [] }, 'stationCode': { 'value': station_code }, 'stationName': { 'value': sanitize(station_dict[station_num]['name']) }, 'address': { 'type': 'PostalAddress', 'value': { 'addressCountry': 'ES', 'addressLocality': 'Madrid', 'streetAddress': sanitize(station_dict[station_num]['address']) } }, 'location': { 'type': 'geo:json', 'value': station_dict[station_num]['location']['value'] or None }, 'source': { 'type': 'URL', 'value': 'http://datos.madrid.es' }, 'dataProvider': { 'value': 'TEF' } } valid_from = datetime.datetime(int(row[6]), int(row[7]), int(row[8]), hour) station_data['id'] = 'Madrid-AirQualityObserved-' + station_code + '-' + valid_from.isoformat() valid_to = (valid_from + datetime.timedelta(hours=1)) # Adjust timezones valid_from = valid_from.replace(tzinfo=madrid_tz) valid_to = valid_to.replace(tzinfo=madrid_tz) station_data['validity'] = { 'value': { 'from': valid_from.isoformat(), 'to': valid_to.isoformat() }, 'type': 'StructuredValue' } station_data['hour'] = { 'value': str(hour) + ':' + '00' } observ_corrected_date = valid_from station_data['dateObserved'] = { 'type': 'DateTime', 'value': observ_corrected_date.isoformat() } return station_data # POST data to an Orion Context Broker instance using NGSIv2 API def post_station_data(station_code, data): if len(data) == 0: return payload = { 'actionType': 'APPEND', 'entities': data } data_as_str = json.dumps(payload) headers = { 'Content-Type': MIME_JSON, 'Content-Length': len(data_as_str), 'Fiware-Service': FIWARE_SERVICE, 'Fiware-Servicepath': FIWARE_SPATH } req = urllib2.Request(url=(orion_service + '/v2/op/update'), data=data_as_str, headers=headers) logger.debug('Going to persist %s to %s - %d', station_code, orion_service, len(data)) try: with contextlib.closing(urllib2.urlopen(req)) as f: global persisted_entities logger.debug("Entity successfully created: %s", station_code) persisted_entities = persisted_entities + 1 except urllib2.URLError as e: global in_error_entities logger.error('Error while POSTing data to Orion: %d %s', e.code, e.read()) logger.debug('Data which failed: %s', data_as_str) in_error_entities = in_error_entities + 1 # Reads station data from CSV file def 
read_station_csv(): with contextlib.closing(open('madrid_airquality_stations.csv', 'rU')) as csvfile: reader = csv.reader(csvfile, delimiter=',') index = 0 for row in reader: if index <> 0: station_code = row[2] station_name = row[3] station_address = row[4] station_coords = { 'type': 'geo:json', 'value': { 'type': 'Point', 'coordinates': [float(row[0]), float(row[1])] } } station_dict[station_code.zfill(3)] = { 'name': station_name, 'address': station_address, 'location': station_coords } index += 1 station_dict['099'] = { 'name': 'average', 'address': None, 'location': None } def setup_logger(): global logger LOG_FILENAME = 'harvest_madrid.log' # Set up a specific logger with our desired output level logger = logging.getLogger('Madrid') logger.setLevel(logging.DEBUG) # Add the log message handler to the logger handler = logging.handlers.RotatingFileHandler(LOG_FILENAME, maxBytes=2000000, backupCount=3) formatter = logging.Formatter('%(levelname)s %(asctime)s %(message)s') handler.setFormatter(formatter) logger.addHandler(handler) if __name__ == '__main__': setup_logger() read_station_csv() logger.debug('#### Starting a new harvesting and harmonization cycle ... ####') logger.debug('Number of air quality stations known: %d', len(station_dict.keys())) get_air_quality_madrid() logger.debug('Number of entities persisted: %d', persisted_entities) logger.debug('Number of entities in error: %d', in_error_entities) logger.debug('#### Harvesting cycle finished ... ####')
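# --- Illustrative sketch: the shape of the NGSIv2 batch payload that
# post_station_data() POSTs to /v2/op/update. The entity below is a trimmed,
# made-up example of what build_station() emits; the station code and the
# measurement values are not real data.
example_payload = {
    'actionType': 'APPEND',
    'entities': [{
        'id': 'Madrid-AirQualityObserved-28079004-2016-03-15T11:00:00',
        'type': AMBIENT_TYPE_NAME,
        'stationCode': {'value': '28079004'},
        'measurand': {
            'type': 'List',
            'value': ['NO2,54.0,GQ,Nitrogen Dioxide']
        },
        'dateObserved': {'type': 'DateTime', 'value': '2016-03-15T11:00:00+01:00'}
    }]
}
print json.dumps(example_payload, indent=2)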
#!/usr/bin/env python
# encoding: utf-8

preamble = """\\usetikzlibrary{backgrounds,fit}
\\begin{tikzpicture}
    [vertex/.style={circle,draw=blue!50,fill=blue!20,thick},
     selectedvertex/.style={circle,draw=red!50,fill=red!20,thick},
     edge/.style={thick},
     newedge/.style={dashed,thick}]"""


def component1(prefix, offset_x, offset_y, selectedvertexNo):
    def select_vertex(n):
        if n == selectedvertexNo:
            return "selectedvertex"
        else:
            return "vertex"

    def vertex(n, x, y):
        # each TikZ node statement must end with a semicolon
        return ("\\node[" + select_vertex(n) + "] (" + prefix + str(n) + ") at ("
                + str(x) + "," + str(y) + ") {" + str(n) + "};\n")

    ret = ""
    ret += vertex(1, offset_x + 2, offset_y + 2)
    ret += vertex(2, offset_x + 2, offset_y + 0)
    ret += vertex(3, offset_x + 4, offset_y + 1)
    ret += vertex(4, offset_x + 6, offset_y + 0)
    ret += vertex(5, offset_x + 6, offset_y + 2)
    ret += vertex(6, offset_x + 8, offset_y + 1)
    ret += vertex(7, offset_x + 0, offset_y + 1)

    #\draw[ edge] (1) to [out=270,in= 90] (2);
    #\draw[ edge] (1) to [out= 0,in=100] (3);
    #\draw[ edge] (1) to [out=180,in= 80] (7);
    #\draw[ edge] (2) to [out=180,in=280] (7);
    #\draw[ edge] (2) to [out= 0,in=260] (3);
    #\draw[ edge] (3) to [out=280,in=180] (4);
    #\draw[ edge] (3) to [out= 80,in=180] (5);
    #\draw[ edge] (4) to [out= 90,in=270] (5);
    #\draw[ edge] (4) to [out= 0,in=260] (6);
    #\draw[ edge] (5) to [out= 0,in=100] (6);
    return ret

    #return """
    #\\node[{vertexstyle1}] ({prefix}1) at ({x1},{y1}) {1};
    #\\node[{vertexstyle2}] ({prefix}2) at ({x2},{y2}) {2};
    #\\node[{vertexstyle3}] ({prefix}3) at ({x3},{y3}) {3};
    #\\node[{vertexstyle4}] ({prefix}4) at ({x4},{y4}) {4};
    #\\node[{vertexstyle5}] ({prefix}5) at ({x5},{y5}) {5};
    #\\node[{vertexstyle6}] ({prefix}6) at ({x6},{y6}) {6};
    #\\node[{vertexstyle7}] ({prefix}7) at ({x7},{y7}) {7};""".format(
    ##\\draw[ edge] ({prefix}1) to [out=270,in= 90] ({prefix}2);
    ##\\draw[ edge] ({prefix}1) to [out= 0,in=100] ({prefix}3);
    ##\\draw[ edge] ({prefix}1) to [out=180,in= 80] ({prefix}7);
    ##\\draw[ edge] ({prefix}2) to [out=180,in=280] ({prefix}7);
    ##\\draw[ edge] ({prefix}2) to [out= 0,in=260] ({prefix}3);
    ##\\draw[ edge] ({prefix}3) to [out=280,in=180] ({prefix}4);
    ##\\draw[ edge] ({prefix}3) to [out= 80,in=180] ({prefix}5);
    ##\\draw[ edge] ({prefix}4) to [out= 90,in=270] ({prefix}5);
    ##\\draw[ edge] ({prefix}4) to [out= 0,in=260] ({prefix}6);
    ##\\draw[ edge] ({prefix}5) to [out= 0,in=100] ({prefix}6);
    ##\\end{{tikzpicture}}""".format(
    #prefix=prefix,
    #vertexstyle1=select_vertex(1),
    #vertexstyle2=select_vertex(2),
    #vertexstyle3=select_vertex(3),
    #vertexstyle4=select_vertex(4),
    #vertexstyle5=select_vertex(5),
    #vertexstyle6=select_vertex(6),
    #vertexstyle7=select_vertex(7),
    #x1=offset_x+2,
    #x2=offset_x+2,
    #x3=offset_x+4,
    #x4=offset_x+6,
    #x5=offset_x+6,
    #x6=offset_x+8,
    #x7=offset_x+0,
    #y1=offset_y+2,
    #y2=offset_y+0,
    #y3=offset_y+1,
    #y4=offset_y+0,
    #y5=offset_y+2,
    #y6=offset_y+1,
    #y7=offset_y+1)


print(preamble)
print(component1(prefix="a", offset_x=0, offset_y=0, selectedvertexNo=3))
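# --- Illustrative sketch: the script above prints the vertices but leaves the
# edge list in comments and never closes the tikzpicture environment. The
# helper below emits those same edges (endpoints and in/out angles copied from
# the comments) and the closing line.
def edges(prefix):
    spec = [(1, 2, 270, 90), (1, 3, 0, 100), (1, 7, 180, 80),
            (2, 7, 180, 280), (2, 3, 0, 260), (3, 4, 280, 180),
            (3, 5, 80, 180), (4, 5, 90, 270), (4, 6, 0, 260),
            (5, 6, 0, 100)]
    return "\n".join(
        "\\draw[edge] ({p}{a}) to [out={o},in={i}] ({p}{b});".format(
            p=prefix, a=a, b=b, o=o, i=i)
        for (a, b, o, i) in spec)

print(edges(prefix="a"))
print("\\end{tikzpicture}")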
#! /usr/bin/env python
# -*- coding: utf-8 -*-

# The MIT License (MIT)
#
# Copyright (c) 2015 Bartosz Janda
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

NSPropertyListOpenStepFormat = 1
NSPropertyListXMLFormat_v1_0 = 100
NSPropertyListBinaryFormat_v1_0 = 200

NSPropertyListImmutable = 0
NSPropertyListMutableContainers = 1
NSPropertyListMutableContainersAndLeaves = 2


def get_format_text(value):
    """
    Returns NSPropertyListFormat value as text.

    :param int value: NSPropertyListFormat value.
    :return: NSPropertyListFormat value as text.
    :rtype: str
    """
    if value == NSPropertyListOpenStepFormat:
        return "OpenStep"
    elif value == NSPropertyListXMLFormat_v1_0:
        return "XML"
    elif value == NSPropertyListBinaryFormat_v1_0:
        return "Binary"
    return "Unknown"


def get_mutability_text(value):
    """
    Returns NSPropertyListMutabilityOptions value as text.

    :param int value: NSPropertyListMutabilityOptions value.
    :return: NSPropertyListMutabilityOptions value as text.
    :rtype: str
    """
    v = list()
    # NSPropertyListImmutable is 0, so it has to be matched by equality;
    # a bitwise test against 0 can never be true.
    if value == NSPropertyListImmutable:
        v.append("Immutable")
    if value & NSPropertyListMutableContainers:
        v.append("MutableContainers")
    if value & NSPropertyListMutableContainersAndLeaves:
        v.append("MutableContainersAndLeaves")
    return ", ".join(v)
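# --- Illustrative sketch: a quick self-check of the two helpers above with
# the constants they decode.
if __name__ == "__main__":
    print(get_format_text(NSPropertyListXMLFormat_v1_0))           # XML
    print(get_mutability_text(NSPropertyListImmutable))            # Immutable
    print(get_mutability_text(NSPropertyListMutableContainers))    # MutableContainers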
import shopify import json from test.test_helper import TestCase class CollectionPublicationTest(TestCase): def test_find_all_collection_publications(self): self.fake( "publications/55650051/collection_publications", method="GET", body=self.load_fixture("collection_publications"), ) collection_publications = shopify.CollectionPublication.find(publication_id=55650051) self.assertEqual(96062799894, collection_publications[0].id) self.assertEqual(60941828118, collection_publications[0].collection_id) def test_find_collection_publication(self): self.fake( "publications/55650051/collection_publications/96062799894", method="GET", body=self.load_fixture("collection_publication"), code=200, ) collection_publication = shopify.CollectionPublication.find(96062799894, publication_id=55650051) self.assertEqual(96062799894, collection_publication.id) self.assertEqual(60941828118, collection_publication.collection_id) def test_create_collection_publication(self): self.fake( "publications/55650051/collection_publications", method="POST", headers={"Content-type": "application/json"}, body=self.load_fixture("collection_publication"), code=201, ) collection_publication = shopify.CollectionPublication.create( { "publication_id": 55650051, "published_at": "2018-01-29T14:06:08-05:00", "published": True, "collection_id": 60941828118, } ) expected_body = { "collection_publication": { "published_at": "2018-01-29T14:06:08-05:00", "published": True, "collection_id": 60941828118, } } self.assertEqual(expected_body, json.loads(self.http.request.data.decode("utf-8"))) def test_destroy_collection_publication(self): self.fake( "publications/55650051/collection_publications/96062799894", method="GET", body=self.load_fixture("collection_publication"), code=200, ) collection_publication = shopify.CollectionPublication.find(96062799894, publication_id=55650051) self.fake("publications/55650051/collection_publications/96062799894", method="DELETE", body="{}", code=200) collection_publication.destroy() self.assertEqual("DELETE", self.http.request.get_method())
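# --- Illustrative sketch: outside the faked-HTTP test harness above, the same
# nested-resource calls first need an activated session. The shop domain, API
# version, and token below are placeholders, not working credentials.
# session = shopify.Session("example.myshopify.com", "2021-01", "PRIVATE_APP_TOKEN")
# shopify.ShopifyResource.activate_session(session)
# publications = shopify.CollectionPublication.find(publication_id=55650051)
# shopify.ShopifyResource.clear_session()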
# Copyright 2009-2014 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Database level operations.""" import warnings from bson.binary import OLD_UUID_SUBTYPE from bson.code import Code from bson.dbref import DBRef from bson.son import SON from pymongo import auth, common, helpers from pymongo.collection import Collection from pymongo.command_cursor import CommandCursor from pymongo.errors import (CollectionInvalid, ConfigurationError, OperationFailure) from pymongo.read_preferences import (modes, secondary_ok_commands, ReadPreference) from pymongo.son_manipulator import SONManipulator class Database(common.BaseObject): """A Mongo database. """ def __init__(self, connection, name): """Get a database by connection and name. Raises :class:`TypeError` if `name` is not an instance of :class:`basestring` (:class:`str` in python 3). Raises :class:`~pymongo.errors.InvalidName` if `name` is not a valid database name. :Parameters: - `connection`: a client instance - `name`: database name .. mongodoc:: databases """ super(Database, self).__init__(slave_okay=connection.slave_okay, read_preference=connection.read_preference, tag_sets=connection.tag_sets, secondary_acceptable_latency_ms=( connection.secondary_acceptable_latency_ms), safe=connection.safe, uuidrepresentation=connection.uuid_subtype, **connection.write_concern) if not isinstance(name, str): raise TypeError("name must be an instance " "of %s" % (str.__name__,)) if name != '$external': helpers._check_database_name(name) self.__name = str(name) self.__connection = connection self.__incoming_manipulators = [] self.__incoming_copying_manipulators = [] self.__outgoing_manipulators = [] self.__outgoing_copying_manipulators = [] def add_son_manipulator(self, manipulator): """Add a new son manipulator to this database. Newly added manipulators will be applied before existing ones. :Parameters: - `manipulator`: the manipulator to add """ base = SONManipulator() def method_overwritten(instance, method): return (getattr( instance, method).__func__ != getattr(base, method).__func__) if manipulator.will_copy(): if method_overwritten(manipulator, "transform_incoming"): self.__incoming_copying_manipulators.insert(0, manipulator) if method_overwritten(manipulator, "transform_outgoing"): self.__outgoing_copying_manipulators.insert(0, manipulator) else: if method_overwritten(manipulator, "transform_incoming"): self.__incoming_manipulators.insert(0, manipulator) if method_overwritten(manipulator, "transform_outgoing"): self.__outgoing_manipulators.insert(0, manipulator) @property def system_js(self): """A :class:`SystemJS` helper for this :class:`Database`. See the documentation for :class:`SystemJS` for more details. .. versionadded:: 1.5 """ return SystemJS(self) @property def connection(self): """The client instance for this :class:`Database`. .. versionchanged:: 1.3 ``connection`` is now a property rather than a method. """ return self.__connection @property def name(self): """The name of this :class:`Database`. .. 
versionchanged:: 1.3 ``name`` is now a property rather than a method. """ return self.__name @property def incoming_manipulators(self): """List all incoming SON manipulators installed on this instance. .. versionadded:: 2.0 """ return [manipulator.__class__.__name__ for manipulator in self.__incoming_manipulators] @property def incoming_copying_manipulators(self): """List all incoming SON copying manipulators installed on this instance. .. versionadded:: 2.0 """ return [manipulator.__class__.__name__ for manipulator in self.__incoming_copying_manipulators] @property def outgoing_manipulators(self): """List all outgoing SON manipulators installed on this instance. .. versionadded:: 2.0 """ return [manipulator.__class__.__name__ for manipulator in self.__outgoing_manipulators] @property def outgoing_copying_manipulators(self): """List all outgoing SON copying manipulators installed on this instance. .. versionadded:: 2.0 """ return [manipulator.__class__.__name__ for manipulator in self.__outgoing_copying_manipulators] def __eq__(self, other): if isinstance(other, Database): us = (self.__connection, self.__name) them = (other.__connection, other.__name) return us == them return NotImplemented def __ne__(self, other): return not self == other def __repr__(self): return "Database(%r, %r)" % (self.__connection, self.__name) def __getattr__(self, name): """Get a collection of this database by name. Raises InvalidName if an invalid collection name is used. :Parameters: - `name`: the name of the collection to get """ return Collection(self, name) def __getitem__(self, name): """Get a collection of this database by name. Raises InvalidName if an invalid collection name is used. :Parameters: - `name`: the name of the collection to get """ return self.__getattr__(name) def create_collection(self, name, **kwargs): """Create a new :class:`~pymongo.collection.Collection` in this database. Normally collection creation is automatic. This method should only be used to specify options on creation. :class:`~pymongo.errors.CollectionInvalid` will be raised if the collection already exists. Options should be passed as keyword arguments to this method. Supported options vary with MongoDB release. Some examples include: - "size": desired initial size for the collection (in bytes). For capped collections this size is the max size of the collection. - "capped": if True, this is a capped collection - "max": maximum number of objects if capped (optional) See the MongoDB documentation for a full list of supported options by server version. :Parameters: - `name`: the name of the collection to create - `**kwargs` (optional): additional keyword arguments will be passed as options for the create collection command .. versionchanged:: 2.2 Removed deprecated argument: options .. versionchanged:: 1.5 deprecating `options` in favor of kwargs """ opts = {"create": True} opts.update(kwargs) if name in self.collection_names(): raise CollectionInvalid("collection %s already exists" % name) return Collection(self, name, **opts) def _apply_incoming_manipulators(self, son, collection): for manipulator in self.__incoming_manipulators: son = manipulator.transform_incoming(son, collection) return son def _apply_incoming_copying_manipulators(self, son, collection): for manipulator in self.__incoming_copying_manipulators: son = manipulator.transform_incoming(son, collection) return son def _fix_incoming(self, son, collection): """Apply manipulators to an incoming SON object before it gets stored. 
:Parameters: - `son`: the son object going into the database - `collection`: the collection the son object is being saved in """ son = self._apply_incoming_manipulators(son, collection) son = self._apply_incoming_copying_manipulators(son, collection) return son def _fix_outgoing(self, son, collection): """Apply manipulators to a SON object as it comes out of the database. :Parameters: - `son`: the son object coming out of the database - `collection`: the collection the son object was saved in """ for manipulator in reversed(self.__outgoing_manipulators): son = manipulator.transform_outgoing(son, collection) for manipulator in reversed(self.__outgoing_copying_manipulators): son = manipulator.transform_outgoing(son, collection) return son def _command(self, command, value=1, check=True, allowable_errors=None, uuid_subtype=OLD_UUID_SUBTYPE, compile_re=True, **kwargs): """Internal command helper. """ if isinstance(command, str): command = SON([(command, value)]) command_name = list(command.keys())[0].lower() must_use_master = kwargs.pop('_use_master', False) if command_name not in secondary_ok_commands: must_use_master = True # Special-case: mapreduce can go to secondaries only if inline if command_name == 'mapreduce': out = command.get('out') or kwargs.get('out') if not isinstance(out, dict) or not out.get('inline'): must_use_master = True # Special-case: aggregate with $out cannot go to secondaries. if command_name == 'aggregate': for stage in kwargs.get('pipeline', []): if '$out' in stage: must_use_master = True break extra_opts = { 'as_class': kwargs.pop('as_class', None), 'slave_okay': kwargs.pop('slave_okay', self.slave_okay), '_must_use_master': must_use_master, '_uuid_subtype': uuid_subtype } extra_opts['read_preference'] = kwargs.pop( 'read_preference', self.read_preference) extra_opts['tag_sets'] = kwargs.pop( 'tag_sets', self.tag_sets) extra_opts['secondary_acceptable_latency_ms'] = kwargs.pop( 'secondary_acceptable_latency_ms', self.secondary_acceptable_latency_ms) extra_opts['compile_re'] = compile_re fields = kwargs.get('fields') if fields is not None and not isinstance(fields, dict): kwargs['fields'] = helpers._fields_list_to_dict(fields) command.update(kwargs) # Warn if must_use_master will override read_preference. if (extra_opts['read_preference'] != ReadPreference.PRIMARY and extra_opts['_must_use_master']): warnings.warn("%s does not support %s read preference " "and will be routed to the primary instead." % (command_name, modes[extra_opts['read_preference']]), UserWarning, stacklevel=3) cursor = self["$cmd"].find(command, **extra_opts).limit(-1) for doc in cursor: result = doc if check: msg = "command %s on namespace %s failed: %%s" % ( repr(command).replace("%", "%%"), self.name + '.$cmd') helpers._check_command_response(result, self.connection.disconnect, msg, allowable_errors) return result, cursor.conn_id def command(self, command, value=1, check=True, allowable_errors=[], uuid_subtype=OLD_UUID_SUBTYPE, compile_re=True, **kwargs): """Issue a MongoDB command. Send command `command` to the database and return the response. If `command` is an instance of :class:`basestring` (:class:`str` in python 3) then the command {`command`: `value`} will be sent. Otherwise, `command` must be an instance of :class:`dict` and will be sent as is. Any additional keyword arguments will be added to the final command document before it is sent. 
For example, a command like ``{buildinfo: 1}`` can be sent using: >>> db.command("buildinfo") For a command where the value matters, like ``{collstats: collection_name}`` we can do: >>> db.command("collstats", collection_name) For commands that take additional arguments we can use kwargs. So ``{filemd5: object_id, root: file_root}`` becomes: >>> db.command("filemd5", object_id, root=file_root) :Parameters: - `command`: document representing the command to be issued, or the name of the command (for simple commands only). .. note:: the order of keys in the `command` document is significant (the "verb" must come first), so commands which require multiple keys (e.g. `findandmodify`) should use an instance of :class:`~bson.son.SON` or a string and kwargs instead of a Python `dict`. - `value` (optional): value to use for the command verb when `command` is passed as a string - `check` (optional): check the response for errors, raising :class:`~pymongo.errors.OperationFailure` if there are any - `allowable_errors`: if `check` is ``True``, error messages in this list will be ignored by error-checking - `uuid_subtype` (optional): The BSON binary subtype to use for a UUID used in this command. - `compile_re` (optional): if ``False``, don't attempt to compile BSON regular expressions into Python regular expressions. Return instances of :class:`~bson.regex.Regex` instead. Can avoid :exc:`~bson.errors.InvalidBSON` errors when receiving Python-incompatible regular expressions, for example from ``currentOp`` - `read_preference`: The read preference for this connection. See :class:`~pymongo.read_preferences.ReadPreference` for available options. - `tag_sets`: Read from replica-set members with these tags. To specify a priority-order for tag sets, provide a list of tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. A final, empty tag set, ``{}``, means "read from any member that matches the mode, ignoring tags." ReplicaSetConnection tries each set of tags in turn until it finds a set of tags with at least one matching member. - `secondary_acceptable_latency_ms`: Any replica-set member whose ping time is within secondary_acceptable_latency_ms of the nearest member may accept reads. Default 15 milliseconds. **Ignored by mongos** and must be configured on the command line. See the localThreshold_ option for more information. - `**kwargs` (optional): additional keyword arguments will be added to the command document before it is sent .. note:: ``command`` ignores the ``network_timeout`` parameter. .. versionchanged:: 2.7 Added ``compile_re`` option. .. versionchanged:: 2.3 Added `tag_sets` and `secondary_acceptable_latency_ms` options. .. versionchanged:: 2.2 Added support for `as_class` - the class you want to use for the resulting documents .. versionchanged:: 1.6 Added the `value` argument for string commands, and keyword arguments for additional command options. .. versionchanged:: 1.5 `command` can be a string in addition to a full document. .. versionadded:: 1.4 .. mongodoc:: commands .. _localThreshold: http://docs.mongodb.org/manual/reference/mongos/#cmdoption-mongos--localThreshold """ return self._command(command, value, check, allowable_errors, uuid_subtype, compile_re, **kwargs)[0] def collection_names(self, include_system_collections=True): """Get a list of all the collection names in this database. 
:Parameters: - `include_system_collections` (optional): if ``False`` list will not include system collections (e.g ``system.indexes``) """ client = self.connection client._ensure_connected(True) if client.max_wire_version > 2: res, addr = self._command("listCollections", cursor={}, read_preference=ReadPreference.PRIMARY) # MongoDB 2.8rc2 if "collections" in res: results = res["collections"] # >= MongoDB 2.8rc3 else: results = CommandCursor(self["$cmd"], res["cursor"], addr) names = [result["name"] for result in results] else: names = [result["name"] for result in self["system.namespaces"].find(_must_use_master=True)] names = [n[len(self.__name) + 1:] for n in names if n.startswith(self.__name + ".") and "$" not in n] if not include_system_collections: names = [n for n in names if not n.startswith("system.")] return names def drop_collection(self, name_or_collection): """Drop a collection. :Parameters: - `name_or_collection`: the name of a collection to drop or the collection object itself """ name = name_or_collection if isinstance(name, Collection): name = name.name if not isinstance(name, str): raise TypeError("name_or_collection must be an instance of " "%s or Collection" % (str.__name__,)) self.__connection._purge_index(self.__name, name) self.command("drop", str(name), allowable_errors=["ns not found"], read_preference=ReadPreference.PRIMARY) def validate_collection(self, name_or_collection, scandata=False, full=False): """Validate a collection. Returns a dict of validation info. Raises CollectionInvalid if validation fails. With MongoDB < 1.9 the result dict will include a `result` key with a string value that represents the validation results. With MongoDB >= 1.9 the `result` key no longer exists and the results are split into individual fields in the result dict. :Parameters: - `name_or_collection`: A Collection object or the name of a collection to validate. - `scandata`: Do extra checks beyond checking the overall structure of the collection. - `full`: Have the server do a more thorough scan of the collection. Use with `scandata` for a thorough scan of the structure of the collection and the individual documents. Ignored in MongoDB versions before 1.9. .. versionchanged:: 1.11 validate_collection previously returned a string. .. versionadded:: 1.11 Added `scandata` and `full` options. """ name = name_or_collection if isinstance(name, Collection): name = name.name if not isinstance(name, str): raise TypeError("name_or_collection must be an instance of " "%s or Collection" % (str.__name__,)) result = self.command("validate", str(name), scandata=scandata, full=full, read_preference=ReadPreference.PRIMARY) valid = True # Pre 1.9 results if "result" in result: info = result["result"] if info.find("exception") != -1 or info.find("corrupt") != -1: raise CollectionInvalid("%s invalid: %s" % (name, info)) # Sharded results elif "raw" in result: for _, res in result["raw"].items(): if "result" in res: info = res["result"] if (info.find("exception") != -1 or info.find("corrupt") != -1): raise CollectionInvalid("%s invalid: " "%s" % (name, info)) elif not res.get("valid", False): valid = False break # Post 1.9 non-sharded results. elif not result.get("valid", False): valid = False if not valid: raise CollectionInvalid("%s invalid: %r" % (name, result)) return result def current_op(self, include_all=False): """Get information on operations currently running. 
:Parameters: - `include_all` (optional): if ``True`` also list currently idle operations in the result """ if include_all: return self['$cmd.sys.inprog'].find_one({"$all": True}) else: return self['$cmd.sys.inprog'].find_one() def profiling_level(self): """Get the database's current profiling level. Returns one of (:data:`~pymongo.OFF`, :data:`~pymongo.SLOW_ONLY`, :data:`~pymongo.ALL`). .. mongodoc:: profiling """ result = self.command("profile", -1, read_preference=ReadPreference.PRIMARY) assert result["was"] >= 0 and result["was"] <= 2 return result["was"] def set_profiling_level(self, level, slow_ms=None): """Set the database's profiling level. :Parameters: - `level`: Specifies a profiling level, see list of possible values below. - `slow_ms`: Optionally modify the threshold for the profile to consider a query or operation. Even if the profiler is off queries slower than the `slow_ms` level will get written to the logs. Possible `level` values: +----------------------------+------------------------------------+ | Level | Setting | +============================+====================================+ | :data:`~pymongo.OFF` | Off. No profiling. | +----------------------------+------------------------------------+ | :data:`~pymongo.SLOW_ONLY` | On. Only includes slow operations. | +----------------------------+------------------------------------+ | :data:`~pymongo.ALL` | On. Includes all operations. | +----------------------------+------------------------------------+ Raises :class:`ValueError` if level is not one of (:data:`~pymongo.OFF`, :data:`~pymongo.SLOW_ONLY`, :data:`~pymongo.ALL`). .. mongodoc:: profiling """ if not isinstance(level, int) or level < 0 or level > 2: raise ValueError("level must be one of (OFF, SLOW_ONLY, ALL)") if slow_ms is not None and not isinstance(slow_ms, int): raise TypeError("slow_ms must be an integer") if slow_ms is not None: self.command("profile", level, slowms=slow_ms, read_preference=ReadPreference.PRIMARY) else: self.command("profile", level, read_preference=ReadPreference.PRIMARY) def profiling_info(self): """Returns a list containing current profiling information. .. mongodoc:: profiling """ return list(self["system.profile"].find()) def error(self): """**DEPRECATED**: Get the error if one occurred on the last operation. This method is obsolete: all MongoDB write operations (insert, update, remove, and so on) use the write concern ``w=1`` and report their errors by default. This method must be called in the same :doc:`request </examples/requests>` as the preceding operation, otherwise it is unreliable. Requests are deprecated and will be removed in PyMongo 3.0. Return None if the last operation was error-free. Otherwise return the error that occurred. .. versionchanged:: 2.8 Deprecated. """ warnings.warn("Database.error() is deprecated", DeprecationWarning, stacklevel=2) error = self.command("getlasterror", read_preference=ReadPreference.PRIMARY) error_msg = error.get("err", "") if error_msg is None: return None if error_msg.startswith("not master"): self.__connection.disconnect() return error def last_status(self): """**DEPRECATED**: Get status information from the last operation. This method is obsolete: all MongoDB write operations (insert, update, remove, and so on) use the write concern ``w=1`` and report their errors by default. This method must be called in the same :doc:`request </examples/requests>` as the preceding operation, otherwise it is unreliable. Requests are deprecated and will be removed in PyMongo 3.0. 
Returns a SON object with status information. .. versionchanged:: 2.8 Deprecated. """ warnings.warn("last_status() is deprecated", DeprecationWarning, stacklevel=2) return self.command("getlasterror", read_preference=ReadPreference.PRIMARY) def previous_error(self): """**DEPRECATED**: Get the most recent error on this database. This method is obsolete: all MongoDB write operations (insert, update, remove, and so on) use the write concern ``w=1`` and report their errors by default. This method must be called in the same :doc:`request </examples/requests>` as the preceding operation, otherwise it is unreliable. Requests are deprecated and will be removed in PyMongo 3.0. Furthermore, the underlying database command ``getpreverror`` will be removed in a future MongoDB release. Only returns errors that have occurred since the last call to :meth:`reset_error_history`. Returns None if no such errors have occurred. .. versionchanged:: 2.8 Deprecated. """ warnings.warn("previous_error() is deprecated", DeprecationWarning, stacklevel=2) error = self.command("getpreverror", read_preference=ReadPreference.PRIMARY) if error.get("err", 0) is None: return None return error def reset_error_history(self): """**DEPRECATED**: Reset the error history of this database. This method is obsolete: all MongoDB write operations (insert, update, remove, and so on) use the write concern ``w=1`` and report their errors by default. This method must be called in the same :doc:`request </examples/requests>` as the preceding operation, otherwise it is unreliable. Requests are deprecated and will be removed in PyMongo 3.0. Furthermore, the underlying database command ``reseterror`` will be removed in a future MongoDB release. Calls to :meth:`previous_error` will only return errors that have occurred since the most recent call to this method. .. versionchanged:: 2.8 Deprecated. """ warnings.warn("reset_error_history() is deprecated", DeprecationWarning, stacklevel=2) self.command("reseterror", read_preference=ReadPreference.PRIMARY) def __iter__(self): return self def __next__(self): raise TypeError("'Database' object is not iterable") def _default_role(self, read_only): if self.name == "admin": if read_only: return "readAnyDatabase" else: return "root" else: if read_only: return "read" else: return "dbOwner" def _create_or_update_user( self, create, name, password, read_only, **kwargs): """Use a command to create (if create=True) or modify a user. """ opts = {} if read_only or (create and "roles" not in kwargs): warnings.warn("Creating a user with the read_only option " "or without roles is deprecated in MongoDB " ">= 2.6", DeprecationWarning) opts["roles"] = [self._default_role(read_only)] elif read_only: warnings.warn("The read_only option is deprecated in MongoDB " ">= 2.6, use 'roles' instead", DeprecationWarning) if password is not None: # We always salt and hash client side. if "digestPassword" in kwargs: raise ConfigurationError("The digestPassword option is not " "supported via add_user. Please use " "db.command('createUser', ...) " "instead for this option.") opts["pwd"] = auth._password_digest(name, password) opts["digestPassword"] = False opts["writeConcern"] = self._get_wc_override() or self.write_concern opts.update(kwargs) if create: command_name = "createUser" else: command_name = "updateUser" self.command(command_name, name, read_preference=ReadPreference.PRIMARY, **opts) def _legacy_add_user(self, name, password, read_only, **kwargs): """Uses v1 system to add users, i.e. saving to system.users. 
""" user = self.system.users.find_one({"user": name}) or {"user": name} if password is not None: user["pwd"] = auth._password_digest(name, password) if read_only is not None: user["readOnly"] = read_only user.update(kwargs) try: self.system.users.save(user, **self._get_wc_override()) except OperationFailure as exc: # First admin user add fails gle in MongoDB >= 2.1.2 # See SERVER-4225 for more information. if 'login' in str(exc): pass # First admin user add fails gle from mongos 2.0.x # and 2.2.x. elif (exc.details and 'getlasterror' in exc.details.get('note', '')): pass else: raise def add_user(self, name, password=None, read_only=None, **kwargs): """Create user `name` with password `password`. Add a new user with permissions for this :class:`Database`. .. note:: Will change the password if user `name` already exists. :Parameters: - `name`: the name of the user to create - `password` (optional): the password of the user to create. Can not be used with the ``userSource`` argument. - `read_only` (optional): if ``True`` the user will be read only - `**kwargs` (optional): optional fields for the user document (e.g. ``userSource``, ``otherDBRoles``, or ``roles``). See `<http://docs.mongodb.org/manual/reference/privilege-documents>`_ for more information. .. note:: The use of optional keyword arguments like ``userSource``, ``otherDBRoles``, or ``roles`` requires MongoDB >= 2.4.0 .. versionchanged:: 2.5 Added kwargs support for optional fields introduced in MongoDB 2.4 .. versionchanged:: 2.2 Added support for read only users .. versionadded:: 1.4 """ if not isinstance(name, str): raise TypeError("name must be an instance " "of %s" % (str.__name__,)) if password is not None: if not isinstance(password, str): raise TypeError("password must be an instance " "of %s or None" % (str.__name__,)) if len(password) == 0: raise ValueError("password can't be empty") if read_only is not None: read_only = common.validate_boolean('read_only', read_only) if 'roles' in kwargs: raise ConfigurationError("Can not use " "read_only and roles together") try: uinfo = self.command("usersInfo", name, read_preference=ReadPreference.PRIMARY) self._create_or_update_user( (not uinfo["users"]), name, password, read_only, **kwargs) except OperationFailure as exc: # MongoDB >= 2.5.3 requires the use of commands to manage # users. if exc.code in common.COMMAND_NOT_FOUND_CODES: self._legacy_add_user(name, password, read_only, **kwargs) # Unauthorized. MongoDB >= 2.7.1 has a narrow localhost exception, # and we must add a user before sending commands. elif exc.code == 13: self._create_or_update_user( True, name, password, read_only, **kwargs) else: raise def remove_user(self, name): """Remove user `name` from this :class:`Database`. User `name` will no longer have permissions to access this :class:`Database`. :Parameters: - `name`: the name of the user to remove .. versionadded:: 1.4 """ try: write_concern = self._get_wc_override() or self.write_concern self.command("dropUser", name, read_preference=ReadPreference.PRIMARY, writeConcern=write_concern) except OperationFailure as exc: # See comment in add_user try / except above. if exc.code in common.COMMAND_NOT_FOUND_CODES: self.system.users.remove({"user": name}, **self._get_wc_override()) return raise def authenticate(self, name, password=None, source=None, mechanism='DEFAULT', **kwargs): """Authenticate to use this database. Authentication lasts for the life of the underlying client instance, or until :meth:`logout` is called. 
Raises :class:`TypeError` if (required) `name`, (optional) `password`, or (optional) `source` is not an instance of :class:`basestring` (:class:`str` in python 3). .. note:: - This method authenticates the current connection, and will also cause all new :class:`~socket.socket` connections in the underlying client instance to be authenticated automatically. - Authenticating more than once on the same database with different credentials is not supported. You must call :meth:`logout` before authenticating with new credentials. - When sharing a client instance between multiple threads, all threads will share the authentication. If you need different authentication profiles for different purposes you must use distinct client instances. - To get authentication to apply immediately to all existing sockets you may need to reset this client instance's sockets using :meth:`~pymongo.mongo_client.MongoClient.disconnect`. :Parameters: - `name`: the name of the user to authenticate. - `password` (optional): the password of the user to authenticate. Not used with GSSAPI or MONGODB-X509 authentication. - `source` (optional): the database to authenticate on. If not specified the current database is used. - `mechanism` (optional): See :data:`~pymongo.auth.MECHANISMS` for options. By default, use SCRAM-SHA-1 with MongoDB 3.0 and later, MONGODB-CR (MongoDB Challenge Response protocol) for older servers. - `gssapiServiceName` (optional): Used with the GSSAPI mechanism to specify the service name portion of the service principal name. Defaults to 'mongodb'. .. versionadded:: 2.8 Use SCRAM-SHA-1 with MongoDB 3.0 and later. .. versionchanged:: 2.5 Added the `source` and `mechanism` parameters. :meth:`authenticate` now raises a subclass of :class:`~pymongo.errors.PyMongoError` if authentication fails due to invalid credentials or configuration issues. .. mongodoc:: authenticate """ if not isinstance(name, str): raise TypeError("name must be an instance " "of %s" % (str.__name__,)) if password is not None and not isinstance(password, str): raise TypeError("password must be an instance " "of %s" % (str.__name__,)) if source is not None and not isinstance(source, str): raise TypeError("source must be an instance " "of %s" % (str.__name__,)) common.validate_auth_mechanism('mechanism', mechanism) validated_options = {} for option, value in kwargs.items(): normalized, val = common.validate_auth_option(option, value) validated_options[normalized] = val credentials = auth._build_credentials_tuple(mechanism, source or self.name, name, password, validated_options) self.connection._cache_credentials(self.name, credentials) return True def logout(self): """Deauthorize use of this database for this client instance. .. note:: Other databases may still be authenticated, and other existing :class:`~socket.socket` connections may remain authenticated for this database unless you reset all sockets with :meth:`~pymongo.mongo_client.MongoClient.disconnect`. """ # Sockets will be deauthenticated as they are used. self.connection._purge_credentials(self.name) def dereference(self, dbref, **kwargs): """Dereference a :class:`~bson.dbref.DBRef`, getting the document it points to. Raises :class:`TypeError` if `dbref` is not an instance of :class:`~bson.dbref.DBRef`. Returns a document, or ``None`` if the reference does not point to a valid document. Raises :class:`ValueError` if `dbref` has a database specified that is different from the current database. 
:Parameters: - `dbref`: the reference - `**kwargs` (optional): any additional keyword arguments are the same as the arguments to :meth:`~pymongo.collection.Collection.find`. """ if not isinstance(dbref, DBRef): raise TypeError("cannot dereference a %s" % type(dbref)) if dbref.database is not None and dbref.database != self.__name: raise ValueError("trying to dereference a DBRef that points to " "another database (%r not %r)" % (dbref.database, self.__name)) return self[dbref.collection].find_one({"_id": dbref.id}, **kwargs) def eval(self, code, *args): """Evaluate a JavaScript expression in MongoDB. Useful if you need to touch a lot of data lightly; in such a scenario the network transfer of the data could be a bottleneck. The `code` argument must be a JavaScript function. Additional positional arguments will be passed to that function when it is run on the server. Raises :class:`TypeError` if `code` is not an instance of :class:`basestring` (:class:`str` in python 3) or `Code`. Raises :class:`~pymongo.errors.OperationFailure` if the eval fails. Returns the result of the evaluation. :Parameters: - `code`: string representation of JavaScript code to be evaluated - `args` (optional): additional positional arguments are passed to the `code` being evaluated """ if not isinstance(code, Code): code = Code(code) result = self.command("$eval", code, read_preference=ReadPreference.PRIMARY, args=args) return result.get("retval", None) def __call__(self, *args, **kwargs): """This is only here so that some API misusages are easier to debug. """ raise TypeError("'Database' object is not callable. If you meant to " "call the '%s' method on a '%s' object it is " "failing because no such method exists." % ( self.__name, self.__connection.__class__.__name__)) class SystemJS(object): """Helper class for dealing with stored JavaScript. """ def __init__(self, database): """Get a system js helper for the database `database`. An instance of :class:`SystemJS` can be created with an instance of :class:`Database` through :attr:`Database.system_js`, manual instantiation of this class should not be necessary. :class:`SystemJS` instances allow for easy manipulation and access to server-side JavaScript: .. doctest:: >>> db.system_js.add1 = "function (x) { return x + 1; }" >>> db.system.js.find({"_id": "add1"}).count() 1 >>> db.system_js.add1(5) 6.0 >>> del db.system_js.add1 >>> db.system.js.find({"_id": "add1"}).count() 0 .. note:: Requires server version **>= 1.1.1** .. versionadded:: 1.5 """ # can't just assign it since we've overridden __setattr__ object.__setattr__(self, "_db", database) def __setattr__(self, name, code): self._db.system.js.save({"_id": name, "value": Code(code)}, **self._db._get_wc_override()) def __setitem__(self, name, code): self.__setattr__(name, code) def __delattr__(self, name): self._db.system.js.remove({"_id": name}, **self._db._get_wc_override()) def __delitem__(self, name): self.__delattr__(name) def __getattr__(self, name): return lambda *args: self._db.eval(Code("function() { " "return this[name].apply(" "this, arguments); }", scope={'name': name}), *args) def __getitem__(self, name): return self.__getattr__(name) def list(self): """Get a list of the names of the functions stored in this database. .. versionadded:: 1.9 """ return [x["_id"] for x in self._db.system.js.find(fields=["_id"])]
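
# A minimal usage sketch of the Database class above, assuming PyMongo 2.x and
# a mongod listening on localhost:27017 (host, port and the names below are
# assumptions, not part of this module):
#
#     from pymongo import MongoClient
#
#     client = MongoClient("localhost", 27017)
#     db = client["test_db"]                      # Database via __getitem__
#     print(db.command("buildinfo")["version"])   # simple command document
#     print(db.collection_names(include_system_collections=False))
#     db.add_user("reader", "secret", read_only=True)
#     db.authenticate("reader", "secret")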
import sys


class Direction:
    NORTH = 1
    EAST = 2
    SOUTH = 3
    WEST = 4


def ERROR(s):
    sys.exit("ERROR: " + s)


def check_darkness_size(lines):
    # The program must be rectangular: every line as wide as the first.
    columns = len(lines[0])
    for line in lines:
        if(len(line) != columns):
            ERROR("MALFORMED DARKNESS")


def generate_darkness(program):
    lines = program.split("\n")
    check_darkness_size(lines)
    darkness = [[char for char in line] for line in lines]
    return darkness


def navigate(darkness):
    debug = False
    value = 0
    direction = Direction.EAST
    increment_mode = True
    ascii = False
    x, y = 0, 0  # Flipped darkness, real world solution
    while darkness[y][x] != ' ':
        # Python 3 strings are already Unicode, so the opcode can be compared
        # against the literals below directly.
        op = darkness[y][x]
        if(debug is True):
            print("DEBUG: op is " + op + ", x is " + str(x) + ", y is " + str(y))
        if(op == "█"):
            if(increment_mode is False and value != 0):
                value -= 1
            elif(increment_mode is True):
                value += 1
        elif(op == "▀"):
            increment_mode = True
        elif(op == "▄"):
            increment_mode = False
        elif(op == "■"):
            if(ascii is True):
                print(chr(value), end="")
            else:
                print(value, end="")
        elif(op == "─"):
            ascii = not ascii
        elif(op == "╬" or op == "┼"):
            if(direction == Direction.NORTH):
                if(value != 0):
                    direction = Direction.EAST
                else:
                    direction = Direction.WEST
            elif(direction == Direction.EAST):
                if(value != 0):
                    direction = Direction.SOUTH
                else:
                    direction = Direction.NORTH
            elif(direction == Direction.SOUTH):
                if(value != 0):
                    direction = Direction.WEST
                else:
                    direction = Direction.EAST
            elif(direction == Direction.WEST):
                if(value != 0):
                    direction = Direction.NORTH
                else:
                    direction = Direction.SOUTH
            if(op == "┼"):
                value = 0
        if(debug is True):
            print("DEBUG: DIRECTION IS " + str(direction))
        if(direction == Direction.NORTH):
            y -= 1
        elif(direction == Direction.EAST):
            x += 1
        elif(direction == Direction.SOUTH):
            y += 1
        elif(direction == Direction.WEST):
            x -= 1


def main():
    if(len(sys.argv) > 1):
        # Read the program as plain UTF-8 text; the old Python 2
        # "string-escape" decoding pass is assumed unnecessary for sources
        # stored as UTF-8.
        program = open(sys.argv[-1], "r", encoding="utf-8").read()
        darkness = generate_darkness(program)
        navigate(darkness)
    else:
        ERROR("EXPECTED FILE")


if __name__ == '__main__':
    main()
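
# A minimal usage sketch, derived from the opcode handling above (the script
# and program file names are assumptions): "█" adds 1 in increment mode and
# "■" prints the current value, so three blocks followed by a print emit 3.
# The trailing space halts the navigator.
#
#     $ printf '███■ ' > three.drk
#     $ python3 darkness.py three.drk
#     3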
#!/usr/bin/python3 # -*- coding: utf-8 -*- import os from setuptools import setup try: long_description = open("README.rst").read() except IOError: long_description = "" LOCALEDIR = os.path.join('share', 'locale') setup( name="pygiftparser", version="1.1", url="https://github.com/mtommasi/pygiftparser", description="GIFT parser in python that parses a Gift source code and loads data in a Question/Answer model for further use in an application", license="MIT", author="Marc Tommasi - UdL/INRIA", author_email="first.last@univ-lille.fr", py_modules=['pygiftparser.parser', 'pygiftparser.i18n', 'pygiftparser.answer', 'pygiftparser.question', 'pygiftparser.utils'], install_requires=['yattag', 'markdown', 'MarkdownSuperscript'], long_description=long_description, classifiers=[ "Programming Language :: Python", "Programming Language :: Python :: 3.4", "Topic :: Text Processing" ], data_files=[(os.path.join('share', 'locale', lang, 'LC_MESSAGES'), [os.path.join('share', 'locale', lang, 'LC_MESSAGES', 'pygiftparser.mo')]) for lang in os.listdir(LOCALEDIR)] )
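
# A sketch of the locale tree the data_files expression above expects; the
# language codes are illustrative, not taken from the repository:
#
#     share/locale/
#         fr/LC_MESSAGES/pygiftparser.mo
#         de/LC_MESSAGES/pygiftparser.mo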
# -*- coding: UTF-8 -*-
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#                                                                             #
#  Copyright (C) 2015 - 2018 Simon Stuerz <simon.stuerz@guh.io>               #
#                                                                             #
#  This file is part of nymea-cli.                                            #
#                                                                             #
#  nymea-cli is free software: you can redistribute it and/or modify          #
#  it under the terms of the GNU General Public License as published by       #
#  the Free Software Foundation, version 2 of the License.                    #
#                                                                             #
#  nymea-cli is distributed in the hope that it will be useful,               #
#  but WITHOUT ANY WARRANTY; without even the implied warranty of             #
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the               #
#  GNU General Public License for more details.                               #
#                                                                             #
#  You should have received a copy of the GNU General Public License          #
#  along with nymea-cli. If not, see <http://www.gnu.org/licenses/>.          #
#                                                                             #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

import sys
import socket
import json
import select
import telnetlib
import nymea


def notification_sniffer(nymeaHost, nymeaPort):
    global commandId
    commandId = 0
    print "Connecting notification handler..."
    try:
        tn = telnetlib.Telnet(nymeaHost, nymeaPort)
    except Exception:
        print "ERROR: notification socket could not connect to the nymea-server. \n"
        return None
    print "...OK \n"
    #enable_notification(notificationSocket)
    enable_notification(tn.get_socket())
    try:
        x = None
        while (x != ord('\n') and x != 27):
            socket_list = [sys.stdin, tn.get_socket()]
            read_sockets, write_sockets, error_sockets = select.select(socket_list, [], [])
            for sock in read_sockets:
                # notification messages:
                if sock == tn.get_socket():
                    packet = tn.read_until("}\n")
                    packet = json.loads(packet)
                    nymea.print_json_format(packet)
                elif sock == sys.stdin:
                    x = sys.stdin.readline()
                    return None
    finally:
        tn.close()
        print "Notification socket closed."


def enable_notification(notifySocket):
    global commandId
    params = {}
    commandObj = {}
    commandObj['id'] = commandId
    commandObj['method'] = "JSONRPC.SetNotificationStatus"
    params['enabled'] = "true"
    commandObj['params'] = params
    command = json.dumps(commandObj) + '\n'
    commandId = commandId + 1
    notifySocket.send(command)
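
# A sketch of the JSON-RPC message enable_notification() sends with
# commandId == 0 (key order after json.dumps may differ):
#
#     {"id": 0, "method": "JSONRPC.SetNotificationStatus", "params": {"enabled": "true"}}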
""" Tests for send_course_update management command. """ from __future__ import absolute_import from unittest import skipUnless import ddt from django.conf import settings from edx_ace.utils.date import serialize from mock import patch from six.moves import range from openedx.core.djangoapps.schedules import resolvers, tasks from openedx.core.djangoapps.schedules.config import COURSE_UPDATE_WAFFLE_FLAG from openedx.core.djangoapps.schedules.management.commands import send_course_update as nudge from openedx.core.djangoapps.schedules.management.commands.tests.send_email_base import ( ExperienceTest, ScheduleSendEmailTestMixin ) from openedx.core.djangoapps.schedules.management.commands.tests.upsell_base import ScheduleUpsellTestMixin from openedx.core.djangoapps.schedules.models import ScheduleExperience from openedx.core.djangoapps.waffle_utils.testutils import override_waffle_flag from openedx.core.djangolib.testing.utils import skip_unless_lms from student.tests.factories import CourseEnrollmentFactory from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory @ddt.ddt @skip_unless_lms @skipUnless( 'openedx.core.djangoapps.schedules.apps.SchedulesConfig' in settings.INSTALLED_APPS, "Can't test schedules if the app isn't installed", ) class TestSendCourseUpdate(ScheduleUpsellTestMixin, ScheduleSendEmailTestMixin, ModuleStoreTestCase): __test__ = True # pylint: disable=protected-access resolver = resolvers.CourseUpdateResolver task = tasks.ScheduleCourseUpdate deliver_task = tasks._course_update_schedule_send command = nudge.Command deliver_config = 'deliver_course_update' enqueue_config = 'enqueue_course_update' expected_offsets = list(range(-7, -77, -7)) experience_type = ScheduleExperience.EXPERIENCES.course_updates queries_deadline_for_each_course = True def setUp(self): super(TestSendCourseUpdate, self).setUp() self.highlights_patcher = patch('openedx.core.djangoapps.schedules.resolvers.get_week_highlights') mock_highlights = self.highlights_patcher.start() mock_highlights.return_value = [u'Highlight {}'.format(num + 1) for num in range(3)] self.addCleanup(self.stop_highlights_patcher) def stop_highlights_patcher(self): """ Stops the patcher for the get_week_highlights method if the patch is still in progress. 
""" if self.highlights_patcher is not None: self.highlights_patcher.stop() @ddt.data( ExperienceTest(experience=ScheduleExperience.EXPERIENCES.default, offset=expected_offsets[0], email_sent=False), ExperienceTest(experience=ScheduleExperience.EXPERIENCES.course_updates, offset=expected_offsets[0], email_sent=True), ExperienceTest(experience=None, offset=expected_offsets[0], email_sent=False), ) def test_schedule_in_different_experience(self, test_config): self._check_if_email_sent_for_experience(test_config) @override_waffle_flag(COURSE_UPDATE_WAFFLE_FLAG, True) @patch('openedx.core.djangoapps.schedules.signals.get_current_site') def test_with_course_data(self, mock_get_current_site): self.highlights_patcher.stop() self.highlights_patcher = None mock_get_current_site.return_value = self.site_config.site course = CourseFactory(highlights_enabled_for_messaging=True, self_paced=True) with self.store.bulk_operations(course.id): ItemFactory.create(parent=course, category='chapter', highlights=[u'highlights']) enrollment = CourseEnrollmentFactory(course_id=course.id, user=self.user, mode=u'audit') self.assertEqual(enrollment.schedule.get_experience_type(), ScheduleExperience.EXPERIENCES.course_updates) _, offset, target_day, _ = self._get_dates(offset=self.expected_offsets[0]) enrollment.schedule.start = target_day enrollment.schedule.save() with patch.object(tasks, 'ace') as mock_ace: self.task().apply(kwargs=dict( site_id=self.site_config.site.id, target_day_str=serialize(target_day), day_offset=offset, bin_num=self._calculate_bin_for_user(enrollment.user), )) self.assertTrue(mock_ace.send.called)
import os
import logging
from autotest.client.shared import error
from virttest import libvirt_vm, virsh, remote, utils_libvirtd, aexpect


def run_virsh_undefine(test, params, env):
    """
    Test virsh undefine command.

    Undefine an inactive domain, or convert persistent to transient.
    1.Prepare test environment.
    2.Backup the VM's information to an xml file.
    3.When the libvirtd == "off", stop the libvirtd service.
    4.Perform virsh undefine operation.
    5.Recover test environment (libvirtd service, VM).
    6.Confirm the test result.
    """

    vm_ref = params.get("undefine_vm_ref", "vm_name")
    extra = params.get("undefine_extra", "")
    libvirtd_state = params.get("libvirtd", "on")
    status_error = params.get("status_error")
    undefine_twice = params.get("undefine_twice", 'no')
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    xml_file = os.path.join(test.tmpdir, 'tmp.xml')
    remote_user = params.get("remote_user", "user")
    remote_password = params.get("remote_password", "password")
    remote_prompt = params.get("remote_prompt", "#")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_id = vm.get_id()
    vm_uuid = vm.get_uuid()

    # Back up xml file. Xen hosts have no guest xml file to define a guest.
    virsh.dumpxml(vm_name, extra="", to_file=xml_file)

    # Confirm how to reference a VM.
    if vm_ref == "vm_name":
        vm_ref = vm_name
    elif vm_ref == "id":
        vm_ref = vm_id
    elif vm_ref == "hex_vm_id":
        vm_ref = hex(int(vm_id))
    elif vm_ref == "uuid":
        vm_ref = vm_uuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)

    # Turn libvirtd into certain state.
    if libvirtd_state == "off":
        utils_libvirtd.libvirtd_stop()

    # Test virsh undefine command.
    if vm_ref != "remote":
        vm_ref = "%s %s" % (vm_ref, extra)
        cmdresult = virsh.undefine(vm_ref, ignore_status=True, debug=True)
        status = cmdresult.exit_status
        if status:
            logging.debug("Error status, command output: %s", cmdresult.stdout)
        if undefine_twice == "yes":
            status2 = virsh.undefine(vm_ref, ignore_status=True).exit_status
    else:
        if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
            raise error.TestNAError("remote_ip and/or local_ip parameters not"
                                    " changed from default values")
        try:
            uri = libvirt_vm.complete_uri(local_ip)
            session = remote.remote_login("ssh", remote_ip, "22", remote_user,
                                          remote_password, remote_prompt)
            cmd_undefine = "virsh -c %s undefine %s" % (uri, vm_name)
            status, output = session.cmd_status_output(cmd_undefine)
            logging.info("Undefine output: %s", output)
        except (error.CmdError, remote.LoginError, aexpect.ShellError), detail:
            logging.error("Detail: %s", detail)
            status = 1

    # Recover libvirtd state.
    if libvirtd_state == "off":
        utils_libvirtd.libvirtd_start()

    # Shutdown VM.
    if virsh.domain_exists(vm.name):
        try:
            if vm.is_alive():
                vm.destroy()
        except error.CmdError, detail:
            logging.error("Detail: %s", detail)

    # Check if VM exists.
    vm_exist = virsh.domain_exists(vm.name)

    # Check if xml file exists.
    xml_exist = False
    if (os.path.exists("/etc/libvirt/qemu/%s.xml" % vm_name) or
            os.path.exists("/etc/xen/%s" % vm_name)):
        xml_exist = True

    # Recover main VM.
    if not virsh.domain_exists(vm.name):
        s_define = virsh.define(xml_file)
        if s_define is not True or not virsh.domain_exists(vm.name):
            logging.error("Failed to define %s.", vm.name)

    # Check results.
if status_error == 'yes': if status == 0: raise error.TestFail("virsh undefine return unexpected result.") else: if status != 0: raise error.TestFail("virsh undefine failed.") if undefine_twice == "yes": if status2 == 0: raise error.TestFail("Undefine the same VM twice succeeded.") if vm_exist: raise error.TestFail("VM still exists after undefine.") if xml_exist: raise error.TestFail("Xml file still exists after undefine.")
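
# A minimal sketch of the Cartesian-config params this test consumes; the
# values are illustrative defaults, not taken from any shipped .cfg file:
#
#     params = {"undefine_vm_ref": "vm_name",
#               "undefine_extra": "",
#               "libvirtd": "on",
#               "status_error": "no",
#               "undefine_twice": "no",
#               "main_vm": "virt-tests-vm1"}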
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: victor
# @Date: 2014-02-09
# @Last Modified by: victor
# @Last Modified time: 2014-06-06
# @Copyright:
#
#    This file is part of the AppVulnMS project.
#
#
#    Copyright (c) 2014 Victor Dorneanu <info AAET dornea DOT nu>
#
#    Permission is hereby granted, free of charge, to any person obtaining a copy
#    of this software and associated documentation files (the "Software"), to deal
#    in the Software without restriction, including without limitation the rights
#    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#    copies of the Software, and to permit persons to whom the Software is
#    furnished to do so, subject to the following conditions:
#
#    The above copyright notice and this permission notice shall be included in all
#    copies or substantial portions of the Software.
#
#    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#    SOFTWARE.
#
#    The MIT License (MIT)
import base64
import logging

from lxml import etree

from core.parser.HTTPParser import HTTPParser
from core.parser.HTTPParser import HTTPRequestParser
from core.parser.HTTPParser import HTTPResponseParser

# Module-level logger used by the parser methods below
log = logging.getLogger(__name__)


class AppVulnXMLParser():
    """AppVulnXML parser. Edits XML data"""

    def __init__(self, xml_data):
        # Create parser to parse the XML tree and insert new data into it
        self.parser = etree.XMLParser(remove_blank_text=True, strip_cdata=False,
                                      ns_clean=True, recover=True, encoding='utf-8')
        self.xml_tree = etree.XML(str(xml_data), self.parser)
        self.issues = self.xml_tree.xpath("/XmlReport/Results/Vulnerabilities/*")
        self.issue_index = 0

    def __len__(self):
        """Returns number of available issues

        :returns: Number of available issues
        """
        return len(self.issues)

    def __iter__(self):
        """Iterator to walk through issues

        :returns: Iterator to iterate through issues
        """
        # Restart iteration from the first issue
        self.issue_index = 0
        return self

    def __next__(self):
        """Walk through issues"""
        # Stop once every issue, including the last one, has been yielded
        if self.issue_index >= len(self.issues):
            raise StopIteration
        issue = self.issues[self.issue_index]
        self.issue_index += 1
        return issue

    def get_root(self):
        """Get root of XML document

        :returns: Root XML Element
        """
        return self.xml_tree

    def get_xml(self):
        """Returns XML tree as string

        :returns: XML tree as string
        """
        return etree.tostring(self.xml_tree, pretty_print=True,
                              encoding="utf-8").decode("utf-8")

    def get_scanner(self):
        """Returns /XmlReport/Scanner

        :returns: /XmlReport/Scanner as XML document
        """
        return self.xml_tree.xpath("/XmlReport/Scanner")

    def get_summary(self):
        """Returns /XmlReport/Summary

        :returns: /XmlReport/Summary as XML document
        """
        return self.xml_tree.xpath("/XmlReport/Summary")

    def get_vulnerabilities(self):
        """Return /XmlReport/Results/Vulnerabilities

        :returns: /XmlReport/Results/Vulnerabilities as XML document
        """
        return self.xml_tree.xpath("/XmlReport/Results/Vulnerabilities/*")

    def add_request_data(self, issue, request_data):
        """Add parsed request data to the node

        :param issue: Issue as XML document
        :param request_data: HTTP request data
        """
        request = HTTPRequestParser(request_data)
        request.parse_data()
        request.set_http_headers()
        headers = request.get_headers()

        # Add request attributes like method and version
        try:
            xml_request_node = issue.xpath("TestProbe/HTTP/Request")[0]
            xml_request_node.attrib['method'] = request.get_method()
            xml_request_node.attrib['version'] = request.get_request_version()
        except IndexError:
            log.error("Index error")

        # Add parsed data
        try:
            xml_parsed_traffic = issue.xpath("TestProbe/HTTP/Request/Parsed")[0]
        except IndexError:
            log.error("Index error")
            # Without a <Parsed> node there is nothing to attach to
            return

        # Iterate through headers and create new XML nodes
        for h in headers.keys():
            for v in headers[h]:
                # Create new sub-element
                header_node = etree.Element('Header', name=h, value=v)
                xml_parsed_traffic.append(header_node)

        # Add request data node
        request_data_node = etree.Element('Data')
        request_data_node.text = etree.CDATA(request.get_request_data())
        xml_parsed_traffic.append(request_data_node)

    def add_response_data(self, issue, response_data, binary_data=False):
        """Add parsed response data to the node

        :param issue: Issue as XML document
        :param response_data: HTTP response data
        :param binary_data: Flag indicating whether response_data is binary
        """
        response = HTTPResponseParser(response_data, binary_data)
        response.parse_data()
        response.set_http_headers()
        headers = response.get_headers()

        # Add response metadata
        try:
            xml_response_node = issue.xpath("TestProbe/HTTP/Response")[0]
            xml_response_node.attrib['version'] = response.get_response_version()
            xml_response_node.attrib['status'] = response.get_status()
            xml_response_node.attrib['reason'] = response.get_reason()
        except IndexError:
            log.error("Index error")

        # Add response data
        try:
            xml_parsed_traffic = issue.xpath("TestProbe/HTTP/Response/Parsed")[0]
        except IndexError:
            log.error("Index error")
            # Without a <Parsed> node there is nothing to attach to
            return

        # Iterate through headers and create new XML nodes
        for h in headers.keys():
            for v in headers[h]:
                # Create new sub-element
                header_node = etree.Element('Header', name=h, value=v)
                xml_parsed_traffic.append(header_node)

        # Add request data node
        request_data_node = etree.Element('Data')
        request_data_node.text = etree.CDATA(response.get_response_data())
        request_data_node.attrib['base64'] = str(binary_data)
        xml_parsed_traffic.append(request_data_node)

    def extract_traffic(self, issue, binary_data=False):
        """Extract HTTP traffic from RawTraffic/MergedTraffic and adjust XML in single issue

        :param issue: Issue as XML document
        :param binary_data: Flag indicating whether traffic is binary
        """
        raw_traffic = issue.xpath("RawTraffic")[0]
        raw_request_traffic = issue.xpath("RawTraffic/RequestTraffic")
        raw_response_traffic = issue.xpath("RawTraffic/ResponseTraffic")
        raw_merged_traffic = issue.xpath("RawTraffic/MergedTraffic")

        # New nodes
        request_node = etree.Element("RequestTraffic")
        response_node = etree.Element("ResponseTraffic")
        request_node.text = ''
        response_node.text = ''

        # Add base64 flag to traffic
        request_node.attrib['base64'] = 'false'
        response_node.attrib['base64'] = 'false'

        # Check if merged traffic is provided
        if len(raw_merged_traffic) > 0:
            # Split traffic
            http_data = HTTPParser.split_http_traffic(raw_merged_traffic[0].text)

            # Adjust XML data
            if http_data:
                request_node.text = etree.CDATA(http_data['request'])
                raw_traffic.append(request_node)
                response_node.text = etree.CDATA(http_data['response'])
                raw_traffic.append(response_node)

            # Remove MergedTraffic node
            raw_merged_traffic[0].getparent().remove(raw_merged_traffic[0])

        # Check if request traffic already provided
        # TODO: Do the same for response traffic?
        if len(raw_request_traffic) > 0:
            if len(raw_request_traffic[0].text) > 0:
                base64_flag = False

                if 'base64' in raw_request_traffic[0].attrib:
                    if raw_request_traffic[0].attrib['base64'] == 'true':
                        base64_flag = True

                # Check if base64
                if base64_flag:
                    # Replace binary data by plaintext data
                    decoded_request_data = base64.b64decode(raw_request_traffic[0].text).decode("utf-8")
                    raw_request_traffic[0].getparent().remove(raw_request_traffic[0])

                    new_request_traffic = etree.Element("RequestTraffic")
                    new_request_traffic.text = etree.CDATA(decoded_request_data)
                    new_request_traffic.attrib['base64'] = "false"

                    # Append new node
                    raw_traffic.append(new_request_traffic)
        else:
            # Add new nodes
            raw_traffic.append(request_node)
            raw_traffic.append(response_node)

    def add_data(self, binary_data=False):
        """Adds request data (e.g. headers) to the XML tree

        :param binary_data: Flag indicating whether data is binary
        """
        for issue in self.issues:
            # Extract traffic
            self.extract_traffic(issue, binary_data)

            # Extract request and response
            raw_request_traffic = issue.xpath("RawTraffic/RequestTraffic")[0]
            raw_response_traffic = issue.xpath("RawTraffic/ResponseTraffic")[0]

            # Add request data
            if raw_request_traffic.text:
                base64_flag = False

                if 'base64' in raw_request_traffic.attrib:
                    if raw_request_traffic.attrib['base64'] == 'true':
                        base64_flag = True

                # Check if base64
                if base64_flag:
                    decoded_request_traffic = base64.b64decode(raw_request_traffic.text)
                    self.add_request_data(issue,
                                          decoded_request_traffic.decode(encoding="utf-8", errors="ignore"))
                else:
                    self.add_request_data(issue, raw_request_traffic.text)

            # Add response data
            if raw_response_traffic.text:
                base64_flag = False

                if 'base64' in raw_response_traffic.attrib:
                    if raw_response_traffic.attrib['base64'] == 'true':
                        base64_flag = True

                # Check if base64
                if base64_flag:
                    decoded_response_traffic = base64.b64decode(raw_response_traffic.text)
                    self.add_response_data(
                        issue,
                        decoded_response_traffic.decode(encoding="utf-8", errors="ignore"),
                        True)
                else:
                    self.add_response_data(issue, raw_response_traffic.text)

    def get_payload(self, issue):
        """Gets issue payload information, e.g. parameter/cookie and value

        :param issue: Issue as XML document
        :returns: XML data containing PoC information
        """
        raw_query = issue.xpath("TestProbe/Request/Query")
        if len(raw_query) > 0:
            return raw_query
        else:
            return None

    def convert_base64_to_plain(self):
        """Converts Base64 traffic to plaintext

        For all issues the traffic will be converted to plaintext.
""" for issue in self.issues: raw_traffic = issue.xpath("RawTraffic") request_traffic = issue.xpath("RawData/RawRequest") response_traffic = issue.xpath("RawData/RawResponse") # Decode request traffic if len(request_traffic) > 0: base64_traffic = request_traffic[0].text traffic = base64.b64decode(base64_traffic) request_traffic[0].text = etree.CDATA(traffic.decode('utf-8')) # Decode response traffic if len(response_traffic) > 0: base64_traffic = response_traffic[0].text traffic = base64.b64decode(base64_traffic) # FIXME: Do this better if len(traffic) < 10000: response = str(traffic) else: response = base64_traffic # print(response) response_traffic[0].text = etree.CDATA(response) # Merge traffic data raw_traffic[0].text = ''.join([request_traffic[0].text, str(response_traffic[0].text)]) # Remove RawData raw_data = issue.xpath("RawData") issue.remove(raw_data[0]) def string(self): """Returns string respresentation of XML tree :returns: Returns string respresentation of XML tree """ return etree.tostring(self.xml_tree, pretty_print=True, xml_declaration=False ).decode(encoding="utf-8") def __str__(self): return self.string()
"""docfunc module""" from deferred_binder import DeferredBinder class DocFunc(DeferredBinder): TRIGGER = None def __init__(self, f): super().__init__(f.__name__, f) self.f = self.target @staticmethod def transform(name, context, target, obj=None): """The DeferredBinder transform for this subclass. name - the attribute name to which the function will be bound. context - the class/namespace to which the function will be bound. target - the function that will be bound. obj - ignored. The DeferredBinder descriptor class will replace itself with the result of this method, when the name to which the descriptor is requested for the first time. This can be on the class or an instances of the class. This way the class to which the method is bound is available so that the inherited docstring can be identified and set. """ namespace, cls = context doc = target.__doc__ if doc == DocFunc.TRIGGER: doc = DocFunc.get_doc(cls, name, DocFunc.TRIGGER) target.__doc__ = doc return target @staticmethod def get_doc(cls, fname, default=TRIGGER, member=True): """Returns the function docstring the method should inherit. cls - the class from which to start looking for the method. fname - the method name on that class default - the docstring to return if none is found. member - is the target function already bound to cls? """ print(cls) bases = cls.__mro__[:] if member: bases = bases[1:] for base in bases: print(base) func = getattr(base, fname, None) if not func: continue doc = getattr(func, '__doc__', default) if doc == default: continue return doc return default @staticmethod def inherits_docstring(f, context=None, fname=None, default=TRIGGER): """A decorator that returns a new DocFunc object. f - the function to decorate. context - the class/namespace where the function is bound, if known. fname - the function name in that context, if known. default - the docstring to return if none is found. """ if context is not None: cls, namespace = context fname = fname or f.__name__ f.__doc__ = DocFunc.get_doc(cls, fname, default, False) return f return DocFunc(f, default)
#
#    General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
#    model for 1st, 2nd and 3rd generation solar cells.
#    Copyright (C) 2012-2017 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
#    https://www.gpvdm.com
#    Room B86 Coates, University Park, Nottingham, NG7 2RD, UK
#
#    This program is free software; you can redistribute it and/or modify
#    it under the terms of the GNU General Public License v2.0, as published by
#    the Free Software Foundation.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License along
#    with this program; if not, write to the Free Software Foundation, Inc.,
#    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#

## @package register
#  Registration window
#

import os

# qt
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtWidgets import QWidget, QLineEdit, QComboBox, QHBoxLayout, QPushButton, QLabel, QDialog, QVBoxLayout, QSizePolicy
from PyQt5.QtGui import QPainter, QIcon, QImage
from PyQt5.QtGui import QFont

from icon_lib import icon_get

from inp import inp_load_file

import re

from error_dlg import error_dlg

from lock import get_lock


class license_key(QDialog):

    def callback_ok(self):
        print("boom")
        #get_lock().register(email=self.email0.text(),name=self.name.text())
        #get_lock().get_license()
        self.accept()

    def __init__(self):
        QWidget.__init__(self)
        self.setWindowIcon(icon_get("icon"))
        self.setWindowTitle(_("Registration window (www.gpvdm.com)"))
        self.setWindowFlags(Qt.WindowStaysOnTopHint)

        vbox = QVBoxLayout()

        l = QLabel(_("Enter the license key below:"))
        l.setFont(QFont('SansSerif', 14))
        vbox.addWidget(l)

        hbox_widget = QWidget()
        hbox = QHBoxLayout()
        hbox_widget.setLayout(hbox)

        l = QLabel("<b>" + _("Key") + "</b>:")
        l.setFont(QFont('SansSerif', 14))
        hbox.addWidget(l)

        self.name = QLineEdit()
        hbox.addWidget(self.name)
        vbox.addWidget(hbox_widget)

        button_box = QHBoxLayout()

        spacer = QWidget()
        spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        button_box.addWidget(spacer)

        self.register = QPushButton("Register", self)
        self.register.clicked.connect(self.callback_ok)
        button_box.addWidget(self.register)

        button_box_widget = QWidget()
        button_box_widget.setLayout(button_box)
        vbox.addWidget(button_box_widget)

        self.setLayout(vbox)

        self.setMinimumWidth(400)
        self.name.setText("key")

    def run(self):
        return self.exec_()
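
# A minimal usage sketch, assuming a QApplication has already been constructed
# elsewhere in gpvdm:
#
#     dlg = license_key()
#     if dlg.run() == QDialog.Accepted:
#         print("key accepted")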
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## This file is part of Gajim.
##
## Gajim is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 3 only.
##
## Gajim is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Gajim. If not, see <http://www.gnu.org/licenses/>.
##

'''
Testing PluginManager class.

:author: Mateusz Biliński <mateusz@bilinski.it>
:since: 05/30/2008
:copyright: Copyright (2008) Mateusz Biliński <mateusz@bilinski.it>
:license: GPL
'''

import sys
import os
import unittest

gajim_root = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
sys.path.append(gajim_root + '/src')

# a temporary version of ~/.gajim for testing
configdir = gajim_root + '/test/tmp'

import time

# define _ for i18n
import __builtin__
__builtin__._ = lambda x: x

# wipe config directory
if os.path.isdir(configdir):
    import shutil
    shutil.rmtree(configdir)

os.mkdir(configdir)

import common.configpaths
common.configpaths.gajimpaths.init(configdir)

# for some reason common.gajim needs to be imported before xmpppy?
from common import gajim
from common import xmpp

gajim.DATA_DIR = gajim_root + '/data'

from common.stanza_session import StanzaSession

# name to use for the test account
account_name = 'test'

from plugins import PluginManager

class PluginManagerTestCase(unittest.TestCase):
    def setUp(self):
        self.pluginmanager = PluginManager()

    def tearDown(self):
        pass

    def test_01_Singleton(self):
        """ 1. Checking whether PluginManager class is a singleton. """
        self.pluginmanager.test_arg = 1
        secondPluginManager = PluginManager()
        self.failUnlessEqual(id(secondPluginManager), id(self.pluginmanager),
            'Different IDs in references to PluginManager objects (not a singleton)')
        self.failUnlessEqual(secondPluginManager.test_arg, 1,
            'References point to different PluginManager objects (not a singleton)')

def suite():
    suite = unittest.TestLoader().loadTestsFromTestCase(PluginManagerTestCase)
    return suite

if __name__=='__main__':
    runner = unittest.TextTestRunner()
    test_suite = suite()
    runner.run(test_suite)
""" State Space Representation and Kalman Filter Author: Chad Fulton License: Simplified-BSD """ from __future__ import division, absolute_import, print_function from warnings import warn import numpy as np from .representation import OptionWrapper, Representation, FrozenRepresentation from .tools import (validate_vector_shape, validate_matrix_shape, reorder_missing_matrix, reorder_missing_vector) from . import tools from statsmodels.tools.sm_exceptions import ValueWarning # Define constants FILTER_CONVENTIONAL = 0x01 # Durbin and Koopman (2012), Chapter 4 FILTER_EXACT_INITIAL = 0x02 # ibid., Chapter 5.6 FILTER_AUGMENTED = 0x04 # ibid., Chapter 5.7 FILTER_SQUARE_ROOT = 0x08 # ibid., Chapter 6.3 FILTER_UNIVARIATE = 0x10 # ibid., Chapter 6.4 FILTER_COLLAPSED = 0x20 # ibid., Chapter 6.5 FILTER_EXTENDED = 0x40 # ibid., Chapter 10.2 FILTER_UNSCENTED = 0x80 # ibid., Chapter 10.3 INVERT_UNIVARIATE = 0x01 SOLVE_LU = 0x02 INVERT_LU = 0x04 SOLVE_CHOLESKY = 0x08 INVERT_CHOLESKY = 0x10 STABILITY_FORCE_SYMMETRY = 0x01 MEMORY_STORE_ALL = 0 MEMORY_NO_FORECAST = 0x01 MEMORY_NO_PREDICTED = 0x02 MEMORY_NO_FILTERED = 0x04 MEMORY_NO_LIKELIHOOD = 0x08 MEMORY_NO_GAIN = 0x10 MEMORY_NO_SMOOTHING = 0x20 MEMORY_NO_STD_FORECAST = 0x40 MEMORY_CONSERVE = ( MEMORY_NO_FORECAST | MEMORY_NO_PREDICTED | MEMORY_NO_FILTERED | MEMORY_NO_LIKELIHOOD | MEMORY_NO_GAIN | MEMORY_NO_SMOOTHING | MEMORY_NO_STD_FORECAST ) TIMING_INIT_PREDICTED = 0 TIMING_INIT_FILTERED = 1 class KalmanFilter(Representation): r""" State space representation of a time series process, with Kalman filter Parameters ---------- k_endog : array_like or integer The observed time-series process :math:`y` if array like or the number of variables in the process if an integer. k_states : int The dimension of the unobserved state process. k_posdef : int, optional The dimension of a guaranteed positive definite covariance matrix describing the shocks in the measurement equation. Must be less than or equal to `k_states`. Default is `k_states`. loglikelihood_burn : int, optional The number of initial periods during which the loglikelihood is not recorded. Default is 0. tolerance : float, optional The tolerance at which the Kalman filter determines convergence to steady-state. Default is 1e-19. results_class : class, optional Default results class to use to save filtering output. Default is `FilterResults`. If specified, class must extend from `FilterResults`. **kwargs Keyword arguments may be used to provide values for the filter, inversion, and stability methods. See `set_filter_method`, `set_inversion_method`, and `set_stability_method`. Keyword arguments may be used to provide default values for state space matrices. See `Representation` for more details. Notes ----- There are several types of options available for controlling the Kalman filter operation. All options are internally held as bitmasks, but can be manipulated by setting class attributes, which act like boolean flags. For more information, see the `set_*` class method documentation. The options are: filter_method The filtering method controls aspects of which Kalman filtering approach will be used. inversion_method The Kalman filter may contain one matrix inversion: that of the forecast error covariance matrix. The inversion method controls how and if that inverse is performed. stability_method The Kalman filter is a recursive algorithm that may in some cases suffer issues with numerical stability. The stability method controls what, if any, measures are taken to promote stability. 
    conserve_memory
        By default, the Kalman filter computes a number of intermediate
        matrices at each iteration. The memory conservation options control
        which of those matrices are stored.
    filter_timing
        By default, the Kalman filter follows Durbin and Koopman, 2012, in
        initializing the filter with predicted values. Kim and Nelson, 1999,
        instead initialize the filter with filtered values, which is
        essentially just a different timing convention.

    The `filter_method` and `inversion_method` options intentionally allow
    the possibility that multiple methods will be indicated. In the case that
    multiple methods are selected, the underlying Kalman filter will attempt
    to select the optimal method given the input data.

    For example, it may be that INVERT_UNIVARIATE and SOLVE_CHOLESKY are
    indicated (this is in fact the default case). In this case, if the
    endogenous vector is 1-dimensional (`k_endog` = 1), then INVERT_UNIVARIATE
    is used and inversion reduces to simple division, and if it has a larger
    dimension, the Cholesky decomposition along with linear solving (rather
    than explicit matrix inversion) is used. If only SOLVE_CHOLESKY had been
    set, then the Cholesky decomposition method would *always* be used, even
    in the case of 1-dimensional data.

    See Also
    --------
    FilterResults
    statsmodels.tsa.statespace.representation.Representation
    """

    filter_methods = [
        'filter_conventional', 'filter_exact_initial', 'filter_augmented',
        'filter_square_root', 'filter_univariate', 'filter_collapsed',
        'filter_extended', 'filter_unscented'
    ]

    filter_conventional = OptionWrapper('filter_method', FILTER_CONVENTIONAL)
    """
    (bool) Flag for conventional Kalman filtering.
    """
    filter_exact_initial = OptionWrapper('filter_method', FILTER_EXACT_INITIAL)
    """
    (bool) Flag for exact initial Kalman filtering. Not implemented.
    """
    filter_augmented = OptionWrapper('filter_method', FILTER_AUGMENTED)
    """
    (bool) Flag for augmented Kalman filtering. Not implemented.
    """
    filter_square_root = OptionWrapper('filter_method', FILTER_SQUARE_ROOT)
    """
    (bool) Flag for square-root Kalman filtering. Not implemented.
    """
    filter_univariate = OptionWrapper('filter_method', FILTER_UNIVARIATE)
    """
    (bool) Flag for univariate filtering of multivariate observation vector.
    """
    filter_collapsed = OptionWrapper('filter_method', FILTER_COLLAPSED)
    """
    (bool) Flag for Kalman filtering with collapsed observation vector.
    """
    filter_extended = OptionWrapper('filter_method', FILTER_EXTENDED)
    """
    (bool) Flag for extended Kalman filtering. Not implemented.
    """
    filter_unscented = OptionWrapper('filter_method', FILTER_UNSCENTED)
    """
    (bool) Flag for unscented Kalman filtering. Not implemented.
    """

    inversion_methods = [
        'invert_univariate', 'solve_lu', 'invert_lu', 'solve_cholesky',
        'invert_cholesky'
    ]

    invert_univariate = OptionWrapper('inversion_method', INVERT_UNIVARIATE)
    """
    (bool) Flag for univariate inversion method (recommended).
    """
    solve_lu = OptionWrapper('inversion_method', SOLVE_LU)
    """
    (bool) Flag for LU and linear solver inversion method.
    """
    invert_lu = OptionWrapper('inversion_method', INVERT_LU)
    """
    (bool) Flag for LU inversion method.
    """
    solve_cholesky = OptionWrapper('inversion_method', SOLVE_CHOLESKY)
    """
    (bool) Flag for Cholesky and linear solver inversion method (recommended).
    """
    invert_cholesky = OptionWrapper('inversion_method', INVERT_CHOLESKY)
    """
    (bool) Flag for Cholesky inversion method.
    """
""" stability_methods = ['stability_force_symmetry'] stability_force_symmetry = ( OptionWrapper('stability_method', STABILITY_FORCE_SYMMETRY) ) """ (bool) Flag for enforcing covariance matrix symmetry """ memory_options = [ 'memory_store_all', 'memory_no_forecast', 'memory_no_predicted', 'memory_no_filtered', 'memory_no_likelihood', 'memory_no_gain', 'memory_no_smoothing', 'memory_no_std_forecast', 'memory_conserve' ] memory_store_all = OptionWrapper('conserve_memory', MEMORY_STORE_ALL) """ (bool) Flag for storing all intermediate results in memory (default). """ memory_no_forecast = OptionWrapper('conserve_memory', MEMORY_NO_FORECAST) """ (bool) Flag to prevent storing forecasts. """ memory_no_predicted = OptionWrapper('conserve_memory', MEMORY_NO_PREDICTED) """ (bool) Flag to prevent storing predicted state and covariance matrices. """ memory_no_filtered = OptionWrapper('conserve_memory', MEMORY_NO_FILTERED) """ (bool) Flag to prevent storing filtered state and covariance matrices. """ memory_no_likelihood = ( OptionWrapper('conserve_memory', MEMORY_NO_LIKELIHOOD) ) """ (bool) Flag to prevent storing likelihood values for each observation. """ memory_no_gain = OptionWrapper('conserve_memory', MEMORY_NO_GAIN) """ (bool) Flag to prevent storing the Kalman gain matrices. """ memory_no_smoothing = OptionWrapper('conserve_memory', MEMORY_NO_SMOOTHING) """ (bool) Flag to prevent storing likelihood values for each observation. """ memory_no_std_forecast = ( OptionWrapper('conserve_memory', MEMORY_NO_STD_FORECAST)) """ (bool) Flag to prevent storing standardized forecast errors. """ memory_conserve = OptionWrapper('conserve_memory', MEMORY_CONSERVE) """ (bool) Flag to conserve the maximum amount of memory. """ timing_options = [ 'timing_init_predicted', 'timing_init_filtered' ] timing_init_predicted = OptionWrapper('filter_timing', TIMING_INIT_PREDICTED) """ (bool) Flag for the default timing convention (Durbin and Koopman, 2012). """ timing_init_filtered = OptionWrapper('filter_timing', TIMING_INIT_FILTERED) """ (bool) Flag for the alternate timing convention (Kim and Nelson, 2012). """ # Default filter options filter_method = FILTER_CONVENTIONAL """ (int) Filtering method bitmask. """ inversion_method = INVERT_UNIVARIATE | SOLVE_CHOLESKY """ (int) Inversion method bitmask. """ stability_method = STABILITY_FORCE_SYMMETRY """ (int) Stability method bitmask. """ conserve_memory = MEMORY_STORE_ALL """ (int) Memory conservation bitmask. """ filter_timing = TIMING_INIT_PREDICTED """ (int) Filter timing. 
""" def __init__(self, k_endog, k_states, k_posdef=None, loglikelihood_burn=0, tolerance=1e-19, results_class=None, kalman_filter_classes=None, **kwargs): super(KalmanFilter, self).__init__( k_endog, k_states, k_posdef, **kwargs ) # Setup the underlying Kalman filter storage self._kalman_filters = {} # Filter options self.loglikelihood_burn = loglikelihood_burn self.results_class = ( results_class if results_class is not None else FilterResults ) # Options self.prefix_kalman_filter_map = ( kalman_filter_classes if kalman_filter_classes is not None else tools.prefix_kalman_filter_map.copy()) self.set_filter_method(**kwargs) self.set_inversion_method(**kwargs) self.set_stability_method(**kwargs) self.set_conserve_memory(**kwargs) self.set_filter_timing(**kwargs) self.tolerance = tolerance @property def _kalman_filter(self): prefix = self.prefix if prefix in self._kalman_filters: return self._kalman_filters[prefix] return None def _initialize_filter(self, filter_method=None, inversion_method=None, stability_method=None, conserve_memory=None, tolerance=None, filter_timing=None, loglikelihood_burn=None): if filter_method is None: filter_method = self.filter_method if inversion_method is None: inversion_method = self.inversion_method if stability_method is None: stability_method = self.stability_method if conserve_memory is None: conserve_memory = self.conserve_memory if loglikelihood_burn is None: loglikelihood_burn = self.loglikelihood_burn if filter_timing is None: filter_timing = self.filter_timing if tolerance is None: tolerance = self.tolerance # Make sure we have endog if self.endog is None: raise RuntimeError('Must bind a dataset to the model before' ' filtering or smoothing.') # Initialize the representation matrices prefix, dtype, create_statespace = self._initialize_representation() # Determine if we need to (re-)create the filter # (definitely need to recreate if we recreated the _statespace object) create_filter = create_statespace or prefix not in self._kalman_filters if not create_filter: kalman_filter = self._kalman_filters[prefix] create_filter = ( not kalman_filter.conserve_memory == conserve_memory or not kalman_filter.loglikelihood_burn == loglikelihood_burn ) # If the dtype-specific _kalman_filter does not exist (or if we need # to re-create it), create it if create_filter: if prefix in self._kalman_filters: # Delete the old filter del self._kalman_filters[prefix] # Setup the filter cls = self.prefix_kalman_filter_map[prefix] self._kalman_filters[prefix] = cls( self._statespaces[prefix], filter_method, inversion_method, stability_method, conserve_memory, filter_timing, tolerance, loglikelihood_burn ) # Otherwise, update the filter parameters else: kalman_filter = self._kalman_filters[prefix] kalman_filter.set_filter_method(filter_method, False) kalman_filter.inversion_method = inversion_method kalman_filter.stability_method = stability_method kalman_filter.filter_timing = filter_timing kalman_filter.tolerance = tolerance # conserve_memory and loglikelihood_burn changes always lead to # re-created filters return prefix, dtype, create_filter, create_statespace def set_filter_method(self, filter_method=None, **kwargs): r""" Set the filtering method The filtering method controls aspects of which Kalman filtering approach will be used. Parameters ---------- filter_method : integer, optional Bitmask value to set the filter method to. See notes for details. **kwargs Keyword arguments may be used to influence the filter method by setting individual boolean flags. 
            See notes for details.

        Notes
        -----
        The filtering method is defined by a collection of boolean flags, and
        is internally stored as a bitmask. The methods available are:

        FILTER_CONVENTIONAL = 0x01
            Conventional Kalman filter.
        FILTER_UNIVARIATE = 0x10
            Univariate approach to Kalman filtering. Overrides conventional
            method if both are specified.
        FILTER_COLLAPSED = 0x20
            Collapsed approach to Kalman filtering. Will be used *in addition*
            to conventional or univariate filtering.

        Note that only the first method is available if using a Scipy version
        older than 0.16.

        If the bitmask is set directly via the `filter_method` argument, then
        the full method must be provided.

        If keyword arguments are used to set individual boolean flags, then
        the lowercase of the method must be used as an argument name, and the
        value is the desired value of the boolean flag (True or False).

        Note that the filter method may also be specified by directly
        modifying the class attributes which are defined similarly to the
        keyword arguments.

        The default filtering method is FILTER_CONVENTIONAL.

        Examples
        --------
        >>> mod = sm.tsa.statespace.SARIMAX(range(10))
        >>> mod.ssm.filter_method
        1
        >>> mod.ssm.filter_conventional
        True
        >>> mod.ssm.filter_univariate = True
        >>> mod.ssm.filter_method
        17
        >>> mod.ssm.set_filter_method(filter_univariate=False,
        ...                           filter_collapsed=True)
        >>> mod.ssm.filter_method
        33
        >>> mod.ssm.set_filter_method(filter_method=1)
        >>> mod.ssm.filter_conventional
        True
        >>> mod.ssm.filter_univariate
        False
        >>> mod.ssm.filter_collapsed
        False
        >>> mod.ssm.filter_univariate = True
        >>> mod.ssm.filter_method
        17
        """
        if filter_method is not None:
            self.filter_method = filter_method
        for name in KalmanFilter.filter_methods:
            if name in kwargs:
                setattr(self, name, kwargs[name])

        if self._compatibility_mode and not self.filter_method == 1:
            raise NotImplementedError('Only conventional Kalman filtering'
                                      ' is available. Consider updating'
                                      ' dependencies for more options.')

    def set_inversion_method(self, inversion_method=None, **kwargs):
        r"""
        Set the inversion method

        The Kalman filter may contain one matrix inversion: that of the
        forecast error covariance matrix. The inversion method controls how
        and if that inverse is performed.

        Parameters
        ----------
        inversion_method : integer, optional
            Bitmask value to set the inversion method to. See notes for
            details.
        **kwargs
            Keyword arguments may be used to influence the inversion method by
            setting individual boolean flags. See notes for details.

        Notes
        -----
        The inversion method is defined by a collection of boolean flags, and
        is internally stored as a bitmask. The methods available are:

        INVERT_UNIVARIATE = 0x01
            If the endogenous time series is univariate, then inversion can be
            performed by simple division. If this flag is set and the time
            series is univariate, then division will always be used even if
            other flags are also set.
        SOLVE_LU = 0x02
            Use an LU decomposition along with a linear solver (rather than
            ever actually inverting the matrix).
        INVERT_LU = 0x04
            Use an LU decomposition along with typical matrix inversion.
        SOLVE_CHOLESKY = 0x08
            Use a Cholesky decomposition along with a linear solver.
        INVERT_CHOLESKY = 0x10
            Use a Cholesky decomposition along with typical matrix inversion.

        If the bitmask is set directly via the `inversion_method` argument,
        then the full method must be provided.

        If keyword arguments are used to set individual boolean flags, then
        the lowercase of the method must be used as an argument name, and the
        value is the desired value of the boolean flag (True or False).
        Note that the inversion method may also be specified by directly
        modifying the class attributes which are defined similarly to the
        keyword arguments.

        The default inversion method is `INVERT_UNIVARIATE | SOLVE_CHOLESKY`

        Several things to keep in mind are:

        - If the filtering method is specified to be univariate, then simple
          division is always used regardless of the dimension of the
          endogenous time series.
        - Cholesky decomposition is about twice as fast as LU decomposition,
          but it requires that the matrix be positive definite. While this
          should generally be true, it may not be in every case.
        - Using a linear solver rather than true matrix inversion is generally
          faster and is numerically more stable.

        Examples
        --------
        >>> mod = sm.tsa.statespace.SARIMAX(range(10))
        >>> mod.ssm.inversion_method
        9
        >>> mod.ssm.solve_cholesky
        True
        >>> mod.ssm.invert_univariate
        True
        >>> mod.ssm.invert_lu
        False
        >>> mod.ssm.invert_univariate = False
        >>> mod.ssm.inversion_method
        8
        >>> mod.ssm.set_inversion_method(solve_cholesky=False,
        ...                              invert_cholesky=True)
        >>> mod.ssm.inversion_method
        16
        """
        if inversion_method is not None:
            self.inversion_method = inversion_method
        for name in KalmanFilter.inversion_methods:
            if name in kwargs:
                setattr(self, name, kwargs[name])

    def set_stability_method(self, stability_method=None, **kwargs):
        r"""
        Set the numerical stability method

        The Kalman filter is a recursive algorithm that may in some cases
        suffer issues with numerical stability. The stability method controls
        what, if any, measures are taken to promote stability.

        Parameters
        ----------
        stability_method : integer, optional
            Bitmask value to set the stability method to. See notes for
            details.
        **kwargs
            Keyword arguments may be used to influence the stability method by
            setting individual boolean flags. See notes for details.

        Notes
        -----
        The stability method is defined by a collection of boolean flags, and
        is internally stored as a bitmask. The methods available are:

        STABILITY_FORCE_SYMMETRY = 0x01
            If this flag is set, symmetry of the predicted state covariance
            matrix is enforced at each iteration of the filter, where each
            element is set to the average of the corresponding elements in the
            upper and lower triangle.

        If the bitmask is set directly via the `stability_method` argument,
        then the full method must be provided.

        If keyword arguments are used to set individual boolean flags, then
        the lowercase of the method must be used as an argument name, and the
        value is the desired value of the boolean flag (True or False).

        Note that the stability method may also be specified by directly
        modifying the class attributes which are defined similarly to the
        keyword arguments.

        The default stability method is `STABILITY_FORCE_SYMMETRY`

        Examples
        --------
        >>> mod = sm.tsa.statespace.SARIMAX(range(10))
        >>> mod.ssm.stability_method
        1
        >>> mod.ssm.stability_force_symmetry
        True
        >>> mod.ssm.stability_force_symmetry = False
        >>> mod.ssm.stability_method
        0
        """
        if stability_method is not None:
            self.stability_method = stability_method
        for name in KalmanFilter.stability_methods:
            if name in kwargs:
                setattr(self, name, kwargs[name])

    def set_conserve_memory(self, conserve_memory=None, **kwargs):
        r"""
        Set the memory conservation method

        By default, the Kalman filter computes a number of intermediate
        matrices at each iteration. The memory conservation options control
        which of those matrices are stored.

        Parameters
        ----------
        conserve_memory : integer, optional
            Bitmask value to set the memory conservation method to. See notes
            for details.
        **kwargs
            Keyword arguments may be used to influence the memory conservation
            method by setting individual boolean flags. See notes for details.

        Notes
        -----
        The memory conservation method is defined by a collection of boolean
        flags, and is internally stored as a bitmask. The methods available
        are:

        MEMORY_STORE_ALL = 0
            Store all intermediate matrices. This is the default value.
        MEMORY_NO_FORECAST = 0x01
            Do not store the forecast, forecast error, or forecast error
            covariance matrices. If this option is used, the `predict` method
            from the results class is unavailable.
        MEMORY_NO_PREDICTED = 0x02
            Do not store the predicted state or predicted state covariance
            matrices.
        MEMORY_NO_FILTERED = 0x04
            Do not store the filtered state or filtered state covariance
            matrices.
        MEMORY_NO_LIKELIHOOD = 0x08
            Do not store the vector of loglikelihood values for each
            observation. Only the sum of the loglikelihood values is stored.
        MEMORY_NO_GAIN = 0x10
            Do not store the Kalman gain matrices.
        MEMORY_NO_SMOOTHING = 0x20
            Do not store temporary variables related to Kalman smoothing. If
            this option is used, smoothing is unavailable.
        MEMORY_NO_STD_FORECAST = 0x40
            Do not store standardized forecast errors.
        MEMORY_CONSERVE
            Do not store any intermediate matrices.

        Note that if using a Scipy version less than 0.16, the options
        MEMORY_NO_GAIN, MEMORY_NO_SMOOTHING, and MEMORY_NO_STD_FORECAST
        have no effect.

        If the bitmask is set directly via the `conserve_memory` argument,
        then the full method must be provided.

        If keyword arguments are used to set individual boolean flags, then
        the lowercase of the method must be used as an argument name, and the
        value is the desired value of the boolean flag (True or False).

        Note that the memory conservation method may also be specified by
        directly modifying the class attributes which are defined similarly to
        the keyword arguments.

        The default memory conservation method is `MEMORY_STORE_ALL`, so that
        all intermediate matrices are stored.

        Examples
        --------
        >>> mod = sm.tsa.statespace.SARIMAX(range(10))
        >>> mod.ssm.conserve_memory
        0
        >>> mod.ssm.memory_no_predicted
        False
        >>> mod.ssm.memory_no_predicted = True
        >>> mod.ssm.conserve_memory
        2
        >>> mod.ssm.set_conserve_memory(memory_no_filtered=True,
        ...                             memory_no_forecast=True)
        >>> mod.ssm.conserve_memory
        7
        """
        if conserve_memory is not None:
            self.conserve_memory = conserve_memory
        for name in KalmanFilter.memory_options:
            if name in kwargs:
                setattr(self, name, kwargs[name])

    def set_filter_timing(self, alternate_timing=None, **kwargs):
        r"""
        Set the filter timing convention

        By default, the Kalman filter follows Durbin and Koopman, 2012, in
        initializing the filter with predicted values. Kim and Nelson, 1999,
        instead initialize the filter with filtered values, which is
        essentially just a different timing convention.

        Parameters
        ----------
        alternate_timing : integer, optional
            Whether or not to use the alternate timing convention. Default is
            unspecified.
        **kwargs
            Keyword arguments may be used to influence the filter timing
            convention by setting individual boolean flags. See notes for
            details.
        """
        if alternate_timing is not None:
            self.filter_timing = int(alternate_timing)
        if 'timing_init_predicted' in kwargs:
            self.filter_timing = int(not kwargs['timing_init_predicted'])
        if 'timing_init_filtered' in kwargs:
            self.filter_timing = int(kwargs['timing_init_filtered'])

        if (self._compatibility_mode and
                self.filter_timing == TIMING_INIT_FILTERED):
            raise NotImplementedError('Only "predicted" Kalman filter'
                                      ' timing is available. Consider'
                                      ' updating dependencies for more'
                                      ' options.')

    def _filter(self, filter_method=None, inversion_method=None,
                stability_method=None, conserve_memory=None,
                filter_timing=None, tolerance=None, loglikelihood_burn=None,
                complex_step=False):
        # Initialize the filter
        prefix, dtype, create_filter, create_statespace = (
            self._initialize_filter(
                filter_method, inversion_method, stability_method,
                conserve_memory, filter_timing, tolerance, loglikelihood_burn
            )
        )
        kfilter = self._kalman_filters[prefix]

        # Initialize the state
        self._initialize_state(prefix=prefix, complex_step=complex_step)

        # Run the filter
        kfilter()

        return kfilter

    def filter(self, filter_method=None, inversion_method=None,
               stability_method=None, conserve_memory=None,
               filter_timing=None, tolerance=None, loglikelihood_burn=None,
               complex_step=False):
        r"""
        Apply the Kalman filter to the statespace model.

        Parameters
        ----------
        filter_method : int, optional
            Determines which Kalman filter to use. Default is conventional.
        inversion_method : int, optional
            Determines which inversion technique to use. Default is by
            Cholesky decomposition.
        stability_method : int, optional
            Determines which numerical stability techniques to use. Default is
            to enforce symmetry of the predicted state covariance matrix.
        conserve_memory : int, optional
            Determines what output from the filter to store. Default is to
            store everything.
        filter_timing : int, optional
            Determines the timing convention of the filter. Default is that
            from Durbin and Koopman (2012), in which the filter is initialized
            with predicted values.
        tolerance : float, optional
            The tolerance at which the Kalman filter determines convergence to
            steady-state. Default is 1e-19.
        loglikelihood_burn : int, optional
            The number of initial periods during which the loglikelihood is
            not recorded. Default is 0.

        Notes
        -----
        This function by default does not compute variables required for
        smoothing.
        """
        if conserve_memory is None:
            conserve_memory = self.conserve_memory | MEMORY_NO_SMOOTHING

        # Run the filter
        kfilter = self._filter(
            filter_method, inversion_method, stability_method,
            conserve_memory, filter_timing, tolerance, loglikelihood_burn,
            complex_step)

        # Create the results object
        results = self.results_class(self)
        results.update_representation(self)
        results.update_filter(kfilter)

        return results
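
    # A usage sketch for `filter` (with `mod` a hypothetical statsmodels
    # model whose state space representation has been bound to data):
    #
    #     res = mod.ssm.filter()
    #     res.filtered_state   # (k_states x nobs) filtered state means
    #     res.llf_obs          # per-observation loglikelihoods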

    def loglike(self, **kwargs):
        r"""
        Calculate the loglikelihood associated with the statespace model.

        Parameters
        ----------
        **kwargs
            Additional keyword arguments to pass to the Kalman filter. See
            `KalmanFilter.filter` for more details.

        Returns
        -------
        loglike : float
            The joint loglikelihood.
        """
        if self.memory_no_likelihood:
            raise RuntimeError('Cannot compute loglikelihood if'
                               ' MEMORY_NO_LIKELIHOOD option is selected.')
        kwargs['conserve_memory'] = MEMORY_CONSERVE ^ MEMORY_NO_LIKELIHOOD
        kfilter = self._filter(**kwargs)
        loglikelihood_burn = kwargs.get('loglikelihood_burn',
                                        self.loglikelihood_burn)
        return np.sum(kfilter.loglikelihood[loglikelihood_burn:])

    def loglikeobs(self, **kwargs):
        r"""
        Calculate the loglikelihood for each observation associated with the
        statespace model.

        Parameters
        ----------
        **kwargs
            Additional keyword arguments to pass to the Kalman filter. See
            `KalmanFilter.filter` for more details.

        Notes
        -----
        If `loglikelihood_burn` is positive, then the entries in the returned
        loglikelihood vector are set to be zero for those initial time
        periods.

        Returns
        -------
        loglike : array of float
            Array of loglikelihood values for each observation.
        """
        if self.memory_no_likelihood:
            raise RuntimeError('Cannot compute loglikelihood if'
                               ' MEMORY_NO_LIKELIHOOD option is selected.')
        kwargs['conserve_memory'] = MEMORY_CONSERVE ^ MEMORY_NO_LIKELIHOOD
        kfilter = self._filter(**kwargs)

        llf_obs = np.array(kfilter.loglikelihood, copy=True)

        # Set any burned observations to have zero likelihood
        loglikelihood_burn = kwargs.get('loglikelihood_burn',
                                        self.loglikelihood_burn)
        llf_obs[:loglikelihood_burn] = 0

        return llf_obs
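
    # `loglike` and `loglikeobs` are mutually consistent: with a zero
    # burn-in, summing the per-observation values recovers the joint
    # loglikelihood. A sketch (with `mod` a hypothetical bound model):
    #
    #     llf_obs = mod.ssm.loglikeobs()
    #     assert abs(mod.ssm.loglike() - llf_obs.sum()) < 1e-8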

    def simulate(self, nsimulations, measurement_shocks=None,
                 state_shocks=None, initial_state=None):
        r"""
        Simulate a new time series following the state space model

        Parameters
        ----------
        nsimulations : int
            The number of observations to simulate. If the model is
            time-invariant this can be any number. If the model is
            time-varying, then this number must be less than or equal to the
            number of observations.
        measurement_shocks : array_like, optional
            If specified, these are the shocks to the measurement equation,
            :math:`\varepsilon_t`. If unspecified, these are automatically
            generated using a pseudo-random number generator. If specified,
            must be shaped `nsimulations` x `k_endog`, where `k_endog` is the
            same as in the state space model.
        state_shocks : array_like, optional
            If specified, these are the shocks to the state equation,
            :math:`\eta_t`. If unspecified, these are automatically
            generated using a pseudo-random number generator. If specified,
            must be shaped `nsimulations` x `k_posdef` where `k_posdef` is the
            same as in the state space model.
        initial_state : array_like, optional
            If specified, this is the state vector at time zero, which should
            be shaped (`k_states` x 1), where `k_states` is the same as in the
            state space model. If unspecified, but the model has been
            initialized, then that initialization is used. If unspecified and
            the model has not been initialized, then a vector of zeros is
            used. Note that this is not included in the returned
            `simulated_states` array.

        Returns
        -------
        simulated_obs : array
            An (nsimulations x k_endog) array of simulated observations.
        simulated_states : array
            An (nsimulations x k_states) array of simulated states.
        """
        time_invariant = self.time_invariant

        # Check for valid number of simulations
        if not time_invariant and nsimulations > self.nobs:
            raise ValueError('In a time-varying model, cannot create more'
                             ' simulations than there are observations.')

        # Check / generate measurement shocks
        if measurement_shocks is not None:
            measurement_shocks = np.array(measurement_shocks)
            if measurement_shocks.ndim == 0:
                measurement_shocks = measurement_shocks[np.newaxis, np.newaxis]
            elif measurement_shocks.ndim == 1:
                measurement_shocks = measurement_shocks[:, np.newaxis]
            if not measurement_shocks.shape == (nsimulations, self.k_endog):
                raise ValueError('Invalid shape of provided measurement'
                                 ' shocks. Required (%d, %d)'
                                 % (nsimulations, self.k_endog))
        elif self.shapes['obs_cov'][-1] == 1:
            measurement_shocks = np.random.multivariate_normal(
                mean=np.zeros(self.k_endog), cov=self['obs_cov'],
                size=nsimulations)

        # Check / generate state shocks
        if state_shocks is not None:
            state_shocks = np.array(state_shocks)
            if state_shocks.ndim == 0:
                state_shocks = state_shocks[np.newaxis, np.newaxis]
            elif state_shocks.ndim == 1:
                state_shocks = state_shocks[:, np.newaxis]
            if not state_shocks.shape == (nsimulations, self.k_posdef):
                raise ValueError('Invalid shape of provided state shocks.'
                                 ' Required (%d, %d).'
                                 % (nsimulations, self.k_posdef))
        elif self.shapes['state_cov'][-1] == 1:
            state_shocks = np.random.multivariate_normal(
                mean=np.zeros(self.k_posdef), cov=self['state_cov'],
                size=nsimulations)

        # Get the initial states
        if initial_state is not None:
            initial_state = np.array(initial_state)
            if initial_state.ndim == 0:
                initial_state = initial_state[np.newaxis]
            elif (initial_state.ndim > 1 and
                    not initial_state.shape == (self.k_states, 1)):
                raise ValueError('Invalid shape of provided initial state'
                                 ' vector. Required (%d, 1)' % self.k_states)
        elif self.initialization == 'known':
            initial_state = np.random.multivariate_normal(
                self._initial_state, self._initial_state_cov)
        elif self.initialization == 'stationary':
            from scipy.linalg import solve_discrete_lyapunov
            # (I - T)^{-1} c = x  =>  (I - T) x = c
            initial_state_mean = np.linalg.solve(
                np.eye(self.k_states) - self['transition', :, :, 0],
                self['state_intercept', :, 0])
            R = self['selection', :, :, 0]
            Q = self['state_cov', :, :, 0]
            selected_state_cov = R.dot(Q).dot(R.T)
            initial_state_cov = solve_discrete_lyapunov(
                self['transition', :, :, 0], selected_state_cov)
            initial_state = np.random.multivariate_normal(
                initial_state_mean, initial_state_cov)
        elif self.initialization == 'approximate_diffuse':
            initial_state = np.zeros(self.k_states)
        else:
            initial_state = np.zeros(self.k_states)

        return self._simulate(nsimulations, measurement_shocks, state_shocks,
                              initial_state)

    def _simulate(self, nsimulations, measurement_shocks, state_shocks,
                  initial_state):
        time_invariant = self.time_invariant

        # Holding variables for the simulations
        simulated_obs = np.zeros((nsimulations, self.k_endog),
                                 dtype=self.dtype)
        simulated_states = np.zeros((nsimulations+1, self.k_states),
                                    dtype=self.dtype)
        simulated_states[0] = initial_state

        # Perform iterations to create the new time series
        obs_intercept_t = 0
        design_t = 0
        state_intercept_t = 0
        transition_t = 0
        selection_t = 0
        for t in range(nsimulations):
            # Get the current shocks (this accommodates time-varying matrices)
            if measurement_shocks is None:
                measurement_shock = np.random.multivariate_normal(
                    mean=np.zeros(self.k_endog),
                    cov=self['obs_cov', :, :, t])
            else:
                measurement_shock = measurement_shocks[t]

            if state_shocks is None:
                state_shock = np.random.multivariate_normal(
                    mean=np.zeros(self.k_posdef),
                    cov=self['state_cov', :, :, t])
            else:
                state_shock = state_shocks[t]

            # Get current-iteration matrices
            if not time_invariant:
                obs_intercept_t = 0 if self.obs_intercept.shape[-1] == 1 else t
                design_t = 0 if self.design.shape[-1] == 1 else t
                state_intercept_t = (
                    0 if self.state_intercept.shape[-1] == 1 else t)
                transition_t = 0 if self.transition.shape[-1] == 1 else t
                selection_t = 0 if self.selection.shape[-1] == 1 else t

            obs_intercept = self['obs_intercept', :, obs_intercept_t]
            design = self['design', :, :, design_t]
            state_intercept = self['state_intercept', :, state_intercept_t]
            transition = self['transition', :, :, transition_t]
            selection = self['selection', :, :, selection_t]

            # Iterate the measurement equation
            simulated_obs[t] = (
                obs_intercept + np.dot(design, simulated_states[t]) +
                measurement_shock)

            # Iterate the state equation
            simulated_states[t+1] = (
                state_intercept + np.dot(transition, simulated_states[t]) +
                np.dot(selection, state_shock))

        return simulated_obs, simulated_states[:-1]
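
    # A usage sketch for `simulate` (hypothetical time-invariant model `mod`
    # with k_endog = 1): draw a new 100-observation sample path using
    # automatically generated Gaussian shocks.
    #
    #     obs, states = mod.ssm.simulate(100)
    #     obs.shape     # (100, 1)        -> (nsimulations, k_endog)
    #     states.shape  # (100, k_states)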

    def impulse_responses(self, steps=10, impulse=0, orthogonalized=False,
                          cumulative=False, **kwargs):
        r"""
        Impulse response function

        Parameters
        ----------
        steps : int, optional
            The number of steps for which impulse responses are calculated.
            Default is 10. Note that the initial impulse is not counted as a
            step, so if `steps=1`, the output will have 2 entries.
        impulse : int or array_like
            If an integer, the state innovation to pulse; must be between 0
            and `k_posdef-1` where `k_posdef` is the same as in the state
            space model. Alternatively, a custom impulse vector may be
            provided; must be a column vector with shape `(k_posdef, 1)`.
        orthogonalized : boolean, optional
            Whether or not to perform impulse using orthogonalized
            innovations. Note that this will also affect custom `impulse`
            vectors. Default is False.
        cumulative : boolean, optional
            Whether or not to return cumulative impulse responses. Default is
            False.
        **kwargs
            If the model is time-varying and `steps` is greater than the
            number of observations, any of the state space representation
            matrices that are time-varying must have updated values provided
            for the out-of-sample steps. For example, if `design` is a
            time-varying component, `nobs` is 10, and `steps` is 15, a
            (`k_endog` x `k_states` x 5) matrix must be provided with the new
            design matrix values.

        Returns
        -------
        impulse_responses : array
            Responses for each endogenous variable due to the impulse given
            by the `impulse` argument. A (steps + 1 x k_endog) array.

        Notes
        -----
        Intercepts in the measurement and state equation are ignored when
        calculating impulse responses.
        """
        # Since the first step is the impulse itself, we actually want
        # steps + 1
        steps += 1

        # Check for what kind of impulse we want
        if type(impulse) == int:
            if impulse >= self.k_posdef or impulse < 0:
                raise ValueError('Invalid value for `impulse`. Must be the'
                                 ' index of one of the state innovations.')

            # Create the (non-orthogonalized) impulse vector
            idx = impulse
            impulse = np.zeros(self.k_posdef)
            impulse[idx] = 1
        else:
            impulse = np.array(impulse)
            if impulse.ndim > 1:
                impulse = np.squeeze(impulse)
            if not impulse.shape == (self.k_posdef,):
                raise ValueError('Invalid impulse vector. Must be shaped'
                                 ' (%d,)' % self.k_posdef)

        # Orthogonalize the impulses, if requested, using Cholesky on the
        # first state covariance matrix
        if orthogonalized:
            state_chol = np.linalg.cholesky(self.state_cov[:, :, 0])
            impulse = np.dot(state_chol, impulse)

        # If we have a time-invariant system, we can solve for the IRF
        # directly
        if self.time_invariant:
            # Get the state space matrices
            design = self.design[:, :, 0]
            transition = self.transition[:, :, 0]
            selection = self.selection[:, :, 0]

            # Holding arrays
            irf = np.zeros((steps, self.k_endog), dtype=self.dtype)
            states = np.zeros((steps, self.k_states), dtype=self.dtype)

            # First iteration
            states[0] = np.dot(selection, impulse)
            irf[0] = np.dot(design, states[0])

            # Iterations
            for t in range(1, steps):
                states[t] = np.dot(transition, states[t-1])
                irf[t] = np.dot(design, states[t])

        # Otherwise, create a new model
        else:
            # Get the basic model components
            representation = {}
            for name, shape in self.shapes.items():
                if name in ['obs', 'obs_intercept', 'state_intercept']:
                    continue
                representation[name] = getattr(self, name)

            # Allow additional specification
            warning = ('Model has time-invariant %s matrix, so the %s'
                       ' argument to `irf` has been ignored.')
            exception = ('Impulse response functions for models with'
                         ' time-varying %s matrix requires an updated'
                         ' time-varying matrix for any periods beyond those'
                         ' in the original model.')
            for name, shape in self.shapes.items():
                if name in ['obs', 'obs_intercept', 'state_intercept']:
                    continue
                if representation[name].shape[-1] == 1:
                    if name in kwargs:
                        warn(warning % (name, name), ValueWarning)
                elif name not in kwargs:
                    raise ValueError(exception % name)
                else:
                    mat = np.asarray(kwargs[name])
                    validate_matrix_shape(name, mat.shape, shape[0], shape[1],
                                          steps)
                    if mat.ndim < 3 or not mat.shape[2] == steps:
                        raise ValueError(exception % name)
                    representation[name] = np.c_[representation[name], mat]

            # Setup the new statespace representation
            model_kwargs = {
                'filter_method': self.filter_method,
                'inversion_method': self.inversion_method,
                'stability_method': self.stability_method,
                'conserve_memory': self.conserve_memory,
                'tolerance': self.tolerance,
                'loglikelihood_burn': self.loglikelihood_burn
            }
            model_kwargs.update(representation)
            model = KalmanFilter(np.zeros(self.endog.T.shape), self.k_states,
                                 self.k_posdef, **model_kwargs)
            model.initialize_approximate_diffuse()
            model._initialize_filter()
            model._initialize_state()

            # Get the impulse response function via simulation of the state
            # space model, but with other shocks set to zero
            # Since simulate returns the zero-th period, we need to simulate
            # steps + 1 periods and exclude the zero-th observation.
            steps += 1
            measurement_shocks = np.zeros((steps, self.k_endog))
            state_shocks = np.zeros((steps, self.k_posdef))
            state_shocks[0] = impulse
            irf, _ = model.simulate(
                steps, measurement_shocks=measurement_shocks,
                state_shocks=state_shocks)
            irf = irf[1:]

        # Get the cumulative response if requested
        if cumulative:
            irf = np.cumsum(irf, axis=0)

        return irf
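
# A usage sketch for `KalmanFilter.impulse_responses` (with `mod` a
# hypothetical model): responses of the observed series to a unit shock in
# the first state innovation, raw and cumulative.
#
#     irf = mod.ssm.impulse_responses(steps=10, impulse=0)
#     irf.shape  # (11, k_endog) -- the impulse period plus 10 further steps
#     cirf = mod.ssm.impulse_responses(steps=10, impulse=0, cumulative=True)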

class FilterResults(FrozenRepresentation):
    """
    Results from applying the Kalman filter to a state space model.

    Parameters
    ----------
    model : Representation
        A Statespace representation

    Attributes
    ----------
    nobs : int
        Number of observations.
    k_endog : int
        The dimension of the observation series.
    k_states : int
        The dimension of the unobserved state process.
    k_posdef : int
        The dimension of a guaranteed positive definite covariance matrix
        describing the shocks in the measurement equation.
    dtype : dtype
        Datatype of representation matrices.
    prefix : str
        BLAS prefix of representation matrices.
    shapes : dictionary of name,tuple
        A dictionary recording the shapes of each of the representation
        matrices as tuples.
    endog : array
        The observation vector.
    design : array
        The design matrix, :math:`Z`.
    obs_intercept : array
        The intercept for the observation equation, :math:`d`.
    obs_cov : array
        The covariance matrix for the observation equation :math:`H`.
    transition : array
        The transition matrix, :math:`T`.
    state_intercept : array
        The intercept for the transition equation, :math:`c`.
    selection : array
        The selection matrix, :math:`R`.
    state_cov : array
        The covariance matrix for the state equation :math:`Q`.
    missing : array of bool
        An array of the same size as `endog`, filled with boolean values that
        are True if the corresponding entry in `endog` is NaN and False
        otherwise.
    nmissing : array of int
        An array of size `nobs`, where the ith entry is the number (between 0
        and `k_endog`) of NaNs in the ith row of the `endog` array.
    time_invariant : bool
        Whether or not the representation matrices are time-invariant.
    initialization : str
        Kalman filter initialization method.
    initial_state : array_like
        The state vector used to initialize the Kalman filter.
    initial_state_cov : array_like
        The state covariance matrix used to initialize the Kalman filter.
    filter_method : int
        Bitmask representing the Kalman filtering method.
    inversion_method : int
        Bitmask representing the method used to invert the forecast error
        covariance matrix.
    stability_method : int
        Bitmask representing the methods used to promote numerical stability
        in the Kalman filter recursions.
    conserve_memory : int
        Bitmask representing the selected memory conservation method.
    filter_timing : int
        Whether or not to use the alternate timing convention.
    tolerance : float
        The tolerance at which the Kalman filter determines convergence to
        steady-state.
    loglikelihood_burn : int
        The number of initial periods during which the loglikelihood is not
        recorded.
    converged : bool
        Whether or not the Kalman filter converged.
    period_converged : int
        The time period in which the Kalman filter converged.
    filtered_state : array
        The filtered state vector at each time period.
    filtered_state_cov : array
        The filtered state covariance matrix at each time period.
    predicted_state : array
        The predicted state vector at each time period.
    predicted_state_cov : array
        The predicted state covariance matrix at each time period.
    kalman_gain : array
        The Kalman gain at each time period.
    forecasts : array
        The one-step-ahead forecasts of observations at each time period.
    forecasts_error : array
        The forecast errors at each time period.
    forecasts_error_cov : array
        The forecast error covariance matrices at each time period.
    llf_obs : array
        The loglikelihood values at each time period.
""" _filter_attributes = [ 'filter_method', 'inversion_method', 'stability_method', 'conserve_memory', 'filter_timing', 'tolerance', 'loglikelihood_burn', 'converged', 'period_converged', 'filtered_state', 'filtered_state_cov', 'predicted_state', 'predicted_state_cov', 'tmp1', 'tmp2', 'tmp3', 'tmp4', 'forecasts', 'forecasts_error', 'forecasts_error_cov', 'llf_obs', 'collapsed_forecasts', 'collapsed_forecasts_error', 'collapsed_forecasts_error_cov', ] _filter_options = ( KalmanFilter.filter_methods + KalmanFilter.stability_methods + KalmanFilter.inversion_methods + KalmanFilter.memory_options ) _attributes = FrozenRepresentation._model_attributes + _filter_attributes def __init__(self, model): super(FilterResults, self).__init__(model) # Setup caches for uninitialized objects self._kalman_gain = None self._standardized_forecasts_error = None def update_representation(self, model, only_options=False): """ Update the results to match a given model Parameters ---------- model : Representation The model object from which to take the updated values. only_options : boolean, optional If set to true, only the filter options are updated, and the state space representation is not updated. Default is False. Notes ----- This method is rarely required except for internal usage. """ if not only_options: super(FilterResults, self).update_representation(model) # Save the options as boolean variables for name in self._filter_options: setattr(self, name, getattr(model, name, None)) def update_filter(self, kalman_filter): """ Update the filter results Parameters ---------- kalman_filter : KalmanFilter The model object from which to take the updated values. Notes ----- This method is rarely required except for internal usage. """ # State initialization self.initial_state = np.array( kalman_filter.model.initial_state, copy=True ) self.initial_state_cov = np.array( kalman_filter.model.initial_state_cov, copy=True ) # Save Kalman filter parameters self.filter_method = kalman_filter.filter_method self.inversion_method = kalman_filter.inversion_method self.stability_method = kalman_filter.stability_method self.conserve_memory = kalman_filter.conserve_memory self.filter_timing = kalman_filter.filter_timing self.tolerance = kalman_filter.tolerance self.loglikelihood_burn = kalman_filter.loglikelihood_burn # Save Kalman filter output self.converged = bool(kalman_filter.converged) self.period_converged = kalman_filter.period_converged self.filtered_state = np.array(kalman_filter.filtered_state, copy=True) self.filtered_state_cov = np.array( kalman_filter.filtered_state_cov, copy=True ) self.predicted_state = np.array( kalman_filter.predicted_state, copy=True ) self.predicted_state_cov = np.array( kalman_filter.predicted_state_cov, copy=True ) # Reset caches has_missing = np.sum(self.nmissing) > 0 if not self._compatibility_mode and not (self.memory_no_std_forecast or self.invert_lu or self.solve_lu or self.filter_collapsed): if has_missing: self._standardized_forecasts_error = np.array( reorder_missing_vector( kalman_filter.standardized_forecast_error, self.missing, prefix=self.prefix)) else: self._standardized_forecasts_error = np.array( kalman_filter.standardized_forecast_error, copy=True) else: self._standardized_forecasts_error = None if not self._compatibility_mode: # In the partially missing data case, all entries will # be in the upper left submatrix rather than the correct placement # Re-ordering does not make sense in the collapsed case. 
            if has_missing and (not self.memory_no_gain and
                                not self.filter_collapsed):
                self._kalman_gain = np.array(reorder_missing_matrix(
                    kalman_filter.kalman_gain, self.missing,
                    reorder_cols=True, prefix=self.prefix))
                self.tmp1 = np.array(reorder_missing_matrix(
                    kalman_filter.tmp1, self.missing, reorder_cols=True,
                    prefix=self.prefix))
                self.tmp2 = np.array(reorder_missing_vector(
                    kalman_filter.tmp2, self.missing, prefix=self.prefix))
                self.tmp3 = np.array(reorder_missing_matrix(
                    kalman_filter.tmp3, self.missing, reorder_rows=True,
                    prefix=self.prefix))
                self.tmp4 = np.array(reorder_missing_matrix(
                    kalman_filter.tmp4, self.missing, reorder_cols=True,
                    reorder_rows=True, prefix=self.prefix))
            else:
                self._kalman_gain = np.array(
                    kalman_filter.kalman_gain, copy=True)
                self.tmp1 = np.array(kalman_filter.tmp1, copy=True)
                self.tmp2 = np.array(kalman_filter.tmp2, copy=True)
                self.tmp3 = np.array(kalman_filter.tmp3, copy=True)
                self.tmp4 = np.array(kalman_filter.tmp4, copy=True)
        else:
            self._kalman_gain = None

        # Note: use forecasts rather than forecast, so as not to interfere
        # with the `forecast` methods in subclasses
        self.forecasts = np.array(kalman_filter.forecast, copy=True)
        self.forecasts_error = np.array(
            kalman_filter.forecast_error, copy=True
        )
        self.forecasts_error_cov = np.array(
            kalman_filter.forecast_error_cov, copy=True
        )
        self.llf_obs = np.array(kalman_filter.loglikelihood, copy=True)

        # If there was missing data, save the original values from the Kalman
        # filter output, since below will set the values corresponding to
        # the missing observations to nans.
        self.missing_forecasts = None
        self.missing_forecasts_error = None
        self.missing_forecasts_error_cov = None
        if np.sum(self.nmissing) > 0:
            # Copy the provided arrays (which are from the Kalman filter
            # dataset) into new variables
            self.missing_forecasts = np.copy(self.forecasts)
            self.missing_forecasts_error = np.copy(self.forecasts_error)
            self.missing_forecasts_error_cov = (
                np.copy(self.forecasts_error_cov)
            )

        # Save the collapsed values
        self.collapsed_forecasts = None
        self.collapsed_forecasts_error = None
        self.collapsed_forecasts_error_cov = None
        if self.filter_collapsed:
            # Copy the provided arrays (which are from the collapsed dataset)
            # into new variables
            self.collapsed_forecasts = self.forecasts[:self.k_states, :]
            self.collapsed_forecasts_error = (
                self.forecasts_error[:self.k_states, :]
            )
            self.collapsed_forecasts_error_cov = (
                self.forecasts_error_cov[:self.k_states, :self.k_states, :]
            )
            # Recreate the original arrays (which should be from the original
            # dataset) in the appropriate dimension
            self.forecasts = np.zeros((self.k_endog, self.nobs))
            self.forecasts_error = np.zeros((self.k_endog, self.nobs))
            self.forecasts_error_cov = (
                np.zeros((self.k_endog, self.k_endog, self.nobs))
            )

        # Fill in missing values in the forecast, forecast error, and
        # forecast error covariance matrix (this is required due to how the
        # Kalman filter implements observations that are either partly or
        # completely missing)
        # Construct the predictions, forecasts
        if not (self.memory_no_forecast or self.memory_no_predicted):
            for t in range(self.nobs):
                design_t = 0 if self.design.shape[2] == 1 else t
                obs_cov_t = 0 if self.obs_cov.shape[2] == 1 else t
                obs_intercept_t = 0 if self.obs_intercept.shape[1] == 1 else t

                # For completely missing observations, the Kalman filter will
                # produce forecasts, but forecast errors and the forecast
                # error covariance matrix will be zeros - make them nan to
                # improve clarity of results.
                if self.nmissing[t] > 0:
                    mask = ~self.missing[:, t].astype(bool)
                    # We can recover forecasts
                    # For partially missing observations, the Kalman filter
                    # will produce all elements (forecasts, forecast errors,
                    # forecast error covariance matrices) as usual, but their
                    # dimension will only be equal to the number of
                    # non-missing elements, and their location in memory will
                    # be in the first blocks (e.g. for the forecasts_error,
                    # the first k_endog - nmissing[t] columns will be filled
                    # in), regardless of which endogenous variables they
                    # refer to (i.e. the non-missing endogenous variables for
                    # that observation). Furthermore, the forecast error
                    # covariance matrix is only valid for those elements. What
                    # is done is to set all elements to nan for these
                    # observations so that they are flagged as missing. The
                    # variables missing_forecasts, etc. then provide the
                    # forecasts, etc. provided by the Kalman filter, from
                    # which the data can be retrieved if desired.
                    self.forecasts[:, t] = np.dot(
                        self.design[:, :, design_t], self.predicted_state[:, t]
                    ) + self.obs_intercept[:, obs_intercept_t]
                    self.forecasts_error[:, t] = np.nan
                    self.forecasts_error[mask, t] = (
                        self.endog[mask, t] - self.forecasts[mask, t])
                    self.forecasts_error_cov[:, :, t] = np.dot(
                        np.dot(self.design[:, :, design_t],
                               self.predicted_state_cov[:, :, t]),
                        self.design[:, :, design_t].T
                    ) + self.obs_cov[:, :, obs_cov_t]
                # In the collapsed case, everything just needs to be rebuilt
                # for the original observed data, since the Kalman filter
                # produced these values for the collapsed data.
                elif self.filter_collapsed:
                    self.forecasts[:, t] = np.dot(
                        self.design[:, :, design_t], self.predicted_state[:, t]
                    ) + self.obs_intercept[:, obs_intercept_t]

                    self.forecasts_error[:, t] = (
                        self.endog[:, t] - self.forecasts[:, t]
                    )

                    self.forecasts_error_cov[:, :, t] = np.dot(
                        np.dot(self.design[:, :, design_t],
                               self.predicted_state_cov[:, :, t]),
                        self.design[:, :, design_t].T
                    ) + self.obs_cov[:, :, obs_cov_t]

    @property
    def kalman_gain(self):
        """
        Kalman gain matrices
        """
        if self._kalman_gain is None:
            # k x n
            self._kalman_gain = np.zeros(
                (self.k_states, self.k_endog, self.nobs), dtype=self.dtype)
            for t in range(self.nobs):
                # In the case of entirely missing observations, let the
                # Kalman gain be zeros.
                if self.nmissing[t] == self.k_endog:
                    continue

                design_t = 0 if self.design.shape[2] == 1 else t
                transition_t = 0 if self.transition.shape[2] == 1 else t
                if self.nmissing[t] == 0:
                    self._kalman_gain[:, :, t] = np.dot(
                        np.dot(
                            self.transition[:, :, transition_t],
                            self.predicted_state_cov[:, :, t]
                        ),
                        np.dot(
                            np.transpose(self.design[:, :, design_t]),
                            np.linalg.inv(self.forecasts_error_cov[:, :, t])
                        )
                    )
                else:
                    mask = ~self.missing[:, t].astype(bool)
                    F = self.forecasts_error_cov[np.ix_(mask, mask, [t])]
                    self._kalman_gain[:, mask, t] = np.dot(
                        np.dot(
                            self.transition[:, :, transition_t],
                            self.predicted_state_cov[:, :, t]
                        ),
                        np.dot(
                            np.transpose(self.design[mask, :, design_t]),
                            np.linalg.inv(F[:, :, 0])
                        )
                    )
        return self._kalman_gain

    @property
    def standardized_forecasts_error(self):
        r"""
        Standardized forecast errors

        Notes
        -----
        The forecast errors produced by the Kalman filter are

        .. math::

            v_t \sim N(0, F_t)

        Hypothesis tests are usually applied to the standardized residuals

        .. math::

            v_t^s = B_t v_t \sim N(0, I)

        where :math:`B_t = L_t^{-1}` and :math:`F_t = L_t L_t'`; then
        :math:`F_t^{-1} = (L_t')^{-1} L_t^{-1} = B_t' B_t`; :math:`B_t` and
        :math:`L_t` are lower triangular.
        Finally, :math:`B_t v_t \sim N(0, B_t F_t B_t')` and
        :math:`B_t F_t B_t' = L_t^{-1} L_t L_t' (L_t')^{-1} = I`.

        Thus we can rewrite :math:`v_t^s = L_t^{-1} v_t` or
        :math:`L_t v_t^s = v_t`; the latter equation is the form required to
        use a linear solver to recover :math:`v_t^s`. Since :math:`L_t` is
        lower triangular, we can use a triangular solver (?TRTRS).
        """
        if self._standardized_forecasts_error is None:
            if self.k_endog == 1:
                self._standardized_forecasts_error = (
                    self.forecasts_error /
                    self.forecasts_error_cov[0, 0, :]**0.5)
            else:
                from scipy import linalg
                self._standardized_forecasts_error = np.zeros(
                    self.forecasts_error.shape, dtype=self.dtype)
                for t in range(self.forecasts_error_cov.shape[2]):
                    if self.nmissing[t] > 0:
                        self._standardized_forecasts_error[:, t] = np.nan
                    if self.nmissing[t] < self.k_endog:
                        mask = ~self.missing[:, t].astype(bool)
                        F = self.forecasts_error_cov[np.ix_(mask, mask, [t])]
                        upper, _ = linalg.cho_factor(F[:, :, 0])
                        self._standardized_forecasts_error[mask, t] = (
                            linalg.solve_triangular(
                                upper, self.forecasts_error[mask, t],
                                trans=1))

        return self._standardized_forecasts_error

    def predict(self, start=None, end=None, dynamic=None, **kwargs):
        r"""
        In-sample and out-of-sample prediction for state space models
        generally

        Parameters
        ----------
        start : int, optional
            Zero-indexed observation number at which to start forecasting,
            i.e., the first forecast will be at start.
        end : int, optional
            Zero-indexed observation number at which to end forecasting, i.e.,
            the last forecast will be at end.
        dynamic : int, optional
            Offset relative to `start` at which to begin dynamic prediction.
            Prior to this observation, true endogenous values will be used for
            prediction; starting with this observation and continuing through
            the end of prediction, forecasted endogenous values will be used
            instead.
        **kwargs
            If the prediction range is outside of the sample range, any of the
            state space representation matrices that are time-varying must
            have updated values provided for the out-of-sample range. For
            example, if `obs_intercept` is a time-varying component and the
            prediction range extends 10 periods beyond the end of the sample,
            a (`k_endog` x 10) matrix must be provided with the new intercept
            values.

        Returns
        -------
        results : PredictionResults
            A PredictionResults object.

        Notes
        -----
        All prediction is performed by applying the deterministic part of the
        measurement equation using the predicted state variables.

        Out-of-sample prediction first applies the Kalman filter to missing
        data for the number of periods desired to obtain the predicted states.
        """
        # Cannot predict if we do not have appropriate arrays
        if self.memory_no_forecast or self.memory_no_predicted:
            raise ValueError('Predict is not possible if memory conservation'
                             ' has been used to avoid storing forecasts or'
                             ' predicted values.')
        # Get the start and the end of the entire prediction range
        if start is None:
            start = 0
        elif start < 0:
            raise ValueError('Cannot predict values previous to the sample.')
        if end is None:
            end = self.nobs

        # Prediction and forecasting is performed by iterating the Kalman
        # filter through the entire range [0, end].
        # Then, everything is returned corresponding to the range
        # [start, end].
# In order to perform the calculations, the range is separately split # up into the following categories: # - static: (in-sample) the Kalman filter is run as usual # - dynamic: (in-sample) the Kalman filter is run, but on missing data # - forecast: (out-of-sample) the Kalman filter is run, but on missing # data # Short-circuit if end is before start if end <= start: raise ValueError('End of prediction must be after start.') # Get the number of forecasts to make after the end of the sample nforecast = max(0, end - self.nobs) # Get the number of dynamic prediction periods # If `dynamic=True`, then assume that we want to begin dynamic # prediction at the start of the sample prediction. if dynamic is True: dynamic = 0 # If `dynamic=False`, then assume we want no dynamic prediction if dynamic is False: dynamic = None ndynamic = 0 if dynamic is not None: # Replace the relative dynamic offset with an absolute offset dynamic = start + dynamic # Validate the `dynamic` parameter if dynamic < 0: raise ValueError('Dynamic prediction cannot begin prior to the' ' first observation in the sample.') elif dynamic > end: warn('Dynamic prediction specified to begin after the end of' ' prediction, and so has no effect.', ValueWarning) dynamic = None elif dynamic > self.nobs: warn('Dynamic prediction specified to begin during' ' out-of-sample forecasting period, and so has no' ' effect.', ValueWarning) dynamic = None # Get the total size of the desired dynamic forecasting component # Note: the first `dynamic` periods of prediction are actually # *not* dynamic, because dynamic prediction begins at observation # `dynamic`. if dynamic is not None: ndynamic = max(0, min(end, self.nobs) - dynamic) # Get the number of in-sample static predictions nstatic = min(end, self.nobs) if dynamic is None else dynamic # Construct the design and observation intercept and covariance # matrices for start-npadded:end. If not time-varying in the original # model, then they will be copied over if none are provided in # `kwargs`. Otherwise additional matrices must be provided in `kwargs`. representation = {} for name, shape in self.shapes.items(): if name == 'obs': continue representation[name] = getattr(self, name) # Update the matrices from kwargs for forecasts warning = ('Model has time-invariant %s matrix, so the %s' ' argument to `predict` has been ignored.') exception = ('Forecasting for models with time-varying %s matrix' ' requires an updated time-varying matrix for the' ' period to be forecasted.') if nforecast > 0: for name, shape in self.shapes.items(): if name == 'obs': continue if representation[name].shape[-1] == 1: if name in kwargs: warn(warning % (name, name), ValueWarning) elif name not in kwargs: raise ValueError(exception % name) else: mat = np.asarray(kwargs[name]) if len(shape) == 2: validate_vector_shape(name, mat.shape, shape[0], nforecast) if mat.ndim < 2 or not mat.shape[1] == nforecast: raise ValueError(exception % name) representation[name] = np.c_[representation[name], mat] else: validate_matrix_shape(name, mat.shape, shape[0], shape[1], nforecast) if mat.ndim < 3 or not mat.shape[2] == nforecast: raise ValueError(exception % name) representation[name] = np.c_[representation[name], mat] # Update the matrices from kwargs for dynamic prediction in the case # that `end` is less than `nobs` and `dynamic` is less than `end`. In # this case, any time-varying matrices in the default `representation` # will be too long, causing an error to be thrown below in the # KalmanFilter(...) 
construction call, because the endog has length # nstatic + ndynamic + nforecast, whereas the time-varying matrices # from `representation` have length nobs. if ndynamic > 0 and end < self.nobs: for name, shape in self.shapes.items(): if not name == 'obs' and representation[name].shape[-1] > 1: representation[name] = representation[name][..., :end] # Construct the predicted state and covariance matrix for each time # period depending on whether that time period corresponds to # one-step-ahead prediction, dynamic prediction, or out-of-sample # forecasting. # If we only have simple prediction, then we can use the already saved # Kalman filter output if ndynamic == 0 and nforecast == 0: results = self else: # Construct the new endogenous array. endog = np.empty((self.k_endog, ndynamic + nforecast)) endog.fill(np.nan) endog = np.asfortranarray(np.c_[self.endog[:, :nstatic], endog]) # Setup the new statespace representation model_kwargs = { 'filter_method': self.filter_method, 'inversion_method': self.inversion_method, 'stability_method': self.stability_method, 'conserve_memory': self.conserve_memory, 'filter_timing': self.filter_timing, 'tolerance': self.tolerance, 'loglikelihood_burn': self.loglikelihood_burn } model_kwargs.update(representation) model = KalmanFilter( endog, self.k_states, self.k_posdef, **model_kwargs ) model.initialize_known( self.initial_state, self.initial_state_cov ) model._initialize_filter() model._initialize_state() results = self._predict(nstatic, ndynamic, nforecast, model) return PredictionResults(results, start, end, nstatic, ndynamic, nforecast) def _predict(self, nstatic, ndynamic, nforecast, model): # Note: this doesn't use self, and can either be a static method or # moved outside the class altogether. # Get the underlying filter kfilter = model._kalman_filter # Save this (which shares memory with the memoryview on which the # Kalman filter will be operating) so that we can replace actual data # with predicted data during dynamic forecasting endog = model._representations[model.prefix]['obs'] for t in range(kfilter.model.nobs): # Run the Kalman filter for the first `nstatic` periods (for # which dynamic computation will not be performed) if t < nstatic: next(kfilter) # Perform dynamic prediction elif t < nstatic + ndynamic: design_t = 0 if model.design.shape[2] == 1 else t obs_intercept_t = 0 if model.obs_intercept.shape[1] == 1 else t # Unconditional value is the intercept (often zeros) endog[:, t] = model.obs_intercept[:, obs_intercept_t] # If t > 0, then we can condition the forecast on the state if t > 0: # Predict endog[:, t] given `predicted_state` calculated in # previous iteration (i.e. t-1) endog[:, t] += np.dot( model.design[:, :, design_t], kfilter.predicted_state[:, t] ) # Advance Kalman filter next(kfilter) # Perform any (one-step-ahead) forecasting else: next(kfilter) # Return the predicted state and predicted state covariance matrices results = FilterResults(model) results.update_representation(model) results.update_filter(kfilter) return results class PredictionResults(FilterResults): r""" Results of in-sample and out-of-sample prediction for state space models generally Parameters ---------- results : FilterResults Output from filtering, corresponding to the prediction desired start : int Zero-indexed observation number at which to start forecasting, i.e., the first forecast will be at start. end : int Zero-indexed observation number at which to end forecasting, i.e., the last forecast will be at end. 
    nstatic : int
        Number of in-sample static predictions (these are always the first
        elements of the prediction output).
    ndynamic : int
        Number of in-sample dynamic predictions (these always follow the
        static predictions directly, and are directly followed by the
        forecasts).
    nforecast : int
        Number of out-of-sample forecasts (these always follow the dynamic
        predictions directly).

    Attributes
    ----------
    npredictions : int
        Number of observations in the predicted series; this is not
        necessarily the same as the number of observations in the original
        model from which prediction was performed.
    start : int
        Zero-indexed observation number at which to start prediction, i.e.,
        the first predict will be at `start`; this is relative to the
        original model from which prediction was performed.
    end : int
        Zero-indexed observation number at which to end prediction, i.e., the
        last predict will be at `end`; this is relative to the original model
        from which prediction was performed.
    nstatic : int
        Number of in-sample static predictions.
    ndynamic : int
        Number of in-sample dynamic predictions.
    nforecast : int
        Number of out-of-sample forecasts.
    endog : array
        The observation vector.
    design : array
        The design matrix, :math:`Z`.
    obs_intercept : array
        The intercept for the observation equation, :math:`d`.
    obs_cov : array
        The covariance matrix for the observation equation :math:`H`.
    transition : array
        The transition matrix, :math:`T`.
    state_intercept : array
        The intercept for the transition equation, :math:`c`.
    selection : array
        The selection matrix, :math:`R`.
    state_cov : array
        The covariance matrix for the state equation :math:`Q`.
    filtered_state : array
        The filtered state vector at each time period.
    filtered_state_cov : array
        The filtered state covariance matrix at each time period.
    predicted_state : array
        The predicted state vector at each time period.
    predicted_state_cov : array
        The predicted state covariance matrix at each time period.
    forecasts : array
        The one-step-ahead forecasts of observations at each time period.
    forecasts_error : array
        The forecast errors at each time period.
    forecasts_error_cov : array
        The forecast error covariance matrices at each time period.

    Notes
    -----
    The provided ranges must be conformable, meaning that it must be that
    `end - start == nstatic + ndynamic + nforecast`.

    This class is essentially a view to the FilterResults object, but
    returning the appropriate ranges for everything.
    """
    representation_attributes = [
        'endog', 'design', 'obs_intercept', 'obs_cov',
        'transition', 'state_intercept', 'selection', 'state_cov'
    ]
    filter_attributes = [
        'filtered_state', 'filtered_state_cov',
        'predicted_state', 'predicted_state_cov',
        'forecasts', 'forecasts_error', 'forecasts_error_cov'
    ]

    def __init__(self, results, start, end, nstatic, ndynamic, nforecast):
        # Save the filter results object
        self.results = results

        # Save prediction ranges
        self.npredictions = end - start
        self.start = start
        self.end = end
        self.nstatic = nstatic
        self.ndynamic = ndynamic
        self.nforecast = nforecast

    def __getattr__(self, attr):
        """
        Provide access to the representation and filtered output in the
        appropriate range (`start` - `end`).
""" # Prevent infinite recursive lookups if attr[0] == '_': raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)) _attr = '_' + attr # Cache the attribute if not hasattr(self, _attr): if attr == 'endog' or attr in self.filter_attributes: # Get a copy value = getattr(self.results, attr).copy() # Subset to the correct time frame value = value[..., self.start:self.end] elif attr in self.representation_attributes: value = getattr(self.results, attr).copy() # If a time-invariant matrix, return it. Otherwise, subset to # the correct period. if value.shape[-1] == 1: value = value[..., 0] else: value = value[..., self.start:self.end] else: raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)) setattr(self, _attr, value) return getattr(self, _attr)
from __future__ import print_function  # multi-argument print() below requires this on Python 2

import platform
import os
import sys
import subprocess
from pprint import pprint

print('Python version:', platform.python_version())
print('max unicode:', sys.maxunicode)
print('architecture:', platform.architecture())
print('sys.version:', sys.version)
print('platform.machine():', platform.machine())

import _bisect
import _codecs_cn
import _codecs_hk
import _codecs_iso2022
import _codecs_jp
import _codecs_kr
import _codecs_tw
import _collections
import _csv
import _ctypes
import _ctypes_test
import _elementtree
import _functools
import _hashlib
import _heapq
import _hotshot
import _io
import _json
import _locale
import _lsprof
import _multibytecodec
import _multiprocessing
import _random
import _socket
import _sqlite3
import _ssl
import _struct
import _testcapi
import array
import audioop
import binascii
import bz2
import cPickle
import cStringIO
import cmath
import datetime
import future_builtins
import itertools
import math
import mmap
import operator
import parser
import pyexpat
#import resource
import select
import strop
#import syslog
import time
import unicodedata
import zlib
import gzip
from os import urandom

if sys.platform != 'win32':
    import crypt
    import fcntl
    import grp
    import nis

import ssl
print('OPENSSL_VERSION:', ssl.OPENSSL_VERSION)
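# A hedged variant of the probe above: instead of bare import statements
# (which abort on the first missing module), attempt each import through
# importlib and report failures. The module list here is illustrative only.
import importlib

def probe_imports(names):
    """Try to import each named module; map name -> None or error text."""
    results = {}
    for name in names:
        try:
            importlib.import_module(name)
            results[name] = None
        except ImportError as exc:
            results[name] = str(exc)
    return results

for _name, _err in sorted(probe_imports(['zlib', 'bz2', '_ssl', 'audioop']).items()):
    print(_name, 'OK' if _err is None else 'FAILED: %s' % _err)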
# -*- coding: utf-8 -*- # Copyright (c) 2013-2014 Google, Inc. # Copyright (c) 2013-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr> # Copyright (c) 2014 Arun Persaud <arun@nubati.net> # Copyright (c) 2015-2016 Claudiu Popa <pcmanticore@gmail.com> # Copyright (c) 2015 Aru Sahni <arusahni@gmail.com> # Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro> # Copyright (c) 2016 Derek Gustafson <degustaf@gmail.com> # Copyright (c) 2016 Glenn Matthews <glenn@e-dad.net> # Copyright (c) 2017 ttenhoeve-aa <ttenhoeve@appannie.com> # Copyright (c) 2017 Anthony Sottile <asottile@umich.edu> # Copyright (c) 2017 Łukasz Rogalski <rogalski.91@gmail.com> # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html # For details: https://github.com/PyCQA/pylint/blob/master/COPYING import io import re import warnings import astroid from pylint import utils from pylint.checkers.utils import check_messages, get_node_last_lineno from pylint.exceptions import InvalidMessageError import pytest class TestPyLintASTWalker(object): class MockLinter(object): def __init__(self, msgs): self._msgs = msgs def is_message_enabled(self, msgid): return self._msgs.get(msgid, True) class Checker(object): def __init__(self): self.called = set() @check_messages('first-message') def visit_module(self, module): self.called.add('module') @check_messages('second-message') def visit_call(self, module): raise NotImplementedError @check_messages('second-message', 'third-message') def visit_assignname(self, module): self.called.add('assignname') @check_messages('second-message') def leave_assignname(self, module): raise NotImplementedError def test_check_messages(self): linter = self.MockLinter({'first-message': True, 'second-message': False, 'third-message': True}) walker = utils.PyLintASTWalker(linter) checker = self.Checker() walker.add_checker(checker) walker.walk(astroid.parse("x = func()")) assert {'module', 'assignname'} == checker.called def test_deprecated_methods(self): class Checker(object): def __init__(self): self.called = False @check_messages('first-message') def visit_assname(self, node): self.called = True linter = self.MockLinter({'first-message': True}) walker = utils.PyLintASTWalker(linter) checker = Checker() walker.add_checker(checker) with warnings.catch_warnings(record=True): warnings.simplefilter('always') walker.walk(astroid.parse("x = 1")) assert not checker.called def test__basename_in_blacklist_re_match(): patterns = [re.compile(".*enchilada.*"), re.compile("unittest_.*")] assert utils._basename_in_blacklist_re("unittest_utils.py", patterns) assert utils._basename_in_blacklist_re("cheese_enchiladas.xml", patterns) def test__basename_in_blacklist_re_nomatch(): patterns = [re.compile(".*enchilada.*"), re.compile("unittest_.*")] assert not utils._basename_in_blacklist_re("test_utils.py", patterns) assert not utils._basename_in_blacklist_re("enchilad.py", patterns) @pytest.fixture def store(): return utils.MessagesStore() @pytest.mark.parametrize("messages,expected", [ ({'W1234': ('message one', 'msg-symbol-one', 'msg description'), 'W4321': ('message two', 'msg-symbol-two', 'msg description')}, r"Inconsistent checker part in message id 'W4321' (expected 'x12xx')"), ({'W1233': ('message two', 'msg-symbol-two', 'msg description', {'old_names': [('W1234', 'old-symbol')]}), 'W1234': ('message one', 'msg-symbol-one', 'msg description')}, "Message id 'W1234' is already defined"), ({'W1234': ('message one', 'msg-symbol-one', 'msg description'), 'W1235': ('message two', 
'msg-symbol-two', 'msg description', {'old_names': [('W1234', 'old-symbol')]})}, "Message id 'W1234' is already defined"), ({'W1234': ('message one', 'msg-symbol-one', 'msg description', {'old_names': [('W1201', 'old-symbol-one')]}), 'W1235': ('message two', 'msg-symbol-two', 'msg description', {'old_names': [('W1201', 'old-symbol-two')]})}, "Message id 'W1201' is already defined"), ({'W1234': ('message one', 'msg-symbol', 'msg description'), 'W1235': ('message two', 'msg-symbol', 'msg description')}, "Message symbol 'msg-symbol' is already defined"), ({'W1233': ('message two', 'msg-symbol-two', 'msg description', {'old_names': [('W1230', 'msg-symbol-one')]}), 'W1234': ('message one', 'msg-symbol-one', 'msg description')}, "Message symbol 'msg-symbol-one' is already defined"), ({'W1234': ('message one', 'msg-symbol-one', 'msg description'), 'W1235': ('message two', 'msg-symbol-two', 'msg description', {'old_names': [('W1230', 'msg-symbol-one')]})}, "Message symbol 'msg-symbol-one' is already defined"), ({'W1234': ('message one', 'msg-symbol-one', 'msg description', {'old_names': [('W1230', 'old-symbol-one')]}), 'W1235': ('message two', 'msg-symbol-two', 'msg description', {'old_names': [('W1231', 'old-symbol-one')]})}, "Message alternate name 'old-symbol-one' is already defined"), ]) def test_register_error(store, messages, expected): class Checker(object): name = 'checker' msgs = messages with pytest.raises(InvalidMessageError) as cm: store.register_messages(Checker()) assert str(cm.value) == expected def test_register_error_new_id_duplicate_of_new(store): class CheckerOne(object): name = 'checker_one' msgs = { 'W1234': ('message one', 'msg-symbol-one', 'msg description.'), } class CheckerTwo(object): name = 'checker_two' msgs = { 'W1234': ('message two', 'msg-symbol-two', 'another msg description.'), } store.register_messages(CheckerOne()) test_register_error(store, {'W1234': ('message two', 'msg-symbol-two', 'another msg description.')}, "Message id 'W1234' is already defined") @pytest.mark.parametrize("msgid,expected", [ ("Q1234", "Bad message type Q in 'Q1234'"), ("W12345", "Invalid message id 'W12345'"), ]) def test_create_invalid_message_type(msgid, expected): with pytest.raises(InvalidMessageError) as cm: utils.MessageDefinition('checker', msgid, 'msg', 'descr', 'symbol', 'scope') assert str(cm.value) == expected def test__decoding_readline(): """_decoding_readline should yield line at a time even if the binary representation of the text is not splittable line-by-line. For convenience this test uses a codec that is easy to understand, though this *specific* codec is unlikely to be seen in the wild for files. """ binary_io = io.BytesIO(b'foo\\nbaz\\n') readline = utils._decoding_readline(binary_io, 'unicode_escape') ret = [] s = readline() while s != '': ret.append(s) s = readline() assert ret == ['foo\n', 'baz\n'] def test_decoding_stream_unknown_encoding(): """decoding_stream should fall back to *some* decoding when given an unknown encoding. 
""" binary_io = io.BytesIO(b'foo\nbar') stream = utils.decoding_stream(binary_io, 'garbage-encoding') # should still act like a StreamReader ret = stream.readlines() assert ret == ['foo\n', 'bar'] def test_decoding_stream_known_encoding(): binary_io = io.BytesIO(u'€'.encode('cp1252')) stream = utils.decoding_stream(binary_io, 'cp1252') assert stream.read() == u'€' class TestGetNodeLastLineno: def test_get_node_last_lineno_simple(self): node = astroid.extract_node(""" pass """) assert get_node_last_lineno(node) == 2 def test_get_node_last_lineno_if_simple(self): node = astroid.extract_node(""" if True: print(1) pass """) assert get_node_last_lineno(node) == 4 def test_get_node_last_lineno_if_elseif_else(self): node = astroid.extract_node(""" if True: print(1) elif False: print(2) else: print(3) """) assert get_node_last_lineno(node) == 7 def test_get_node_last_lineno_while(self): node = astroid.extract_node(""" while True: print(1) """) assert get_node_last_lineno(node) == 3 def test_get_node_last_lineno_while_else(self): node = astroid.extract_node(""" while True: print(1) else: print(2) """) assert get_node_last_lineno(node) == 5 def test_get_node_last_lineno_for(self): node = astroid.extract_node(""" for x in range(0, 5): print(1) """) assert get_node_last_lineno(node) == 3 def test_get_node_last_lineno_for_else(self): node = astroid.extract_node(""" for x in range(0, 5): print(1) else: print(2) """) assert get_node_last_lineno(node) == 5 def test_get_node_last_lineno_try(self): node = astroid.extract_node(""" try: print(1) except ValueError: print(2) except Exception: print(3) """) assert get_node_last_lineno(node) == 7 def test_get_node_last_lineno_try_except_else(self): node = astroid.extract_node(""" try: print(1) except Exception: print(2) print(3) else: print(4) """) assert get_node_last_lineno(node) == 8 def test_get_node_last_lineno_try_except_finally(self): node = astroid.extract_node(""" try: print(1) except Exception: print(2) finally: print(4) """) assert get_node_last_lineno(node) == 7 def test_get_node_last_lineno_try_except_else_finally(self): node = astroid.extract_node(""" try: print(1) except Exception: print(2) else: print(3) finally: print(4) """) assert get_node_last_lineno(node) == 9 def test_get_node_last_lineno_with(self): node = astroid.extract_node(""" with x as y: print(1) pass """) assert get_node_last_lineno(node) == 4 def test_get_node_last_lineno_method(self): node = astroid.extract_node(""" def x(a, b): print(a, b) pass """) assert get_node_last_lineno(node) == 4 def test_get_node_last_lineno_decorator(self): node = astroid.extract_node(""" @decor() def x(a, b): print(a, b) pass """) assert get_node_last_lineno(node) == 5 def test_get_node_last_lineno_class(self): node = astroid.extract_node(""" class C(object): CONST = True def x(self, b): print(b) def y(self): pass pass """) assert get_node_last_lineno(node) == 10 def test_get_node_last_lineno_combined(self): node = astroid.extract_node(""" class C(object): CONST = True def y(self): try: pass except: pass finally: pass """) assert get_node_last_lineno(node) == 11
import pandas as pd
from math import sin, cos, sqrt, asin, radians
#import ibm_db


def cal_dist(lon1, lat1, lon2, lat2):
    """Haversine great-circle distance (km) between two lon/lat points."""
    lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
    c = 2 * asin(sqrt(a))
    distance = 6378.137 * c
    return distance


food = 'D:\\Dropbox\\Mcomp\\CS5224\\Project\\Cents_trip-master\\dataset\\food.csv'
tourism_attractions = 'D:\\Dropbox\\Mcomp\\CS5224\\Project\\Cents_trip-master\\dataset\\TOURISM_ATTRACTIONS.csv'

food_df = pd.read_csv(food)
tourism_attractions_df = pd.read_csv(tourism_attractions)

food_data = food_df.iloc[:, [0, 6, 7]]
tourism_attractions_data = tourism_attractions_df.iloc[:, [0, 2, 3]]

foodid = food_data['FOODID'].as_matrix()
#print(len(roomid))
lat_food = food_data['LATITUDE'].as_matrix()
lng_food = food_data['LONGITUDE'].as_matrix()

attractionid = tourism_attractions_data['ATTRACTIONID'].as_matrix()
#print(attractionid)
lat_attractions = tourism_attractions_data['LATITUDE'].as_matrix()
lng_attractions = tourism_attractions_data['LONGITUDE'].as_matrix()

distances = []
# conn = ibm_db.connect("DATABASE=BLUDB;HOSTNAME=dashdb-entry-yp-dal09-09.services.dal.bluemix.net;\
#                 PORT=50000;PROTOCOL=TCPIP;UID=dash9787;\
#                 PWD=X_c03EeYTe#u;", "", "")

# distances is filled attraction-major: pair (i, k) lands at index
# i * len(food_data) + k
for i in range(len(tourism_attractions_data)):
    for k in range(len(food_data)):
        distance = cal_dist(lng_attractions[i], lat_attractions[i],
                            lng_food[k], lat_food[k])
        # print(distance)
        distances.append(distance)

output = open('rating.txt', 'w')
for i in range(len(tourism_attractions_data)):
    for j in range(len(food_data)):
        this_attractid = str(attractionid[i])
        this_foodid = str(foodid[j])
        # index mirrors how `distances` was filled above
        this_distance = str(distances[i * len(food_data) + j])
        output.write(this_attractid)
        output.write('\t')
        output.write(this_foodid)
        output.write('\t')
        output.write(this_distance)
        output.write('\n')
output.close()
#print(len(distances))

# k = 1
# for i in range(len(tourism_attractions_data)):
#     for j in range(len(food_data)):
#         this_attractid = attractionid[i]
#         this_foodid = foodid[j]
#         this_distance = distances[i * len(food_data) + j]
#         sql = r'INSERT INTO DISTANCE_FOOD_ATTRACTION(ATTRACTIONID, FOODID, DISTANCE) VALUES({attractionID}, {foodID}, {distance})'.format(
#             attractionID=this_attractid, foodID=this_foodid, distance=this_distance
#         )
#         print(sql, '>>')
#         try:
#             stmt = ibm_db.exec_immediate(conn, sql)
#         except Exception as e:
#             print(e)
#             print("Inserting couldn't be completed.")
#             ibm_db.rollback(conn)
#         else:
#             ibm_db.commit(conn)
#             print("Inserting complete.")
#         print('-----' + str(k) + '-----')
#         k += 1
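# Sanity check for cal_dist above (haversine on a sphere of radius
# 6378.137 km): one degree of latitude is roughly 111 km, so the distance
# between (lon, lat) = (0, 0) and (0, 1) should land near that value. The
# coordinates are illustrative; this runs only when the script is executed
# directly, after the pipeline above.
if __name__ == '__main__':
    d = cal_dist(0.0, 0.0, 0.0, 1.0)
    assert 110.0 < d < 112.5, d
    print('1 degree of latitude ~ %.2f km' % d)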
"""Support for AlarmDecoder-based alarm control panels (Honeywell/DSC).""" import logging import voluptuous as vol from homeassistant.components.alarm_control_panel import ( FORMAT_NUMBER, AlarmControlPanelEntity, ) from homeassistant.components.alarm_control_panel.const import ( SUPPORT_ALARM_ARM_AWAY, SUPPORT_ALARM_ARM_HOME, SUPPORT_ALARM_ARM_NIGHT, ) from homeassistant.config_entries import ConfigEntry from homeassistant.const import ( ATTR_CODE, STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME, STATE_ALARM_ARMED_NIGHT, STATE_ALARM_DISARMED, STATE_ALARM_TRIGGERED, ) from homeassistant.helpers import entity_platform import homeassistant.helpers.config_validation as cv from homeassistant.helpers.typing import HomeAssistantType from .const import ( CONF_ALT_NIGHT_MODE, CONF_AUTO_BYPASS, CONF_CODE_ARM_REQUIRED, DATA_AD, DEFAULT_ARM_OPTIONS, DOMAIN, OPTIONS_ARM, SIGNAL_PANEL_MESSAGE, ) _LOGGER = logging.getLogger(__name__) SERVICE_ALARM_TOGGLE_CHIME = "alarm_toggle_chime" SERVICE_ALARM_KEYPRESS = "alarm_keypress" ATTR_KEYPRESS = "keypress" async def async_setup_entry( hass: HomeAssistantType, entry: ConfigEntry, async_add_entities ): """Set up for AlarmDecoder alarm panels.""" options = entry.options arm_options = options.get(OPTIONS_ARM, DEFAULT_ARM_OPTIONS) client = hass.data[DOMAIN][entry.entry_id][DATA_AD] entity = AlarmDecoderAlarmPanel( client=client, auto_bypass=arm_options[CONF_AUTO_BYPASS], code_arm_required=arm_options[CONF_CODE_ARM_REQUIRED], alt_night_mode=arm_options[CONF_ALT_NIGHT_MODE], ) async_add_entities([entity]) platform = entity_platform.current_platform.get() platform.async_register_entity_service( SERVICE_ALARM_TOGGLE_CHIME, { vol.Required(ATTR_CODE): cv.string, }, "alarm_toggle_chime", ) platform.async_register_entity_service( SERVICE_ALARM_KEYPRESS, { vol.Required(ATTR_KEYPRESS): cv.string, }, "alarm_keypress", ) class AlarmDecoderAlarmPanel(AlarmControlPanelEntity): """Representation of an AlarmDecoder-based alarm panel.""" def __init__(self, client, auto_bypass, code_arm_required, alt_night_mode): """Initialize the alarm panel.""" self._client = client self._display = "" self._name = "Alarm Panel" self._state = None self._ac_power = None self._alarm_event_occurred = None self._backlight_on = None self._battery_low = None self._check_zone = None self._chime = None self._entry_delay_off = None self._programming_mode = None self._ready = None self._zone_bypassed = None self._auto_bypass = auto_bypass self._code_arm_required = code_arm_required self._alt_night_mode = alt_night_mode async def async_added_to_hass(self): """Register callbacks.""" self.async_on_remove( self.hass.helpers.dispatcher.async_dispatcher_connect( SIGNAL_PANEL_MESSAGE, self._message_callback ) ) def _message_callback(self, message): """Handle received messages.""" if message.alarm_sounding or message.fire_alarm: self._state = STATE_ALARM_TRIGGERED elif message.armed_away: self._state = STATE_ALARM_ARMED_AWAY elif message.armed_home and (message.entry_delay_off or message.perimeter_only): self._state = STATE_ALARM_ARMED_NIGHT elif message.armed_home: self._state = STATE_ALARM_ARMED_HOME else: self._state = STATE_ALARM_DISARMED self._ac_power = message.ac_power self._alarm_event_occurred = message.alarm_event_occurred self._backlight_on = message.backlight_on self._battery_low = message.battery_low self._check_zone = message.check_zone self._chime = message.chime_on self._entry_delay_off = message.entry_delay_off self._programming_mode = message.programming_mode self._ready = message.ready 
self._zone_bypassed = message.zone_bypassed self.schedule_update_ha_state() @property def name(self): """Return the name of the device.""" return self._name @property def should_poll(self): """Return the polling state.""" return False @property def code_format(self): """Return one or more digits/characters.""" return FORMAT_NUMBER @property def state(self): """Return the state of the device.""" return self._state @property def supported_features(self) -> int: """Return the list of supported features.""" return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY | SUPPORT_ALARM_ARM_NIGHT @property def code_arm_required(self): """Whether the code is required for arm actions.""" return self._code_arm_required @property def device_state_attributes(self): """Return the state attributes.""" return { "ac_power": self._ac_power, "alarm_event_occurred": self._alarm_event_occurred, "backlight_on": self._backlight_on, "battery_low": self._battery_low, "check_zone": self._check_zone, "chime": self._chime, "entry_delay_off": self._entry_delay_off, "programming_mode": self._programming_mode, "ready": self._ready, "zone_bypassed": self._zone_bypassed, "code_arm_required": self._code_arm_required, } def alarm_disarm(self, code=None): """Send disarm command.""" if code: self._client.send(f"{code!s}1") def alarm_arm_away(self, code=None): """Send arm away command.""" self._client.arm_away( code=code, code_arm_required=self._code_arm_required, auto_bypass=self._auto_bypass, ) def alarm_arm_home(self, code=None): """Send arm home command.""" self._client.arm_home( code=code, code_arm_required=self._code_arm_required, auto_bypass=self._auto_bypass, ) def alarm_arm_night(self, code=None): """Send arm night command.""" self._client.arm_night( code=code, code_arm_required=self._code_arm_required, alt_night_mode=self._alt_night_mode, auto_bypass=self._auto_bypass, ) def alarm_toggle_chime(self, code=None): """Send toggle chime command.""" if code: self._client.send(f"{code!s}9") def alarm_keypress(self, keypress): """Send custom keypresses.""" if keypress: self._client.send(keypress)
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova.virt.baremetal import baremetal_states


class NodeDriver(object):

    def __init__(self, virtapi):
        self.virtapi = virtapi

    def cache_images(self, context, node, instance, **kwargs):
        raise NotImplementedError()

    def destroy_images(self, context, node, instance):
        raise NotImplementedError()

    def activate_bootloader(self, context, node, instance, **kwargs):
        raise NotImplementedError()

    def deactivate_bootloader(self, context, node, instance):
        raise NotImplementedError()

    def activate_node(self, context, node, instance):
        """For operations after power on."""
        raise NotImplementedError()

    def deactivate_node(self, context, node, instance):
        """For operations before power off."""
        raise NotImplementedError()

    def get_console_output(self, node, instance):
        raise NotImplementedError()

    def dhcp_options_for_instance(self, instance):
        """Optional override to return the DHCP options to use for instance.

        If no DHCP options are needed, this should not be overridden or
        None should be returned.
        """
        return None


class PowerManager(object):

    def __init__(self, **kwargs):
        self.state = baremetal_states.DELETED

    def activate_node(self):
        self.state = baremetal_states.ACTIVE
        return self.state

    def reboot_node(self):
        self.state = baremetal_states.ACTIVE
        return self.state

    def deactivate_node(self):
        self.state = baremetal_states.DELETED
        return self.state

    def is_power_on(self):
        """Return True or False according to the node's power state."""
        return True

    # TODO(NTTdocomo): split out console methods to its own class
    def start_console(self):
        pass

    def stop_console(self):
        pass
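# A hypothetical no-op driver (not part of nova) showing which hooks a
# concrete NodeDriver implementation, e.g. a PXE-based one, would have to
# fill in. It records calls instead of touching real hardware.
class FakeNodeDriver(NodeDriver):

    def __init__(self, virtapi):
        super(FakeNodeDriver, self).__init__(virtapi)
        self.calls = []

    def cache_images(self, context, node, instance, **kwargs):
        self.calls.append('cache_images')

    def destroy_images(self, context, node, instance):
        self.calls.append('destroy_images')

    def activate_bootloader(self, context, node, instance, **kwargs):
        self.calls.append('activate_bootloader')

    def deactivate_bootloader(self, context, node, instance):
        self.calls.append('deactivate_bootloader')

    def activate_node(self, context, node, instance):
        self.calls.append('activate_node')

    def deactivate_node(self, context, node, instance):
        self.calls.append('deactivate_node')

    def get_console_output(self, node, instance):
        return ''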
# Copyright 2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Configurations.""" from lambada_lm.config_registry import config_registry config_registry.register('default', { # Data 'read' : 'continuosly_with_extra_word', 'eval_read': 'continuosly_with_extra_word', 'num_steps' : 100, 'eval_num_steps' : 100, # Schedule 'monitoring_frequency' : 100, 'saving_frequency' : 5000, 'max_batches_per_epoch' : 5000, 'num_epochs' : 100, 'start_annealing' : 20, 'lr_decay' : 0.8, # Model 'init_scale' : 0.1, 'forget_bias' : 0.0, 'dim' : 128, 'architecture' : 'lstm', 'act' : 'relu', 'width' : -1, # Optimization 'optimizer' : 'GradientDescentOptimizer', 'batch_size' : 32, 'learning_rate' : 1.0, 'lr_min': 0.000001, 'momentum' : 0.9, 'epsilon' : 1e-8, 'max_grad_norm': 5.0, 'next_worker_delay' : 1500, }) c = config_registry['default'] c['dim'] = 512 c['read'] = 'shards_continuosly_with_bos' c['eval_read'] = 'padded_sentences_with_bos' c['eval_num_steps'] = 210 config_registry.register('lambada', c) c = config_registry['lambada'] c['optimizer'] = 'AdamOptimizer' c['learning_rate'] = 0.001 config_registry.register('lambAdam', c) c = config_registry['lambAdam'] c['architecture'] = 'conv' c['width'] = 5 config_registry.register('lambAdamConv', c)
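# The derive-and-re-register pattern above (fetch 'default', tweak a few
# keys, register under a new name) relies on the registry returning a copy on
# item access; otherwise mutating `c` would also change the config already
# registered. A minimal registry with the assumed copy-on-read semantics (the
# real lambada_lm.config_registry may differ):
import copy


class ExampleConfigRegistry(object):

    def __init__(self):
        self._configs = {}

    def register(self, name, config):
        if name in self._configs:
            raise KeyError('config %r is already registered' % name)
        self._configs[name] = copy.deepcopy(config)

    def __getitem__(self, name):
        # Hand back a deep copy so callers can derive new configs safely.
        return copy.deepcopy(self._configs[name])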
""" Test suite for django-elasticsearch. """ from django.test import TestCase from testproj.myapp.models import Entry, Blog, StandardAutoFieldModel, Person, TestFieldModel, EModel import datetime import time class DjangoESTest(TestCase): # multi_db = True # def test_add_and_delete_blog(self): # blog1 = Blog(title="blog1") # blog1.save() # self.assertEqual(Blog.objects.count(), 1) # blog2 = Blog(title="blog2") # self.assertEqual(blog2.pk, None) # blog2.save() # self.assertNotEqual(blog2.pk, None) # self.assertEqual(Blog.objects.count(), 2) # blog2.delete() # self.assertEqual(Blog.objects.count(), 1) # blog1.delete() # self.assertEqual(Blog.objects.count(), 0) def test_simple_get(self): blog1 = Blog(title="blog1") blog1.save() blog2 = Blog(title="blog2") blog2.save() self.assertEqual(Blog.objects.count(), 2) self.assertEqual( Blog.objects.get(title="blog2"), blog2 ) self.assertEqual( Blog.objects.get(title="blog1"), blog1 ) def test_simple_filter(self): blog1 = Blog(title="same title") blog1.save() blog2 = Blog(title="same title") blog2.save() blog3 = Blog(title="another title") blog3.save() self.assertEqual(Blog.objects.count(), 3) blog4 = Blog.objects.get(pk=blog1.pk) self.assertEqual(blog4, blog1) self.assertEqual( Blog.objects.filter(title="same title").count(), 2 ) self.assertEqual( Blog.objects.filter(title="same title", pk=blog1.pk).count(), 1 ) self.assertEqual( Blog.objects.filter(title__startswith="same").count(), 2 ) self.assertEqual( Blog.objects.filter(title__istartswith="SAME").count(), 2 ) self.assertEqual( Blog.objects.filter(title__endswith="title").count(), 3 ) self.assertEqual( Blog.objects.filter(title__iendswith="Title").count(), 3 ) self.assertEqual( Blog.objects.filter(title__icontains="same").count(), 2 ) self.assertEqual( Blog.objects.filter(title__contains="same").count(), 2 ) self.assertEqual( Blog.objects.filter(title__iexact="same Title").count(), 2 ) self.assertEqual( Blog.objects.filter(title__regex="s.me.*").count(), 2 ) self.assertEqual( Blog.objects.filter(title__iregex="S.me.*").count(), 2 ) def test_change_model(self): blog1 = Blog(title="blog 1") blog1.save() self.assertEqual(Blog.objects.count(), 1) blog1.title = "new title" blog1.save() self.assertEqual(Blog.objects.count(), 1) bl = Blog.objects.all()[0] self.assertEqual(blog1.title, bl.title) bl.delete() # def test_dates_ordering(self): # now = datetime.datetime.now() # before = now - datetime.timedelta(days=1) # # entry1 = Entry(title="entry 1", date_published=now) # entry1.save() # # entry2 = Entry(title="entry 2", date_published=before) # entry2.save() # # self.assertEqual( # list(Entry.objects.order_by('-date_published')), # [entry1, entry2] # ) # ## self.assertEqual( ## list(Entry.objects.order_by('date_published')), ## [entry2, entry1] ## ) # # ## def test_dates_less_and_more_than(self): ## now = datetime.datetime.now() ## before = now + datetime.timedelta(days=1) ## after = now - datetime.timedelta(days=1) ## ## entry1 = Entry(title="entry 1", date_published=now) ## entry1.save() ## ## entry2 = Entry(title="entry 2", date_published=before) ## entry2.save() ## ## entry3 = Entry(title="entry 3", date_published=after) ## entry3.save() ## ## a = list(Entry.objects.filter(date_published=now)) ## self.assertEqual( ## list(Entry.objects.filter(date_published=now)), ## [entry1] ## ) ## self.assertEqual( ## list(Entry.objects.filter(date_published__lt=now)), ## [entry3] ## ) ## self.assertEqual( ## list(Entry.objects.filter(date_published__gt=now)), ## [entry2] ## ) # # def test_complex_queries(self): # 
p1 = Person(name="igor", surname="duck", age=39) # p1.save() # p2 = Person(name="andrea", surname="duck", age=29) # p2.save() # self.assertEqual( # Person.objects.filter(name="igor", surname="duck").count(), # 1 # ) # self.assertEqual( # Person.objects.filter(age__gte=20, surname="duck").count(), # 2 # ) # # def test_fields(self): # t1 = TestFieldModel(title="p1", # mlist=["ab", "bc"], # mdict = {'a':23, "b":True }, # ) # t1.save() # # t = TestFieldModel.objects.get(id=t1.id) # self.assertEqual(t.mlist, ["ab", "bc"]) # self.assertEqual(t.mlist_default, ["a", "b"]) # self.assertEqual(t.mdict, {'a':23, "b":True }) # self.assertEqual(t.mdict_default, {"a": "a", 'b':1}) # # # def test_embedded_model(self): # em = EModel(title="1", pos = 1) # em2 = EModel(title="2", pos = 2) # t1 = TestFieldModel(title="p1", # mlist=[em, em2], # mdict = {'a':em, "b":em2 }, # ) # t1.save() # # t = TestFieldModel.objects.get(id=t1.id) # self.assertEqual(len(t.mlist), 2) # self.assertEqual(t.mlist[0].test_func(), 1) # self.assertEqual(t.mlist[1].test_func(), 2) # # def test_simple_foreign_keys(self): # now = datetime.datetime.now() # # blog1 = Blog(title="Blog") # blog1.save() # entry1 = Entry(title="entry 1", blog=blog1) # entry1.save() # entry2 = Entry(title="entry 2", blog=blog1) # entry2.save() # self.assertEqual(Entry.objects.count(), 2) # # for entry in Entry.objects.all(): # self.assertEqual( # blog1, # entry.blog # ) # # blog2 = Blog(title="Blog") # blog2.save() # entry3 = Entry(title="entry 3", blog=blog2) # entry3.save() # self.assertEqual( # # it's' necessary to explicitly state the pk here # len( list(Entry.objects.filter(blog=blog1.pk))), # len([entry1, entry2]) # ) # # ## def test_foreign_keys_bug(self): ## blog1 = Blog(title="Blog") ## blog1.save() ## entry1 = Entry(title="entry 1", blog=blog1) ## entry1.save() ## self.assertEqual( ## # this should work too ## list(Entry.objects.filter(blog=blog1)), ## [entry1] ## ) # ## def test_standard_autofield(self): ## ## sam1 = StandardAutoFieldModel(title="title 1") ## sam1.save() ## sam2 = StandardAutoFieldModel(title="title 2") ## sam2.save() ## ## self.assertEqual( ## StandardAutoFieldModel.objects.count(), ## 2 ## ) ## ## sam1_query = StandardAutoFieldModel.objects.get(title="title 1") ## self.assertEqual( ## sam1_query.pk, ## sam1.pk ## ) ## ## sam1_query = StandardAutoFieldModel.objects.get(pk=sam1.pk) ## #
from django.shortcuts import render

from news.models import News
from slides.models import Slide


def home(request):
    latest_news_list = News.objects.order_by('-publication_date')[:4]
    try:
        slide = Slide.objects.get(title='home')
    except Slide.DoesNotExist:
        slide = None
    context = {
        'latest_news_list': latest_news_list,
        'slide': slide,
    }
    return render(request, 'bouraka/home.html', context)


def gallery(request):
    return render(request, 'bouraka/gallery.html')


def history(request):
    return render(request, 'bouraka/history.html')


def team(request):
    return render(request, 'bouraka/team.html')


def shell(request):
    return render(request, 'bouraka/shell.html')


def educeco(request):
    return render(request, 'bouraka/educeco.html')


def michelin(request):
    return render(request, 'bouraka/michelin.html')


def futur(request):
    return render(request, 'bouraka/futur.html')


def envol(request):
    return render(request, 'bouraka/envol.html')


def epic(request):
    return render(request, 'bouraka/epic.html')


def orca(request):
    return render(request, 'bouraka/orca.html')


def elec(request):
    return render(request, 'bouraka/elec.html')


def roues(request):
    return render(request, 'bouraka/roues.html')


def moteur(request):
    return render(request, 'bouraka/moteur.html')


def simulateur(request):
    return render(request, 'bouraka/simulateur.html')


def accomplishments(request):
    return render(request, 'bouraka/accomplishments.html')


def contacts(request):
    return render(request, 'bouraka/contacts.html')
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import tensorflow as tf
import picpac

flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('max_steps', 200000, 'Number of steps to run trainer.')
flags.DEFINE_integer('channels', 3, '')
flags.DEFINE_integer('out_channels', 2, '')
flags.DEFINE_string('train_dir', 'data', 'Directory to put the training data.')
flags.DEFINE_boolean('fake_data', False, 'If true, uses fake data '
                     'for unit testing.')


def cp_layer(bottom, scope, params, ksize, kstride, psize, pstride, ch_in,
             ch_out, relu=True):
    """Convolution layer with optional ReLU and max-pooling."""
    with tf.name_scope(scope):
        filters = tf.Variable(
                    tf.truncated_normal(
                        [ksize, ksize, ch_in, ch_out],
                        dtype=tf.float32, stddev=0.01),
                    name='filters')
        out = tf.nn.conv2d(bottom, filters, [1, kstride, kstride, 1],
                           padding="SAME")
        biases = tf.Variable(
                    tf.constant(0.0, shape=[ch_out], dtype=tf.float32),
                    trainable=True, name='bias')
        out = tf.nn.bias_add(out, biases)
        if relu:
            out = tf.nn.relu(out, name=scope)
        if psize is not None:
            out = tf.nn.max_pool(out, ksize=[1, psize, psize, 1],
                                 strides=[1, pstride, pstride, 1],
                                 padding='SAME', name='pool')
        params.extend([filters, biases])
    return out


def inference(images, train=True):
    params = []
    out = cp_layer(images, "layer1", params, 5, 2, 2, 2, FLAGS.channels, 100)
    out = cp_layer(out, "layer2", params, 5, 2, 2, 2, 100, 200)
    out = cp_layer(out, "layer3", params, 3, 1, None, None, 200, 300)
    out = cp_layer(out, "layer4", params, 3, 1, None, None, 300, 300)
    if train:
        # keep_prob=0.1: only 10% of activations are kept during training
        out = tf.nn.dropout(out, 0.1, name='dropout')
    out = cp_layer(out, "score", params, 1, 1, None, None, 300,
                   FLAGS.out_channels, relu=False)
    score = out
    with tf.name_scope('upscale'):
        # Build the dynamic output shape: same spatial dims as the input,
        # but with out_channels channels.
        shape = tf.unpack(tf.shape(images))
        shape.pop()
        shape.append(tf.constant(FLAGS.out_channels, dtype=tf.int32))
        filters = tf.Variable(
                    tf.truncated_normal(
                        [31, 31, FLAGS.out_channels, FLAGS.out_channels],
                        dtype=tf.float32, stddev=0.01),
                    name='filters')
        logits = tf.nn.conv2d_transpose(out, filters, tf.pack(shape),
                                        [1, 16, 16, 1], padding='SAME',
                                        name='upscale')
        # do we want to add bias?
    return logits, score, params


def fcn_loss(logits, labels):
    with tf.name_scope('loss'):
        logits = tf.reshape(logits, (-1, FLAGS.out_channels))
        labels = tf.to_int32(labels)    # float from picpac
        labels = tf.reshape(labels, (-1,))
        xe = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits, labels, name='xentropy')
        return tf.reduce_mean(xe, name='xentropy_mean')


def training(loss, rate):
    tf.scalar_summary(loss.op.name, loss)
    optimizer = tf.train.GradientDescentOptimizer(rate)
    global_step = tf.Variable(0, name='global_step', trainable=False)
    return optimizer.minimize(loss, global_step=global_step)


def run_training():
    seed = 1996
    config = dict(seed=seed,
                  loop=True,
                  shuffle=True,
                  reshuffle=True,
                  #resize_width=256,
                  #resize_height=256,
                  batch=1,
                  split=1,
                  split_fold=0,
                  annotate='json',
                  channels=FLAGS.channels,
                  stratify=False,
                  #mixin="db0",
                  #mixin_group_delta=0,
                  #pert_color1=10,
                  #pert_angle=5,
                  #pert_min_scale=0.8,
                  #pert_max_scale=1.2,
                  #pad=False,
                  #pert_hflip=True,
                  channel_first=False  # this is tensorflow specific
                                       # Caffe's dimension order is different.
                  )
    db = 'db'
    tr_stream = picpac.ImageStream(db, negate=False, perturb=True, **config)

    BATCH = 1   # batch dimension of the placeholders; matches batch=1 above

    with tf.Graph().as_default():
        X = tf.placeholder(tf.float32,
                           shape=(BATCH, None, None, FLAGS.channels),
                           name="images")
        Y_ = tf.placeholder(tf.int32, shape=(BATCH, None, None, 1),
                            name="labels")
        logits, score, params = inference(X)
        loss = fcn_loss(logits, Y_)
        train_op = training(loss, FLAGS.learning_rate)
        summary_op = tf.merge_all_summaries()
        summary_writer = tf.train.SummaryWriter(FLAGS.train_dir,
                                                tf.get_default_graph())
        init = tf.initialize_all_variables()

        graph_txt = tf.get_default_graph().as_graph_def().SerializeToString()
        with open(os.path.join(FLAGS.train_dir, "graph"), "w") as f:
            f.write(graph_txt)

        saver = tf.train.Saver()

        with tf.Session() as sess:
            sess.run(init)
            for step in xrange(FLAGS.max_steps):
                images, labels, pad = tr_stream.next()
                #print(images.shape, labels.shape)
                feed_dict = {X: images, Y_: labels}
                #l_v, s_v = sess.run([logits, score], feed_dict=feed_dict)
                #print(images.shape, s_v.shape, l_v.shape)
                _, loss_value = sess.run([train_op, loss],
                                         feed_dict=feed_dict)
                if step % 100 == 0:
                    print('step %d: loss = %.4f' % (step, loss_value))
                    summary_str = sess.run(summary_op, feed_dict=feed_dict)
                    summary_writer.add_summary(summary_str, step)
                    summary_writer.flush()
                if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                    saver.save(sess, os.path.join(FLAGS.train_dir, "model"),
                               global_step=step)


def main(_):
    run_training()


if __name__ == '__main__':
    tf.app.run()
from django.core.urlresolvers import reverse from django.conf.urls import patterns, url from django.views.decorators.cache import never_cache from restlib2.params import Parametizer, BoolParam, StrParam from avocado.models import DataContext, DataField from avocado.query import pipeline from .base import BaseResource, ThrottledResource class StatsResource(BaseResource): def get(self, request): uri = request.build_absolute_uri return { 'title': 'Serrano Stats Endpoint', '_links': { 'self': { 'href': uri(reverse('serrano:stats:root')), }, 'counts': { 'href': uri(reverse('serrano:stats:counts')), }, } } class CountStatsParametizer(Parametizer): aware = BoolParam(False) processor = StrParam('default', choices=pipeline.query_processors) class CountStatsResource(ThrottledResource): parametizer = CountStatsParametizer def get(self, request): params = self.get_params(request) if params['aware']: context = self.get_context(request) else: context = DataContext() # Get all published app/model pairs to produce counts for. model_names = DataField.objects.published()\ .values_list('app_name', 'model_name')\ .order_by('model_name').distinct() data = [] models = set() QueryProcessor = pipeline.query_processors[params['processor']] for app_name, model_name in model_names: # DataField used here to resolve foreign key-based fields. model = DataField(app_name=app_name, model_name=model_name).model # Foreign-key based fields may resolve to models that are already # accounted for. if model in models: continue models.add(model) # Build a queryset through the context which is toggled by # the parameter. processor = QueryProcessor(context=context, tree=model) queryset = processor.get_queryset(request=request) count = queryset.values('pk').distinct().count() opts = model._meta # Format is called to resolve Django's internal proxy wrapper. verbose_name = opts.verbose_name.format() verbose_name_plural = opts.verbose_name_plural.format() # Assume no custom verbose_name as been set in Meta class, so # apply a minimal title-case. if verbose_name.islower(): verbose_name = verbose_name.title() if verbose_name_plural.islower(): verbose_name_plural = verbose_name_plural.title() data.append({ 'count': count, 'app_name': app_name, 'model_name': model_name, 'verbose_name': verbose_name, 'verbose_name_plural': verbose_name_plural, }) return data # Same logic, but supports submitting context via a POST. post = get stats_resource = never_cache(StatsResource()) counts_resource = never_cache(CountStatsResource()) # Resource endpoints urlpatterns = patterns( '', url(r'^$', stats_resource, name='root'), url(r'^counts/$', counts_resource, name='counts'), )
import json as jsonlib
import os
from pathlib import Path
from typing import Dict, NamedTuple, Union, TYPE_CHECKING
import urllib.request

import jsonschema
import yaml as yamllib

from lightbus.exceptions import UnexpectedConfigurationFormat
from lightbus.schema.hints_to_schema import python_type_to_json_schemas, SCHEMA_URI
from lightbus.utilities.casting import cast_to_hint
from lightbus.utilities.deforming import deform_to_bus

if TYPE_CHECKING:
    # pylint: disable=unused-import,cyclic-import
    from .structure import RootConfig, BusConfig, ApiConfig


class Config:
    """Provides access to configuration options

    There are two forms of configuration:

        * Bus-level configuration, `config.bus()`
        * API-level configuration, `config.api(api_name)`

    Bus-level configuration is global to lightbus. API-level configuration
    will normally have a default catch-all definition, but can be customised
    on a per-api basis.
    """

    _config: "RootConfig"

    def __init__(self, root_config: "RootConfig"):
        self._config = root_config

    def bus(self) -> "BusConfig":
        return self._config.bus

    def api(self, api_name=None) -> "ApiConfig":
        """Returns config for the given API

        If there is no API-specific config available for the given api_name,
        then the root API config will be returned.
        """
        return self._config.apis.get(api_name, None) or self._config.apis["default"]

    def apis(self) -> Dict[str, "ApiConfig"]:
        return self._config.apis

    def plugin(self, plugin_name) -> NamedTuple:
        return getattr(self._config.plugins, plugin_name)

    @classmethod
    def load_file(cls, file_path: Union[str, Path]):
        """Instantiate the config from the given file path

        Files ending in `.json` will be parsed as JSON, otherwise the file
        will be parsed as YAML.
        """
        if str(file_path).startswith("http://") or str(file_path).startswith("https://"):
            response = urllib.request.urlopen(file_path, timeout=5)
            encoded_config = response.read()
            # Guard against a missing Content-Type header
            content_type = response.headers.get("Content-Type", "")
            if "json" in content_type or str(file_path).endswith(".json"):
                return cls.load_json(encoded_config)
            else:
                return cls.load_yaml(encoded_config)
        else:
            file_path = Path(file_path)
            encoded_config = file_path.read_text(encoding="utf8")

            if file_path.name.endswith(".json"):
                return cls.load_json(encoded_config)
            else:
                return cls.load_yaml(encoded_config)

    @classmethod
    def load_json(cls, json: str):
        """Instantiate the config from a JSON string"""
        return cls.load_dict(config=jsonlib.loads(json))

    @classmethod
    def load_yaml(cls, yaml: str):
        """Instantiate the config from a YAML string"""
        config = yamllib.safe_load(yaml)

        if not isinstance(config, dict):
            raise UnexpectedConfigurationFormat(
                f"The config file was loaded but it appears to be in an unexpected format. "
                f"The root of the configuration should be a key/value mapping, but the "
                f"type '{type(config).__name__}' was found instead. Check your config "
                f"file is correctly formatted."
) return cls.load_dict(config=config) @classmethod def load_dict(cls, config: dict, set_defaults=True): """Instantiate the config from a dictionary""" # pylint: disable=import-outside-toplevel from .structure import RootConfig config = config.copy() if set_defaults: config = set_default_config(config) validate_config(config) return cls(root_config=cast_to_hint(config, RootConfig)) @classmethod def default(cls): return cls.load_dict(config={}, set_defaults=True) def __getattr__(self, item): if hasattr(self._config, item): return getattr(self._config, item) else: raise AttributeError(f"No root-level configuration option named '{item}'") def validate_config(config: dict): """Validate the provided config dictionary against the config json schema""" json_schema = config_as_json_schema() jsonschema.validate(config, json_schema) def config_as_json_schema() -> dict: """Get the configuration structure as a json schema""" # pylint: disable=import-outside-toplevel from .structure import RootConfig schema, = python_type_to_json_schemas(RootConfig) # Some of the default values will still be python types, # so let's use deform_to_bus to turn them into something # that'll be json safe schema = deform_to_bus(schema) schema["$schema"] = SCHEMA_URI return schema def set_default_config(config: dict) -> dict: """Set the default configuration options on a loaded config dictionary""" env_service_name = os.environ.get("LIGHTBUS_SERVICE_NAME") if env_service_name: config.setdefault("service_name", env_service_name) env_process_name = os.environ.get("LIGHTBUS_PROCESS_NAME") if env_process_name: config.setdefault("process_name", env_process_name) config.setdefault("apis", {}) config.setdefault("bus", {}) config["apis"].setdefault("default", {}) config["bus"].setdefault("schema", {}) config["apis"]["default"].setdefault("rpc_transport", {"redis": {}}) config["apis"]["default"].setdefault("result_transport", {"redis": {}}) config["apis"]["default"].setdefault("event_transport", {"redis": {}}) config["bus"]["schema"].setdefault("transport", {"redis": {}}) return config
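# A small usage sketch for Config above (illustrative; the API name
# "support.cases" is made up). Config.default() runs set_default_config(),
# so the "default" API entry always exists and config.api() is safe to call
# without a name.
if __name__ == "__main__":
    config = Config.default()
    print(config.bus())                  # bus-level configuration
    print(config.api())                  # root ("default") API configuration
    print(config.api("support.cases"))   # per-API config, falls back to default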
# -*- coding: utf-8 -*-
# workaround for console encoding differences; safe to ignore
import sys
import imp

imp.reload(sys)
try:
    sys.setdefaultencoding('UTF8')
except Exception as E:
    pass

import testValue
from popbill import StatementService, PopbillException

statementService = StatementService(testValue.LinkID, testValue.SecretKey)
statementService.IsTest = testValue.IsTest
statementService.IPRestrictOnOff = testValue.IPRestrictOnOff
statementService.UseStaticIP = testValue.UseStaticIP
statementService.UseLocalTimeYN = testValue.UseLocalTimeYN

'''
Deletes a file attached to an e-statement.
- The file ID that identifies the file can be found in the AttachedFile
  field of the attached file list (GetFileList API) response.
- https://docs.popbill.com/statement/python/api#DeleteFile
'''

try:
    print("=" * 15 + " Delete e-statement attached file " + "=" * 15)

    # Popbill member business registration number
    CorpNum = testValue.testCorpNum

    # Statement item code: 121-statement, 122-bill, 123-estimate,
    # 124-purchase order, 125-deposit slip, 126-receipt
    ItemCode = 121

    # E-statement document number
    MgtKey = "20210429-001"

    # FileID to delete; the attachedFile value from the attached file list
    # (GetFileList API) response
    FileID = "4DB71521-DC61-43EB-A061-DB0987ABACAB.PBF"

    # Popbill member ID
    UserID = testValue.testUserID

    result = statementService.deleteFile(CorpNum, ItemCode, MgtKey, FileID, UserID)
    print("Result: [%d] %s" % (result.code, result.message))

except PopbillException as PE:
    print("Exception occurred: [%d] %s" % (PE.code, PE.message))
#!/usr/bin/env python

import sys

from setuptools import setup, find_packages

from fabric.version import get_version


readme = open('README').read()

long_description = """
To find out what's new in this version of Fabric, please see `the changelog
<http://docs.fabfile.org/changes/%s.html>`_.

You can also install the `in-development version
<https://github.com/bitprophet/fabric/tarball/master#egg=fabric-dev>`_ using
pip, with `pip install fabric==dev`.

----

%s

----

For more information, please see the Fabric website or execute ``fab --help``.
""" % (get_version('short'), readme)

# PyCrypto>2.0 + Python 2.5 + pip == bad times.
# We can't easily detect pip usage at this point, but we can at least limit
# our "downgrade" of the PyCrypto requirement to 2.5-only.
PYCRYPTO = "<2.1" if (sys.version_info[:2] == (2, 5)) else ">=1.9"

setup(
    name='Fabric',
    version=get_version('short'),
    description='Fabric is a simple, Pythonic tool for remote execution and deployment.',
    long_description=long_description,
    author='Jeff Forcier',
    author_email='jeff@bitprophet.org',
    url='http://fabfile.org',
    packages=find_packages(),
    test_suite='nose.collector',
    tests_require=['nose', 'fudge'],
    install_requires=['pycrypto %s' % PYCRYPTO, 'paramiko >=1.7.6'],
    entry_points={
        'console_scripts': [
            'fab = fabric.main:main',
        ]
    },
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: BSD License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Topic :: Software Development',
        'Topic :: Software Development :: Build Tools',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: System :: Clustering',
        'Topic :: System :: Software Distribution',
        'Topic :: System :: Systems Administration',
    ],
)
import datetime


class Token(object):
    accessToken = None
    refreshToken = None
    generationDate = None
    expiresIn = None  # in seconds
    type = None
    userId = None

    def __init__(self, userId, accessToken, refreshToken, type, expiresIn,
                 generationDate=None):
        self.userId = userId
        self.accessToken = accessToken
        self.refreshToken = refreshToken
        self.type = type
        self.expiresIn = expiresIn
        self.generationDate = generationDate
        if self.generationDate is None:
            self.generationDate = datetime.datetime.utcnow().timestamp()

    def isExpired(self):
        expirationDate = float(self.generationDate) + float(self.expiresIn)
        return datetime.datetime.utcnow().timestamp() > expirationDate


# Create a Token from a response of the Requests package: either a sequence
# of positional fields, or a mapping with the standard OAuth2 keys.
def createFromDDIC(dictionary):
    try:
        return Token(dictionary[0], dictionary[1], dictionary[2],
                     dictionary[3], dictionary[4], dictionary[5])
    except (KeyError, IndexError):
        # Mapping form: generationDate is omitted and defaults to "now".
        return Token(dictionary["user_id"], dictionary["access_token"],
                     dictionary["refresh_token"], dictionary["token_type"],
                     dictionary["expires_in"])
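# Usage sketch: building a Token from a typical OAuth2 token response. The
# keys match the mapping branch of createFromDDIC above; the values are
# illustrative only.
if __name__ == '__main__':
    response = {
        "user_id": "42",
        "access_token": "abc123",
        "refresh_token": "def456",
        "token_type": "Bearer",
        "expires_in": 3600,
    }
    token = createFromDDIC(response)
    print(token.type, token.isExpired())  # a freshly issued token is not expired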
from __future__ import absolute_import, division, unicode_literals from django.db import models from django.utils.encoding import python_2_unicode_compatible from modelcluster.fields import ParentalKey from modelcluster.models import ClusterableModel from wagtail.wagtailadmin.edit_handlers import ( FieldPanel, InlinePanel, MultiFieldPanel, ) from wagtail.wagtailcore.fields import RichTextField from wagtail.wagtailcore.models import Page from wagtail.wagtailimages.edit_handlers import ImageChooserPanel from wagtail.wagtailsnippets.edit_handlers import SnippetChooserPanel from wagtail.wagtailsnippets.models import register_snippet from . import utils @python_2_unicode_compatible class ThemeContent(ClusterableModel): name = models.CharField(max_length=255) contact_email = models.EmailField( blank=True, null=True, help_text="Only provide if this should be different from the site default email contact address.", ) default = models.BooleanField(default=False) panels = [ FieldPanel('name'), FieldPanel('contact_email'), FieldPanel('default'), InlinePanel('block_links', label="Content Blocks"), InlinePanel('follow_links', label="Follow Links"), InlinePanel('logo_links', label="Logos"), ] def __str__(self): return self.name register_snippet(ThemeContent) @python_2_unicode_compatible class Theme(models.Model): name = models.CharField(max_length=1024) folder = models.CharField(max_length=1024, default="themes/default") content = models.ForeignKey(ThemeContent, null=True) def __str__(self): return self.name panels = [ FieldPanel('name'), FieldPanel('folder'), SnippetChooserPanel('content'), ] register_snippet(Theme) class ThemeablePage(Page): ''' Abstract model class to inherit from for themable pages ''' is_creatable = False class Meta: abstract = True theme = models.ForeignKey( Theme, on_delete=models.SET_NULL, blank=True, null=True, ) def get_template(self, request, *args, **kwargs): original_template = super(ThemeablePage, self).get_template(request, *args, **kwargs) if self.theme is None: return original_template custom_template = utils.get_themed_template_name(self.theme, original_template) if utils.template_exists(custom_template): return custom_template return original_template style_panels = [ MultiFieldPanel( [ SnippetChooserPanel('theme'), ], heading="Theme" ), ] @python_2_unicode_compatible class TextBlock(models.Model): name = models.CharField(max_length=255) usage = models.CharField(max_length=255, blank=True, default="") heading = models.TextField(blank=True, default="") content = RichTextField(blank=True, default="") panels = [ FieldPanel('name'), FieldPanel('heading'), FieldPanel('content'), FieldPanel('usage'), ] def __str__(self): return self.name register_snippet(TextBlock) @python_2_unicode_compatible class FollowLink(models.Model): name = models.CharField(max_length=255) usage = models.CharField(max_length=255, blank=True, default="") link = models.CharField(max_length=1024) panels = [ FieldPanel('name'), FieldPanel('link'), FieldPanel('usage'), ] def __str__(self): return self.name register_snippet(FollowLink) @python_2_unicode_compatible class LogoBlock(models.Model): name = models.CharField(max_length=255) usage = models.CharField(max_length=255, blank=True, default="") logo = models.ForeignKey( 'images.AttributedImage', ) link = models.CharField(max_length=2048, blank=True, null=True) panels = [ FieldPanel('name'), ImageChooserPanel('logo'), FieldPanel('link'), FieldPanel('usage'), ] def __str__(self): return self.name register_snippet(LogoBlock) class 
ContentBlockLink(models.Model): block = models.ForeignKey( "TextBlock", related_name='content_links' ) theme_content = ParentalKey( "ThemeContent", related_name='block_links' ) panels = [SnippetChooserPanel("block")] class ContentFollowLink(models.Model): block = models.ForeignKey( "FollowLink", related_name='content_links' ) theme_content = ParentalKey( "ThemeContent", related_name='follow_links' ) panels = [SnippetChooserPanel("block")] class ContentLogoLink(models.Model): block = models.ForeignKey( "LogoBlock", related_name='content_links' ) theme_content = ParentalKey( "ThemeContent", related_name='logo_links' ) panels = [SnippetChooserPanel("block")]
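if __name__ == '__main__':
    # Behavior sketch (plain Python, illustration only): mirrors the template
    # fallback in ThemeablePage.get_template. pick_template and its inputs are
    # stand-ins; the real lookup lives in utils.get_themed_template_name and
    # utils.template_exists, whose signatures are not shown in this module.
    def pick_template(theme_folder, original, existing_templates):
        themed = "%s/%s" % (theme_folder, original)
        return themed if themed in existing_templates else original

    assert pick_template("themes/dark", "page.html",
                         {"themes/dark/page.html"}) == "themes/dark/page.html"
    assert pick_template("themes/dark", "page.html", set()) == "page.html"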
import os import unittest from conans.client.cache.cache import ClientCache from conans.client.tools import chdir from conans.model.info import ConanInfo from conans.model.ref import ConanFileReference from conans.paths import (BUILD_FOLDER, CONANINFO, EXPORT_FOLDER, PACKAGES_FOLDER) from conans.search.search import search_packages, search_recipes from conans.test.utils.test_files import temp_folder from conans.test.utils.tools import TestBufferConanOutput from conans.util.files import save, mkdir class SearchTest(unittest.TestCase): def setUp(self): folder = temp_folder() self.cache = ClientCache(folder, output=TestBufferConanOutput()) mkdir(self.cache.store) def basic_test2(self): with chdir(self.cache.store): ref1 = ConanFileReference.loads("opencv/2.4.10@lasote/testing") root_folder = str(ref1).replace("@", "/") artifacts = ["a", "b", "c"] reg1 = "%s/%s" % (root_folder, EXPORT_FOLDER) os.makedirs(reg1) for artif_id in artifacts: build1 = "%s/%s/%s" % (root_folder, BUILD_FOLDER, artif_id) artif1 = "%s/%s/%s" % (root_folder, PACKAGES_FOLDER, artif_id) os.makedirs(build1) info = ConanInfo().loads("[settings]\n[options]") save(os.path.join(artif1, CONANINFO), info.dumps()) packages = search_packages(self.cache.package_layout(ref1), "") all_artif = [_artif for _artif in sorted(packages)] self.assertEqual(all_artif, artifacts) def pattern_test(self): with chdir(self.cache.store): references = ["opencv/2.4.%s@lasote/testing" % ref for ref in ("1", "2", "3")] refs = [ConanFileReference.loads(reference) for reference in references] for ref in refs: root_folder = str(ref).replace("@", "/") reg1 = "%s/%s" % (root_folder, EXPORT_FOLDER) os.makedirs(reg1) recipes = search_recipes(self.cache, "opencv/*@lasote/testing") self.assertEqual(recipes, refs) def case_insensitive_test(self): with chdir(self.cache.store): root_folder2 = "sdl/1.5/lasote/stable" ref2 = ConanFileReference.loads("sdl/1.5@lasote/stable") os.makedirs("%s/%s" % (root_folder2, EXPORT_FOLDER)) root_folder3 = "assimp/0.14/phil/testing" ref3 = ConanFileReference.loads("assimp/0.14@phil/testing") os.makedirs("%s/%s" % (root_folder3, EXPORT_FOLDER)) root_folder4 = "sdl/2.10/lasote/stable" ref4 = ConanFileReference.loads("sdl/2.10@lasote/stable") os.makedirs("%s/%s" % (root_folder4, EXPORT_FOLDER)) root_folder5 = "SDL_fake/1.10/lasote/testing" ref5 = ConanFileReference.loads("SDL_fake/1.10@lasote/testing") os.makedirs("%s/%s" % (root_folder5, EXPORT_FOLDER)) # Case insensitive searches reg_conans = sorted([str(_reg) for _reg in search_recipes(self.cache, "*")]) self.assertEqual(reg_conans, [str(ref5), str(ref3), str(ref2), str(ref4)]) reg_conans = sorted([str(_reg) for _reg in search_recipes(self.cache, pattern="sdl*")]) self.assertEqual(reg_conans, [str(ref5), str(ref2), str(ref4)]) # Case sensitive search self.assertEqual(str(search_recipes(self.cache, pattern="SDL*", ignorecase=False)[0]), str(ref5))
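if __name__ == '__main__':
    # Layout sketch (plain strings, illustration only): these tests build the
    # cache store by hand, mapping "@" in a reference to a path separator and
    # then appending the EXPORT/BUILD/PACKAGES subfolders from conans.paths.
    _ref = "opencv/2.4.10@lasote/testing"
    assert _ref.replace("@", "/") == "opencv/2.4.10/lasote/testing"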
# unzipR - A library for recursively extracting files.
# Copyright (C) 2014 Stoney Jackson <dr.stoney@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

'''
Unzipr determines a file's compression algorithm based on the file's
extension. Zip files are extracted into a directory with the same name as the
zip file minus its extension. So foo.zip is extracted into a directory named
foo.

New formats can be supported via Unzipr.registerUnzipFormat(). See
installRarSupport() at the end of this file for an example.
'''

import pathlib
import shutil
import logging

logger = logging.getLogger(__name__)


def deleteZipFilesFromDirectoryRecursively(directory):
    directory = pathlib.Path(directory)
    for a_file in directory.iterdir():
        if isZipFile(a_file):
            a_file.unlink()
        elif a_file.is_dir():
            deleteZipFilesFromDirectoryRecursively(a_file)


def unzipFileRecursively(zipfile, toDir=None):
    '''
    If toDir is None, zipfile is extracted to a directory whose name is the
    same as the zipfile's name minus its extensions.
    '''
    zipfile = pathlib.Path(zipfile)
    toDir = unzipFile(zipfile, toDir)
    unzipFilesInDirectoryRecursively(toDir)
    return toDir


def unzipFilesInDirectoryRecursively(directory):
    directory = pathlib.Path(directory)
    for a_file in directory.iterdir():
        logger.debug("processing " + str(a_file))
        if isZipFile(a_file):
            logger.debug("unzipping " + str(a_file))
            unzipFileRecursively(a_file)
        elif a_file.is_dir():
            logger.debug("recursing " + str(a_file))
            unzipFilesInDirectoryRecursively(a_file)


def unzipFile(zipfile, toDir=None):
    '''
    If toDir is None, zipfile is extracted to a directory whose name is the
    same as the zipfile's name minus its extensions.
    '''
    zipfile = pathlib.Path(zipfile)
    if toDir:
        toDir = pathlib.Path(toDir)
    else:
        toDir = zipfile.parent / getFileNameWithoutExtension(zipfile)
    shutil.unpack_archive(str(zipfile), str(toDir))
    return toDir


def getFileNameWithoutExtension(theFile):
    theFile = pathlib.Path(theFile)
    extension = getFileExtension(theFile)
    return theFile.name[:-len(extension)]


def isZipFile(zipfile):
    zipfile = pathlib.Path(zipfile)
    return zipfile.is_file() and fileHasSupportedExtension(zipfile)


def fileHasSupportedExtension(zipfile):
    zipfile = pathlib.Path(zipfile)
    extension = getFileExtension(zipfile)
    return isSupportedExtension(extension)


def getFileExtension(theFile):
    # Treat '.tar.gz' as a single extension; otherwise fall back to the last
    # suffix so files like 'archive.1.zip' still resolve to '.zip'.
    if len(theFile.suffixes) >= 2:
        lastTwoSuffixes = ''.join(theFile.suffixes[-2:])
        if lastTwoSuffixes == '.tar.gz':
            return lastTwoSuffixes
    return theFile.suffix


def isSupportedExtension(extension):
    return extension in getSupportedExtensions()


def getSupportedExtensions():
    supported_extensions = []
    for format_ in shutil.get_unpack_formats():
        supported_extensions += format_[1]
    return supported_extensions


def registerUnzipFormat(name, extensions, function):
    shutil.register_unpack_format(name, extensions, function)


def installRarSupport():
    try:
        import rarfile

        def unrar(zipFile, toDir):
            with rarfile.RarFile(zipFile) as rf:
                rf.extractall(path=toDir)

        registerUnzipFormat('rar', ['.rar'], unrar)
    except ImportError:
        pass


def install7zipSupport():
    if shutil.which('7z'):
        import subprocess

        def un7zip(zipFile, toDir):
            subprocess.call(['7z', 'x', str(zipFile), '-o' + str(toDir)])

        registerUnzipFormat('7zip', ['.7z'], un7zip)


installRarSupport()
install7zipSupport()
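if __name__ == '__main__':
    # Usage sketch (writes to a fresh temp directory; illustration only):
    # build a tiny zip, extract it recursively, and check the layout.
    import tempfile
    import zipfile

    _tmp = pathlib.Path(tempfile.mkdtemp())
    with zipfile.ZipFile(str(_tmp / 'bundle.zip'), 'w') as _z:
        _z.writestr('hello.txt', 'hi')
    _out = unzipFileRecursively(_tmp / 'bundle.zip')
    assert _out == _tmp / 'bundle'
    assert (_out / 'hello.txt').is_file()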
""" Utility functions related to jGrid2 """ import gdal import numpy as np import numpy.ma as ma import rastercube.imutils as imutils import rastercube.gdal_utils as gdal_utils def fracs_for_poly_bbox_xy(header, polygon_xy): """ Returns fractions covered by the given polygon. This is based on the polygon's bounding box. """ assert np.all([header.in_bounds_xy(p) for p in polygon_xy]), \ "Polygon not contained in jgrid" xy_from, xy_to = polygon_xy.min(axis=0), polygon_xy.max(axis=0) return header.fracs_for_rect_xy(xy_from, xy_to) def fracs_for_poly_bbox_latlng(header, polygon_latlng): poly_xy = np.array([header.latlng2xy(p) for p in polygon_latlng]) return fracs_for_poly_bbox_xy(header, poly_xy) def load_poly_xy_from_jgrid(header, polygon_xy, **kwargs): """ Given a header and a polygon (*assumed* to be fully contained in the jgrid), returns a masked array containing the jgrid data in the polygon. The returned masked array has the shape of the polygon bounding box but only pixels inside the polygon are unmasked """ assert np.all([header.in_bounds_xy(p) for p in polygon_xy]), \ "Polygon not contained in jgrid" xy_from, xy_to = polygon_xy.min(axis=0), polygon_xy.max(axis=0) ndvi_data = header.load_slice_xy(xy_from, xy_to, **kwargs) poly_mask = imutils.rasterize_poly(polygon_xy - xy_from, ndvi_data.shape) return ndvi_data, poly_mask, xy_from def load_poly_latlng_from_jgrid(header, polygon_latlng, **kwargs): """ Like `load_poly_xy_from_jgrid`, but the polygon is given in latlng """ poly_xy = np.array([header.latlng2xy(p) for p in polygon_latlng]) return load_poly_xy_from_jgrid(header, poly_xy, **kwargs) def load_poly_latlng_from_multi_jgrids(headers, polygon, **kwargs): """ Given a set of jgrid header, loads the given polygon from all all grids and reproject all of them on the first one. Returns: xy_from: A single xy_from Followed by a list of data/mask pairs : data0, mask0, data1, mask1, data2, mask2, ... 
""" header0 = headers[0] data0, mask0, xy_from0 = load_poly_latlng_from_jgrid(header0, polygon, **kwargs) retval = [xy_from0, data0, mask0] for _h in headers[1:]: _data, _mask, _xy_from = load_poly_latlng_from_jgrid(_h, polygon, **kwargs) # only reproject if needed if (not _h.spatialref.IsSame(header0.spatialref)) or \ (_h.geot != header0.geot): _data, _mask = reproject_jgrid_on_jgrid( header0, xy_from0, data0.shape, _h, _xy_from, _data, _mask ) retval.append(_data) retval.append(_mask) return retval def poly_latlng_for_frac(header, frac_num): """ Returns the latlng polygon corresponding to a given fraction """ poly = [ header.xy2latlng((header.x_start(frac_num), header.y_start(frac_num))), header.xy2latlng((header.x_end(frac_num), header.y_start(frac_num))), header.xy2latlng((header.x_end(frac_num), header.y_end(frac_num))), header.xy2latlng((header.x_start(frac_num), header.y_end(frac_num))) ] return np.array(poly) def headers_are_same_geogrid(header1, header2): """ Given two headers, verify that they are in the same projection with the same geotransform and the same fraction sizes """ return header1.spatialref.IsSame(header2.spatialref) and \ (header1.geot == header2.geot) and \ header1.width == header2.width and \ header1.height == header2.height and \ header1.frac_width == header2.frac_width and \ header1.frac_height == header2.frac_height def load_frac_from_multi_jgrids(headers, frac_num, **kwargs): """ Given a set of jgrid headers and a frac_num in headers[0], loads the corresponding area from all headers Returns: xy_from: A single xy_from Followed by a list of data/mask pairs : data0, mask0, data1, mask1, data2, mask2, ... """ header0 = headers[0] xy_from0 = (header0.x_start(frac_num), header0.y_start(frac_num)) data0 = header0.load_frac_by_num(frac_num, **kwargs) mask0 = np.ones((data0.shape[0], data0.shape[1]), dtype=np.bool) frac_poly = poly_latlng_for_frac(header0, frac_num) retval = [xy_from0, data0, mask0] for _h in headers[1:]: if headers_are_same_geogrid(header0, _h): print 'Headers in same geogrid' _data = _h.load_frac_by_num(frac_num, **kwargs) _mask = np.ones((_data.shape[0], _data.shape[1]), dtype=np.bool) else: _data, _mask, _xy_from = load_poly_latlng_from_jgrid( _h, frac_poly, **kwargs) _data, _mask = reproject_jgrid_on_jgrid( header0, xy_from0, data0.shape, _h, _xy_from, _data, _mask ) retval.append(_data) retval.append(_mask) return retval def latlng_for_grid(header, xy_from, shape): """ For each point in the grid, computes its latlng coordinates, returning a (shape[0], shape[1], 2) array """ yx = np.indices(shape) yx[0] += xy_from[1] yx[1] += xy_from[0] latlng = [header.xy2latlng((x, y)) for y, x in zip(yx[0].reshape(-1), yx[1].reshape(-1))] return np.array(latlng).reshape(shape[0], shape[1], 2) def slice_and_reproject_to_grid(header, xy_from, grid_shape, src_ds, interpolation='near'): """ Helper function which takes a jgrid slice (so Header, xy_from, grid_shape) and a GDAL dataset and slice/reprojects the GDAL dataset to the jgrid slice. This is typically useful to reproject some arbitrary TIFF file on some part of the NDVI worldgrid. 
Args: header: A jgrid3.Header xy_from: the (x, y) at which the subgrid starts in the given header grid_shape: the (height, width) of the subgrid src_ds: The source GDAL dataset to reproject interpolation: The resampling mode : one of 'near', 'mode', 'average' Returns: A masked array containing the reprojected values """ # https://jgomezdans.github.io/gdal_notes/reprojection.html # http://www.gdal.org/gdalwarper_8h.html#ad36462e8d5d34642df7f9ea1cfc2fec4 src_wkt = src_ds.GetProjectionRef() nbands = src_ds.RasterCount src_dtype = src_ds.GetRasterBand(1).DataType # print 'src dtype : %s' % gdal.GetDataTypeName(src_dtype) mem_drv = gdal.GetDriverByName('MEM') dst_ds = mem_drv.Create('', grid_shape[1], grid_shape[0], nbands, src_dtype) dst_geo = header.geot_for_xyfrom(xy_from) dst_ds.SetGeoTransform(dst_geo) dst_ds.SetProjection(header.spatialref.ExportToWkt()) # NoData handling when using ReprojectImage with a MEM target ds is # a bit tricky. See those discussions : # https://trac.osgeo.org/gdal/ticket/6404 # http://gis.stackexchange.com/q/158503 # We have to fill each band with the nodata value before doing the # reprojectimage because the bands are initialized with 0 ndv = None for i in range(1, nbands + 1): src_b = src_ds.GetRasterBand(i) if ndv is not None and not np.isnan(ndv): assert src_b.GetNoDataValue() == ndv, \ "All bands of the source dataset should have the same NODATA" else: ndv = src_b.GetNoDataValue() dst_b = dst_ds.GetRasterBand(i) if ndv is not None: dst_b.SetNoDataValue(ndv) dst_b.Fill(ndv) if interpolation == 'near': gdal_mode = gdal.GRA_NearestNeighbour elif interpolation == 'mode': gdal_mode = gdal.GRA_Mode elif interpolation == 'average': gdal_mode = gdal.GRA_Average else: raise ValueError("Invalid interpolation mode %s" % interpolation) res = gdal.ReprojectImage( src_ds, dst_ds, src_ds.GetProjectionRef(), dst_ds.GetProjectionRef(), gdal_mode ) assert res == 0, 'Error reprojecting, res=%d' % res dst_arr = dst_ds.ReadAsArray() # GDAL ReadAsArray returns (bands, height, width) but we expect # (height, width, bands) if len(dst_arr.shape) == 3: dst_arr = dst_arr.transpose(1, 2, 0) # TODO: This assumes that the no data value is the same for all bands if ndv is not None: dst_arr = ma.masked_where(dst_arr == ndv, dst_arr) else: dst_arr = ma.asarray(dst_arr) return dst_arr def gdal_ds_from_jgrid_slice(header, xy_from, data): """ Returns a GDAL in-memory dataset that maps the provided jgrid slice. Note that the dataset only keeps a reference to the data array. """ ds = gdal_utils.gdal_ds_from_array(data) ds.SetGeoTransform(header.geot_for_xyfrom(xy_from)) ds.SetProjection(header.spatialref.ExportToWkt()) return ds def reproject_jgrid_on_jgrid(target_header, target_xy_from, target_shape, src_header, src_xy_from, src_data, src_mask): """ Reproject a source jgrid on a target jgrid """ data_ds = gdal_ds_from_jgrid_slice(src_header, src_xy_from, src_data) # This requires a mask copy because GDAL doesn't support bool # Also, GDAL ignores 0 during reproject, so add 1 to the mask here src_mask = src_mask.astype(np.uint8) + 1 mask_ds = gdal_ds_from_jgrid_slice(src_header, src_xy_from, src_mask) new_data = slice_and_reproject_to_grid(target_header, target_xy_from, target_shape, data_ds) new_mask = slice_and_reproject_to_grid(target_header, target_xy_from, target_shape, mask_ds) # recover the boolean mask new_mask = new_mask > 1 return new_data, new_mask
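if __name__ == '__main__':
    # Sketch (illustration only) of the boolean-mask round trip performed by
    # reproject_jgrid_on_jgrid: GDAL has no boolean band type and treats 0 as
    # nodata during reprojection, so the mask is shifted to {1, 2} before
    # warping and recovered with "> 1" afterwards.
    _mask = np.array([[True, False], [False, True]])
    _shifted = _mask.astype(np.uint8) + 1  # values {2, 1}; 0 never appears
    assert np.array_equal(_shifted > 1, _mask)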
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ A simple device interface for build steps. """ import logging import os import re import sys from util import build_utils BUILD_ANDROID_DIR = os.path.join(os.path.dirname(__file__), '..', '..') sys.path.append(BUILD_ANDROID_DIR) from pylib.device import adb_wrapper from pylib.device import device_errors from pylib.device import device_utils def GetAttachedDevices(): return [a.GetDeviceSerial() for a in adb_wrapper.AdbWrapper.Devices()] class BuildDevice(object): def __init__(self, configuration): self.id = configuration['id'] self.description = configuration['description'] self.install_metadata = configuration['install_metadata'] self.device = device_utils.DeviceUtils(self.id) def RunShellCommand(self, *args, **kwargs): return self.device.RunShellCommand(*args, **kwargs) def PushChangedFiles(self, *args, **kwargs): return self.device.PushChangedFiles(*args, **kwargs) def GetSerialNumber(self): return self.id def Install(self, *args, **kwargs): return self.device.Install(*args, **kwargs) def InstallSplitApk(self, *args, **kwargs): return self.device.InstallSplitApk(*args, **kwargs) def GetInstallMetadata(self, apk_package): """Gets the metadata on the device for the apk_package apk.""" # Matches lines like: # -rw-r--r-- system system 7376582 2013-04-19 16:34 \ # org.chromium.chrome.shell.apk # -rw-r--r-- system system 7376582 2013-04-19 16:34 \ # org.chromium.chrome.shell-1.apk apk_matcher = lambda s: re.match('.*%s(-[0-9]*)?.apk$' % apk_package, s) matches = filter(apk_matcher, self.install_metadata) return matches[0] if matches else None def GetConfigurationForDevice(device_id): device = device_utils.DeviceUtils(device_id) configuration = None has_root = False is_online = device.IsOnline() if is_online: cmd = 'ls -l /data/app; getprop ro.build.description' cmd_output = device.RunShellCommand(cmd) has_root = not 'Permission denied' in cmd_output[0] if not has_root: # Disable warning log messages from EnableRoot() logging.getLogger().disabled = True try: device.EnableRoot() has_root = True except device_errors.CommandFailedError: has_root = False finally: logging.getLogger().disabled = False cmd_output = device.RunShellCommand(cmd) configuration = { 'id': device_id, 'description': cmd_output[-1], 'install_metadata': cmd_output[:-1], } return configuration, is_online, has_root def WriteConfigurations(configurations, path): # Currently we only support installing to the first device. build_utils.WriteJson(configurations[:1], path, only_if_changed=True) def ReadConfigurations(path): return build_utils.ReadJson(path) def GetBuildDevice(configurations): assert len(configurations) == 1 return BuildDevice(configurations[0]) def GetBuildDeviceFromPath(path): configurations = ReadConfigurations(path) if len(configurations) > 0: return GetBuildDevice(ReadConfigurations(path)) return None
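if __name__ == '__main__':
    # Sketch (illustration only) of the apk-name pattern GetInstallMetadata
    # uses: it accepts both "pkg.apk" and versioned "pkg-1.apk" entries.
    _pkg = 'org.chromium.chrome.shell'
    _line = ('-rw-r--r-- system   system    7376582 2013-04-19 16:34 '
             'org.chromium.chrome.shell-1.apk')
    assert re.match('.*%s(-[0-9]*)?.apk$' % _pkg, _line)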
# -*- coding: utf-8 -*- """ First stage in flypy translation. Translates Python bytecode to untyped pykit IR. Initially adapted from flypypro/npm/symbolic.py by Siu Kwan Lam. """ from __future__ import print_function, division, absolute_import import __builtin__ import inspect import dis import pprint import operator import collections from collections import namedtuple from flypy.errors import error_context, CompileError, EmptyStackError from flypy.runtime.obj import tupleobject, listobject, sliceobject from .bytecode import ByteCode from pykit.ir import Function, Builder, Op, Const, OConst, Value, ops from pykit import types #===------------------------------------------------------------------=== # Translation #===------------------------------------------------------------------=== COMPARE_OP_FUNC = { '>': operator.gt, '<': operator.lt, '>=': operator.ge, '<=': operator.le, '==': operator.eq, '!=': operator.ne, 'in': operator.contains, 'is': operator.is_, 'exception match': isinstance, } def const(val): if not isinstance(val, Value): val = OConst(val) return val def blockname(func, offset): return "Block%d.%s" % (offset, func.__name__) class Translate(object): """ Translate bytecode to untypes pykit IR. """ def __init__(self, func, env): self.func = func self.env = env self.bytecode = ByteCode(func) # ------------------------------------------------- # Find predecessors self.blocks = {} # offset -> Block self.block2offset = {} # Block -> offset self.allocas = {} # varname -> alloca self.stacks = {} # Block -> value stack self.exc_handlers = set() # { Block } # ------------------------------------------------- # Block stacks self.block_stack = [] self.loop_stack = [] self.except_stack = [] self.finally_stack = [] # ------------------------------------------------- # CFG self.predecessors = collections.defaultdict(set) self.phis = collections.defaultdict(list) # ------------------------------------------------- # Variables and scoping self.code = self.bytecode.code self.varnames = self.bytecode.code.co_varnames self.consts = self.bytecode.code.co_consts self.names = self.bytecode.code.co_names self.argnames = list(self.varnames[:self.bytecode.code.co_argcount]) self.globals = dict(vars(__builtin__)) self.builtins = set(self.globals.values()) self.globals.update(self.func.func_globals) self.call_annotations = collections.defaultdict(dict) # ------------------------------------------------- # Error checks argspec = inspect.getargspec(self.func) if argspec.varargs: self.argnames.append(argspec.varargs) if argspec.keywords: self.argnames.append(argspec.keywords) assert not argspec.keywords, "keywords not yet supported" def initialize(self): """Initialize pykit untypes structures""" # Setup Function sig = types.Function(types.Opaque, [types.Opaque] * len(self.argnames), False) self.dst = Function(func_name(self.func), self.argnames, sig) # Setup Builder self.builder = Builder(self.dst) # Setup Blocks for offset in self.bytecode.labels: name = blockname(self.func, offset) block = self.dst.new_block(name) self.blocks[offset] = block self.stacks[block] = [] # Setup Variables self.builder.position_at_beginning(self.dst.startblock) for varname in self.varnames: stackvar = self.builder.alloca(types.Pointer(types.Opaque), result=self.dst.temp(varname)) self.allocas[varname] = stackvar # Initialize function arguments if varname in self.argnames: self.builder.store(self.dst.get_arg(varname), stackvar) def interpret(self): self.curblock = self.dst.startblock for inst in self.bytecode: if inst.offset in 
self.blocks: # Block switch newblock = self.blocks[inst.offset] if self.curblock != newblock: self.switchblock(newblock) elif self.curblock.is_terminated(): continue self.op(inst) # ------------------------------------------------- # Finalize self.update_phis() def op(self, inst): during = "Operation translate in %s" % (self.func.__name__, ) with error_context(lineno=inst.lineno, during="Translate operation", pyfunc=self.func): self.lineno = inst.lineno attr = 'op_%s' % inst.opname.replace('+', '_') fn = getattr(self, attr, self.generic_op) fn(inst) def generic_op(self, inst): raise NotImplementedError(inst) def switchblock(self, newblock): """ Switch to a new block and merge incoming values from the stacks. """ #print("%s -> %s" % (self.curblock.name, newblock.name), self.stack) if not self.curblock.is_terminated(): self.jump(newblock) self.builder.position_at_end(newblock) self.prevblock = self.curblock self.curblock = newblock # ------------------------------------------------- # Find predecessors if newblock in self.exc_handlers: self.push_insert('exc_fetch') self.push_insert('exc_fetch_value') self.push_insert('exc_fetch_tb') # ------------------------------------------------- # Find predecessors incoming = self.predecessors.get(newblock) if not incoming: return # ------------------------------------------------- # Merge stack values stack = max([self.stacks[block] for block in incoming], key=len) for value in stack: phi = self.push_insert('phi', [], []) self.phis[newblock].append(phi) assert len(self.stack) == len(stack) def update_phis(self): laststack = self.stacks[self.dst.blocks.tail] assert not laststack, laststack for block in self.dst.blocks: phis = self.phis[block] preds = list(self.predecessors[block]) stacks = [self.stacks[pred] for pred in preds] stacklen = len(phis) # ------------------------------------------------- # Sanity check assert all(len(stack) == stacklen for stack in stacks), (preds, stacks) if not preds or not stacklen: continue # ------------------------------------------------- # Update φs with stack values from predecessors for pos, phi in enumerate(phis): values = [] for pred in preds: value_stack = self.stacks[pred] value = value_stack[pos] values.append(value) phi.set_args([preds, values]) @property def stack(self): return self.stacks[self.curblock] @property def stack_level(self): return len(self.stack) def insert(self, opcode, *args): type = types.Void if ops.is_void(opcode) else types.Opaque op = Op(opcode, type, list(args)) op.add_metadata({'lineno': self.lineno}) self.builder.emit(op) return op def push_insert(self, opcode, *args): inst = self.insert(opcode, *args) self.push(inst) return inst def push(self, val): self.stack.append(val) def peek(self): """ Take a peek at the top of stack. """ if not self.stack: # Assuming the bytecode is valid, our predecessors must have left # some values on the stack. 
# return self._insert_phi() raise EmptyStackError else: return self.stack[-1] def pop(self): if not self.stack: # return self._insert_phi() raise EmptyStackError else: return self.stack.pop() def _insert_phi(self): with self.builder.at_front(self.curblock): phi = self.insert('phi', [], []) self.phis[self.curblock].append(phi) return phi def call(self, func, args=()): if not isinstance(func, Value): func = const(func) return self.push_insert('call', func, list(args)) def call_pop(self, func, args=()): self.call(func, args) return self.pop() def binary_op(self, op): rhs = self.pop() lhs = self.pop() self.call(op, args=(lhs, rhs)) def unary_op(self, op): tos = self.pop() self.call(op, args=(tos,)) def jump(self, target): self.predecessors[target].add(self.curblock) self.insert('jump', target) def jump_if(self, cond, truebr, falsebr): self.predecessors[truebr].add(self.curblock) self.predecessors[falsebr].add(self.curblock) self.insert('cbranch', cond, truebr, falsebr) # ------- stack ------- # def op_POP_BLOCK(self, inst): block = self.block_stack.pop() if isinstance(block, LoopBlock): self.loop_stack.pop() elif isinstance(block, ExceptionBlock): self.except_stack.pop() elif isinstance(block, FinallyBlock): self.finally_stack.pop() del self.stack[block.level:] def op_POP_TOP(self, inst): self.pop() def op_DUP_TOP(self, inst): value = self.pop() self.push(value) self.push(value) def op_DUP_TOPX(self, inst): count = inst.arg self.stack.extend(self.stack[-count:]) def op_ROT_TWO(self, inst): one = self.pop() two = self.pop() self.push(one) self.push(two) def op_ROT_THREE(self, inst): one = self.pop() two = self.pop() three = self.pop() self.push(one) self.push(three) self.push(two) def op_ROT_FOUR(self, inst): one = self.pop() two = self.pop() three = self.pop() four = self.pop() self.push(one) self.push(four) self.push(three) self.push(two) # ------- control flow ------- # def op_POP_JUMP_IF_TRUE(self, inst): falsebr = self.blocks[inst.next] truebr = self.blocks[inst.arg] self.jump_if(self.pop(), truebr, falsebr) def op_POP_JUMP_IF_FALSE(self, inst): truebr = self.blocks[inst.next] falsebr = self.blocks[inst.arg] self.jump_if(self.pop(), truebr, falsebr) def op_JUMP_IF_TRUE(self, inst): falsebr = self.blocks[inst.next] truebr = self.blocks[inst.next + inst.arg] self.jump_if(self.peek(), truebr, falsebr) def op_JUMP_IF_FALSE(self, inst): truebr = self.blocks[inst.next] falsebr = self.blocks[inst.next + inst.arg] self.jump_if(self.peek(), truebr, falsebr) def _make_popblock(self): popblock = self.dst.new_block(self.dst.temp("popblock"), after=self.curblock) self.stacks[popblock] = [] return popblock def op_JUMP_IF_TRUE_OR_POP(self, inst): falsebr = self.blocks[inst.next] truebr = self.blocks[inst.arg] popblock = self._make_popblock() self.jump_if(self.peek(), truebr, popblock) self.switchblock(popblock) self.pop() self.jump(falsebr) def op_JUMP_IF_FALSE_OR_POP(self, inst): truebr = self.blocks[inst.next] falsebr = self.blocks[inst.arg] popblock = self._make_popblock() self.jump_if(self.peek(), popblock, falsebr) self.switchblock(popblock) self.pop() self.jump(truebr) def op_JUMP_ABSOLUTE(self, inst): target = self.blocks[inst.arg] self.jump(target) def op_JUMP_FORWARD(self, inst): target = self.blocks[inst.next + inst.arg] self.jump(target) def op_RETURN_VALUE(self, inst): val = self.pop() if isinstance(val, Const) and val.const is None: val = None # Generate a bare 'ret' instruction self.insert('ret', val) def op_CALL_FUNCTION(self, inst, varargs=None): argc = inst.arg & 0xff kwsc = (inst.arg >> 
8) & 0xff def pop_kws(): val = self.pop() key = self.pop() if key.opcode != 'const': raise ValueError('keyword must be a constant') return key.value, val kws = list(reversed([pop_kws() for i in range(kwsc)])) args = list(reversed([self.pop() for i in range(argc)])) assert not kws, "Keyword arguments not yet supported" func = self.pop() return self.call(func, args) def op_CALL_FUNCTION_VAR(self, inst): it = self.call_pop(tuple, [self.pop()]) #varargs = self.insert('unpack', it) call = self.op_CALL_FUNCTION(inst, varargs=it) # Add unpacked iterable to args list f, args = call.args call.set_args([f, args + [it]]) # Annotate call as a 'varargs' application self.call_annotations[call]['varargs'] = True def op_GET_ITER(self, inst): self.call(iter, [self.pop()]) def op_FOR_ITER(self, inst): """ Translate a for loop to: it = getiter(iterable) try: while 1: i = next(t) ... except StopIteration: pass """ iterobj = self.peek() delta = inst.arg loopexit = self.blocks[inst.next + delta] loop_block = self.loop_stack[-1] loop_block.catch_block = loopexit # ------------------------------------------------- # Try self.insert('exc_setup', [loopexit]) self.call(next, [iterobj]) # We assume a 1-to-1 block mapping, resolve a block split in a # later pass self.insert('exc_end') # ------------------------------------------------- # Catch with self.builder.at_front(loopexit): self.insert('exc_catch', [Const(StopIteration, type=types.Exception)]) # ------------------------------------------------- # Exit # Add the loop exit at a successor to the header self.predecessors[loopexit].add(self.curblock) # Remove ourselves as a predecessor from the actual exit block, set by # SETUP_LOOP self.predecessors[loop_block.end].remove(self.prevblock) def op_BREAK_LOOP(self, inst): loopblock = self.loop_stack[-1] self.jump(target=loopblock.catch_block or loopblock.end) def op_BUILD_TUPLE(self, inst): count = inst.arg items = [self.pop() for _ in range(count)] ordered = list(reversed(items)) if all(isinstance(item, Const) for item in ordered): # create constant tuple self.push(const(tuple(item.const for item in ordered))) elif len(ordered) < tupleobject.STATIC_THRESHOLD: # Build static tuple result = self.call_pop(tupleobject.EmptyTuple) for item in items: result = self.call_pop(tupleobject.StaticTuple, args=(item, result)) self.push(result) else: raise NotImplementedError("Generic tuples") def op_BUILD_LIST(self, inst): count = inst.arg if not count: self.call(listobject.EmptyList, ()) return self.op_BUILD_TUPLE(inst) result_tuple = self.pop() self.call(list, (result_tuple,)) def op_LOAD_ATTR(self, inst): attr = self.names[inst.arg] obj = self.pop() if isinstance(obj, Const) and hasattr(obj.const, attr): val = getattr(obj.const, attr) self.push(const(val)) else: self.push_insert('getfield', obj, attr) def op_LOAD_GLOBAL(self, inst): name = self.names[inst.arg] if name not in self.globals: raise NameError("Could not resolve %r at compile time" % name) value = self.globals[name] self.push(const(value)) def op_LOAD_DEREF(self, inst): i = inst.arg cell = self.func.__closure__[i] value = cell.cell_contents self.push(const(value)) def op_LOAD_FAST(self, inst): name = self.varnames[inst.arg] self.push_insert('load', self.allocas[name]) def op_LOAD_CONST(self, inst): val = self.consts[inst.arg] self.push(const(val)) def op_STORE_FAST(self, inst): value = self.pop() name = self.varnames[inst.arg] self.insert('store', value, self.allocas[name]) def op_STORE_ATTR(self, inst): attr = self.names[inst.arg] obj = self.pop() value = self.pop() 
self.insert('setfield', obj, attr, value) def op_STORE_SUBSCR(self, inst): tos0 = self.pop() tos1 = self.pop() tos2 = self.pop() self.call(operator.setitem, (tos1, tos0, tos2)) self.pop() def op_UNPACK_SEQUENCE(self, inst): value = self.pop() itemct = inst.arg for i in reversed(range(itemct)): self.push_insert('unpack', value, i, itemct) def op_COMPARE_OP(self, inst): opname = dis.cmp_op[inst.arg] if opname == 'not in': self.binary_op(COMPARE_OP_FUNC['in']) self.unary_op(operator.not_) elif opname == 'is not': self.binary_op(COMPARE_OP_FUNC['is']) self.unary_op(operator.not_) else: opfunc = COMPARE_OP_FUNC[opname] self.binary_op(opfunc) def op_UNARY_POSITIVE(self, inst): self.unary_op(operator.pos) def op_UNARY_NEGATIVE(self, inst): self.unary_op(operator.neg) def op_UNARY_INVERT(self, inst): self.unary_op(operator.invert) def op_UNARY_NOT(self, inst): self.unary_op(operator.not_) def op_BINARY_SUBSCR(self, inst): self.binary_op(operator.getitem) def op_BINARY_ADD(self, inst): self.binary_op(operator.add) def op_BINARY_SUBTRACT(self, inst): self.binary_op(operator.sub) def op_BINARY_MULTIPLY(self, inst): self.binary_op(operator.mul) def op_BINARY_DIVIDE(self, inst): self.binary_op(operator.floordiv) def op_BINARY_FLOOR_DIVIDE(self, inst): self.binary_op(operator.floordiv) def op_BINARY_TRUE_DIVIDE(self, inst): self.binary_op(operator.truediv) def op_BINARY_MODULO(self, inst): self.binary_op(operator.mod) def op_BINARY_POWER(self, inst): self.binary_op(operator.pow) def op_BINARY_RSHIFT(self, inst): self.binary_op(operator.rshift) def op_BINARY_LSHIFT(self, inst): self.binary_op(operator.lshift) def op_BINARY_AND(self, inst): self.binary_op(operator.and_) def op_BINARY_OR(self, inst): self.binary_op(operator.or_) def op_BINARY_XOR(self, inst): self.binary_op(operator.xor) def op_INPLACE_ADD(self, inst): self.binary_op(operator.add) def op_INPLACE_SUBTRACT(self, inst): self.binary_op(operator.sub) def op_INPLACE_MULTIPLY(self, inst): self.binary_op(operator.mul) def op_INPLACE_DIVIDE(self, inst): self.binary_op(operator.floordiv) def op_INPLACE_FLOOR_DIVIDE(self, inst): self.binary_op(operator.floordiv) def op_INPLACE_TRUE_DIVIDE(self, inst): self.binary_op(operator.truediv) def op_INPLACE_MODULO(self, inst): self.binary_op(operator.mod) def op_INPLACE_POWER(self, inst): self.binary_op(operator.pow) def op_INPLACE_RSHIFT(self, inst): self.binary_op(operator.rshift) def op_INPLACE_LSHIFT(self, inst): self.binary_op(operator.lshift) def op_INPLACE_AND(self, inst): self.binary_op(operator.and_) def op_INPLACE_OR(self, inst): self.binary_op(operator.or_) def op_INPLACE_XOR(self, inst): self.binary_op(operator.xor) def slice(self, start=None, stop=None, step=None): start, stop, step = map(const, [start, stop, step]) return self.call_pop(const(sliceobject.Slice), [start, stop, step]) def op_SLICE_0(self, inst): tos = self.pop() self.call(operator.getitem, args=(tos, self.slice())) def op_SLICE_1(self, inst): start = self.pop() tos = self.pop() self.call(operator.getitem, args=(tos, self.slice(start=start))) def op_SLICE_2(self, inst): stop = self.pop() tos = self.pop() self.call(operator.getitem, args=(tos, self.slice(stop=stop))) def op_SLICE_3(self, inst): stop = self.pop() start = self.pop() tos = self.pop() self.call(operator.getitem, args=(tos, self.slice(start, stop))) def op_STORE_SLICE_0(self, inst): tos = self.pop() val = self.pop() self.call_pop(operator.setitem, args=(tos, self.slice(), val)) def op_STORE_SLICE_1(self, inst): start = self.pop() tos = self.pop() val = self.pop() 
self.call_pop(operator.setitem, args=(tos, self.slice(start=start), val)) def op_STORE_SLICE_2(self, inst): stop = self.pop() tos = self.pop() val = self.pop() self.call_pop(operator.setitem, args=(tos, self.slice(stop=stop), val)) def op_STORE_SLICE_3(self, inst): stop = self.pop() start = self.pop() tos = self.pop() val = self.pop() self.call_pop(operator.setitem, args=(tos, self.slice(start, stop), val)) def op_BUILD_SLICE(self, inst): argc = inst.arg tos = [self.pop() for _ in range(argc)] if argc == 2: start, stop, step = [tos[1], tos[0], None] elif argc == 3: start, stop, step = [tos[2], tos[1], tos[0]] else: raise Exception('unreachable') self.push(self.slice(start, stop, step)) # ------- Exceptions ------- # def op_RAISE_VARARGS(self, inst): nargs = inst.arg if nargs == 3: raise CompileError("Traceback argument to raise not supported") args = list(reversed([self.pop() for _ in range(nargs)])) if self.except_stack: except_block = self.except_stack[-1] self.predecessors[except_block].add(self.curblock) self.insert('exc_throw', *args) # ------- Generators ------- # def op_YIELD_VALUE(self, inst): val = self.pop() self.push_insert('yield', val) self.env['flypy.state.generator'] += 1 # ------- Blocks ------- # def op_SETUP_LOOP(self, inst): exit_block = self.blocks[inst.next + inst.arg] self.predecessors[exit_block].add(self.curblock) block = LoopBlock(None, exit_block, self.stack_level) self.block_stack.append(block) self.loop_stack.append(block) def op_SETUP_EXCEPT(self, inst): try_block = self.blocks[inst.next] except_block = self.blocks[inst.next + inst.arg] self.predecessors[except_block].add(self.curblock) self.exc_handlers.add(except_block) with self.builder.at_front(self.curblock): self.builder.exc_setup([except_block]) block = ExceptionBlock(try_block, except_block, self.stack_level) self.block_stack.append(block) self.except_stack.append(block) def op_SETUP_FINALLY(self, inst): try_block = self.blocks[inst.next] finally_block = self.blocks[inst.next + inst.arg] self.predecessors[finally_block].add(self.curblock) block = FinallyBlock(try_block, finally_block, self.stack_level) self.block_stack.append(block) self.finally_stack.append(block) def op_END_FINALLY(self, inst): self.pop() self.pop() self.pop() # self.insert('end_finally') # ------- print ------- # def op_PRINT_ITEM(self, inst): self.call_pop(print, [self.pop()]) def op_PRINT_NEWLINE(self, inst): self.call_pop(print, [const('\n')]) # ------- Misc ------- # def op_STOP_CODE(self, inst): pass #===------------------------------------------------------------------=== # Internals #===------------------------------------------------------------------=== def func_name(func): if func.__module__: return ".".join([func.__module__, func.__name__]) return func.__name__ def slicearg(v): """Construct an argument to a slice instruction""" return OConst(v) #===------------------------------------------------------------------=== # Exceptions #===------------------------------------------------------------------=== Exc = collections.namedtuple('Exc', ['arg']) Val = collections.namedtuple('Val', ['arg']) Tb = collections.namedtuple('Tb', ['arg']) #===------------------------------------------------------------------=== # Blocks #===------------------------------------------------------------------=== class BasicBlock(object): def __init__(self, block, level): self.block = block self.level = level class LoopBlock(BasicBlock): def __init__(self, block, end, level): BasicBlock.__init__(self, block, level) self.end = end self.catch_block = 
None # Block with the exc_catch(StopIteration) class ExceptionBlock(BasicBlock): def __init__(self, block, first_except_block, level): BasicBlock.__init__(self, block, level) self.first_except_block = first_except_block class FinallyBlock(BasicBlock): def __init__(self, block, finally_block, level): BasicBlock.__init__(self, block, level) self.finally_block = finally_block #===------------------------------------------------------------------=== # Globals #===------------------------------------------------------------------=== def lookup_global(func, name, env): func_globals = env['flypy.state.func_globals'] if (func is not None and name in func.__code__.co_freevars and func.__closure__): cell_idx = func.__code__.co_freevars.index(name) cell = func.__closure__[cell_idx] value = cell.cell_contents elif name in func_globals: value = func_globals[name] else: raise CompileError("No global named '%s'" % (name,)) return value
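if __name__ == '__main__':
    # Usage sketch (hypothetical toy function, illustration only): blockname()
    # derives deterministic pykit block labels from bytecode offsets.
    def _toy():
        pass

    assert blockname(_toy, 0) == "Block0._toy"
    assert blockname(_toy, 13) == "Block13._toy"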
# download_HCP_1200.py
#
# Author: Joke Durnez (joke.durnez@gmail.com)

'''
This script downloads data from the Human Connectome Project - 1200 subjects
release.
'''

# Import packages
import os
import tarfile
import shutil

SERIES_MAP = {
    'MEG_unprocessed': 'unprocessed/MEG/',
    '3T_unprocessed': 'unprocessed/3T/',
    '7T_unprocessed': '7T',
    'Diffusion': 'Diffusion',
    'T1w': 'T1w',
    'MNINonLinear': 'MNINonLinear',
    'release-notes': 'release-notes',
    'MEG': 'MEG'
}


# Main collect and download function
def collect_and_download(out_dir, subject, series=SERIES_MAP.keys(),
                         tartasks=False):
    '''
    Function to collect and download one subject's images of the HCP-1200
    release from the hcp-openaccess S3 bucket.

    Parameters
    ----------
    out_dir : string
        filepath to a local directory to save files to
    subject : string
        subject ID to download data for
    series : list
        the series to download (for functional scans); currently unused,
        all SERIES_MAP prefixes are fetched
    tartasks : boolean
        tar the task-fMRI result folders to limit the number of files

    Returns
    -------
    None
    '''
    # Import packages
    import boto3

    # Init variables
    s3_bucket_name = 'hcp-openaccess'
    s3_prefix = 'HCP_1200'
    boto3.setup_default_session(profile_name='hcp')
    s3 = boto3.resource('s3')
    bucket = s3.Bucket(s3_bucket_name)

    s3_keys = bucket.objects.filter(Prefix='HCP_1200/%s/' % subject)
    s3_keylist = [key.key for key in s3_keys]
    prefixes = tuple("HCP_1200/%s/%s" % (subject, x)
                     for x in SERIES_MAP.values())
    s3_keylist = [x for x in s3_keylist if x.startswith(prefixes)]
    # remove png and html
    s3_keylist = [x for x in s3_keylist if not x.endswith(('png', 'html'))]

    # If output path doesn't exist, create it
    if not os.path.exists(out_dir):
        print 'Could not find %s, creating now...' % out_dir
        os.makedirs(out_dir)

    print 'Collecting images of interest...'

    # And download the items
    total_num_files = len(s3_keylist)
    files_downloaded = len(s3_keylist)
    for path_idx, s3_path in enumerate(s3_keylist):
        rel_path = s3_path.replace(s3_prefix, '')
        rel_path = rel_path.lstrip('/')
        download_file = os.path.join(out_dir, rel_path)
        download_dir = os.path.dirname(download_file)
        if not os.path.exists(download_dir):
            os.makedirs(download_dir)
        try:
            if not os.path.exists(download_file):
                print 'Downloading to: %s' % download_file
                # bucket.download_file writes the file itself; no need to
                # open a handle first
                bucket.download_file(s3_path, download_file)
                print '%.3f%% percent complete' % \
                      (100 * (float(path_idx + 1) / total_num_files))
            else:
                print 'File %s already exists, skipping...' % download_file
                files_downloaded -= 1
        except Exception as exc:
            print 'There was a problem downloading %s.\n' \
                  'Check and try again.' % s3_path
            print exc

    if tartasks:
        subdir = os.path.join(out_dir, "%s/MNINonLinear/Results/" % subject)
        if os.path.exists(subdir):
            try:
                protocols = [x for x in os.listdir(subdir)
                             if x.startswith('tfMRI')
                             and not x.endswith('tar.gz')]
            except OSError:
                print("OSError")
            else:
                for protocol in protocols:
                    print('tarring protocol %s in subject %s'
                          % (protocol, subject))
                    protocoldir = os.path.join(subdir, protocol)
                    with tarfile.open(protocoldir + ".tar.gz", "w:gz") as tar:
                        tar.add(protocoldir,
                                arcname=os.path.basename(protocoldir))
                    shutil.rmtree(protocoldir)

    print '%d files downloaded for subject %s.' % (files_downloaded, subject)
    print 'Done!'
# Make module executable
if __name__ == '__main__':

    # Import packages
    import argparse

    # Init argparser
    parser = argparse.ArgumentParser(description=__doc__)

    # Required arguments
    parser.add_argument('-o', '--out_dir', required=True, type=str,
                        help='Path to local folder to download files to')
    parser.add_argument('-s', '--subject', required=True, type=str,
                        help='Subject code')

    # Optional arguments
    parser.add_argument('-t', '--tartasks', required=False,
                        action='store_true',
                        help='To limit the number of files: tar the task '
                             'folders together')

    args = parser.parse_args()
    out_dir = os.path.abspath(args.out_dir)
    subject = args.subject
    kwargs = {}
    if args.tartasks:
        kwargs['tartasks'] = args.tartasks

    collect_and_download(out_dir=out_dir, subject=subject, **kwargs)
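# Example invocation (hypothetical output path and subject ID); the 'hcp' AWS
# profile referenced by boto3.setup_default_session must exist in
# ~/.aws/credentials:
#   python download_HCP_1200.py -o /data/hcp -s 100307 -t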
#
#  Copyright (C) 2001 Andrew T. Csillag <drew_csillag@geocities.com>
#
#  You may distribute under the terms of either the GNU General
#  Public License or the SkunkWeb License, as specified in the
#  README file.
#
"""LALR(1) operations"""

import LR0
import LR1
import copy
from Common import *


def canonicalLRToLALR(C):
    """converts a canonical LR(1) set of items to an LALR(1) set"""
    nC = []
    for i in range(len(C)):
        I = C[i]
        # since we're building nC one bit at a time, there will be at
        # most one duplicate of I in nC
        # find duplicate rules (ignoring lookaheads)
        try:
            dup = nC.index(I)
        except ValueError:
            # no duplicate, add to nC
            nC.append(copy.deepcopy(I))
        else:
            # duplicate found, update lookaheads
            for ncItem, CItem in map(None, nC[dup], I):
                ncItem.lookaheads.addSet(CItem.lookaheads)
    return nC


def compareSet(old, new):
    """returns: 1 if new has lookaheads not in old, 0 otherwise"""
    for oldItem, newItem in map(None, old, new):
        if not oldItem.lookaheads.contains(newItem.lookaheads):
            return 1
    return 0


def updateLookaheads(old, new):
    """add the lookaheads from new to old"""
    for oldItem, newItem in map(None, old, new):
        oldItem.lookaheads.addSet(newItem.lookaheads)


def items(ruleSet, terminals, nonTerminals):
    """compute LALR(1) items for the ruleset"""
    symbols = nonTerminals + terminals
    # start with closure of [ [S' -> S, $] ]
    C = [LR1.closure([startItem], ruleSet, terminals)]
    added = 1
    while added:
        added = 0
        for I in C:
            for X in symbols:
                g = LR1.goto(I, X, ruleSet, terminals)
                if g and g not in C:
                    # no state with this core exists yet
                    added = 1
                    C.append(g)
                elif g and g in C:
                    # a state with this core exists; merge lookaheads
                    target = C[C.index(g)]
                    if compareSet(target, g):
                        added = 1
                        updateLookaheads(target, g)
    return C
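if __name__ == '__main__':
    # Usage sketch with stand-in items (illustration only; the real item and
    # lookahead-set classes live in LR0/LR1/Common).
    class _Set:
        def __init__(self, items):
            self.items = set(items)

        def contains(self, other):
            return other.items <= self.items

        def addSet(self, other):
            self.items |= other.items

    class _Item:
        def __init__(self, lookaheads):
            self.lookaheads = _Set(lookaheads)

    old, new = [_Item(['a'])], [_Item(['a', 'b'])]
    assert compareSet(old, new) == 1   # new carries lookahead 'b'
    updateLookaheads(old, new)
    assert compareSet(old, new) == 0   # lookaheads merged into old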
""" 消防单位火灾风险预测 """ import sys import fire_risk.data_utils as fr import sklearn.linear_model as lm from sklearn.externals import joblib # 读入配置参数 file_train_path, _, model_path, deadline, _, _ = fr.read_config(sys.argv[1]) # 导入数据 fire_data = fr.data_input_from_file(file_train_path) # 对数据进行转换 fire_data = fr.DataTranslate(fire_data).if_else('aqsks', '>=', 10).if_else('dwdj', '==', 1).if_else('dwdj', '==', 2).\ if_else('dwlx', '==', 1).if_else('dwlx', '==', 2).if_else('dwlx', '==', 3).if_else('dwxz', '==', 1).\ if_else('dwxz', '==', 2).if_else('dwxz', '==', 3).if_else('dwxz', '==', 4).\ del_col(['gdzc', 'ssqy', 'wgs84_x', 'wgs84_y', 'yyszdrs']).if_else('jcsl', '>', 100).\ if_else('jzmj', '>', 10000).if_else('max_dscs', '>', 30).if_else('max_dsmj', '>', 10000).\ if_else('max_dtsl', '>', 10).if_else('max_dxcs', '>', 3).if_else('max_dxmj', '>', 1000).\ if_else('max_jzgd', '>', 100).if_else('max_jznl', '<', 0).if_else('max_jznl', '>', 10000).\ if_else('max_rzsl', '>', 50).if_else('max_zdmj', '>', 10000).if_else('ssdts', '>', 10).if_else('xfcds', '>', 10).\ if_else('xfdts', '>', 10).if_else('yhsl', '>', 300).if_else('zgrs', '>', 1000).if_else('zgsl', '>', 300).\ fill_na('zjhzsj', 20010101000000).fill_na('zjjcsj', 20010101000000).if_else('zjyhsl', '>', 10).\ if_else('zjzgsl', '>', 10).date_diff('zjhzsj', deadline).date_diff('zjjcsj', deadline).\ col_diff_if_else('yhsl', 'zgsl').col_diff_if_else('zjyhsl', 'zjzgsl') # 提取需要的数据列 fire_data = fire_data.fire_data.ix[:, ['dwid', 'Y', 'aqsks', 'dwdj_1', 'dwdj_2', 'dwxz_1', 'dwxz_2', 'dwxz_3', 'dwxz_4', 'hzsl', 'jcsl', 'jzmj', 'jzsl', 'sfgpdw', 'sfzdyhdw', 'ssdts', 'xfcds', 'xfdts', 'yhsl', 'zdxfss', 'zgrs', 'zjyhsl', 'zjzgsl', 'hzts_to_deadline', 'jcts_to_deadline', 'yhsl_minus_zgsl', 'zjyhsl_minus_zjzgsl', 'zddw', 'ybdw', 'jxdw', 'wxp', 'max_jzzt', 'max_jznl', 'max_jzgd', 'max_zdmj', 'max_dscs', 'max_dsmj', 'max_dxcs', 'max_dxmj', 'max_nhdj', 'max_rnrs', 'max_dtsl', 'max_xfkzs', 'max_rzsl', 'max_xfsssl']].fillna(0) # 运行模型:弹性网络模型 # enet = lm.ElasticNetCV(l1_ratio=1, cv=10, n_jobs=1) # l1_ratio=1表示Lasso回归 # enet.fit(X=fire_data.ix[:, 2:], y=fire_data.ix[:, 1]) # joblib.dump(enet, model_path+'/fire_risk_model_enet.pkl') # 运行模型:逻辑回归模型 lgr = lm.LogisticRegressionCV(cv=10, penalty='l1', solver='liblinear', n_jobs=1) lgr.fit(X=fire_data.ix[:, 2:], y=fire_data.ix[:, 1]) joblib.dump(lgr, model_path+'/fire_risk_model_lgr.pkl')
"PubMed interface." import json import os import os.path import sys import time import unicodedata import xml.etree.ElementTree import requests PUBMED_FETCH_URL = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&rettype=abstract&id=%s" PUBMED_SEARCH_URL = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&retmax=%s&term=%s" DEFAULT_TIMEOUT = 5.0 DEFAULT_DELAY = 1.0 MONTHS = dict(jan=1, feb=2, mar=3, apr=4, may=5, jun=6, jul=7, aug=8, sep=9, oct=10, nov=11, dec=12) def search(author=None, published=None, journal=None, doi=None, affiliation=None, title=None, exclude_title=None, retmax=20, timeout=DEFAULT_TIMEOUT, delay=DEFAULT_DELAY, api_key=None, debug=False): """Get list of PMIDs for PubMed hits given the data. Delay the HTTP request if positive value (seconds). The API key is the one set for your NCBI account, if any. """ parts = [] if author: parts.append("%s[AU]" % to_ascii(str(author))) if published: parts.append("%s[DP]" % published) if journal: parts.append("%s[TA]" % journal) if doi: parts.append("%s[LID]" % doi) if affiliation: parts.append("%s[AD]" % to_ascii(str(affiliation))) if title: parts.append("%s[TI]" % to_ascii(str(title))) query = " AND ".join(parts) if exclude_title: query += " NOT %s[TI]" % to_ascii(str(exclude_title)) url = PUBMED_SEARCH_URL % (retmax, query) if api_key: url += f"&api_key={api_key}" if delay > 0.0: time.sleep(delay) try: if debug: print("url>", url) response = requests.get(url, timeout=timeout) except (requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError): raise IOError("timeout") if response.status_code != 200: raise IOError(f"HTTP status {response.status_code} {url}") root = xml.etree.ElementTree.fromstring(response.content) return [e.text for e in root.findall("IdList/Id")] def fetch(pmid, dirname=None, timeout=DEFAULT_TIMEOUT, delay=DEFAULT_DELAY, api_key=None, debug=False): """Fetch publication XML from PubMed and parse into a dictionary. Return None if no article data in XML. Use the file cache directory if given. Delay the HTTP request if positive value (seconds). The API key is the one set for your NCBI account, if any. """ filename = pmid + ".xml" content = None # Get the locally stored XML file if it exists. if dirname: try: with open(os.path.join(dirname, filename)) as infile: content = infile.read() except IOError: pass if not content: url = PUBMED_FETCH_URL % pmid if api_key: url += f"&api_key={api_key}" if delay > 0.0: time.sleep(delay) if debug: print("url>", url) try: response = requests.get(url, timeout=timeout) except (requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError): raise IOError("timeout") if response.status_code != 200: raise IOError(f"HTTP status {response.status_code} {url}") content = response.content # Store the XML file locally. if dirname: with open(os.path.join(dirname, filename), "wb") as outfile: outfile.write(content) return parse(content) def parse(data): "Parse XML text data for a publication into a dictionary." 
tree = xml.etree.ElementTree.fromstring(data) try: article = get_element(tree, "PubmedArticle") except ValueError: raise ValueError("no article with the given PMID") result = dict() result["title"] = squish(get_title(article)) result["pmid"] = get_pmid(article) result["doi"] = None result["authors"] = get_authors(article) result["journal"] = get_journal(article) result["type"] = get_type(article) result["published"] = get_published(article) result["epublished"] = get_epublished(article) result["abstract"] = get_abstract(article) result["xrefs"] = [] # Remove PMID from xrefs; get and remove DOI for xref in get_xrefs(article): if xref["db"] == "doi": result["doi"] = xref["key"] elif xref["db"] == "pubmed": pass else: result["xrefs"].append(xref) return result def get_title(article): "Get the title from the article XML tree." element = get_element(article, "MedlineCitation/Article/ArticleTitle") return get_text(element) def get_pmid(article): "Get the PMID from the article XML tree." return article.findtext("MedlineCitation/PMID") def get_authors(article): "Get the list of authors from the article XML tree." element = get_element(article, "MedlineCitation/Article") authorlist = element.find("AuthorList") if not authorlist: return [] result = [] existing = set() # Handle pathological multi-mention. for element in authorlist.findall("Author"): author = dict() # Name of author for jkey, xkey in [("family", "LastName"), ("given", "ForeName"), ("initials", "Initials")]: value = element.findtext(xkey) if not value: continue value = str(value) author[jkey] = value author[jkey + "_normalized"] = to_ascii(value).lower() # For consortia and such, names are a mess. Try to sort out. if not author.get("family"): try: author["family"] = author.pop("given") except KeyError: value = element.findtext("CollectiveName") if not value: continue # Give up. value = str(value) author["family"] = value author["given"] = "" author["initials"] = "" author["family_normalized"] = to_ascii(author["family"]).lower() author["given_normalized"] = "" author["initials_normalized"] = "" for elem in element.findall("Identifier"): if elem.attrib.get("Source") == "ORCID": # ORCID may be given as an URL; split away all except id proper. author["orcid"] = get_text(elem).split("/")[-1] for elem in element.findall(".//Affiliation"): author.setdefault("affiliations", []).append(get_text(elem)) if author: try: # Don't add author if this doesn't work. key = f"{author['family']} {author['given']}" if key not in existing: result.append(author) existing.add(key) except KeyError: pass return result def get_journal(article): "Get the journal data from the article XML tree." element = get_element(article, "MedlineCitation/Article/Journal") result = dict() if element is not None: result["title"] = element.findtext("ISOAbbreviation") if not result["title"]: result["title"] = element.findtext("Title") result["issn"] = element.findtext("ISSN") issue = element.find("JournalIssue") if issue is not None: result["volume"] = issue.findtext("Volume") result["issue"] = issue.findtext("Issue") element = article.find("MedlineCitation/Article/Pagination/MedlinePgn") if element is not None: pages = element.text if pages: pages = pages.split("-") if len(pages) == 2: # Complete page numbers! diff = len(pages[0]) - len(pages[1]) if diff > 0: pages[1] = pages[0][0:diff] + pages[1] pages = "-".join(pages) result["pages"] = pages return result def get_type(article): "Get the type from the article XML tree." 
element = get_element(article, "MedlineCitation/Article/PublicationTypeList/PublicationType") if element is not None: return element.text.lower() else: return None def get_published(article): "Get the publication date from the article XML tree." elem = article.find("MedlineCitation/Article/Journal/JournalIssue/PubDate") date = [] if elem is not None: date = get_date(elem) if len(date) < 2: # Fallback 1: ArticleDate elem = article.find("MedlineCitation/Article/ArticleDate") if elem is not None: date = get_date(elem) if len(date) < 2: # Fallback 2: PubMedPubDate dates = article.findall("PubmedData/History/PubMedPubDate") for status in ["epublish", "aheadofprint", "pubmed"]: for elem in dates: if elem.get("PubStatus") == status: date = get_date(elem) break if len(date) >= 2: break if len(date) == 0: # Fallback 3: today's year d = time.localtime() date = [d.tm_year, 0, 0] # Add dummy values, if missing if len(date) == 1: date.append(0) if len(date) == 2: date.append(0) return "%s-%02i-%02i" % tuple(date) def get_epublished(article): "Get the online publication date from the article XML tree, or None." date = [] elem = article.find("MedlineCitation/Article/ArticleDate") if elem is not None and elem.get("DateType") == "Electronic": date = get_date(elem) if len(date) < 2: dates = article.findall("PubmedData/History/PubMedPubDate") for status in ["epublish", "aheadofprint", "pubmed"]: for elem in dates: if elem.get("PubStatus") == status: date = get_date(elem) break if len(date) >= 2: break if len(date) == 0: # No date found return None # Add dummy values, if missing if len(date) == 1: date.append(0) if len(date) == 2: date.append(0) return "%s-%02i-%02i" % tuple(date) def get_abstract(article): "Get the abstract from the article XML tree." try: element = get_element(article, "MedlineCitation/Article/Abstract") except ValueError: return None else: text = [] for elem in element.findall("AbstractText"): text.append(get_text(elem)) return "\n\n".join([t for t in text if t]).strip() def get_xrefs(article): "Get the list of cross-references from the article XML tree." result = [] for elem in article.findall("PubmedData/ArticleIdList/ArticleId"): result.append(dict(db=elem.get("IdType"), key=elem.text)) for elem in article.findall("MedlineCitation/Article/DataBankList/DataBank"): db = elem.findtext("DataBankName") if not db: continue for elem2 in elem.findall("AccessionNumberList/AccessionNumber"): result.append(dict(db=db, key=elem2.text)) return result def get_element(tree, key): element = tree.find(key) if element is None: raise ValueError(f"Could not find '{key}' element.") return element def get_date(element): "Get the [year, month, day] from the element." year = element.findtext("Year") if not year: return [] result = [int(year)] month = element.findtext("Month") if not month: return result try: month = int(MONTHS.get(month.lower()[:3], month)) except (TypeError, ValueError): return result else: result.append(month) day = element.findtext("Day") try: day = int(day) except (TypeError, ValueError): day = 0 result.append(day) return result def get_text(element): "Get all text from element and its children. Normalize blanks." text = [] for elem in element.iter(): text.append(elem.text) text.append(elem.tail) text = "".join([t for t in text if t]) text = "".join([t for t in text.split("\n")]) text = " ".join([t for t in text.split()]) return text def to_ascii(value): "Convert any non-ASCII character to its closest ASCII equivalent." 
if value is None: return "" value = unicodedata.normalize("NFKD", str(value)) return u"".join([c for c in value if not unicodedata.combining(c)]) def squish(value): "Remove all unnecessary white spaces." return " ".join([p for p in value.split() if p]) if __name__ == "__main__": dirname = os.getcwd() pmids = sys.argv[1:] if not pmids: pmids = ["32283633", "8142349", "7525970"] for pmid in pmids: data = fetch(pmid, dirname=dirname, debug=True) print(json.dumps(data, indent=2, ensure_ascii=False))
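# A small, self-contained check of get_date() on a hand-written PubDate
# fragment. The element layout mirrors what the parser above reads; the only
# assumption is that the MONTHS table maps 'apr' to 4 (or '4').
import xml.etree.ElementTree

_elem = xml.etree.ElementTree.fromstring(
    "<PubDate><Year>2020</Year><Month>Apr</Month><Day>7</Day></PubDate>")
print(get_date(_elem))  # expected: [2020, 4, 7]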
#!/usr/bin/env python # Copyright 2018 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Prints the total PSS attributed to another app in Chrome's mappings. This script assumes a device with Monochrome, and requires root access. For instance, to get the part of Chrome's memory footprint coming from GMSCore code and bytecode pages: $ tools/android/native_lib_memory/java_code_pages_pss.py --chrome-package com.android.chrome --app-package com.google.android.gms --verbose """ from __future__ import print_function import argparse import logging import os import re import sys import parse_smaps _SRC_PATH = os.path.join( os.path.dirname(__file__), os.pardir, os.pardir, os.pardir) sys.path.append(os.path.join(_SRC_PATH, 'third_party', 'catapult', 'devil')) from devil.android import device_utils def _GetPssInKb(mappings, app_package, verbose): """Returns the total PSS from mappings. Args: mappings: ([parse_smaps.Mapping]) List of mappings. app_package: (str) App package to look for. verbose: (bool) Verbose output or not. Returns: (executable_pss (int), other_pss (int)) Executable mappings and others, in kB. """ executable_pss, other_pss = (0, 0) for mapping in mappings: if app_package in mapping.pathname: if mapping.permissions == 'r-xp': executable_pss += mapping.fields['Pss'] else: other_pss += mapping.fields['Pss'] if verbose: print(mapping.ToString()) return (executable_pss, other_pss) def _CreateArgumentParser(): parser = argparse.ArgumentParser() parser.add_argument('--chrome-package', help='Chrome package to look for.', required=True) parser.add_argument('--app-package', help='Application to inspect.', required=True) parser.add_argument('--verbose', help='Verbose output.', action='store_true') return parser def main(): parser = _CreateArgumentParser() args = parser.parse_args() devices = device_utils.DeviceUtils.HealthyDevices() if not devices: logging.error('No connected devices') return device = devices[0] device.EnableRoot() processes = device.ListProcesses(args.chrome_package) logging.basicConfig(level=logging.INFO) logging.info('Processes:\n\t' + '\n\t'.join(p.name for p in processes)) total_executable_pss_kb, total_other_pss_kb = (0, 0) for process in processes: mappings = parse_smaps.ParseProcSmaps(device, process.pid) executable_pss_kb, other_pss_kb = _GetPssInKb( mappings, args.app_package, args.verbose) total_executable_pss_kb += executable_pss_kb total_other_pss_kb += other_pss_kb print('Total executable PSS = %dkB' % total_executable_pss_kb) print('Total other mappings PSS = %dkB' % total_other_pss_kb) if __name__ == '__main__': main()
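# A quick way to sanity-check _GetPssInKb without a device: a stand-in object
# that mimics only the attributes the function reads. parse_smaps.Mapping's
# real constructor is not shown here, so this fake is an assumption made for
# illustration; the paths and PSS numbers are invented.
class _FakeMapping(object):
  def __init__(self, pathname, permissions, pss_kb):
    self.pathname = pathname
    self.permissions = permissions
    self.fields = {'Pss': pss_kb}

  def ToString(self):
    return '%s %s Pss=%dkB' % (self.permissions, self.pathname,
                               self.fields['Pss'])


_fake = [_FakeMapping('/data/app/com.google.android.gms/base.apk', 'r-xp', 120),
         _FakeMapping('/data/app/com.google.android.gms/base.apk', 'r--p', 40)]
print(_GetPssInKb(_fake, 'com.google.android.gms', verbose=False))  # (120, 40)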
import os import django from django.conf import settings, global_settings import oscar def configure(): if not settings.configured: from oscar.defaults import OSCAR_SETTINGS # Helper function to extract absolute path location = lambda x: os.path.join( os.path.dirname(os.path.realpath(__file__)), x) test_settings = { 'DATABASES': { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:', }, }, 'INSTALLED_APPS': [ 'django.contrib.auth', 'django.contrib.admin', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.flatpages', 'django.contrib.staticfiles', 'sorl.thumbnail', 'compressor', # Use a custom partner app to test overriding models. I can't # find a way of doing this on a per-test basis, so I'm using a # global change. ] + oscar.get_core_apps(['tests._site.apps.partner']), 'TEMPLATE_CONTEXT_PROCESSORS': ( "django.contrib.auth.context_processors.auth", "django.core.context_processors.request", "django.core.context_processors.debug", "django.core.context_processors.i18n", "django.core.context_processors.media", "django.core.context_processors.static", "django.contrib.messages.context_processors.messages", 'oscar.apps.search.context_processors.search_form', 'oscar.apps.customer.notifications.context_processors.notifications', 'oscar.apps.promotions.context_processors.promotions', 'oscar.apps.checkout.context_processors.checkout', 'oscar.core.context_processors.metadata', ), 'TEMPLATE_DIRS': ( location('templates'), oscar.OSCAR_MAIN_TEMPLATE_DIR, ), 'MIDDLEWARE_CLASSES': global_settings.MIDDLEWARE_CLASSES + ( 'oscar.apps.basket.middleware.BasketMiddleware', ), 'AUTHENTICATION_BACKENDS': ( 'oscar.apps.customer.auth_backends.Emailbackend', 'django.contrib.auth.backends.ModelBackend', ), 'HAYSTACK_CONNECTIONS': { 'default': { 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine', } }, 'PASSWORD_HASHERS': ['django.contrib.auth.hashers.MD5PasswordHasher'], 'ROOT_URLCONF': 'tests._site.urls', 'LOGIN_REDIRECT_URL': '/accounts/', 'STATIC_URL': '/static/', 'COMPRESS_ENABLED': False, 'ADMINS': ('admin@example.com',), 'DEBUG': False, 'SITE_ID': 1, 'APPEND_SLASH': True, 'DDF_DEFAULT_DATA_FIXTURE': 'tests.dynamic_fixtures.OscarDynamicDataFixtureClass', 'SESSION_SERIALIZER': 'django.contrib.sessions.serializers.PickleSerializer', } if django.VERSION >= (1, 5): test_settings['INSTALLED_APPS'] += ['tests._site.myauth', ] test_settings['AUTH_USER_MODEL'] = 'myauth.User' test_settings.update(OSCAR_SETTINGS) settings.configure(**test_settings)
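# Hypothetical entry point (module and file names assumed): a test runner
# would call configure() once before any Django models are imported.
import django

import config  # the module above, if saved as config.py

config.configure()
if hasattr(django, 'setup'):  # Django >= 1.7
    django.setup()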
# encoding: utf-8 # # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. # # Author: Kyle Lahnakoski (kyle@lahnakoski.com) # from __future__ import unicode_literals from collections import Mapping from jx_base import container from mo_dots import Data from mo_dots import wrap, set_default, split_field from mo_future import text_type from mo_logs import Log config = Data() # config.default IS EXPECTED TO BE SET BEFORE CALLS ARE MADE _ListContainer = None _meta = None def _delayed_imports(): global _ListContainer global _meta from jx_python import meta as _meta from jx_python.containers.list_usingPythonList import ListContainer as _ListContainer _ = _ListContainer _ = _meta try: from pyLibrary.queries.jx_usingMySQL import MySQL except Exception: MySQL = None try: from jx_elasticsearch.meta import FromESMetadata except Exception: FromESMetadata = None set_default(container.type2container, { "mysql": MySQL, "memory": None, "meta": FromESMetadata }) def wrap_from(frum, schema=None): """ :param frum: :param schema: :return: """ if not _meta: _delayed_imports() frum = wrap(frum) if isinstance(frum, text_type): if not container.config.default.settings: Log.error("expecting jx_base.container.config.default.settings to contain default elasticsearch connection info") type_ = None index = frum if frum.startswith("meta."): if frum == "meta.columns": return _meta.singlton.meta.columns.denormalized() elif frum == "meta.tables": return _meta.singlton.meta.tables else: Log.error("{{name}} not a recognized table", name=frum) else: type_ = container.config.default.type index = split_field(frum)[0] settings = set_default( { "index": index, "name": frum, "exists": True, }, container.config.default.settings ) settings.type = None return container.type2container[type_](settings) elif isinstance(frum, Mapping) and frum.type and container.type2container[frum.type]: # TODO: Ensure the frum.name is set, so we capture the deep queries if not frum.type: Log.error("Expecting from clause to have a 'type' property") return container.type2container[frum.type](frum.settings) elif isinstance(frum, Mapping) and (frum["from"] or isinstance(frum["from"], (list, set))): from jx_base.query import QueryOp return QueryOp.wrap(frum, schema=schema) elif isinstance(frum, (list, set)): return _ListContainer("test_list", frum) else: return frum
import pytest from django.conf import settings from django.core.urlresolvers import reverse from .. import factories as f from tests.utils import disconnect_signals, reconnect_signals pytestmark = pytest.mark.django_db def setup_module(module): disconnect_signals() def teardown_module(module): reconnect_signals() @pytest.fixture def data(): m = type("Models", (object,), {}) m.user = f.UserFactory.create() m.project = f.ProjectFactory(is_private=False, owner=m.user) m.role1 = f.RoleFactory(project=m.project) m.role2 = f.RoleFactory(project=m.project) m.null_points = f.PointsFactory(project=m.project, value=None) m.points1 = f.PointsFactory(project=m.project, value=1) m.points2 = f.PointsFactory(project=m.project, value=2) m.points3 = f.PointsFactory(project=m.project, value=4) m.points4 = f.PointsFactory(project=m.project, value=8) m.open_status = f.UserStoryStatusFactory(is_closed=False) m.closed_status = f.UserStoryStatusFactory(is_closed=True) m.role_points1 = f.RolePointsFactory(role=m.role1, points=m.points1, user_story__project=m.project, user_story__status=m.open_status) m.role_points2 = f.RolePointsFactory(role=m.role1, points=m.points2, user_story__project=m.project, user_story__status=m.open_status) m.role_points3 = f.RolePointsFactory(role=m.role1, points=m.points3, user_story__project=m.project, user_story__status=m.open_status) m.role_points4 = f.RolePointsFactory(role=m.project.roles.all()[0], points=m.points4, user_story__project=m.project, user_story__status=m.open_status) m.user_story1 = m.role_points1.user_story m.user_story2 = m.role_points2.user_story m.user_story3 = m.role_points3.user_story m.user_story4 = m.role_points4.user_story m.milestone = f.MilestoneFactory(project=m.project) return m def test_project_defined_points(client, data): assert data.project.defined_points == {data.role1.pk: 15} data.role_points1.role = data.role2 data.role_points1.save() assert data.project.defined_points == {data.role1.pk: 14, data.role2.pk: 1} def test_project_closed_points(client, data): assert data.project.closed_points == {} data.role_points1.role = data.role2 data.role_points1.save() assert data.project.closed_points == {} data.user_story1.is_closed = True data.user_story1.save() assert data.project.closed_points == {data.role2.pk: 1} data.user_story2.is_closed = True data.user_story2.save() assert data.project.closed_points == {data.role1.pk: 2, data.role2.pk: 1} data.user_story3.is_closed = True data.user_story3.save() assert data.project.closed_points == {data.role1.pk: 6, data.role2.pk: 1} data.user_story4.is_closed = True data.user_story4.save() assert data.project.closed_points == {data.role1.pk: 14, data.role2.pk: 1} def test_project_assigned_points(client, data): assert data.project.assigned_points == {} data.role_points1.role = data.role2 data.role_points1.save() assert data.project.assigned_points == {} data.user_story1.milestone = data.milestone data.user_story1.save() assert data.project.assigned_points == {data.role2.pk: 1} data.user_story2.milestone = data.milestone data.user_story2.save() assert data.project.assigned_points == {data.role1.pk: 2, data.role2.pk: 1} data.user_story3.milestone = data.milestone data.user_story3.save() assert data.project.assigned_points == {data.role1.pk: 6, data.role2.pk: 1} data.user_story4.milestone = data.milestone data.user_story4.save() assert data.project.assigned_points == {data.role1.pk: 14, data.role2.pk: 1}
import py from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr from rpython.jit.metainterp.history import ResOperation, TargetToken,\ JitCellToken from rpython.jit.metainterp.history import (ConstInt, ConstPtr, Const, BasicFailDescr, BasicFinalDescr) from rpython.jit.backend.detect_cpu import getcpuclass from rpython.jit.backend.x86.arch import WORD from rpython.jit.backend.x86.rx86 import fits_in_32bits from rpython.jit.backend.llsupport import symbolic from rpython.jit.metainterp.resoperation import rop, InputArgInt, InputArgRef from rpython.jit.metainterp.executor import execute from rpython.jit.backend.test.runner_test import LLtypeBackendTest from rpython.jit.tool.oparser import parse import ctypes CPU = getcpuclass() class FakeStats(object): pass U = LLtypeBackendTest.U S = LLtypeBackendTest.S # ____________________________________________________________ class TestX86(LLtypeBackendTest): # for the individual tests see # ====> ../../test/runner_test.py if WORD == 4: add_loop_instructions = ('mov; ' 'lea; ' # a nop, for the label 'add; test; je; jmp;') # plus some padding bridge_loop_instructions = 'cmp; jge; mov; mov; call; jmp;' else: add_loop_instructions = ('mov; ' 'nop; ' # for the label 'add; test; je; jmp;') # plus some padding bridge_loop_instructions = ( 'cmp; jge; mov;( movabs;)? mov; mov(abs)?; call; mov(abs)?; jmp;') def get_cpu(self): cpu = CPU(rtyper=None, stats=FakeStats()) cpu.setup_once() return cpu def test_execute_ptr_operation(self): cpu = self.cpu u_box, _, _ = self.alloc_instance(U) u = u_box.getref(lltype.Ptr(U)) ofs = cpu.fielddescrof(S, 'value') assert self.execute_operation(rop.SETFIELD_GC, [u_box, InputArgInt(3)], 'void', ofs) == None assert u.parent.parent.value == 3 u.parent.parent.value += 100 assert (self.execute_operation(rop.GETFIELD_GC_I, [u_box], 'int', ofs) == 103) def test_unicode(self): ofs = symbolic.get_field_token(rstr.UNICODE, 'chars', False)[0] u = rstr.mallocunicode(13) for i in range(13): u.chars[i] = unichr(ord(u'a') + i) b = InputArgRef(lltype.cast_opaque_ptr(llmemory.GCREF, u)) r = self.execute_operation(rop.UNICODEGETITEM, [b, ConstInt(2)], 'int') assert r == ord(u'a') + 2 self.execute_operation(rop.UNICODESETITEM, [b, ConstInt(2), ConstInt(ord(u'z'))], 'void') assert u.chars[2] == u'z' assert u.chars[3] == u'd' @staticmethod def _resbuf(res, item_tp=ctypes.c_long): return ctypes.cast(res._obj.intval, ctypes.POINTER(item_tp)) def test_allocations(self): py.test.skip("rewrite or kill") from rpython.rtyper.lltypesystem import rstr allocs = [None] all = [] orig_new = self.cpu.gc_ll_descr.funcptr_for_new def f(size): allocs.insert(0, size) return orig_new(size) self.cpu.assembler.setup_once() self.cpu.gc_ll_descr.funcptr_for_new = f ofs = symbolic.get_field_token(rstr.STR, 'chars', False)[0] res = self.execute_operation(rop.NEWSTR, [ConstInt(7)], 'ref') assert allocs[0] == 7 + ofs + WORD resbuf = self._resbuf(res) assert resbuf[ofs/WORD] == 7 # ------------------------------------------------------------ res = self.execute_operation(rop.NEWSTR, [InputArgInt(7)], 'ref') assert allocs[0] == 7 + ofs + WORD resbuf = self._resbuf(res) assert resbuf[ofs/WORD] == 7 # ------------------------------------------------------------ TP = lltype.GcArray(lltype.Signed) ofs = symbolic.get_field_token(TP, 'length', False)[0] descr = self.cpu.arraydescrof(TP) res = self.execute_operation(rop.NEW_ARRAY, [ConstInt(10)], 'ref', descr) assert allocs[0] == 10*WORD + ofs + WORD resbuf = self._resbuf(res) assert resbuf[ofs/WORD] == 10 # 
------------------------------------------------------------ res = self.execute_operation(rop.NEW_ARRAY, [InputArgInt(10)], 'ref', descr) assert allocs[0] == 10*WORD + ofs + WORD resbuf = self._resbuf(res) assert resbuf[ofs/WORD] == 10 def test_stringitems(self): from rpython.rtyper.lltypesystem.rstr import STR ofs = symbolic.get_field_token(STR, 'chars', False)[0] ofs_items = symbolic.get_field_token(STR.chars, 'items', False)[0] res = self.execute_operation(rop.NEWSTR, [ConstInt(10)], 'ref') self.execute_operation(rop.STRSETITEM, [InputArgRef(res), ConstInt(2), ConstInt(ord('d'))], 'void') resbuf = self._resbuf(res, ctypes.c_char) assert resbuf[ofs + ofs_items + 2] == 'd' self.execute_operation(rop.STRSETITEM, [InputArgRef(res), InputArgInt(2), ConstInt(ord('z'))], 'void') assert resbuf[ofs + ofs_items + 2] == 'z' r = self.execute_operation(rop.STRGETITEM, [InputArgRef(res), InputArgInt(2)], 'int') assert r == ord('z') def test_arrayitems(self): TP = lltype.GcArray(lltype.Signed) ofs = symbolic.get_field_token(TP, 'length', False)[0] itemsofs = symbolic.get_field_token(TP, 'items', False)[0] descr = self.cpu.arraydescrof(TP) res = self.execute_operation(rop.NEW_ARRAY, [ConstInt(10)], 'ref', descr) resbuf = self._resbuf(res) assert resbuf[ofs/WORD] == 10 self.execute_operation(rop.SETARRAYITEM_GC, [InputArgRef(res), ConstInt(2), InputArgInt(38)], 'void', descr) assert resbuf[itemsofs/WORD + 2] == 38 self.execute_operation(rop.SETARRAYITEM_GC, [InputArgRef(res), InputArgInt(3), InputArgInt(42)], 'void', descr) assert resbuf[itemsofs/WORD + 3] == 42 r = self.execute_operation(rop.GETARRAYITEM_GC_I, [InputArgRef(res), ConstInt(2)], 'int', descr) assert r == 38 r = self.execute_operation(rop.GETARRAYITEM_GC_I, [ConstPtr(res), InputArgInt(2)], 'int', descr) assert r == 38 r = self.execute_operation(rop.GETARRAYITEM_GC_I, [ConstPtr(res), ConstInt(2)], 'int', descr) assert r == 38 r = self.execute_operation(rop.GETARRAYITEM_GC_I, [InputArgRef(res), InputArgInt(2)], 'int', descr) assert r == 38 r = self.execute_operation(rop.GETARRAYITEM_GC_I, [InputArgRef(res), InputArgInt(3)], 'int', descr) assert r == 42 def test_arrayitems_not_int(self): TP = lltype.GcArray(lltype.Char) ofs = symbolic.get_field_token(TP, 'length', False)[0] itemsofs = symbolic.get_field_token(TP, 'items', False)[0] descr = self.cpu.arraydescrof(TP) res = self.execute_operation(rop.NEW_ARRAY, [ConstInt(10)], 'ref', descr) resbuf = self._resbuf(res, ctypes.c_char) res = InputArgRef(res) assert resbuf[ofs] == chr(10) for i in range(10): self.execute_operation(rop.SETARRAYITEM_GC, [res, ConstInt(i), InputArgInt(i)], 'void', descr) for i in range(10): assert resbuf[itemsofs + i] == chr(i) for i in range(10): r = self.execute_operation(rop.GETARRAYITEM_GC_I, [res, ConstInt(i)], 'int', descr) assert r == i def test_getfield_setfield(self): TP = lltype.GcStruct('x', ('s', lltype.Signed), ('i', rffi.INT), ('f', lltype.Float), ('u', rffi.USHORT), ('c1', lltype.Char), ('c2', lltype.Char), ('c3', lltype.Char)) res = InputArgRef(self.execute_operation(rop.NEW, [], 'ref', self.cpu.sizeof(TP))) ofs_s = self.cpu.fielddescrof(TP, 's') ofs_i = self.cpu.fielddescrof(TP, 'i') #ofs_f = self.cpu.fielddescrof(TP, 'f') ofs_u = self.cpu.fielddescrof(TP, 'u') ofsc1 = self.cpu.fielddescrof(TP, 'c1') ofsc2 = self.cpu.fielddescrof(TP, 'c2') ofsc3 = self.cpu.fielddescrof(TP, 'c3') self.execute_operation(rop.SETFIELD_GC, [res, ConstInt(3)], 'void', ofs_s) # XXX ConstFloat #self.execute_operation(rop.SETFIELD_GC, [res, ofs_f, 1e100], 'void') # XXX we don't 
support shorts (at all) #self.execute_operation(rop.SETFIELD_GC, [res, ofs_u, ConstInt(5)], 'void') s = self.execute_operation(rop.GETFIELD_GC_I, [res], 'int', ofs_s) assert s == 3 self.execute_operation(rop.SETFIELD_GC, [res, InputArgInt(3)], 'void', ofs_s) s = self.execute_operation(rop.GETFIELD_GC_I, [res], 'int', ofs_s) assert s == 3 self.execute_operation(rop.SETFIELD_GC, [res, InputArgInt(1234)], 'void', ofs_i) i = self.execute_operation(rop.GETFIELD_GC_I, [res], 'int', ofs_i) assert i == 1234 #u = self.execute_operation(rop.GETFIELD_GC, [res, ofs_u], 'int') #assert u.value == 5 self.execute_operation(rop.SETFIELD_GC, [res, ConstInt(1)], 'void', ofsc1) self.execute_operation(rop.SETFIELD_GC, [res, ConstInt(3)], 'void', ofsc3) self.execute_operation(rop.SETFIELD_GC, [res, ConstInt(2)], 'void', ofsc2) c = self.execute_operation(rop.GETFIELD_GC_I, [res], 'int', ofsc1) assert c == 1 c = self.execute_operation(rop.GETFIELD_GC_I, [res], 'int', ofsc2) assert c == 2 c = self.execute_operation(rop.GETFIELD_GC_I, [res], 'int', ofsc3) assert c == 3 def test_bug_setfield_64bit(self): if WORD == 4: py.test.skip("only for 64 bits") TP = lltype.GcStruct('S', ('i', lltype.Signed)) ofsi = self.cpu.fielddescrof(TP, 'i') for i in range(500): p = lltype.malloc(TP) addr = rffi.cast(lltype.Signed, p) if fits_in_32bits(addr): break # fitting in 32 bits, good else: py.test.skip("cannot get a 32-bit pointer") res = ConstPtr(rffi.cast(llmemory.GCREF, addr)) self.execute_operation(rop.SETFIELD_RAW, [res, ConstInt(3**33)], 'void', ofsi) assert p.i == 3**33 def test_and_mask_common_patterns(self): cases = [8, 16, 24] if WORD == 8: cases.append(32) for i in cases: box = InputArgInt(0xAAAAAAAAAAAA) res = self.execute_operation(rop.INT_AND, [box, ConstInt(2 ** i - 1)], 'int') assert res == 0xAAAAAAAAAAAA & (2 ** i - 1) def test_nullity_with_guard(self): allops = [rop.INT_IS_TRUE] guards = [rop.GUARD_TRUE, rop.GUARD_FALSE] p = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(lltype.GcStruct('x'))) nullptr = lltype.nullptr(llmemory.GCREF.TO) f = InputArgInt() for op in allops: for guard in guards: if op == rop.INT_IS_TRUE: bp = InputArgInt(1) n = InputArgInt(0) else: bp = InputArgRef(p) n = InputArgRef(nullptr) for b in (bp, n): i1 = ResOperation(rop.SAME_AS_I, [ConstInt(1)]) f = ResOperation(op, [b]) ops = [ i1, f, ResOperation(guard, [f], descr=BasicFailDescr()), ResOperation(rop.FINISH, [ConstInt(0)], descr=BasicFinalDescr()), ] ops[-2].setfailargs([i1]) looptoken = JitCellToken() self.cpu.compile_loop([b], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, b.getint()) result = self.cpu.get_int_value(deadframe, 0) if guard == rop.GUARD_FALSE: assert result == execute(self.cpu, None, op, None, b) else: assert result != execute(self.cpu, None, op, None, b) def test_stuff_followed_by_guard(self): boxes = [(InputArgInt(1), InputArgInt(0)), (InputArgInt(0), InputArgInt(1)), (InputArgInt(1), InputArgInt(1)), (InputArgInt(-1), InputArgInt(1)), (InputArgInt(1), InputArgInt(-1)), (ConstInt(1), InputArgInt(0)), (ConstInt(0), InputArgInt(1)), (ConstInt(1), InputArgInt(1)), (ConstInt(-1), InputArgInt(1)), (ConstInt(1), InputArgInt(-1)), (InputArgInt(1), ConstInt(0)), (InputArgInt(0), ConstInt(1)), (InputArgInt(1), ConstInt(1)), (InputArgInt(-1), ConstInt(1)), (InputArgInt(1), ConstInt(-1))] guards = [rop.GUARD_FALSE, rop.GUARD_TRUE] all = [rop.INT_EQ, rop.INT_NE, rop.INT_LE, rop.INT_LT, rop.INT_GT, rop.INT_GE, rop.UINT_GT, rop.UINT_LT, rop.UINT_LE, rop.UINT_GE] for a, b in boxes: for guard in guards: for op in 
all: i1 = ResOperation(rop.SAME_AS_I, [ConstInt(1)]) res = ResOperation(op, [a, b]) ops = [ i1, res, ResOperation(guard, [res], descr=BasicFailDescr()), ResOperation(rop.FINISH, [ConstInt(0)], descr=BasicFinalDescr()), ] ops[-2].setfailargs([i1]) inputargs = [i for i in (a, b) if not isinstance(i, Const)] looptoken = JitCellToken() self.cpu.compile_loop(inputargs, ops, looptoken) inputvalues = [box.getint() for box in inputargs] deadframe = self.cpu.execute_token(looptoken, *inputvalues) result = self.cpu.get_int_value(deadframe, 0) expected = execute(self.cpu, None, op, None, a, b) if guard == rop.GUARD_FALSE: assert result == expected else: assert result != expected def test_compile_bridge_check_profile_info(self): py.test.skip("does not work, reinvestigate") class FakeProfileAgent(object): def __init__(self): self.functions = [] def native_code_written(self, name, address, size): self.functions.append((name, address, size)) self.cpu.profile_agent = agent = FakeProfileAgent() i0 = InputArgInt() i1 = InputArgInt() i2 = InputArgInt() targettoken = TargetToken() faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) looptoken = JitCellToken() looptoken.number = 17 class FakeString(object): def __init__(self, val): self.val = val def _get_str(self): return self.val operations = [ ResOperation(rop.LABEL, [i0], None, descr=targettoken), ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("hello"), 0, 0], None), ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] operations[-2].setfailargs([i1]) self.cpu.compile_loop(inputargs, operations, looptoken) name, loopaddress, loopsize = agent.functions[0] assert name == "Loop # 17: hello (loop counter 0)" assert loopaddress <= looptoken._ll_loop_code assert loopsize >= 40 # randomish number i1b = InputArgInt() i3 = InputArgInt() bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0, 0], None), ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] bridge[1].setfailargs([i1b]) self.cpu.compile_bridge(faildescr1, [i1b], bridge, looptoken) name, address, size = agent.functions[1] assert name == "Bridge # 0: bye (loop counter 1)" # Would be exactly ==, but there are some guard failure recovery # stubs in-between assert address >= loopaddress + loopsize assert size >= 10 # randomish number deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 res = self.cpu.get_int_value(deadframe, 0) assert res == 20 def test_ops_offset(self): from rpython.rlib import debug looptoken = JitCellToken() targettoken = TargetToken() loop = parse(""" [i0] label(i0, descr=targettoken) i1 = int_add(i0, 1) i2 = int_le(i1, 9) jump(i1, descr=targettoken) """, namespace=locals()) debug._log = dlog = debug.DebugLog() info = self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) ops_offset = info.ops_offset debug._log = None # assert ops_offset is looptoken._x86_ops_offset # 2*increment_debug_counter + ops + None assert len(ops_offset) == 2 + len(loop.operations) + 1 assert (ops_offset[loop.operations[0]] <= ops_offset[loop.operations[1]] <= ops_offset[loop.operations[2]] <= ops_offset[None]) def test_calling_convention(self, monkeypatch): if WORD != 4: py.test.skip("32-bit only test") from 
rpython.jit.backend.x86.regloc import eax, edx from rpython.jit.backend.x86 import codebuf, callbuilder from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.rlib.libffi import types, clibffi had_stdcall = hasattr(clibffi, 'FFI_STDCALL') if not had_stdcall: # not running on Windows, but we can still test monkeypatch.setattr(clibffi, 'FFI_STDCALL', 12345, raising=False) monkeypatch.setattr(callbuilder, 'stdcall_or_cdecl', True) else: assert callbuilder.stdcall_or_cdecl # for real_ffi, reported_ffi in [ (clibffi.FFI_DEFAULT_ABI, clibffi.FFI_DEFAULT_ABI), (clibffi.FFI_STDCALL, clibffi.FFI_DEFAULT_ABI), (clibffi.FFI_STDCALL, clibffi.FFI_STDCALL)]: cpu = self.cpu mc = codebuf.MachineCodeBlockWrapper() mc.MOV_rs(eax.value, 4) # argument 1 mc.MOV_rs(edx.value, 40) # argument 10 mc.SUB_rr(eax.value, edx.value) # return arg1 - arg10 if real_ffi == clibffi.FFI_DEFAULT_ABI: mc.RET() else: mc.RET16_i(40) rawstart = mc.materialize(cpu, []) # calldescr = cpu._calldescr_dynamic_for_tests([types.slong] * 10, types.slong) calldescr.get_call_conv = lambda: reported_ffi # <==== hack # ^^^ we patch get_call_conv() so that the test also makes sense # on Linux, because clibffi.get_call_conv() would always # return FFI_DEFAULT_ABI on non-Windows platforms. funcbox = ConstInt(rawstart) i1 = InputArgInt() i2 = InputArgInt() c = ConstInt(-1) faildescr = BasicFailDescr(1) cz = ConstInt(0) # we must call it repeatedly: if the stack pointer gets increased # by 40 bytes by the STDCALL call, and if we don't expect it, # then we are going to get our stack emptied unexpectedly by # several repeated calls ops = [ ResOperation(rop.CALL_RELEASE_GIL_I, [cz, funcbox, i1, c, c, c, c, c, c, c, c, i2], descr=calldescr), ResOperation(rop.GUARD_NOT_FORCED, [], descr=faildescr), ResOperation(rop.CALL_RELEASE_GIL_I, [cz, funcbox, i1, c, c, c, c, c, c, c, c, i2], descr=calldescr), ResOperation(rop.GUARD_NOT_FORCED, [], descr=faildescr), ResOperation(rop.CALL_RELEASE_GIL_I, [cz, funcbox, i1, c, c, c, c, c, c, c, c, i2], descr=calldescr), ResOperation(rop.GUARD_NOT_FORCED, [], descr=faildescr), ResOperation(rop.CALL_RELEASE_GIL_I, [cz, funcbox, i1, c, c, c, c, c, c, c, c, i2], descr=calldescr), ResOperation(rop.GUARD_NOT_FORCED, [], descr=faildescr), ] i3 = ops[0] i4 = ops[2] i5 = ops[4] i6 = ops[6] ops += [ ResOperation(rop.GUARD_FALSE, [i3], descr=BasicFailDescr(0)), ResOperation(rop.FINISH, [], descr=BasicFinalDescr(1)) ] ops[-2].setfailargs([i3, i4, i5, i6]) ops[1].setfailargs([]) ops[3].setfailargs([]) ops[5].setfailargs([]) ops[7].setfailargs([]) looptoken = JitCellToken() self.cpu.compile_loop([i1, i2], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, 123450, 123408) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 0 assert self.cpu.get_int_value(deadframe, 0) == 42 assert self.cpu.get_int_value(deadframe, 1) == 42 assert self.cpu.get_int_value(deadframe, 2) == 42 assert self.cpu.get_int_value(deadframe, 3) == 42 class TestDebuggingAssembler(object): def setup_method(self, meth): self.cpu = CPU(rtyper=None, stats=FakeStats()) self.cpu.setup_once() def test_debugger_on(self): from rpython.tool.logparser import parse_log_file, extract_category from rpython.rlib import debug targettoken, preambletoken = TargetToken(), TargetToken() loop = """ [i0] label(i0, descr=preambletoken) debug_merge_point('xyz', 0, 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] label(i1, descr=targettoken) debug_merge_point('xyz', 0, 0) i11 = int_add(i1, 1) i12 = int_ge(i11, 10) guard_false(i12) 
[] jump(i11, descr=targettoken) """ ops = parse(loop, namespace={'targettoken': targettoken, 'preambletoken': preambletoken}) debug._log = dlog = debug.DebugLog() try: self.cpu.assembler.set_debug(True) looptoken = JitCellToken() self.cpu.compile_loop(ops.inputargs, ops.operations, looptoken) self.cpu.execute_token(looptoken, 0) # check debugging info struct = self.cpu.assembler.loop_run_counters[0] assert struct.i == 1 struct = self.cpu.assembler.loop_run_counters[1] assert struct.i == 1 struct = self.cpu.assembler.loop_run_counters[2] assert struct.i == 9 self.cpu.finish_once() finally: debug._log = None l0 = ('debug_print', 'entry -1:1') l1 = ('debug_print', preambletoken.repr_of_descr() + ':1') l2 = ('debug_print', targettoken.repr_of_descr() + ':9') assert ('jit-backend-counts', [l0, l1, l2]) in dlog
#!/usr/bin/python2.7
import subprocess
import atexit
import time

import boto3


def shell(command):
    subprocess.call(command, shell=True)


class MessageQueue(object):
    def __init__(self, name):
        # 'name' is the SQS queue name; look the queue up once on construction.
        self.q = boto3.resource("sqs").get_queue_by_name(QueueName=name)

    def __iter__(self):
        # One receive pass per iteration; each message is deleted only after
        # the caller has consumed its body.
        for message in self.q.receive_messages():
            yield message.body
            message.delete()


class ImpatientHaltingWorker(object):
    '''Halts if it becomes impatient while waiting for work'''

    def __init__(self, f, work_items, patience=300):
        self.f = f
        self.work_items = work_items
        self.patience = patience
        self.mark_time()

    def mark_time(self):
        self.t = time.time()

    @property
    def impatient(self):
        return self.duration_of_wait > self.patience

    @property
    def duration_of_wait(self):
        return time.time() - self.t

    def process_work(self):
        for x in self.work_items:
            self.f(x)
            self.mark_time()

    def start(self):
        while not self.impatient:
            self.process_work()
            time.sleep(10)


def impatient_map(f, domain):
    ImpatientHaltingWorker(f, domain).start()


if __name__ == "__main__":
    atexit.register(subprocess.call, "halt", shell=True)
    shell("yum -y update")
    shell("yum -y install git docker")
    shell("service docker start")
    impatient_map(shell, MessageQueue("ghost_ci_commands.fifo"))
# -*- coding: utf-8 -*- import ast from _ast import AST from blirgen import * from astprint import to_source from utils import anon #------------------------------------------------------------------------ # Kernel Manipulation #------------------------------------------------------------------------ IN = 0 OUT = 1 INOUT = 2 class Kernel(object): def __init__(self): raise NotImplementedError @property def dimensions(self): raise NotImplementedError @property def retty(self): raise NotImplementedError @property def retval(self): raise NotImplementedError @property def ins(self): return [a for i,a in self.arguments if i == IN or i == INOUT] @property def outs(self): return [a for i,a in self.arguments if i == OUT or i == INOUT] @property def argtys(self): return [arg.ty for arg in self.ins] def compile(self, **opts): """ Compile the kernel into native code. """ from blaze.blir import compile return compile(str(self), **opts) def verify(self): """ Verify the kernel is well-formed before compilation. """ shape = None # uniform dimensions for varg in self.ins: if isinstance(varg, VectorArg): if not shape: shape = varg.shape assert varg.shape == shape return True def __add__(self, other): """ Kernel fusion """ if isinstance(other, Kernel): return fuse(self, other) else: raise NotImplementedError class Logic(AST): """ The inner loop logic of the kernel, can be transparently treated as if it were a Python AST and manipulated with term-rewrite rules. """ def __init__(self, sexp): self.ast = ast.parse(sexp) self._fields = self.ast._fields self._attributes = self.ast._attributes self.body = self.ast.body def __str__(self): return to_source(self.ast) + ';' #------------------------------------------------------------------------ # Kernel Parameters #------------------------------------------------------------------------ class ScalarArg(object): def __init__(self, ty, name=None): self.ty = ty self.name = name class VectorArg(object): def __init__(self, shape, ty, name=None): self.ty = ty self.name = name self.shape = shape #------------------------------------------------------------------------ # Kernels #------------------------------------------------------------------------ # Intentionally designed to mirror the PyOpenCL and PyCuda API. These # high level descriptions of the kernels will allow us to fuse and # compose kernels symbolically at a high level. LLVM then takes care of # the instruction level optimizations. class ElementwiseKernel(Kernel): def __init__(self, arguments, operation, name=None): self.arguments = arguments self.operation = operation self.name = name @property def retty(self): # Elemntwise always returns void, if a new array is # needed then memory is allocated outside of the kernel return 'void' @property def retval(self): return None @property def dimensions(self): # assuming this is verified... 
        for varg in self.ins:
            if isinstance(varg, VectorArg):
                for dim in varg.shape:
                    yield (0, dim)
                break

    def __str__(self):
        # Cache the generated source on first use. A plain attribute name is
        # used so the hasattr() check matches the attribute actually stored.
        if hasattr(self, '_cached'):
            return self._cached

        ivars = {}
        icount = 0

        ins = {}
        outs = {}
        params = []

        _operation = self.operation

        # Preamble
        # --------
        for i, arg in enumerate(self.ins):
            name = arg.name or anon('in')
            param = Arg(arg.ty, name)
            params.append(param)
            ins[i] = param
            _operation = _operation.replace('_in%d' % i, name)

        for i, arg in enumerate(self.outs):
            name = arg.name or anon('out')
            param = Arg(arg.ty, name)
            params.append(param)
            outs[i] = param
            _operation = _operation.replace('_out%d' % i, name)

        inner = Logic(_operation)

        # Loops
        # -----
        for lower, upper in self.dimensions:
            ivar = 'i%s' % icount
            ivars[ivar] = VarDecl('int', ivar, 0)
            inner = For(ivar, Range(lower, upper), Block([inner]))
            icount += 1

        # Return
        # ------
        # always void for elemwise
        ret = []

        # Kernel Body
        decls = list(ivars.values())
        body = Block(decls + [inner] + ret)

        fn = FuncDef(
            name = self.name or anon('kernel'),
            args = params,
            ret  = self.retty,
            body = body,
        )

        self._cached = str(fn)
        return self._cached

class ZipKernel(Kernel):

    def __init__(self, arguments, operation, name=None):
        self.arguments = arguments
        self.operation = operation
        self.name = name

    def __str__(self):
        # TODO
        raise NotImplementedError

class ReductionKernel(Kernel):
    # Shadow the read-only base-class property with a plain class attribute
    # so instances can assign retty directly.
    retty = None

    def __init__(self, retty, neutral, reduce_expr, map_expr, arguments,
                 name=None):
        self.retty = retty
        self.neutral = neutral
        self.reduce_expr = reduce_expr
        self.map_expr = map_expr
        self.arguments = arguments
        self.name = name

    def __str__(self):
        # TODO
        raise NotImplementedError

class ScanKernel(Kernel):
    retty = None

    def __init__(self, retty, arguments, scan_expr, neutral,
                 output_statement, name=None):
        self.retty = retty
        self.neutral = neutral
        self.scan_expr = scan_expr
        self.output_statement = output_statement
        self.arguments = arguments
        self.name = name

    def __str__(self):
        # TODO
        raise NotImplementedError

class OuterKernel(Kernel):
    retty = None

    def __init__(self, retty, arguments, scan_expr, neutral,
                 output_statement, name=None):
        self.retty = retty
        self.neutral = neutral
        self.scan_expr = scan_expr
        self.output_statement = output_statement
        self.arguments = arguments
        self.name = name

    def __str__(self):
        # TODO
        raise NotImplementedError

#------------------------------------------------------------------------
# Kernel Fusion
#------------------------------------------------------------------------

# Naive kernel fusion

def fuse(k1, k2):
    kty1 = k1.__class__
    kty2 = k2.__class__

    if kty1 == ElementwiseKernel and kty2 == ElementwiseKernel:
        return ElementwiseKernel(k1.arguments + k2.arguments, k1.operation)
    else:
        raise NotImplementedError

def compose(k1, k2):
    raise NotImplementedError
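# A minimal sketch of driving ElementwiseKernel by hand. The module name
# ("kernels") and the BLIR type string ('array[int]') are assumptions; only
# the (direction, arg) tuples, the '_in0'/'_out0' placeholders, and the
# IN/OUT flags come from the classes above.
from kernels import ElementwiseKernel, VectorArg, IN, OUT

double = ElementwiseKernel(
    arguments=[
        (IN,  VectorArg((100,), 'array[int]', name='x')),
        (OUT, VectorArg((100,), 'array[int]', name='y')),
    ],
    operation='_out0 = _in0 * 2',
    name='double',
)

double.verify()     # checks that all input vectors share one shape
print(str(double))  # emits the generated kernel source: 'y = x * 2'
                    # wrapped in a for-loop over the 100-element dimension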
import os.path

from sentry.models import Activity
from sentry.services.smtp import SentrySMTPServer, STATUS
from sentry.testutils import TestCase
from sentry.utils.email import group_id_to_email, email_to_group_id

fixture = open(os.path.dirname(os.path.realpath(__file__)) + '/email.txt').read()


class SentrySMTPTest(TestCase):
    def setUp(self):
        self.address = ('0.0.0.0', 0)
        self.server = SentrySMTPServer(*self.address)
        self.mailto = group_id_to_email(self.group.pk)
        self.event  # side effect of generating an event

    def test_decode_email_address(self):
        self.assertEqual(email_to_group_id(self.mailto), self.group.pk)

    def test_process_message(self):
        self.assertEqual(
            self.server.process_message('', self.user.email, [self.mailto], fixture),
            STATUS[200])
        self.assertEqual(
            Activity.objects.filter(type=Activity.NOTE)[0].data,
            {'text': 'sup'})

    def test_process_message_no_recipients(self):
        self.assertEqual(
            self.server.process_message('', self.user.email, [], fixture),
            STATUS[550])

    def test_process_message_too_long(self):
        self.assertEqual(
            self.server.process_message('', self.user.email, [self.mailto], fixture * 100),
            STATUS[552])
        self.assertEqual(Activity.objects.count(), 0)

    def test_process_message_invalid_email(self):
        self.assertEqual(
            self.server.process_message('', self.user.email, ['lol@localhost'], fixture),
            STATUS[550])
import collections import optparse # using optparse as hyrda still python 2.6 import re import sys def report_error(error): """Prints the error, and exits -1""" print error sys.stderr.write(error) sys.stderr.write("\n") sys.stderr.flush() sys.exit(1) def clean_part(part, tab_replace=" "): part = part.strip() part = part.replace("\t", tab_replace) return part def merge_files(file_paths, file_names, target_path=None, divider="\t", reguired_row_regexes=[], negative_row_regexes=[], column_sort=False, row_sort=False, na_value="", tab_replace=" ", verbose=False): """ Merges a list of files into a single tsv file. file_paths is a list of paths to the input files file_names is an equally long list of names for these files which will be taken as the column names. Note: use run_merge_files to shorten the file_names, read them from a file or to use the file_paths as file names target_path specifies where the tsv file will be written divider is the string to be search for in each line of the input files. If found exactly once the part before will be considered a row_name and the part after the data Note: If the same row_name is found only the last line is used. column_sort and row_sort if set cause the data to be sorted accordingly. reguired_row_regexes if provided must be a list of regex patterns. Each row_name must match at least one of these for the row to be included negative_row_regexes if provided must be a list of regex patterns. Each row_name must match none of these for the row to be included na_value whenever a file does not have data for a row_name tab_replace is used to replace any tabs that remain in the row_names and or data after they have been striped of starting and ending whitespace verbose if set will cause more verbose infomormation such as lines that do not have the divider """ # Check parameters if not file_paths: report_error("One or more file_paths parameter must be provided") if not target_path: report_error("No target_path parameter provided") if len(file_names) != len(file_paths): report_error("Found " + str(len(file_paths)) + " file_paths but file_names/names_path contains " + str(len(file_names)) + " values.") # Read data from file all_values = collections.defaultdict(lambda: collections.defaultdict(lambda: na_value)) for count, file_path in enumerate(file_paths): mis_match = 0 with open(file_path, 'r') as f: for line in f: parts = line.strip().split(divider) if len(parts) == 2: key = clean_part(parts[0], tab_replace) value = clean_part(parts[1], tab_replace) all_values[key][file_names[count]] = value else: mis_match += 1 if verbose: if mis_match < 5: print "ignoring following line from", file_path print line if mis_match > 0: print "In file " + file_path + " " + str(mis_match) + " lines did not have 1 divider (" + divider + ") " # rows names are all the keys from the data found row_names = all_values.keys() # check row_names against the regex rules if reguired_row_regexes or negative_row_regexes: ok_names = [] if reguired_row_regexes: reguired_res = [] for reguired_row_regex in reguired_row_regexes: reguired_res.append(re.compile(reguired_row_regex)) if negative_row_regexes: negative_res = [] for negative_row_regex in negative_row_regexes: negative_res.append(re.compile(negative_row_regex)) for row_name in row_names: if reguired_row_regexes: ok = False for reguired_re in reguired_res: if reguired_re.search(row_name): ok = True else: ok = True if negative_row_regexes and ok: for negative_re in negative_res: if negative_re.search(row_name): ok = False if ok: 
ok_names.append(row_name) row_names = ok_names # Sort keys if required if column_sort: file_names = sorted(file_names) if row_sort: row_names = sorted(row_names) # Write the data with open(target_path, 'w') as f: for name in file_names: f.write("\t") f.write(name) f.write("\n") for key in row_names: f.write(key) for name in file_names: f.write("\t") f.write(all_values[key][name]) f.write("\n") # To run the method shortening and if reguried getting file_names or file_paths use this section def remove_common(names): start = names[0] end = names[0] for name in names: while len(start) > 0 and not(name.startswith(start)): start = start[: -1] while len(end) > 0 and not(name.endswith(end)): end = end[1:] new_names = [] for name in names: if len(end) > 0: new_name = name[len(start): -len(end)] else: new_name = name[len(start):] new_names.append(new_name) return new_names # See merge_files method for kwargs def run_merge_files(file_paths=[], file_names=[], files_path=None, **kwargs): """ Handles file paths and file names before calling merge-files. file_paths is a list of the paths to be merge together. file_names is a list of names that will be shortened and then used for column names. The lenght of file_names must match file_paths, and the order is relevant to file_names. files_path if provided will the path of files paths and or file names to be used if they are not supplied directly. The kwargs arguements are defined by merge_files method which is called at the end of this method. """ # read file_paths and/or file_names if required if files_path: if file_paths: print "Using parameters file_paths and not the ones in files_path" else: file_paths = read_names(files_path) if file_names: print "Using parameters file_names and not the ones in files_path" else: file_names = read_names(files_path) # use file_paths if no file_names provided if not file_names: file_names = file_paths #To avoid wide column names the start and end text shared by all names is removed file_names = remove_common(file_names) #Call the name merge_files method merge_files(file_paths, file_names, **kwargs) # From here on down is the code if this is being run from the command line including galaxy. 
def remove_symbols(s): if s.find("__") == -1: return s # Patterns used by Galaxy s = s.replace("__cb__", ']') s = s.replace("__cc__", '}') s = s.replace("__dq__", '"') s = s.replace("__lt__", '<') s = s.replace("__gt__", '>') s = s.replace("__ob__", '[') s = s.replace("__oc__", '{') s = s.replace("__sq__", "'") # Patterns added by Christian s = s.replace("__in__", '%in%') s = s.replace("__not__", '!') end = 0 # tab = 9 # | = 124 while True: start = s.find("__", end) if start == -1: return s end = s.find("__", start + 2) + 2 if end == 1: return s part = s[start + 2: end - 2] if part == "": # start + 2 to leave one set of __ behind s = s[:start + 2] + s[end:] end = start + 2 else: try: ascii = int(part) s = s[:start] + chr(ascii) + s[end:] end = start - 1 # (2) __ removed before start and one character added after so -1 except ValueError: pass return s def read_names(names_path): names = [] with open(names_path, 'r') as f: for line in f: line = line.strip() if len(line) > 0: names.append(line) return names if __name__ == '__main__': parser = optparse.OptionParser() parser.add_option("--verbose", action="store_true", default=False, help="If set will generate output of what the tool is doing.") parser.add_option("--file_path", action="append", type="string", help="Path to one of the files to be merged together.") parser.add_option("--file_name", action="append", type="string", help="Names for the files. To be used to generate column names. " "Order and size are relavant and must match file_path. " "Optional: Can also be provides as a path to a file using names_path " "If neither are provide the file_paths are used.") parser.add_option("--files_path", action="store", type="string", help="Path to file that holds the file_paths and or file_names. " "Ignored if file_paths and or file_names are provided directly.") parser.add_option("--target_path", action="store", type="string", help="Path to write merged data to") parser.add_option("--divider", action="store", type="string", help="Divider between key and value. Special symbols can be entered using galaxy code or __acsii__ (for __ use ____). " "Note: After splitiing on divider both parts will be trimmed for whitespace.") parser.add_option("--na_value", action="store", type="string", help="String to use when the part before the divider/ row name is found in some files but not in others. " "Default if not specified is a blank. ") parser.add_option("--column_sort", action="store_true", default=False, help="If set will sort the columns based on shortened file names.") parser.add_option("--row_sort", action="store_true", default=False, help="If set will sort the row based on shortened file names.") parser.add_option("--reguired_row_regex", action="append", type="string", help="If provided, only rows whose cleaned name matches one or more of these regex rules will be kept. " "Special symbols can be entered using galaxy code or __acsii__ (for __ use ____) ") parser.add_option("--negative_row_regex", action="append", type="string", help="If provided, only rows whose cleaned name matches none of these regex rules will be kept. " "Special symbols can be entered using galaxy code or __acsii__ (for __ use ____) ") parser.add_option("--tab_replace", action="store", type="string", default=" ", help="Value to beinserted in data including column and row names whenever a tab is found. 
" "Default is a single space.") (options, args) = parser.parse_args() if not options.divider: report_error("No divider parameter provided") clean_divider = remove_symbols(options.divider) if options.verbose and (clean_divider != options.divider): print "divider", options.divider, "cleaned to", clean_divider options.divider = clean_divider if not options.na_value: if options.verbose: print "As no na-value provided a blank space will be used" options.na_value = "" if not options.tab_replace: options.tab_replace = " " if not options.reguired_row_regex: options.reguired_row_regex = [] for i, rule in enumerate(options.reguired_row_regex): clean_rule = remove_symbols(rule) if options.verbose and (clean_rule != rule): print "reguired_row_regex", rule, "cleaned to", clean_rule options.reguired_row_regex[i] = clean_rule if not options.negative_row_regex: options.negative_row_regex = [] for i, rule in enumerate(options.negative_row_regex): clean_rule = remove_symbols(rule) if options.verbose and (clean_rule != rule): print "negative_row_regex", rule, "cleaned to", clean_rule options.negative_row_regex[i] = remove_symbols(rule) run_merge_files(file_paths=options.file_path, file_names=options.file_name, files_path=options.files_path, target_path=options.target_path, verbose=options.verbose, divider=options.divider, column_sort=options.column_sort, row_sort=options.row_sort, na_value=options.na_value, tab_replace=options.tab_replace, reguired_row_regexes=options.reguired_row_regex, negative_row_regexes=options.negative_row_regex)
def BFS(graph, root, k):
    """Breadth-first traversal from `root`, writing edge records as CSV lines.

    Relies on module-level globals that must be defined by the caller:
    `A` (an edge-weight matrix indexed as A[u][v]) and the open file handles
    `fil_out` (all tree edges) and `fil` (edges between the first k levels).
    """
    checked = []   # FIFO queue of discovered-but-unvisited nodes
    visited = []
    l = [root]
    level = [l]    # level[i] holds the nodes at BFS depth i
    checked.append(root)
    while len(checked) > 0:
        v = checked.pop(0)
        visited.append(v)
        l = []
        for edge in graph[v]:
            if edge not in checked and edge not in visited:
                checked.append(edge)
                str1 = "v" + str(v) + "," + "v" + str(edge) + "," + "false" + "," + str(A[v][edge]) + "," + "true\n"
                fil_out.write(str1)
        # Build the next level: neighbours of the current last level,
        # minus every node already assigned to a level.
        for edge in level[-1]:
            l = list(set(graph[edge]) | set(l))
        for i in range(len(level)):
            for j in level[i]:
                if j in l:
                    l.remove(j)
        if len(l) > 0:
            level.append(l)
    print len(level)
    # Write the edges that connect consecutive levels, up to depth k.
    for i in range(k - 1):
        visit = []
        for each_node in level[i]:
            inter = list(set(graph[each_node]) & set(level[i + 1]))
            for each_inter in inter:
                if each_inter not in visit:
                    str1 = "v" + str(each_node) + "," + "v" + str(each_inter) + "," + "false" + "," + str(A[each_node][each_inter]) + "," + "true\n"
                    fil.write(str1)
                    visit.append(each_inter)
    print level
    print len(level)
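# A hypothetical driver for BFS() above, showing the globals it expects.
# It only works if placed in the same module, below the function; the graph,
# weight matrix, and file names are all made up for illustration.
if __name__ == '__main__':
    graph = {0: [1, 2], 1: [0, 3], 2: [0, 3], 3: [1, 2]}
    A = [[0, 5, 7, 0],
         [5, 0, 0, 2],
         [7, 0, 0, 4],
         [0, 2, 4, 0]]       # A[u][v] = weight of edge u-v
    fil_out = open("all_edges.csv", "w")
    fil = open("level_edges.csv", "w")
    BFS(graph, 0, 2)
    fil_out.close()
    fil.close()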
# -*- coding: utf-8 -*- # Copyright 2009-2017 Jaap Karssenberg <jaap.karssenberg@gmail.com> # TODO: allow more complex queries for filter, in particular (NOT tag AND tag) # allow multiple tabs in dialog / side pane with configurable query # # TODO: add an interface for this plugin in the WWW frontend # # TODO: commandline option # - open dialog # - output to stdout with configurable format # - force update, intialization # # TODO: store parser settings in notebook, not in preferences # in dialog make it clear what is per notebook and what is user prefs # tab in properties, link to open that from plugin prefs ? # TODO: test coverage for the start date label (and due with "<") # TODO: test coverage for start / due date from calendar page # TODO: test coverage for sorting in list_open_tasks # TODO: test coverage include / exclude sections # TODO: update manual from __future__ import with_statement from zim.plugins import PluginClass, extends, ObjectExtension, WindowExtension from zim.actions import action from zim.config import StringAllowEmpty from zim.signals import DelayedCallback from zim.gui.widgets import RIGHT_PANE, PANE_POSITIONS from .indexer import TasksIndexer, TasksView from .gui import TaskListDialog, TaskListWidget class TaskListPlugin(PluginClass): plugin_info = { 'name': _('Task List'), # T: plugin name 'description': _('''\ This plugin adds a dialog showing all open tasks in this notebook. Open tasks can be either open checkboxes or items marked with tags like "TODO" or "FIXME". This is a core plugin shipping with zim. '''), # T: plugin description 'author': 'Jaap Karssenberg', 'help': 'Plugins:Task List' } parser_preferences = ( # key, type, label, default ('all_checkboxes', 'bool', _('Consider all checkboxes as tasks'), True), # T: label for plugin preferences dialog ('labels', 'string', _('Labels marking tasks'), 'FIXME, TODO', StringAllowEmpty), # T: label for plugin preferences dialog - labels are e.g. 
"FIXME", "TODO" ('integrate_with_journal', 'choice', _('Use date from journal pages'), 'start', ( ('none', _('do not use')), # T: choice for "Use date from journal pages" ('start', _('as start date for tasks')), # T: choice for "Use date from journal pages" ('due', _('as due date for tasks')) # T: choice for "Use date from journal pages" )), ('included_subtrees', 'string', _('Section(s) to index'), '', StringAllowEmpty), # T: Notebook sections to search for tasks - default is the whole tree (empty string means everything) ('excluded_subtrees', 'string', _('Section(s) to ignore'), '', StringAllowEmpty), # T: Notebook sections to exclude when searching for tasks - default is none ) plugin_preferences = ( # key, type, label, default ('embedded', 'bool', _('Show tasklist in sidepane'), False), # T: preferences option ('pane', 'choice', _('Position in the window'), RIGHT_PANE, PANE_POSITIONS), # T: preferences option ) + parser_preferences + ( ('nonactionable_tags', 'string', _('Tags for non-actionable tasks'), '', StringAllowEmpty), # T: label for plugin preferences dialog ('tag_by_page', 'bool', _('Turn page name into tags for task items'), False), # T: label for plugin preferences dialog ('use_workweek', 'bool', _('Flag tasks due on Monday or Tuesday before the weekend'), False), # T: label for plugin preferences dialog ) hide_preferences = ('nonactionable_tags', 'tag_by_page', 'use_workweek') # These are deprecated, but I don't dare to remove them yet # so hide them in the configuration dialog instead @extends('Notebook') class NotebookExtension(ObjectExtension): __signals__ = { 'tasklist-changed': (None, None, ()), } def __init__(self, plugin, notebook): ObjectExtension.__init__(self, plugin, notebook) self.notebook = notebook self._parser_key = self._get_parser_key() self.index = notebook.index if self.index.get_property(TasksIndexer.PLUGIN_NAME) != TasksIndexer.PLUGIN_DB_FORMAT: self.index._db.executescript(TasksIndexer.TEARDOWN_SCRIPT) # XXX self.index.flag_reindex() self.indexer = None self._setup_indexer(self.index, self.index.update_iter) self.connectto(self.index, 'new-update-iter', self._setup_indexer) self.connectto(plugin.preferences, 'changed', self.on_preferences_changed) def _setup_indexer(self, index, update_iter): if self.indexer is not None: self.disconnect_from(self.indexer) self.indexer.disconnect_all() self.indexer = TasksIndexer.new_from_index(index, self.plugin.preferences) update_iter.add_indexer(self.indexer) self.connectto(self.indexer, 'tasklist-changed') def on_preferences_changed(self, preferences): # Need to construct new parser, re-index pages if self._parser_key != self._get_parser_key(): self._parser_key = self._get_parser_key() self.disconnect_from(self.indexer) self.indexer.disconnect_all() self.indexer = TasksIndexer.new_from_index(self.index, preferences) self.index.flag_reindex() self.connectto(self.indexer, 'tasklist-changed') def on_tasklist_changed(self, indexer): self.emit('tasklist-changed') def _get_parser_key(self): return tuple( self.plugin.preferences[t[0]] for t in self.plugin.parser_preferences ) def teardown(self): self.indexer.disconnect_all() self.notebook.index.update_iter.remove_indexer(self.indexer) self.index._db.executescript(TasksIndexer.TEARDOWN_SCRIPT) # XXX self.index.set_property(TasksIndexer.PLUGIN_NAME, None) @extends('MainWindow') class MainWindowExtension(WindowExtension): uimanager_xml = ''' <ui> <menubar name='menubar'> <menu action='view_menu'> <placeholder name="plugin_items"> <menuitem action="show_task_list" /> 
</placeholder> </menu> </menubar> <toolbar name='toolbar'> <placeholder name='tools'> <toolitem action='show_task_list'/> </placeholder> </toolbar> </ui> ''' def __init__(self, plugin, window): WindowExtension.__init__(self, plugin, window) self._widget = None self.on_preferences_changed(plugin.preferences) self.connectto(plugin.preferences, 'changed', self.on_preferences_changed) @action(_('Task List'), stock='zim-task-list', readonly=True) # T: menu item def show_task_list(self): # TODO: add check + dialog for index probably_up_to_date index = self.window.ui.notebook.index # XXX tasksview = TasksView.new_from_index(index) dialog = TaskListDialog.unique(self, self.window, tasksview, self.plugin.preferences) dialog.present() def on_preferences_changed(self, preferences): if preferences['embedded']: if self._widget is None: self._init_widget() else: self._widget.task_list.refresh() try: self.window.remove(self._widget) except ValueError: pass self.window.add_tab(_('Tasks'), self._widget, preferences['pane']) # T: tab label for side pane self._widget.show_all() else: if self._widget: self.window.remove(self._widget) self._widget = None def _init_widget(self): index = self.window.ui.notebook.index # XXX tasksview = TasksView.new_from_index(index) opener = self.window.get_resource_opener() uistate = self.window.ui.uistate['TaskListSidePane'] self._widget = TaskListWidget(tasksview, opener, self.plugin.preferences, uistate) def on_tasklist_changed(o): self._widget.task_list.refresh() callback = DelayedCallback(10, on_tasklist_changed) # Don't really care about the delay, but want to # make it less blocking - now it is at least on idle ### XXX HACK to get dependency to connect to ### -- no access to plugin, so can;t use get_extension() ## -- duplicat of this snippet in TaskListDialog for e in self.window.ui.notebook.__zim_extension_objects__: if hasattr(e, 'indexer') and e.indexer.__class__.__name__ == 'TasksIndexer': self.connectto(e, 'tasklist-changed', callback) break else: raise AssertionError('Could not find tasklist notebook extension') def teardown(self): if self._widget: self.window.remove(self._widget) self._widget = None
# -*- coding: utf-8 -*-

from __future__ import absolute_import
from __future__ import unicode_literals

# python lib:
from datetime import date, timedelta

# django:
from django.db.models.functions import ExtractHour
from django.views.generic import ListView, DetailView
from django.conf import settings
from django.shortcuts import get_object_or_404
from django.utils.dates import MONTHS_ALT
try:
    from django.core.urlresolvers import reverse
except ImportError:
    from django.urls import reverse

# thirdparties:
import six

# happenings:
from .models import Event
from .utils.displays import month_display, day_display
from .utils.next_event import get_next_event
from .utils.mixins import JSONResponseMixin
from .utils import common as c

URLS_NAMESPACE = getattr(settings, "CALENDAR_URLS_NAMESPACE", 'calendar')


class GenericEventView(JSONResponseMixin, ListView):
    model = Event

    def render_to_response(self, context, **kwargs):
        self.postprocess_context(context)
        if self.request.is_ajax():
            return self.render_to_json_response(context, **kwargs)
        return super(GenericEventView, self).render_to_response(
            context, **kwargs
        )

    def get_context_data(self, **kwargs):
        context = super(GenericEventView, self).get_context_data(**kwargs)

        self.net, self.category, self.tag = c.get_net_category_tag(
            self.request
        )

        if self.category is not None:
            context['cal_category'] = self.category
        if self.tag is not None:
            context['cal_tag'] = self.tag
        return context

    def postprocess_context(self, context, *args, **kwargs):
        return


class EventMonthView(GenericEventView):
    template_name = 'happenings/event_month_list.html'

    def get_year_and_month(self, net, qs, **kwargs):
        """
        Get the year and month. First tries from kwargs, then from
        querystrings. If none, or if cal_ignore qs is specified,
        sets year and month to this year and this month.
        """
        now = c.get_now()
        year = now.year
        month = now.month + net
        month_orig = None

        if 'cal_ignore=true' not in qs:
            # the original tested `'year' and 'month' in self.kwargs`, which
            # only checks 'month' because the literal 'year' is always truthy
            if 'year' in self.kwargs and 'month' in self.kwargs:  # try kwargs
                year, month_orig = map(
                    int, (self.kwargs['year'], self.kwargs['month'])
                )
                month = month_orig + net
            else:
                try:  # try querystring
                    year = int(self.request.GET['cal_year'])
                    month_orig = int(self.request.GET['cal_month'])
                    month = month_orig + net
                except Exception:
                    pass
        # return the year and month, and any errors that may have occurred
        # due to an invalid month/year being given.
return c.clean_year_month(year, month, month_orig) def get_month_events(self, *args, **kwargs): return Event.objects.all_month_events(*args, **kwargs) def get_context_data(self, **kwargs): context = super(EventMonthView, self).get_context_data(**kwargs) qs = self.request.META['QUERY_STRING'] year, month, error = self.get_year_and_month(self.net, qs) # add a dict containing the year, month, and month name to the context current = dict( year=year, month_num=month, month=MONTHS_ALT[month][:3] ) context['current'] = current display_month = MONTHS_ALT[month] if isinstance(display_month, six.binary_type): display_month = display_month.decode('utf-8') context['month_and_year'] = u"%(month)s, %(year)d" % ( {'month': display_month, 'year': year} ) if error: # send any year/month errors context['cal_error'] = error all_month_events = list( self.get_month_events( year, month, self.category, self.tag, loc=True, cncl=True ).annotate( start_hour=ExtractHour('start_date') ).order_by('start_hour') ) context['raw_all_month_events'] = all_month_events context['show_events'] = False if getattr(settings, "CALENDAR_SHOW_LIST", False): context['show_events'] = True context['events'] = c.order_events(all_month_events, d=True) \ if self.request.is_ajax() else c.order_events(all_month_events) return context def postprocess_context(self, context, *args, **kwargs): qs = self.request.META['QUERY_STRING'] mini = True if 'cal_mini=true' in qs else False start_day = getattr(settings, "CALENDAR_START_DAY", 0) # get any querystrings that are not next/prev/year/month if qs: qs = c.get_qs(qs) if getattr(settings, "CALENDAR_PASS_VIEW_CONTEXT_TO_DISPLAY_METHOD", False): month_display_base_context = dict(context) month_display_base_context.pop('events', None) else: month_display_base_context = None all_month_events = context['raw_all_month_events'] context['calendar'] = month_display( context['current']['year'], context['current']['month_num'], all_month_events, start_day, self.net, qs, mini, request=self.request, base_context=month_display_base_context, ) class EventDayView(GenericEventView): template_name = 'happenings/event_day_list.html' def get_calendar_back_url(self, year, month_num): self.request.current_app = self.request.resolver_match.namespace if URLS_NAMESPACE: view_name = URLS_NAMESPACE + ':list' else: view_name = 'list' return reverse(view_name, args=(year, month_num), current_app=self.request.current_app) def check_for_cancelled_events(self, d): """Check if any events are cancelled on the given date 'd'.""" for event in self.events: for cn in event.cancellations.all(): if cn.date == d: event.title += ' (CANCELLED)' def get_month_events(self, *args, **kwargs): return Event.objects.all_month_events(*args, **kwargs) def get_context_data(self, **kwargs): context = super(EventDayView, self).get_context_data(**kwargs) kw = self.kwargs y, m, d = map(int, (kw['year'], kw['month'], kw['day'])) year, month, day, error = c.clean_year_month_day(y, m, d, self.net) if error: context['cal_error'] = error # Note that we don't prefetch 'cancellations' because they will be # prefetched later (in day_display in displays.py) all_month_events = self.get_month_events( year, month, self.category, self.tag ) self.events = day_display( year, month, all_month_events, day ) self.check_for_cancelled_events(d=date(year, month, day)) context['events'] = self.events display_month = MONTHS_ALT[month] if isinstance(display_month, six.binary_type): display_month = display_month.decode('utf-8') context['month'] = display_month 
context['month_num'] = month context['year'] = year context['day'] = day context['month_day_year'] = u"%(month)s %(day)d, %(year)d" % ( {'month': display_month, 'day': day, 'year': year} ) context['calendar_back_url'] = self.get_calendar_back_url(year, month) # for use in the template to build next & prev querystrings context['next'], context['prev'] = c.get_next_and_prev(self.net) return context class EventDetailView(DetailView): model = Event context_object_name = 'event' def get_object(self): return get_object_or_404( Event.objects.prefetch_related( 'location', 'categories', 'tags', 'cancellations' ), pk=self.kwargs['pk'] ) def get_cncl_days(self): now = c.get_now() cncl = self.object.cancellations.all() return [(x.date, x.reason) for x in cncl if x.date >= now.date()] def check_cncl(self, d): cncl = self.object.cancellations.all() return True if [x for x in cncl if x.date == d] else False def get_context_data(self, **kwargs): now = c.get_now() context = super(EventDetailView, self).get_context_data(**kwargs) e = self.object for choice in Event.REPEAT_CHOICES: if choice[0] == e.repeat: context['repeat'] = choice[1] context['cncl_days'] = self.get_cncl_days() event = [e] # event needs to be an iterable, see get_next_event() if not e.repeats('NEVER'): # event is ongoing; get next occurrence if e.will_occur(now): year, month, day = get_next_event(event, now) next_event = date(year, month, day) context['next_event'] = date(year, month, day) context['next_or_prev_cncl'] = self.check_cncl(next_event) else: # event is finished repeating; get last occurrence end = e.end_repeat last_event = end if e.repeats('WEEKDAY'): year, month, day = c.check_weekday( end.year, end.month, end.day, reverse=True ) last_event = date(year, month, day) context['last_event'] = last_event context['next_or_prev_cncl'] = self.check_cncl(last_event) else: if e.is_chunk(): # list of days for single-day event chunk context['event_days'] = ( # list comp (e.l_start_date + timedelta(days=x)) for x in range(e.start_end_diff + 1) ) else: # let template know if this single-day, non-repeating event is # cancelled context['this_cncl'] = self.check_cncl(e.l_start_date.date()) return context
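
# Aside: the month arithmetic in get_year_and_month (month = month_orig + net)
# can leave `month` outside 1..12, e.g. December with net=1 gives 13. The
# helper c.clean_year_month is expected to normalize and validate this; below
# is a minimal sketch of just the normalization step (an assumption -- the
# real helper lives in happenings.utils.common and also reports errors):
def _normalize_year_month(year, month):
    year += (month - 1) // 12
    month = (month - 1) % 12 + 1
    return year, month

# _normalize_year_month(2016, 13) -> (2017, 1)
# _normalize_year_month(2016, 0)  -> (2015, 12)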
import socket
import logging

logger = logging.getLogger("debugging")


def check_port(host, port_number, content=None):
    logger.info(u"Port check, host: %s, port: %s, content: '%s'" % (host, port_number, content))
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(10)
    try:
        # connect_ex returns an error number instead of raising an exception... in theory
        try:
            result = s.connect_ex((host, port_number))
        except Exception as e:
            logger.error(u"Error: %s" % e)
            return {'valid': False}
        logger.info(u"Port check, connection errno: %i" % result)
        if result != 0:
            return {'valid': False}
        ret_obj = {'status_ok': True, 'valid': True}
        if content:
            try:
                recv_content = s.recv(512)
            except Exception as e:
                logger.error(u"Error: %s" % e)
                return {'valid': False}
            logger.info(u"Received: %s" % recv_content)
            if content.lower() not in recv_content.lower():
                ret_obj['status_ok'] = False
        return ret_obj
    finally:
        # the original leaked the socket on every return path; close it explicitly
        s.close()
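
# Example usage (illustrative; host names are hypothetical). A refused or
# unreachable port, or a failed recv, yields {'valid': False}; an open port
# yields {'valid': True, 'status_ok': ...}, where status_ok is False only if
# the expected banner content is missing.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    print(check_port('example.com', 80))         # e.g. {'status_ok': True, 'valid': True}
    print(check_port('example.com', 22, 'SSH'))  # also checks the service banner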
'''
Synk - Sublime Text Plugin
'''
import os
import socket
import sublime
import sublime_plugin
from threading import Thread, Timer

# variables for storing user defined settings
settings_filename = "synk_pre.sublime-settings"
enabled_field = "enabled"
server_ips_field = "project_server_ips"  # NOTE: add feature to have more than one server later
uplink_ports_field = "uplink_ports"
downlink_ports_field = "downlink_ports"
all_files_field = "synk_all_files"
current_file_field = "synk_current_file"
delay_field = "delay_in_seconds"

# module-level connection; created in SynkPreCommand.run when synk is enabled
serv_conn = None


# Object for connecting to the server
class ServerConnection(object):
    def __init__(self, attempts=5):
        self.settings = sublime.load_settings(settings_filename)
        self.delay = self.settings.get(delay_field)
        self.server = self.settings.get(server_ips_field)
        self.up_port = self.settings.get(uplink_ports_field)
        self.down_port = self.settings.get(downlink_ports_field)
        self.current_file = self.settings.get(current_file_field)
        self.upsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.downsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.file_locked = False

        for a in range(attempts):
            try:
                self.upsock.connect((self.server, self.up_port))
                self.downsock.connect((self.server, self.down_port))
                return
            except socket.error:
                continue
        sublime.status_message("An error occurred while attempting to connect to the server.")

    def recv_data(self, conn):
        received = ""
        while True:
            data = conn.recv(4096)
            received += data.decode("utf-8")
            # a short (or empty) read is taken to mean the sender is done;
            # the original compared with <= 4096, which always broke after
            # the first chunk
            if len(data) < 4096:
                break
        return received

    def write_file(self, fname, data, mode="w"):
        with open(fname, mode) as f:
            for line in data:
                f.write(line + '\n')

    def push_changes(self, view, filename=None, attempts=30):
        # default arguments cannot reference self, so the current file is
        # resolved at call time instead
        if filename is None:
            filename = self.current_file
        for a in range(attempts):
            if not self.file_locked:
                self.file_locked = True
                data = filename + '\n' + view.substr(sublime.Region(0, view.size()))
                self.upsock.send(data.encode("utf-8"))
                self.file_locked = False
                break

    def get_changes(self):
        #change_thread = Thread(target=self.get_changes_thread)
        #change_thread.start()
        Timer(self.delay, self.get_changes_thread).start()

    def get_changes_thread(self):
        while True:
            self.recved_data = self.recv_data(self.downsock)
            view = sublime.active_window().active_view()
            if len(self.recved_data) and not self.file_locked and not (view and view.is_loading()):
                self.file_locked = True
                self.write_file(self.current_file, self.recved_data)
                self.file_locked = False


class SynkPreListener(sublime_plugin.EventListener):
    save_queue = []

    @staticmethod
    def generate_backup_filename(filename):
        dirname, basename = [os.path.dirname(filename), os.path.basename(filename).split('.')]
        if len(basename) > 1:
            basename.insert(-1, 'bak')
        else:
            basename.append('bak')
        return dirname + '/' + '.'.join(basename)

    def on_modified(self, view):
        settings = sublime.load_settings(settings_filename)
        if not (view.file_name() and view.is_dirty()):
            return

        delay = settings.get(delay_field)
        all_files = settings.get(all_files_field)
        current_file = settings.get(current_file_field)

        if not all_files and current_file != view.file_name():
            return

        def callback():
            settings = sublime.load_settings(settings_filename)
            current_file = settings.get(current_file_field)
            if view.is_dirty() and not view.is_loading():
                view.run_command("save")
                if serv_conn is not None:
                    serv_conn.push_changes(view, filename=current_file)
            else:
                content = view.substr(sublime.Region(0, view.size()))
                try:
                    with open(SynkPreListener.generate_backup_filename(view.file_name()),
                              'w', encoding="utf-8") as f:
                        f.write(content)
                except Exception as e:
                    sublime.status_message(str(e))
                    raise e

        # schedule the delayed save; the original defined callback but
        # never armed it
        sublime.set_timeout(callback, int(delay * 1000))


class SynkPreCommand(sublime_plugin.TextCommand):
    def run(self, **kwargs):
        # 'global' must appear before the name is assigned in this scope
        global serv_conn

        enable = kwargs.get("enable", None)
        all_files = kwargs.get("all_files", False)
        settings = sublime.load_settings(settings_filename)

        if enable is None:
            enable = not settings.get(enabled_field)
        if not enable:
            message = "Autosynk is turned off."
            filename = settings.get(current_file_field)

        settings.set(enabled_field, enable)
        settings.set(all_files_field, all_files)
        filename = sublime.active_window().active_view().file_name()
        settings.set(current_file_field, filename)

        if enable:
            message = "Autosynk is turned on."
            if not all_files:
                message += " for: " + os.path.basename(filename)
            serv_conn = ServerConnection()
            serv_conn.get_changes()

        sublime.status_message(message)
# coding: utf-8

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import io

import six
import requests
from nose.tools import with_setup     # type: ignore
from nose.tools import assert_raises  # type: ignore
from nose.tools import raises         # type: ignore

import leancloud
from leancloud import File
from leancloud import ACL

__author__ = 'asaka'


def setup_func():
    leancloud.init(
        os.environ['APP_ID'],
        master_key=os.environ['MASTER_KEY']
    )


def test_basic():  # type: () -> None
    def fn(s):
        f = File('Blah', s, mime_type='text/plain')
        assert f.name == 'Blah'
        assert f._metadata['size'] == 14
        assert f.size == 14

    b = b'blah blah blah'
    fn(io.BytesIO(b))
    fn(memoryview(b))
    if six.PY2:
        import StringIO
        import cStringIO
        fn(StringIO.StringIO(b))
        fn(cStringIO.StringIO(b))
        fn(buffer(b))


def test_create_with_url():  # type: () -> None
    f = File.create_with_url('xxx', u'http://i1.wp.com/leancloud.cn/images/static/default-avatar.png', meta_data={})
    assert f.url == 'http://i1.wp.com/leancloud.cn/images/static/default-avatar.png'


def test_create_without_data():  # type: () -> None
    f = File.create_without_data('a123')
    assert f.id == 'a123'


def test_acl():  # type: () -> None
    acl_ = ACL()
    f = File('Blah', io.BytesIO(b'xxx'))
    assert_raises(TypeError, f.set_acl, 'a')
    f.set_acl(acl_)
    assert f.get_acl() == acl_


@with_setup(setup_func)
def test_save():  # type: () -> None
    user = leancloud.User()
    user.login('user1_name', 'password')
    f = File('Blah.txt', open('tests/sample_text.txt', 'rb'))
    f.save()
    assert f.owner_id == user.id
    assert f.id
    assert f.name == 'Blah.txt'
    assert f.mime_type == 'text/plain'
    assert not f.url.endswith('.')


@with_setup(setup_func)
def test_query():  # type: () -> None
    files = leancloud.Query('File').find()
    for f in files:
        assert isinstance(f, File)
        assert f.url
        assert f.name
        assert f.metadata
    assert isinstance(leancloud.File.query.first(), File)


@with_setup(setup_func)
def test_save_external():  # type: () -> None
    f = File.create_with_url('lenna.jpg', 'http://i1.wp.com/leancloud.cn/images/static/default-avatar.png')
    f.save()
    assert f.id


@raises(ValueError)
def test_thumbnail_url_error():  # type: () -> None
    f = File.create_with_url('xx', '')
    f.get_thumbnail_url(100, 100)


@with_setup(setup_func)
@raises(ValueError)
def test_thumbnail_size_error():  # type: () -> None
    r = requests.get('http://i1.wp.com/leancloud.cn/images/static/default-avatar.png')
    b = io.BytesIO(r.content)
    f = File('Lenna2.jpg', b)
    f.save()
    assert f.id
    f.get_thumbnail_url(-1, -1)
    f.get_thumbnail_url(1, 1, quality=110)


@with_setup(setup_func)
def test_thumbnail():  # type: () -> None
    r = requests.get('http://i1.wp.com/leancloud.cn/images/static/default-avatar.png')
    b = io.BytesIO(r.content)
    f = File('Lenna2.jpg', b)
    f.save()
    assert f.id
    url = f.get_thumbnail_url(100, 100)
    assert url.endswith('?imageView/2/w/100/h/100/q/100/format/png')


@with_setup(setup_func)
def test_destroy():  # type: () -> None
    r = requests.get('http://i1.wp.com/leancloud.cn/images/static/default-avatar.png')
    b = io.BytesIO(r.content)
    f = File('Lenna2.jpg', b)
    f.save()
    assert f.id
    f.destroy()


@with_setup(setup_func)
def test_file_callback():  # type: () -> None
    d = {}

    def noop(token, *args, **kwargs):
        d['token'] = token

    f = File('xxx', io.BytesIO(b'xxx'))
    f._save_to_s3 = noop
    f._save_to_qiniu = noop
    f._save_to_qcloud = noop
    f.save()
    f._save_callback(d['token'], False)
    # time.sleep(3)
    # File should be deleted by API server
    # assert_raises(leancloud.LeanCloudError, File.query().get, f.id)


@with_setup(setup_func)
def test_fetch(): # type: () -> None r = requests.get('http://i1.wp.com/leancloud.cn/images/static/default-avatar.png') b = io.BytesIO(r.content) f = File('Lenna2.jpg', b) f.metadata['foo'] = 'bar' f.save() fetched = File.create_without_data(f.id) fetched.fetch() assert fetched.id == f.id assert fetched.metadata == f.metadata assert fetched.name == f.name assert fetched.url == f.url assert fetched.size == f.size assert fetched.url == f.url f.destroy() def test_checksum(): # type: () -> None f = File('Blah', open('tests/sample_text.txt', 'rb')) assert f._metadata['_checksum'] == 'd0588d95e45eed70745ffabdf0b18acd'
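
# Aside: the '_checksum' asserted in test_checksum looks like an MD5 hex
# digest of the file body (32 hex chars). Assuming that is how the SDK
# computes it, this is a quick way to regenerate the expected value when the
# fixture changes:
import hashlib

def md5_of(path):
    h = hashlib.md5()
    with open(path, 'rb') as fp:
        for chunk in iter(lambda: fp.read(8192), b''):
            h.update(chunk)
    return h.hexdigest()

# print(md5_of('tests/sample_text.txt'))  # expected: d0588d95e45eed70745ffabdf0b18acd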
""" Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import reversion from collections import OrderedDict from django.db.models import Q, Prefetch from django.http import HttpResponse, Http404, JsonResponse from django.shortcuts import get_object_or_404 from django.utils import timezone from django.views.generic import TemplateView from django_filters import rest_framework as restfilters from drf_yasg import openapi from drf_yasg.utils import swagger_auto_schema from reversion.views import RevisionMixin from rest_framework import filters, status, exceptions from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView, ListAPIView from rest_framework.response import Response from rest_framework.views import APIView from drf_multiple_model.views import ObjectMultipleModelAPIView from gwells.documents import MinioClient from gwells.roles import REGISTRIES_VIEWER_ROLE from gwells.models import ProvinceStateCode from gwells.pagination import APILimitOffsetPagination from gwells.roles import REGISTRIES_EDIT_ROLE, REGISTRIES_VIEWER_ROLE from gwells.settings.base import get_env_variable from reversion.models import Version from registries.models import ( AccreditedCertificateCode, ActivityCode, ApplicationStatusCode, Organization, OrganizationNote, Person, PersonNote, ProofOfAgeCode, Register, RegistriesApplication, RegistriesRemovalReason, SubactivityCode, WellClassCode) from registries.permissions import RegistriesEditPermissions, RegistriesEditOrReadOnly from registries.serializers import ( ApplicationAdminSerializer, ApplicationStatusCodeSerializer, ApplicationListSerializer, CityListSerializer, ProofOfAgeCodeSerializer, OrganizationListSerializer, OrganizationAdminSerializer, OrganizationNameListSerializer, PersonAdminSerializer, PersonListSerializer, RegistrationAdminSerializer, RegistriesRemovalReasonSerializer, PersonNoteSerializer, ProvinceStateCodeSerializer, SubactivitySerializer, WellClassCodeSerializer, AccreditedCertificateCodeSerializer, OrganizationNoteSerializer, PersonNameSerializer) from gwells.change_history import generate_history_diff from gwells.views import AuditCreateMixin, AuditUpdateMixin class OrganizationListView(RevisionMixin, AuditCreateMixin, ListCreateAPIView): """ get: Returns a list of all registered drilling organizations post: Creates a new drilling organization record """ permission_classes = (RegistriesEditPermissions,) serializer_class = OrganizationListSerializer pagination_class = None # prefetch related objects for the queryset to prevent duplicate database trips later queryset = Organization.objects.all() \ .select_related('province_state',) \ .prefetch_related('registrations', 'registrations__person') # Allow searching against fields like organization name, address, # name or registration of organization contacts filter_backends = (filters.SearchFilter,) search_fields = ( 'name', 'street_address', 'city', 'registrations__person__first_name', 'registrations__person__surname', 'registrations__applications__file_no' ) def get_queryset(self): return 
self.queryset.filter(expiry_date__gt=timezone.now())


class OrganizationDetailView(RevisionMixin, AuditUpdateMixin, RetrieveUpdateDestroyAPIView):
    """
    get:
    Returns the specified drilling organization

    put:
    Replaces the specified record with a new one

    patch:
    Updates a drilling organization with the fields/values provided in the request body

    delete:
    Removes the specified drilling organization record
    """

    permission_classes = (RegistriesEditPermissions,)

    # 'pk' and 'id' have been replaced by 'org_guid' as primary key for Organization model
    lookup_field = "org_guid"
    serializer_class = OrganizationAdminSerializer

    # prefetch related province, contacts and person records to prevent future additional database trips
    queryset = Organization.objects.all() \
        .select_related('province_state',) \
        .prefetch_related('registrations', 'registrations__person')

    def get_queryset(self):
        return self.queryset.filter(expiry_date__gt=timezone.now())

    def destroy(self, request, *args, **kwargs):
        """
        Set expiry_date to current date
        """
        instance = self.get_object()
        for reg in instance.registrations.all():
            if reg.person.expiry_date is None:
                # implicit string concatenation; the original accidentally
                # *called* the first string with the second as its argument
                raise exceptions.ValidationError(
                    'Organization has registrations associated with it. '
                    'Remove this organization from registration records first.')
        instance.expiry_date = timezone.now()
        instance.save()
        return Response(status=status.HTTP_204_NO_CONTENT)


class PersonOptionsView(APIView):

    @swagger_auto_schema(auto_schema=None)
    def get(self, request, format=None, **kwargs):
        result = {}
        for activity in ActivityCode.objects.all():
            # Well class query
            well_class_query = WellClassCode \
                .objects.filter(
                    qualification__subactivity__registries_activity=activity.registries_activity_code) \
                .order_by('registries_well_class_code').distinct('registries_well_class_code')

            # Sub activity query
            sub_activity_query = SubactivityCode \
                .objects.filter(
                    registries_activity=activity).order_by('display_order')

            # Certificate code query
            cert_code_query = AccreditedCertificateCode \
                .objects.filter(
                    registries_activity=activity.registries_activity_code) \
                .order_by('name')

            result[activity.registries_activity_code] = {
                'well_class_codes': list(map(lambda item: WellClassCodeSerializer(
                    item).data, well_class_query)),
                'subactivity_codes': list(map(lambda item: SubactivitySerializer(
                    item).data, sub_activity_query)),
                'accredited_certificate_codes': list(map(lambda item: AccreditedCertificateCodeSerializer(
                    item).data, cert_code_query))
            }
        result['proof_of_age_codes'] = \
            list(map(lambda item: ProofOfAgeCodeSerializer(item).data,
                     ProofOfAgeCode.objects.all().order_by('display_order')))
        result['approval_outcome_codes'] = \
            list(map(lambda item: ApplicationStatusCodeSerializer(item).data,
                     ApplicationStatusCode.objects.all()))
        result['reason_removed_codes'] = \
            list(map(lambda item: RegistriesRemovalReasonSerializer(item).data,
                     RegistriesRemovalReason.objects.all()))
        result['province_state_codes'] = \
            list(map(lambda item: ProvinceStateCodeSerializer(item).data,
                     ProvinceStateCode.objects.all().order_by('display_order')))

        return Response(result)


def person_search_qs(request):
    """ Returns Person queryset, removing non-active and unregistered drillers for anonymous users """
    query = request.GET

    qs = Person.objects.filter(expiry_date__gt=timezone.now())

    # base registration and application querysets
    registrations_qs = Register.objects.all()
    applications_qs = RegistriesApplication.objects.all()

    # Search for cities (split list and return all matches)
    # search comes in as a comma-separated querystring param e.g:
?city=Atlin,Lake Windermere,Duncan cities = query.get('city', None) if cities: cities = cities.split(',') qs = qs.filter(registrations__organization__city__in=cities) registrations_qs = registrations_qs.filter( organization__city__in=cities) activity = query.get('activity', None) status = query.get('status', None) user_is_staff = request.user.groups.filter(name=REGISTRIES_VIEWER_ROLE).exists() if activity: if (status == 'P' or not status) and user_is_staff: # We only allow staff to filter on status # For pending, or all, we also return search where there is no registration. qs = qs.filter(Q(registrations__registries_activity__registries_activity_code=activity) | Q(registrations__isnull=True)) registrations_qs = registrations_qs.filter( registries_activity__registries_activity_code=activity) else: # For all other searches, we strictly filter on activity. qs = qs.filter( registrations__registries_activity__registries_activity_code=activity) registrations_qs = registrations_qs.filter( registries_activity__registries_activity_code=activity) if user_is_staff: # User is logged in if status: if status == 'Removed': # Things are a bit more complicated if we're looking for removed, as the current # status doesn't come in to play. qs = qs.filter( registrations__applications__removal_date__isnull=False) else: if status == 'P': # If the status is pending, we also pull in any people without registrations # or applications. qs = qs.filter(Q(registrations__applications__current_status__code=status) | Q(registrations__isnull=True) | Q(registrations__applications__isnull=True), Q(registrations__applications__removal_date__isnull=True)) else: qs = qs.filter( Q(registrations__applications__current_status__code=status), Q(registrations__applications__removal_date__isnull=True)) else: # User is not logged in # Only show active drillers to non-admin users and public qs = qs.filter( Q(registrations__applications__current_status__code='A', registrations__registries_activity=activity), Q(registrations__applications__removal_date__isnull=True), Q() ) registrations_qs = registrations_qs.filter( Q(applications__current_status__code='A'), Q(applications__removal_date__isnull=True)) applications_qs = applications_qs.filter( current_status='A', removal_date__isnull=True) # generate applications queryset applications_qs = applications_qs \ .select_related( 'current_status', 'primary_certificate', 'primary_certificate__cert_auth', 'subactivity', ) \ .prefetch_related( 'subactivity__qualification_set', 'subactivity__qualification_set__well_class' ).distinct() # generate registrations queryset, inserting filtered applications queryset defined above registrations_qs = registrations_qs \ .select_related( 'registries_activity', 'organization', 'organization__province_state', ) \ .prefetch_related( Prefetch('applications', queryset=applications_qs) ).distinct() # insert filtered registrations set qs = qs \ .prefetch_related( Prefetch('registrations', queryset=registrations_qs) ) return qs.distinct() class PersonListView(RevisionMixin, AuditCreateMixin, ListCreateAPIView): """ get: Returns a list of all person records post: Creates a new person record """ permission_classes = (RegistriesEditOrReadOnly,) serializer_class = PersonAdminSerializer pagination_class = APILimitOffsetPagination # Allow searching on name fields, names of related companies, etc. 
filter_backends = (restfilters.DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter) ordering_fields = ('surname', 'registrations__organization__name') ordering = ('surname',) search_fields = ( 'first_name', 'surname', 'registrations__organization__name', 'registrations__organization__city', 'registrations__registration_no' ) # fetch related companies and registration applications (prevent duplicate database trips) queryset = Person.objects.all() def get_queryset(self): """ Returns Person queryset, removing non-active and unregistered drillers for anonymous users """ return person_search_qs(self.request) @swagger_auto_schema(responses={200: PersonListSerializer(many=True)}) def get(self, request, *args, **kwargs): # Returns self.list - overridden for schema documentation return self.list(request, *args, **kwargs) def list(self, request, **kwargs): """ List response using serializer with reduced number of fields """ queryset = self.get_queryset() filtered_queryset = self.filter_queryset(queryset) page = self.paginate_queryset(filtered_queryset) if page is not None: serializer = PersonListSerializer(page, many=True) return self.get_paginated_response(serializer.data) serializer = PersonListSerializer(filtered_queryset, many=True) return Response(serializer.data) class PersonDetailView(RevisionMixin, AuditUpdateMixin, RetrieveUpdateDestroyAPIView): """ get: Returns the specified person put: Replaces the specified person record with a new one patch: Updates a person with the fields/values provided in the request body delete: Removes the specified person record """ permission_classes = (RegistriesEditPermissions,) serializer_class = PersonAdminSerializer # pk field has been replaced by person_guid lookup_field = "person_guid" queryset = Person.objects \ .all() \ .prefetch_related( 'notes', 'notes__author', 'registrations', 'registrations__registries_activity', 'registrations__organization', 'registrations__applications', 'registrations__applications__current_status', 'registrations__applications__primary_certificate', 'registrations__applications__primary_certificate__cert_auth', 'registrations__applications__subactivity', 'registrations__applications__subactivity__qualification_set', 'registrations__applications__subactivity__qualification_set__well_class' ).distinct() def get_queryset(self): """ Returns only registered people (i.e. 
drillers with active registration) to anonymous users """ qs = self.queryset.filter(expiry_date__gt=timezone.now()) if not self.request.user.groups.filter(name=REGISTRIES_VIEWER_ROLE).exists(): qs = qs.filter(Q(applications__current_status__code='A'), Q(applications__removal_date__isnull=True)) return qs def destroy(self, request, *args, **kwargs): """ Set expiry_date to current date """ instance = self.get_object() instance.expiry_date = timezone.now() instance.save() return Response(status=status.HTTP_204_NO_CONTENT) class CitiesListView(ListAPIView): """ List of cities with a qualified, registered operator (driller or installer) get: returns a list of cities with a qualified, registered operator (driller or installer) """ serializer_class = CityListSerializer lookup_field = 'register_guid' pagination_class = None swagger_schema = None permission_classes = (RegistriesEditOrReadOnly,) queryset = Register.objects \ .exclude(organization__city__isnull=True) \ .exclude(organization__city='') \ .select_related( 'organization', 'organization__province_state' ) \ .distinct('organization__city') \ .order_by('organization__city') def get_queryset(self): """ Returns only registered operators (i.e. drillers with active registration) to anonymous users if request has a kwarg 'activity' (accepts values 'drill' and 'install'), queryset will filter for that activity """ qs = self.queryset if not self.request.user.groups.filter(name=REGISTRIES_VIEWER_ROLE).exists(): qs = qs.filter( Q(applications__current_status__code='A'), Q(applications__removal_date__isnull=True)) if self.kwargs.get('activity') == 'drill': qs = qs.filter(registries_activity='DRILL') if self.kwargs.get('activity') == 'install': qs = qs.filter(registries_activity='PUMP') return qs class RegistrationListView(RevisionMixin, AuditCreateMixin, ListCreateAPIView): """ get: List all registration records post: Create a new well driller or well pump installer registration record for a person """ permission_classes = (RegistriesEditPermissions,) serializer_class = RegistrationAdminSerializer queryset = Register.objects.all() \ .select_related( 'person', 'registries_activity', 'organization',) \ .prefetch_related( 'applications', 'applications__current_status', 'applications__primary_certificate', 'applications__primary_certificate__cert_auth', 'applications__subactivity', 'applications__subactivity__qualification_set', 'applications__subactivity__qualification_set__well_class' ) class RegistrationDetailView(RevisionMixin, AuditUpdateMixin, RetrieveUpdateDestroyAPIView): """ get: Returns a well driller or well pump installer registration record put: Replaces a well driller or well pump installer registration record with a new one patch: Updates a registration record with new values delete: Removes the specified registration record from the database """ permission_classes = (RegistriesEditPermissions,) serializer_class = RegistrationAdminSerializer lookup_field = 'register_guid' queryset = Register.objects.all() \ .select_related( 'person', 'registries_activity', 'organization',) \ .prefetch_related( 'applications', 'applications__current_status', 'applications__primary_certificate', 'applications__primary_certificate__cert_auth', 'applications__subactivity', 'applications__subactivity__qualification_set', 'applications__subactivity__qualification_set__well_class' ) class ApplicationListView(RevisionMixin, AuditCreateMixin, ListCreateAPIView): """ get: Returns a list of all registration applications post: Creates a new registries application """ 
permission_classes = (RegistriesEditPermissions,) serializer_class = ApplicationAdminSerializer queryset = RegistriesApplication.objects.all() \ .select_related( 'registration', 'registration__person', 'registration__registries_activity') class ApplicationDetailView(RevisionMixin, AuditUpdateMixin, RetrieveUpdateDestroyAPIView): """ get: Returns the specified drilling application put: Replaces the specified record with a new one patch: Updates a drilling application with the set of values provided in the request body delete: Removes the specified drilling application record """ permission_classes = (RegistriesEditPermissions,) serializer_class = ApplicationAdminSerializer queryset = RegistriesApplication.objects.all() \ .select_related( 'registration', 'registration__person', 'registration__registries_activity') lookup_field = "application_guid" class OrganizationNameListView(ListAPIView): """ A list of organizations with only organization names """ permission_classes = (RegistriesEditOrReadOnly,) serializer_class = OrganizationNameListSerializer queryset = Organization.objects \ .select_related('province_state') pagination_class = None lookup_field = 'organization_guid' def get_queryset(self): return self.queryset.filter(expiry_date__gt=timezone.now()) class PersonNoteListView(AuditCreateMixin, ListCreateAPIView): """ get: Returns notes associated with a Person record post: Adds a note record to the specified Person record """ permission_classes = (RegistriesEditPermissions,) serializer_class = PersonNoteSerializer swagger_schema = None def get_queryset(self): person = self.kwargs['person_guid'] return PersonNote.objects.filter(person=person).order_by('-date') def perform_create(self, serializer): """ Add author to serializer data """ person = self.kwargs['person_guid'] serializer.validated_data['person'] = Person.objects.get( person_guid=person) serializer.validated_data['author'] = self.request.user return super(PersonNoteListView, self).perform_create(serializer) class PersonNoteDetailView(AuditUpdateMixin, RetrieveUpdateDestroyAPIView): """ get: Returns a PersonNote record put: Replaces a PersonNote record with a new one patch: Updates a PersonNote record with the set of fields provided in the request body delete: Removes a PersonNote record """ permission_classes = (RegistriesEditPermissions,) serializer_class = PersonNoteSerializer swagger_schema = None def get_queryset(self): person = self.kwargs['person'] return PersonNote.objects.filter(person=person) class OrganizationNoteListView(AuditCreateMixin, ListCreateAPIView): """ get: Returns notes associated with a Organization record post: Adds a note record to the specified Organization record """ permission_classes = (RegistriesEditPermissions,) serializer_class = OrganizationNoteSerializer swagger_schema = None def get_queryset(self): org = self.kwargs['org_guid'] return OrganizationNote.objects.filter(organization=org).order_by('-date') def perform_create(self, serializer): """ Add author to serializer data """ org = self.kwargs['org_guid'] serializer.validated_data['organization'] = Organization.objects.get( org_guid=org) serializer.validated_data['author'] = self.request.user return super(OrganizationNoteListView, self).perform_create(serializer) class OrganizationNoteDetailView(AuditUpdateMixin, RetrieveUpdateDestroyAPIView): """ get: Returns a OrganizationNote record put: Replaces a OrganizationNote record with a new one patch: Updates a OrganizationNote record with the set of fields provided in the request body delete: 
Removes a OrganizationNote record """ permission_classes = (RegistriesEditPermissions,) serializer_class = OrganizationNoteSerializer swagger_schema = None def get_queryset(self): org = self.kwargs['org_guid'] return OrganizationNote.objects.filter(organization=org) class OrganizationHistory(APIView): """ get: returns a history of changes to an Organization model record """ permission_classes = (RegistriesEditPermissions,) queryset = Organization.objects.all() swagger_schema = None def get(self, request, org_guid, **kwargs): try: organization = Organization.objects.get(org_guid=org_guid) except Organization.DoesNotExist: raise Http404("Organization not found") # query records in history for this model. organization_history = [obj for obj in organization.history.all().order_by( '-revision__date_created')] history_diff = generate_history_diff(organization_history) return Response(history_diff) class PersonHistory(APIView): """ get: returns a history of changes to a Person model record """ permission_classes = (RegistriesEditPermissions,) queryset = Person.objects.all() swagger_schema = None def get(self, request, person_guid, **kwargs): """ Retrieves version history for the specified Person record and creates a list of diffs for each revision. """ try: person = Person.objects.get(person_guid=person_guid) except Person.DoesNotExist: raise Http404("Person not found") # query records in history for this model. person_history = [obj for obj in person.history.all().order_by( '-revision__date_created')] person_history_diff = generate_history_diff( person_history, 'Person profile') registration_history = [] registration_history_diff = [] application_history = [] application_history_diff = [] # generate diffs for version history in each of the individual's registrations for reg in person.registrations.all(): registration_history = [ obj for obj in reg.history.all()] registration_history_diff += generate_history_diff( registration_history, reg.registries_activity.description + ' registration') for app in reg.applications.all(): application_history = [ obj for obj in app.history.all()] application_history_diff += generate_history_diff( application_history, app.subactivity.description + ' application') # generate application diffs history_diff = sorted( person_history_diff + registration_history_diff + application_history_diff, key=lambda x: x['date'], reverse=True) return Response(history_diff) class PersonNameSearch(ListAPIView): """Search for a person in the Register""" permission_classes = (RegistriesEditOrReadOnly,) serializer_class = PersonNameSerializer pagination_class = None lookup_field = 'person_guid' ordering = ('surname',) def get_queryset(self): """ This view returns all names with expired records filtered out. 
""" return Person.objects.filter(expiry_date__gt=timezone.now()) class ListFiles(APIView): """ List documents associated with an aquifer get: list files found for the aquifer identified in the uri """ @swagger_auto_schema(responses={200: openapi.Response('OK', openapi.Schema(type=openapi.TYPE_OBJECT, properties={ 'public': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema( type=openapi.TYPE_OBJECT, properties={ 'url': openapi.Schema(type=openapi.TYPE_STRING), 'name': openapi.Schema(type=openapi.TYPE_STRING) } )), 'private': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema( type=openapi.TYPE_OBJECT, properties={ 'url': openapi.Schema(type=openapi.TYPE_STRING), 'name': openapi.Schema(type=openapi.TYPE_STRING) } )) }) )}) def get(self, request, person_guid, **kwargs): user_is_staff = self.request.user.groups.filter( Q(name=REGISTRIES_EDIT_ROLE) | Q(name=REGISTRIES_VIEWER_ROLE)).exists() client = MinioClient( request=request, disable_private=(not user_is_staff)) documents = client.get_documents( person_guid, resource="driller", include_private=user_is_staff) return Response(documents) class PreSignedDocumentKey(APIView): """ Get a pre-signed document key to upload into an S3 compatible document store post: obtain a URL that is pre-signed to allow client-side uploads """ queryset = Person.objects.all() permission_classes = (RegistriesEditPermissions,) @swagger_auto_schema(auto_schema=None) def get(self, request, person_guid, **kwargs): person = get_object_or_404(self.queryset, pk=person_guid) client = MinioClient( request=request, disable_private=False) object_name = request.GET.get("filename") filename = client.format_object_name(object_name, person.person_guid, "driller") bucket_name = get_env_variable("S3_REGISTRANT_BUCKET") # All documents are private for drillers url = client.get_presigned_put_url( filename, bucket_name=bucket_name, private=True) return JsonResponse({"object_name": object_name, "url": url}) class DeleteDrillerDocument(APIView): """ Delete a document from a S3 compatible store delete: remove the specified object from the S3 store """ queryset = Person.objects.all() permission_classes = (RegistriesEditPermissions,) @swagger_auto_schema(auto_schema=None) def delete(self, request, person_guid, **kwargs): person = get_object_or_404(self.queryset, pk=person_guid) client = MinioClient( request=request, disable_private=False) is_private = False bucket_name = get_env_variable("S3_REGISTRANT_BUCKET") if request.GET.get("private") == "true": is_private = True bucket_name = get_env_variable("S3_PRIVATE_REGISTRANT_BUCKET") object_name = request.GET.get("filename") client.delete_document(object_name, bucket_name=bucket_name, private=is_private) return HttpResponse(status=204)
# -*- coding: utf-8 -*- # Copyright (c) 2016 Severen Redwood <severen@shrike.me> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest from flask import Flask from flask_restful import Api from bluepill import BluePillStateError from bluepill.server import BackgroundServer, get_application, get_api @pytest.fixture def server(): """Create an instance of BackgroundServer for testing.""" return BackgroundServer() def test_get_application(): # TODO: Check that the routes are all correct. # Check that it returns a Flask application instance. assert(isinstance(get_application(), Flask)) def test_get_api(): # Check that it returns a Flask-RESTful API instance. assert(isinstance(get_api(), Api)) def test_backgroundserver(server): # TODO: Terminate server if it hangs and fail test. # Test starting and immediately stopping. server.start() server.stop() def test_backgroundserver_already_started(server): # Test the already started error. with pytest.raises(BluePillStateError) as e: server.start() server.start() assert e.value.message == 'Server already running.' # Test that the state is correct. assert server.get_state() == BackgroundServer.STARTED def test_backgroundserver_already_stopped(server): # Test the already stopped error. with pytest.raises(BluePillStateError) as e: server.stop() server.stop() assert e.value.message == 'Server already stopped.' # Test that the state is correct. assert server.get_state() == BackgroundServer.STOPPED
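
# Aside: one common way a BackgroundServer like the class under test is
# implemented is to run werkzeug's WSGI server in a daemon thread and track
# state explicitly. This is only an illustrative sketch under that
# assumption, not bluepill's actual code.
import threading
from werkzeug.serving import make_server

class TinyBackgroundServer(object):
    STOPPED, STARTED = 0, 1

    def __init__(self, app, host='127.0.0.1', port=5000):
        self._server = None
        self._thread = None
        self._app, self._host, self._port = app, host, port
        self._state = self.STOPPED

    def get_state(self):
        return self._state

    def start(self):
        if self._state == self.STARTED:
            raise RuntimeError('Server already running.')
        self._server = make_server(self._host, self._port, self._app)
        self._thread = threading.Thread(target=self._server.serve_forever)
        self._thread.daemon = True
        self._thread.start()
        self._state = self.STARTED

    def stop(self):
        if self._state == self.STOPPED:
            raise RuntimeError('Server already stopped.')
        self._server.shutdown()
        self._thread.join()
        self._state = self.STOPPED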
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'NewLJ.ui' # # Created: Thu May 10 03:10:06 2012 # by: PyQt4 UI code generator 4.8.6 # # WARNING! All changes made in this file will be lost! from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: _fromUtf8 = lambda s: s class Ui_DialogLJSetup(object): def setupUi(self, DialogLJSetup): DialogLJSetup.setObjectName(_fromUtf8("DialogLJSetup")) DialogLJSetup.resize(349, 144) DialogLJSetup.setWindowTitle(QtGui.QApplication.translate("DialogLJSetup", "Create new Lennard-Jones system", None, QtGui.QApplication.UnicodeUTF8)) DialogLJSetup.setModal(True) self.buttonBox = QtGui.QDialogButtonBox(DialogLJSetup) self.buttonBox.setGeometry(QtCore.QRect(20, 100, 301, 32)) self.buttonBox.setOrientation(QtCore.Qt.Horizontal) self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok) self.buttonBox.setObjectName(_fromUtf8("buttonBox")) self.gridLayoutWidget = QtGui.QWidget(DialogLJSetup) self.gridLayoutWidget.setGeometry(QtCore.QRect(20, 20, 301, 61)) self.gridLayoutWidget.setObjectName(_fromUtf8("gridLayoutWidget")) self.gridLayout = QtGui.QGridLayout(self.gridLayoutWidget) self.gridLayout.setMargin(0) self.gridLayout.setObjectName(_fromUtf8("gridLayout")) self.label_2 = QtGui.QLabel(self.gridLayoutWidget) self.label_2.setText(QtGui.QApplication.translate("DialogLJSetup", "Number of minima to save", None, QtGui.QApplication.UnicodeUTF8)) self.label_2.setObjectName(_fromUtf8("label_2")) self.gridLayout.addWidget(self.label_2, 2, 0, 1, 1) self.lineNatoms = QtGui.QLineEdit(self.gridLayoutWidget) self.lineNatoms.setInputMask(_fromUtf8("")) self.lineNatoms.setText(QtGui.QApplication.translate("DialogLJSetup", "13", None, QtGui.QApplication.UnicodeUTF8)) self.lineNatoms.setObjectName(_fromUtf8("lineNatoms")) self.gridLayout.addWidget(self.lineNatoms, 1, 1, 1, 1) self.lineNsave = QtGui.QLineEdit(self.gridLayoutWidget) self.lineNsave.setInputMask(QtGui.QApplication.translate("DialogLJSetup", "999; ", None, QtGui.QApplication.UnicodeUTF8)) self.lineNsave.setText(QtGui.QApplication.translate("DialogLJSetup", "50", None, QtGui.QApplication.UnicodeUTF8)) self.lineNsave.setObjectName(_fromUtf8("lineNsave")) self.gridLayout.addWidget(self.lineNsave, 2, 1, 1, 1) self.label = QtGui.QLabel(self.gridLayoutWidget) self.label.setText(QtGui.QApplication.translate("DialogLJSetup", "Number of particles", None, QtGui.QApplication.UnicodeUTF8)) self.label.setObjectName(_fromUtf8("label")) self.gridLayout.addWidget(self.label, 1, 0, 1, 1) self.retranslateUi(DialogLJSetup) QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), DialogLJSetup.accept) QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), DialogLJSetup.reject) QtCore.QMetaObject.connectSlotsByName(DialogLJSetup) def retranslateUi(self, DialogLJSetup): pass
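
# Typical usage of a pyuic4-generated class like the one above (illustrative;
# requires a running display / QApplication):
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    dialog = QtGui.QDialog()
    ui = Ui_DialogLJSetup()
    ui.setupUi(dialog)
    if dialog.exec_() == QtGui.QDialog.Accepted:
        natoms = int(ui.lineNatoms.text())
        nsave = int(ui.lineNsave.text())
        print(natoms, nsave)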
# -*- coding: utf-8 -*- # # pywws - Python software for USB Wireless Weather Stations # http://github.com/jim-easterbrook/pywws # Copyright (C) 2008-15 pywws contributors # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # pywws documentation build configuration file, created by # sphinx-quickstart on Fri Sep 30 08:05:58 2011. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) sys.path.insert(0, os.path.abspath('..')) on_rtd = os.environ.get('READTHEDOCS', None) == 'True' # cludge to allow documentation to be compiled without installing dependencies class Dummy(object): def __getattr__(self, name): if name in ('__file__',): return None return Dummy for mod_name in ('hid', 'oauth2', 'twitter', 'usb', 'usb.core', 'usb.util', 'libusb1', 'usb1', 'daemon', 'daemon.runner'): sys.modules[mod_name] = Dummy() # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.viewcode'] autosummary_generate = True autoclass_content = 'both' autodoc_member_order = 'bysource' autodoc_default_flags = ['members', 'undoc-members'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] rst_epilog = """ ---- Comments or questions? Please subscribe to the pywws mailing list http://groups.google.com/group/pywws and let us know. """ # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'pywws' copyright = u'2008-15, pywws contributors' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. #version = # The full version, including alpha/beta/rc tags. #release = from pywws import __version__ as release version = release[:release.rfind('.')] # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
#language = None if not on_rtd and 'LANG' in os.environ: language = os.environ['LANG'].split('_')[0] locale_dirs = ['../pywws/lang'] # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False show_authors = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. if on_rtd: html_theme = 'default' else: html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None html_logo = 'pywws_logo.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None html_favicon = 'pywws_logo.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True html_show_sourcelink = False # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. 
Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'pywwsdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'pywws.tex', u'pywws Documentation', u'Jim Easterbrook', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'pywws', u'pywws Documentation', [u'Jim Easterbrook'], 1) ]
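
# Aside: Sphinx 1.3 and later provide autodoc_mock_imports, which replaces
# the Dummy-module work-around near the top of this file; something like the
# following (module list as used above) would be the modern equivalent.
#autodoc_mock_imports = ['hid', 'oauth2', 'twitter', 'usb', 'libusb1',
#                        'usb1', 'daemon']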
#!/usr/bin/env python2 # -*- coding: utf-8 -*- import re import sys from getpass import getpass import os import copy import random import time import datetime import json import argparse import requests import urllib import hashlib import select from mutagen.id3 import ID3,TRCK,TIT2,TALB,TPE1,APIC,TDRC,COMM,TPOS,USLT from HTMLParser import HTMLParser url_song = "http://www.xiami.com/song/%s" url_album = "http://www.xiami.com/album/%s" url_collect = "http://www.xiami.com/collect/ajax-get-list" url_artist_albums = "http://www.xiami.com/artist/album/id/%s/page/%s" url_artist_top_song = "http://www.xiami.com/artist/top-%s" url_lib_songs = "http://www.xiami.com/space/lib-song/u/%s/page/%s" url_recent = "http://www.xiami.com/space/charts-recent/u/%s/page/%s" # 电台来源:来源于"收藏的歌曲","收藏的专辑","喜欢的艺人","我收藏的精选集" url_radio_my = "http://www.xiami.com/radio/xml/type/4/id/%s" # 虾米猜, 基于你的虾米试听行为所建立的个性电台 url_radio_c = "http://www.xiami.com/radio/xml/type/8/id/%s" ############################################################ # wget exit status wget_es = { 0:"No problems occurred.", 2:"User interference.", 1<<8:"Generic error code.", 2<<8:"Parse error - for instance, when parsing command-line ' \ 'optio.wgetrc or .netrc...", 3<<8:"File I/O error.", 4<<8:"Network failure.", 5<<8:"SSL verification failure.", 6<<8:"Username/password authentication failure.", 7<<8:"Protocol errors.", 8<<8:"Server issued an error response." } ############################################################ parser = HTMLParser() s = '\x1b[%d;%dm%s\x1b[0m' # terminual color template cookie_file = os.path.join(os.path.expanduser('~'), '.Xiami.cookies') headers = { "Accept":"text/html,application/xhtml+xml,application/xml; " \ "q=0.9,image/webp,*/*;q=0.8", "Accept-Encoding":"text/html", "Accept-Language":"en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4,zh-TW;q=0.2", "Content-Type":"application/x-www-form-urlencoded", "Referer":"http://www.xiami.com/", "User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"\ } HEADERS2 = { 'pragma': 'no-cache', 'accept-encoding': 'gzip, deflate, br', 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7', 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36', 'accept': 'text/javascript, application/javascript, application/ecmascript, application/x-ecmascript, */*; q=0.01', 'cache-control': 'no-cache', 'authority': 'www.xiami.com', 'x-requested-with': 'XMLHttpRequest', 'referer': 'https://www.xiami.com/play?ids=/song/playlist/id/', } ss = requests.session() ss.headers.update(headers) ############################################################ # Regular Expression Templates re_disc_description = r'disc (\d+) \[(.+?)\]' ############################################################ def decry(row, encryed_url): url = encryed_url urllen = len(url) rows = int(row) cols_base = urllen / rows # basic column count rows_ex = urllen % rows # count of rows that have 1 more column matrix = [] for r in xrange(rows): length = cols_base + 1 if r < rows_ex else cols_base matrix.append(url[:length]) url = url[length:] url = '' for i in xrange(urllen): url += matrix[i % rows][i / rows] return urllib.unquote(url).replace('^', '0') def modificate_text(text): text = parser.unescape(text) text = re.sub(r'//*', '-', text) text = text.replace('/', '-') text = text.replace('\\', '-') text = re.sub(r'\s\s+', ' ', text) text = text.strip() return text def 
modificate_file_name_for_wget(file_name): file_name = re.sub(r'\s*:\s*', u' - ', file_name) # for FAT file system file_name = file_name.replace('?', '') # for FAT file system file_name = file_name.replace('"', '\'') # for FAT file system file_name = file_name.replace('$', '\\$') # for command, see issue #7 return file_name def z_index(song_infos): size = len(song_infos) z = len(str(size)) return z ######################################################## class Song(object): def __init__(self): self.__sure() self.track = 0 self.year = 0 self.cd_serial = 0 self.disc_description = '' # z = len(str(album_size)) self.z = 1 def __sure(self): __dict__ = self.__dict__ if '__keys' not in __dict__: __dict__['__keys'] = {} def __getattr__(self, name): __dict__ = self.__dict__ return __dict__['__keys'].get(name) def __setattr__(self, name, value): __dict__ = self.__dict__ __dict__['__keys'][name] = value def __getitem__(self, key): return getattr(self, key) def __setitem__(self, key, value): return setattr(self, key, value) def feed(self, **kwargs): for name, value in kwargs.items(): setattr(self, name, value) class XiamiH5API(object): URL = 'http://api.xiami.com/web' PARAMS = { 'v': '2.0', 'app_key': '1', } def __init__(self): self.cookies = { 'user_from': '2', 'XMPLAYER_addSongsToggler': '0', 'XMPLAYER_isOpen': '0', '_xiamitoken': hashlib.md5(str(time.time())).hexdigest() } self.sess = requests.session() self.sess.cookies.update(self.cookies) def _request(self, url, method='GET', **kwargs): try: resp = self.sess.request(method, url, **kwargs) except Exception, err: print 'Error:', err sys.exit() return resp def _make_params(self, **kwargs): params = copy.deepcopy(self.PARAMS) params.update(kwargs) return params def song(self, song_id): params = self._make_params(id=song_id, r='song/detail') url = self.URL resp = self._request(url, params=params, headers=headers) info = resp.json()['data']['song'] pic_url = re.sub('_\d+\.', '.', info['logo']) song = Song() song.feed( song_id=info['song_id'], song_name=info['song_name'], album_id=info['album_id'], album_name=info['album_name'], artist_id=info['artist_id'], artist_name=info['artist_name'], singers=info['singers'], album_pic_url=pic_url, comment='http://www.xiami.com/song/' + str(info['song_id']) ) return song def album(self, album_id): url = self.URL params = self._make_params(id=album_id, r='album/detail') resp = self._request(url, params=params, headers=headers) info = resp.json()['data'] songs = [] album_id=info['album_id'], album_name=info['album_name'], artist_id = info['artist_id'] artist_name = info['artist_name'] pic_url = re.sub('_\d+\.', '.', info['album_logo']) for track, info_n in enumerate(info['songs'], 1): song = Song() song.feed( song_id=info_n['song_id'], song_name=info_n['song_name'], album_id=album_id, album_name=album_name, artist_id=artist_id, artist_name=artist_name, singers=info_n['singers'], album_pic_url=pic_url, track=track, comment='http://www.xiami.com/song/' + str(info_n['song_id']) ) songs.append(song) return songs def collect(self, collect_id): url = self.URL params = self._make_params(id=collect_id, r='collect/detail') resp = self._request(url, params=params, headers=headers) info = resp.json()['data'] collect_name = info['collect_name'] collect_id = info['list_id'] songs = [] for info_n in info['songs']: pic_url = re.sub('_\d+\.', '.', info['album_logo']) song = Song() song.feed( song_id=info_n['song_id'], song_name=info_n['song_name'], album_id=info_n['album_id'], album_name=info_n['album_name'], 
artist_id=info_n['artist_id'], artist_name=info_n['artist_name'], singers=info_n['singers'], album_pic_url=pic_url, comment='http://www.xiami.com/song/' + str(info_n['song_id']) ) songs.append(song) return collect_id, collect_name, songs def artist_top_songs(self, artist_id, page=1, limit=20): url = self.URL params = self._make_params(id=artist_id, page=page, limit=limit, r='artist/hot-songs') resp = self._request(url, params=params, headers=headers) info = resp.json()['data'] for info_n in info['songs']: song_id = info_n['song_id'] yield self.song(song_id) def search_songs(self, keywords, page=1, limit=20): url = self.URL params = self._make_params(key=keywords, page=page, limit=limit, r='search/songs') resp = self._request(url, params=params, headers=headers) info = resp.json()['data'] for info_n in info['songs']: pic_url = re.sub('_\d+\.', '.', info['album_logo']) song = Song() song.feed( song_id=info_n['song_id'], song_name=info_n['song_name'], album_id=info_n['album_id'], album_name=info_n['album_name'], artist_id=info_n['artist_id'], artist_name=info_n['artist_name'], singers=info_n['singer'], album_pic_url=pic_url, comment='http://www.xiami.com/song/' + str(info_n['song_id']) ) yield song def get_song_id(self, *song_sids): song_ids = [] for song_sid in song_sids: if isinstance(song_sid, int) or song_sid.isdigit(): song_ids.append(int(song_sid)) url = 'https://www.xiami.com/song/playlist/id/{}/cat/json'.format(song_sid) resp = self._request(url, headers=headers) info = resp.json() song_id = int(str(info['data']['trackList'][0]['song_id'])) song_ids.append(song_id) return song_ids class XiamiWebAPI(object): URL = 'https://www.xiami.com/song/playlist/' def __init__(self): self.sess = requests.session() def _request(self, url, method='GET', **kwargs): try: resp = self.sess.request(method, url, **kwargs) except Exception, err: print 'Error:', err sys.exit() return resp def _make_song(self, info): song = Song() location=info['location'] row = location[0] encryed_url = location[1:] durl = decry(row, encryed_url) song.feed( song_id=info['song_id'], song_sub_title=info['song_sub_title'], songwriters=info['songwriters'], singers=info['singers'], song_name=parser.unescape(info['name']), album_id=info['album_id'], album_name=info['album_name'], artist_id=info['artist_id'], artist_name=info['artist_name'], composer=info['composer'], lyric_url='http:' + info['lyric_url'], track=info['track'], cd_serial=info['cd_serial'], album_pic_url='http:' + info['album_pic'], comment='http://www.xiami.com/song/' + str(info['song_id']), length=info['length'], play_count=info['playCount'], location=info['location'], location_url=durl ) return song def _find_z(self, album): zs = [] song = album[0] for i, song in enumerate(album[:-1]): next_song = album[i+1] cd_serial = song.cd_serial next_cd_serial = next_song.cd_serial if cd_serial != next_cd_serial: z = len(str(song.track)) zs.append(z) z = len(str(song.track)) zs.append(z) for song in album: song.z = zs[song.cd_serial - 1] def song(self, song_id): url = self.URL + 'id/%s/cat/json' % song_id resp = self._request(url, headers=HEADERS2) # there is no song if not resp.json().get('data'): return None info = resp.json()['data']['trackList'][0] song = self._make_song(info) return song def songs(self, *song_ids): url = self.URL + 'id/%s/cat/json' % '%2C'.join(song_ids) resp = self._request(url, headers=HEADERS2) # there is no song if not resp.json().get('data'): return None info = resp.json()['data'] songs = [] for info_n in info['trackList']: song = 
self._make_song(info_n) songs.append(song) return songs def album(self, album_id): url = self.URL + 'id/%s/type/1/cat/json' % album_id resp = self._request(url, headers=HEADERS2) # there is no album if not resp.json().get('data'): return None info = resp.json()['data'] songs = [] for info_n in info['trackList']: song = self._make_song(info_n) songs.append(song) self._find_z(songs) return songs def collect(self, collect_id): url = self.URL + 'id/%s/type/3/cat/json' % collect_id resp = self._request(url, headers=HEADERS2) info = resp.json()['data'] songs = [] for info_n in info['trackList']: song = self._make_song(info_n) songs.append(song) return songs def search_songs(self, keywords): url = 'https://www.xiami.com/search?key=%s&_=%s' % ( urllib.quote(keywords), int(time.time() * 1000)) resp = self._request(url, headers=headers) html = resp.content song_ids = re.findall(r'song/(\w+)"', html) songs = self.songs(*song_ids) return songs class xiami(object): def __init__(self): self.dir_ = os.getcwdu() self.template_record = 'https://www.xiami.com/count/playrecord?sid={song_id}&ishq=1&t={time}&object_id={song_id}&object_name=default&start_point=120&_xiamitoken={token}' self.collect_id = '' self.album_id = '' self.artist_id = '' self.song_id = '' self.user_id = '' self.cover_id = '' self.cover_data = '' self.html = '' self.disc_description_archives = {} self.download = self.play if args.play else self.download self._is_play = bool(args.play) self._api = XiamiWebAPI() def init(self): if os.path.exists(cookie_file): try: cookies = json.load(open(cookie_file)) ss.cookies.update(cookies.get('cookies', cookies)) if not self.check_login(): print s % (1, 91, ' !! cookie is invalid, please login\n') sys.exit(1) except: open(cookie_file, 'w').close() print s % (1, 97, ' please login') sys.exit(1) else: print s % (1, 91, ' !! 
cookie_file is missing, please login') sys.exit(1) def check_login(self): #print s % (1, 97, '\n -- check_login') url = 'http://www.xiami.com/task/signin' r = self._request(url) if r.content: #print s % (1, 92, ' -- check_login success\n') # self.save_cookies() return True else: print s % (1, 91, ' -- login fail, please check email and password\n') return False def _request(self, url, headers=None, params=None, data=None, method='GET', timeout=30, retry=2): for _ in range(retry): try: headers = headers or ss.headers resp = ss.request(method, url, headers=headers, params=params, data=data, timeout=timeout) except Exception, err: continue if not resp.ok: raise Exception("response is not ok, status_code = %s" % resp.status_code) # save cookies self.save_cookies() return resp raise err # manually, add cookies # you must know how to get the cookie def add_cookies(self, cookies): _cookies = {} for item in cookies.strip('; ').split('; '): k, v = item.split('=', 1) _cookies[k] = v self.save_cookies(_cookies) ss.cookies.update(_cookies) def login(self, email, password): print s % (1, 97, '\n -- login') #validate = self.get_validate() data = { 'email': email, 'password': password, #'validate': validate, 'remember': 1, 'LoginButton': '登录' } hds = { 'Origin': 'http://www.xiami.com', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-US,en;q=0.8', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36', 'Content-Type': 'application/x-www-form-urlencoded', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Cache-Control': 'max-age=1', 'Referer': 'http://www.xiami.com/web/login', 'Connection': 'keep-alive', '_xiamitoken': hashlib.md5(str(time.time())).hexdigest() } url = 'https://login.xiami.com/web/login' for i in xrange(2): res = self._request(url, headers=hds, data=data) if ss.cookies.get('member_auth'): return True else: if 'checkcode' not in res.content: return False validate = self.get_validate(res.content) data['validate'] = validate return False # {{{ code from https://github.com/ly0/xiami-tools/blob/master/xiami.py def login_taobao(self, username, password): print s % (1, 97, '\n -- login taobao') p = { "lang": "zh_cn", "appName": "xiami", "appEntrance": "taobao", "cssLink": "", "styleType": "vertical", "bizParams": "", "notLoadSsoView": "", "notKeepLogin": "", "appName": "xiami", "appEntrance": "taobao", "cssLink": "https://h.alipayobjects.com/static/applogin/" \ "assets/login/mini-login-form-min.css", "styleType": "vertical", "bizParams": "", "notLoadSsoView": "true", "notKeepLogin": "true", "rnd": str(random.random()), } url = 'https://passport.alipay.com/mini_login.htm' r = ss.get(url, params=p, verify=True) cm = r.content data = { "loginId": username, "password": password, "appName": "xiami", "appEntrance": "taobao", "hsid": re.search(r'"hsid" value="(.+?)"', cm).group(1), "cid": re.search(r'"cid" value="(.+?)"', cm).group(1), "rdsToken": re.search(r'"rdsToken" value="(.+?)"', cm).group(1), "umidToken": re.search(r'"umidToken" value="(.+?)"', cm).group(1), "_csrf_token": re.search(r'"_csrf_token" value="(.+?)"', cm).group(1), "checkCode": "", } url = 'https://passport.alipay.com/newlogin/login.do?fromSite=0' theaders = headers theaders['Referer'] = 'https://passport.alipay.com/mini_login.htm' while True: r = ss.post(url, data=data, headers=theaders, verify=True) j = r.json() if j['content']['status'] == -1: if 'titleMsg' not in 
j['content']['data']: continue err_msg = j['content']['data']['titleMsg'] if err_msg == u'请输入验证码' or err_msg == u'验证码错误,请重新输入': captcha_url = 'http://pin.aliyun.com/get_img?' \ 'identity=passport.alipay.com&sessionID=%s' % data['cid'] tr = self._request(captcha_url, headers=theaders) path = os.path.join(os.path.expanduser('~'), 'vcode.jpg') with open(path, 'w') as g: img = tr.content g.write(img) print " ++ 验证码已经保存至", s % (2, 91, path) captcha = raw_input( (s % (2, 92, ' ++ %s: ' % err_msg)).encode('utf8')) data['checkCode'] = captcha continue if not j['content']['data'].get('st'): print s % (2, 91, " !! 输入的 username 或 password 有误.") sys.exit(1) url = 'http://www.xiami.com/accounts/back?st=%s' \ % j['content']['data']['st'] self._request(url, headers=theaders) self.save_cookies() return # }}} def get_validate(self, cn): #url = 'https://login.xiami.com/coop/checkcode?forlogin=1&%s' \ #% int(time.time()) url = re.search(r'src="(http.+checkcode.+?)"', cn).group(1) path = os.path.join(os.path.expanduser('~'), 'vcode.png') with open(path, 'w') as g: data = self._request(url).content g.write(data) print " ++ 验证码已经保存至", s % (2, 91, path) validate = raw_input(s % (2, 92, ' 请输入验证码: ')) return validate def save_cookies(self, cookies=None): if not cookies: cookies = ss.cookies.get_dict() with open(cookie_file, 'w') as g: json.dump(cookies, g) def get_durl(self, id_): while True: try: if not args.low: url = 'http://www.xiami.com/song/gethqsong/sid/%s' j = self._request(url % id_).json() t = j['location'] else: url = 'http://www.xiami.com/song/playlist/id/%s' cn = self._request(url % id_).text t = re.search(r'location>(.+?)</location', cn).group(1) if not t: return None row = t[0] encryed_url = t[1:] durl = decry(row, encryed_url) return durl except Exception, e: print s % (1, 91, ' |-- Error, get_durl --'), e time.sleep(5) # FIXME, this request alway returns 405 def record(self, song_id, album_id): return # token = ss.cookies.get('_xiamitoken', '') # t = int(time.time() * 1000) # self._request(self.template_record.format( # song_id=song_id, album_id=album_id, token=token, time=t)) def get_cover(self, info): if info['album_name'] == self.cover_id: return self.cover_data else: self.cover_id = info['album_name'] while True: url = info['album_pic_url'] try: self.cover_data = self._request(url).content if self.cover_data[:5] != '<?xml': return self.cover_data except Exception, e: print s % (1, 91, ' \\\n \\-- Error, get_cover --'), e time.sleep(5) def get_lyric(self, info): def lyric_parser(data): # get ' ' from http://img.xiami.net/lyric/1_13772259457649.lrc if len(data) < 10: return None if re.search(r'\[\d\d:\d\d', data): title = ' title: %s\n' % info['song_name'].encode('utf8') album = ' album: %s\n' % info['album_name'].encode('utf8') artist = 'artist: %s\n' % info['artist_name'].encode('utf8') tdict = {} for line in data.split('\n'): if re.search(r'^\[\d\d:', line): cn = re.sub(r'\[\d{2}:\d{2}\.\d{2}\]', '', line) time_tags = re.findall(r'\[\d{2}:\d{2}\.\d{2}\]', line) for tag in time_tags: tdict[tag] = cn + '\n' time_tags = tdict.keys() time_tags.sort() data = ''.join([title, album, artist, '\n------------------\n\n'] + \ [tdict[tag] for tag in time_tags]) return data else: # for http://img.xiami.net/lyric/upload/19/1770983119_1356864643.lrc return data url = 'http://www.xiami.com/song/playlist/id/%s' % info['song_id'] xml = self._request(url).content t = re.search('<lyric>(http.+?)</lyric>', xml) if not t: return None lyric_url = t.group(1) data = self._request(lyric_url).content.replace('\r\n', 
'\n') data = lyric_parser(data) if data: return data.decode('utf8', 'ignore') else: return None def get_disc_description(self, album_url, info): if not self.html: self.html = self._request(album_url).text t = re.findall(re_disc_description, self.html) t = dict([(a, modificate_text(parser.unescape(b))) \ for a, b in t]) self.disc_description_archives = dict(t) if self.disc_description_archives.has_key(info['cd_serial']): disc_description = self.disc_description_archives[info['cd_serial']] return u'(%s)' % disc_description else: return u'' def modified_id3(self, file_name, info): id3 = ID3() id3.add(TRCK(encoding=3, text=str(info['track']))) id3.add(TDRC(encoding=3, text=str(info['year']))) id3.add(TIT2(encoding=3, text=info['song_name'])) id3.add(TALB(encoding=3, text=info['album_name'])) id3.add(TPE1(encoding=3, text=info['artist_name'])) id3.add(TPOS(encoding=3, text=str(info['cd_serial']))) lyric_data = self.get_lyric(info) id3.add(USLT(encoding=3, text=lyric_data)) if lyric_data else None #id3.add(TCOM(encoding=3, text=info['composer'])) #id3.add(WXXX(encoding=3, desc=u'xiami_song_url', text=info['song_url'])) #id3.add(TCON(encoding=3, text=u'genre')) #id3.add(TSST(encoding=3, text=info['sub_title'])) #id3.add(TSRC(encoding=3, text=info['disc_code'])) id3.add(COMM(encoding=3, desc=u'Comment', \ text=info['comment'])) id3.add(APIC(encoding=3, mime=u'image/jpeg', type=3, \ desc=u'Front Cover', data=self.get_cover(info))) id3.save(file_name) def url_parser(self, urls): for url in urls: if '/collect/' in url: self.collect_id = re.search(r'/collect/(\w+)', url).group(1) #print(s % (2, 92, u'\n -- 正在分析精选集信息 ...')) self.download_collect() elif '/album/' in url: self.album_id = re.search(r'/album/(\w+)', url).group(1) #print(s % (2, 92, u'\n -- 正在分析专辑信息 ...')) self.download_album() elif '/artist/' in url or 'i.xiami.com' in url: def get_artist_id(url): html = self._request(url).text artist_id = re.search(r'artist_id = \'(\w+)\'', html).group(1) return artist_id self.artist_id = re.search(r'/artist/(\w+)', url).group(1) \ if '/artist/' in url else get_artist_id(url) code = raw_input(' >> a # 艺术家所有专辑.\n' \ ' >> r # 艺术家 radio\n' \ ' >> t # 艺术家top 20歌曲.\n >> ') if code == 'a': #print(s % (2, 92, u'\n -- 正在分析艺术家专辑信息 ...')) self.download_artist_albums() elif code == 't': #print(s % (2, 92, u'\n -- 正在分析艺术家top20信息 ...')) self.download_artist_top_20_songs() elif code == 'r': self.download_artist_radio() else: print(s % (1, 92, u' --> Over')) elif '/song/' in url: self.song_id = re.search(r'/song/(\w+)', url).group(1) #print(s % (2, 92, u'\n -- 正在分析歌曲信息 ...')) self.download_song() elif '/u/' in url: self.user_id = re.search(r'/u/(\w+)', url).group(1) code = raw_input( ' >> m # 该用户歌曲库.\n' ' >> c # 最近在听\n' ' >> s # 分享的音乐\n' ' >> r # 歌曲试听排行 - 一周\n' ' >> rt # 歌曲试听排行 - 全部 \n' ' >> rm # 私人电台:来源于"收藏的歌曲","收藏的专辑",' ' "喜欢的艺人","收藏的精选集"\n' ' >> rc # 虾米猜:基于试听行为所建立的个性电台\n >> ') if code == 'm': #print(s % (2, 92, u'\n -- 正在分析用户歌曲库信息 ...')) self.download_user_songs(url_lib_songs, u'收藏的歌曲') elif code == 'c': self.download_user_songs(url_recent, u'最近在听的歌曲') elif code == 's': url_shares = 'http://www.xiami.com' \ '/space/feed/u/%s/type/3/page/%s' % (self.user_id, '%s') self.download_user_shares(url_shares) elif code == 'r': url = 'http://www.xiami.com/space/charts/u/%s/c/song/t/week' % self.user_id self.download_ranking_songs(url, 'week') elif code == 'rt': url = 'http://www.xiami.com/space/charts/u/%s/c/song/t/all' % self.user_id self.download_ranking_songs(url, 'all') elif code == 'rm': #print(s % (2, 92, u'\n -- 
正在分析该用户的虾米推荐 ...')) url_rndsongs = url_radio_my self.download_user_radio(url_rndsongs) elif code == 'rc': url_rndsongs = url_radio_c self.download_user_radio(url_rndsongs) else: print(s % (1, 92, u' --> Over')) elif '/chart/' in url: self.chart_id = re.search(r'/c/(\d+)', url).group(1) \ if '/c/' in url else 101 type_ = re.search(r'/type/(\d+)', url).group(1) \ if '/type/' in url else 0 self.download_chart(type_) elif '/genre/' in url: if '/gid/' in url: self.genre_id = re.search(r'/gid/(\d+)', url).group(1) url_genre = 'http://www.xiami.com' \ '/genre/songs/gid/%s/page/%s' elif '/sid/' in url: self.genre_id = re.search(r'/sid/(\d+)', url).group(1) url_genre = 'http://www.xiami.com' \ '/genre/songs/sid/%s/page/%s' else: print s % (1, 91, ' !! Error: missing genre id at url') sys.exit(1) code = raw_input(' >> t # 风格推荐\n' \ ' >> r # 风格radio\n >> ') if code == 't': self.download_genre(url_genre) elif code == 'r': self.download_genre_radio(url_genre) elif 'luoo.net' in url: self.hack_luoo(url) elif 'sid=' in url: _mod = re.search(r'sid=([\w+,]+\w)', url) if _mod: song_ids = _mod.group(1).split(',') self.download_songs(song_ids) else: print s % (2, 91, u' 请正确输入虾米网址.') def make_file_name(self, song, cd_serial_auth=False): z = song['z'] file_name = str(song['track']).zfill(z) + '.' \ + song['song_name'] \ + ' - ' + song['artist_name'] + '.mp3' if cd_serial_auth: song['file_name'] = ''.join([ '[Disc-', str(song['cd_serial']), ' # ' + song['disc_description'] \ if song['disc_description'] else '', '] ', file_name]) else: song['file_name'] = file_name def get_songs(self, album_id, song_id=None): songs = self._api.album(album_id) if not songs: return [] cd_serial_auth = int(songs[-1]['cd_serial']) > 1 for song in songs: self.make_file_name(song, cd_serial_auth=cd_serial_auth) songs = [i for i in songs if i['song_id'] == song_id] \ if song_id else songs return songs def get_song(self, song_id): song = self._api.song(song_id) if not song: return [] self.make_file_name(song) return [song] def download_song(self): songs = self.get_song(self.song_id) print(s % (2, 97, u'\n >> ' + u'1 首歌曲将要下载.')) \ if not args.play else '' #self.song_infos = [song_info] self.download(songs) def download_songs(self, song_ids): for song_id in song_ids: self.song_id = song_id songs = self.get_song(self.song_id) self.download(songs) def download_album(self): songs = self.get_songs(self.album_id) if not songs: return song = songs[0] d = song['album_name'] + ' - ' + song['artist_name'] dir_ = os.path.join(os.getcwdu(), d) self.dir_ = modificate_file_name_for_wget(dir_) amount_songs = unicode(len(songs)) songs = songs[args.from_ - 1:] print(s % (2, 97, u'\n >> ' + amount_songs + u' 首歌曲将要下载.')) \ if not args.play else '' self.download(songs, amount_songs, args.from_) def download_collect(self): page = 1 song_ids = [] while True: params = { 'id': self.collect_id, 'p': page, 'limit': 50, } infos = self._request(url_collect, params=params).json() for info in infos['result']['data']: song_ids.append(str(info['song_id'])) if infos['result']['total_page'] == page: break page += 1 html = self._request('http://www.xiami.com/collect/%s' % self.collect_id).text html = html.split('<div id="wall"')[0] collect_name = re.search(r'<title>(.+?)<', html).group(1) d = collect_name dir_ = os.path.join(os.getcwdu(), d) self.dir_ = modificate_file_name_for_wget(dir_) amount_songs = unicode(len(song_ids)) song_ids = song_ids[args.from_ - 1:] print(s % (2, 97, u'\n >> ' + amount_songs + u' 首歌曲将要下载.')) \ if not args.play else '' n = args.from_ for i in 
song_ids: songs = self.get_song(i) self.download(songs, amount_songs, n) self.html = '' self.disc_description_archives = {} n += 1 def download_artist_albums(self): ii = 1 album_ids = [] while True: html = self._request( url_artist_albums % (self.artist_id, str(ii))).text t = re.findall(r'/album/(\w+)"', html) if album_ids == t: break album_ids = t if album_ids: for i in album_ids: print ' ++ http://www.xiami.com/album/%s' % i self.album_id = i self.download_album() self.html = '' self.disc_description_archives = {} else: break ii += 1 def download_artist_top_20_songs(self): html = self._request(url_artist_top_song % self.artist_id).text song_ids = re.findall(r'/music/send/id/(\d+)', html) artist_name = re.search( r'<p><a href="/artist/\w+">(.+?)<', html).group(1) d = modificate_text(artist_name + u' - top 20') dir_ = os.path.join(os.getcwdu(), d) self.dir_ = modificate_file_name_for_wget(dir_) amount_songs = unicode(len(song_ids)) print(s % (2, 97, u'\n >> ' + amount_songs + u' 首歌曲将要下载.')) \ if not args.play else '' n = 1 for i in song_ids: songs = self.get_song(i) self.download(songs, amount_songs, n) self.html = '' self.disc_description_archives = {} n += 1 def download_artist_radio(self): html = self._request(url_artist_top_song % self.artist_id).text artist_name = re.search( r'<p><a href="/artist/\w+">(.+?)<', html).group(1) d = modificate_text(artist_name + u' - radio') dir_ = os.path.join(os.getcwdu(), d) self.dir_ = modificate_file_name_for_wget(dir_) url_artist_radio = "http://www.xiami.com/radio/xml/type/5/id/%s" \ % self.artist_id n = 1 while True: xml = self._request(url_artist_radio).text song_ids = re.findall(r'<song_id>(\d+)', xml) for i in song_ids: songs = self.get_song(i) self.download(songs, n=n) self.html = '' self.disc_description_archives = {} n += 1 def download_user_songs(self, url, desc): dir_ = os.path.join(os.getcwdu(), u'虾米用户 %s %s' % (self.user_id, desc)) self.dir_ = modificate_file_name_for_wget(dir_) ii = 1 n = 1 while True: html = self._request(url % (self.user_id, str(ii))).text song_ids = re.findall(r'/song/(.+?)"', html) if song_ids: for i in song_ids: songs = self.get_song(i) self.download(songs, n) self.html = '' self.disc_description_archives = {} n += 1 else: break ii += 1 def download_user_shares(self, url_shares): d = modificate_text(u'%s 的分享' % self.user_id) dir_ = os.path.join(os.getcwdu(), d) self.dir_ = modificate_file_name_for_wget(dir_) page = 1 while True: html = self._request(url_shares % page).text shares = re.findall(r'play.*\(\'\d+\'\)', html) for share in shares: if 'album' in share: self.album_id = re.search(r'\d+', share).group() self.download_album() else: self.song_id = re.search(r'\d+', share).group() self.download_song() if not shares: break page += 1 def download_ranking_songs(self, url, tp): d = modificate_text(u'%s 的试听排行 - %s' % (self.user_id, tp)) dir_ = os.path.join(os.getcwdu(), d) self.dir_ = modificate_file_name_for_wget(dir_) page = 1 n = 1 while True: html = self._request(url + '/page/' + str(page)).text song_ids = re.findall(r"play\('(\d+)'", html) if not song_ids: break for song_id in song_ids: songs = self.get_song(song_id) self.download(songs, n=n) self.html = '' self.disc_description_archives = {} n += 1 page += 1 def download_user_radio(self, url_rndsongs): d = modificate_text(u'%s 的虾米推荐' % self.user_id) dir_ = os.path.join(os.getcwdu(), d) self.dir_ = modificate_file_name_for_wget(dir_) n = 1 while True: xml = self._request(url_rndsongs % self.user_id).text song_ids = re.findall(r'<song_id>(\d+)', xml) for i in 
song_ids: songs = self.get_song(i) self.download(songs, n=n) self.html = '' self.disc_description_archives = {} n += 1 def download_chart(self, type_): html = self._request('http://www.xiami.com/chart/index/c/%s' \ % self.chart_id).text title = re.search(r'<title>(.+?)</title>', html).group(1) d = modificate_text(title) dir_ = os.path.join(os.getcwdu(), d) self.dir_ = modificate_file_name_for_wget(dir_) html = self._request( 'http://www.xiami.com/chart/data?c=%s&limit=200&type=%s' \ % (self.chart_id, type_)).text song_ids = re.findall(r'/song/(\d+)', html) n = 1 for i in song_ids: songs = self.get_song(i) self.download(songs, n=n) self.html = '' self.disc_description_archives = {} n += 1 def download_genre(self, url_genre): html = self._request(url_genre % (self.genre_id, 1)).text if '/gid/' in url_genre: t = re.search( r'/genre/detail/gid/%s".+?title="(.+?)"' \ % self.genre_id, html).group(1) elif '/sid/' in url_genre: t = re.search( r'/genre/detail/sid/%s" title="(.+?)"' \ % self.genre_id, html).group(1) d = modificate_text(u'%s - 代表曲目 - xiami' % t) dir_ = os.path.join(os.getcwdu(), d) self.dir_ = modificate_file_name_for_wget(dir_) n = 1 page = 2 while True: song_ids = re.findall(r'/song/(\d+)', html) if not song_ids: break for i in song_ids: songs = self.get_song(i) self.download(songs, n=n) self.html = '' self.disc_description_archives = {} n += 1 html = self._request(url_genre % (self.chart_id, page)).text page += 1 def download_genre_radio(self, url_genre): html = self._request(url_genre % (self.genre_id, 1)).text if '/gid/' in url_genre: t = re.search( r'/genre/detail/gid/%s".+?title="(.+?)"' \ % self.genre_id, html).group(1) url_genre_radio = "http://www.xiami.com/radio/xml/type/12/id/%s" \ % self.genre_id elif '/sid/' in url_genre: t = re.search( r'/genre/detail/sid/%s" title="(.+?)"' \ % self.genre_id, html).group(1) url_genre_radio = "http://www.xiami.com/radio/xml/type/13/id/%s" \ % self.genre_id d = modificate_text(u'%s - radio - xiami' % t) dir_ = os.path.join(os.getcwdu(), d) self.dir_ = modificate_file_name_for_wget(dir_) n = 1 while True: xml = self._request(url_genre_radio).text song_ids = re.findall(r'<song_id>(\d+)', xml) for i in song_ids: songs = self.get_song(i) self.download(songs, n=n) self.html = '' self.disc_description_archives = {} n += 1 def hack_luoo(self, url): # parse luoo.net theaders = headers theaders.pop('Referer') r = requests.get(url) if not r.ok: return None cn = r.content songs_info = re.findall(r'<p class="name">(.+?)</p>\s+' r'<p class="artist">(?:Artist:|艺人:)(.+?)</p>\s+' r'<p class="album">(?:Album:|专辑:)(.+?)</p>', cn) # search song at xiami for name, artist, album in songs_info: name = name.strip() artist = artist.strip() album = album.strip() songs = self._api.search_songs(name + ' ' + artist) if not songs: print s % (1, 93, ' !! 
no find:'), ' - '.join([name, artist, album]) continue self.make_file_name(songs[0]) self.download(songs[:1], n=1) def display_infos(self, i, nn, n, durl): length = datetime.datetime.fromtimestamp(i['length']).strftime('%M:%S') print n, '/', nn print s % (2, 94, i['file_name']) print s % (2, 95, i['album_name']) print s % (2, 93, length) print 'http://www.xiami.com/song/%s' % i['song_id'] print 'http://www.xiami.com/album/%s' % i['album_id'] print durl if i['durl_is_H'] == 'h': print s % (1, 97, 'MP3-Quality:'), s % (1, 92, 'High') else: print s % (1, 97, 'MP3-Quality:'), s % (1, 91, 'Low') print '—' * int(os.popen('tput cols').read()) def get_mp3_quality(self, durl): if 'm3.file.xiami.com' in durl \ or 'm6.file.xiami.com' in durl \ or '_h.mp3' in durl \ or 'm320.xiami.net' in durl: return 'h' else: return 'l' def play(self, songs, nn=u'1', n=1): if args.play == 2: songs = sorted(songs, key=lambda k: k['play_count'], reverse=True) for i in songs: self.record(i['song_id'], i['album_id']) durl = self.get_durl(i['song_id']) if not durl: print s % (2, 91, ' !! Error: can\'t get durl'), i['song_name'] continue cookies = '; '.join(['%s=%s' % (k, v) for k, v in ss.cookies.items()]) mp3_quality = self.get_mp3_quality(durl) i['durl_is_H'] = mp3_quality self.display_infos(i, nn, n, durl) n = int(n) + 1 cmd = 'mpv --really-quiet ' \ '--cache 8146 ' \ '--user-agent "%s" ' \ '--http-header-fields "Referer: http://img.xiami.com' \ '/static/swf/seiya/1.4/player.swf?v=%s",' \ '"Cookie: %s" ' \ '"%s"' \ % (headers['User-Agent'], int(time.time()*1000), cookies, durl) os.system(cmd) timeout = 1 ii, _, _ = select.select([sys.stdin], [], [], timeout) if ii: sys.exit(0) else: pass def download(self, songs, amount_songs=u'1', n=1): dir_ = modificate_file_name_for_wget(self.dir_) cwd = os.getcwd() if dir_ != cwd: if not os.path.exists(dir_): os.mkdir(dir_) ii = 1 for i in songs: num = random.randint(0, 100) % 8 col = s % (2, num + 90, i['file_name']) t = modificate_file_name_for_wget(i['file_name']) file_name = os.path.join(dir_, t) if os.path.exists(file_name): ## if file exists, no get_durl if args.undownload: self.modified_id3(file_name, i) ii += 1 n += 1 continue else: ii += 1 n += 1 continue if not args.undownload: if n == None: print(u'\n ++ download: #%s/%s# %s' \ % (ii, amount_songs, col)) else: print(u'\n ++ download: #%s/%s# %s' \ % (n, amount_songs, col)) n += 1 durl = self.get_durl(i['song_id']) if not durl: print s % (2, 91, ' |-- Error: can\'t get durl') continue mp3_quality = self.get_mp3_quality(durl) if mp3_quality == 'h': print ' |--', s % (1, 97, 'MP3-Quality:'), s % (1, 91, 'High') else: print ' |--', s % (1, 97, 'MP3-Quality:'), s % (1, 91, 'Low') cookies = '; '.join(['%s=%s' % (k, v) for k, v in ss.cookies.items()]) file_name_for_wget = file_name.replace('`', '\`') quiet = ' -q' if args.quiet else ' -nv' cmd = 'wget -c%s ' \ '-U "%s" ' \ '--header "Referer:http://img.xiami.com' \ '/static/swf/seiya/1.4/player.swf?v=%s" ' \ '--header "Cookie: member_auth=%s" ' \ '-O "%s.tmp" %s' \ % (quiet, headers['User-Agent'], int(time.time()*1000), cookies, file_name_for_wget, durl) cmd = cmd.encode('utf8') status = os.system(cmd) if status != 0: # other http-errors, such as 302. 
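# Note: on POSIX, os.system() returns the raw wait status with the child's
# exit code in the high byte, which is why the wget_es table at the top of
# this file keys most entries as `code << 8` (a bare 2 is death by SIGINT).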
wget_exit_status_info = wget_es[status] print('\n\n ----### \x1b[1;91mERROR\x1b[0m ==> \x1b[1;91m%d ' \ '(%s)\x1b[0m ###--- \n\n' % (status, wget_exit_status_info)) print s % (1, 91, ' ===> '), cmd sys.exit(1) else: os.rename('%s.tmp' % file_name, file_name) self.modified_id3(file_name, i) ii += 1 time.sleep(5) def _save_do(self, id_, type, tags): data = { "tags": tags, "type": type, "id": id_, "desc": "", "grade": "", "share": 0, "shareTo": "all", "_xiamitoken": ss.cookies['_xiamitoken'], } url = 'https://www.xiami.com/ajax/addtag' r = self._request(url, data=data, method='POST') j = r.json() if j['status'] == 'ok': return 0 else: return j['status'] def save(self, urls): tags = args.tags for url in urls: if '/collect/' in url: collect_id = re.search(r'/collect/(\w+)', url).group(1) print s % (1, 97, u'\n ++ save collect:'), \ 'http://www.xiami.com/song/collect/' + collect_id result = self._save_do(collect_id, 4, tags) elif '/album/' in url: album_id = re.search(r'/album/(\w+)', url).group(1) album = self._api.album(album_id) album_id = album[0].album_id print s % (1, 97, u'\n ++ save album:'), \ 'http://www.xiami.com/album/' + str(album_id) result = self._save_do(album_id, 5, tags) elif '/artist/' in url: artist_id = re.search(r'/artist/(\w+)', url).group(1) print s % (1, 97, u'\n ++ save artist:'), \ 'http://www.xiami.com/artist/' + artist_id result = self._save_do(artist_id, 6, tags) elif '/song/' in url: song_id = re.search(r'/song/(\w+)', url).group(1) song = self._api.song(song_id) song_id = song.song_id print s % (1, 97, u'\n ++ save song:'), \ 'http://www.xiami.com/song/' + str(song_id) result = self._save_do(song_id, 3, tags) elif '/u/' in url: user_id = re.search(r'/u/(\d+)', url).group(1) print s % (1, 97, u'\n ++ save user:'), \ 'http://www.xiami.com/u/' + user_id result = self._save_do(user_id, 1, tags) else: result = -1 print(s % (2, 91, u' 请正确输入虾米网址.')) if result == 0: print s % (1, 92, ' ++ success.\n') else: print s % (1, 91, ' !! Error at _save_do.'), result, '\n' def main(argv): if len(argv) < 2: sys.exit() ###################################################### # for argparse p = argparse.ArgumentParser(description='downloading any xiami.com') p.add_argument('xxx', type=str, nargs='*', \ help='命令对象.') p.add_argument('-p', '--play', action='count', \ help='play with mpv') p.add_argument('-l', '--low', action='store_true', \ help='low mp3') p.add_argument('-q', '--quiet', action='store_true', \ help='quiet for download') p.add_argument('-f', '--from_', action='store', \ default=1, type=int, \ help='从第几个开始下载,eg: -f 42') p.add_argument('-d', '--undescription', action='store_true', \ help='no add disk\'s distribution') p.add_argument('-t', '--tags', action='store', \ type=str, default='', help='tags. eg: piano,cello') p.add_argument('-n', '--undownload', action='store_true', \ help='no download, using to renew id3 tags') global args args = p.parse_args(argv[2:]) comd = argv[1] xxx = args.xxx if comd == 'login' or comd == 'g': # or comd == 'logintaobao' or comd == 'gt': # taobao has updated login algorithms which is hard to hack # so remove it. 
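# With the taobao path gone, the login command expects a cookie string pasted
# from the browser (fed to add_cookies below) instead of a password.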
if len(xxx) < 1: email = raw_input(s % (1, 97, ' username: ') \ if comd == 'logintaobao' or comd == 'gt' \ else s % (1, 97, ' email: ')) cookies = getpass(s % (1, 97, ' cookies: ')) elif len(xxx) == 1: # for add_member_auth if '; ' in xxx[0]: email = None cookies = xxx[0] else: email = xxx[0] cookies = getpass(s % (1, 97, ' cookies: ')) elif len(xxx) == 2: email = xxx[0] cookies = xxx[1] else: msg = ('login: \n' 'login cookies') print s % (1, 91, msg) return x = xiami() x.add_cookies(cookies) is_signin = x.check_login() if is_signin: print s % (1, 92, ' ++ login succeeds.') else: print s % (1, 91, ' login failes') elif comd == 'signout': g = open(cookie_file, 'w') g.close() elif comd == 'd' or comd == 'download': urls = xxx x = xiami() x.init() x.url_parser(urls) elif comd == 'p' or comd == 'play': if not args.play: args.play = 1 urls = xxx x = xiami() x.init() x.url_parser(urls) elif comd == 's' or comd == 'save': urls = xxx x = xiami() x.init() x.save(urls) else: print s % (2, 91, u' !! 命令错误\n') if __name__ == '__main__': argv = sys.argv main(argv)
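# A self-contained round-trip sketch (illustration only, not part of the
# script above) of the transposition that decry() undoes: the server writes
# the URL down the columns of a grid with `rows` rows (the leading digit of
# the payload) and sends it row by row; decry() rebuilds the rows and reads
# the grid back column by column, then unquotes and maps '^' to '0'. The
# toy below skips the unquote step.
def toy_scramble(text, rows):
    cols_base, rows_ex = divmod(len(text), rows)
    lengths = [cols_base + 1 if r < rows_ex else cols_base for r in range(rows)]
    grid = [[''] * length for length in lengths]
    for i, ch in enumerate(text):
        grid[i % rows][i // rows] = ch            # fill column-major
    return ''.join(''.join(row) for row in grid)  # emit row-major


def toy_descramble(rows, scrambled):
    cols_base, rows_ex = divmod(len(scrambled), rows)
    matrix, rest = [], scrambled
    for r in range(rows):
        length = cols_base + 1 if r < rows_ex else cols_base
        matrix.append(rest[:length])
        rest = rest[length:]
    # read column-major, exactly as decry() does before unquoting
    return ''.join(matrix[i % rows][i // rows] for i in range(len(scrambled)))


_url = 'http://example.org/a.mp3'
assert toy_descramble(3, toy_scramble(_url, 3)) == _url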
# coding=utf-8
import base64
import os
import uuid

import tornado.escape
import tornado.gen
import tornado.httpclient
import tornado.ioloop
import tornado.web
from tornado.concurrent import Future
from tornado.escape import utf8
from tornado.web import _create_signature_v1, _time_independent_equals

from qr import get_qrcode


def create_url_signed_value(secret, value):
    signature = _create_signature_v1(secret, value)
    token = "-".join([value, signature])
    return token


class LoginBuff(object):
    def __init__(self):
        self.waiters = {}

    def wait_for_login(self, user_id):
        future = Future()
        self.waiters[user_id] = future
        return future

    def new_login_msg(self, user_id):
        if user_id in self.waiters:
            self.waiters[user_id].set_result(True)
            self.waiters.pop(user_id)


global_login_buff = LoginBuff()


class BaseHandler(tornado.web.RequestHandler):
    def get_current_user(self):
        user_id = self.get_secure_cookie('user_id')
        if not user_id:
            return None
        return user_id

    def decode_url_signed_value(self, token):
        parts = utf8(token).split("-")
        if len(parts) != 2:
            return False
        signature = _create_signature_v1(
            self.application.settings["cookie_secret"], parts[0])
        if not _time_independent_equals(parts[1], signature):
            return False
        return parts[0]


class CellPhoneLoginHandler(BaseHandler):
    def get(self, token):
        user_id = self.decode_url_signed_value(token)
        if user_id and user_id in global_login_buff.waiters:
            self.render('cellphone.html')
        else:
            self.write('QR code not recognized, please scan it again')

    def post(self, token):
        user_id = self.decode_url_signed_value(token)
        if user_id and user_id in global_login_buff.waiters:
            global_login_buff.new_login_msg(user_id)
            self.write('PC login succeeded!')
        else:
            self.write('QR code not recognized, please scan it again')


class HelloHandler(BaseHandler):
    @tornado.web.authenticated
    def get(self):
        self.render('hello.html')


class LogoutHandler(BaseHandler):
    def get(self):
        self.clear_cookie("user_id")
        self.redirect("/pc")


class PCLoginRedirectHandler(BaseHandler):
    def get(self):
        user_id = uuid.uuid4().get_hex()
        token = create_url_signed_value(
            self.application.settings["cookie_secret"], user_id)
        self.redirect('/pc/{0}'.format(token))


class PCLoginHandler(BaseHandler):
    def get(self, token):
        user_id = self.decode_url_signed_value(token)
        if user_id and user_id not in global_login_buff.waiters:
            url = 'http://{0}/cellphone/{1}'.format(self.request.host, token)
            img_data = get_qrcode(url)
            base64_img_data = base64.b64encode(img_data)
            self.render('pc.html', base64_img_data=base64_img_data)
        else:
            self.redirect('/pc')

    @tornado.gen.coroutine
    def post(self, token):
        user_id = self.decode_url_signed_value(token)
        self.user_id = user_id
        login_success = yield global_login_buff.wait_for_login(user_id)
        if login_success:
            self.set_secure_cookie('user_id', user_id)
            self.write('ok')

    def on_connection_close(self):
        # Use a default so a browser disconnecting after the login has already
        # resolved (and its waiter was popped) does not raise KeyError.
        global_login_buff.waiters.pop(self.user_id, None)


application = tornado.web.Application(
    [
        (r"/cellphone/([^/]+)", CellPhoneLoginHandler),
        (r"/hello", HelloHandler),
        (r"/logout", LogoutHandler),
        (r"/pc/([^/]+)", PCLoginHandler),
        (r"/pc", PCLoginRedirectHandler),
        (r"/", tornado.web.RedirectHandler, {'url': '/pc'}),
    ],
    template_path=os.path.join(os.path.dirname(__file__), "templates"),
    static_path=os.path.join(os.path.dirname(__file__), "static"),
    cookie_secret="fuck xi bao zi",
    debug=True,
    login_url='/pc',
)

if __name__ == "__main__":
    import tornado.options
    tornado.options.parse_command_line()
    application.listen(8888, '0.0.0.0')
    tornado.ioloop.IOLoop.current().start()
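# A minimal driver sketch (assumes the app above is already running on
# localhost:8888 and that the third-party `requests` package is installed):
# one thread plays the PC browser parked on POST /pc/<token>, the main
# thread plays the phone that scanned the QR code and confirms the login.
import threading
import time

import requests

BASE = 'http://localhost:8888'
sess = requests.Session()
# GET /pc redirects to /pc/<signed token>; recover the token from the URL.
token = sess.get(BASE + '/pc').url.rsplit('/', 1)[-1]


def pc_waits():
    # Parked server-side on the Future created by LoginBuff.wait_for_login().
    r = sess.post(BASE + '/pc/' + token)
    print('PC side unblocked: ' + r.text)  # expects 'ok'


t = threading.Thread(target=pc_waits)
t.start()
time.sleep(1)  # give the long poll time to register its waiter
requests.post(BASE + '/cellphone/' + token)  # the "phone" confirms the scan
t.join()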
# -*- coding: utf-8 -*-
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import ListProperty, ObjectProperty
from kivy.uix.button import Button

from components.touch_selector import TouchSelector
from components.bubble_buttons import BubbleButtons
from layout.image_layout import ImageLayout


class EditImageLayout(FloatLayout):
    color_button = ListProperty([1, .3, .4, 1])
    button_color = ListProperty([0, 0, 0, 1])
    rectangle_selector = ObjectProperty()
    text_size_rectangle = ObjectProperty()
    image_layout = ObjectProperty()
    bubble_buttons = ObjectProperty()
    bubble_buttons_undo_confirm = ObjectProperty()

    def __init__(self, **kwargs):
        self.sm = kwargs.pop('sm', None)
        self.crop_image_screen = kwargs.pop('crop_image_screen', None)
        super(EditImageLayout, self).__init__(**kwargs)
        self.rectangle_selector.bind(size_selected=self.on_change_size_rectangle_selector)
        self.rectangle_selector.bind(size_selected_temp=self.update_text_size_rectangle)
        self.bind(on_touch_down=self.bubble_buttons.hide)
        self.bubble_buttons.resize_button.bind(on_press=self.on_press_resize_button)
        self.bubble_buttons_undo_confirm.undo_button.bind(on_press=self.on_press_undo_button)
        self.bubble_buttons_undo_confirm.confirm_button.bind(on_press=self.on_press_confirm_button)

    def on_change_size_rectangle_selector(self, instance, size_selected):
        if not self.rectangle_selector.tap_not_draw_a_line():
            self.bubble_buttons.show()
        else:
            self.text_size_rectangle.text = ''

    def on_press_resize_button(self, instance):
        self.image_layout.resize_image(width=self.rectangle_selector.size_selected[0],
                                       height=self.rectangle_selector.size_selected[1])
        self.rectangle_selector.delete_line()
        self.text_size_rectangle.text = ''
        self.bubble_buttons_undo_confirm.show()

    def on_press_undo_button(self, instance):
        size = self.image_layout.old_size
        self.image_layout.resize_image(width=size[0], height=size[1])
        self.bubble_buttons_undo_confirm.hide()

    def on_press_confirm_button(self, instance):
        self.bubble_buttons_undo_confirm.hide()

    def update_text_size_rectangle(self, instance, size):
        self.text_size_rectangle.text = '({0}, {1})'.format(int(size[0]), int(size[1]))
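# A self-contained sketch (requires kivy, independent of the widgets above)
# of the observer pattern EditImageLayout relies on: assigning to a Kivy
# property dispatches to every callback bound to it, which is how changes to
# rectangle_selector.size_selected reach on_change_size_rectangle_selector.
from kivy.event import EventDispatcher
from kivy.properties import ListProperty


class ToySelector(EventDispatcher):
    size_selected = ListProperty([0, 0])


def report(instance, value):
    print('selection is now {0}x{1}'.format(*value))


selector = ToySelector()
selector.bind(size_selected=report)
selector.size_selected = [320, 240]  # assignment triggers report()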
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Training functions for Gradient boosted decision trees.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy from tensorflow.contrib import learn from tensorflow.contrib import stateless from tensorflow.contrib.boosted_trees.lib.learner.batch import categorical_split_handler from tensorflow.contrib.boosted_trees.lib.learner.batch import ordinal_split_handler from tensorflow.contrib.boosted_trees.proto import learner_pb2 from tensorflow.contrib.boosted_trees.python.ops import batch_ops_utils from tensorflow.contrib.boosted_trees.python.ops import gen_model_ops from tensorflow.contrib.boosted_trees.python.ops import model_ops from tensorflow.contrib.boosted_trees.python.ops import prediction_ops from tensorflow.contrib.boosted_trees.python.ops import stats_accumulator_ops from tensorflow.contrib.boosted_trees.python.ops import training_ops from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib from tensorflow.contrib.layers.python.layers import feature_column_ops from tensorflow.python.feature_column import feature_column as fc_core from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import math_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.ops.losses import losses from tensorflow.python.platform import tf_logging as logging from tensorflow.python.summary import summary from tensorflow.python.training import device_setter # Key names for prediction dict. ENSEMBLE_STAMP = "ensemble_stamp" PREDICTIONS = "predictions" PARTITION_IDS = "partition_ids" NUM_LAYERS_ATTEMPTED = "num_layers" NUM_TREES_ATTEMPTED = "num_trees" NUM_USED_HANDLERS = "num_used_handlers" USED_HANDLERS_MASK = "used_handlers_mask" LEAF_INDEX = "leaf_index" _FEATURE_NAME_TEMPLATE = "%s_%d" # Keys in Training state. 
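# (GBDTTrainingState below bundles the per-layer example and step counters,
# the currently active tree and layer, the bias-centering flag, and the
# stats accumulators plus split handlers that update_stats threads together.)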
GBDTTrainingState = collections.namedtuple("GBDTTrainingState", [ "num_layer_examples", "num_layer_steps", "num_layers", "active_tree", "active_layer", "continue_centering", "bias_stats_accumulator", "steps_accumulator", "handlers" ]) def _get_column_by_index(tensor, indices): """Returns columns from a 2-D tensor by index.""" shape = array_ops.shape(tensor) p_flat = array_ops.reshape(tensor, [-1]) i_flat = array_ops.reshape( array_ops.reshape(math_ops.range(0, shape[0]) * shape[1], [-1, 1]) + indices, [-1]) return array_ops.reshape(array_ops.gather(p_flat, i_flat), [shape[0], -1]) def _make_predictions_dict(stamp, logits, partition_ids, ensemble_stats, used_handlers, leaf_index=None): """Returns predictions for the given logits and n_classes. Args: stamp: The ensemble stamp. logits: A rank 2 `Tensor` with shape [batch_size, n_classes - 1]. that contains predictions when no dropout was applied. partition_ids: A rank 1 `Tensor` with shape [batch_size]. ensemble_stats: A TreeEnsembleStatsOp result tuple. used_handlers: A TreeEnsembleUsedHandlerOp result tuple of an int and a boolean mask. leaf_index: A rank 2 `Tensor` with shape [batch_size, number of trees]. that contains leaf id for each example prediction. Returns: A dict of predictions. """ result = {} result[ENSEMBLE_STAMP] = stamp result[PREDICTIONS] = logits result[PARTITION_IDS] = partition_ids result[NUM_LAYERS_ATTEMPTED] = ensemble_stats.attempted_layers result[NUM_TREES_ATTEMPTED] = ensemble_stats.attempted_trees result[NUM_USED_HANDLERS] = used_handlers.num_used_handlers result[USED_HANDLERS_MASK] = used_handlers.used_handlers_mask if leaf_index is not None: result[LEAF_INDEX] = leaf_index return result class _OpRoundRobinStrategy(object): """Returns the next ps task index for placement via per-Op round-robin order. This strategy works slightly better for the GBDT graph because of using custom resources which vary significantly in compute cost. """ def __init__(self, ps_ops, num_tasks): """Create a new `_RoundRobinStrategy`. Args: ps_ops: List of Op types to place on PS. num_tasks: Number of ps tasks to cycle among. """ next_task = 0 self._next_task_per_op = {} for op in ps_ops: self._next_task_per_op[op] = next_task next_task = (next_task + 1) % num_tasks if num_tasks else 0 self._num_tasks = num_tasks def __call__(self, op): """Choose a ps task index for the given `Operation`. Args: op: An `Operation` to be placed on ps. Returns: The next ps task index to use for the `Operation`. Returns the next index, in the range `[offset, offset + num_tasks)`. Raises: ValueError: If attempting to place non-PS Op. """ if op.type not in self._next_task_per_op: raise ValueError("Unknown op type '%s' for placement:" % op.type) task = self._next_task_per_op[op.type] self._next_task_per_op[op.type] = ((task + 1) % self._num_tasks if self._num_tasks else 0) return task def extract_features(features, feature_columns, use_core_columns): """Extracts columns from a dictionary of features. Args: features: `dict` of `Tensor` objects. feature_columns: A list of feature_columns. Returns: Seven values: - A list of all feature column names. - A list of dense floats. - A list of sparse float feature indices. - A list of sparse float feature values. - A list of sparse float feature shapes. - A list of sparse int feature indices. - A list of sparse int feature values. - A list of sparse int feature shapes. Raises: ValueError: if features is not valid. 
""" if not features: raise ValueError("Features dictionary must be specified.") # Make a shallow copy of features to ensure downstream usage # is unaffected by modifications in the model function. features = copy.copy(features) if feature_columns: scope = "gbdt" with variable_scope.variable_scope(scope): feature_columns = list(feature_columns) transformed_features = collections.OrderedDict() for fc in feature_columns: # pylint: disable=protected-access if use_core_columns: # pylint: disable=protected-access tensor = fc_core._transform_features(features, [fc])[fc] transformed_features[fc.name] = tensor elif isinstance(fc, feature_column_lib._EmbeddingColumn): # pylint: enable=protected-access transformed_features[fc.name] = fc_core.input_layer( features, [fc], weight_collections=[scope]) else: result = feature_column_ops.transform_features(features, [fc]) if len(result) > 1: raise ValueError("Unexpected number of output features") transformed_features[fc.name] = result[list(result.keys())[0]] features = transformed_features dense_float_names = [] dense_floats = [] sparse_float_names = [] sparse_float_indices = [] sparse_float_values = [] sparse_float_shapes = [] sparse_int_names = [] sparse_int_indices = [] sparse_int_values = [] sparse_int_shapes = [] for key in sorted(features.keys()): tensor = features[key] if isinstance(tensor, sparse_tensor.SparseTensor): if tensor.values.dtype == dtypes.float32: sparse_float_names.append(key) sparse_float_indices.append(tensor.indices) sparse_float_values.append(tensor.values) sparse_float_shapes.append(tensor.dense_shape) elif tensor.values.dtype == dtypes.int64: sparse_int_names.append(key) sparse_int_indices.append(tensor.indices) sparse_int_values.append(tensor.values) sparse_int_shapes.append(tensor.dense_shape) else: raise ValueError("Unsupported sparse feature %s with dtype %s." % (tensor.indices.name, tensor.dtype)) else: if tensor.dtype == dtypes.float32: if len(tensor.shape) > 1 and tensor.shape[1] > 1: unstacked = array_ops.unstack(tensor, axis=1) for i in range(len(unstacked)): dense_float_names.append(_FEATURE_NAME_TEMPLATE % (key, i)) dense_floats.append(array_ops.reshape(unstacked[i], [-1, 1])) else: dense_float_names.append(key) dense_floats.append(tensor) else: raise ValueError("Unsupported dense feature %s with dtype %s." % (tensor.name, tensor.dtype)) # Feature columns are logically organized into incrementing slots starting # from dense floats, then sparse floats then sparse ints. fc_names = (dense_float_names + sparse_float_names + sparse_int_names) return (fc_names, dense_floats, sparse_float_indices, sparse_float_values, sparse_float_shapes, sparse_int_indices, sparse_int_values, sparse_int_shapes) def _dropout_params(mode, ensemble_stats): """Returns parameters relevant for dropout. Args: mode: Train/Eval/Infer ensemble_stats: A TreeEnsembleStatsOp result tuple. Returns: Whether to apply dropout and a dropout seed. """ if mode == learn.ModeKeys.TRAIN: # Do dropout only during training. 
apply_dropout = True seed = ensemble_stats.attempted_trees else: seed = -1 apply_dropout = False return apply_dropout, seed class GradientBoostedDecisionTreeModel(object): """A GBDT model function.""" def __init__(self, is_chief, num_ps_replicas, ensemble_handle, center_bias, examples_per_layer, learner_config, features, logits_dimension, loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS, feature_columns=None, use_core_columns=False, output_leaf_index=False, output_leaf_index_modes=None): """Construct a new GradientBoostedDecisionTreeModel function. Args: is_chief: Whether to build the chief graph. num_ps_replicas: Number of parameter server replicas, can be 0. ensemble_handle: A handle to the ensemble variable. center_bias: Whether to center the bias before growing trees. examples_per_layer: Number of examples to accumulate before growing a tree layer. It can also be a function that computes the number of examples based on the depth of the layer that's being built. learner_config: A learner config. features: `dict` of `Tensor` objects. logits_dimension: An int, the dimension of logits. loss_reduction: Either `SUM_OVER_NONZERO_WEIGHTS` (mean) or `SUM`. feature_columns: A list of feature columns. use_core_columns: A boolean specifying whether core feature columns are used. output_leaf_index: A boolean variable indicating whether to output leaf index into predictions dictionary. output_leaf_index_modes: A list of modes from (TRAIN, EVAL, INFER) which dictates when leaf indices will be outputted. By default, leaf indices are only outputted in INFER mode. Raises: ValueError: if inputs are not valid. """ if ensemble_handle is None: raise ValueError("ensemble_handle must be specified.") if learner_config is None: raise ValueError("learner_config must be specified.") if learner_config.num_classes < 2: raise ValueError("Number of classes must be >=2") self._logits_dimension = logits_dimension self._is_chief = is_chief self._num_ps_replicas = num_ps_replicas self._ensemble_handle = ensemble_handle self._center_bias = center_bias self._examples_per_layer = examples_per_layer # Check loss reduction value. if (loss_reduction != losses.Reduction.SUM and loss_reduction != losses.Reduction.SUM_OVER_NONZERO_WEIGHTS): raise ValueError( "Invalid loss reduction is provided: %s." % loss_reduction) self._loss_reduction = loss_reduction # Fill in the defaults. if (learner_config.multi_class_strategy == learner_pb2.LearnerConfig.MULTI_CLASS_STRATEGY_UNSPECIFIED): if logits_dimension == 1: learner_config.multi_class_strategy = ( learner_pb2.LearnerConfig.TREE_PER_CLASS) else: learner_config.multi_class_strategy = ( learner_pb2.LearnerConfig.DIAGONAL_HESSIAN) if logits_dimension == 1 or learner_config.multi_class_strategy == ( learner_pb2.LearnerConfig.TREE_PER_CLASS): self._gradient_shape = tensor_shape.scalar() self._hessian_shape = tensor_shape.scalar() else: if center_bias: raise ValueError("Center bias should be False for multiclass.") self._gradient_shape = tensor_shape.TensorShape([logits_dimension]) if (learner_config.multi_class_strategy == learner_pb2.LearnerConfig.FULL_HESSIAN): self._hessian_shape = tensor_shape.TensorShape( ([logits_dimension, logits_dimension])) else: # Diagonal hessian strategy. 
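# Only per-class second derivatives are kept: a length-[logits_dimension]
# vector instead of the [logits_dimension, logits_dimension] matrix that
# the FULL_HESSIAN strategy allocates above.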
self._hessian_shape = tensor_shape.TensorShape(([logits_dimension])) if (learner_config.growing_mode == learner_pb2.LearnerConfig.GROWING_MODE_UNSPECIFIED): learner_config.growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER if (learner_config.pruning_mode == learner_pb2.LearnerConfig.PRUNING_MODE_UNSPECIFIED): learner_config.pruning_mode = learner_pb2.LearnerConfig.POST_PRUNE if learner_config.constraints.max_tree_depth == 0: # Use 6 as the default maximum depth. learner_config.constraints.max_tree_depth = 6 tuner = learner_config.learning_rate_tuner.WhichOneof("tuner") if not tuner: learner_config.learning_rate_tuner.fixed.learning_rate = 0.1 self._learner_config = learner_config self._feature_columns = feature_columns self._learner_config_serialized = learner_config.SerializeToString() self._max_tree_depth = variables.Variable( initial_value=self._learner_config.constraints.max_tree_depth) self._attempted_trees = variables.Variable( initial_value=array_ops.zeros([], dtypes.int64), trainable=False, name="attempted_trees") self._finalized_trees = variables.Variable( initial_value=array_ops.zeros([], dtypes.int64), trainable=False, name="finalized_trees") if not features: raise ValueError("Features dictionary must be specified.") (fc_names, dense_floats, sparse_float_indices, sparse_float_values, sparse_float_shapes, sparse_int_indices, sparse_int_values, sparse_int_shapes) = extract_features( features, self._feature_columns, use_core_columns) logging.info("Active Feature Columns: " + str(fc_names)) logging.info("Learner config: " + str(learner_config)) self._fc_names = fc_names self._dense_floats = dense_floats self._sparse_float_indices = sparse_float_indices self._sparse_float_values = sparse_float_values self._sparse_float_shapes = sparse_float_shapes self._sparse_int_indices = sparse_int_indices self._sparse_int_values = sparse_int_values self._sparse_int_shapes = sparse_int_shapes self._reduce_dim = ( self._learner_config.multi_class_strategy == learner_pb2.LearnerConfig.TREE_PER_CLASS and learner_config.num_classes == 2) if output_leaf_index_modes is None: output_leaf_index_modes = [learn.ModeKeys.INFER] elif not all( mode in (learn.ModeKeys.TRAIN, learn.ModeKeys.EVAL, learn.ModeKeys.INFER) for mode in output_leaf_index_modes): raise ValueError("output_leaf_index_modes should only contain ModeKeys.") self._output_leaf_index = output_leaf_index self._output_leaf_index_modes = output_leaf_index_modes def _predict_and_return_dict(self, ensemble_handle, ensemble_stamp, mode): """Runs prediction and returns a dictionary of the prediction results. Args: ensemble_handle: ensemble resource handle. ensemble_stamp: stamp of ensemble resource. mode: learn.ModeKeys.TRAIN or EVAL or INFER. Returns: a dictionary of prediction results - ENSEMBLE_STAMP, PREDICTION, PARTITION_IDS, NUM_LAYER_ATTEMPTED, NUM_TREES_ATTEMPTED. """ ensemble_stats = training_ops.tree_ensemble_stats(ensemble_handle, ensemble_stamp) num_handlers = ( len(self._dense_floats) + len(self._sparse_float_shapes) + len( self._sparse_int_shapes)) # Used during feature selection. used_handlers = model_ops.tree_ensemble_used_handlers( ensemble_handle, ensemble_stamp, num_all_handlers=num_handlers) # We don't need dropout info - we can always restore it based on the # seed. apply_dropout, seed = _dropout_params(mode, ensemble_stats) # Make sure ensemble stats run. This will check that the ensemble has # the right stamp. 
with ops.control_dependencies(ensemble_stats): leaf_index = None if self._output_leaf_index and mode in self._output_leaf_index_modes: predictions, _, leaf_index = ( prediction_ops).gradient_trees_prediction_verbose( ensemble_handle, seed, self._dense_floats, self._sparse_float_indices, self._sparse_float_values, self._sparse_float_shapes, self._sparse_int_indices, self._sparse_int_values, self._sparse_int_shapes, learner_config=self._learner_config_serialized, apply_dropout=apply_dropout, apply_averaging=mode != learn.ModeKeys.TRAIN, use_locking=True, center_bias=self._center_bias, reduce_dim=self._reduce_dim) else: leaf_index = None predictions, _ = prediction_ops.gradient_trees_prediction( ensemble_handle, seed, self._dense_floats, self._sparse_float_indices, self._sparse_float_values, self._sparse_float_shapes, self._sparse_int_indices, self._sparse_int_values, self._sparse_int_shapes, learner_config=self._learner_config_serialized, apply_dropout=apply_dropout, apply_averaging=mode != learn.ModeKeys.TRAIN, use_locking=True, center_bias=self._center_bias, reduce_dim=self._reduce_dim) partition_ids = prediction_ops.gradient_trees_partition_examples( ensemble_handle, self._dense_floats, self._sparse_float_indices, self._sparse_float_values, self._sparse_float_shapes, self._sparse_int_indices, self._sparse_int_values, self._sparse_int_shapes, use_locking=True) return _make_predictions_dict(ensemble_stamp, predictions, partition_ids, ensemble_stats, used_handlers, leaf_index) def predict(self, mode): """Returns predictions given the features and mode. Args: mode: Mode the graph is running in (train|predict|eval). Returns: A dict of predictions tensors. Raises: ValueError: if features is not valid. """ # Use the current ensemble to predict on the current batch of input. # For faster prediction we check if the inputs are on the same device # as the model. If not, we create a copy of the model on the worker. input_deps = ( self._dense_floats + self._sparse_float_indices + self._sparse_int_indices) if not input_deps: raise ValueError("No input tensors for prediction.") # Get most current model stamp. ensemble_stamp = model_ops.tree_ensemble_stamp_token(self._ensemble_handle) # Determine if ensemble is colocated with the inputs. if self._ensemble_handle.device != input_deps[0].device: # Create a local ensemble and get its local stamp. with ops.name_scope("local_ensemble", "TreeEnsembleVariable") as name: local_ensemble_handle = ( gen_model_ops.decision_tree_ensemble_resource_handle_op(name=name)) create_op = gen_model_ops.create_tree_ensemble_variable( local_ensemble_handle, stamp_token=-1, tree_ensemble_config="") with ops.control_dependencies([create_op]): local_stamp = model_ops.tree_ensemble_stamp_token( local_ensemble_handle) # Determine whether the local ensemble is stale and update it if needed. def _refresh_local_ensemble_fn(): # Serialize the model from parameter server after reading the inputs. with ops.control_dependencies([input_deps[0]]): (ensemble_stamp, serialized_model) = ( model_ops.tree_ensemble_serialize(self._ensemble_handle)) # Update local ensemble with the serialized model from parameter server. 
with ops.control_dependencies([create_op]): return model_ops.tree_ensemble_deserialize( local_ensemble_handle, stamp_token=ensemble_stamp, tree_ensemble_config=serialized_model), ensemble_stamp refresh_local_ensemble, ensemble_stamp = control_flow_ops.cond( math_ops.not_equal(ensemble_stamp, local_stamp), _refresh_local_ensemble_fn, lambda: (control_flow_ops.no_op(), ensemble_stamp)) # Once updated, use the local model for prediction. with ops.control_dependencies([refresh_local_ensemble]): return self._predict_and_return_dict(local_ensemble_handle, ensemble_stamp, mode) else: # Use ensemble_handle directly, if colocated. with ops.device(self._ensemble_handle.device): return self._predict_and_return_dict(self._ensemble_handle, ensemble_stamp, mode) def _get_class_id(self, predictions_dict): # Handle different multiclass strategies. if (self._learner_config.multi_class_strategy == learner_pb2.LearnerConfig.TREE_PER_CLASS and self._logits_dimension != 1): # Choose the class for which the tree is built (one vs rest). return math_ops.to_int32( predictions_dict[NUM_TREES_ATTEMPTED] % self._logits_dimension) return constant_op.constant(-1, dtype=dtypes.int32) def update_stats(self, loss, predictions_dict): """Update the accumulators with stats from this batch. Args: loss: A scalar tensor representing average loss of examples. predictions_dict: Dictionary of Rank 2 `Tensor` representing information about predictions per example. Returns: Three values: - An op that adds a new tree to the ensemble, and - An op that increments the stamp but removes all the trees and resets the handlers. This can be used to reset the state of the ensemble. - A dict containing the training state. Raises: ValueError: if inputs are not valid. """ # Get the worker device from input dependencies. input_deps = ( self._dense_floats + self._sparse_float_indices + self._sparse_int_indices) worker_device = input_deps[0].device # Get tensors relevant for training and form the loss. predictions = predictions_dict[PREDICTIONS] partition_ids = predictions_dict[PARTITION_IDS] ensemble_stamp = predictions_dict[ENSEMBLE_STAMP] gradients = gradients_impl.gradients( loss, predictions, name="Gradients", colocate_gradients_with_ops=False, gate_gradients=0, aggregation_method=None)[0] strategy = self._learner_config.multi_class_strategy class_id = self._get_class_id(predictions_dict) # Handle different multiclass strategies. if strategy == learner_pb2.LearnerConfig.TREE_PER_CLASS: # We build one vs rest trees. if self._logits_dimension == 1: # We have only 1 score, gradients is of shape [batch, 1]. hessians = gradients_impl.gradients( gradients, predictions, name="Hessian", colocate_gradients_with_ops=False, gate_gradients=0, aggregation_method=None)[0] squeezed_gradients = array_ops.squeeze(gradients, axis=[1]) squeezed_hessians = array_ops.squeeze(hessians, axis=[1]) else: hessian_list = self._diagonal_hessian(gradients, predictions) # Assemble hessian list into a tensor. hessians = array_ops.stack(hessian_list, axis=1) # Use class id tensor to get the column with that index from gradients # and hessians. squeezed_gradients = array_ops.squeeze( _get_column_by_index(gradients, class_id)) squeezed_hessians = array_ops.squeeze( _get_column_by_index(hessians, class_id)) else: # Other multiclass strategies. if strategy == learner_pb2.LearnerConfig.FULL_HESSIAN: hessian_list = self._full_hessian(gradients, predictions) else: # Diagonal hessian strategy. 
hessian_list = self._diagonal_hessian(gradients, predictions) squeezed_gradients = gradients hessians = array_ops.stack(hessian_list, axis=1) squeezed_hessians = hessians # Get the weights for each example for quantiles calculation, weights = self._get_weights(self._hessian_shape, squeezed_hessians) # Create all handlers ensuring resources are evenly allocated across PS. fc_name_idx = 0 handlers = [] init_stamp_token = constant_op.constant(0, dtype=dtypes.int64) l1_regularization = constant_op.constant( self._learner_config.regularization.l1, dtypes.float32) l2_regularization = constant_op.constant( self._learner_config.regularization.l2, dtypes.float32) tree_complexity_regularization = constant_op.constant( self._learner_config.regularization.tree_complexity, dtypes.float32) min_node_weight = constant_op.constant( self._learner_config.constraints.min_node_weight, dtypes.float32) loss_uses_sum_reduction = self._loss_reduction == losses.Reduction.SUM loss_uses_sum_reduction = constant_op.constant(loss_uses_sum_reduction) epsilon = 0.01 num_quantiles = 100 strategy_tensor = constant_op.constant(strategy) with ops.device(self._get_replica_device_setter(worker_device)): # Create handlers for dense float columns for dense_float_column_idx in range(len(self._dense_floats)): fc_name = self._fc_names[fc_name_idx] handlers.append( ordinal_split_handler.DenseSplitHandler( l1_regularization=l1_regularization, l2_regularization=l2_regularization, tree_complexity_regularization=tree_complexity_regularization, min_node_weight=min_node_weight, feature_column_group_id=constant_op.constant( dense_float_column_idx), epsilon=epsilon, num_quantiles=num_quantiles, dense_float_column=self._dense_floats[dense_float_column_idx], name=fc_name, gradient_shape=self._gradient_shape, hessian_shape=self._hessian_shape, multiclass_strategy=strategy_tensor, init_stamp_token=init_stamp_token, loss_uses_sum_reduction=loss_uses_sum_reduction, )) fc_name_idx += 1 # Create handlers for sparse float columns. for sparse_float_column_idx in range(len(self._sparse_float_indices)): fc_name = self._fc_names[fc_name_idx] handlers.append( ordinal_split_handler.SparseSplitHandler( l1_regularization=l1_regularization, l2_regularization=l2_regularization, tree_complexity_regularization=tree_complexity_regularization, min_node_weight=min_node_weight, feature_column_group_id=constant_op.constant( sparse_float_column_idx), epsilon=epsilon, num_quantiles=num_quantiles, sparse_float_column=sparse_tensor.SparseTensor( self._sparse_float_indices[sparse_float_column_idx], self._sparse_float_values[sparse_float_column_idx], self._sparse_float_shapes[sparse_float_column_idx]), name=fc_name, gradient_shape=self._gradient_shape, hessian_shape=self._hessian_shape, multiclass_strategy=strategy_tensor, init_stamp_token=init_stamp_token, loss_uses_sum_reduction=loss_uses_sum_reduction)) fc_name_idx += 1 # Create handlers for sparse int columns. 
for sparse_int_column_idx in range(len(self._sparse_int_indices)): fc_name = self._fc_names[fc_name_idx] handlers.append( categorical_split_handler.EqualitySplitHandler( l1_regularization=l1_regularization, l2_regularization=l2_regularization, tree_complexity_regularization=tree_complexity_regularization, min_node_weight=min_node_weight, feature_column_group_id=constant_op.constant( sparse_int_column_idx), sparse_int_column=sparse_tensor.SparseTensor( self._sparse_int_indices[sparse_int_column_idx], self._sparse_int_values[sparse_int_column_idx], self._sparse_int_shapes[sparse_int_column_idx]), name=fc_name, gradient_shape=self._gradient_shape, hessian_shape=self._hessian_shape, multiclass_strategy=strategy_tensor, init_stamp_token=init_stamp_token, loss_uses_sum_reduction=loss_uses_sum_reduction)) fc_name_idx += 1 # Create ensemble stats variables. num_layer_examples = variables.Variable( initial_value=array_ops.zeros([], dtypes.int64), name="num_layer_examples", trainable=False) num_layer_steps = variables.Variable( initial_value=array_ops.zeros([], dtypes.int64), name="num_layer_steps", trainable=False) num_layers = variables.Variable( initial_value=array_ops.zeros([], dtypes.int64), name="num_layers", trainable=False) active_tree = variables.Variable( initial_value=array_ops.zeros([], dtypes.int64), name="active_tree", trainable=False) active_layer = variables.Variable( initial_value=array_ops.zeros([], dtypes.int64), name="active_layer", trainable=False) # Variable that becomes false once bias centering is done. continue_centering = variables.Variable( initial_value=self._center_bias, name="continue_centering", trainable=False) # Create bias stats accumulator. bias_stats_accumulator = stats_accumulator_ops.StatsAccumulator( stamp_token=0, gradient_shape=self._gradient_shape, hessian_shape=self._hessian_shape, name="BiasAccumulator") # Create steps accumulator. steps_accumulator = stats_accumulator_ops.StatsAccumulator( stamp_token=0, gradient_shape=tensor_shape.scalar(), hessian_shape=tensor_shape.scalar(), name="StepsAccumulator") # Create ensemble stats summaries. summary.scalar("layer_stats/num_examples", num_layer_examples) summary.scalar("layer_stats/num_steps", num_layer_steps) summary.scalar("ensemble_stats/active_tree", active_tree) summary.scalar("ensemble_stats/active_layer", active_layer) # Update bias stats. stats_update_ops = [] stats_update_ops.append( control_flow_ops.cond( continue_centering, self._make_update_bias_stats_fn( ensemble_stamp, predictions, gradients, bias_stats_accumulator), control_flow_ops.no_op)) # Update handler stats. handler_reads = collections.OrderedDict() for handler in handlers: handler_reads[handler] = handler.scheduled_reads() handler_results = batch_ops_utils.run_handler_scheduled_ops( handler_reads, ensemble_stamp, worker_device) per_handler_updates = collections.OrderedDict() # Two values per handler. First one is if the handler is active for the # current layer. The second one is if the handler is going to be active # for the next layer. 
subsampling_type = self._learner_config.WhichOneof("feature_fraction") if subsampling_type == "feature_fraction_per_level": seed = predictions_dict[NUM_LAYERS_ATTEMPTED] active_handlers_current_layer = stateless.stateless_random_uniform( shape=[len(handlers)], seed=[seed, 1]) active_handlers_next_layer = stateless.stateless_random_uniform( shape=[len(handlers)], seed=[seed + 1, 1]) active_handlers = array_ops.stack( [active_handlers_current_layer, active_handlers_next_layer], axis=1) active_handlers = ( active_handlers < self._learner_config.feature_fraction_per_level) elif subsampling_type == "feature_fraction_per_tree": seed = predictions_dict[NUM_TREES_ATTEMPTED] active_handlers_current_layer = stateless.stateless_random_uniform( shape=[len(handlers)], seed=[seed, 2]) active_handlers_current_layer = ( active_handlers_current_layer < self._learner_config.feature_fraction_per_tree) active_handlers = array_ops.stack( [ active_handlers_current_layer, array_ops.ones([len(handlers)], dtype=dtypes.bool) ], axis=1) else: active_handlers = array_ops.ones([len(handlers), 2], dtype=dtypes.bool) if self._learner_config.constraints.max_number_of_unique_feature_columns: target = ( self._learner_config.constraints.max_number_of_unique_feature_columns) def _feature_selection_active_handlers(): # The active list for current and the next iteration. used_handlers = array_ops.reshape(predictions_dict[USED_HANDLERS_MASK], [-1, 1]) used_handlers = array_ops.concat([used_handlers, used_handlers], axis=1) return math_ops.logical_and(used_handlers, active_handlers) active_handlers = ( control_flow_ops.cond(predictions_dict[NUM_USED_HANDLERS] >= target, _feature_selection_active_handlers, lambda: active_handlers)) # Prepare empty gradients and hessians when handlers are not ready. empty_hess_shape = [1] + self._hessian_shape.as_list() empty_grad_shape = [1] + self._gradient_shape.as_list() empty_gradients = constant_op.constant( [], dtype=dtypes.float32, shape=empty_grad_shape) empty_hessians = constant_op.constant( [], dtype=dtypes.float32, shape=empty_hess_shape) active_handlers = array_ops.unstack(active_handlers, axis=0) for handler_idx in range(len(handlers)): handler = handlers[handler_idx] is_active = active_handlers[handler_idx] updates, scheduled_updates = handler.update_stats( ensemble_stamp, partition_ids, squeezed_gradients, squeezed_hessians, empty_gradients, empty_hessians, weights, is_active, handler_results[handler]) stats_update_ops.append(updates) per_handler_updates[handler] = scheduled_updates update_results = batch_ops_utils.run_handler_scheduled_ops( per_handler_updates, ensemble_stamp, worker_device) for update in update_results.values(): stats_update_ops += update training_state = GBDTTrainingState( num_layer_examples=num_layer_examples, num_layer_steps=num_layer_steps, num_layers=num_layers, active_tree=active_tree, active_layer=active_layer, continue_centering=continue_centering, bias_stats_accumulator=bias_stats_accumulator, steps_accumulator=steps_accumulator, handlers=handlers) reset_op = control_flow_ops.no_op() if self._is_chief: # Advance the ensemble stamp to throw away staggered workers. 
stamp_token, _ = model_ops.tree_ensemble_serialize(self._ensemble_handle)
      next_stamp_token = stamp_token + 1
      reset_ops = []
      for handler in handlers:
        reset_ops.append(handler.reset(stamp_token, next_stamp_token))
      if self._center_bias:
        reset_ops.append(
            bias_stats_accumulator.flush(stamp_token, next_stamp_token))
      reset_ops.append(steps_accumulator.flush(stamp_token, next_stamp_token))
      reset_ops.append(self._finalized_trees.assign(0).op)
      reset_ops.append(self._attempted_trees.assign(0).op)
      reset_ops.append(
          model_ops.tree_ensemble_deserialize(
              self._ensemble_handle,
              stamp_token=next_stamp_token,
              tree_ensemble_config="",
              name="reset_gbdt"))
      reset_op = control_flow_ops.group([reset_ops])

    return stats_update_ops, reset_op, training_state

  def increment_step_counter_and_maybe_update_ensemble(self, predictions_dict,
                                                       training_state):
    """Increments number of visited examples and grows the ensemble.

    If the number of visited examples reaches the target examples_per_layer,
    the ensemble is updated.

    Args:
      predictions_dict: Dictionary of Rank 2 `Tensor` representing information
        about predictions per example.
      training_state: `dict` returned by update_stats.

    Returns:
      An op that updates the counters and potentially grows the ensemble.
    """
    batch_size = math_ops.cast(
        array_ops.shape(predictions_dict[PREDICTIONS])[0], dtypes.float32)
    ensemble_stamp = predictions_dict[ENSEMBLE_STAMP]
    # Accumulate a step after updating stats.
    steps_accumulator = training_state.steps_accumulator
    num_layer_examples = training_state.num_layer_examples
    num_layer_steps = training_state.num_layer_steps
    active_layer = training_state.active_layer
    add_step_op = steps_accumulator.add(
        ensemble_stamp, [0], [[0, 0]], [batch_size], [1.0])

    # After adding the step, decide if further processing is needed.
    ensemble_update_ops = [add_step_op]
    class_id = self._get_class_id(predictions_dict)

    with ops.control_dependencies([add_step_op]):
      if self._is_chief:
        dropout_seed = predictions_dict[NUM_TREES_ATTEMPTED]

        # Get accumulated steps and examples for the current layer.
        _, _, _, _, acc_examples, acc_steps = (
            steps_accumulator.serialize())
        acc_examples = math_ops.cast(acc_examples[0], dtypes.int64)
        acc_steps = math_ops.cast(acc_steps[0], dtypes.int64)
        ensemble_update_ops.append(
            num_layer_examples.assign(acc_examples))
        ensemble_update_ops.append(num_layer_steps.assign(acc_steps))
        # Determine whether we need to update the tree ensemble.
        examples_per_layer = self._examples_per_layer
        if callable(examples_per_layer):
          examples_per_layer = examples_per_layer(active_layer)
        ensemble_update_ops.append(
            control_flow_ops.cond(
                acc_examples >= examples_per_layer,
                self.make_update_ensemble_fn(ensemble_stamp, training_state,
                                             dropout_seed, class_id),
                control_flow_ops.no_op))

    # Note: the loss is computed on predictions that include dropout, so its
    # value can fluctuate noticeably across steps when the dropout ratio is
    # high; refer to eval_loss instead when judging convergence.
    return control_flow_ops.group(*ensemble_update_ops)

  def make_update_ensemble_fn(self, ensemble_stamp, training_state,
                              dropout_seed, class_id):
    """A method to create the function which updates the tree ensemble."""
    # Determine learning rate.
    learning_rate_tuner = self._learner_config.learning_rate_tuner.WhichOneof(
        "tuner")
    if learning_rate_tuner == "fixed" or learning_rate_tuner == "dropout":
      tuner = getattr(self._learner_config.learning_rate_tuner,
                      learning_rate_tuner)
      learning_rate = tuner.learning_rate
    else:
      # TODO(nponomareva, soroush) do the line search.
raise ValueError("Line search learning rate is not yet supported.") def _update_ensemble(): """A method to update the tree ensemble.""" # Get next stamp token. next_ensemble_stamp = ensemble_stamp + 1 # Finalize bias stats. _, _, _, bias_grads, bias_hess = ( training_state.bias_stats_accumulator.flush(ensemble_stamp, next_ensemble_stamp)) # Finalize handler splits. are_splits_ready_list = [] partition_ids_list = [] gains_list = [] split_info_list = [] for handler in training_state.handlers: (are_splits_ready, partition_ids, gains, split_info) = handler.make_splits( ensemble_stamp, next_ensemble_stamp, class_id) are_splits_ready_list.append(are_splits_ready) partition_ids_list.append(partition_ids) gains_list.append(gains) split_info_list.append(split_info) # Stack all the inputs to one tensor per type. # This is a workaround for the slowness of graph building in tf.cond. # See (b/36554864). split_sizes = array_ops.reshape( array_ops.shape_n(partition_ids_list), [len(partition_ids_list)]) partition_ids = array_ops.concat(partition_ids_list, axis=0) gains = array_ops.concat(gains_list, axis=0) split_infos = array_ops.concat(split_info_list, axis=0) # Determine if all splits are ready. are_all_splits_ready = math_ops.reduce_all( array_ops.stack( are_splits_ready_list, axis=0, name="stack_handler_readiness")) # Define bias centering update operation. def _center_bias_fn(): # Center tree ensemble bias. delta_updates = array_ops.where(bias_hess > 0, -bias_grads / bias_hess, array_ops.zeros_like(bias_grads)) center_bias = training_ops.center_tree_ensemble_bias( tree_ensemble_handle=self._ensemble_handle, stamp_token=ensemble_stamp, next_stamp_token=next_ensemble_stamp, delta_updates=delta_updates, learner_config=self._learner_config_serialized) return training_state.continue_centering.assign(center_bias) # Define ensemble growing operations. def _grow_ensemble_ready_fn(): # Grow the ensemble given the current candidates. sizes = array_ops.unstack(split_sizes) partition_ids_list = list(array_ops.split(partition_ids, sizes, axis=0)) gains_list = list(array_ops.split(gains, sizes, axis=0)) split_info_list = list(array_ops.split(split_infos, sizes, axis=0)) return training_ops.grow_tree_ensemble( tree_ensemble_handle=self._ensemble_handle, stamp_token=ensemble_stamp, next_stamp_token=next_ensemble_stamp, learning_rate=learning_rate, partition_ids=partition_ids_list, gains=gains_list, splits=split_info_list, learner_config=self._learner_config_serialized, dropout_seed=dropout_seed, center_bias=self._center_bias, max_tree_depth=self._max_tree_depth) def _grow_ensemble_not_ready_fn(): # Don't grow the ensemble, just update the stamp. return training_ops.grow_tree_ensemble( tree_ensemble_handle=self._ensemble_handle, stamp_token=ensemble_stamp, next_stamp_token=next_ensemble_stamp, learning_rate=0, partition_ids=[], gains=[], splits=[], learner_config=self._learner_config_serialized, dropout_seed=dropout_seed, center_bias=self._center_bias, max_tree_depth=self._max_tree_depth) def _grow_ensemble_fn(): # Conditionally grow an ensemble depending on whether the splits # from all the handlers are ready. return control_flow_ops.cond(are_all_splits_ready, _grow_ensemble_ready_fn, _grow_ensemble_not_ready_fn) # Update ensemble. update_ops = [are_all_splits_ready] if self._center_bias: update_model = control_flow_ops.cond(training_state.continue_centering, _center_bias_fn, _grow_ensemble_fn) else: update_model = _grow_ensemble_fn() update_ops.append(update_model) # Update ensemble stats. 
with ops.control_dependencies([update_model]):
        stats = training_ops.tree_ensemble_stats(
            self._ensemble_handle, stamp_token=next_ensemble_stamp)
        update_ops.append(self._finalized_trees.assign(stats.num_trees))
        update_ops.append(self._attempted_trees.assign(stats.attempted_trees))
        update_ops.append(training_state.num_layers.assign(stats.num_layers))
        update_ops.append(training_state.active_tree.assign(stats.active_tree))
        update_ops.append(
            training_state.active_layer.assign(stats.active_layer))

        # Flush step stats.
        update_ops.extend(
            training_state.steps_accumulator.flush(ensemble_stamp,
                                                   next_ensemble_stamp))
      return control_flow_ops.group(*update_ops, name="update_ensemble")

    return _update_ensemble

  def get_number_of_trees_tensor(self):
    return self._finalized_trees, self._attempted_trees

  def get_max_tree_depth(self):
    return self._max_tree_depth

  def train(self, loss, predictions_dict, labels):
    """Updates the accumulator stats and grows the ensemble.

    Args:
      loss: A scalar tensor representing average loss of examples.
      predictions_dict: Dictionary of Rank 2 `Tensor` representing information
        about predictions per example.
      labels: Rank 2 `Tensor` representing labels per example. Has no effect
        on the training and is only kept for backward compatibility.

    Returns:
      An op that adds a new tree to the ensemble.

    Raises:
      ValueError: if inputs are not valid.
    """
    del labels  # unused; kept for backward compatibility.
    update_op, _, training_state = self.update_stats(loss, predictions_dict)
    with ops.control_dependencies(update_op):
      return self.increment_step_counter_and_maybe_update_ensemble(
          predictions_dict, training_state)

  def _get_weights(self, hessian_shape, hessians):
    """Derives weights to be used based on hessians and multiclass strategy."""
    if hessian_shape == tensor_shape.scalar():
      # This is tree per class.
      weights = hessians
    elif len(hessian_shape.dims) == 1:
      # This is diagonal hessian.
      weights = math_ops.reduce_sum(hessians, axis=1)
    else:
      # This is full hessian.
      weights = math_ops.trace(hessians)
    return weights

  def _full_hessian(self, grads, predictions):
    """Prepares hessians for full-hessian multiclass strategy."""
    # Because of
    # https://github.com/tensorflow/tensorflow/issues/675, we can't just
    # compute the full hessian with a single call to gradients, but instead
    # must compute it row-by-row.
gradients_list = array_ops.unstack( grads, num=self._logits_dimension, axis=1) hessian_rows = [] for row in range(self._logits_dimension): # If current row is i, K is number of classes,each row returns a tensor of # size batch_size x K representing for each example dx_i dx_1, dx_i dx_2 # etc dx_i dx_K hessian_row = gradients_impl.gradients( gradients_list[row], predictions, name="Hessian_%d" % row, colocate_gradients_with_ops=False, gate_gradients=0, aggregation_method=None) # hessian_row is of dimension 1, batch_size, K, => trim first dimension # to get batch_size x K hessian_row = array_ops.squeeze(array_ops.unstack(hessian_row), [0]) hessian_rows.append(hessian_row) return hessian_rows def _diagonal_hessian(self, grads, predictions): """Prepares hessians for diagonal-hessian multiclass mode.""" diag_hessian_list = [] gradients_list = array_ops.unstack( grads, num=self._logits_dimension, axis=1) for row, row_grads in enumerate(gradients_list): # If current row is i, K is number of classes,each row returns a tensor of # size batch_size x K representing for each example dx_i dx_1, dx_1 dx_2 # etc dx_i dx_K hessian_row = gradients_impl.gradients( row_grads, predictions, name="Hessian_%d" % row, colocate_gradients_with_ops=False, gate_gradients=0, aggregation_method=None) # hessian_row is of dimension 1, batch_size, K, => trim first dimension # to get batch_size x K hessian_row = array_ops.squeeze(array_ops.unstack(hessian_row), [0]) # Get dx_i^2 for the whole batch. elem = array_ops.transpose(hessian_row)[row] diag_hessian_list.append(elem) return diag_hessian_list def _get_replica_device_setter(self, worker_device): """Creates a replica device setter.""" ps_tasks = self._num_ps_replicas ps_ops = [ "Variable", "VariableV2", "DecisionTreeEnsembleResourceHandleOp", "StatsAccumulatorScalarResourceHandleOp", "StatsAccumulatorTensorResourceHandleOp", ] ps_strategy = _OpRoundRobinStrategy(ps_ops, ps_tasks) return device_setter.replica_device_setter( worker_device=worker_device, ps_tasks=ps_tasks, merge_devices=True, ps_ops=ps_ops, ps_strategy=ps_strategy) def _make_update_bias_stats_fn(self, ensemble_stamp, predictions, gradients, bias_stats_accumulator): """A method to create the function which updates the bias stats.""" def _update_bias_stats(): """A method to update the bias stats.""" # Get reduced gradients and hessians. grads_sum = math_ops.reduce_sum(gradients, 0) hess = gradients_impl.gradients( grads_sum, predictions, name="Hessians", colocate_gradients_with_ops=False, gate_gradients=0, aggregation_method=None)[0] hess_sum = math_ops.reduce_sum(hess, 0) # Accumulate gradients and hessians. partition_ids = math_ops.range(self._logits_dimension) feature_ids = array_ops.zeros( [self._logits_dimension, 2], dtype=dtypes.int64) add_stats_op = bias_stats_accumulator.add( ensemble_stamp, partition_ids, feature_ids, grads_sum, hess_sum) return control_flow_ops.group(*[add_stats_op], name="update_bias_stats") return _update_bias_stats
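# Illustrative sketch (not part of the original file above): a minimal NumPy
# rendering of the shape conventions behind
# GradientBoostedDecisionTreeModel._get_weights. The per-example weight used
# for quantile computation is the hessian itself under the scalar
# (tree-per-class) strategy, the row sum under the diagonal-hessian strategy,
# and the matrix trace under the full-hessian strategy. Batch and class
# sizes below are made up.
import numpy as np

batch, num_classes = 4, 3
scalar_hess = np.abs(np.random.randn(batch))                          # [batch]
diag_hess = np.abs(np.random.randn(batch, num_classes))               # [batch, K]
full_hess = np.abs(np.random.randn(batch, num_classes, num_classes))  # [batch, K, K]

w_scalar = scalar_hess                           # tree per class: use as-is
w_diag = diag_hess.sum(axis=1)                   # diagonal: sum over classes
w_full = np.trace(full_hess, axis1=1, axis2=2)   # full: per-example trace

assert w_scalar.shape == w_diag.shape == w_full.shape == (batch,)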
from django.contrib.auth import logout
from django.conf import settings
from django.contrib.auth.models import User

from tendenci.apps.site_settings.utils import get_setting


class ProfileMiddleware(object):
    """
    Appends a profile instance to anonymous users.
    Creates a profile for logged-in users without one.
    """
    def process_request(self, request):
        from tendenci.apps.profiles.models import Profile
        if request.user.is_anonymous():
            request.user.profile = Profile(
                status=False,
                status_detail="inactive",
                user=User(is_staff=False, is_superuser=False, is_active=False))
        else:
            # Ensure the logged-in user has a profile row.
            try:
                request.user.profile
            except Profile.DoesNotExist:
                Profile.objects.create_profile(user=request.user)


class ProfileLanguageMiddleware(object):
    """This middleware must come before django's LocaleMiddleware."""
    if settings.USE_I18N:
        def get_user_language(self, request):
            try:
                lang = getattr(request.user.profile, 'language', None)
            except Exception:  # anonymous user or missing profile
                lang = None
            if not lang:
                lang = get_setting('site', 'global', 'localizationlanguage')
            return lang

        def process_request(self, request):
            """Check the user's language and assign it to the session or
            cookie accordingly.
            """
            user_language = self.get_user_language(request)

            if user_language:
                if hasattr(request, 'session'):
                    lang_code_in_session = request.session.get('django_language', None)
                    if not lang_code_in_session or lang_code_in_session != user_language:
                        request.session['django_language'] = user_language
                else:
                    lang_code_in_cookie = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
                    if lang_code_in_cookie and lang_code_in_cookie != user_language:
                        request.COOKIES[settings.LANGUAGE_COOKIE_NAME] = user_language

        def process_response(self, request, response):
            """Assign user_language to the cookie LANGUAGE_COOKIE_NAME."""
            user_language = self.get_user_language(request)
            lang_code_in_cookie = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)

            if user_language and (not lang_code_in_cookie or user_language != lang_code_in_cookie):
                response.set_cookie(settings.LANGUAGE_COOKIE_NAME, user_language)

            return response


class ForceLogoutProfileMiddleware(object):

    def process_request(self, request):
        # Force logout of a deactivated user on their next request.
        if request.user.is_authenticated() and not request.user.is_active:
            logout(request)
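# Illustrative sketch (not part of the original file above): for
# ProfileLanguageMiddleware to have any effect it must be listed before
# django.middleware.locale.LocaleMiddleware, so the profile language is
# already in the session/cookie when Django resolves the request language.
# A hypothetical old-style settings.py ordering; the tendenci module path is
# an assumption:
MIDDLEWARE_CLASSES = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'tendenci.apps.profiles.middleware.ProfileMiddleware',      # path assumed
    'tendenci.apps.profiles.middleware.ProfileLanguageMiddleware',
    'django.middleware.locale.LocaleMiddleware',                # must come after
    'django.middleware.common.CommonMiddleware',
]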
from typing import Generator, List, Optional, Type from iota.crypto.kerl import Kerl from iota.crypto.signing import validate_signature_fragments from iota.transaction.base import Bundle, Transaction __all__ = [ 'BundleValidator', ] # In very rare cases, the IOTA protocol may switch hash algorithms. # When this happens, the IOTA Foundation will create a snapshot, so # that all new objects on the Tangle use the new hash algorithm. # # However, the snapshot will still contain references to addresses # created using the legacy hash algorithm, so the bundle validator has # to be able to use that as a fallback when validation fails. SUPPORTED_SPONGE = Kerl LEGACY_SPONGE = None # Curl class BundleValidator(object): """ Checks a bundle and its transactions for problems. """ def __init__(self, bundle: Bundle) -> None: super(BundleValidator, self).__init__() self.bundle = bundle self._errors: Optional[List[str]] = [] self._validator = self._create_validator() @property def errors(self) -> List[str]: """ Returns all errors found with the bundle. """ try: self._errors.extend(self._validator) # type: List[str] except StopIteration: pass return self._errors def is_valid(self) -> bool: """ Returns whether the bundle is valid. """ if not self._errors: try: # We only have to check for a single error to determine # if the bundle is valid or not. self._errors.append(next(self._validator)) except StopIteration: pass return not self._errors def _create_validator(self) -> Generator[str, None, None]: """ Creates a generator that does all the work. """ # Group transactions by address to make it easier to iterate # over inputs. grouped_transactions = self.bundle.group_transactions() # Define a few expected values. bundle_hash = self.bundle.hash last_index = len(self.bundle) - 1 # Track a few others as we go along. balance = 0 # Check indices and balance first. # Note that we use a counter to keep track of the current index, # since at this point we can't trust that the transactions have # correct ``current_index`` values. counter = 0 for group in grouped_transactions: for txn in group: balance += txn.value if txn.bundle_hash != bundle_hash: yield 'Transaction {i} has invalid bundle hash.'.format( i=counter, ) if txn.current_index != counter: yield ( 'Transaction {i} has invalid current index value ' '(expected {i}, actual {actual}).'.format( actual=txn.current_index, i=counter, ) ) if txn.last_index != last_index: yield ( 'Transaction {i} has invalid last index value ' '(expected {expected}, actual {actual}).'.format( actual=txn.last_index, expected=last_index, i=counter, ) ) counter += 1 # Bundle must be balanced (spends must match inputs). if balance != 0: yield ( 'Bundle has invalid balance ' '(expected 0, actual {actual}).'.format( actual=balance, ) ) # Signature validation is only meaningful if the transactions # are otherwise valid. if not self._errors: signature_validation_queue: List[List[Transaction]] = [] for group in grouped_transactions: # Signature validation only applies to inputs. if group[0].value >= 0: continue validate_group_signature = True for j, txn in enumerate(group): if (j > 0) and (txn.value != 0): # Input is malformed; signature fragments after # the first should have zero value. yield ( 'Transaction {i} has invalid value ' '(expected 0, actual {actual}).'.format( actual=txn.value, # If we get to this point, we know that # the ``current_index`` value for each # transaction can be trusted. 
i=txn.current_index, ) ) # We won't be able to validate the signature, # but continue anyway, so that we can check that # the other transactions in the group have the # correct ``value``. validate_group_signature = False continue # After collecting the signature fragment from each # transaction in the group, queue them up to run through # the validator. # # We have to perform signature validation separately so # that we can try different algorithms (for # backwards-compatibility). # # References: # # - https://github.com/iotaledger/kerl#kerl-integration-in-iota if validate_group_signature: signature_validation_queue.append(group) # Once we've finished checking the attributes from each # transaction in the bundle, go back and validate # signatures. if signature_validation_queue: # ``yield from`` is an option here, but for # compatibility with Python 2 clients, we will do it the # old-fashioned way. for error in self._get_bundle_signature_errors( signature_validation_queue ): yield error def _get_bundle_signature_errors( self, groups: List[List[Transaction]] ) -> List[str]: """ Validates the signature fragments in the bundle. :return: List of error messages. If empty, signature fragments are valid. """ # Start with the currently-supported hash algo. current_pos = None current_errors = [] for current_pos, group in enumerate(groups): error = self._get_group_signature_error(group, SUPPORTED_SPONGE) if error: current_errors.append(error) # Pause and retry with the legacy algo. break # If validation failed, then go back and try with the legacy # algo (only applies if we are currently transitioning to a new # algo). if current_errors and LEGACY_SPONGE: for group in groups: if self._get_group_signature_error(group, LEGACY_SPONGE): # Legacy algo doesn't work, either; no point in # continuing. break else: # If we get here, then we were able to validate the # signature fragments successfully using the legacy # algorithm. return [] # If we get here, then validation also failed when using the # legacy algorithm. # At this point, we know that the bundle is invalid, but we will # continue validating with the supported algorithm anyway, so # that we can return an error message for every invalid input. current_errors.extend(filter(None, ( self._get_group_signature_error(group, SUPPORTED_SPONGE) for group in groups[current_pos + 1:] ))) return current_errors @staticmethod def _get_group_signature_error( group: List[Transaction], sponge_type: Type ) -> Optional[str]: """ Validates the signature fragments for a group of transactions using the specified sponge type. Note: this method assumes that the transactions in the group have already passed basic validation (see :py:meth:`_create_validator`). :return: - ``None``: Indicates that the signature fragments are valid. - ``str``: Error message indicating the fragments are invalid. """ validate_group_signature = validate_signature_fragments( fragments=[txn.signature_message_fragment for txn in group], hash_=group[0].bundle_hash, public_key=group[0].address, sponge_type=sponge_type, ) if validate_group_signature: return None return ( 'Transaction {i} has invalid signature ' '(using {fragments} fragments).'.format( fragments=len(group), i=group[0].current_index, ) )
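# Illustrative usage sketch (not part of the original module above):
# ``bundle`` is assumed to be an iota.transaction.base.Bundle obtained
# elsewhere; BundleValidator is the class defined above.
def report_bundle_problems(bundle):
    validator = BundleValidator(bundle)
    if validator.is_valid():
        return []
    # ``errors`` drains the underlying generator, so this returns every
    # problem found in the bundle, not just the first.
    return validator.errors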
# -*- coding: utf-8 -*- # Generated by Django 1.11.1 on 2017-08-04 14:21 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Author', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('firstname', models.CharField(blank=True, default='', max_length=150)), ('lastname', models.CharField(blank=True, default='', max_length=150)), ('added_date', models.DateTimeField(auto_now_add=True, verbose_name='date added to the database')), ('updated_date', models.DateTimeField(auto_now=True, verbose_name='date updated to the database')), ], ), migrations.CreateModel( name='Chord', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('note', models.CharField(default='', max_length=15)), ], ), migrations.CreateModel( name='Editor', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(blank=True, default='', max_length=150)), ('added_date', models.DateTimeField(auto_now_add=True, verbose_name='date added to the database')), ('updated_date', models.DateTimeField(auto_now=True, verbose_name='date updated to the database')), ], ), migrations.CreateModel( name='Harmonization', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('spot_in_verse', models.PositiveIntegerField()), ('added_date', models.DateTimeField(auto_now_add=True, verbose_name='date added to the database')), ('updated_date', models.DateTimeField(auto_now=True, verbose_name='date updated to the database')), ('chord', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='songwriter.Chord')), ], ), migrations.CreateModel( name='Paragraph', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('order', models.PositiveIntegerField()), ('is_refrain', models.BooleanField(default=False, verbose_name='Is a refrain paragraph?')), ('added_date', models.DateTimeField(auto_now_add=True, verbose_name='date added to the database')), ('updated_date', models.DateTimeField(auto_now=True, verbose_name='date updated to the database')), ], ), migrations.CreateModel( name='Song', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(default='', max_length=150)), ('rights_paid', models.BooleanField(default=True, verbose_name='rights paid')), ('secli_number', models.CharField(blank=True, default='', max_length=150)), ('sacem_number', models.CharField(blank=True, default='', max_length=150)), ('comments', models.TextField(verbose_name='Comments')), ('added_date', models.DateTimeField(auto_now_add=True, verbose_name='date added to the database')), ('updated_date', models.DateTimeField(auto_now=True, verbose_name='date updated to the database')), ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='songwriter.Author')), ('editor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='songwriter.Editor')), ], ), migrations.CreateModel( name='Theme', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(default='', max_length=150)), 
('added_date', models.DateTimeField(auto_now_add=True, verbose_name='date added to the database')), ('updated_date', models.DateTimeField(auto_now=True, verbose_name='date updated to the database')), ], ), migrations.CreateModel( name='Verse', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('order', models.PositiveIntegerField()), ('content', models.TextField()), ('added_date', models.DateTimeField(auto_now_add=True, verbose_name='date added to the database')), ('updated_date', models.DateTimeField(auto_now=True, verbose_name='date updated to the database')), ], ), migrations.AddField( model_name='song', name='theme', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='songwriter.Theme'), ), migrations.AddField( model_name='paragraph', name='song', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='paragraphs', to='songwriter.Song'), ), migrations.AddField( model_name='harmonization', name='verse', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='harmonizations', to='songwriter.Verse'), ), ]
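# Illustrative sketch (not part of the original migration above): two of the
# model definitions that would generate roughly this initial schema,
# reconstructed from the migration fields rather than taken from the
# project's models.py; the remaining models are analogous.
from django.db import models


class Author(models.Model):
    firstname = models.CharField(max_length=150, blank=True, default='')
    lastname = models.CharField(max_length=150, blank=True, default='')
    added_date = models.DateTimeField('date added to the database',
                                      auto_now_add=True)
    updated_date = models.DateTimeField('date updated to the database',
                                        auto_now=True)


class Song(models.Model):
    title = models.CharField(max_length=150, default='')
    rights_paid = models.BooleanField('rights paid', default=True)
    secli_number = models.CharField(max_length=150, blank=True, default='')
    sacem_number = models.CharField(max_length=150, blank=True, default='')
    comments = models.TextField('Comments')
    author = models.ForeignKey('Author', related_name='+',
                               on_delete=models.CASCADE)
    editor = models.ForeignKey('Editor', related_name='+',
                               on_delete=models.CASCADE)
    theme = models.ForeignKey('Theme', related_name='+',
                              on_delete=models.CASCADE)
    added_date = models.DateTimeField('date added to the database',
                                      auto_now_add=True)
    updated_date = models.DateTimeField('date updated to the database',
                                        auto_now=True)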
#!/usr/bin/env python2.6

import re  # used by the (currently disabled) regex feature patterns below
import sys

filename = sys.argv[1]

def generate_string_features(word, label):
  features = {}  # renamed from ``dict`` to avoid shadowing the builtin
  '''
  patterns = ['\d$', '\d\d$', '\d\d\d+$', '\d?\d?:\d\d$', '[0-9:]+$', '[A-Z]',
              '[A-Z]$', '[A-Z][A-Z]$', '[A-Z]+$', '[^0-9A-Za-z]+$',
              '[^0-9]+$', '[A-Za-z]+$', '[a-z]+$']
  for pattern in patterns:
    if re.match(pattern, word):
      features['regex=' + pattern + '_label=' + label] = 1
  '''
  features['bias_label=' + label] = 1
  features['word=' + word.lower() + '_label=' + label] = 1
  return features

# Collect the vocabularies of words and labels from a file whose lines
# alternate word and label tokens.
words = set()
labels = set()
with open(filename, 'r') as f:
  for line in f:
    chunks = line.strip().split(" ")
    for i in range(0, len(chunks), 2):
      words.add(chunks[i].strip())
      labels.add(chunks[i + 1].strip())

# Emit one '@#@#@'-separated line per (word, label, feature) triple.
for word in words:
  for label in labels:
    features = generate_string_features(word, label)
    for feature in features.keys():
      print "%s@#@#@%s@#@#@%s@#@#@%d" % (word, label, feature, features[feature])
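# Illustrative usage (not part of the original script above; the file name
# and tokens are made up). Run as:
#
#     python generate_features.py tagged_corpus.txt
#
# where each input line alternates word and label:
#
#     John NNP said VBD
#
# For every (word, label) pair in the cross product of the observed
# vocabularies, the script prints one line per feature, e.g. (dict ordering
# is arbitrary under Python 2):
#
#     John@#@#@NNP@#@#@bias_label=NNP@#@#@1
#     John@#@#@NNP@#@#@word=john_label=NNP@#@#@1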
"""empty message Revision ID: 399106d8a6ad Revises: None Create Date: 2015-03-06 03:55:19.157958 """ # revision identifiers, used by Alembic. revision = '399106d8a6ad' down_revision = None from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.create_table('category', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(length=100), nullable=True), sa.PrimaryKeyConstraint('id') ) op.create_table('product', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('price', sa.Float(), nullable=True), sa.Column('category_id', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['category_id'], ['category.id'], ), sa.PrimaryKeyConstraint('id') ) ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_table('product') op.drop_table('category') ### end Alembic commands ###
# Copyright 2014-2016 Morgan Delahaye-Prat. All Rights Reserved. # # Licensed under the Simplified BSD License (the "License"); # you may not use this file except in compliance with the License. """Test basic CRUD operations of the CRUDProvider.""" import json import pytest from hypr.providers import CRUDProvider def deserialize(data, model): """Deserialize JSON data.""" data = json.loads(data) if 'content' in data and 'count' in data: return data['count'], [model.load(r) for r in data['content']] return model.load(data) @pytest.fixture def app(app, model): """All the tests are conducted with application/json as default mime.""" provider = type('IntestProvider', (CRUDProvider,), {'__model__': model}) app.add_provider(provider, '/test', '/test/<int:id>') return app class TestModelCreate: """Test create.""" models = 'SQLiteModel', def test_create(self, app, model): """Create one resource.""" payload = json.dumps({'value': 'foo'}) with app.test_client() as client: rv = client.post('/test', data=payload) assert rv.status == 201 data = deserialize(rv.text, model) assert data == model.one(data.id) def test_bulk_create(self, app, model): """Create multiple resources at once.""" payload = json.dumps([ {'value': 'foo'}, {'value': 'bar'} ]) with app.test_client() as client: rv = client.post('/test?_bulk=1', data=payload) assert rv.status == 201 count, resources = deserialize(rv.text, model) for resource in resources: assert resource == model.one(resource.id) @pytest.mark.populate(5) class TestProviderRead: """Test read.""" models = 'SQLiteModel', def test_get_collection(self, app, model): """Test.""" with app.test_client() as client: rv = client.get('/test') assert rv.status == 200 count, resources = deserialize(rv.text, model) assert count == model.count() == 5 assert sorted(resources) == sorted(model.get()) def test_get_one(self, app, model): """Test.""" with app.test_client() as client: rv = client.get('/test/1') assert rv.status == 200 resource = deserialize(rv.text, model) assert resource == model.one(1) @pytest.mark.populate(5) class TestModelUpdate: """Test update.""" models = 'SQLiteModel', def test_update(self, app, model): """Update an instance with PATCH.""" ref = model.one(1) payload = json.dumps({'value': 'test_ok'}) with app.test_client() as client: rv = client.patch('/test/1', data=payload) assert rv.status == 200 resource = deserialize(rv.text, model) assert resource != ref assert resource == model.one(1) def test_update_alt(self, app, model): """Update an instance with PUT.""" ref = model.one(2) payload = json.dumps({'value': 'test_ok'}) with app.test_client() as client: rv = client.put('/test/2', data=payload) assert rv.status == 200 resource = deserialize(rv.text, model) assert resource != ref assert resource == model.one(2) def test_bulk_update(self, app, model): """Update multiple resources at once.""" ref = [model.one(3), model.one(4)] payload = json.dumps([ {'id': 3, 'value': 'test_ok0'}, {'id': 4, 'value': 'test_ok1'} ]) with app.test_client() as client: rv = client.put('/test?_bulk=1', data=payload) assert rv.status == 200 count, data = deserialize(rv.text, model) for instance in ref: assert instance != model.one(instance.id) for resource in data: assert resource == model.one(resource.id) @pytest.mark.populate(5) class TestModelDelete: """Test delete.""" models = 'SQLiteModel', def test_delete(self, app, model): """Delete a resource.""" with app.test_client() as client: rv = client.delete('/test/1') assert rv.status == 204 assert model.one(1) is None def 
test_bulk_delete(self, app, model): """Delete multiple resources at once.""" ref = [model.one(3), model.one(4)] payload = json.dumps([ {'id': 3}, {'id': 4} ]) with app.test_client() as client: rv = client.delete('/test?_bulk=1', data=payload) assert rv.status == 204 for instance in ref: assert model.one(instance.id) is None @pytest.mark.populate(5) class TestMissingPayloadException: """Test requests with missing payload.""" models = 'SQLiteModel', def test_create(self, app, model): """Create one resource.""" with app.test_client() as client: rv = client.post('/test') assert rv.status == 400 def test_bulk_create(self, app, model): """Create multiple resources at once.""" with app.test_client() as client: rv = client.post('/test?_bulk=1') assert rv.status == 400 def test_update(self, app, model): """Update an instance.""" with app.test_client() as client: rv = client.patch('/test/1') assert rv.status == 400 def test_bulk_update(self, app, model): """Update multiple resources at once.""" with app.test_client() as client: rv = client.put('/test?_bulk=1') assert rv.status == 400 def test_bulk_delete(self, app, model): """Delete multiple resources at once.""" with app.test_client() as client: rv = client.delete('/test?_bulk=1') assert rv.status == 400 @pytest.mark.populate(5) class TestInvalidPayloadException: """Test requests with invalid payload.""" models = 'SQLiteModel', def test_create(self, app): """Create one resource.""" payload = json.dumps({'invalid': 'property'}) with app.test_client() as client: rv = client.post('/test', data=payload) assert rv.status == 400 def test_update(self, app, model): """Update one resource.""" ref = model.one(1) payload = json.dumps({'invalid': 'property'}) with app.test_client() as client: rv = client.patch('/test/1', data=payload) assert rv.status == 400 assert ref == model.one(1) @pytest.mark.populate(5) class TestInvalidBulkRequest: """Test invalid bulk requests.""" models = 'SQLiteModel', def test_bulk_create_missing_flag(self, app, model): """A missing bulk flag returns an error 400.""" payload = json.dumps([ {'value': 'foo'}, {'value': 'bar'} ]) with app.test_client() as client: rv = client.post('/test', data=payload) assert rv.status == 400 assert model.count() == 5 def test_bulk_update_missing_flag(self, app, model): """Update multiple resources at once.""" ref = model.get() payload = json.dumps([ {'id': 3, 'value': 'test_ok0'}, {'id': 4, 'value': 'test_ok1'} ]) with app.test_client() as client: rv = client.put('/test', data=payload) assert rv.status == 400 assert sorted(ref) == sorted(model.get()) def test_bulk_delete_missing_flag(self, app, model): """Delete multiple resources at once.""" ref = model.get() payload = json.dumps([ {'id': 3}, {'id': 4} ]) with app.test_client() as client: rv = client.delete('/test', data=payload) assert rv.status == 400 assert sorted(ref) == sorted(model.get()) def test_bulk_update_on_single_resource(self, app, model): """Update multiple resources at once.""" ref = model.get() payload = json.dumps([ {'id': 3, 'value': 'test_ok0'}, {'id': 4, 'value': 'test_ok1'} ]) with app.test_client() as client: rv = client.put('/test/1?_bulk=1', data=payload) assert rv.status == 400 assert sorted(ref) == sorted(model.get()) def test_bulk_delete_on_single_resource(self, app, model): """Delete multiple resources at once.""" ref = model.get() payload = json.dumps([ {'id': 3}, {'id': 4} ]) with app.test_client() as client: rv = client.delete('/test/1?_bulk=1', data=payload) assert rv.status == 400 assert sorted(ref) == 
sorted(model.get())

    def test_bulk_update_unknown_resource(self, app, model):
        """Update multiple resources at once."""
        ref = model.get()
        payload = json.dumps([
            {'id': 3, 'value': 'test_ok0'},
            {'id': 100, 'value': 'test_ok1'}     # unknown resource
        ])

        with app.test_client() as client:
            rv = client.put('/test?_bulk=1', data=payload)
            assert rv.status == 400
            assert sorted(ref) == sorted(model.get())

    def test_bulk_delete_unknown_resource(self, app, model):
        """Delete multiple resources at once."""
        ref = model.get()
        payload = json.dumps([
            {'id': 3},
            {'id': 100}      # unknown resource
        ])

        with app.test_client() as client:
            rv = client.delete('/test?_bulk=1', data=payload)
            assert rv.status == 400
            assert sorted(ref) == sorted(model.get())

    def test_bulk_create_invalid_property(self, app, model):
        """Create multiple resources at once."""
        payload = json.dumps([
            {'value': 'foo'},
            {'invalid': 'property'}
        ])

        with app.test_client() as client:
            rv = client.post('/test?_bulk=1', data=payload)
            assert rv.status == 400
            assert model.count() == 5

    def test_bulk_update_invalid_property(self, app, model):
        """Update multiple resources at once."""
        ref = model.get()
        payload = json.dumps([
            {'id': 3, 'value': 'test_ok0'},
            {'id': 4, 'invalid': 'property'}
        ])

        with app.test_client() as client:
            rv = client.put('/test?_bulk=1', data=payload)
            assert rv.status == 400
            assert sorted(ref) == sorted(model.get())

    def test_bulk_update_missing_id(self, app, model):
        """Update multiple resources at once."""
        ref = model.get()
        payload = json.dumps([
            {'id': 3, 'value': 'test_ok0'},
            {'value': 'test_ok1'}    # missing id
        ])

        with app.test_client() as client:
            rv = client.put('/test?_bulk=1', data=payload)
            assert rv.status == 400
            assert sorted(ref) == sorted(model.get())

    def test_bulk_delete_missing_id(self, app, model):
        """Delete multiple resources at once."""
        ref = model.get()
        payload = json.dumps([
            {'id': 3},
            {}      # missing id
        ])

        with app.test_client() as client:
            rv = client.delete('/test?_bulk=1', data=payload)
            assert rv.status == 400
            assert sorted(ref) == sorted(model.get())


class TestEmptySet:
    """CRUD operations (except create) on an empty database."""

    models = 'SQLiteModel',

    def test_get_collection(self, app, model):
        """Get an empty set."""
        with app.test_client() as client:
            rv = client.get('/test')
            assert rv.status == 200

            count, resources = deserialize(rv.text, model)
            assert count == 0
            assert resources == []

    def test_get_one(self, app, model):
        """Get an unknown resource."""
        with app.test_client() as client:
            rv = client.get('/test/1')
            assert rv.status == 404

    def test_update(self, app, model):
        """Update an unknown resource."""
        payload = json.dumps({'value': 'test_ok'})

        with app.test_client() as client:
            rv = client.patch('/test/1', data=payload)
            assert rv.status == 404

    def test_delete(self, app, model):
        """Delete an unknown resource."""
        with app.test_client() as client:
            rv = client.delete('/test/1')
            assert rv.status == 404
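# Illustrative recap (not part of the original test module above) of the
# bulk convention these tests exercise. Single resources are plain JSON
# objects; collections take a JSON array plus the ``_bulk=1`` query flag,
# and every referenced id must exist or the whole request fails
# (all-or-nothing).
import json

single_payload = json.dumps({'value': 'foo'})      # POST /test          -> 201
bulk_payload = json.dumps([{'value': 'foo'},
                           {'value': 'bar'}])      # POST /test?_bulk=1  -> 201
# POST /test with an array but no ``_bulk=1`` flag                       -> 400
# PUT/DELETE /test?_bulk=1 with a missing or unknown ``id``              -> 400
# PUT/DELETE /test/<id>?_bulk=1 (bulk flag on a single resource)         -> 400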
"""Formats domain objects for route responses.""" from collections import OrderedDict from flask import url_for from ._bases import Formatter # TODO: figure out a better way to serialize objects without parent objects # pylint: disable=arguments-differ class GameFormatter(Formatter): """Serializes games into dictionaries.""" def format_single(self, game): data = OrderedDict() kwargs = dict(_external=True, key=game.key) data['uri'] = url_for('games.detail', **kwargs) data['key'] = game.key data['timestamp'] = game.timestamp data['players'] = url_for('players.index', **kwargs) data['turn'] = game.turn data['pending'] = game.pending data['start'] = url_for('games.start', **kwargs) return data def format_multiple(self, games): return [url_for('games.detail', _external=True, key=game.key) for game in games] class PlayerFormatter(Formatter): """Serializes players into dictionaries.""" def format_single(self, player, game, auth): data = OrderedDict() kwargs = dict(_external=True, key=game.key, color=player.color) if auth: kwargs.update(code=player.code) data['uri'] = url_for('players.detail', **kwargs) data['color'] = player.color if auth: data['code'] = player.code data['done'] = player.turn.done if auth: data['turns'] = url_for('turns.index', **kwargs) return data def format_multiple(self, players, game): return [url_for('players.detail', _external=True, key=game.key, color=player.color) for player in players] class BoardFormatter(Formatter): def format_single(self, board): data = OrderedDict() # TODO: format board print(board) return data class TurnFormatter(Formatter): """Serializes turns into dictionaries.""" def format_single(self, game, player, number): data = OrderedDict() kwargs = dict(_external=True, key=game.key, color=player.color, code=player.code, number=number) data['uri'] = url_for('turns.detail', **kwargs) data['moves'] = url_for('moves.index', **kwargs) data['finish'] = url_for('turns.finish', **kwargs) return data def format_multiple(self, turns, game, player): return [url_for('turns.detail', _external=True, key=game.key, color=player.color, code=player.code, number=index + 1) for index in range(len(turns))] class MoveFormatter(Formatter): """Serializes moves into dictionaries.""" def format_single(self, move): data = OrderedDict() data['count'] = move.count return data def format_multiple(self, moves, game, player): return [url_for('moves.detail', _external=True, key=game.key, color=player.color, code=player.code, begin=move.begin, end=move.end) for move in moves] game_formatter = GameFormatter() player_formatter = PlayerFormatter() board_formatter = BoardFormatter() turn_formatter = TurnFormatter() move_formatter = MoveFormatter()
# Copyright (c) 2003-2016 CORE Security Technologies
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Author: Alberto Solino (@agsolino)
#
# Description:
#   [MS-VDS]: Virtual Disk Service (VDS) Protocol
#   This was used as a way to test the DCOM runtime. Further
#   testing is needed to verify it is working as expected
#
#   Best way to learn how to use these calls is to grab the protocol standard
#   so you understand what the call does, and then read the test case located
#   at https://github.com/CoreSecurity/impacket/tree/master/impacket/testcases/SMB_RPC
#
#   Since DCOM is like an OO RPC, instead of helper functions you will see the
#   classes described in the standards developed.
#   There are test cases for them too.
#
from impacket.dcerpc.v5.ndr import NDRSTRUCT, NDRUniConformantVaryingArray, NDRENUM
from impacket.dcerpc.v5.dcomrt import DCOMCALL, DCOMANSWER, IRemUnknown2, PMInterfacePointer, INTERFACE
from impacket.dcerpc.v5.dtypes import LPWSTR, ULONG, DWORD, SHORT, GUID
from impacket.dcerpc.v5.rpcrt import DCERPCException
from impacket.dcerpc.v5.enum import Enum
from impacket import hresult_errors
from impacket.uuid import string_to_bin


class DCERPCSessionError(DCERPCException):
    def __init__(self, error_string=None, error_code=None, packet=None):
        DCERPCException.__init__(self, error_string, error_code, packet)

    def __str__(self):
        if hresult_errors.ERROR_MESSAGES.has_key(self.error_code):
            error_msg_short = hresult_errors.ERROR_MESSAGES[self.error_code][0]
            error_msg_verbose = hresult_errors.ERROR_MESSAGES[self.error_code][1]
            return 'VDS SessionError: code: 0x%x - %s - %s' % (self.error_code, error_msg_short, error_msg_verbose)
        else:
            return 'VDS SessionError: unknown error code: 0x%x' % self.error_code

################################################################################
# CONSTANTS
################################################################################
# 1.9 Standards Assignments
CLSID_VirtualDiskService      = string_to_bin('7D1933CB-86F6-4A98-8628-01BE94C9A575')
IID_IEnumVdsObject            = string_to_bin('118610B7-8D94-4030-B5B8-500889788E4E')
IID_IVdsAdviseSink            = string_to_bin('8326CD1D-CF59-4936-B786-5EFC08798E25')
IID_IVdsAsync                 = string_to_bin('D5D23B6D-5A55-4492-9889-397A3C2D2DBC')
IID_IVdsServiceInitialization = string_to_bin('4AFC3636-DB01-4052-80C3-03BBCB8D3C69')
IID_IVdsService               = string_to_bin('0818A8EF-9BA9-40D8-A6F9-E22833CC771E')
IID_IVdsSwProvider            = string_to_bin('9AA58360-CE33-4F92-B658-ED24B14425B8')
IID_IVdsProvider              = string_to_bin('10C5E575-7984-4E81-A56B-431F5F92AE42')

error_status_t = ULONG

# 2.2.1.1.3 VDS_OBJECT_ID
VDS_OBJECT_ID = GUID

################################################################################
# STRUCTURES
################################################################################
# 2.2.2.1.3.1 VDS_SERVICE_PROP
class VDS_SERVICE_PROP(NDRSTRUCT):
    structure = (
        ('pwszVersion', LPWSTR),
        ('ulFlags', ULONG),
    )

class OBJECT_ARRAY(NDRUniConformantVaryingArray):
    item = PMInterfacePointer

# 2.2.2.7.1.1 VDS_PROVIDER_TYPE
class VDS_PROVIDER_TYPE(NDRENUM):
    class enumItems(Enum):
        VDS_PT_UNKNOWN     = 0
        VDS_PT_SOFTWARE    = 1
        VDS_PT_HARDWARE    = 2
        VDS_PT_VIRTUALDISK = 3
        VDS_PT_MAX         = 4

# 2.2.2.7.2.1 VDS_PROVIDER_PROP
class VDS_PROVIDER_PROP(NDRSTRUCT):
    structure = (
        ('id', VDS_OBJECT_ID),
        ('pwszName', LPWSTR),
        ('guidVersionId', GUID),
        ('pwszVersion', LPWSTR),
        ('type', VDS_PROVIDER_TYPE),
        ('ulFlags', ULONG),
        ('ulStripeSizeFlags', ULONG),
        ('sRebuildPriority', SHORT),
    )

################################################################################
# RPC CALLS
################################################################################
# 3.4.5.2.5.1 IVdsServiceInitialization::Initialize (Opnum 3)
class IVdsServiceInitialization_Initialize(DCOMCALL):
    opnum = 3
    structure = (
        ('pwszMachineName', LPWSTR),
    )

class IVdsServiceInitialization_InitializeResponse(DCOMANSWER):
    structure = (
        ('ErrorCode', error_status_t),
    )

# 3.4.5.2.4.1 IVdsService::IsServiceReady (Opnum 3)
class IVdsService_IsServiceReady(DCOMCALL):
    opnum = 3
    structure = (
    )

class IVdsService_IsServiceReadyResponse(DCOMANSWER):
    structure = (
        ('ErrorCode', error_status_t),
    )

# 3.4.5.2.4.2 IVdsService::WaitForServiceReady (Opnum 4)
class IVdsService_WaitForServiceReady(DCOMCALL):
    opnum = 4
    structure = (
    )

class IVdsService_WaitForServiceReadyResponse(DCOMANSWER):
    structure = (
        ('ErrorCode', error_status_t),
    )

# 3.4.5.2.4.3 IVdsService::GetProperties (Opnum 5)
class IVdsService_GetProperties(DCOMCALL):
    opnum = 5
    structure = (
    )

class IVdsService_GetPropertiesResponse(DCOMANSWER):
    structure = (
        ('pServiceProp', VDS_SERVICE_PROP),
        ('ErrorCode', error_status_t),
    )

# 3.4.5.2.4.4 IVdsService::QueryProviders (Opnum 6)
class IVdsService_QueryProviders(DCOMCALL):
    opnum = 6
    structure = (
        ('masks', DWORD),
    )

class IVdsService_QueryProvidersResponse(DCOMANSWER):
    structure = (
        ('ppEnum', PMInterfacePointer),
        ('ErrorCode', error_status_t),
    )

# 3.1.1.1 IEnumVdsObject Interface
# 3.4.5.2.1.1 IEnumVdsObject::Next (Opnum 3)
class IEnumVdsObject_Next(DCOMCALL):
    opnum = 3
    structure = (
        ('celt', ULONG),
    )

class IEnumVdsObject_NextResponse(DCOMANSWER):
    structure = (
        ('ppObjectArray', OBJECT_ARRAY),
        ('pcFetched', ULONG),
        ('ErrorCode', error_status_t),
    )

# 3.4.5.2.14.1 IVdsProvider::GetProperties (Opnum 3)
class IVdsProvider_GetProperties(DCOMCALL):
    opnum = 3
    structure = (
    )

class IVdsProvider_GetPropertiesResponse(DCOMANSWER):
    structure = (
        ('pProviderProp', VDS_PROVIDER_PROP),
        ('ErrorCode', error_status_t),
    )

################################################################################
# OPNUMs and their corresponding structures
################################################################################
OPNUMS = {
}

################################################################################
# HELPER FUNCTIONS AND INTERFACES
################################################################################
class IEnumVdsObject(IRemUnknown2):
    def Next(self, celt=0xffff):
        request = IEnumVdsObject_Next()
        request['ORPCthis'] = self.get_cinstance().get_ORPCthis()
        request['ORPCthis']['flags'] = 0
        request['celt'] = celt
        try:
            resp = self.request(request, uuid=self.get_iPid())
        except Exception, e:
            resp = e.get_packet()
            # If it is S_FALSE(1) it means fewer items were returned
            if resp['ErrorCode'] != 1:
                raise
        interfaces = list()
        for interface in resp['ppObjectArray']:
            interfaces.append(IRemUnknown2(INTERFACE(self.get_cinstance(), ''.join(interface['abData']),
                                                     self.get_ipidRemUnknown(), target=self.get_target())))
        return interfaces

class IVdsProvider(IRemUnknown2):
    def GetProperties(self):
        request = IVdsProvider_GetProperties()
        request['ORPCthis'] = self.get_cinstance().get_ORPCthis()
        request['ORPCthis']['flags'] = 0
        resp = self.request(request, uuid=self.get_iPid())
        return resp

class IVdsServiceInitialization(IRemUnknown2):
    def __init__(self, interface):
        IRemUnknown2.__init__(self, interface)

    def Initialize(self):
        request = IVdsServiceInitialization_Initialize()
        request['ORPCthis'] = self.get_cinstance().get_ORPCthis()
        request['ORPCthis']['flags'] = 0
        request['pwszMachineName'] = '\x00'
        resp = self.request(request, uuid=self.get_iPid())
        return resp

class IVdsService(IRemUnknown2):
    def __init__(self, interface):
        IRemUnknown2.__init__(self, interface)

    def IsServiceReady(self):
        request = IVdsService_IsServiceReady()
        request['ORPCthis'] = self.get_cinstance().get_ORPCthis()
        request['ORPCthis']['flags'] = 0
        try:
            resp = self.request(request, uuid=self.get_iPid())
        except Exception, e:
            resp = e.get_packet()
        return resp

    def WaitForServiceReady(self):
        request = IVdsService_WaitForServiceReady()
        request['ORPCthis'] = self.get_cinstance().get_ORPCthis()
        request['ORPCthis']['flags'] = 0
        resp = self.request(request, uuid=self.get_iPid())
        return resp

    def GetProperties(self):
        request = IVdsService_GetProperties()
        request['ORPCthis'] = self.get_cinstance().get_ORPCthis()
        request['ORPCthis']['flags'] = 0
        resp = self.request(request, uuid=self.get_iPid())
        return resp

    def QueryProviders(self, masks):
        request = IVdsService_QueryProviders()
        request['ORPCthis'] = self.get_cinstance().get_ORPCthis()
        request['ORPCthis']['flags'] = 0
        request['masks'] = masks
        resp = self.request(request, uuid=self.get_iPid())
        return IEnumVdsObject(INTERFACE(self.get_cinstance(), ''.join(resp['ppEnum']['abData']),
                                        self.get_ipidRemUnknown(), target=self.get_target()))
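# ---------------------------------------------------------------------------
# Usage sketch, not part of the original module: driving the VDS interfaces
# above over DCOM. Modelled loosely on the impacket SMB_RPC test cases; the
# target address and credentials are placeholders, and the mask value 1
# (VDS_QUERY_SOFTWARE_PROVIDERS in MS-VDS) is an assumption.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from impacket.dcerpc.v5.dcomrt import DCOMConnection

    dcom = DCOMConnection('192.168.1.10', 'Administrator', 'Password1', '')
    iInterface = dcom.CoCreateInstanceEx(CLSID_VirtualDiskService,
                                         IID_IVdsServiceInitialization)
    serviceInit = IVdsServiceInitialization(iInterface)
    serviceInit.Initialize()
    # Ask the same object instance for its IVdsService interface.
    service = IVdsService(serviceInit.RemQueryInterface(1, (IID_IVdsService,)))
    service.WaitForServiceReady()
    # Enumerate providers and dump each one's properties.
    for interface in service.QueryProviders(1).Next():
        provider = IVdsProvider(interface.RemQueryInterface(1, (IID_IVdsProvider,)))
        provider.GetProperties().dump()
    dcom.disconnect()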
import uuid, datetime
from os.path import join
from shutil import rmtree

from daisyproducer.documents.storage import OverwriteStorage
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.db import models
from django.db.models import Max
from django.forms import ModelForm
from django.utils.translation import ugettext_lazy as _


class StateError(Exception):
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)


class State(models.Model):
    name = models.CharField(unique=True, max_length=32)
    next_states = models.ManyToManyField("self", symmetrical=False, blank=True)
    responsible = models.ManyToManyField(Group)
    sort_order = models.PositiveSmallIntegerField()

    def __unicode__(self):
        return self.name

    def transitionTo(self, state):
        if not isinstance(state, State):
            raise TypeError("'%s' is not a registered state" % state)
        if state not in self.next_states.all():
            raise StateError("Cannot transition to %s from %s" % (state.name, self.name))
        return state

    # used for admin view
    def all_next_states(self):
        return ",".join([state.name for state in self.next_states.all()])

    # used for admin view
    def all_responsible(self):
        return ",".join([group.name for group in self.responsible.all()])

    def is_last_state(self):
        return self.sort_order == State.objects.aggregate(final_sort_order=Max('sort_order')).get('final_sort_order')

    class Meta:
        ordering = ['sort_order']


class Document(models.Model):
    # see http://www.daisy.org/z3986/2005/Z3986-2005.html for a description of all the metadata fields
    title = models.CharField(
        _("Title"), max_length=255,
        help_text=_("The title of the DTB, including any subtitles"))
    author = models.CharField(
        _("Author"), max_length=255,
        help_text=_("Names of primary author or creator of the intellectual content of the publication"),
        blank=True)
    subject = models.CharField(
        _("Subject"), max_length=255,
        help_text=_("The topic of the content of the publication"),
        blank=True)
    description = models.TextField(
        _("Description"),
        help_text=_("Plain text describing the publication's content"),
        blank=True)
    publisher = models.CharField(
        _("Publisher"), max_length=255,
        default=settings.DAISY_DEFAULT_PUBLISHER,
        help_text=_("The agency responsible for making the DTB available"))
    date = models.DateField(
        _("Date"),
        help_text=_("Date of publication of the DTB"))
    identifier = models.CharField(
        _("Identifier"), max_length=255, unique=True,
        help_text=_("A string or number identifying the DTB"))
    source = models.CharField(
        _("Source"), max_length=20,
        help_text=_("A reference to a resource (e.g., a print original, ebook, etc.) from which the DTB is derived. Best practice is to use the ISBN when available"),
        blank=True)
    language_choices = (
        ('de', 'de'),
        ('de-1901', 'de-1901',),
        # ('de-CH', 'de-CH',),
        # ('de-CH-1901', 'de-CH-1901',),
        # ('gsw', 'gsw',),
        ('en', 'en',),
        ('fr', 'fr',),
        # ('it', 'it',),
        ('es', 'es',),
        # ('und', 'Undefined',),
    )
    language = models.CharField(
        _("Language"), max_length=10,
        choices=language_choices,
        help_text=_("Language of the content of the publication"))
    rights = models.CharField(
        _("Rights"), max_length=255,
        help_text=_("Information about rights held in and over the DTB"),
        blank=True)
    source_date = models.DateField(
        _("Source Date"),
        help_text=_("Date of publication of the resource (e.g., a print original, ebook, etc.) from which the DTB is derived"),
        null=True, blank=True)
    source_edition = models.CharField(
        _("Source Edition"), max_length=255,
        help_text=_("A string describing the edition of the resource (e.g., a print original, ebook, etc.) from which the DTB is derived"),
        blank=True)
    source_publisher = models.CharField(
        _("Source Publisher"), max_length=255,
        help_text=_("The agency responsible for making available the resource (e.g., a print original, ebook, etc.) from which the DTB is derived"),
        blank=True)
    source_rights = models.CharField(
        _("Source Rights"), max_length=255,
        help_text=_("Information about rights held in and over the resource (e.g., a print original, ebook, etc.) from which the DTB is derived"),
        blank=True)
    PRODUCTION_SERIES_CHOICES = (
        ('SJW', 'SJW'),
        ('PPP', 'Rucksack-Buch',),
    )
    production_series = models.CharField(
        _("Production Series"), max_length=25,
        choices=PRODUCTION_SERIES_CHOICES,
        help_text=_("Information about the series under which the book is produced"),
        blank=True)
    production_series_number = models.CharField(
        _("Production Series Number"), max_length=25,
        help_text=_("Information about the number in the series under which the book is produced"),
        blank=True)
    PRODUCTION_SOURCE_CHOICES = (
        ('electronicData', 'Electronic Data'),
    )
    production_source = models.CharField(
        _("Production Source"), max_length=25,
        choices=PRODUCTION_SOURCE_CHOICES,
        help_text=_("Information about the source from which the book was produced, e.g. scanned book, electronic data, etc"),
        blank=True)
    state = models.ForeignKey(State, verbose_name=_("State"))
    assigned_to = models.ForeignKey(User, verbose_name=_("Assigned to"), null=True, blank=True)
    created_at = models.DateTimeField(_("Created"))
    modified_at = models.DateTimeField(_("Last Modified"))

    def __unicode__(self):
        return self.title

    def latest_version(self):
        return self.version_set.latest()

    def transitionTo(self, state):
        self.assigned_to = None
        self.state = self.state.transitionTo(state)
        self.save()
        if self.state.is_last_state():
            # we just transitioned to the last state. Presumably the
            # production is finished now. Since the images take up a
            # lot of space we'll remove them all. If a production is
            # picked up again we can always upload them again.
            for image in self.image_set.all():
                # we have to loop over the images themselves to make
                # sure the delete method is invoked (which deletes the
                # image on the file system)
                image.delete()

    def has_local_words(self):
        return self.localword_set.exists()

    def save(self, *args, **kwargs):
        if not self.id:
            self.created_at = datetime.datetime.now()
            self.date = datetime.date.today()
        self.modified_at = datetime.datetime.now()
        # set initial state
        if not self.pk and not hasattr(self, 'state'):
            self.state = State.objects.filter(name='new')[0]
        if not self.identifier:
            self.identifier = "ch-sbs-%s" % str(uuid.uuid4())
        super(Document, self).save(*args, **kwargs)

    def delete(self, *args, **kwargs):
        old_id = self.id
        super(Document, self).delete(*args, **kwargs)
        # remove the folders for versions and attachments on the file system
        rmtree(join(settings.MEDIA_ROOT, str(old_id)))


def get_version_path(instance, filename):
    return '%s/versions/%s.xml' % (instance.document_id, instance.id)


class Version(models.Model):
    comment = models.CharField(max_length=255)
    document = models.ForeignKey(Document)
    content = models.FileField(upload_to=get_version_path)
    created_by = models.ForeignKey(User, verbose_name=_("Created by"))
    created_at = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return u'%s, %s, %s' % (self.comment, self.created_by, self.created_at)

    def delete(self, *args, **kwargs):
        # remove the files on the file system
        self.content.delete()
        super(Version, self).delete(*args, **kwargs)

    class Meta:
        get_latest_by = "created_at"
        ordering = ['-created_at']


def get_attachment_path(instance, filename):
    return '%s/attachments/%s' % (instance.document_id, filename)


class Attachment(models.Model):
    MIME_TYPE_CHOICES = (
        ('application/pdf', 'Portable Document Format, PDF'),
        ('application/msword', 'Microsoft Word files'),
        ('application/rtf', 'Microsoft RTF files'),
        ('text/html', 'HTML'),
    )
    comment = models.CharField(max_length=255)
    mime_type = models.CharField(max_length=32, choices=MIME_TYPE_CHOICES)
    document = models.ForeignKey(Document)
    content = models.FileField(upload_to=get_attachment_path)
    created_by = models.ForeignKey(User, verbose_name=_("Created by"))
    created_at = models.DateTimeField(auto_now_add=True)

    def delete(self, *args, **kwargs):
        # remove the files on the file system
        self.content.delete()
        super(Attachment, self).delete(*args, **kwargs)

    class Meta:
        ordering = ['-created_at']


def get_image_path(instance, filename):
    return '%s/images/%s' % (instance.document_id, filename)


class Image(models.Model):
    MIME_TYPE_CHOICES = (
        ('image/jpeg', 'JPEG image'),
        ('image/png', 'PNG image'),
    )
    document = models.ForeignKey(Document)
    content = models.FileField(upload_to=get_image_path, storage=OverwriteStorage())

    def __unicode__(self):
        return u'%s, %s' % (self.document, self.content)

    def delete(self, *args, **kwargs):
        # remove the files on the file system
        self.content.delete()
        super(Image, self).delete(*args, **kwargs)

    class Meta:
        unique_together = ('document', 'content')
        ordering = ['content']


class Product(models.Model):
    PRODUCT_TYPE_CHOICES = (
        (0, 'Braille'),
        (1, 'Large Print'),
        (2, 'EBook'),
        (3, 'E-Text'),
    )
    identifier = models.CharField(_("Identifier"), max_length=255, unique=True)
    type = models.PositiveSmallIntegerField(_("Type"), choices=PRODUCT_TYPE_CHOICES)
    document = models.ForeignKey(Document)

    def __unicode__(self):
        return self.identifier


# Profiles
class BrailleProfile(models.Model):
    BRAILLE_CONTRACTION_GRADE_CHOICES = (
        ('0', _('Grade 0')),
        ('1', _('Grade 1')),
        ('2', _('Grade 2')),
    )
    cells_per_line = models.PositiveSmallIntegerField(_("Cells per Line"), default=40)
    lines_per_page = models.PositiveSmallIntegerField(_("Lines per Page"), default=28)
    contraction = models.PositiveSmallIntegerField(_("Contraction"), default=0, choices=BRAILLE_CONTRACTION_GRADE_CHOICES)
    hyphenation = models.BooleanField(_("Hyphenation"), default=True)
    show_original_page_numbers = models.BooleanField(_("Show original page numbers"), default=True)
    enable_capitalization = models.BooleanField(_("Enable Capitalization"), default=False)
    detailed_accented_characters = models.BooleanField(_("Detailed Accented Characters"), default=False)


class LargePrintProfile(models.Model):
    FONTSIZE_CHOICES = (
        ('12pt', '12pt'),
        ('14pt', '14pt'),
        ('17pt', '17pt'),
        ('20pt', '20pt'),
        ('25pt', '25pt'),
    )
    FONT_CHOICES = (
        ('Tiresias LPfont', 'Tiresias LPfont'),
        ('Latin Modern Roman', 'Latin Modern Roman'),
        ('Latin Modern Sans', 'Latin Modern Sans'),
        ('Latin Modern Mono', 'Latin Modern Mono'),
    )
    PAGESTYLE_CHOICES = (
        ('plain', _('Plain')),
        ('withPageNums', _('With original page numbers')),
        ('spacious', _('Spacious')),
        ('scientific', _('Scientific')),
    )
    ALIGNMENT_CHOICES = (
        ('justified', _('justified')),
        ('left', _('left aligned')),
    )
    PAPERSIZE_CHOICES = (
        ('a3paper', 'a3paper'),
        ('a4paper', 'a4paper'),
    )
    LINESPACING_CHOICES = (
        ('singlespacing', _('Single spacing')),
        ('onehalfspacing', _('One-and-a-half spacing')),
        ('doublespacing', _('Double spacing')),
    )
    ENDNOTE_CHOICES = (
        ('none', _('Plain Footnotes')),
        ('document', _('Document Endnotes')),
        ('chapter', _('Chapter Endnotes')),
    )
    IMAGE_CHOICES = (
        ('show', _('Show Images')),
        ('ignore', _('Hide Images')),
    )
    font_size = models.CharField(_("Fontsize"), default='17pt', max_length=4, choices=FONTSIZE_CHOICES)
    font = models.CharField(_("Font"), default='Tiresias LPfont', max_length=60, choices=FONT_CHOICES)
    page_style = models.CharField(_("Page style"), default='plain', max_length=16, choices=PAGESTYLE_CHOICES)
    alignment = models.CharField(default='left', max_length=16, choices=ALIGNMENT_CHOICES)
    stock_size = models.CharField(_("Stocksize"), default='a4paper', max_length=16, choices=PAPERSIZE_CHOICES)
    line_spacing = models.CharField(_("Line Spacing"), default='onehalfspacing', max_length=16, choices=LINESPACING_CHOICES)
    replace_em_with_quote = models.BooleanField(_("Replace italics with quote"), default=True)
    end_notes = models.CharField(_("End Notes"), default='none', max_length=16, choices=ENDNOTE_CHOICES)
    image_visibility = models.CharField(_("Images"), default='ignore', max_length=16, choices=IMAGE_CHOICES)


class LargePrintProfileForm(ModelForm):
    class Meta:
        fields = "__all__"
        model = LargePrintProfile
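# ---------------------------------------------------------------------------
# Usage sketch, not part of the original module: driving the State workflow
# above. Apart from 'new', which Document.save() looks up explicitly, the
# state names and sort orders here are assumptions.
# ---------------------------------------------------------------------------
def _state_machine_demo():
    new = State.objects.get(name='new')
    approved = State.objects.create(name='approved',
                                    sort_order=new.sort_order + 1)  # hypothetical state
    new.next_states.add(approved)

    doc = Document(title='Example', date=datetime.date.today())
    doc.save()                  # assigns the 'new' state and mints a ch-sbs-<uuid> identifier
    doc.transitionTo(approved)  # allowed: 'approved' is in new.next_states
    try:
        doc.transitionTo(new)   # no edge back to 'new' was declared
    except StateError as e:
        print(e)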
# ERPNext - web based ERP (http://erpnext.com)
# Copyright (C) 2012 Web Notes Technologies Pvt Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from __future__ import unicode_literals
import webnotes
from controllers.trends import get_columns, get_data


def execute(filters=None):
    if not filters:
        filters = {}
    data = []
    trans = "Delivery Note"
    conditions = get_columns(filters, trans)
    data = get_data(filters, conditions)
    return conditions["columns"], data
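# Usage sketch, not part of the original report: the query-report runner is
# expected to call execute() with the user's filters and render the returned
# (columns, data) pair. Running this standalone needs a configured webnotes
# session, and the filter keys below are illustrative assumptions.
if __name__ == "__main__":
    columns, data = execute({"based_on": "Item", "period": "Monthly",
                             "fiscal_year": "2011-2012"})
    print columns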
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding model 'FilerFileItem'
        db.create_table(u'contentitem_file_filerfileitem', (
            (u'contentitem_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fluent_contents.ContentItem'], unique=True, primary_key=True)),
            ('file', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['filer.File'])),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('target', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)),
        ))
        db.send_create_signal(u'file', ['FilerFileItem'])

    def backwards(self, orm):
        # Deleting model 'FilerFileItem'
        db.delete_table(u'contentitem_file_filerfileitem')

    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'file.filerfileitem': {
            'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'FilerFileItem', 'db_table': "u'contentitem_file_filerfileitem'", '_ormbases': ['fluent_contents.ContentItem']},
            u'contentitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fluent_contents.ContentItem']", 'unique': 'True', 'primary_key': 'True'}),
            'file': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['filer.File']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'target': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'})
        },
        u'filer.file': {
            'Meta': {'object_name': 'File'},
            '_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'all_files'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
            'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
            'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'owned_files'", 'null': 'True', 'to': u"orm['auth.User']"}),
            'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_filer.file_set+'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'sha1': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '40', 'blank': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        },
        u'filer.folder': {
            'Meta': {'ordering': "(u'name',)", 'unique_together': "((u'parent', u'name'),)", 'object_name': 'Folder'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'filer_owned_folders'", 'null': 'True', 'to': u"orm['auth.User']"}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
            u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        },
        'fluent_contents.contentitem': {
            'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'ContentItem'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '15', 'db_index': 'True'}),
            'parent_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contentitems'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['fluent_contents.Placeholder']"}),
            'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_fluent_contents.contentitem_set+'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'})
        },
        'fluent_contents.placeholder': {
            'Meta': {'unique_together': "(('parent_type', 'parent_id', 'slot'),)", 'object_name': 'Placeholder'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parent_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
            'role': ('django.db.models.fields.CharField', [], {'default': "'m'", 'max_length': '1'}),
            'slot': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
        }
    }

    complete_apps = ['file']
"""Field validators.""" import re from django.core.exceptions import ValidationError from django.core.validators import EmailValidator, URLValidator from django.utils.translation import ugettext_lazy class HostnameValidator(object): """Validator for fqdn.""" message = ugettext_lazy("Enter a valid domain name") code = "invalid" regex = re.compile(URLValidator.host_re) def __init__(self, message=None, code=None): """Constructor.""" if message is not None: self.message = message if code is not None: self.code = code def __call__(self, value): """Check value.""" if len(value) > 255: raise ValidationError(self.message, self.code) if value[-1] == ".": # strip exactly one dot from the right, if present. value = value[:-1] if not self.regex.match(value): raise ValidationError(self.message, self.code) validate_hostname = HostnameValidator() class UTF8EmailValidator(EmailValidator): """Validator for addresses using non-ASCII characters.""" # unicode letters range (must be a unicode string, not a raw string) ul = u"\u00a1-\uffff" ascii_set = u"-!#$%&'*+/=?^_`{}|~0-9A-Z" user_regex_raw = ( # dot-atom ur"^[" + ascii_set + ul + "]+(\.[" + ascii_set + ul + "]+)*\Z" # quoted-string ur'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)' ) user_regex = re.compile("(" + user_regex_raw, re.IGNORECASE) validate_utf8_email = UTF8EmailValidator() class UTF8AndEmptyUserEmailValidator(UTF8EmailValidator): """Same as upper + allows empty local part.""" user_regex = re.compile( ur"(^$|" + UTF8EmailValidator.user_regex_raw, re.IGNORECASE) validate_utf8_and_empty_user_email = UTF8AndEmptyUserEmailValidator()
"""Module to run the amore rotation search""" __author__ = "Adam Simpkin & Felix Simkovic" __date__ = "15 April 2018" __version__ = "0.4" import glob import logging import os import shutil import uuid logger = logging.getLogger(__name__) import pyjob from pyjob.script import ScriptCollector, Script import simbad.db import simbad.mr import simbad.rotsearch import simbad.core.amore_score import simbad.core.dat_score import simbad.parsers.phaser_parser import simbad.parsers.refmac_parser import simbad.parsers.rotsearch_parser import simbad.util import simbad.util.pdb_util import simbad.util.mtz_util import simbad.util.matthews_prob from simbad.util import EXPORT, CMD_PREFIX, CCP4_SOURCE, CCP4_SCRATCH, MKDIR_CMD, RM_CMD class AmoreRotationSearch(simbad.rotsearch._RotationSearch): """A class to perform the amore rotation search Attributes ---------- amore_exe : str The path to the amore executable mtz : str The path to the input MTZ work_dir : str The path to the working directory max_to_keep : int The maximum number of results to keep [default: 20] search_results : list The search results Examples -------- >>> from simbad.rotsearch.amore_search import AmoreRotationSearch >>> rotation_search = AmoreRotationSearch('<mtz>', '<mr_program>', '<tmp_dir>', '<work_dir>', ... '<amore_exe>', '<max_to_keep>', '<skip_mr>', '<process_all>') >>> rotation_search.run( ... '<models_dir>', '<output_dir>', '<nproc>', '<shres>', '<pklim>', '<npic>', '<rotastep>', ... '<min_solvent_content>', '<submit_nproc>', '<submit_qtype>', '<submit_queue>', '<chunk_size>' ... ) >>> rotation_search.summarize() >>> search_results = rotation_search.search_results If any results are found, an object is returned containing the pdb_code, and the various associated scores from amore. """ def __init__(self, mtz, mr_program, tmp_dir, work_dir, amore_exe=None, max_to_keep=20, skip_mr=False, process_all=False, **kwargs): super(AmoreRotationSearch, self).__init__(mtz, mr_program, tmp_dir, work_dir, max_to_keep=max_to_keep, skip_mr=skip_mr, process_all=process_all) self.amore_exe = amore_exe self.hklpck0 = None self.shres = None self.pklim = None self.progress = -5 self.npic = None self.rotastep = None self.ccp4_scr = None self.script_log_dir = None self.columns = [ "ALPHA", "BETA", "GAMMA", "CC_F", "RF_F", "CC_I", "CC_P", "Icp", "CC_F_Z_score", "CC_P_Z_score", "Number_of_rotation_searches_producing_peak", ] self.score_column = "CC_F_Z_score" self.template_hklpck1 = os.path.join(CCP4_SCRATCH, "{0}.hkl") self.template_clmn0 = os.path.join(CCP4_SCRATCH, "{0}_spmipch.clmn") self.template_clmn1 = os.path.join(CCP4_SCRATCH, "{0}.clmn") self.template_mapout = os.path.join(CCP4_SCRATCH, "{0}_amore_cross.map") self.template_table1 = os.path.join(CCP4_SCRATCH, "{0}_sfs.tab") self.template_model = os.path.join(CCP4_SCRATCH, "{0}.pdb") self.template_rot_log = os.path.join(CCP4_SCRATCH, "{0}_rot.log") self.template_tmp_dir = None def run( self, models_dir, nproc=2, shres=3.0, pklim=0.5, npic=50, rotastep=1.0, min_solvent_content=20, submit_qtype=None, submit_queue=None, chunk_size=0, **kwargs ): """Run amore rotation function on a directory of models Parameters ---------- models_dir : str The directory containing the models to run the rotation search on nproc : int, optional The number of processors to run the job on shres : int, float, optional Spherical harmonic resolution [default 3.0] pklim : int, float, optional Peak limit, output all peaks above <float> [default: 0.5] npic : int, optional Number of peaks to output from the translation function 
map for each orientation [default: 50] rotastep : int, float, optional Size of rotation step [default : 1.0] min_solvent_content : int, float, optional The minimum solvent content present in the unit cell with the input model [default: 30] submit_qtype : str The cluster submission queue type - currently support SGE and LSF submit_queue : str The queue to submit to on the cluster chunk_size : int, optional The number of jobs to submit at the same time Returns ------- file log file for each model in the models_dir """ from phaser import InputMR_DAT, runMR_DAT, InputCCA, runCCA self.shres = shres self.pklim = pklim self.npic = npic self.rotastep = rotastep self.submit_qtype = submit_qtype self.submit_queue = submit_queue self.simbad_dat_files = simbad.db.find_simbad_dat_files(models_dir) i = InputMR_DAT() i.setHKLI(self.mtz) i.setLABI_F_SIGF(self.mtz_obj.f, self.mtz_obj.sigf) i.setMUTE(True) run_mr_data = runMR_DAT(i) sol_calc = simbad.util.matthews_prob.SolventContent(self.mtz_obj.cell.volume_per_image()) dir_name = "simbad-tmp-" + str(uuid.uuid1()) self.script_log_dir = os.path.join(self.work_dir, dir_name) os.mkdir(self.script_log_dir) self.hklpck0 = self._generate_hklpck0() self.ccp4_scr = os.environ["CCP4_SCR"] default_tmp_dir = os.path.join(self.work_dir, "tmp") if self.tmp_dir: self.template_tmp_dir = os.path.join(self.tmp_dir, dir_name + "-{0}") else: self.template_tmp_dir = os.path.join(default_tmp_dir, dir_name + "-{0}") predicted_molecular_weight = 0 if run_mr_data.Success(): i = InputCCA() i.setSPAC_HALL(run_mr_data.getSpaceGroupHall()) i.setCELL6(run_mr_data.getUnitCell()) i.setMUTE(True) run_cca = runCCA(i) if run_cca.Success(): predicted_molecular_weight = run_cca.getAssemblyMW() dat_models = [] for dat_model in self.simbad_dat_files: name = os.path.basename(dat_model.replace(".dat", "")) try: pdb_struct = simbad.util.pdb_util.PdbStructure.from_file(dat_model) except Exception: # Catch all issues here msg = "Skipping %s: Problem with dat file" logger.debug(msg, name) continue solvent_fraction = sol_calc.calculate_from_struct(pdb_struct) solvent_content = solvent_fraction * 100 if solvent_content < min_solvent_content: msg = "Skipping %s: solvent content is predicted to be less than %.2f" logger.debug(msg, name, min_solvent_content) continue x, y, z, intrad = pdb_struct.integration_box model_molecular_weight = pdb_struct.molecular_weight mw_diff = abs(predicted_molecular_weight - model_molecular_weight) info = simbad.core.dat_score.DatModelScore(name, dat_model, mw_diff, x, y, z, intrad, solvent_content, None) dat_models.append(info) sorted_dat_models = sorted(dat_models, key=lambda x: float(x.mw_diff), reverse=False) n_files = len(sorted_dat_models) chunk_size = simbad.rotsearch.get_chunk_size(n_files, chunk_size) total_chunk_cycles = simbad.rotsearch.get_total_chunk_cycles(n_files, chunk_size) results = [] iteration_range = range(0, n_files, chunk_size) for cycle, i in enumerate(iteration_range): logger.info("Working on chunk %d out of %d", cycle + 1, total_chunk_cycles) if self.solution: logger.info("Early termination criteria met, skipping chunk %d", cycle + 1) continue collector = ScriptCollector(None) amore_files = [] for dat_model in sorted_dat_models[i: i + chunk_size]: script, run_file = self.generate_script(dat_model) collector.add(script) amore_files.append(run_file) if len(collector.scripts) > 0: logger.info("Running AMORE tab/rot functions") amore_logs, dat_models = zip(*amore_files) simbad.util.submit_chunk( collector, self.script_log_dir, nproc, "simbad_amore", 
submit_qtype, submit_queue, True, self.progress_monitor, self.rot_succeeded_log ) for dat_model, amore_log in zip(dat_models, amore_logs): base = os.path.basename(amore_log) pdb_code = base.replace("amore_", "").replace(".log", "") try: rotsearch_parser = simbad.parsers.rotsearch_parser.AmoreRotsearchParser(amore_log) score = simbad.core.amore_score.AmoreRotationScore( pdb_code, dat_model, rotsearch_parser.alpha, rotsearch_parser.beta, rotsearch_parser.gamma, rotsearch_parser.cc_f, rotsearch_parser.rf_f, rotsearch_parser.cc_i, rotsearch_parser.cc_p, rotsearch_parser.icp, rotsearch_parser.cc_f_z_score, rotsearch_parser.cc_p_z_score, rotsearch_parser.num_of_rot, ) if rotsearch_parser.cc_f_z_score: results += [score] except IOError: pass else: logger.critical("No structures to be trialled") self._search_results = results shutil.rmtree(self.script_log_dir) if os.path.isdir(default_tmp_dir): shutil.rmtree(default_tmp_dir) def generate_script(self, dat_model): logger.debug("Generating script to perform AMORE rotation " + "function on %s", dat_model.pdb_code) pdb_model = self.template_model.format(dat_model.pdb_code) table1 = self.template_table1.format(dat_model.pdb_code) hklpck1 = self.template_hklpck1.format(dat_model.pdb_code) clmn0 = self.template_clmn0.format(dat_model.pdb_code) clmn1 = self.template_clmn1.format(dat_model.pdb_code) mapout = self.template_mapout.format(dat_model.pdb_code) conv_py = "\"from simbad.db import convert_dat_to_pdb; convert_dat_to_pdb(r'{}', r'{}')\"" conv_py = conv_py.format(dat_model.dat_path, pdb_model) tab_cmd = [self.amore_exe, "xyzin1", pdb_model, "xyzout1", pdb_model, "table1", table1] tab_stdin = self.tabfun_stdin_template.format(x=dat_model.x, y=dat_model.y, z=dat_model.z, a=90, b=90, c=120) rot_cmd = [self.amore_exe, "table1", table1, "HKLPCK1", hklpck1, "hklpck0", self.hklpck0, "clmn1", clmn1, "clmn0", clmn0, "MAPOUT", mapout] rot_stdin = self.rotfun_stdin_template.format(shres=self.shres, intrad=dat_model.intrad, pklim=self.pklim, npic=self.npic, step=self.rotastep) rot_log = self.template_rot_log.format(dat_model.pdb_code) tmp_dir = self.template_tmp_dir.format(dat_model.pdb_code) source = simbad.util.source_ccp4() cmd = [ [source], [EXPORT, "CCP4_SCR=" + tmp_dir], [MKDIR_CMD, CCP4_SCRATCH, os.linesep], [CMD_PREFIX, CCP4_SOURCE + "/bin/ccp4-python", "-c", conv_py, os.linesep], ] if os.name == "nt": tab_stdin_file = os.path.join(CCP4_SCRATCH, 'tab_stdin') rot_stdin_file = os.path.join(CCP4_SCRATCH, 'rot_stdin') for i, line in enumerate(tab_stdin.split('\n')): if i == 0: cmd += [["echo", line, ">", tab_stdin_file]] else: cmd += [["echo", line, ">>", tab_stdin_file]] for i, line in enumerate(rot_stdin.split('\n')): if i == 0: cmd += [["echo", line, ">", rot_stdin_file]] else: cmd += [["echo", line, ">>", rot_stdin_file]] cmd += [ tab_cmd + ["<", tab_stdin_file, ">", os.devnull], rot_cmd + ["<", rot_stdin_file, ">", rot_log], [EXPORT, '"match="', os.linesep], ['for /F "delims=" %%G in', "('findstr SOLUTIONRCD", rot_log + "')", 'do (if not defined match set "match=%%G" & goto :found)', os.linesep], [":found", os.linesep], ["echo %match%", os.linesep] ] else: cmd += [ tab_cmd + ["<< eof >", os.devnull], [tab_stdin], ["eof"], [os.linesep], rot_cmd + ["<< eof >", rot_log], [rot_stdin], ["eof"], [os.linesep], ["grep", "-m 1", "SOLUTIONRCD", rot_log, os.linesep] ] cmd += [ [RM_CMD, CCP4_SCRATCH, os.linesep], [EXPORT, "CCP4_SCR=" + self.ccp4_scr], ] amore_script = Script(directory=self.script_log_dir, prefix="amore_", stem=dat_model.pdb_code) for c in cmd: 
amore_script.append(" ".join(map(str, c))) amore_log = amore_script.path.rsplit(".", 1)[0] + ".log" amore_files = (amore_log, dat_model.dat_path) amore_script.write() return amore_script, amore_files def _generate_hklpck0(self): logger.info("Preparing files for AMORE rotation function") stdin = self.sortfun_stdin_template.format(f=self.mtz_obj.f, sigf=self.mtz_obj.sigf) hklpck0 = os.path.join(self.work_dir, "spmipch.hkl") cmd = [self.amore_exe, "hklin", self.mtz, "hklpck0", hklpck0] pyjob.cexec(cmd, stdin=stdin) return hklpck0 @property def sortfun_stdin_template(self): return """TITLE ** spmi packing h k l F for crystal** SORTFUN RESOL 100. 2.5 LABI FP={f} SIGFP={sigf}""" @property def tabfun_stdin_template(self): return """TITLE: Produce table for MODEL FRAGMENT TABFUN CRYSTAL {x} {y} {z} {a} {b} {c} ORTH 1 MODEL 1 BTARGET 23.5 SAMPLE 1 RESO 2.5 SHANN 2.5 SCALE 4.0""" @property def rotfun_stdin_template(self): return """TITLE: Generate HKLPCK1 from MODEL FRAGMENT 1 ROTFUN GENE 1 RESO 100.0 {shres} CELL_MODEL 80 75 65 CLMN CRYSTAL ORTH 1 RESO 20.0 {shres} SPHERE {intrad} CLMN MODEL 1 RESO 20.0 {shres} SPHERE {intrad} ROTA CROSS MODEL 1 PKLIM {pklim} NPIC {npic} STEP {step}""" @staticmethod def _rot_job_succeeded(amore_z_score): """Check values for job success""" return amore_z_score > 10 def rot_succeeded_log(self, log): """Check a rotation search job for it's success Parameters ---------- log : str The path to a log file Returns ------- bool Success status of the rot run """ if self.skip_mr or self.process_all: return False rot_prog, pdb = os.path.basename(log).replace(".log", "").split("_", 1) rotsearch_parser = simbad.parsers.rotsearch_parser.AmoreRotsearchParser(log) dat_model = [s for s in self.simbad_dat_files if pdb in s][0] score = simbad.core.amore_score.AmoreRotationScore( pdb, dat_model, rotsearch_parser.alpha, rotsearch_parser.beta, rotsearch_parser.gamma, rotsearch_parser.cc_f, rotsearch_parser.rf_f, rotsearch_parser.cc_i, rotsearch_parser.cc_p, rotsearch_parser.icp, rotsearch_parser.cc_f_z_score, rotsearch_parser.cc_p_z_score, rotsearch_parser.num_of_rot, ) results = [score] try: job_succeeded = self._rot_job_succeeded(rotsearch_parser.cc_f_z_score) except TypeError: return False if job_succeeded and pdb not in self.tested: self.tested.append(pdb) output_dir = os.path.join(self.work_dir, "mr_search") mr = simbad.mr.MrSubmit( mtz=self.mtz, mr_program=self.mr_program, refine_program="refmac5", refine_type=None, refine_cycles=0, output_dir=output_dir, sgalternative="none", tmp_dir=self.tmp_dir, timeout=30, ) mr.mute = True mr.submit_jobs(results, nproc=1, process_all=True, submit_qtype=self.submit_qtype, submit_queue=self.submit_queue) mr_log = os.path.join(output_dir, pdb, "mr", self.mr_program, pdb + "_mr.log") refmac_log = os.path.join(output_dir, pdb, "mr", self.mr_program, "refine", pdb + "_ref.log") if os.path.isfile(refmac_log): refmac_parser = simbad.parsers.refmac_parser.RefmacParser(refmac_log) if simbad.mr._refinement_succeeded(refmac_parser.final_r_fact, refmac_parser.final_r_free): self.solution = True return True if os.path.isfile(mr_log): if self.mr_program == "phaser": phaser_parser = simbad.parsers.phaser_parser.PhaserParser(mr_log) if simbad.mr._phaser_succeeded(phaser_parser.llg, phaser_parser.tfz): self.solution = True return True return False def progress_monitor(self): total_log_files = 0 log_files = glob.glob(os.path.join(self.script_log_dir, '*.log')) for log in log_files: with open(log, 'r') as f: total_log_files += sum([1 for line in f.readlines() if " 
SOLUTIONRCD" in line]) total_sh_files = len(glob.glob(os.path.join(self.script_log_dir, '*.sh'))) percentage_complete = (total_log_files / total_sh_files) * 100 if percentage_complete - self.progress >= 5: logger.info("Percentage complete: {:.1f}%".format(percentage_complete)) self.progress = percentage_complete
""" Tutorial 3: Applet- and macro signatures. """ # Import the module with the global variables and the macro base class. import qtmacs.qte_global as qte_global from qtmacs.base_macro import QtmacsMacro from qtmacs.base_applet import QtmacsApplet from PyQt4 import QtGui # Get a reference to the main instance of Qtmacs. qteMain = qte_global.qteMain class TutorialMulti(QtmacsApplet): """ An applet with multiple widgets. """ def __init__(self, appletID): # Initialise the base class. super().__init__(appletID) # Instantiate three QLineEdit objects. line1 = QtGui.QLineEdit(self) line2 = QtGui.QLineEdit(self) line3 = QtGui.QLineEdit(self) # Register them with Qtmacs. self.qteLine1 = self.qteAddWidget(line1) self.qteLine2 = self.qteAddWidget(line2, autoBind=False) self.qteLine3 = self.qteAddWidget(line3, widgetSignature='custom') # Instantiate and register two push buttons. self.qtePBLocal = self.qteAddWidget(QtGui.QPushButton(self)) self.qtePBGlobal = self.qteAddWidget(QtGui.QPushButton(self)) self.qtePBLocal.setText('Local') self.qtePBGlobal.setText('Global') # Register the macro and connect the ``clicked`` signals of # the push buttons. self.macroName = qteMain.qteRegisterMacro(DemoMacroLineEdit) self.qtePBGlobal.clicked.connect(self.clickedGlobal) self.qtePBLocal.clicked.connect(self.clickedLocal) # Register DemoClickTheButton and bind it to <space>. name = qteMain.qteRegisterMacro(DemoClickTheButton) self.qteMain.qteBindKeyApplet('<space>', name, self) def clickedGlobal(self): qteMain.qteBindKeyGlobal('e', self.macroName) def clickedLocal(self): qteMain.qteBindKeyApplet('e', self.macroName, self) class DemoMacroLineEdit(QtmacsMacro): """ Insert the typed key, followed by a '|' character, into a QLineEdit. | Signature | * *applet*: '*' * *widget*: 'QLineEdit' """ def __init__(self): super().__init__() self.qteSetAppletSignature('*') self.qteSetWidgetSignature('QLineEdit') def qteRun(self): self.qteWidget.insert('|LineEdit|') class DemoClickTheButton(QtmacsMacro): """ Pass the last key on to the Qt native ``keyPressEvent`` method of the active widget. |Signature| * *applet*: 'DemoMultiWidget' * *widget*: 'QPushButton' """ def __init__(self): super().__init__() self.qteSetAppletSignature('*') self.qteSetWidgetSignature('QPushButton') def qteRun(self): self.qteWidget.animateClick() # Register the applet with Qtmacs and create an instance of it. app_name = qteMain.qteRegisterApplet(TutorialMulti) app_obj1 = qteMain.qteNewApplet(app_name) app_obj2 = qteMain.qteNewApplet(app_name) # Make the applet active, split its layout, and show the second # applet in the other half. qteMain.qteMakeAppletActive(app_obj1) qteMain.qteSplitApplet(app_obj2)
""" Test use-case when client attempts to call an unsubscribed contact. Gabble should ask them to "de-cloak". """ from gabbletest import exec_test from servicetest import (make_channel_proxy, call_async, sync_dbus, assertEquals, assertLength) import jingletest import dbus from twisted.words.xish import xpath import constants as cs import ns def test(q, bus, conn, stream): jt = jingletest.JingleTest(stream, 'test@localhost', 'foo@bar.com/Foo') jt2 = jingletest.JingleTest(stream, 'test@localhost', 'foo2@bar.com/Foo') # Make gabble think this is a different client jt2.remote_caps['node'] = 'http://example.com/fake-client1' run_test(q, bus, conn, stream, jt, True) run_test(q, bus, conn, stream, jt2, False) def run_test(q, bus, conn, stream, jt, decloak_allowed): """ Requests streams on a media channel to jt.remote_jid without having their presence at all. """ request = dbus.Dictionary({ cs.CHANNEL_TYPE: cs.CHANNEL_TYPE_STREAMED_MEDIA, cs.TARGET_HANDLE_TYPE: cs.HT_CONTACT, cs.TARGET_ID: jt.remote_jid }, signature='sv') path, props = conn.CreateChannel(request, dbus_interface=cs.CONN_IFACE_REQUESTS) media_iface = make_channel_proxy(conn, path, 'Channel.Type.StreamedMedia') handle = props[cs.TARGET_HANDLE] call_async(q, media_iface, 'RequestStreams', handle, [cs.MEDIA_STREAM_TYPE_AUDIO]) e = q.expect('stream-presence', to=jt.remote_bare_jid, presence_type=None) nodes = xpath.queryForNodes('/presence/temppres[@xmlns="%s"]' % ns.TEMPPRES, e.stanza) assertLength(1, nodes) assertEquals('media', nodes[0].getAttribute('reason')) if decloak_allowed: jt.send_remote_presence() info_event = q.expect('stream-iq', query_ns=ns.DISCO_INFO, to=jt.remote_jid) jt.send_remote_disco_reply(info_event.stanza) # RequestStreams should now happily complete q.expect('dbus-return', method='RequestStreams') else: q.expect('dbus-error', method='RequestStreams', name=cs.OFFLINE) if __name__ == '__main__': exec_test(test, timeout=10)
from django.shortcuts import render
from django.views.generic.base import TemplateView
from area.forms import AreaForm, LocationForm, BuildingForm
from area.models import Area, Location, Building
from django.http import HttpResponseRedirect
import logging


# GET /areas
class ListAreaView(TemplateView):

    template_name = "area/area_list.html"

    def get_context_data(self, **kwargs):
        logger = logging.getLogger('webapp')
        logger.info('run get_context_data run')
        context = super(ListAreaView, self).get_context_data(**kwargs)
        context['object_list'] = list(Area.objects.all())
        new = Area()
        new.name = "new"
        context['object_list'].append(new)
        return context


# GET/POST /area
def handle_area(request):
    logger = logging.getLogger('webapp')
    logger.info('run handle_area run')
    if request.method == 'POST':
        form = AreaForm(request.POST, request.FILES)
        if form.is_valid():
            a = Area()
            a.adminEmail = form.cleaned_data['adminEmail']
            a.areaStatus = form.cleaned_data['areaStatus']
            a.createdDate = form.cleaned_data['createdDate']
            a.folderName = form.cleaned_data['folderName']
            a.language = form.cleaned_data['language']
            a.logoFile = form.cleaned_data['logoFile']
            a.name = form.cleaned_data['name']
            a.paymentIntegration = form.cleaned_data['paymentIntegration']
            a.paymentId = form.cleaned_data['paymentId']
            a.plan = form.cleaned_data['plan']
            a.save()
            return HttpResponseRedirect('/areas/')
    else:
        form = AreaForm()
    return render(request, 'area/area_detail.html',
                  {'form': form, 'action': '/area/', 'http_method': 'POST'})


# GET/POST /area/<areacode>
def edit_area(request, areacode=None):
    logger = logging.getLogger('webapp')
    logger.info('run edit_area run')
    if areacode:
        a = Area.objects.get(id=int(areacode))
        if request.method == 'POST':
            # update record with submitted values
            logger.info('run submit_edit run')
            form = AreaForm(request.POST, request.FILES, instance=a)
            if form.is_valid():
                logger.info('updating area')
                logger.info(form.cleaned_data)
                a.adminEmail = form.cleaned_data['adminEmail']
                a.areaStatus = form.cleaned_data['areaStatus']
                a.createdDate = form.cleaned_data['createdDate']
                a.folderName = form.cleaned_data['folderName']
                a.language = form.cleaned_data['language']
                a.logoFile = form.cleaned_data['logoFile']
                a.name = form.cleaned_data['name']
                a.paymentIntegration = form.cleaned_data['paymentIntegration']
                a.paymentId = form.cleaned_data['paymentId']
                a.plan = form.cleaned_data['plan']
                a.save()
                return HttpResponseRedirect('/areas/')
            return render(request, 'area/area_detail.html',
                          {'form': form, 'action': '/area/' + areacode + '/',
                           'http_method': 'POST'})
        else:
            # load record to allow editing
            form = AreaForm(instance=a)
            return render(request, 'area/area_detail.html',
                          {'form': form, 'action': '/area/' + areacode + '/',
                           'http_method': 'POST'})
    else:
        return HttpResponseRedirect('/areas/')


# GET /area/<areacode>/locations
class ListLocationView(TemplateView):

    template_name = "area/location_list.html"

    def get_context_data(self, **kwargs):
        logger = logging.getLogger('webapp')
        areacode = kwargs['areacode']
        #logger.info('get locations', areacode)
        context = super(ListLocationView, self).get_context_data(**kwargs)
        area = Area.objects.get(id=int(areacode))
        context['area'] = area
        locationArray = area.location_set.values
        context['object_list'] = locationArray
        return context


# GET/POST /area/<areacode>/location
def handle_location(request, areacode=None):
    logger = logging.getLogger('webapp')
    logger.info('run handle_location run')
    area = Area.objects.get(id=int(areacode))
    if request.method == 'POST':
        form = LocationForm(request.POST)
        if form.is_valid():
            l = Location()
            l.name = form.cleaned_data['name']
            l.city = form.cleaned_data['city']
            l.state = form.cleaned_data['state']
            l.adminEmail = form.cleaned_data['adminEmail']
            area = Area.objects.get(id=int(areacode))
            area.location_set.add(l)
            return HttpResponseRedirect('/area/' + areacode + '/locations')
    else:
        form = LocationForm()
    return render(request, 'area/location_detail.html',
                  {'form': form, 'action': '/area/' + areacode + '/location/',
                   'http_method': 'POST', 'area': area})


# GET/POST /area/<areacode>/location/<locationid>
def edit_location(request, areacode=None, locationid=None):
    logger = logging.getLogger('webapp')
    logger.info('run edit_location run')
    if areacode and locationid:
        area = Area.objects.get(id=int(areacode))
        l = Location.objects.get(id=int(locationid))
        if request.method == 'POST':
            # update record with submitted values
            form = LocationForm(request.POST, instance=l)
            if form.is_valid():
                l.name = form.cleaned_data['name']
                l.city = form.cleaned_data['city']
                l.state = form.cleaned_data['state']
                l.adminEmail = form.cleaned_data['adminEmail']
                l.save()
                return HttpResponseRedirect('/area/' + areacode + '/locations')
            return render(request, 'area/location_detail.html',
                          {'form': form,
                           'action': '/area/' + areacode + '/location/' + locationid + '/',
                           'http_method': 'POST', 'area': area})
        else:
            # load record to allow editing
            form = LocationForm(instance=l)
            return render(request, 'area/location_detail.html',
                          {'form': form,
                           'action': '/area/' + areacode + '/location/' + locationid + '/',
                           'http_method': 'POST', 'area': area})
    else:
        return HttpResponseRedirect('/area/' + areacode + '/locations') if areacode else HttpResponseRedirect('/areas/')


# GET /area/<areacode>/location/<locationid>/buildings
class ListBuildingView(TemplateView):

    template_name = "area/building_list.html"

    def get_context_data(self, **kwargs):
        logger = logging.getLogger('webapp')
        areacode = kwargs['areacode']
        locationid = kwargs['locationid']
        #logger.info('get buildings', areacode, locationid)
        context = super(ListBuildingView, self).get_context_data(**kwargs)
        area = Area.objects.get(id=int(areacode))
        context['area'] = area
        location = Location.objects.get(id=int(locationid))
        context['location'] = location
        buildingArray = location.building_set.values
        context['object_list'] = buildingArray
        return context


# GET/POST /area/<areacode>/location/<locationid>/building
def handle_building(request, areacode=None, locationid=None):
    logger = logging.getLogger('webapp')
    logger.info('run handle_building run')
    area = Area.objects.get(id=int(areacode))
    if request.method == 'POST':
        form = BuildingForm(request.POST)
        if form.is_valid():
            b = Building()
            b.name = form.cleaned_data['name']
            b.address = form.cleaned_data['address']
            b.zipcode = form.cleaned_data['zipcode']
            b.phone = form.cleaned_data['phone']
            b.cellphone = form.cleaned_data['cellphone']
            b.adminEmail = form.cleaned_data['adminEmail']
            location = Location.objects.get(id=int(locationid))
            location.building_set.add(b)
            return HttpResponseRedirect('/area/' + areacode + '/location/' + locationid + '/buildings')
    else:
        form = BuildingForm()
    return render(request, 'area/building_detail.html',
                  {'form': form,
                   'action': '/area/' + areacode + '/location/' + locationid + '/building/',
                   'http_method': 'POST', 'area': area})


# GET/POST /area/<areacode>/location/<locationid>/building/<buildingid>
def edit_building(request, areacode=None, locationid=None, buildingid=None):
    logger = logging.getLogger('webapp')
    logger.info('run edit_building run')
    if areacode and locationid and buildingid:
        area = Area.objects.get(id=int(areacode))
        l = Location.objects.get(id=int(locationid))
        b = Building.objects.get(id=int(buildingid))
        if request.method == 'POST':
            # update record with submitted values
            form = BuildingForm(request.POST, instance=b)
            if form.is_valid():
                b.name = form.cleaned_data['name']
                b.address = form.cleaned_data['address']
                b.zipcode = form.cleaned_data['zipcode']
                b.phone = form.cleaned_data['phone']
                b.cellphone = form.cleaned_data['cellphone']
                b.adminEmail = form.cleaned_data['adminEmail']
                b.save()
                return HttpResponseRedirect('/area/' + areacode + '/location/' + locationid + '/buildings')
            return render(request, 'area/building_detail.html',
                          {'form': form,
                           'action': '/area/' + areacode + '/location/' + locationid + '/building/' + buildingid + '/',
                           'http_method': 'POST', 'area': area, 'location': l})
        else:
            # load record to allow editing
            form = BuildingForm(instance=b)
            return render(request, 'area/building_detail.html',
                          {'form': form,
                           'action': '/area/' + areacode + '/location/' + locationid + '/building/' + buildingid + '/',
                           'http_method': 'POST', 'area': area, 'location': l})
    else:
        return HttpResponseRedirect('/area/' + areacode + '/location/' + locationid + '/buildings') if areacode and locationid else HttpResponseRedirect('/areas/')
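# ---------------------------------------------------------------------------
# Design note, not part of the original module: AreaForm, LocationForm and
# BuildingForm are ModelForms (they accept instance=...), so the field-by-field
# copying above can usually be collapsed into form.save(). A hedged sketch of
# the equivalent edit view:
# ---------------------------------------------------------------------------
def edit_area_compact(request, areacode):  # hypothetical variant
    a = Area.objects.get(id=int(areacode))
    if request.method == 'POST':
        form = AreaForm(request.POST, request.FILES, instance=a)
        if form.is_valid():
            form.save()  # writes all cleaned fields back to the bound instance
            return HttpResponseRedirect('/areas/')
    else:
        form = AreaForm(instance=a)
    return render(request, 'area/area_detail.html',
                  {'form': form, 'action': '/area/' + areacode + '/',
                   'http_method': 'POST'})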
# -*- coding: utf-8 -*-

"""
Module that manages the game's configuration.
The configuration file passed to the start_mud function in the engine
module is loaded here.
"""


#= IMPORT ======================================================================

import ConfigParser
import sys

from src.color import check_colors, colors
from src.log import log


#= CONSTANTS ===================================================================

class ConfigOption(object):
    def __init__(self, name, section, default, facultative, online, getter, setter, minihelp):
        self.name = name
        self.section = section
        self.default = default
        self.facultative = facultative
        self.online = online
        self.getter = getter
        self.setter = setter
        self.minihelp = minihelp

        if not self.name:
            log.bug("(ConfigOption: ?) invalid name: %r" % self.name)
            return
        if not self.section:
            log.bug("(ConfigOption: %s) invalid section: %r" % (self.name, self.section))
        if not self.getter:
            log.bug("(ConfigOption: %s) invalid getter: %r" % (self.name, self.getter))
        if not self.setter:
            log.bug("(ConfigOption: %s) invalid setter: %r" % (self.name, self.setter))
        if not self.minihelp:
            log.bug("(ConfigOption: %s) invalid minihelp: %r" % (self.name, self.minihelp))
    #- End of initialization -


CONFIG_OPTIONS = (
    #            name, section, default, facultative, online, getter, setter, minihelp
    ConfigOption("http_port", "SITE", 0, False, False, "getint", "set", "HTTP port the clients use to connect to the site. The option cannot be changed online"),
    ConfigOption("site_address", "SITE", "http://", False, True, "get", "set", "Address of the site"),
    ConfigOption("allow_web_robots", "SITE", False, False, True, "getboolean", "set", "Whether or not search engine bots are allowed to index aarit's pages"),
    ConfigOption("google_analytics_ua", "SITE", "", True, True, "get", "set", "User Application ID for google analytics; if set, html code is added to the header of every dynamic web page; the option is facultative"),
    ConfigOption("max_google_translate", "SITE", 500, False, True, "getint", "set", "Maximum length handled by the google translate api"),
    ConfigOption("max_feedback_len", "SITE", 400, False, True, "getint", "set", "Maximum number of characters allowed in compatibility feedback"),
    ConfigOption("max_square_msg_len", "SITE", 100, False, True, "getint", "set", "Maximum length of a post on the square; 0 means the square is disabled"),
    ConfigOption("max_square_messages", "SITE", 100, False, True, "getint", "set", "Maximum number of messages shown on the site; if set to 0 the square is not shown"),
    ConfigOption("sending_interval", "SITE", 5, False, True, "getint", "set", "Seconds to wait between one send of a note, message or post and the next"),
    ConfigOption("text_color", "SITE", "silver", False, True, "get", "set", "Avoids sending color codes where they are not needed"),

    ConfigOption("game_name", "SERVER", "Mud", False, True, "get", "set", "Name of the game"),
    ConfigOption("server_name", "SERVER", "Server", False, True, "get", "set", "Name of the server hosting the game"),
    ConfigOption("engine_name", "SERVER", "Engine", False, True, "get", "set", "Name of the game engine"),
    ConfigOption("engine_version", "SERVER", "0.0", False, True, "get", "set", "Version of the game engine"),
    ConfigOption("staff_name", "SERVER", "Staff", False, True, "get", "set", "Name of the game staff"),
    ConfigOption("motto", "SERVER", "Huzza!", False, True, "get", "set", "Motto or tagline for the game"),
    ConfigOption("news_to_show", "SERVER", 5, False, True, "getint", "set", "Number of news items shown on the homepage, and how many are sent on each request to view older news"),
    ConfigOption("allow_player_gaming", "SERVER", True, False, True, "getboolean", "set", "Whether players are allowed to enter the game"),
    ConfigOption("save_persistence", "SERVER", True, False, True, "getboolean", "set", "Saves the data needed to keep the Mud world persistent"),
    ConfigOption("compression_mode", "SERVER", "bz2", False, True, "get", "set", "Archive format of the backups created by aarit"),
    ConfigOption("max_output_buffer", "SERVER", 128000, False, True, "getint", "set", "Maximum output buffer in Kb to send to a player; once the limit is exceeded the client connection is closed (values between 64000 and 128000)"),
    ConfigOption("max_execution_time", "SERVER", 0.04, False, True, "getfloat", "set", "Maximum execution time of a command in seconds (beyond which the automatic deferreds of group_entities in the split_entity method could do more damage than they normally would)"),

    ConfigOption("mail_on_enter_in_game", "MAIL", True, False, True, "getboolean", "set", "Sends a notice when a non-admin player enters the game"),
    ConfigOption("email", "MAIL", "?@?.?", False, True, "getemail", "set", "Address the players' mails are sent to"),
    ConfigOption("smtp_host", "MAIL", "smpt.?", False, True, "get", "set", "SMTP host used to send the mails"),
    ConfigOption("smtp_email", "MAIL", "?@?.?", False, True, "getemail", "set", "Email address used to send the mails"),

    ConfigOption("min_len_name", "GAME", 3, False, True, "getint", "set", "Minimum length of an account or character name"),
    ConfigOption("max_len_name", "GAME", 14, False, True, "getint", "set", "Maximum length of an account or character name"),
    ConfigOption("min_len_password", "GAME", 6, False, True, "getint", "set", "Minimum length of a password"),
    ConfigOption("max_len_password", "GAME", 24, False, True, "getint", "set", "Maximum length of a password"),
    ConfigOption("max_aliases", "GAME", 100, False, True, "getint", "set", "Maximum number of aliases that can be created per character"),
    ConfigOption("max_macros", "GAME", 100, False, True, "getint", "set", "Maximum number of macros that can be created per character"),
    ConfigOption("max_account_players", "GAME", 30, False, True, "getint", "set", "Maximum number of characters that can be created in one account"),
    # (TD) remove the limit once the code-confirmation image system is in place
    ConfigOption("max_account_bugs", "GAME", 1000, False, True, "getint", "set", "Maximum number of bugs that can be reported"),
    ConfigOption("max_account_comments", "GAME", 1000, False, True, "getint", "set", "Maximum number of comments that can be submitted"),
    ConfigOption("max_account_typos", "GAME", 1000, False, True, "getint", "set", "Maximum number of typos that can be reported"),
    ConfigOption("max_account_ideas", "GAME", 1000, False, True, "getint", "set", "Maximum number of ideas that can be submitted"),
    ConfigOption("max_level", "GAME", 200, False, True, "getint", "set", "Maximum level allowed by the game"),
    ConfigOption("max_stat_value", "GAME", 100, False, True, "getint", "set", "Maximum value for attributes such as strength, speed, etc"),
    ConfigOption("max_skill_value", "GAME", 100, False, True, "getint", "set", "Maximum learnable value for skills"),
    ConfigOption("clumsy_value", "GAME", -100, False, True, "getint", "set", "Threshold before a dice roll for a skill is considered clumsy"),
    ConfigOption("failure_value", "GAME", 50, False, True, "getint", "set", "Threshold before a dice roll for a skill is considered a failure"),
    ConfigOption("success_value", "GAME", 200, False, True, "getint", "set", "Threshold before a dice roll for a skill is considered a success"),
    ConfigOption("masterly_value", "GAME", 250, False, True, "getint", "set", "Threshold before a dice roll for a skill is considered masterly"),
    ConfigOption("starting_points", "GAME", 100, False, True, "getint", "set", "Starting value, used in various ways, of health, mana and vigour points"),
    ConfigOption("starting_attrs", "GAME", 30, False, True, "getint", "set", "Starting value used for the attributes"),
    ConfigOption("min_repop_time", "GAME", 0, False, True, "getint", "set", "Minimum number of minutes settable for an area reset"),
    ConfigOption("max_repop_time", "GAME", 1440, False, True, "getint", "set", "Maximum number of minutes settable for an area reset"),
    ConfigOption("max_idle_seconds", "GAME", 900, False, True, "getint", "set", "Maximum seconds of inactivity before the mud forces a disconnection"),
    ConfigOption("chars_for_smile", "GAME", 8, False, True, "getint", "set", "Number of characters checked at the end of a spoken sentence when looking for a smiley"),
    ConfigOption("gift_on_enter", "GAME", None, True, True, "get", "set", "Entity gifted, on every login, to players who do not have it yet; the option is facultative"),
    ConfigOption("initial_destination", "GAME", None, False, True, "get", "set", "Destination for PCs entering the game"),
    ConfigOption("min_secret_arg_len", "GAME", 2, False, True, "getint", "set", "Minimum number of characters to type"),
    ConfigOption("max_behaviour_probability", "GAME", 300, False, True, "getint", "set", "Maximum probability settable in behaviour labels"),
    # (TD) remove it and make it a fixed 100% instead of 300% as it is now
    ConfigOption("purification_rpg_hours", "GAME", 24, False, True, "getint", "set", "Rpg hours before an entity chosen for purification is drawn"),
    ConfigOption("leveling_restore_points", "GAME", False, False, True, "getboolean", "set", "If true, every new level gained by a player fully restores health, mana and vigour points"),
    ConfigOption("use_visual_grouping", "GAME", True, False, True, "getboolean", "set", "If true, objects are stacked visually depending on whether their long description is the same"),
    ConfigOption("use_physical_grouping", "GAME", True, False, True, "getboolean", "set", "If true, objects are stacked physically depending on whether they are equivalent to each other"),
    ConfigOption("currency_jump", "GAME", 1, False, True, "getint", "set", "How many tens separate the 4 currencies copper, silver, gold and mithril from one another: 1, 10, 100 or 1000"),
    ConfigOption("persistent_act_seconds", "GAME", 2, False, True, "getint", "set", "How many seconds action-persistence messages last, i.e. act messages used as long descriptions; valid values between 4 and 1"),
    ConfigOption("running_step_time", "GAME", 1.0, False, True, "getfloat", "set", "Minimum seconds or hundredths of a second between one movement command and the next for it to count as running; valid values between 2.0 and 0.1"),
ConfigOption("dam_plr_vs_plr", "GAME", 100, False, True, "getint", "set", "Indica il modificatore in percentuale del danno inferto tra giocatori contro giocatori"), ConfigOption("dam_plr_vs_mob", "GAME", 100, False, True, "getint", "set", "Indica il modificatore in percentuale del danno inferto tra giocatori contro mob"), ConfigOption("dam_plr_vs_item", "GAME", 100, False, True, "getint", "set", "Indica il modificatore in percentuale del danno inferto tra giocatori contro oggetti"), ConfigOption("dam_mob_vs_plr", "GAME", 100, False, True, "getint", "set", "Indica il modificatore in percentuale del danno inferto tra mob contro giocatori"), ConfigOption("dam_mob_vs_mob", "GAME", 100, False, True, "getint", "set", "Indica il modificatore in percentuale del danno inferto tra mob contro mob"), ConfigOption("dam_mob_vs_item", "GAME", 100, False, True, "getint", "set", "Indica il modificatore in percentuale del danno inferto tra mob contro item"), ConfigOption("dam_item_vs_plr", "GAME", 100, False, True, "getint", "set", "Indica il modificatore in percentuale del danno inferto tra item contro giocatori"), ConfigOption("dam_item_vs_mob", "GAME", 100, False, True, "getint", "set", "Indica il modificatore in percentuale del danno inferto tra item contro mob"), ConfigOption("dam_item_vs_item", "GAME", 100, False, True, "getint", "set", "Indica il modificatore in percentuale del danno inferto tra item contro item"), ConfigOption("exp_modifier", "GAME", 100, False, True, "getint", "set", "Indica il modificatore in percentuale del guadagno dei punti di esperienza"), ConfigOption("seconds_in_minute", "TIME", 2, False, True, "getint", "set", "Numero di secondi reali che formano un minuto rpg"), ConfigOption("minutes_in_hour", "TIME", 60, False, True, "getint", "set", "Numero di minuti rpg in un'ora rpg"), ConfigOption("hours_in_day", "TIME", 24, False, True, "getint", "set", "Numero delle ore rpg in un giorno rpg"), ConfigOption("days_in_month", "TIME", 30, False, True, "getint", "set", "Numero dei giorni rpg in un mese rpg"), ConfigOption("months_in_year", "TIME", 10, False, True, "getint", "set", "Numero dei mesi rpg in un anno rpg"), ConfigOption("dawn_hour", "TIME", 5, False, True, "getint", "set", "Ora dell'aurora"), ConfigOption("sunrise_hour", "TIME", 6, False, True, "getint", "set", "Ora dell'alba"), ConfigOption("noon_hour", "TIME", 12, False, True, "getint", "set", "Ora del mezzogiorno"), ConfigOption("sunset_hour", "TIME", 18, False, True, "getint", "set", "Ora del tramonto"), ConfigOption("dusk_hour", "TIME", 19, False, True, "getint", "set", "Ora del crepuscolo"), ConfigOption("midnight_hour", "TIME", 0, False, True, "getint", "set", "Ora relativa alla mezzanotte"), ConfigOption("aggressiveness_loop_seconds","TIME", 1, False, True, "getfloat", "set", "Tempo in secondi del ciclo relativo all'invio di messaggi di minaccia (impostabile da 0.1 a 10)"), ConfigOption("blob_loop_seconds", "TIME", 1, False, True, "getfloat", "set", "Tempo in secondi del ciclo relativo la dinamica dei fluidi (impostabile da 0.1 a 10)"), ConfigOption("decomposer_loop_seconds", "TIME", 120, False, True, "getfloat", "set", "Tempo in secondi del ciclo relativo alla decomposizione dei cadaveri (impostabile da 12 a 1200)"), ConfigOption("digestion_loop_seconds", "TIME", 60, False, True, "getfloat", "set", "Tempo in secondi del ciclo relativo alla digestione di cibo ingerito (impostabile da 6 a 600)"), ConfigOption("fight_loop_seconds", "TIME", 0.1, False, True, "getfloat", "set", "Tempo in secondi del ciclo relativo al 
combattimento (impostabile da 0.01 a 1)"), ConfigOption("game_loop_seconds", "TIME", 1, False, True, "getfloat", "set", "Tempo in secondi del ciclo relativo al gioco (impostabile da 0.1 a 10)"), ConfigOption("maintenance_loop_seconds", "TIME", 60, False, True, "getfloat", "set", "Tempo in secondi del ciclo relativo alla manutenzione (impostabile da 0.1 a 10)"), ConfigOption("room_behaviour_loop_seconds","TIME", 1, False, True, "getfloat", "set", "Tempo in secondi del ciclo relativo ai behaviour delle stanze (impostabile da 0.1 a 10)"), ConfigOption("log_accents", "LOG", True, False, True, "getboolean", "set", "Logga gli accenti senza convertirli nei, volgarmente chiamati, 'accenti apostrofati'"), ConfigOption("log_player_output", "LOG", False, False, True, "getboolean", "set", "Esegue un log, per ogni personaggio, di tutto l'output inviato relativo alla pagina del gioco"), ConfigOption("print_entity_inputs", "LOG", False, False, True, "getboolean", "set", "Esegue il print su console degli input inviati dagli oggetti e dai mob"), ConfigOption("track_behaviours", "LOG", True, False, True, "getboolean", "set", "Attiva o meno il sistema di tracking delle esecuzioni dei behaviour"), ConfigOption("track_triggers", "LOG", True, False, True, "getboolean", "set", "Attiva o meno il sistema di tracking delle esecuzioni dei trigger"), ConfigOption("reload_web_pages", "DEVELOPMENT", True, False, True, "getboolean", "set", "Ricompila i controller delle pagina web ad ogni loro richiesta, utile per modifiche on the fly senza riavviare il server"), ConfigOption("reload_commands", "DEVELOPMENT", True, False, True, "getboolean", "set", "Ricompila il modulo del comando ad ogni sua chiamata, utile per modifiche on the fly senza riavviare il server"), ConfigOption("reload_gamescripts", "DEVELOPMENT", True, False, True, "getboolean", "set", "Ricompila i gamescript, utile se si stanno modificando per test o debug"), ConfigOption("use_subsequent_resets", "DEVELOPMENT", True, False, True, "getboolean", "set", "Abilita i reset successivi al primo, a volte può essere utile disatibilitarli per test"), ConfigOption("use_behaviours", "DEVELOPMENT", True, False, True, "getboolean", "set", "Abilita o disabilita tutti i behaviour"), ConfigOption("use_gamescripts", "DEVELOPMENT", True, False, True, "getboolean", "set", "Abilita o disabilita tutti i mudscripts"), ConfigOption("use_profiler", "DEVELOPMENT", False, False, True, "getboolean", "set", "Attiva il sistema di profiling per analizzare i colli di bottiglia nel codice"), ConfigOption("check_references", "DEVELOPMENT", False, False, True, "getboolean", "set", "Attiva o meno un sistema di controllo dei riferimenti di tutte le persistenze"), ConfigOption("delete_pyc_files", "DEVELOPMENT", True, False, True, "getboolean", "set", "Cancella tutti i file py compilati alla chiusura del gioco per maggiore pulizia, soprattutto tra i file dat"), ConfigOption("time_warp", "DEVELOPMENT", False, False, True, "getboolean", "set", "Se attivata tutte le deferred scatterrano dopo un secondo invece di attendere il loro naturale decorso di tempi, anche alcuni loop scattaranno il prima possibile (dipende dal loop, alcuni dopo un secondo altri dopo un minuto), questa opzione è utile per testare praticamente in real-time il normale flusso del codice senza dovre aspettare minuti e minuti")) SUPPORTED_COMPRESSIONS = ("tar", "gz", "bz2") #= CLASSI ====================================================================== class Config(ConfigParser.SafeConfigParser): """ Classe la cui variabile 
singleton è inizializzata a fine modulo. """ ready = False filename = "" def check_option_names(self): try: config_file = open(self.filename, "r") except IOError: log.bug("Impossibile aprire il file %s in lettura" % self.filename) return for line in config_file: if not line.strip(): continue if line[0] == "#": continue if "=" not in line: continue name = line.split("=")[0].strip() for option in CONFIG_OPTIONS: if option.name == name: break else: log.bug("Non è stata trovata nessuna opzione dal nome %s nel file di configurazione %s" % (name, self.filename)) continue #- Fine Metodo - def load(self, filename): self.filename = filename self.check_option_names() ConfigParser.SafeConfigParser.read(self, filename) for option in CONFIG_OPTIONS: if hasattr(self, option.name): log.bug("L'opzione di config %s è già stata impostata precedentemente alla sezione %s" % (option.name, option.section)) continue if not hasattr(self, option.getter): log.bug("L'opzione di config %s non possiede il getter %s" % (option.name, option.getter)) setattr(self, option.name, option.default) continue getter = getattr(self, option.getter) try: value = getter(option.section, option.name) except ConfigParser.NoOptionError: if not option.facultative: log.bug("Opzione %s mancante nel file %s, verrà caricata con il suo valore di default: %s" % (option.name, filename, option.default)) setattr(self, option.name, option.default) else: setattr(self, option.name, value) self.ready = True #- Fine Metodo - def save(self): for option in reversed(CONFIG_OPTIONS): value = getattr(self, option.name) if not value and option.getter == "get" and not option.facultative: log.bug("valore dell'opzione di config %s non valida e non opzionale: %s" % (option.name, value)) if not hasattr(self, option.setter): log.bug("L'opzione di config %s non possiede il setter %s" % (option.name, option.getter)) continue setter = getattr(self, option.setter) setter(option.section, option.name, value) try: config_file = open(self.filename, "w") except IOError: log.bug("Impossibile aprire il file %s in scrittura" % self.filename) return ConfigParser.SafeConfigParser.write(self, config_file) config_file.close() #- Fine Metodo - def finalize(self): # Converte la stringa ricavata per la prima destinazione in # oggetto-destinazione vero e proprio from room import Destination destination = Destination() destination.fread_the_line(None, self.initial_destination, "self.initial_destination") from src.database import database if destination.area in database["areas"]: destination.area = database["areas"][destination.area] else: log.bug("Codice d'area %s inesistente nel file %s (sarà impossibile per i pg entrare in gioco)" % ( destination.area, self.filename)) self.initial_destination = destination first_room = self.initial_destination.get_room() if not first_room: log.bug("initial_destination impostata nel file %s non punta ad una stanza valida: %r (first_room: %r)" % ( self.filename, self.initial_destination, first_room)) sys.exit(1) return # --------------------------------------------------------------------- # Recupera il riferimento dell'eventuale dono da inserire in gioco if self.gift_on_enter == "None": self.gift_on_enter = None elif self.gift_on_enter: if "_item_" in self.gift_on_enter: table_name = "proto_items" else: table_name = "proto_mobs" if self.gift_on_enter in database[table_name]: self.gift_on_enter = database[table_name][self.gift_on_enter] else: log.bug("Non è stata trovata nessuna entità di prototipo con codice %s nel database %s" % ( 
self.gift_on_enter, table_name)) #- Fine Metodo - def iter_all_error_messages(self): msgs = [] if not self.game_name: yield "game_name è vuoto" if self.game_name and not check_colors(self.game_name): yield "game_name contiene dei stili errati: %s" % self.game_name if not self.site_address: yield "site_address è vuoto" if self.site_address and not self.site_address.startswith("http://"): yield "site_address non inizia con 'http://'" if self.site_address and not self.site_address[-1].isalnum(): yield "site_address non finisce con un numero o con una stringa, dev'essere valido per poterci eventualmente concatenare la porta: %r" % self.site_address if self.http_port < 1 or self.http_port > 65535: yield "http_port è un valore errato: %d" % self.http_port if not self.text_color: yield "text_color non è valido: %r" % self.text_color if self.text_color and not self.text_color.lower(): yield "text_color non ha tutti i caratteri minuscoli: %s" % self.text_color if self.text_color and not self.text_color in colors: yield "text_color non si trova tra i nomi dei colori: %s" % self.text_color if not self.email: yield "email mancante per poter inviare mail tramite l'smtp: %r" % self.email if not self.smtp_email: yield "smtp_email mancante per poter inviare mail tramite l'smtp: %r" % self.smtp_email if not self.smtp_host: yield "smtp_host non è un valore valido: %r" % self.smtp_host if self.min_len_name < 2: yield "min_len_name è minore di 2: %d" % self.min_len_name if self.max_len_name < 3: yield "max_len_name è minore di 3: %d" % self.max_len_name if self.min_len_name >= self.max_len_name: yield "min_len_name %d supera quella di max_len_name %d" % (self.min_len_name, self.max_len_name) if self.min_len_password < 5: yield "min_len_password è minore di 5: %d" % self.min_len_password if self.max_len_password < 5: yield "max_len_password è minore di 5: %d" % self.max_len_password if self.min_len_password >= self.max_len_password: yield "min_len_password %d supera quella di max_len_password %d" % (self.min_len_password, self.max_len_password) if self.max_aliases < 0: yield "max_aliases non può essere negativo: %d" % self.max_aliases if self.max_macros < 0: yield "max_macros non può essere negativo: %d" % self.max_macros if self.max_account_players <= 0: yield "max_account_players non può essere negativo: %d" % self.max_account_players if self.max_account_bugs < 0: yield "max_account_bugs non può essere negativo: %d" % self.max_account_bugs if self.max_account_typos < 0: yield "max_account_typos non può essere negativo: %d" % self.max_account_typos if self.max_account_ideas < 0: yield "max_account_ideas non può essere negativo: %d" % self.max_account_ideas if self.sending_interval < 1 or self.sending_interval > 60: yield "sending_interval è un valore troppo basso o troppo alto: %d" % self.sending_interval if self.max_level < 50 or self.max_level > 1000: yield "max_level non è un valore valido: %d (meglio che rimanga tra 50 e 1000)" % self.max_level if self.max_stat_value < 50 or self.max_stat_value > 200: yield "max_stat_value non è un valore valido: %d (meglio che rimanga tra 50 e 200)" % self.max_stat_value if self.max_skill_value < 50 or self.max_skill_value > 200: yield "max_skill_value non è un valore valido: %d (meglio che rimanga tra 50 e 200)" % self.max_skill_value if self.clumsy_value < -300 or self.clumsy_value > 300: yield "clumsy_value non è un valore valido: %d (meglio che rimanga tra -300 e 300)" % self.clumsy_value if self.failure_value < -300 or self.failure_value > 300: yield "failure_value 
non è un valore valido: %d (meglio che rimanga tra -300 e 300)" % self.failure_value if self.success_value < -300 or self.success_value > 300: yield "success_value non è un valore valido: %d (meglio che rimanga tra -300 e 300)" % self.success_value if self.masterly_value < -300 or self.masterly_value > 300: yield "masterly_value non è un valore valido: %d (meglio che rimanga tra -300 e 300)" % self.masterly_value if self.starting_points < 10 or self.starting_points > 1000: yield "starting_points non è valido: %d (meglio che rimanga tra 10 e 1000)" % self.starting_points if self.starting_attrs < 20 or self.starting_attrs > 50: yield "starting_attrs non è valido: %d (meglio che rimanga tra 20 e 50)" % self.starting_attrs if self.min_repop_time < 0: yield "min_repop_time è un valore minore di zero: %d" % self.min_repop_time if self.max_repop_time < 0 or self.max_repop_time < self.min_repop_time: yield "max_repop_time è un valore minore di zero o minore di min_repop_time: %d" % self.max_repop_time if self.max_idle_seconds < 60 * 5 or self.max_idle_seconds > 60 * 60: yield "max_idle_seconds non è un valore tra 5 minuti e un'ora: %d" % self.max_idle_seconds if self.chars_for_smile < 0: yield "chars_for_smile non può essere negativo: %d" % self.chars_for_smile if self.initial_destination.get_error_message() != "": yield self.initial_destination.get_error_message() if not self.compression_mode in SUPPORTED_COMPRESSIONS: yield "compression_mode è errata: %r" % self.compression_mode if not self.motto: yield "motto è un valore non valido: %r" % self.motto if not self.staff_name: yield "staff_name è un valore non valido: %r" % self.staff_name if not self.engine_name: yield "engine_name è un valore non valido: %r" % self.engine_name if not self.engine_version: yield "engine_version è un valore non valido: %r" % self.engine_version if not self.server_name: yield "server_name è un valore non valido: %r" % self.server_name if self.news_to_show < 5 or self.news_to_show > 100: yield "news_to_show dev'essere tra 5 e 100: %d" % self.news_to_show if self.max_google_translate < 100: yield "max_google_translate non è una quantità di caratteri valida: %d" % self.max_google_translate if self.max_square_msg_len < 32: yield "max_square_msg_len non è una quantità di caratteri valida: %d" % self.max_square_msg_len if self.max_square_messages < 10: yield "max_square_messages non è una quantità di caratteri valida: %d" % self.max_square_messages if self.max_feedback_len < 64: yield "max_feedback_len non è una quantità di caratteri valida: %d" % self.max_feedback_len if self.min_secret_arg_len not in (1, 2, 3): yield "min_secret_arg_len dev'essere tra 1 e 3 compresi: %d" % self.min_secret_arg_len if self.max_behaviour_probability < 0 or self.max_behaviour_probability > 1000: yield "max_behaviour_probability dev'essere un numero tra 0 e 1000 compresi: %d" % self.max_behaviour_probability if self.purification_rpg_hours < 0 or self.purification_rpg_hours > 720: yield "purification_rpg_hours dev'essere un numero tra 0 e 720 compresi: %d" % self.purification_rpg_hours if self.currency_jump not in (1, 10, 100, 1000): yield "currency_jump dev'essere una decina tra 1 e 1000 compresi: %d" % self.currency_jump if self.persistent_act_seconds < 1 or self.persistent_act_seconds > 4: yield "persistent_act_seconds dev'essere tra 1 e 4 compresi: %d" % self.persistent_act_seconds if self.running_step_time < 0.1 or self.running_step_time > 2.0: yield "running_step_time dev'essere tra 0.1 e 2.0 compresi: %f" % self.running_step_time if 
self.dam_plr_vs_plr < 10 or self.dam_plr_vs_plr > 1000: yield "dam_plr_vs_plr dev'essere tra 10 e 1000 compresi: %d" % self.dam_plr_vs_plr if self.dam_plr_vs_mob < 10 or self.dam_plr_vs_mob > 1000: yield "dam_plr_vs_mob dev'essere tra 10 e 1000 compresi: %d" % self.dam_plr_vs_mob if self.dam_plr_vs_item < 10 or self.dam_plr_vs_item > 1000: yield "dam_plr_vs_item dev'essere tra 10 e 1000 compresi: %d" % self.dam_plr_vs_item if self.dam_mob_vs_plr < 10 or self.dam_mob_vs_plr > 1000: yield "dam_mob_vs_plr dev'essere tra 10 e 1000 compresi: %d" % self.dam_mob_vs_plr if self.dam_mob_vs_mob < 10 or self.dam_mob_vs_mob > 1000: yield "dam_mob_vs_mob dev'essere tra 10 e 1000 compresi: %d" % self.dam_mob_vs_mob if self.dam_mob_vs_item < 10 or self.dam_mob_vs_item > 1000: yield "dam_mob_vs_item dev'essere tra 10 e 1000 compresi: %d" % self.dam_mob_vs_item if self.dam_item_vs_plr < 10 or self.dam_item_vs_plr > 1000: yield "dam_item_vs_plr dev'essere tra 10 e 1000 compresi: %d" % self.dam_item_vs_plr if self.dam_item_vs_mob < 10 or self.dam_item_vs_mob > 1000: yield "dam_item_vs_mob dev'essere tra 10 e 1000 compresi: %d" % self.dam_item_vs_mob if self.dam_item_vs_item < 10 or self.dam_item_vs_item > 1000: yield "dam_item_vs_item dev'essere tra 10 e 1000 compresi: %d" % self.dam_item_vs_item if self.exp_modifier < 10 or self.exp_modifier > 1000: yield "exp_modifier dev'essere tra 10 e 1000 compresi: %d" % self.exp_modifier if self.max_output_buffer < 64000 or self.max_output_buffer > 256000: yield "max_output_buffer dev'essere tra 64000 e 128000: %d" % self.max_output_buffer if self.max_execution_time < 0.001 or self.max_execution_time > 0.5: yield "max_execution_time dev'essere tra 0.001 e 0.5: %d" % self.max_execution_time if self.seconds_in_minute < 1: yield "seconds_in_minute non può essere minore di 1: %d" % self.seconds_in_minute if self.minutes_in_hour < 1: yield "minutes_in_hour non può essere minore di 1: %d" % self.minutes_in_hour if self.hours_in_day < 1: yield "hours_in_day non può essere minore di 1: %d" % self.hours_in_day if self.days_in_month < 1: yield "days_in_month non può essere minore di 1: %d" % self.days_in_month if self.months_in_year < 1: yield "months_in_year non può essere minore di 1: %d" % self.months_in_year if self.dawn_hour < 0 or self.dawn_hour > self.hours_in_day - 1: yield "dawn_hour è errata: %d (dev'essere tra 0 e %d)" % (self.dawn_hour, self.hours_in_day - 1) if self.sunrise_hour < 0 or self.sunrise_hour > self.hours_in_day - 1: yield "sunrise_hour è errata: %d (dev'essere tra 0 e %d)" % (self.sunrise_hour, self.hours_in_day - 1) if self.noon_hour < 0 or self.noon_hour > self.hours_in_day - 1: yield "noon_hour è errata: %d (dev'essere tra 0 e %d)" % (self.noon_hour, self.hours_in_day - 1) if self.sunset_hour < 0 or self.sunset_hour > self.hours_in_day - 1: yield "sunset_hour è errata: %d (dev'essere tra 0 e %d)" % (self.sunset_hour, self.hours_in_day - 1) if self.dusk_hour < 0 or self.dusk_hour > self.hours_in_day - 1: yield "dusk_hour è errata: %d (dev'essere tra 0 e %d)" % (self.dusk_hour, self.hours_in_day - 1) if self.midnight_hour < 0 or self.midnight_hour > self.hours_in_day - 1: yield "midnight_hour è errata: %d (dev'essere tra 0 e %d)" % (self.midnight_hour, self.hours_in_day - 1) if self.aggressiveness_loop_seconds < 0.1 or self.aggressiveness_loop_seconds > 10: yield "aggressiveness_loop_seconds è errata: %d (dev'essere tra 0.1 e 10)" % self.aggressiveness_loop_seconds if self.blob_loop_seconds < 0.1 or self.blob_loop_seconds > 10: yield "blob_loop_seconds 
è errata: %d (dev'essere tra 0.1 e 10)" % self.blob_loop_seconds if self.decomposer_loop_seconds < 12 or self.decomposer_loop_seconds > 1200: yield "decomposer_loop_seconds è errata: %d (dev'essere tra 12 e 1200)" % self.decomposer_loop_seconds if self.digestion_loop_seconds < 6 or self.digestion_loop_seconds > 600: yield "digestion_loop_seconds è errata: %d (dev'essere tra 6 e 600)" % self.digestion_loop_seconds if self.fight_loop_seconds < 0.01 or self.fight_loop_seconds > 1: yield "fight_loop_seconds è errata: %d (dev'essere tra 0.01 e 1)" % self.fight_loop_seconds if self.game_loop_seconds < 0.1 or self.game_loop_seconds > 10: yield "game_loop_seconds è errata: %d (dev'essere tra 0.1 e 10)" % self.game_loop_seconds if self.maintenance_loop_seconds < 6 or self.maintenance_loop_seconds > 60: yield "maintenance_loop_seconds è errata: %d (dev'essere tra 6 e 60)" % self.maintenance_loop_seconds if self.room_behaviour_loop_seconds < 0.1 or self.room_behaviour_loop_seconds > 10: yield "room_behaviour_loop_seconds è errata: %d (dev'essere tra 0.1 e 10)" % self.room_behaviour_loop_seconds #- Fine Metodo - def get_error_message(self): messages = list(self.iter_all_error_messages()) if not messages: return "" log.bug("(Config: filename %s) %s" % (self.filename, messages[0])) return messages[0] #- Fine Metodo - #- Metodi getter e setter -------------------------------------------------- def getemail(self, section_name, option_name): return ConfigParser.SafeConfigParser.get(self, section_name, option_name) #- Fine Metodo - def set(self, section_name, option_name, value): # Qui anche le opzioni che hanno entità o altri oggetti (gift_on_enter) # funzionano senza problemi grazie al metodo __str__ ConfigParser.SafeConfigParser.set(self, section_name, option_name, str(value)) #- Fine Metodo - #= SINGLETON =================================================================== config = Config()
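# A minimal usage sketch (the "game.cfg" path is a hypothetical example;
# load(), get_error_message() and finalize() are the methods defined above,
# and finalize() expects the area database to be already loaded):
#
#   config.load("game.cfg")
#   error_message = config.get_error_message()
#   if error_message:
#       print error_message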
from setuptools import setup

version = '0.5.dev0'

long_description = '\n\n'.join([
    open('README.rst').read(),
    open('CREDITS.rst').read(),
    open('CHANGES.rst').read(),
    ])

install_requires = [
    'setuptools',
    'pyproj',
    ]

tests_require = [
    'nose',
    'coverage',
    ]

setup(name='sufriblib',
      version=version,
      description="A library for working with SUFRIB 2.1 files (.RIB and .RMB files, sewer system measurement data)",
      long_description=long_description,
      # Get strings from http://www.python.org/pypi?%3Aaction=list_classifiers
      classifiers=[],
      keywords=[],
      author='Remco Gerlich',
      author_email='remco.gerlich@nelen-schuurmans.nl',
      url='',
      license='GPL',
      packages=['sufriblib'],
      include_package_data=True,
      zip_safe=False,
      install_requires=install_requires,
      tests_require=tests_require,
      extras_require={'test': tests_require},
      entry_points={
          'console_scripts': [
              'sufribcat=sufriblib.scripts:sufribcat',
          ]},
      )
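# A minimal usage sketch for the sufribcat console script declared in
# entry_points above (the .RIB filename is a hypothetical example):
#
#   $ pip install sufriblib
#   $ sufribcat measurements.RIB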
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.contrib.auth.models import User

import datetime

from managers import VoteManager


class Vote(models.Model):
    content_type = models.ForeignKey(ContentType, related_name="votes")
    object_id = models.PositiveIntegerField()
    key = models.CharField(max_length=32)
    score = models.IntegerField()
    user = models.ForeignKey(User, blank=True, null=True, related_name="votes")
    ip_address = models.IPAddressField()
    date_added = models.DateTimeField(default=datetime.datetime.now, editable=False)
    date_changed = models.DateTimeField(default=datetime.datetime.now, editable=False)

    objects = VoteManager()

    content_object = generic.GenericForeignKey()

    class Meta:
        unique_together = (('content_type', 'object_id', 'key', 'user', 'ip_address'),)

    def __unicode__(self):
        return "%s voted %s on %s" % (self.user_display, self.score, self.content_object)

    def save(self, *args, **kwargs):
        self.date_changed = datetime.datetime.now()
        super(Vote, self).save(*args, **kwargs)

    def user_display(self):
        if self.user:
            return "%s (%s)" % (self.user.username, self.ip_address)
        return self.ip_address
    user_display = property(user_display)

    def partial_ip_address(self):
        ip = self.ip_address.split('.')
        ip[-1] = 'xxx'
        return '.'.join(ip)
    partial_ip_address = property(partial_ip_address)


class Score(models.Model):
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    key = models.CharField(max_length=32)
    score = models.IntegerField()
    votes = models.PositiveIntegerField()

    content_object = generic.GenericForeignKey()

    class Meta:
        unique_together = (('content_type', 'object_id', 'key'),)

    def __unicode__(self):
        return "%s scored %s with %s votes" % (self.content_object, self.score, self.votes)
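# A minimal usage sketch (hypothetical: `article` is some model instance and
# `request` a Django request object; the field names come from the Vote model
# above):
#
#   from django.contrib.contenttypes.models import ContentType
#
#   Vote.objects.create(
#       content_type=ContentType.objects.get_for_model(article),
#       object_id=article.pk,
#       key="like",
#       score=1,
#       user=request.user,
#       ip_address=request.META["REMOTE_ADDR"],
#   )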
#!/usr/bin/env python # # Copyright (c) 2016, The OpenThread Authors. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # import time import unittest import node import mle import config import command DUT_LEADER = 1 ROUTER1 = 2 ROUTER2 = 3 class Cert_5_3_6_RouterIdMask(unittest.TestCase): def setUp(self): self.nodes = {} for i in range(1,4): self.nodes[i] = node.Node(i) self.nodes[DUT_LEADER].set_panid(0xface) self.nodes[DUT_LEADER].set_mode('rsdn') self.nodes[DUT_LEADER].add_whitelist(self.nodes[ROUTER1].get_addr64()) self.nodes[DUT_LEADER].enable_whitelist() self.nodes[ROUTER1].set_panid(0xface) self.nodes[ROUTER1].set_mode('rsdn') self.nodes[ROUTER1].add_whitelist(self.nodes[DUT_LEADER].get_addr64()) self.nodes[ROUTER1].add_whitelist(self.nodes[ROUTER2].get_addr64()) self.nodes[ROUTER1].enable_whitelist() self.nodes[ROUTER1].set_router_selection_jitter(1) self.nodes[ROUTER2].set_panid(0xface) self.nodes[ROUTER2].set_mode('rsdn') self._setUpRouter2() self.sniffer = config.create_default_thread_sniffer() self.sniffer.start() def _setUpRouter2(self): self.nodes[ROUTER2].add_whitelist(self.nodes[ROUTER1].get_addr64()) self.nodes[ROUTER2].enable_whitelist() self.nodes[ROUTER2].set_router_selection_jitter(1) def tearDown(self): self.sniffer.stop() del self.sniffer for node in list(self.nodes.values()): node.stop() del self.nodes def test(self): # 1 self.nodes[DUT_LEADER].start() self.nodes[DUT_LEADER].set_state('leader') self.assertEqual(self.nodes[DUT_LEADER].get_state(), 'leader') self.nodes[ROUTER1].start() time.sleep(5) self.assertEqual(self.nodes[ROUTER1].get_state(), 'router') self.nodes[ROUTER2].start() time.sleep(5) self.assertEqual(self.nodes[ROUTER2].get_state(), 'router') router2_id = self.nodes[ROUTER2].get_router_id() # Wait DUT_LEADER to establish routing to ROUTER2 via ROUTER1's MLE advertisement. time.sleep(config.MAX_ADVERTISEMENT_INTERVAL) # 2 self.nodes[ROUTER2].reset() self._setUpRouter2() # 3 & 4 # Flush the message queue to avoid possible impact on follow-up verification. 
        dut_messages = self.sniffer.get_messages_sent_by(DUT_LEADER)

        # Verify the cost from DUT_LEADER to ROUTER2 goes to infinity within 12 minutes.
        routing_cost = 1
        for i in range(0, 24):
            time.sleep(30)
            print("%ss" % ((i + 1) * 30))

            leader_messages = self.sniffer.get_messages_sent_by(DUT_LEADER)
            msg = leader_messages.last_mle_message(mle.CommandType.ADVERTISEMENT, False)
            if msg is None:
                continue

            self.assertTrue(command.check_id_set(msg, router2_id))
            routing_cost = command.get_routing_cost(msg, router2_id)
            if routing_cost == 0:
                break

        self.assertEqual(routing_cost, 0)

        time.sleep(config.INFINITE_COST_TIMEOUT + config.MAX_ADVERTISEMENT_INTERVAL)

        leader_messages = self.sniffer.get_messages_sent_by(DUT_LEADER)
        msg = leader_messages.last_mle_message(mle.CommandType.ADVERTISEMENT)
        self.assertFalse(command.check_id_set(msg, router2_id))

        # 5
        # Flush the message queue to avoid possible impact on follow-up verification.
        dut_messages = self.sniffer.get_messages_sent_by(DUT_LEADER)

        self.nodes[ROUTER2].start()
        time.sleep(5)
        self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')

        time.sleep(config.MAX_ADVERTISEMENT_INTERVAL)

        leader_messages = self.sniffer.get_messages_sent_by(DUT_LEADER)
        leader_messages.last_mle_message(mle.CommandType.ADVERTISEMENT)

        # 6
        self.nodes[ROUTER1].stop()
        self.nodes[ROUTER2].stop()
        router1_id = self.nodes[ROUTER1].get_router_id()
        router2_id = self.nodes[ROUTER2].get_router_id()

        time.sleep(config.MAX_NEIGHBOR_AGE + config.MAX_ADVERTISEMENT_INTERVAL)

        leader_messages = self.sniffer.get_messages_sent_by(DUT_LEADER)
        msg = leader_messages.last_mle_message(mle.CommandType.ADVERTISEMENT)
        self.assertEqual(command.get_routing_cost(msg, router1_id), 0)

        time.sleep(config.INFINITE_COST_TIMEOUT + config.MAX_ADVERTISEMENT_INTERVAL)

        leader_messages = self.sniffer.get_messages_sent_by(DUT_LEADER)
        msg = leader_messages.last_mle_message(mle.CommandType.ADVERTISEMENT)
        self.assertFalse(command.check_id_set(msg, router1_id))
        self.assertFalse(command.check_id_set(msg, router2_id))

if __name__ == '__main__':
    unittest.main()
# Copyright 2018 Google LLC. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from this # software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys if 'google' in sys.modules and 'google.protobuf' not in sys.modules: del sys.modules['google'] from absl.testing import absltest from third_party.nucleus.vendor.python import statusor_examples class StatusorClifWrapTest(absltest.TestCase): def test_make_int_ok(self): self.assertEqual(statusor_examples.MakeIntOK(), 42) def test_make_int_fail(self): with self.assertRaisesRegexp(ValueError, 'Invalid argument: MakeIntFail'): statusor_examples.MakeIntFail() def test_make_str_ok(self): self.assertEqual(statusor_examples.MakeStrOK(), 'hello') # See CLIF wrapper for a discussion of why this is commented out. # def test_make_str_ok_stripped_type(self): # self.assertEqual(statusor_examples.MakeStrOKStrippedType(), 'hello') def test_make_str_fail(self): with self.assertRaisesRegexp(ValueError, 'Invalid argument: MakeStrFail'): statusor_examples.MakeStrFail() def test_make_int_unique_ptr_ok(self): self.assertEqual(statusor_examples.MakeIntUniquePtrOK(), 421) def test_make_int_unique_ptr_fail(self): with self.assertRaisesRegexp(ValueError, 'Invalid argument: MakeIntUniquePtrFail'): statusor_examples.MakeIntUniquePtrFail() def test_make_int_vector_ok(self): self.assertEqual(statusor_examples.MakeIntVectorOK(), [1, 2, 42]) def test_make_int_vector_fail(self): with self.assertRaisesRegexp(ValueError, 'Invalid argument: MakeIntVectorFail'): statusor_examples.MakeIntVectorFail() def test_returning_status_ok_returns_none(self): self.assertEqual(statusor_examples.FuncReturningStatusOK(), None) def test_returning_status_fail_raises(self): with self.assertRaisesRegexp(ValueError, 'Invalid argument: FuncReturningStatusFail'): statusor_examples.FuncReturningStatusFail() if __name__ == '__main__': absltest.main()
import discord

client = discord.Client()

@client.event
async def on_ready():
    print('Logged in as')
    print(client.user.name)
    print(client.user.id)
    print('------')

@client.event
async def on_message(message):
    # Ignore the bot's own messages so its replies cannot re-trigger handlers.
    if message.author == client.user:
        return
    if message.content.startswith('erp with me pimbot'):
        await client.send_message(message.channel, '*rubs your milky thighs*')
    if message.content.startswith('l-lewd'):
        await client.send_message(message.channel, "I'll show you lewd! *sticks finger up butte*")
    if message.content.startswith('bully me pimbot'):
        await client.send_message(message.channel, "I bet that would turn you on wouldn't it you fairy")
    if message.content.startswith('tell me about the jews pimbot'):
        await client.send_message(message.channel, '(((they))) are putting oestrogen in the water turning the frogs and our sons gay!')
    #if message.content.startswith('!best'):
        #myid = '<@201909896357216256>'
        #await client.send_message(message.channel, ' : %s is the best ' % myid)
    #if message.content.startswith('hello <@343915078074236929>'):
        #myid = '<@343915078074236929>'
        #await client.send_message(message.channel, myid + ' says hello')
    if message.content.startswith('<@343915078074236929>'):
        await client.send_message(message.channel, 'h-huh')
    if message.content.startswith('tfw no bf'):
        await client.send_message(message.channel, "I'll be your bf, i-if you want")
    if message.content.startswith('fuck you pimbot'):
        await client.send_message(message.channel, 'no u')
    if message.content.startswith('nini~'):
        await client.send_message(message.channel, 'goodnight qt')
    if message.content.startswith('no u'):
        await client.send_message(message.channel, 'no me')
    if message.content.startswith('bye bye pimbot'):
        await client.send_message(message.channel, 'bye bye~')
    if message.content.startswith('~github'):
        await client.send_message(message.channel, 'fork me or contribute to my development on github: \nhttps://github.com/Shiimoe/Pimbot')
    # Guard against empty message content (e.g. attachment-only messages),
    # which would otherwise raise an IndexError on message.content[0].
    if message.content and message.content.startswith(message.content[0] + "-" + message.content[0]):
        await client.send_message(message.channel, 'Stop stuttering you gay cunt')

client.run('YOUR_BOT_TOKEN')  # placeholder: supply the bot's real token here