prefix
stringlengths
0
918k
middle
stringlengths
0
812k
suffix
stringlengths
0
962k
#!/usr/bin/env python3
"""Chiaki launcher: runs the Discord bot and provides DB-migration CLI commands."""
import asyncio
import contextlib
import datetime
import functools
import importlib
import itertools
import logging
import os
import sys
import traceback

import click
import discord

import config
from cogs.utils import db
from core import Chiaki, migration

# use faster event loop, but fall back to default if on Windows or not installed
try:
    import uvloop
except ImportError:
    pass
else:
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())


@contextlib.contextmanager
def log(stream=False):
    """Configure root logging to a per-run file inside ./logs.

    Args:
        stream: when True, also mirror log records to stderr.

    On exit, every handler is closed and detached so repeated runs in the
    same process do not accumulate handlers.
    """
    logging.getLogger('discord').setLevel(logging.INFO)

    os.makedirs(os.path.join(os.path.dirname(__file__), 'logs'), exist_ok=True)

    root = logging.getLogger()
    root.setLevel(logging.INFO)

    # NOTE(review): str(datetime.now()) contains ':' which is invalid in
    # Windows file names -- presumably this only runs on POSIX; confirm.
    handler = logging.FileHandler(
        filename=f'logs/chiaki-{datetime.datetime.now()}.log',
        encoding='utf-8',
        mode='w'
    )
    fmt = logging.Formatter('[{asctime}] ({levelname:<7}) {name}: {message}',
                            '%Y-%m-%d %H:%M:%S', style='{')
    handler.setFormatter(fmt)
    root.addHandler(handler)

    if stream:
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(fmt)
        root.addHandler(stream_handler)

    try:
        yield
    finally:
        # Close and detach handlers so the log file is flushed and the
        # root logger is left clean for any subsequent call.
        for hdlr in root.handlers[:]:
            hdlr.close()
            root.removeHandler(hdlr)


# --------------MAIN---------------

_old_send = discord.abc.Messageable.send


async def new_send(self, content=None, *, allow_everyone=False, **kwargs):
    """Patched Messageable.send that neuters @everyone/@here mass mentions
    unless the caller explicitly opts in with allow_everyone=True.
    """
    if content is not None and not allow_everyone:
        # Insert a zero-width space so Discord does not parse the mention.
        content = (str(content)
                   .replace('@everyone', '@\u200beveryone')
                   .replace('@here', '@\u200bhere'))
    return await _old_send(self, content, **kwargs)


@click.group(invoke_without_command=True)
@click.option('--log-stream', is_flag=True, help='Adds a stderr stream-handler for logging')
@click.pass_context
def main(ctx, log_stream):
    """Run the bot by default, or dispatch to a maintenance subcommand."""
    if ctx.invoked_subcommand is not None:
        return

    # This has to be patched first because Chiaki loads her extensions in
    # __init__, which means she loads her commands in __init__
    from discord.ext import commands
    old_commands_group = commands.group
    commands.group = functools.partial(old_commands_group, case_insensitive=True)

    bot = Chiaki()
    discord.abc.Messageable.send = new_send
    with log(log_stream):
        try:
            bot.run()
        finally:
            # Undo the monkey-patches even if the bot crashes.
            discord.abc.Messageable.send = _old_send
            commands.group = old_commands_group
    # Non-zero exit status tells the supervising process to restart the bot.
    return 69 * bot.reset_requested


# ------------- DB-related stuff ------------------

async def _create_pool():
    """Create the connection pool from the settings in the config module."""
    psql = (f'postgresql://{config.psql_user}:{config.psql_pass}'
            f'@{config.psql_host}/{config.psql_db}')
    return await db.create_pool(psql, command_timeout=60)


def _load_modules(extensions):
    """Import every extension module (expanding packages via find_extensions)
    so their definitions are registered before running migrations.
    """
    modules = itertools.chain.from_iterable(
        Chiaki.find_extensions(e) or [e] for e in extensions
    )
    for module in modules:
        try:
            importlib.import_module(module)
        except Exception:
            # Was a bare `except:` -- that would also intercept
            # KeyboardInterrupt/SystemExit; Exception suffices since we
            # re-raise after reporting.
            click.echo(f'Could not load {module}.\n{traceback.format_exc()}', err=True)
            raise


async def _migrate(version='', downgrade=False, verbose=False):
    """Migrate the database up (or down) to the given version (latest if empty)."""
    # click doesn't like None as a default so we have to settle with an empty string
    if not version:
        version = None

    _load_modules(config.extensions)
    pool = await _create_pool()
    async with pool.acquire() as conn:
        await migration.migrate(version, connection=conn,
                                downgrade=downgrade, verbose=verbose)


def _sync_migrate(version, downgrade, verbose):
    """Synchronous wrapper around _migrate for the click commands."""
    run = asyncio.get_event_loop().run_until_complete
    run(_migrate(version, downgrade=downgrade, verbose=verbose))


@main.command()
@click.option('--version', default='', metavar='[version]',
              help='Version to migrate to, defaults to latest')
@click.option('-v', '--verbose', is_flag=True)
def upgrade(version, verbose):
    """Upgrade the database to a version"""
    _sync_migrate(version, downgrade=False, verbose=verbose)
    click.echo('Upgrade successful! <3')


@main.command()
@click.option('--version', default='', metavar='[version]',
              help='Version to migrate to, defaults to latest')
@click.option('-v', '--verbose', is_flag=True)
def downgrade(version, verbose):
    """Downgrade the database to a version"""
    _sync_migrate(version, downgrade=True, verbose=verbose)
    click.echo('Downgrade successful! <3')


async def _init(verbose):
    """Create the initial database schema for every registered extension."""
    _load_modules(config.extensions)
    pool = await _create_pool()
    async with pool.acquire() as conn:
        await migration.init(connection=conn, verbose=verbose)


@main.command(name='init-db')
@click.option('-v', '--verbose', is_flag=True)
def init_db(verbose):
    """Initialize the database"""
    run = asyncio.get_event_loop().run_until_complete
    run(_init(verbose))
    click.echo('Database initialization successful! <3')


if __name__ == '__main__':
    sys.exit(main())
# -*- coding: utf-8 -*-
"""
pipya main cli file
Copyright (c) 2015 Brunston Poon

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
Full license in LICENCE.txt
"""

#IMPORT IMPORT IMPORT IMPORT IMPORT IMPORT IMPORT IMPORT IMPORT IMPORT IMPORT

# BUG FIX: stdlib imports must precede the feedparser guard -- the original
# called sys.exit() in the except branch before `import sys` had run, which
# raised NameError instead of exiting cleanly.
import sys
import os
import time
import json
import webbrowser

#Let's make sure that the user has all the dependencies installed and that
#they are running the correct version of Python
try:
    import feedparser
except ImportError:
    print("feedparser is a system-agnostic dependency for RSS support")
    sys.exit()

import helper as h
import fetch

toggle = True
version = sys.version_info[0]
if version != 3:
    print("""
Please upgrade to Python3, preferably 3.4.* or greater, before continuing""")
    toggle = False
    sys.exit()

if os.name == "nt":
    try:
        from colorama import init
        init()
    except ImportError:
        print("colorama is a windows dependency for ANSI colors support")
        sys.exit()


def main():
    """Interactive command loop: prompt the user and dispatch on keywords."""
    wapi, user, citystr, newslink = h.kernfig()
    h.welcome(user)
    while True:
        # Re-read config each turn so "set" changes take effect immediately.
        wapi, user, citystr, newslink = h.kernfig()
        print(h.ask())
        uin = input(">").lower()
        if "fetch" in uin:
            fetch.main(uin)
        elif "visit" in uin:
            for item_index in h.giveComputerIndex(uin):
                newsfeed = h.grab(newslink)
                webbrowser.open(newsfeed.entries[item_index - 1].link)
        elif "set" in uin:
            if "name" in uin:
                name = input("What would you like me to call you? ")
                h.cfgwriter("settings.cfg", 0, name)
            if "city" in uin:
                city = input("""
Changing weather location? Where to? Must be in Wunderground form.
""")
                h.cfgwriter("settings.cfg", 1, city)
        # BUG FIX: conditions such as `"name" and "pronounce" in uin` only
        # tested the final membership (a non-empty literal is always truthy),
        # so e.g. any input containing "pronounce" matched regardless of
        # "name". Every keyword is now tested against the input explicitly.
        elif "name" in uin and "pronounce" in uin:
            print(h.pipya() + "My name is pronounced Pip-pah. The y is silent :).")
        elif "name" in uin and ("how" in uin or "where" in uin) and "get" in uin:
            print(h.pipya() + """\
My name started as pypa, for "python personal assistant". It morphed to
pipya for pronounceability. Thanks for asking!""")
        elif "what" in uin and "can" in uin and "do" in uin:
            h.capabilities()
        elif "who" in uin and "are" in uin and ("you" in uin or "pipya" in uin):
            print(h.pipya() + """
I am Pipya, a personal assistant written in python3. My creator is brupoon.
He intended for me to be a jack-of-all-trades personal assistant operated
by a cli. I am a sexless, genderless entity, though my name is similar to
the human feminine "Pippa".
""")
        elif uin in ["quit", "goodbye", "exit"]:
            print("Goodbye, {0}! 'Till next time.".format(user))
            sys.exit()
        elif uin in ["jellyfish"]:
            h.jellyfish()
        else:
            print("Pipya: Sorry, {0}, I didn't quite catch that.".format(user))


if (__name__ == '__main__') and toggle:
    main()
hinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig', 'sphinxcontrib.httpdomain' ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'Octofiles' copyright = '2016, Hackultura' author = 'Hackultura' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.0.0' # The full version, including alpha/beta/rc tags. release = '1.0.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = 'pt_BR' # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). 
#add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (relative to this directory) to use as a favicon of # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. 
These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. 
#html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'Octofilesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'Octofiles.tex', 'Octofiles Documentation', 'Hackultura', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'octofiles', 'Octofiles Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for
Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'Octofiles', 'Octofiles Documentation', author, 'Octofiles', 'One line description of pr
oject.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How t
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""Minimal Flask application bootstrap."""
from flask import Flask
# NOTE(review): this wildcard import looks redundant -- the config module is
# loaded again below via app.config.from_object; confirm nothing in this
# package relies on the names it injects into this module's namespace.
from config import *

# create the app
app = Flask(__name__)

# Load default config and override config from config file
app.config.from_object('config')
# -*- coding: utf-8 -*-
###############################################################################
#
# GetLoginProfile
# Retrieves the user name and password create date for the specified user.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################

from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution

import json

class GetLoginProfile(Choreography):
    # Thin Choreography subclass: binds this client to the IAM GetLoginProfile
    # endpoint and supplies factories for its input/result/execution types.

    def __init__(self, temboo_session):
        """
        Create a new instance of the GetLoginProfile Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(GetLoginProfile, self).__init__(temboo_session, '/Library/Amazon/IAM/GetLoginProfile')

    def new_input_set(self):
        # Factory for the InputSet specific to this Choreo.
        return GetLoginProfileInputSet()

    def _make_result_set(self, result, path):
        # Factory used by the framework to wrap a raw result.
        return GetLoginProfileResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Factory used by the framework to track an async execution.
        return GetLoginProfileChoreographyExecution(session, exec_id, path)

class GetLoginProfileInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the GetLoginProfile
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    def set_AWSAccessKeyId(self, value):
        """
        Set the value of the AWSAccessKeyId input for this Choreo. ((required, string) The Access Key ID provided by Amazon Web Services.)
        """
        super(GetLoginProfileInputSet, self)._set_input('AWSAccessKeyId', value)

    def set_AWSSecretKeyId(self, value):
        """
        Set the value of the AWSSecretKeyId input for this Choreo. ((required, string) The Secret Key ID provided by Amazon Web Services.)
        """
        super(GetLoginProfileInputSet, self)._set_input('AWSSecretKeyId', value)

    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are "xml" (the default) and "json".)
        """
        super(GetLoginProfileInputSet, self)._set_input('ResponseFormat', value)

    def set_UserName(self, value):
        """
        Set the value of the UserName input for this Choreo. ((required, string) Name of the user whose login profile you want to retrieve.)
        """
        super(GetLoginProfileInputSet, self)._set_input('UserName', value)

class GetLoginProfileResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetLoginProfile Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    # NOTE(review): parameter name shadows the builtin `str`; left unchanged
    # because this is generated SDK code and the name is part of the
    # published signature.
    def getJSONFromString(self, str):
        """
        Parse a JSON response string into a Python object.
        """
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Amazon.)
        """
        return self._output.get('Response', None)

class GetLoginProfileChoreographyExecution(ChoreographyExecution):
    # Pairs an async execution with its result-set factory.

    def _make_result_set(self, response, path):
        return GetLoginProfileResultSet(response, path)
: self.next = self.__this_string[1] else: self.next = None def step(self): """ remove the first char from the string """ self.__this_string = self.__this_string[1:] self.__setup() return self.this def step2(self): """ remove the first two chars from the string """ self.__this_string = self.__this_string[2:] self.__setup() return self.this def remove_start_end(self, start, end): """ Removes a start, end block from the string if there """ if self.this == start: self.text_to_next(end) def __get_a_char_of_text(self): """ Removes one char of TEXT from the string and returns it. """ if self.this == "\\": if self.next == None: rtrn = "\\" else: rtrn = self.next self.step2() else: rtrn = self.this self.step() return rtrn def text_to_next(self, char): """ return/remove a format strings from here """ new_str = "" while self.this is not None and self.this != char: new_str += self.__get_a_char_of_text() if self.this == char: self.step() return new_str def is_a(self): return True def parse_format(self): rtrn = self.__get_a_char_of_text() if rtrn: return rtrn return '' #------------------------------------------------------------------------ # # VarString class - The Output string class # #------------------------------------------------------------------------ class VarString(object): """ The current state of the entire string (integer from TextTypes) A list to hold tuple object (integer from TextTypes, string) This will contain the string that will be displayed. or string out. it is used for groups and format strings. """ def __init__(self, start_state = TXT.remove): self.state = start_state #overall state of the string. 
self._text = [] #list of tuples (TXT.?, string) def __update_state(self, new_status): if new_status > self.state: self.state = new_status def add_text(self, text): self._text.append((TXT.text, text)) def add_variable(self, text): self.state = TXT.display self._text.append((TXT.text, text)) def add_remove(self): self.__update_state(TXT.remove) self._text.append((TXT.remove, "")) def add_separator(self, text): self._text.append((TXT.separator, text)) def get_final(self): #if self.state == TXT.remove: # return (TXT.remove, "") curr_string = "" index = 0 while index < len(self._text): if self._text[index][0] == TXT.text: curr_string += self._text[index][1] index = index + 1 continue #while self._text: if index +1 ==
len(self._text): if self._text[index][0] == TXT.separator and curr_string != '': curr_string +
= self._text[index][1] index = index + 1 break #while self._text: type_0_1 = (self._text[index][0], self._text[index+1][0]) #if type_0_1 == (TXT.remove, TXT.remove): # pass if type_0_1 == (TXT.remove, TXT.separator): index = index + 1 #elif type_0_1 == (TXT.remove, TXT.text): # pass elif type_0_1 == (TXT.separator, TXT.remove): index = index + 1 #elif type_0_1 == (TXT.separator, TXT.separator): # pass elif type_0_1 == (TXT.separator, TXT.text): curr_string += self._text[index][1] #else: # print "#oops Should never get here." index = index + 1 #return what we have return (self.state, curr_string) print("===" + str(self.state) + " '" + str(curr_string) + "'") def extend(self, acquisition): """ acquisition is a VarString object Merge the content of acquisition into this place. """ self.__update_state(acquisition.state) if acquisition.state != TXT.display: #The sub {} was TXT.remove. We don't want to simply ignore it. self.add_remove() #add a remove que here to note it. return self._text.extend(acquisition._text) #------------------------------------------------------------------------ # # Parsers # #------------------------------------------------------------------------ #------------------------------------------------------------------------ # SeparatorParse #------------------------------------------------------------------------ class SeparatorParse(object): """ parse out a separator """ def __init__(self, consumer_in): self._in = consumer_in def is_a(self): return self._in.this == "<" def parse_format(self): if not self.is_a(): return """ get the text and return it """ self._in.step() return self._in.text_to_next(">") #------------------------------------------------------------------------ # AttributeParse #------------------------------------------------------------------------ class AttributeParse(object): """ Parse attributes """ def __init__(self, consumer_in): self._in = consumer_in def get_name(self): """ Gets a name inside a [] block """ if self._in.this 
!= "[": return self._in.step() return self._in.text_to_next("]") def get_attribute(self, attrib_list, attrib_name): """ Get an attribute by name """ if attrib_name == "": return for attr in attrib_list: if str(attr.get_type()) == attrib_name: return str(attr.get_value()) return def is_a(self): """ check """ return self._in.this == "a" def parse_format(self, attrib_list): """ Get the attribute and add it to the string out """ name = self.get_name() return self.get_attribute(attrib_list, name) #------------------------------------------------------------------------ # VariableParse #------------------------------------------------------------------------ class VariableParse(object): """ Parse the individual variables """ def __init__(self, friend, database, consumer_in): self.friend = friend self.database = database self._in = consumer_in def is_a(self): """ check """ return self._in.this == "$" and self._in.next is not None and \ "nsijbBdDmMvVauetTpP".find(self._in.next) != -1 def get_event_by_type(self, marriage, e_type): """ get an event from a type """ if marriage is None: return None for e_ref in marriage.get_event_ref_list(): if not e_ref: continue event = self.friend.database.get_event_from_handle(e_ref.ref) if event.get_type() == e_type: return event return None def get_event_by_name(self, person, event_name): """ get an event from a name. """ if not person: return None for e_ref in person.get_event_ref_list(): if not e_ref: continue event = self.friend.database.get_event_from_handle(e_ref.ref) if event.get_type().is_type(event_name): return event return None def empty_item(self, item): """ return false if there is a valid item(date or place). Otherwise add a TXT.remove marker in the output string remove any format strings from the input string """
myKeywords): """prompt user for a decision on which attribute has to be the key attribute for the aggregated data and writes the keywords file This could be swapped to a call to the keyword editor Args: None Returns: the value of the aggregation attribute keyword. None if no usable attribute has been found Raises: Propagates any error """ if myKeywords is None: myKeywords = dict() vProvider = myLayer.dataProvider() vFields = vProvider.fields() fields = [] for i in vFields: # show only int or string fields to be chosen as aggregation # attribute other possible would be float if vFields[i].type() in [ QtCore.QVariant.Int, QtCore.QVariant.String]: fields.append(vFields[i].name()) #there is no usable attribute, use None if len(fields) == 0: aggrAttribute = None logOnQgsMessageLog( 'there is no usable attribute, use None') #there is only one usable attribute, use it elif len(fields) == 1: aggrAttribute = fields[0] logOnQgsMessageLog('there is only one usable attribute, ' 'use it: ' + str(aggrAttribute)) #there are multiple usable attribute, prompt for an answer elif len(fields) > 1: myTitle = self.tr( 'Waiting for aggregation attribute selection...') myMessage = self.tr('Please select which attribute you want to' ' use as ID for the aggregated results') myProgress = 1 self.showBusy(myTitle, myMessage, myProgress) #open a AggregationAttributeDialog dialog = QtGui.QDialog() #remove all windows hints to avoid allowing for cancelling the # dialog dialog.setWindowFlags(QtCore.Qt.CustomizeWindowHint) dialogGui = Ui_AggregationAttributeDialogBase() dialogGui.setupUi(dialog) dialogGui.buttonBox.button( QtGui.QDialogButtonBox.Cancel).setHidden(True) cboAggr = dialogGui.cboAggregationAttributes cboAggr.clear() cboAggr.addItems(fields) cboAggr.setCurrentIndex(0) self.disableBusyCursor() if dialog.exec_() == QtGui.QDialog.Accepted: aggrAttribute = cboAggr.currentText() logOnQgsMessageLog('User selected: ' + str(aggrAttribute) + ' as aggregation attribute') else: #the user 
cancelled, use the first attribute as default aggrAttribute = fields[0] # myMessage = self.tr( # 'You have to select an aggregation attribute') # raise InvalidParameterException(myMessage) self.enableBusyCursor() myKeywords['aggregation attribute'] = aggrAttribute write_keywords(myKeywords, myKeywordFilePath) return aggrAttribute def _completed(self): """Helper function for slot activated when the process is done. Args None Returns Report to render on canvas Raises Exceptions on a range of error conditions Provides report out from impact_function to canvas """ myTitle = self.tr('Loading results...') myMessage = self.tr('The impact assessment is complete - loading ' 'the results into QGIS now...') myProgress = 99 self.showBusy(myTitle, myMessage, myProgress) m
yMessage = self.runner.result() # FIXME (Ole): This branch is not covered by the tests myEngineImpactLayer = self.runner.impactLayer() if myEngineImpactLayer is None: myMessage = str(self.tr('No impact layer was c
alculated. ' 'Error message: %s\n' % str(myMessage))) if self.runner.lastTraceback() is not None: myMessage += '<br/><ul>' for myItem in self.runner.lastTraceback(): # replace is to tidy up windows paths a little myMessage += ('<li>' + str(myItem.replace('\\\\\\\\', '')) + '</li>') myMessage += '</ul>' raise Exception(myMessage, self.runner.lastException()) # Load impact layer into QGIS myQgisImpactLayer = self.readImpactLayer(myEngineImpactLayer) myKeywords = self.keywordIO.readKeywords(myQgisImpactLayer) #write postprocessing report to keyword myKeywords['postprocessing_report'] = self.getPostprocessingOutput() self.keywordIO.writeKeywords(myQgisImpactLayer, myKeywords) # Get tabular information from impact layer myReport = self.keywordIO.readKeywords(myQgisImpactLayer, 'impact_summary') # Get requested style for impact layer of either kind myStyle = myEngineImpactLayer.get_style_info() # Determine styling for QGIS layer if myEngineImpactLayer.is_vector: if not myStyle: # Set default style if possible pass else: setVectorStyle(myQgisImpactLayer, myStyle) elif myEngineImpactLayer.is_raster: if not myStyle: myQgisImpactLayer.setDrawingStyle( QgsRasterLayer.SingleBandPseudoColor) myQgisImpactLayer.setColorShadingAlgorithm( QgsRasterLayer.PseudoColorShader) else: setRasterStyle(myQgisImpactLayer, myStyle) else: myMessage = self.tr('Impact layer %s was neither a raster or a ' 'vector layer' % myQgisImpactLayer.source()) raise ReadLayerError(myMessage) # Add layer to QGIS QgsMapLayerRegistry.instance().addMapLayer(myQgisImpactLayer) # then zoom to it if self.zoomToImpactFlag: self.iface.zoomToActiveLayer() if self.hideExposureFlag: myExposureLayer = self.getExposureLayer() myLegend = self.iface.legendInterface() myLegend.setLayerVisible(myExposureLayer, False) self.restoreState() #append postprocessing report myReport += self.getPostprocessingOutput() # Return text to display in report pane return myReport def showHelp(self): """Load the help text into the wvResults 
widget""" if not self.helpDialog: self.helpDialog = Help(self.iface.mainWindow(), 'dock') self.helpDialog.show() def showBusy(self, theTitle=None, theMessage=None, theProgress=0): """A helper function to indicate the plugin is processing. Args: * theTitle - an optional title for the status update. Should be plain text only * theMessage - an optional message to pass to the busy indicator. Can be an html snippet. * theProgress - a number between 0 and 100 indicating % complete Returns: None Raises: Any exceptions raised by the RIAB library will be propagated. ..note:: Uses bootstrap css for progress bar. """ #self.pbnRunStop.setText('Cancel') self.pbnRunStop.setEnabled(False) if theTitle is None: theTitle = self.tr('Analyzing this question...') myHtml = ('<table class="condensed">' ' <tr>' ' <th class="info button-cell">' + str(theTitle) + ' </th>' ' </tr>' ' <tr>' ' <td>' + str(theMessage) + ' </td>' ' </tr>' ' <tr>' ' <td>' ' <div class="progress">' '
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):
    """Rename `state` to `stateAbbreviation` and add a `stateName` text field
    on both the `readonly` and `writeonly` models.
    """

    dependencies = [
        ('data_storage', '0002_auto_20150727_2312'),
    ]

    operations = [
        migrations.RenameField(
            model_name='readonly',
            old_name='state',
            new_name='stateAbbreviation',
        ),
        migrations.RenameField(
            model_name='writeonly',
            old_name='state',
            new_name='stateAbbreviation',
        ),
        # The defaults below only backfill existing rows during this
        # migration; preserve_default=False removes them from the model.
        migrations.AddField(
            model_name='readonly',
            name='stateName',
            field=models.TextField(default='sexy jens'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='writeonly',
            name='stateName',
            field=models.TextField(default='sexy cindy'),
            preserve_default=False,
        ),
    ]
#!/usr/bin/python
# find_unicode.py
#
# Author: Jonathan Hosmer
# Date: Sun Feb 15 14:06:15 2015
#
# NOTE(review): this is Python 2 code (print statements, py2 str/ord
# semantics); it will not run under Python 3 as-is.
import os
import sys

def help():
    # NOTE(review): shadows the builtin help(); harmless in a script but
    # worth renaming if this ever becomes importable.
    print 'Usage: {} [file, [dir, [file, ..]]]'.format(__file__)
    print 'Displays line:character position of all non-ascii Unicode character(s) in a file'

def main():
    """Scan the given files (or stdin) and print the line:column position of
    every run of characters with ord > 126."""
    args = sys.argv[1:]
    if '-h' in args or '--help' in args:
        sys.exit(help())
    if not args:
        files = [sys.stdin]
        long_fname = len('stdin') + 1
    else:
        # make a flat list of all files
        ## if a dir is given as an arg then take all files in that dir [non-recursive]
        files = [f for f in args if os.path.isfile(f)] + [x for y in [[os.path.join(d, f) for f in os.listdir(d) if os.path.isfile(os.path.join(d, f))] for d in args if os.path.isdir(d)] for x in y]
        long_fname = max(map(len, files)) + 1
    chars = []
    # Column widths are derived from the longest file name seen.
    out_str = '{{:<{}}} {{:03}}:{{:04}} {{chars:^5}} {{chars!r:^13}}'.format(long_fname)
    if sys.stdin not in files:
        header = '{{:^{}}} {{}}:{{}} {{:^5}} {{:^10}}'.format(long_fname)
        head_out = header.format('File', 'Line', 'Col', 'char', '(ord)')
        print head_out + '\n' + '-'*len(head_out)
    for f in files:
        if f is sys.stdin:
            infile = sys.stdin
            fname = 'stdin'
        else:
            fname = f
            # NOTE(review): opened files are never closed.
            infile = open(f)
        for line_i, line in enumerate(infile):
            for char_i, char in enumerate(line):
                if ord(char) > 126:
                    # Accumulate a run of consecutive non-ASCII characters;
                    # the run is reported when the next ASCII char is seen.
                    chars.append(char)
                else:
                    if chars:
                        print out_str.format(fname, line_i+1, char_i+1-len(chars), chars=''.join(chars))
                        chars = []
    # NOTE(review): a non-ASCII run at end-of-input (no trailing ASCII char)
    # is never flushed, and `chars` also carries over between files -- both
    # look like bugs; confirm before fixing.

if __name__ == '__main__':
    sys.exit(main())
import json
import re
import unittest
from datetime import datetime

from coalib.output.JSONEncoder import create_json_encoder


class TestClass1(object):
    """Plain object with a single public attribute."""

    def __init__(self):
        self.a = 0


class TestClass2(object):
    """Object that nests another encodable object."""

    def __init__(self):
        self.a = 0
        self.b = TestClass1()


class TestClass3(object):
    """Object that additionally exposes a mapping interface."""

    def __init__(self):
        self.a = 0
        self.b = TestClass1()

    @staticmethod
    def __getitem__(key):
        return 'val'

    @staticmethod
    def keys():
        return ['key']


class PropertiedClass(object):
    """Object whose only public surface is a read-only property."""

    def __init__(self):
        self._a = 5

    @property
    def prop(self):
        return self._a


class JSONAbleClass(object):
    """Object that serializes itself through a ``__json__`` hook."""

    @staticmethod
    def __json__():
        return ['dont', 'panic']


class JSONEncoderTest(unittest.TestCase):
    """Exercise the encoder produced by ``create_json_encoder``."""

    JSONEncoder = create_json_encoder(use_relpath=True)
    kw = dict(cls=JSONEncoder, sort_keys=True)

    def _dump(self, obj):
        # Every test serializes through the same encoder configuration.
        return json.dumps(obj, **self.kw)

    def test_builtins(self):
        self.assertEqual(self._dump('test'), '"test"')
        self.assertEqual(self._dump(1), '1')
        self.assertEqual(self._dump(True), 'true')
        self.assertEqual(self._dump(None), 'null')

    def test_iter(self):
        # Lists, tuples and generic iterables all encode as JSON arrays.
        for value in ([0, 1], (0, 1), range(2)):
            self.assertEqual(self._dump(value), '[0, 1]')

    def test_dict(self):
        # Non-string keys are coerced to strings.
        self.assertEqual(self._dump({0: 1}), '{"0": 1}')
        self.assertEqual(self._dump({'0': 1}), '{"0": 1}')
        self.assertEqual(self._dump({'0': '1'}), '{"0": "1"}')

    def test_time(self):
        stamp = datetime.today()
        self.assertEqual(self._dump(stamp), '"' + stamp.isoformat() + '"')

    def test_re_object(self):
        pattern = re.compile('x')
        self.assertEqual(self._dump(pattern), '"' + pattern.pattern + '"')

    def test_class1(self):
        instance = TestClass1()
        self.assertEqual(self._dump(instance), '{"a": 0}')
        self.assertEqual(self._dump([instance]), '[{"a": 0}]')
        self.assertEqual(self._dump({0: instance}), '{"0": {"a": 0}}')

    def test_class2(self):
        # Nested objects are encoded recursively.
        self.assertEqual(self._dump(TestClass2()), '{"a": 0, "b": {"a": 0}}')

    def test_class3(self):
        # The mapping interface (keys/__getitem__) wins over __dict__.
        self.assertEqual(self._dump(TestClass3()), '{"key": "val"}')

    def test_propertied_class(self):
        self.assertEqual(self._dump(PropertiedClass()), '{"prop": 5}')

    def test_jsonable_class(self):
        self.assertEqual(self._dump(JSONAbleClass()), '["dont", "panic"]')

    def test_type_error(self):
        # Complex numbers have no defined serialization.
        with self.assertRaises(TypeError):
            self._dump(1j)
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
Views for managing Neutron Routers.
"""
from collections import OrderedDict

from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _

from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tables
from horizon import tabs
from horizon.utils import memoized

from openstack_dashboard import api
from openstack_dashboard.utils import filters

from openstack_dashboard.dashboards.project.routers\
    import forms as project_forms
from openstack_dashboard.dashboards.project.routers import tables as rtables
from openstack_dashboard.dashboards.project.routers import tabs as rdtabs


class IndexView(tables.DataTableView):
    """Table view listing the current tenant's routers."""
    table_class = rtables.RoutersTable
    template_name = 'project/routers/index.html'
    page_title = _("Routers")
    # Maps the user-visible filter value to the API boolean.
    FILTERS_MAPPING = {'admin_state_up': {_("up"): True, _("down"): False}}

    def _get_routers(self, search_opts=None):
        # Fetch routers for the tenant, then annotate each with its display
        # name and its external-gateway network name.
        try:
            search_opts = self.get_filters(
                filters=search_opts, filters_map=self.FILTERS_MAPPING)
            tenant_id = self.request.user.tenant_id
            routers = api.neutron.router_list(self.request,
                                              tenant_id=tenant_id,
                                              **search_opts)
        except Exception:
            routers = []
            exceptions.handle(self.request,
                              _('Unable to retrieve router list.'))

        ext_net_dict = self._list_external_networks()

        for r in routers:
            r.name = r.name_or_id
            self._set_external_network(r, ext_net_dict)
        return routers

    def get_data(self):
        """Return the router rows for the table."""
        routers = self._get_routers()
        return routers

    def _list_external_networks(self):
        # Build {network_id: display name} for all external networks; empty
        # dict on failure so callers degrade gracefully.
        try:
            search_opts = {'router:external': True}
            ext_nets = api.neutron.network_list(self.request,
                                                **search_opts)
            ext_net_dict = OrderedDict((n['id'], n.name_or_id)
                                       for n in ext_nets)
        except Exception as e:
            msg = _('Unable to retrieve a list of external networks '
                    '"%s".') % e
            exceptions.handle(self.request, msg)
            ext_net_dict = {}
        return ext_net_dict

    def _set_external_network(self, router, ext_net_dict):
        # Replace the gateway's network id with its display name (in place).
        gateway_info = router.external_gateway_info
        if gateway_info:
            ext_net_id = gateway_info['network_id']
            if ext_net_id in ext_net_dict:
                gateway_info['network'] = ext_net_dict[ext_net_id]
            else:
                msg_params = {'ext_net_id': ext_net_id,
                              'router_id': router.id}
                msg = _('External network "%(ext_net_id)s" expected but not '
                        'found for router "%(router_id)s".') % msg_params
                messages.error(self.request, msg)
                # gateway_info['network'] is just the network name, so putting
                # in a smallish error message in the table is reasonable.
                # Translators: The usage is "<UUID of ext_net> (Not Found)"
                gateway_info['network'] = pgettext_lazy(
                    'External network not found',
                    u'%s (Not Found)') % ext_net_id


class DetailView(tabs.TabbedTableView):
    """Tabbed detail view for a single router."""
    tab_group_class = rdtabs.RouterDetailTabs
    template_name = 'horizon/common/_detail.html'
    failure_url = reverse_lazy('horizon:project:routers:index')
    network_url = 'horizon:project:networks:detail'
    page_title = "{{ router.name|default:router.id }}"

    @memoized.memoized_method
    def _get_data(self):
        # Fetch the router once per request (memoized) and annotate its
        # external gateway with a URL and display name.
        try:
            router_id = self.kwargs['router_id']
            router = api.neutron.router_get(self.request, router_id)
            router.set_id_as_name_if_empty(length=0)
        except Exception:
            msg = _('Unable to retrieve details for router "%s".') \
                % router_id
            exceptions.handle(self.request, msg, redirect=self.failure_url)
        if router.external_gateway_info:
            ext_net_id = router.external_gateway_info['network_id']
            router.external_gateway_info['network_url'] = reverse(
                self.network_url, args=[ext_net_id])
            try:
                ext_net = api.neutron.network_get(self.request, ext_net_id,
                                                  expand_subnet=False)
                ext_net.set_id_as_name_if_empty(length=0)
                router.external_gateway_info['network'] = ext_net.name
            except Exception:
                # Fall back to the raw id when the network can't be fetched.
                msg = _('Unable to retrieve an external network "%s".') \
                    % ext_net_id
                exceptions.handle(self.request, msg)
                router.external_gateway_info['network'] = ext_net_id
        return router

    @memoized.memoized_method
    def _get_ports(self):
        # Ports attached to this router; empty list on failure.
        try:
            ports = api.neutron.port_list(self.request,
                                          device_id=self.kwargs['router_id'])
        except Exception:
            ports = []
            msg = _('Unable to retrieve port details.')
            exceptions.handle(self.request, msg)
        return ports

    def get_context_data(self, **kwargs):
        """Add the router, row actions, feature flags and status labels."""
        context = super(DetailView, self).get_context_data(**kwargs)
        router = self._get_data()
        table = rtables.RoutersTable(self.request)
        context["router"] = router
        context["url"] = self.failure_url
        context["actions"] = table.render_row_actions(router)
        context['dvr_supported'] = api.neutron.get_feature_permission(
            self.request, "dvr", "get")
        context['ha_supported'] = api.neutron.get_feature_permission(
            self.request, "l3-ha", "get")
        choices = rtables.STATUS_DISPLAY_CHOICES
        router.status_label = filters.get_display_label(choices,
                                                        router.status)
        choices = rtables.ADMIN_STATE_DISPLAY_CHOICES
        router.admin_state_label = (
            filters.get_display_label(choices, router.admin_state))
        return context

    def get_tabs(self, request, *args, **kwargs):
        """Build the tab group with the memoized router and ports."""
        router = self._get_data()
        ports = self._get_ports()
        return self.tab_group_class(request, router=router,
                                    ports=ports, **kwargs)


class CreateView(forms.ModalFormView):
    """Modal form for creating a router."""
    form_class = project_forms.CreateForm
    form_id = "create_router_form"
    modal_header = _("Create Router")
    template_name = 'project/routers/create.html'
    success_url = reverse_lazy("horizon:project:routers:index")
    page_title = _("Create Router")
    submit_label = _("Create Router")
    submit_url = reverse_lazy("horizon:project:routers:create")


class UpdateView(forms.ModalFormView):
    """Modal form for editing an existing router."""
    form_class = project_forms.UpdateForm
    form_id = "update_router_form"
    modal_header = _("Edit Router")
    template_name = 'project/routers/update.html'
    success_url = reverse_lazy("horizon:project:routers:index")
    page_title = _("Update Router")
    submit_label = _("Save Changes")
    # URL pattern name; resolved with the router id in get_context_data.
    submit_url = "horizon:project:routers:update"

    def get_context_data(self, **kwargs):
        context = super(UpdateView, self).get_context_data(**kwargs)
        args = (self.kwargs['router_id'],)
        context["router_id"] = self.kwargs['router_id']
        context['submit_url'] = reverse(self.submit_url, args=args)
        return context

    def _get_object(self, *args, **kwargs):
        # NOTE(review): the body of this method is truncated in this source
        # chunk; the remainder of the file is not visible here.
#!/usr/bin/python
AGO_TELLSTICK_VERSION = '0.0.9'
"""
############################################
#
# Tellstick Duo class
#
# Date of origin: 2014-01-25
#
__author__ = "Joakim Lindbom"
__copyright__ = "Copyright 2014, Joakim Lindbom"
__credits__ = ["Joakim Lindbom", "The ago control team"]
__license__ = "GPL Public License Version 3"
__maintainer__ = "Joakim Lindbom"
__email__ = 'Joakim.Lindbom@gmail.com'
__status__ = "Experimental"
__version__ = AGO_TELLSTICK_VERSION
############################################
"""

from tellstickbase import tellstickbase
import td


class tellstickduo(tellstickbase):
    """Class used for Tellstick & Tellstick Duo devices"""

    # NOTE(review): self.log, self.sensors, self.switches, self.remotes and
    # self.ignoreDevices are read below but never assigned here — presumably
    # provided by the tellstickbase base class; verify there.

    def __get__(self, obj, objtype=None):
        pass

    def __set__(self, obj, val):
        pass

    def __delete__(self, obj):
        pass

    def __init__(self):
        # Callback registered via registerSensorEvent; invoked from
        # SensorEventInterceptor.
        self.SensorEvent = None

    def init(self, SensorPollDelay, TempUnits):
        # TELLSTICK_BELL | TELLSTICK_TOGGLE | TELLSTICK_LEARN | TELLSTICK_EXECUTE | TELLSTICK_UP | TELLSTICK_DOWN | TELLSTICK_STOP
        # Only on/off/dim are enabled as default methods here.
        td.init(defaultMethods=td.TELLSTICK_TURNON | td.TELLSTICK_TURNOFF | td.TELLSTICK_DIM)
        self.log.info("Init executed")

    def close(self):
        return td.close()

    def turnOn(self, devId):
        # Returns the telldus result string, lower-cased (e.g. "success").
        resCode = td.turnOn(devId)
        return self.getErrorString(resCode).lower()

    def turnOff(self, devId):
        resCode = td.turnOff(devId)
        return self.getErrorString(resCode).lower()

    def getErrorString(self, resCode):
        return td.getErrorString(resCode)

    def dim(self, devId, level):
        # level range is defined by the telldus API — TODO confirm (0-255?).
        resCode = td.dim(devId, level)
        return self.getErrorString(resCode).lower()

    def getName(self, devId):
        return td.getName(devId)

    def methodsReadable(self, method, default):
        return td.methodsReadable(method, default)

    def getNumberOfDevices(self):
        return td.getNumberOfDevices()

    def getNumberOfSensors(self):
        # NOTE(review): known bug (flagged by the original author): this
        # returns the device count, not the sensor count.
        return td.getNumberOfDevices()  # wrong

    def getDeviceId(self, i):
        return td.getDeviceId(i)

    def getModel(self, devId):
        return td.getModel(devId)

    def registerDeviceEvent(self, deviceEvent):
        return td.registerDeviceEvent(deviceEvent)

    def registerDeviceChangedEvent(self, deviceEvent):
        return td.registerDeviceChangedEvent(deviceEvent)

    def newTempSensor(self, devId, model, value):
        """Build the bookkeeping dict for a newly seen temperature sensor."""
        self.log.debug("New temperature sensor intercepted: devId=" + devId + " model=" + model)
        s = {
            "id"               : devId,
            "description"      : "",
            "model"            : model,
            "new"              : True,
            "temp"             : float(value),  # C/F
            "offset"           : 0.0,  # TODO: Add to parameter & config file
            "lastTemp"         : float(-274.0),
            "isTempSensor"     : True,
            "isHumiditySensor" : False,
            "ignore"           : False}
        return s

    def newHumiditySensor(self, devId, model, value):
        """Build the bookkeeping dict for a newly seen humidity sensor."""
        self.log.debug("New humidity sensor intercepted: devId=" + devId + " model=" + model)
        s = {
            "id"               : devId,
            "description"      : "",
            "model"            : model,
            "new"              : True,
            "humidity"         : float(value),
            "offset"           : 0.0,  # TODO: Add to parameter & config file
            "lastHumidity"     : float(-999.0),
            "isHumiditySensor" : True,
            "isTempSensor"     : False,
            "ignore"           : False}
        return s

    def SensorEventInterceptor(self, protocol, model, id, dataType, value, timestamp, callbackId):
        """Intercept raw telldus sensor events, auto-register unseen sensors,
        then forward the event to the user-registered callback."""
        devId = 'S' + str(id)  # Prefix 'S' to make sure name doesn't clash with self-defined devices
        devIdT = devId + "-temp"
        devIdH = devId + "-hum"
        # self.checkIgnore(self, devId) #TODO: Add once moved
        self.log.trace("SensorEventInterceptor called for " + devId)
        if str(id) not in self.ignoreDevices:
            # New temperature sensor?
            if devIdT not in self.sensors and dataType & td.TELLSTICK_TEMPERATURE == td.TELLSTICK_TEMPERATURE:
                self.sensors[devIdT] = self.newTempSensor(devIdT, model, value)
            # New humidity sensor?
            if devIdH not in self.sensors and dataType & td.TELLSTICK_HUMIDITY == td.TELLSTICK_HUMIDITY:
                self.sensors[devIdH] = self.newHumiditySensor(devIdH, model, value)
            # Call registered callback
            self.SensorEvent(protocol, model, devId, dataType, value, timestamp, callbackId)

    def registerSensorEvent(self, deviceEvent):
        # Keep the caller's callback and install our interceptor instead,
        # so new sensors are registered before the callback fires.
        self.SensorEvent = deviceEvent
        return td.registerSensorEvent(self.SensorEventInterceptor)

    def listSensors(self):
        """Poll telldus for sensors and merge unseen ones into self.sensors."""
        sensors = td.listSensors()
        if len(sensors) != 0:
            for id, value in sensors.iteritems():
                self.log.trace("listSensors: devId: %s ", str(id))
                if id not in self.ignoreDevices:
                    devId = str(id) + "-temp"
                    if devId not in self.sensors:
                        if value["isTempSensor"]:
                            # New temp sensor found
                            self.sensors[devId] = self.newTempSensor(devId, value["model"], value["temp"])
                    devId = str(id) + "-hum"
                    if devId not in self.sensors:
                        if value["isHumiditySensor"]:
                            # New hum sensor found
                            self.sensors[devId] = self.newHumiditySensor(devId, value["model"], value["humidity"])
                    if not value["new"]:
                        continue
        return self.sensors

    def listSwitches(self):
        """Enumerate switch/dimmer devices (lazy; cached in self.switches)."""
        if len(self.switches) == 0:
            for i in range(self.getNumberOfDevices()):
                devId = self.getDeviceId(i)
                model = self.getModel(devId)
                if ('switch' in model or 'dimmer' in model):
                    dev = {
                        "id"   : devId,
                        "name" : self.getName(devId),
                        "model": model}
                    if 'dimmer' in model:
                        dev["isDimmer"] = True
                    else:
                        dev["isDimmer"] = False
                    self.switches[devId] = dev
        return self.switches

    def listRemotes(self):
        """Enumerate non-switch devices, i.e. remotes (lazy; cached)."""
        self.log.trace("listRemotes start")
        if len(self.remotes) == 0:
            self.log.info("getNumberOfDevices=" + str(self.getNumberOfDevices()))
            for i in range(self.getNumberOfDevices()):
                devId = self.getDeviceId(i)
                model = self.getModel(devId)
                self.log.info("devId=" + str(devId) + " model=" + model)
                if 'switch' not in model and 'dimmer' not in model:
                    dev = {
                        "id"   : str(devId),
                        "name" : self.getName(devId),
                        "model": model}
                    self.log.info("devId=" + str(devId) + " model=" + model)
                    self.remotes[devId] = dev
        return self.remotes
        # NOTE(review): this chunk starts mid-file; the `def` line of this
        # mock handler (test_delete_container_not_found) is above this view.
        return (httplib.NOT_FOUND,
                body,
                headers,
                httplib.responses[httplib.NOT_FOUND])

    def _foo_bar_container_foo_bar_object_NOT_FOUND(self, method, url, body,
                                                    headers):
        # test_delete_object_not_found
        return (httplib.NOT_FOUND,
                body,
                headers,
                httplib.responses[httplib.NOT_FOUND])

    def _foo_bar_container_foo_bar_object_DELETE(self, method, url, body,
                                                 headers):
        # test_delete_object
        return (httplib.ACCEPTED,
                body,
                headers,
                httplib.responses[httplib.ACCEPTED])

    def _foo_bar_container_foo_test_upload(self, method, url, body, headers):
        # test_upload_object_success
        self._assert_content_length_header_is_string(headers=headers)
        body = ''
        headers = {}
        headers['etag'] = '0x8CFB877BB56A6FB'
        headers['content-md5'] = 'd4fe4c9829f7ca1cc89db7ad670d2bbd'
        return (httplib.CREATED,
                body,
                headers,
                httplib.responses[httplib.CREATED])

    def _foo_bar_container_foo_test_upload_block(self, method, url, body,
                                                 headers):
        # test_upload_object_success
        self._assert_content_length_header_is_string(headers=headers)
        body = ''
        headers = {}
        headers['etag'] = '0x8CFB877BB56A6FB'
        return (httplib.CREATED,
                body,
                headers,
                httplib.responses[httplib.CREATED])

    def _foo_bar_container_foo_test_upload_page(self, method, url, body,
                                                headers):
        # test_upload_object_success
        # NOTE(review): unlike the sibling handlers, this one does not assert
        # the Content-Length header type — presumably intentional for page
        # blobs; confirm.
        body = ''
        headers = {}
        headers['etag'] = '0x8CFB877BB56A6FB'
        return (httplib.CREATED,
                body,
                headers,
                httplib.responses[httplib.CREATED])

    def _foo_bar_container_foo_test_upload_blocklist(self, method, url, body,
                                                     headers):
        # test_upload_object_success
        self._assert_content_length_header_is_string(headers=headers)
        body = ''
        headers = {}
        headers['etag'] = '0x8CFB877BB56A6FB'
        headers['content-md5'] = 'd4fe4c9829f7ca1cc89db7ad670d2bbd'
        return (httplib.CREATED,
                body,
                headers,
                httplib.responses[httplib.CREATED])

    def _foo_bar_container_foo_test_upload_lease(self, method, url, body,
                                                 headers):
        # test_upload_object_success
        # Simulates Azure blob leasing: 'acquire' returns a lease id; any
        # other action requires the matching lease id in the request headers.
        self._assert_content_length_header_is_string(headers=headers)
        action = headers['x-ms-lease-action']
        rheaders = {'x-ms-lease-id': 'someleaseid'}
        body = ''
        if action == 'acquire':
            return (httplib.CREATED,
                    body,
                    rheaders,
                    httplib.responses[httplib.CREATED])
        else:
            if headers.get('x-ms-lease-id', None) != 'someleaseid':
                return (httplib.BAD_REQUEST,
                        body,
                        rheaders,
                        httplib.responses[httplib.BAD_REQUEST])
            # NOTE(review): status OK paired with the CREATED reason phrase —
            # looks inconsistent; confirm against the driver's expectations.
            return (httplib.OK,
                    body,
                    headers,
                    httplib.responses[httplib.CREATED])

    def _foo_bar_container_foo_test_upload_INVALID_HASH(self, method, url,
                                                        body, headers):
        # test_upload_object_invalid_hash1
        self._assert_content_length_header_is_string(headers=headers)
        body = ''
        headers = {}
        headers['etag'] = '0x8CFB877BB56A6FB'
        headers['content-md5'] = 'd4fe4c9829f7ca1cc89db7ad670d2bbd'
        return (httplib.CREATED,
                body,
                headers,
                httplib.responses[httplib.CREATED])

    def _foo_bar_container_foo_bar_object(self, method, url, body, headers):
        # test_upload_object_invalid_file_size
        self._assert_content_length_header_is_string(headers=headers)
        body = generate_random_data(1000)
        return (httplib.OK,
                body,
                headers,
                httplib.responses[httplib.OK])

    def _foo_bar_container_foo_bar_object_INVALID_SIZE(self, method, url,
                                                       body, headers):
        # test_upload_object_invalid_file_size
        self._assert_content_length_header_is_string(headers=headers)
        body = ''
        return (httplib.OK,
                body,
                headers,
                httplib.responses[httplib.OK])

    def _assert_content_length_header_is_string(self, headers):
        # Python 2 module: basestring covers both str and unicode.
        if 'Content-Length' in headers:
            self.assertTrue(isinstance(headers['Content-Length'], basestring))


class AzureBlobsTests(unittest.TestCase):
    driver_type = AzureBlobsStorageDriver
    driver_args = STORAGE_AZURE_BLOBS_PARAMS
    mock_response_klass = AzureBlobsMockHttp

    @classmethod
    def create_driver(self):
        # NOTE(review): classmethod whose first parameter is named `self`
        # rather than `cls` — works, but unconventional.
        return self.driver_type(*self.driver_args)

    def setUp(self):
        # Route all driver HTTP traffic through the mock class above.
        self.driver_type.connectionCls.conn_class = self.mock_response_klass
        self.mock_response_klass.type = None
        self.driver = self.create_driver()

    def tearDown(self):
        self._remove_test_file()

    def _remove_test_file(self):
        # Best-effort cleanup of the temp fixture file; missing file is fine.
        file_path = os.path.abspath(__file__) + '.temp'
        try:
            os.unlink(file_path)
        except OSError:
            pass

    def test_invalid_credentials(self):
        self.mock_response_klass.type = 'UNAUTHORIZED'
        try:
            self.driver.list_containers()
        except InvalidCredsError:
            e = sys.exc_info()[1]
            self.assertEqual(True, isinstance(e, InvalidCredsError))
        else:
            self.fail('Exception was not thrown')

    def test_list_containers_empty(self):
        self.mock_response_klass.type = 'list_containers_EMPTY'
        containers = self.driver.list_containers()
        self.assertEqual(len(containers), 0)

    def test_list_containers_success(self):
        self.mock_response_klass.type = 'list_containers'
        # Force pagination so the mock serves the list in two requests.
        AzureBlobsStorageDriver.RESPONSES_PER_REQUEST = 2

        containers = self.driver.list_containers()
        self.assertEqual(len(containers), 4)

        self.assertTrue('last_modified' in containers[1].extra)
        self.assertTrue('url' in containers[1].extra)
        self.assertTrue('etag' in containers[1].extra)
        self.assertTrue('lease' in containers[1].extra)
        self.assertTrue('meta_data' in containers[1].extra)

    def test_list_container_objects_empty(self):
        self.mock_response_klass.type = 'EMPTY'
        container = Container(name='test_container', extra={},
                              driver=self.driver)
        objects = self.driver.list_container_objects(container=container)
        self.assertEqual(len(objects), 0)

    def test_list_container_objects_success(self):
        self.mock_response_klass.type = None
        AzureBlobsStorageDriver.RESPONSES_PER_REQUEST = 2

        container = Container(name='test_container', extra={},
                              driver=self.driver)
        objects = self.driver.list_container_objects(container=container)
        self.assertEqual(len(objects), 4)

        obj = objects[1]
        self.assertEqual(obj.name, 'object2.txt')
        self.assertEqual(obj.hash, '0x8CFB90F1BA8CD8F')
        self.assertEqual(obj.size, 1048576)
        self.assertEqual(obj.container.name, 'test_container')
        self.assertTrue('meta1' in obj.meta_data)
        self.assertTrue('meta2' in obj.meta_data)
        self.assertTrue('last_modified' in obj.extra)
        self.assertTrue('content_type' in obj.extra)
        self.assertTrue('content_encoding' in obj.extra)
        self.assertTrue('content_language' in obj.extra)

    def test_get_container_doesnt_exist(self):
        # NOTE(review): truncated in this source chunk mid-assignment
        # (presumably `= None`); the rest of the file is not visible here.
        self.mock_response_klass.type = Non
        # NOTE(review): chunk starts mid-method; the enclosing `def` line is
        # above this view.
        self[name] = values.get(name, '')
        # We have to do an extra trick to catch unchecked checkboxes
        for name in [name for name in values.keys()
                     if name[9:] in field_names
                     and name.startswith('checkbox_')]:
            if name[9:] not in values:
                self[name[9:]] = '0'

    def _load_from_record(self, row):
        # Populate self.id / self.uuid / self.values from a DB row whose
        # layout is (id, <std_fields...>); time fields are stored as
        # timestamps and converted back on load.
        for i, field in enumerate(self.std_fields):
            #print('_load_from_record %i, %s=%s' % (i, field, row[i+1]))
            if i == 0:
                self.id = row[0]
            elif field == 'uuid':
                self.uuid = row[i + 1]
            else:
                value = row[i + 1]
                if value is None:
                    self.values[field] = empty
                elif field in self.time_fields:
                    self.values[field] = from_utimestamp(value)
                else:
                    self.values[field] = value

    def _fetch_crash_by_id(self, id, must_exist=True):
        # Load the crashdump row with the given numeric id; raises
        # ResourceNotFound when must_exist and no row matches.
        row = None
        if self.id_is_valid(id):
            # Fetch the standard crashdump fields
            for row in self.env.db_query("SELECT id,%s FROM crashdump WHERE id=%%s" %
                                         ','.join(self.std_fields), (id,)):
                break
        if not row and must_exist:
            raise ResourceNotFound(_("Crash %(id)s does not exist.",
                                     id=id), _("Invalid crash identifier"))
        if row:
            self.id = id
            self._load_from_record(row)

    def _fetch_crash_by_uuid(self, uuid, must_exist=True):
        # Same as _fetch_crash_by_id but keyed on the crash UUID.
        row = None
        if self.uuid_is_valid(uuid):
            # Fetch the standard crashdump fields
            for row in self.env.db_query("SELECT id,%s FROM crashdump WHERE uuid=%%s" %
                                         ','.join(self.std_fields), (str(uuid),)):
                break
        if not row and must_exist:
            raise ResourceNotFound(_("Crash %(uuid)s does not exist.",
                                     uuid=uuid), _("Invalid crash identifier"))
        if row:
            self.uuid = uuid
            self._load_from_record(row)

    def insert(self, when=None, db=None):
        """Add crash to database.

        :since 1.0: the `db` parameter is no longer needed and will be removed
        in version 1.1.1
        """
        assert not self.exists, 'Cannot insert an existing ticket'

        if 'cc' in self.values:
            self['cc'] = _fixup_cc_list(self.values['cc'])

        # Add a timestamp
        if when is None:
            when = datetime.now(utc)
        self.values['uploadtime'] = self.values['changetime'] = when
        self.values['uuid'] = str(self.uuid)

        # The owner field defaults to the component owner
        if self.values.get('owner') == '< default >':
            default_to_owner = ''
            if self.values.get('component'):
                try:
                    component = Component(self.env, self['component'])
                    default_to_owner = component.owner  # even if it's empty
                except ResourceNotFound:
                    # No such component exists
                    pass
            # If the current owner is "< default >", we need to set it to
            # _something_ else, even if that something else is blank.
            self['owner'] = default_to_owner

        # Perform type conversions
        values = dict(self.values)
        for field in self.time_fields:
            if field in values:
                values[field] = to_utimestamp(values[field])

        # Insert ticket record
        std_fields = []
        custom_fields = []
        for f in self.fields:
            fname = f['name']
            if fname in self.values:
                if f.get('custom'):
                    custom_fields.append(fname)
                else:
                    std_fields.append(fname)

        with self.env.db_transaction as db:
            cursor = db.cursor()
            cursor.execute("INSERT INTO crashdump (%s) VALUES (%s)"
                           % (','.join(std_fields),
                              ','.join(['%s'] * len(std_fields))),
                           [values[name] for name in std_fields])
            crash_id = db.get_last_id(cursor, 'crashdump')

            # Insert custom fields
            if custom_fields:
                db.executemany(
                    """INSERT INTO crashdump_custom (crash, name, value)
                       VALUES (%s, %s, %s)
                    """, [(crash_id, c, self[c]) for c in custom_fields])

        self.id = crash_id
        self.resource = self.resource(id=crash_id)
        self._old = {}
        return self.id

    def save_changes(self, author=None, comment=None, when=None, db=None,
                     cnum='', replyto=None):
        """
        Store ticket changes in the database. The ticket must already exist in
        the database.  Returns False if there were no changes to save, True
        otherwise.

        :since 1.0: the `db` parameter is no longer needed and will be removed
        in version 1.1.1
        :since 1.0: the `cnum` parameter is deprecated, and threading should
        be controlled with the `replyto` argument
        """
        assert self.exists, "Cannot update a new crash dump"

        if 'cc' in self.values:
            self['cc'] = _fixup_cc_list(self.values['cc'])

        # Perform type conversions
        for field in self.time_fields:
            if field in self.values:
                self.values[field] = to_utimestamp(self.values[field])

        # Python 2: dict.iteritems().
        props_unchanged = all(self.values.get(k) == v
                              for k, v in self._old.iteritems())
        if (not comment or not comment.strip()) and props_unchanged:
            return False  # Not modified

        if when is None:
            when = datetime.now(utc)
        when_ts = to_utimestamp(when)

        if 'component' in self.values:
            # If the component is changed on a 'new' ticket
            # then owner field is updated accordingly. (#623).
            if self.values.get('status') == 'new' \
                    and 'component' in self._old \
                    and 'owner' not in self._old:
                try:
                    old_comp = Component(self.env, self._old['component'])
                    old_owner = old_comp.owner or ''
                    current_owner = self.values.get('owner') or ''
                    if old_owner == current_owner:
                        new_comp = Component(self.env, self['component'])
                        if new_comp.owner:
                            self['owner'] = new_comp.owner
                except TracError:
                    # If the old component has been removed from the database
                    # we just leave the owner as is.
                    pass

        with self.env.db_transaction as db:
            db("UPDATE crashdump SET changetime=%s WHERE id=%s",
               (when_ts, self.id))

            # find cnum if it isn't provided
            if not cnum:
                num = 0
                for ts, old in db("""
                        SELECT DISTINCT tc1.time, COALESCE(tc2.oldvalue,'')
                        FROM crashdump_change AS tc1
                        LEFT OUTER JOIN crashdump_change AS tc2
                        ON tc2.crash=%s AND tc2.time=tc1.time
                           AND tc2.field='comment'
                        WHERE tc1.crash=%s ORDER BY tc1.time DESC
                        """, (self.id, self.id)):
                    # Use oldvalue if available, else count edits
                    try:
                        num += int(str(old).rsplit('.', 1)[-1])
                        break
                    except ValueError:
                        num += 1
                cnum = str(num + 1)
                if replyto:
                    cnum = '%s.%s' % (replyto, cnum)

            # store fields
            # NOTE(review): this method is truncated in this source chunk,
            # mid SQL string; the remainder of the file is not visible here.
            for name in self._old.keys():
                if name in self.custom_fields:
                    for row in db("""SELECT * FROM crash_custom WHERE c
# Package initializer: re-export the public notedown API.
from __future__ import absolute_import

from .notedown import *
from .main import convert, markdown_template, __version__

# avoid having to require the notebook to install notedown
try:
    from .contentsmanager import NotedownContentsManager
except ImportError:
    # Deliberate string sentinel: the attribute always exists so imports of
    # it don't fail, and attempting to use it as a contents manager class
    # surfaces this readable message instead of an ImportError.
    NotedownContentsManager = 'You need to install the jupyter notebook.'
# -*- coding: utf-8 -*-

#    Copyright 2013 Mirantis, Inc.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from datetime import datetime
from unittest import TestCase

from paste.fixture import TestApp
from sqlalchemy import orm

from nailgun.db import engine
from nailgun.db import flush
from nailgun.db import NoCacheQuery
from nailgun.wsgi import build_app


class TestDBRefresh(TestCase):
    """Verify that a change committed through one session is visible to an
    object already loaded by a second, independent session."""

    def _fresh_session(self):
        # Each call produces an independent session bound to the same engine,
        # using the non-caching query class so reads always hit the database.
        return orm.scoped_session(
            orm.sessionmaker(bind=engine, query_cls=NoCacheQuery)
        )()

    def setUp(self):
        self.app = TestApp(build_app().wsgifunc())
        self.db = self._fresh_session()
        self.db2 = self._fresh_session()
        self.default_headers = {
            "Content-Type": "application/json"
        }
        flush()

    def test_session_update(self):
        # Create a node through the first session.
        node = Node()
        node.mac = u"ASDFGHJKLMNOPR"
        node.timestamp = datetime.now()
        self.db.add(node)
        self.db.commit()

        # Modify the same row through the second session.
        updated = self.db2.query(Node).filter(
            Node.id == node.id
        ).first()
        updated.mac = u"12345678"
        self.db2.add(updated)
        self.db2.commit()

        # Re-querying via the first session must refresh the already-loaded
        # instance with the committed change.
        self.db.query(Node).filter(
            Node.id == node.id
        ).first()
        self.assertEqual(node.mac, u"12345678")
me': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 
'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'project.project': { 'Meta': {'object_name': 'Project'}, 'brief': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['project.ProjectCategory']", 'symmetrical': 'False'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 18, 0, 0)'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_refused_to_give_to_bitfund': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'maintainer': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'to': "orm['auth.User']"}), 'status': ('django.db.models.fields.CharField', [], {'default': "'unclaimed'", 'max_length': '80'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'project.project_dependencies': { 'Meta': {'object_name': 'Project_Dependencies'}, 'brief': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 18, 0, 0)'}), 'dependee_project': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'related_name': "'dependee_project'", 'to': "orm['project.Project']"}), 'depender_project': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'related_name': "'depender_project'", 'to': "orm['project.Project']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 
'True'}), 'redonation_amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}), 'redonation_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}), 'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'project.projectcategory': { 'Meta': {'object_name': 'ProjectCategory'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 18, 0, 0)'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'project.projectgoal': { 'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectGoal'}, 'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '0'}), 'brief': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 18, 0, 0)'}), 'date_ending': ('django.db.models.fields.DateTimeField', [], {}), 'date_starting': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 18, 0, 0)'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '80'}), 'long_text': 
('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['project.Project']"}), 'short_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'sort_order': ('django.db.models.fields.IntegerField', [],
{'default': '0'}), '
title': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'video_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}) }, 'project.projectgratefulusers': { 'Meta': {'object_name': 'ProjectGratefulUsers'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['project.Project']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'project.projectneed': { 'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectNeed'}, 'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '0'}), 'brief': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
hilled_water_temperature = var_reference_leaving_chilled_water_temperature # real var_reference_leaving_condenser_water_temperature = 5.5 obj.reference_leaving_condenser_water_temperature = var_reference_leaving_condenser_water_temperature # real var_reference_chilled_water_flow_rate = 0.0001 obj.reference_chilled_water_flow_rate = var_reference_chilled_water_flow_rate # real var_reference_condenser_water_flow_rate = 0.0001 obj.reference_condenser_water_flow_rate = var_reference_condenser_water_flow_rate # object-list var_cooling_capacity_function_of_temperature_curve_name = "object-list|Cooling Capacity Function of Temperature Curve Name" obj.cooling_capacity_function_of_temperature_curve_name = var_cooling_capacity_function_of_temperature_curve_name # object-list var_electric_input_to_cooling_output_ratio_function_of_temperature_curve_name = "object-list|Electric Input to Cooling Output Ratio Function of Temperature Curve Name" obj.electric_input_to_cooling_output_ratio_function_of_temperature_curve_name = var_electric_input_to_cooling_output_ratio_function_of_temperature_curve_name # alpha var_electric_input_to_cooling_output_ratio_function_of_part_load_ratio_curve_type = "LeavingCondenserWaterTemperature" obj.electric_input_to_cooling_output_ratio_function_of_part_load_ratio_curve_type = var_electric_input_to_cooling_output_ratio_function_of_part_load_ratio_curve_type # object-list var_electric_input_to_cooling_output_ratio_function_of_part_load_ratio_curve_name = "object-list|Electric Input to Cooling Output Ratio Function of Part Load Ratio Curve Name" obj.electric_input_to_cooling_output_ratio_function_of_part_load_ratio_curve_name = var_electric_input_to_cooling_output_ratio_function_of_part_load_ratio_curve_name # real var_minimum_part_load_ratio = 0.0 obj.minimum_part_load_ratio = var_minimum_part_load_ratio # real var_maximum_part_load_ratio = 0.0001 obj.maximum_part_load_ratio = var_maximum_part_load_ratio # real var_optimum_part_load_ratio = 0.0001 
obj.optimum_part_load_ratio = var_optimum_part_load_ratio # real var_minimum_unloading_ratio = 0.0 obj.minimum_unloading_ratio = var_minimum_unloading_ratio # node var_chilled_water_inlet_node_name = "node|Chilled Water Inlet Node Name" obj.chilled_water_inlet_node_name = var_ch
illed_water_inlet_node_name # node var_chilled_water_outlet_node_name = "node|Chilled Water Outlet Node Name" obj.chilled_water_outlet_node_name = var_chilled_water_outlet_node_name # node var_condenser_inlet_node_name = "node|Condenser Inlet Node Name" obj.condenser_inlet_n
ode_name = var_condenser_inlet_node_name # node var_condenser_outlet_node_name = "node|Condenser Outlet Node Name" obj.condenser_outlet_node_name = var_condenser_outlet_node_name # real var_fraction_of_compressor_electric_consumption_rejected_by_condenser = 0.50005 obj.fraction_of_compressor_electric_consumption_rejected_by_condenser = var_fraction_of_compressor_electric_consumption_rejected_by_condenser # real var_leaving_chilled_water_lower_temperature_limit = 21.21 obj.leaving_chilled_water_lower_temperature_limit = var_leaving_chilled_water_lower_temperature_limit # alpha var_chiller_flow_mode_type = "ConstantFlow" obj.chiller_flow_mode_type = var_chiller_flow_mode_type # real var_design_heat_recovery_water_flow_rate = 0.0 obj.design_heat_recovery_water_flow_rate = var_design_heat_recovery_water_flow_rate # node var_heat_recovery_inlet_node_name = "node|Heat Recovery Inlet Node Name" obj.heat_recovery_inlet_node_name = var_heat_recovery_inlet_node_name # node var_heat_recovery_outlet_node_name = "node|Heat Recovery Outlet Node Name" obj.heat_recovery_outlet_node_name = var_heat_recovery_outlet_node_name # real var_sizing_factor = 0.0001 obj.sizing_factor = var_sizing_factor # real var_condenser_heat_recovery_relative_capacity_fraction = 0.5 obj.condenser_heat_recovery_relative_capacity_fraction = var_condenser_heat_recovery_relative_capacity_fraction # object-list var_heat_recovery_inlet_high_temperature_limit_schedule_name = "object-list|Heat Recovery Inlet High Temperature Limit Schedule Name" obj.heat_recovery_inlet_high_temperature_limit_schedule_name = var_heat_recovery_inlet_high_temperature_limit_schedule_name # node var_heat_recovery_leaving_temperature_setpoint_node_name = "node|Heat Recovery Leaving Temperature Setpoint Node Name" obj.heat_recovery_leaving_temperature_setpoint_node_name = var_heat_recovery_leaving_temperature_setpoint_node_name idf = IDF() idf.add(obj) idf.save(self.path, check=False) with open(self.path, mode='r') as f: for line in 
f: log.debug(line.strip()) idf2 = IDF(self.path) self.assertEqual(idf2.chillerelectricreformulatedeirs[0].name, var_name) self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].reference_capacity, var_reference_capacity) self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].reference_cop, var_reference_cop) self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].reference_leaving_chilled_water_temperature, var_reference_leaving_chilled_water_temperature) self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].reference_leaving_condenser_water_temperature, var_reference_leaving_condenser_water_temperature) self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].reference_chilled_water_flow_rate, var_reference_chilled_water_flow_rate) self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].reference_condenser_water_flow_rate, var_reference_condenser_water_flow_rate) self.assertEqual(idf2.chillerelectricreformulatedeirs[0].cooling_capacity_function_of_temperature_curve_name, var_cooling_capacity_function_of_temperature_curve_name) self.assertEqual(idf2.chillerelectricreformulatedeirs[0].electric_input_to_cooling_output_ratio_function_of_temperature_curve_name, var_electric_input_to_cooling_output_ratio_function_of_temperature_curve_name) self.assertEqual(idf2.chillerelectricreformulatedeirs[0].electric_input_to_cooling_output_ratio_function_of_part_load_ratio_curve_type, var_electric_input_to_cooling_output_ratio_function_of_part_load_ratio_curve_type) self.assertEqual(idf2.chillerelectricreformulatedeirs[0].electric_input_to_cooling_output_ratio_function_of_part_load_ratio_curve_name, var_electric_input_to_cooling_output_ratio_function_of_part_load_ratio_curve_name) self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].minimum_part_load_ratio, var_minimum_part_load_ratio) self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].maximum_part_load_ratio, var_maximum_part_load_ratio) 
self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].optimum_part_load_ratio, var_optimum_part_load_ratio) self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].minimum_unloading_ratio, var_minimum_unloading_ratio) self.assertEqual(idf2.chillerelectricreformulatedeirs[0].chilled_water_inlet_node_name, var_chilled_water_inlet_node_name) self.assertEqual(idf2.chillerelectricreformulatedeirs[0].chilled_water_outlet_node_name, var_chilled_water_outlet_node_name) self.assertEqual(idf2.chillerelectricreformulatedeirs[0].condenser_inlet_node_name, var_condenser_inlet_node_name) self.assertEqual(idf2.chillerelectricreformulatedeirs[0].condenser_outlet_node_name, var_condenser_outlet_node_name) self.assertAlmostEqual(idf2.chillerelectricreformulatedeirs[0].fraction_of_compressor_electric_consumption_rejected
import bpy
from .... base_types.node import AnimationNode


class LineMeshNode(bpy.types.Node, AnimationNode):
    """Generate evenly spaced vertices (and the edges chaining them)
    along the segment from Start to End."""
    bl_idname = "an_LineMeshNode"
    bl_label = "Line Mesh"

    def create(self):
        """Declare the node's input and output sockets."""
        self.newInput("Vector", "Start", "start")
        self.newInput("Vector", "End", "end", value = [0, 0, 10])
        self.newInput("Integer", "Steps", "steps", value = 2, minValue = 2)
        self.newOutput("Vector List", "Vertices", "vertices")
        self.newOutput("Edge Indices List", "Edge Indices", "edgeIndices")

    def execute(self, start, end, steps):
        """Return the interpolated vertices and consecutive edge index pairs."""
        pointCount = max(steps, 2)  # the socket enforces >= 2; guard anyway
        lastIndex = pointCount - 1
        vertices = []
        for index in range(pointCount):
            # linear interpolation between the two endpoints
            vertices.append(start * (1 - index / lastIndex) + end * index / lastIndex)
        edges = [(index, index + 1) for index in range(lastIndex)]
        return vertices, edges
#!/usr/bin/env python
"""Django command-line entry point for the travis-ci test project."""
import os
import sys


def main():
    """Configure the settings module and dispatch to Django's CLI."""
    # NOTE(review): "travis-ci" contains a hyphen, which is not a valid
    # Python package name -- confirm this settings path actually imports.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "travis-ci.settings")
    # Imported lazily so merely importing this module does not require Django.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
"""Add the ``question2`` column to the ``survey`` table."""
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    # Must run after the migration that created the Survey model.
    dependencies = [
        ('app', '0014_survey'),
    ]

    operations = [
        migrations.AddField(
            model_name='survey',
            name='question2',
            # NOTE(review): default=50 is an *int* for a CharField; Django
            # coerces it when back-filling existing rows -- confirm '50' is
            # really the intended back-fill value.
            field=models.CharField(default=50, max_length=50),
            # The default exists only to populate existing rows; it is not
            # kept on the model field afterwards.
            preserve_default=False,
        ),
    ]
# -*- coding: utf-8 -*-

# Copyright (c) 2009 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#

"""
Module implementing a dialog to manage history.
"""

from __future__ import unicode_literals

from PyQt5.QtCore import pyqtSignal, Qt, QUrl
from PyQt5.QtGui import QFontMetrics, QCursor
from PyQt5.QtWidgets import QDialog, QMenu, QApplication

from E5Gui.E5TreeSortFilterProxyModel import E5TreeSortFilterProxyModel

from .HistoryModel import HistoryModel
from .Ui_HistoryDialog import Ui_HistoryDialog


class HistoryDialog(QDialog, Ui_HistoryDialog):
    """
    Class implementing a dialog to manage history.

    @signal openUrl(QUrl, str) emitted to open a URL in the current tab
    @signal newUrl(QUrl, str) emitted to open a URL in a new tab
    """
    openUrl = pyqtSignal(QUrl, str)
    newUrl = pyqtSignal(QUrl, str)

    def __init__(self, parent=None, manager=None):
        """
        Constructor

        @param parent reference to the parent widget (QWidget)
        @param manager reference to the history manager object
            (HistoryManager)
        """
        super(HistoryDialog, self).__init__(parent)
        self.setupUi(self)

        self.__historyManager = manager
        if self.__historyManager is None:
            # fall back to the application-wide history manager
            import Helpviewer.HelpWindow
            self.__historyManager = \
                Helpviewer.HelpWindow.HelpWindow.historyManager()

        # sort/filter proxy between the manager's tree model and the view;
        # filtering matches against every column (-1)
        self.__model = self.__historyManager.historyTreeModel()
        self.__proxyModel = E5TreeSortFilterProxyModel(self)
        self.__proxyModel.setSortRole(HistoryModel.DateTimeRole)
        self.__proxyModel.setFilterKeyColumn(-1)
        self.__proxyModel.setSourceModel(self.__model)
        self.historyTree.setModel(self.__proxyModel)
        self.historyTree.expandAll()
        # make the title column roughly 40 characters wide
        fm = QFontMetrics(self.font())
        header = fm.width("m") * 40
        self.historyTree.header().resizeSection(0, header)
        self.historyTree.header().setStretchLastSection(True)
        self.historyTree.setContextMenuPolicy(Qt.CustomContextMenu)

        self.historyTree.activated.connect(self.__activated)
        self.historyTree.customContextMenuRequested.connect(
            self.__customContextMenuRequested)

        self.searchEdit.textChanged.connect(
            self.__proxyModel.setFilterFixedString)
        self.removeButton.clicked.connect(self.historyTree.removeSelected)
        self.removeAllButton.clicked.connect(self.__historyManager.clear)

        self.__proxyModel.modelReset.connect(self.__modelReset)

    def __modelReset(self):
        """
        Private slot handling a reset of the tree view's model.
        """
        self.historyTree.expandAll()

    def __customContextMenuRequested(self, pos):
        """
        Private slot to handle the context menu request for the
        bookmarks tree.

        @param pos position the context menu was requested (QPoint)
        """
        menu = QMenu()
        idx = self.historyTree.indexAt(pos)
        idx = idx.sibling(idx.row(), 0)
        # open/copy actions only make sense for leaf entries (single
        # history items); "Remove" also works for whole date groups
        if idx.isValid() and not self.historyTree.model().hasChildren(idx):
            menu.addAction(
                self.tr("&Open"), self.__openHistoryInCurrentTab)
            menu.addAction(
                self.tr("Open in New &Tab"), self.__openHistoryInNewTab)
            menu.addSeparator()
            menu.addAction(self.tr("&Copy"), self.__copyHistory)
        menu.addAction(self.tr("&Remove"), self.historyTree.removeSelected)
        menu.exec_(QCursor.pos())

    def __activated(self, idx):
        """
        Private slot to handle the activation of an entry.

        @param idx reference to the entry index (QModelIndex)
        """
        # Ctrl-activation opens the entry in a new tab instead of the
        # current one
        self.__openHistory(
            QApplication.keyboardModifiers() & Qt.ControlModifier)

    def __openHistoryInCurrentTab(self):
        """
        Private slot to open a history entry in the current browser tab.
        """
        self.__openHistory(False)

    def __openHistoryInNewTab(self):
        """
        Private slot to open a history entry in a new browser tab.
        """
        self.__openHistory(True)

    def __openHistory(self, newTab):
        """
        Private method to open a history entry.

        @param newTab flag indicating to open the history entry in a new
            tab (boolean)
        """
        idx = self.historyTree.currentIndex()
        if newTab:
            self.newUrl.emit(
                idx.data(HistoryModel.UrlRole),
                idx.data(HistoryModel.TitleRole))
        else:
            self.openUrl.emit(
                idx.data(HistoryModel.UrlRole),
                idx.data(HistoryModel.TitleRole))

    def __copyHistory(self):
        """
        Private slot to copy a history entry's URL to the clipboard.
        """
        idx = self.historyTree.currentIndex()
        # top-level rows are date groups, not individual entries
        if not idx.parent().isValid():
            return

        url = idx.data(HistoryModel.UrlStringRole)

        clipboard = QApplication.clipboard()
        clipboard.setText(url)
# -*- coding: utf-8 -*- # Copyright (C) 2016-2018 Cordell Bloor # Published under the MIT License from nose.tools import * import guardonce.util as go def test_ok(): contents = ''' #ifndef MATCH_H #define MATCH_H ''' g,s,e = go.guess_guard(contents) assert_equals(g, 'MATCH_H') assert_equals(s, 1) assert_equals(e, 32) def test_ok_space_before_hash(): contents = ''' #ifndef MATCH_H #define MATCH_H ''' g,s,e = go.guess_guard(contents) assert_equals(g, 'MATCH_H') assert_equals(s, 1) assert_equals(e, 33) def test_ok_space_after_hash(): contents = ''' # ifndef MATCH_H # define MATCH_H ''' g,s,e = go.guess_guard(contents) assert_equals(g, 'MATCH_H') assert_equals(s, 1) assert_equals(e, 34) @raises(ValueError) def test_no_ifndef(): contents = ''' #ifdef MATCH_H #define MATCH_H ''' go.guess_guard(contents) @raises(ValueError) def test_no_define(): contents = ''' #ifndef MATCH_H #defne MATCH_H ''' go.guess_guard(contents) @raises(ValueError) def test_mismatched_define_symbol(): contents = ''' #ifndef MATCH_H #define MISMATCH_H ''' go.guess_guard(contents) @raises(ValueError) def test_extra_junk_on_ifndef(): contents = ''' #ifndef MATCH_H WEIRD_HUH #define MATCH_H ''' go.guess_guard(contents) @raises(ValueError) def test_extra_junk_on_define(): contents = ''' #ifndef MATCH_H #define MATCH_H WEIRD_HUH ''' go.guess_guard(contents) def test_extra_whitespace_on_ifndef(): contents = ''' #ifndef MATCH_H #define MATCH_H ''' g,s,e = go.guess_guard(contents) assert_equals(g, 'MATCH_H') assert_equals(s, 1) assert_equals(e, 33) def test_extra_whitespace_on_define(): contents = ''' #ifndef MATCH_H #define MATCH_H ''' g,s,e = go.guess_guard(contents) assert_equals(g, 'MATCH_H') assert_equals(s, 1) assert_equals(e, 33) def test_define_with_value_1(): contents = ''' #ifndef MATCH_H #define MATCH_H 1 ''' g,s,e = go.guess_guard(contents) assert_equal
s(g, 'MATCH_H') assert_equals(s, 1) assert_equals(e, 34) @raises(ValueError) def test_define(): contents = ''' #ifndef ONE #define ONE 1 #endif ''' go.guess_guard(contents) @raises(ValueError) def test_define_with_space(): contents = ''' #ifndef ONE #define ONE 1 #endif ''' go.guess_guard(contents) def test_if_defined(): conte
nts = ''' #if !defined(MATCH_H) #define MATCH_H ''' g,s,e = go.guess_guard(contents) assert_equals(g, 'MATCH_H') assert_equals(s, 1) assert_equals(e, 38) def test_if_defined_no_parentheses(): contents = ''' #if !defined MATCH_H #define MATCH_H ''' g,s,e = go.guess_guard(contents) assert_equals(g, 'MATCH_H') assert_equals(s, 1) assert_equals(e, 37) def test_if_defined_space_after_bang(): contents = ''' #if ! defined(MATCH_H) #define MATCH_H ''' g,s,e = go.guess_guard(contents) assert_equals(g, 'MATCH_H') assert_equals(s, 1) assert_equals(e, 39) def test_if_defined_space_before_parentheses(): contents = ''' #if !defined (MATCH_H) #define MATCH_H ''' g,s,e = go.guess_guard(contents) assert_equals(g, 'MATCH_H') assert_equals(s, 1) assert_equals(e, 39) def test_if_defined_space_before_symbol(): contents = ''' #if !defined( MATCH_H) #define MATCH_H ''' g,s,e = go.guess_guard(contents) assert_equals(g, 'MATCH_H') assert_equals(s, 1) assert_equals(e, 39) def test_if_defined_space_after_symbol(): contents = ''' #if !defined(MATCH_H ) #define MATCH_H ''' g,s,e = go.guess_guard(contents) assert_equals(g, 'MATCH_H') assert_equals(s, 1) assert_equals(e, 39) def test_if_defined_space_before_newline(): contents = ''' #if !defined(MATCH_H) #define MATCH_H ''' g,s,e = go.guess_guard(contents) assert_equals(g, 'MATCH_H') assert_equals(s, 1) assert_equals(e, 39) @raises(ValueError) def test_if_defined_extra_junk_before_newline(): contents = ''' #if !defined(MATCH_H) WEIRD_HUH #define MATCH_H ''' go.guess_guard(contents) @raises(ValueError) def test_if_defined_extra_junk_in_defined(): contents = ''' #if !defined(MATCH_H WEIRD_HUH) #define MATCH_H ''' go.guess_guard(contents)
from django.core.management.base import BaseCommand from django.conf import settings from utils.amazon import default_s3_store from videos.models import Video, VIDEO_TYPE_FLV, VIDEO_TYPE_HTML5 import urllib import os import commands import sys from django.core.exceptions import ImproperlyConfigured from django.core.files.base import ContentFile from django.db.models import ObjectDoesNotExist VIDEO_UPLOAD_PATH = getattr(settings, 'VIDEO_UPLOAD_PATH', \ os.path.join(settings.MEDIA_ROOT, 'videos')) VIDEO_THUMBNAILS_FOLDER = getattr(settings, 'VIDEO_THUMBNAILS_PATH', 'videos/thumbnails/') THUMBNAILS_PATH = os.path.join(settings.MEDIA_ROOT, VIDEO_THUMBNAILS_FOLDER) class Command(BaseCommand): def handle(self, *args, **options): print 'Run load thumbnail command' self.verbosity = int(options.get('verbosity', 1)) self.s3_store = self.init_s3() if not os.path.exists(VIDEO_UPLOAD_PATH): os.makedirs(VIDEO_UPLOAD_PATH) if not os.path.exists(THUMBNAILS_PAT
H): os.makedirs(THUMBNAILS_PATH) qs = Video.objects.filter(thumbnail='', videourl__original=True, videourl__type__in=[VIDEO_TYPE_FLV, VIDEO_TYPE_HTML5]) for video in qs: self.print_to_console(u'Handling %s' % video.__unicode__()) try: video_url = video.videourl_set.filter(original=True)[:1].get(
) except ObjectDoesNotExist: continue path = self.get_file_path(video, video_url) if not os.path.exists(path): self.print_to_console(u'Saving...') urllib.urlretrieve(video_url.url, path) self.print_to_console(u'Video saved.') else: self.print_to_console(u'File exist.') self.get_thumbnail(video, path) self.print_to_console(u'-----------------') #--- Save original thumbnails to S3 Store --- self.print_to_console(u'Save original thumbnails to S3 Store...') qs = Video.objects.exclude(thumbnail='').filter(s3_thumbnail='') for video in qs: self.print_to_console(u'Handling %s' % video.__unicode__()) name = video.thumbnail.strip('/').split('/')[-1] cf = ContentFile(urllib.urlopen(video.thumbnail).read()) video.s3_thumbnail.save('%s/%s' % (video.video_id, name), cf, True) def print_to_console(self, msg, min_verbosity=1): if self.verbosity >= min_verbosity: print msg def init_s3(self): if not default_s3_store: raise ImproperlyConfigured('Have not settings for thumbnails uploading to S3 Store.') return default_s3_store def get_thumbnail(self, video, path): self.print_to_console(u'Get thumbnail...') grabimage = "ffmpeg -y -i %s -vframes 1 -ss 00:00:%s -an -vcodec png -f rawvideo %s" thumbnailfilename = "%s.png" % video.video_id thumbnailpath = os.path.normpath(os.path.join(THUMBNAILS_PATH, thumbnailfilename)) grab_result = 'Command is not runned yet' try: grab_result = commands.getoutput(grabimage % (path, 10, thumbnailpath)) if not os.path.exists(thumbnailpath): raise Exception('Error in converting: %s' % grab_result) if not os.path.getsize(thumbnailpath): grab_result = commands.getoutput(grabimage % (path, 5, thumbnailpath)) self.print_to_console(u'Saving in S3 Store...') cf = ContentFile(open(thumbnailpath, 'rb').read()) video.s3_thumbnail.save(thumbnailfilename, cf, True) video.thumbnail = video.s3_thumbnail.url video.save() os.remove(thumbnailpath) os.remove(path) except: if settings.DEBUG: raise self.handle_error(sys.exc_info()) def get_file_path(self, video, video_url): 
type = video_url.url.split('.')[-1] name = '%s.%s' % (video.video_id, type) return os.path.join(VIDEO_UPLOAD_PATH, name)
#! /usr/bin/python
"""Bottle route table wiring URL paths to controller handlers."""
import bottle

import settings
from controller import admin as admin_controller
from controller import email as email_controller

app = application = bottle.Bottle()

# --- Base url for regular users ---
for _path, _handler in (
        (settings.BASEPATH, admin_controller.index),
        (settings.BASEPATH + '/', admin_controller.index),
        (settings.BASEPATH + '/tasks/<id>', admin_controller.read_user_tasks),
):
    app.route(_path, 'GET', _handler)
app.route(
    settings.BASEPATH + '/update/<id>', 'POST', admin_controller.update_self
)

# --- Email handler (mounted sub-application) ---
email = bottle.Bottle()
app.mount(settings.EMAIL_PATH, email)
email.route('/', 'POST', email_controller.receive_email)
email.route('/', 'GET', email_controller.test_form)
email.route('', 'GET', email_controller.test_form)

# --- Ansible admin (mounted sub-application) ---
admin = bottle.Bottle()
app.mount(settings.ADMIN_PATH, admin)
admin.route('/tasks', 'GET', admin_controller.read_tasks)
admin.route('/create', 'POST', admin_controller.create_person)
admin.route('/delete', 'POST', admin_controller.delete_people)
admin.route('/<id>', 'GET', admin_controller.read_person)
admin.route('/<id>', 'POST', admin_controller.update_person)
admin.route('/', 'GET', admin_controller.admin)


# --- Static files ---
def _serve_static(**kwargs):
    """Serve static/<type>/<filename> from the local static/ tree."""
    return bottle.static_file(
        filename=kwargs['filename'], root='static/' + kwargs['type']
    )

app.route(settings.STATIC_PATH + '/<type>/<filename>', 'GET', _serve_static)

if __name__ == '__main__':
    bottle.run(app=app, reloader=True, **settings.SERVER)
# https://leetcode.com/problems/build-array-where-you-can-find-the-maximum-exactly-k-comparisons

class Solution:
    def numOfArrays(self, n: int, m: int, k: int) -> int:
        """Count arrays of length n over values 1..m whose running maximum
        is updated exactly k times (the "search cost"), modulo 1e9+7.

        The previous closed form sum((i-1)**(k-1) * i**(n-k)) is wrong: it
        fixes the positions of the k maxima implicitly and miscounts (it
        fails the problem's own samples, e.g. n=50, m=100, k=25).

        DP instead: dp[j][c] = number of arrays of the current length whose
        maximum is exactly j with c updates so far.
        - appending x <= j keeps (j, c): j choices  -> dp[j][c] * j
        - appending x  > j moves (j, c) -> (x, c+1) -> prefix sum over j
        The prefix sum makes each step O(m*k), so O(n*m*k) total.
        """
        MOD = 10 ** 9 + 7
        if k < 1 or k > min(n, m):
            # more updates than positions or distinct values is impossible
            return 0
        # length-1 arrays: single element j, maximum j, exactly one update
        dp = [[0] * (k + 1) for _ in range(m + 1)]
        for j in range(1, m + 1):
            dp[j][1] = 1
        for _ in range(n - 1):
            ndp = [[0] * (k + 1) for _ in range(m + 1)]
            for c in range(1, k + 1):
                below = 0  # sum of dp[j'][c-1] for all j' < j
                for j in range(1, m + 1):
                    ndp[j][c] = (dp[j][c] * j + below) % MOD
                    below = (below + dp[j][c - 1]) % MOD
            dp = ndp
        return sum(dp[j][k] for j in range(1, m + 1)) % MOD


# n = 2
# m = 3
# k = 1
# ans = 6

# n = 9
# m = 1
# k = 1
# ans = 1

n = 50
m = 100
k = 25
ans = 34549172

# n = 37
# m = 17
# k = 7
# ans = 418930126

sl = Solution()
ret = sl.numOfArrays(n, m, k)
print(ret, "O" if ret == ans else "X")
import json
import os
import unittest

from pymatgen.phonon.bandstructure import PhononBandStructureSymmLine
from pymatgen.util.testing import PymatgenTest


class PhononBandStructureSymmLineTest(PymatgenTest):
    """Tests for PhononBandStructureSymmLine against reference NaCl (with
    non-analytical correction) and Si (without) band structures.

    Fixes vs. previous revision: ``assertTrue(x, True)`` passed True as the
    *msg* argument; ``assertAlmostEqual`` was used on strings (it only
    passed via its ``first == second`` short-circuit) -- both replaced with
    the correct assertion methods.
    """

    def setUp(self):
        # Deserialize the two reference band structures from test files.
        with open(
            os.path.join(PymatgenTest.TEST_FILES_DIR, "NaCl_phonon_bandstructure.json"),
            encoding="utf-8",
        ) as f:
            d = json.load(f)
        self.bs = PhononBandStructureSymmLine.from_dict(d)

        with open(
            os.path.join(PymatgenTest.TEST_FILES_DIR, "Si_phonon_bandstructure.json"),
            encoding="utf-8",
        ) as f:
            d = json.load(f)
        self.bs2 = PhononBandStructureSymmLine.from_dict(d)

    def test_basic(self):
        # spot-check frequencies, shapes and eigendisplacements
        self.assertAlmostEqual(self.bs.bands[1][10], 0.7753555184)
        self.assertAlmostEqual(self.bs.bands[5][100], 5.2548379776)
        self.assertArrayEqual(self.bs.bands.shape, (6, 204))
        self.assertArrayEqual(self.bs.eigendisplacements.shape, (6, 204, 2, 3))
        self.assertArrayAlmostEqual(
            self.bs.eigendisplacements[3][50][0],
            [0.0 + 0.0j, 0.14166569 + 0.04098339j, -0.14166569 - 0.04098339j],
        )
        # was assertTrue(..., True): the second argument is the *msg*
        self.assertTrue(self.bs.has_eigendisplacements)

        self.assertArrayEqual(self.bs.min_freq()[0].frac_coords, [0, 0, 0])
        self.assertAlmostEqual(self.bs.min_freq()[1], -0.03700895020)
        self.assertTrue(self.bs.has_imaginary_freq())
        self.assertFalse(self.bs.has_imaginary_freq(tol=0.5))
        self.assertArrayAlmostEqual(self.bs.asr_breaking(), [-0.0370089502, -0.0370089502, -0.0221388897])

        self.assertEqual(self.bs.nb_bands, 6)
        self.assertEqual(self.bs.nb_qpoints, 204)

        self.assertArrayAlmostEqual(self.bs.qpoints[1].frac_coords, [0.01, 0, 0])

    def test_nac(self):
        # only the NaCl structure carries the non-analytical correction
        self.assertTrue(self.bs.has_nac)
        self.assertFalse(self.bs2.has_nac)
        self.assertAlmostEqual(self.bs.get_nac_frequencies_along_dir([1, 1, 0])[3], 4.6084532143)
        self.assertIsNone(self.bs.get_nac_frequencies_along_dir([0, 1, 1]))
        self.assertIsNone(self.bs2.get_nac_frequencies_along_dir([0, 0, 1]))
        self.assertArrayAlmostEqual(
            self.bs.get_nac_eigendisplacements_along_dir([1, 1, 0])[3][1],
            [(0.1063906409128248 + 0j), 0j, 0j],
        )
        self.assertIsNone(self.bs.get_nac_eigendisplacements_along_dir([0, 1, 1]))
        self.assertIsNone(self.bs2.get_nac_eigendisplacements_along_dir([0, 0, 1]))

    def test_branches(self):
        self.assertEqual(self.bs.branches[0]["end_index"], 50)
        self.assertEqual(self.bs.branches[1]["start_index"], 51)
        self.assertEqual(self.bs.branches[2]["name"], "Y-Gamma")
        # branch names are strings: assertEqual, not assertAlmostEqual
        self.assertEqual(self.bs.get_branch(10)[0]["name"], "Gamma-X")
        self.assertEqual(len(self.bs.branches), 4)

    def test_dict_methods(self):
        # round-trip serialization must stay JSON-encodable
        s = self.bs.as_dict()
        self.assertIsNotNone(s)
        self.assertIsNotNone(json.dumps(s))
        s = self.bs2.as_dict()
        self.assertIsNotNone(s)
        self.assertIsNotNone(json.dumps(s))
        s = self.bs2.as_phononwebsite()
        self.assertIsNotNone(s)
        self.assertIsNotNone(json.dumps(s))
        self.assertMSONable(self.bs)
        self.assertMSONable(self.bs2)

    def test_write_methods(self):
        self.bs2.write_phononwebsite("test.json")

    def tearDown(self):
        # clean up the file test_write_methods may have produced
        if os.path.isfile("test.json"):
            os.remove("test.json")


if __name__ == "__main__":
    unittest.main()
"""Manage Treadmill app manifest.""" from __future__ import absolute_import import logging import urllib import click from .. import cli from treadmill import restclient from treadmill import context _LOGGER = logging.getLogger(__name__) _STATE_FORMATTER = cli.make_formatter(cli.InstanceStatePrettyFormatter) _ENDPOINT_FORMATTER = cli.make_formatter(cli.EndpointPrettyFormatter) _APP_FORMATTER = cli.make_formatter(cli.AppPrettyFormatter) def _show_state(apis, match): """Show cell state.""" url = '/state/' if match: url += '?' + urllib.urlencode([('match', match)]) response = restclient.get(apis, url) cli.out(_STATE_FORMATTER(response.json())) def _show_list(apis, match, states): """Show list of instnces in given state.""" url = '/state/' if match: url += '?' + urllib.urlencode([('match', match)]) response = restclient.get(apis, url) names = [item['name'] for item in response.json() if item['state'] in states] for name in names: print name def _show_endpoints(apis, pattern, endpoint, proto): """Show cell endpoints.""" url = '/endpoint/%s' % urllib.quote(pattern) if endpoint: if proto: url += '/' + proto else: url += '/*' url += '/' + endpoint response = restclient.get(apis, url) endpoints = [{ 'name': end['name'], 'proto': end['proto'], 'endpoint': end['endpoint'], 'hostport': '{0}:{1}'.format(end['host'], end['port']) } for end in response.json()] cli.out(_ENDPOINT_FORMATTER(endpoints)) def _show_instance(apis, instance_id): """Show instance manifest.""" url = '/instance/%s' % urllib.quote(instance_id) response = restclient.get(apis, url) cli.out(_APP_FORMATTER(response.json())) def init(): """Return top level command handler.""" ctx = {} @click.group() @click.option('--cell', required=True, envvar='TREADMILL_CELL', c
allback=cli.handle_context_opt, expose_value=False) @click.option('--api', required=False, help='API url to use.', metavar='URL', envvar='TREADMILL_STATEAPI') def show(api): """Show state of scheduled applications.""" ctx['api'] = api @show.command() @cli.ON_REST_EXCEPTIONS @click.option('--match', help='Application name pattern match')
def state(match): """Show state of Treadmill scheduled instances.""" apis = context.GLOBAL.state_api(ctx['api']) return _show_state(apis, match) @show.command() @cli.ON_REST_EXCEPTIONS @click.option('--match', help='Application name pattern match') def pending(match): """Show pending instances.""" apis = context.GLOBAL.state_api(ctx['api']) return _show_list(apis, match, ['pending']) @show.command() @cli.ON_REST_EXCEPTIONS @click.option('--match', help='Application name pattern match') def running(match): """Show running instances.""" apis = context.GLOBAL.state_api(ctx['api']) return _show_list(apis, match, ['running']) @show.command() @cli.ON_REST_EXCEPTIONS @click.option('--match', help='Application name pattern match') def scheduled(match): """Show scheduled instances.""" apis = context.GLOBAL.state_api(ctx['api']) return _show_list(apis, match, ['running', 'scheduled']) @show.command(name='all') @cli.ON_REST_EXCEPTIONS @click.option('--match', help='Application name pattern match') def _all(match): """Show scheduled instances.""" apis = context.GLOBAL.state_api(ctx['api']) return _show_list(apis, match, ['pending', 'running', 'scheduled']) @show.command() @cli.ON_REST_EXCEPTIONS @click.argument('pattern') @click.argument('endpoint', required=False) @click.argument('proto', required=False) def endpoints(pattern, endpoint, proto): """Show application endpoints.""" apis = context.GLOBAL.state_api(ctx['api']) return _show_endpoints(apis, pattern, endpoint, proto) @show.command() @cli.ON_REST_EXCEPTIONS @click.argument('instance_id') def instance(instance_id): """Show scheduled instance manifest.""" apis = context.GLOBAL.cell_api(ctx['api']) return _show_instance(apis, instance_id) del _all del running del scheduled del pending del instance del state del endpoints return show
#!/usr/bin/env python
# $Id: pythiaExample.py 545 2012-01-18 06:10:03Z cvermilion $
#----------------------------------------------------------------------
# Copyright (c) 2010-12, Pierre-Antoine Delsart, Kurtis Geerlings, Joey Huston,
#  Brian Martin, and Christopher Vermilion
#
#----------------------------------------------------------------------
# This file is part of SpartyJet.
#
#  SpartyJet is free software; you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation; either version 3 of the License, or
#  (at your option) any later version.
#
#  SpartyJet is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with SpartyJet; if not, write to the Free Software
#  Foundation, Inc.:
#      59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#----------------------------------------------------------------------

from spartyjet import SpartyJet as SJ, fastjet as fj, writeCurrentFile
from ROOT import TPythia8, gSystem

#===============================================
# Example showing how to pass events directly from Pythia to SpartyJet.
# Requires that you have built ROOT's Pythia 6 and/or 8 interface.

gSystem.Load('libEG') # import common event gen lib

# Create a jet builder(MessageLevel = INFO)------
builder = SJ.JetBuilder(SJ.INFO)

######### PYTHIA8 #########
# Load Libraries
gSystem.Load('libEGPythia8')

# Creat TPythia8 object
pythia = TPythia8()

# Single W production
pythia.ReadString('WeakSingleBoson:ffbar2W = on')
# Force decay W->ev
pythia.ReadString("24:onMode = off")
pythia.ReadString("24:onIfAny = 11 12")
# UE (multiple interactions off)
pythia.ReadString("PartonLevel:MI = off")
# ISR,FSR, beam remnant
pythia.ReadString("PartonLevel:ISR = on")
pythia.ReadString("PartonLevel:FSR = on")
pythia.ReadString("PartonLevel:Remnants = on")
# Hadronize
pythia.ReadString("HadronLevel:all = on")
# Initialize for LHC (proton-proton at 7 TeV)
pythia.Initialize(2212,2212, 7000.)
# Initialize for Tevatron
#pythia.Initialize( 2212, -2212, 1960.)
###########################

######### PYTHIA6 #########
## Load Library
#ROOT.gSystem.Load("libEGPythia6")

## Create TPythia8 object
#pythia = ROOT.TPythia6()

## Turn on W+jet production
#pythia.SetMSEL(14)

## Turn off all decay modes except W->e+nu
#decayChOff = [190,191,192,194,195,196,198,199,200,207,208]
#for dc in decayChOff:
#    pythia.SetMDME(dc,1,0)
## Turn on W->e+nu
#pythia.SetMDME(206,1,1)

## Initialize for LHC
#pythia.Initialize('cms','p+','p+',7000.)
## Initialize for Tevatron
##pythia.Initialize('cms','p+','p-',1960.)
###########################

# Create input object and add to builder --------
# NOTE(review): 'input' shadows the Python builtin; harmless in a
# standalone example script.
input = SJ.PythiaInput(pythia)
# Event Particle printout (Only usable in Pythia8)
#input.printEvent(True)
builder.configure_input(input)

# Create jet finder and add to builder ----------
analysis = SJ.JetAnalysis(SJ.FastJet.FastJetFinder('AntiKt4', fj.antikt_algorithm, 0.4))
builder.add_analysis(analysis)

# Configure text output--------------------------
builder.add_text_output("../data/output/pythia.dat")

# Configure ntuple output------------------------
outfile = "../data/output/pythia.root"
builder.configure_output("SpartyJet_Tree", outfile)

# Run SpartyJet----------------------------------
builder.process_events(10)

# Save this script in the ROOT file (needs to go after process_events or it
# gets over-written!)
writeCurrentFile(outfile)
# URL routes for the imager_images app: library/stream pages plus CRUD views
# for photos and albums. All class-based views require login.
# NOTE(review): `patterns()` was removed in Django 1.10 and `include`/`admin`
# are imported but unused here — presumably this targets Django <= 1.9; confirm
# before upgrading.
from django.conf.urls import patterns, include, url
from django.contrib import admin
import views
from django.contrib.auth.decorators import login_required

urlpatterns = patterns(
    '',
    url(r'^library$', views.library, name='profile-library'),
    url(r'^stream$', views.stream, name='profile-stream'),
    # Create views share one template; every success redirects to the library.
    url(r'^add_photo/', login_required(views.PhotoCreate.as_view(
        template_name="imager_images/create_form.html",
        success_url='/images/library')),
        name='add_photo'),
    url(r'^add_album/', login_required(views.AlbumCreate.as_view(
        template_name="imager_images/create_form.html",
        success_url='/images/library')),
        name='add_album'),
    # Update/delete views capture the object's primary key from the URL.
    url(r'^update_album/(?P<pk>\d+)/$', login_required(views.AlbumUpdate.as_view(
        template_name="imager_images/update_album.html",
        success_url='/images/library')),
        name='update_album'),
    url(r'^update_photo/(?P<pk>\d+)/$', login_required(views.PhotoUpdate.as_view(
        template_name="imager_images/update_photo.html",
        success_url='/images/library')),
        name='update_photo'),
    url(r'^delete_photo/(?P<pk>\d+)/$', login_required(views.PhotoDelete.as_view(
        template_name="imager_images/delete_form.html",
        success_url='/images/library')),
        name='delete_photo'),
    url(r'^delete_album/(?P<pk>\d+)/$', login_required(views.AlbumDelete.as_view(
        template_name="imager_images/delete_form.html",
        success_url='/images/library')),
        name='delete_album'),
)
""" Generic relations Generic relations let an object have a foreign key to any object through a content-type/object-id field. A ``GenericForeignKey`` field can point to any object, be it animal, vegetable, or mineral. The canonical example is tags (although this example implementation is *far* from complete). """ from __future__ import unicode_literals from django.contrib.contenttypes.fields import ( GenericForeignKey, GenericRelation, ) from django.contrib.contenttypes.models import ContentType from django.db import models from django.utils.encoding import python_2_unicode_compatible @python_2_unicode_compatible class TaggedItem(models.Model): """A tag on an item.""" tag = models.SlugField() content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.PositiveIntegerField() content_object = GenericForeignKey() class Meta: ordering = ["tag", "content_type__model"] def __str__(self): return self.tag class ValuableTaggedItem(TaggedItem): value = models.PositiveIntegerField() class AbstractComparison(models.Model): comparative = models.CharField(max_length=50) content_type1 = models.ForeignKey(ContentType, models.CASCADE, related_name="comparative1_set") object_id1 = models.PositiveIntegerField() first_obj = GenericForeignKey(ct_field="content_type1", f
k_field="object_id1") @python_2_unicode_compatible class Comparison(AbstractComparison): """ A model that tests having multiple
GenericForeignKeys. One is defined through an inherited abstract model and one defined directly on this class. """ content_type2 = models.ForeignKey(ContentType, models.CASCADE, related_name="comparative2_set") object_id2 = models.PositiveIntegerField() other_obj = GenericForeignKey(ct_field="content_type2", fk_field="object_id2") def __str__(self): return "%s is %s than %s" % (self.first_obj, self.comparative, self.other_obj) @python_2_unicode_compatible class Animal(models.Model): common_name = models.CharField(max_length=150) latin_name = models.CharField(max_length=150) tags = GenericRelation(TaggedItem, related_query_name='animal') comparisons = GenericRelation(Comparison, object_id_field="object_id1", content_type_field="content_type1") def __str__(self): return self.common_name @python_2_unicode_compatible class Vegetable(models.Model): name = models.CharField(max_length=150) is_yucky = models.BooleanField(default=True) tags = GenericRelation(TaggedItem) def __str__(self): return self.name @python_2_unicode_compatible class Mineral(models.Model): name = models.CharField(max_length=150) hardness = models.PositiveSmallIntegerField() # note the lack of an explicit GenericRelation here... 
def __str__(self): return self.name class GeckoManager(models.Manager): def get_queryset(self): return super(GeckoManager, self).get_queryset().filter(has_tail=True) class Gecko(models.Model): has_tail = models.BooleanField(default=False) objects = GeckoManager() # To test fix for #11263 class Rock(Mineral): tags = GenericRelation(TaggedItem) class ManualPK(models.Model): id = models.IntegerField(primary_key=True) tags = GenericRelation(TaggedItem, related_query_name='manualpk') class ForProxyModelModel(models.Model): content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.PositiveIntegerField() obj = GenericForeignKey(for_concrete_model=False) title = models.CharField(max_length=255, null=True) class ForConcreteModelModel(models.Model): content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.PositiveIntegerField() obj = GenericForeignKey() class ConcreteRelatedModel(models.Model): bases = GenericRelation(ForProxyModelModel, for_concrete_model=False) class ProxyRelatedModel(ConcreteRelatedModel): class Meta: proxy = True # To test fix for #7551 class AllowsNullGFK(models.Model): content_type = models.ForeignKey(ContentType, models.SET_NULL, null=True) object_id = models.PositiveIntegerField(null=True) content_object = GenericForeignKey()
import os

# Locate the SBML model file that ships next to this script and slurp it
# into memory so it is importable as `sbmlString`.
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'BIOMD0000000462.xml')
with open(sbmlFilePath,'r') as f:
    sbmlString = f.read()

def module_exists(module_name):
    """Return True if `module_name` can be imported, False otherwise.

    EAFP probe: actually importing is the only reliable availability test.
    """
    try:
        __import__(module_name)
    except ImportError:
        return False
    else:
        return True

if module_exists('libsbml'):
    import libsbml

    # Parsed libsbml document for this BioModels entry; only built when the
    # optional libsbml bindings are installed.
    sbml = libsbml.readSBMLFromString(sbmlString)
#
# Gordon McMillan (as inspired and influenced by Greg Stein)
#
# subclasses may not need marshal or struct, but since they're
# builtin, importing is safe.
#
# While an Archive is really an abstraction for any "filesystem
# within a file", it is tuned for use with imputil.FuncImporter.
# This assumes it contains python code objects, indexed by the
# the internal name (ie, no '.py').
# See carchive.py for a more general archive (contains anything)
# that can be understood by a C program.

#archive_rt is a stripped down version of MEInc.Dist.archive.
#It has had all building logic removed.
#It's purpose is to bootstrap the Python installation.
# (The previously commented-out build/add/save_toc code has been removed;
# see MEInc.Dist.archive for the building counterpart.)

import marshal
import struct


class Archive:
    """
    A base class for a repository of python code objects.
    The extract method is used by imputil.ArchiveImporter
    to get code objects by name (fully qualified name), so
    an enduser "import a.b" would become
      extract('a.__init__')
      extract('a.b')
    """
    MAGIC = 'PYL\0'
    HDRLEN = 12         # default is MAGIC followed by python's magic, int pos of toc
    TOCPOS = 8
    TRLLEN = 0          # default - no trailer
    TOCTMPLT = {}
    # os = None

    def __init__(self, path=None, start=0):
        "Initialize an Archive. If path is omitted, it will be an empty Archive."
        self.toc = None
        self.path = path
        self.start = start
        # Python's bytecode magic number; prefer the modern importlib API and
        # fall back to the legacy imp module (imp was removed in Python 3.12).
        try:
            from importlib.util import MAGIC_NUMBER
            self.pymagic = MAGIC_NUMBER
        except ImportError:
            import imp
            self.pymagic = imp.get_magic()
        if path is not None:
            self.lib = open(self.path, 'rb')
            self.checkmagic()
            self.loadtoc()

    ####### Sub-methods of __init__ - override as needed #############
    def checkmagic(self):
        """ Overridable.
            Check to see if the file object self.lib actually has a file
            we understand.
        """
        self.lib.seek(self.start)  # default - magic is at start of file
        if self.lib.read(len(self.MAGIC)) != self.MAGIC:
            raise RuntimeError("%s is not a valid %s archive file"
                               % (self.path, self.__class__.__name__))
        if self.lib.read(len(self.pymagic)) != self.pymagic:
            raise RuntimeError("%s has version mismatch to dll" % (self.path))

    def loadtoc(self):
        """ Overridable.
            Default: After magic comes an int (4 byte native) giving the
            position of the TOC within self.lib.
            Default: The TOC is a marshal-able string.
        """
        self.lib.seek(self.start + self.TOCPOS)
        (offset,) = struct.unpack('=i', self.lib.read(4))
        self.lib.seek(self.start + offset)
        self.toc = marshal.load(self.lib)

    ######## This is what is called by FuncImporter #######
    ## Since an Archive is flat, we ignore parent and modname.
    def get_code(self, parent, modname, fqname):
        """Return the importer result for fqname.

        Returns None if not found, (ispkg, code) for a plain module, and
        (ispkg, code, {'__path__': []}) for a package so imputil treats it
        as one.

        BUG FIX: the original had leftover debug prints followed by an early
        return; the package-handling code below it was unreachable and
        referenced an undefined name (`rslt`).
        """
        rslt = self.extract(fqname)
        # None if not found, (ispkg, code) otherwise
        if rslt is None:
            return None
        ispkg, code = rslt
        if ispkg:
            return ispkg, code, {'__path__': []}
        return rslt

    ####### Core method - Override as needed #########
    def extract(self, name):
        """ Get the object corresponding to name, or None.
            For use with imputil ArchiveImporter, object is a python code object.
            'name' is the name as specified in an 'import name'.
            'import a.b' will become:
              extract('a') (return None because 'a' is not a code object)
              extract('a.__init__') (return a code object)
              extract('a.b') (return a code object)
            Default implementation:
              self.toc is a dict
              self.toc[name] is pos
              self.lib has the code object marshal-ed at pos
        """
        ispkg, pos = self.toc.get(name, (0, None))
        if pos is None:
            return None
        self.lib.seek(self.start + pos)
        return ispkg, marshal.load(self.lib)

    ########################################################################
    # Informational methods

    def contents(self):
        """Return a list of the contents
           Default implementation assumes self.toc is a dict like object.
           Not required by ArchiveImporter.
        """
        return self.toc.keys()


##############################################################
#
# ZlibArchive - an archive with compressed entries
#
class ZlibArchive(Archive):
    MAGIC = 'PYZ\0'
    TOCPOS = 8
    HDRLEN = 12
    TRLLEN = 0
    TOCTMPLT = {}
    LEVEL = 9

    def __init__(self, path=None, offset=0):
        Archive.__init__(self, path, offset)
        # dynamic import so not imported if not needed
        global zlib
        import zlib

    def extract(self, name):
        # TOC entries here carry a third element: the compressed length.
        (ispkg, pos, lngth) = self.toc.get(name, (0, None, 0))
        if pos is None:
            return None
        self.lib.seek(self.start + pos)
        return ispkg, marshal.loads(zlib.decompress(self.lib.read(lngth)))
class Point:
    """An integer (x, y) grid point that also carries a display value.

    ``Value`` holds one of the class-level cell markers (``Point.Empty`` or
    ``Point.Full``) and starts out empty. Points compare equal when their
    coordinates match.
    """

    # Cell display markers; a freshly created point is empty.
    Empty = "  "
    Full = "XX"

    def __init__(self, x, y):
        """Create a point at integer coordinates (x, y).

        Raises:
            TypeError: if either coordinate is not exactly an int.
                (The original raised the generic ``Exception``; TypeError is
                still caught by any existing ``except Exception`` handler.)
        """
        # `type(...) is int` (not isinstance) deliberately preserves the
        # original rejection of bool, which is an int subclass.
        if type(x) is not int or type(y) is not int:
            raise TypeError('Can only assign int type to x or y value')
        self.X = x
        self.Y = y
        self.Value = self.Empty

    def __eq__(self, other):
        """Coordinate equality; False for non-Point operands.

        BUG FIX: the original fell off the end and implicitly returned None
        for non-Point operands; an explicit False keeps the result a bool
        while staying falsy like before.
        """
        if not isinstance(other, self.__class__):
            return False
        return self.X == other.X and self.Y == other.Y

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return "P(" + str(self.X) + "," + str(self.Y) + ")"

    def __repr__(self):
        return "P(" + str(self.X) + "," + str(self.Y) + ")"
#!/usr/bin/env python
"""Download data files needed by SLURP."""

# Copyright (C) 2012-2013 Constantine Lignos
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import os
import gzip
import shutil

from datamanager import download, unzip

# Assume that download.py and pennpipeline.py are located in the same
# directory
PIPELINE_NAME = "SUBTLEPipeline-master"
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
PIPELINE_URL = "https://github.com/PennNLP/SUBTLEPipeline/archive/master.zip"
FILENAME = os.path.join(ROOT_DIR, PIPELINE_NAME + ".zip")

# Fetch the pipeline archive and unpack it next to this script.
download(PIPELINE_URL, FILENAME)
unzip(FILENAME, ROOT_DIR)

# Now we need to additionally unzip the model file contained in the pipeline.
print("Decompressing parser model file...")
_models_dir = os.path.join(ROOT_DIR, PIPELINE_NAME, "models")
# BUG FIX: the original leaked both file handles (neither the gzip stream nor
# the output file was closed) and read the whole model into memory;
# copyfileobj streams it in chunks and `with` guarantees cleanup.
with gzip.open(os.path.join(_models_dir, "wsjall.obj.gz")) as model_gz, \
        open(os.path.join(_models_dir, "wsjall.obj"), 'wb') as model_out:
    shutil.copyfileobj(model_gz, model_out)
#####################################################################################
#                                                                                   #
#  Script to update Hostname                                                        #
#                                                                                   #
#  Usage : wsadmin -lang jython -f updateHostName.py <node name > < host name >     #
#                                                                                   #
#####################################################################################

def updateHostName(nodename,hostname):
    """Point the cell's ServerIndex at `hostname` and re-bind server1's
    well-known endpoints on node `nodename` to that host.

    Runs under wsadmin (Jython); AdminConfig and AdminTask are globals
    injected by wsadmin, so this script cannot run under plain Python.
    """
    # Rewrite the hostName attribute of the ServerIndex configuration object.
    nlist = AdminConfig.list('ServerIndex')
    attr=[["hostName", hostname ]]
    AdminConfig.modify(nlist,attr)
    # Re-bind each named endpoint of server1 to the new host, keeping the
    # well-known port numbers; -modifyShared also updates shared endpoints.
    AdminTask.modifyServerPort('server1', '[-nodeName '+ nodename +' -endPointName BOOTSTRAP_ADDRESS -host '+ hostname +' -port 2809 -modifyShared true]')
    AdminTask.modifyServerPort('server1', '[-nodeName '+ nodename +' -endPointName CSIV2_SSL_MUTUALAUTH_LISTENER_ADDRESS -host '+ hostname +' -port 9202 -modifyShared true]')
    AdminTask.modifyServerPort('server1', '[-nodeName '+ nodename +' -endPointName CSIV2_SSL_SERVERAUTH_LISTENER_ADDRESS -host '+ hostname +' -port 9201 -modifyShared true]')
    AdminTask.modifyServerPort('server1', '[-nodeName '+ nodename +' -endPointName ORB_LISTENER_ADDRESS -host '+ hostname +' -port 9900 -modifyShared true]')
    AdminTask.modifyServerPort('server1', '[-nodeName '+ nodename +' -endPointName SAS_SSL_SERVERAUTH_LISTENER_ADDRESS -host '+ hostname +' -port 9901 -modifyShared true]')
    AdminTask.modifyServerPort('server1', '[-nodeName '+ nodename +' -endPointName SOAP_CONNECTOR_ADDRESS -host '+ hostname +' -port 8878 -modifyShared true]')
    # Persist the configuration changes.
    AdminConfig.save()

# NOTE(review): under wsadmin, sys.argv does NOT include the script name, so
# argv[0]/argv[1] really are <node name>/<host name> per the usage banner —
# confirm before running under any other interpreter. `sys` is pre-imported
# by wsadmin.
updateHostName(sys.argv[0], sys.argv[1])
# Packaging metadata for swarm, a modular distributed penetration testing
# tool (Python 2.6/2.7 per the classifiers below).
from setuptools import find_packages
from setuptools import setup

# The long description is read straight from the README; setup() runs while
# the file handle is still open.
with open('README.md') as f:
    setup(
        name = 'swarm',
        version = '0.5.0',
        author = 'arvin.x',
        author_email = 'arvin.x@icloud.com',
        description = 'A modular distributed penetration testing tool',
        license = 'GPLv3',
        long_description = f.read(),
        packages = find_packages(),
        # Installed both as plain scripts and as console entry points.
        scripts = ['swarm.py','swarm_s.py'],
        entry_points = {
            'console_scripts': [
                'swarm = swarm:main',
                'swarm-s = swarm_s:main',
            ]
        },
        install_requires = [
            'pymongo>=3.3.0',
            'beautifulsoup4>=4.5.0',
            'python-libnmap>=0.7.0',
            'requests>=2.7.0',
            'IPy>=0.83',
            'argparse>=1.2.1',
        ],
        # System-wide configuration files (requires root at install time).
        data_files=[
            ('/etc/swarm',['swarm.conf']),
            ('/etc/swarm',['etc/dirsc.conf','etc/domainsc.conf','etc/nmap.conf',
                'etc/sitemap.conf','etc/intruder.conf']),
        ],
        classifiers = [
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 2.6',
        ],
    )
import config


def debug(*args):
    """Forward *args* to print() when debugging is enabled via config.DEBUG;
    otherwise do nothing."""
    if not config.DEBUG:
        return
    print(*args)
#!/usr/bin/env python
from __future__ import division, print_function


def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the ``compat`` subpackage."""
    from numpy.distutils.misc_util import Configuration
    return Configuration('compat', parent_package, top_path)


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(configuration=configuration)
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth.models import Group


def group_required(names, login_url=None):
    """
    Checks if the user is a member of a particular group (or at least one
    group from the list).

    `names` may be a single group name or any iterable of group names.
    Returns a view decorator built on ``user_passes_test``; users failing
    the check are redirected to `login_url`.
    """
    # BUG FIX: on Python 3, str HAS __iter__, so a bare group name would have
    # been treated as an iterable of single characters by the original
    # hasattr() test. Check for strings explicitly (redundant but harmless on
    # Python 2, where str/unicode lack __iter__).
    if isinstance(names, str) or not hasattr(names, '__iter__'):
        names = [names]
    # .exists() issues an EXISTS query instead of evaluating the queryset;
    # same truth value, less work per request.
    return user_passes_test(
        lambda u: u.groups.filter(name__in=names).exists(),
        login_url=login_url)
# LogicalFile Management Providers
#
# Copyright (C) 2013-2014 Red Hat, Inc.  All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
#    this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of the FreeBSD Project.
#
# Authors: Jan Synacek <jsynacek@redhat.com>
#
# NOTE: the module docstring below doubles as the CLI help text (it is passed
# to command.register_subcommands as __doc__), so its wording and the
# %(cmd)s placeholders are functional — do not edit casually.
"""
File and directory management functions.

Usage:
    %(cmd)s list <directory> [ <depth> ]
    %(cmd)s createdir <directory>
    %(cmd)s deletedir <directory>
    %(cmd)s show <target>

Commands:
    list     List a directory. When depth is specified, at most depth levels
             will be listed recursively. The files and directories are listed
             in a tree-like structure.

             Possible listed file types are:

             * F   : Regular data file.
             * Dev : Device file. Can be either block or character device.
             * Dir : Directory.
             * P   : Pipe file.
             * L   : Symbolic link.
             * S   : Unix socket.

    createdir
             Create a directory. The parent directory must exist.

    deletedir
             Delete a directory. The directory must be empty.

    show     Show detailed information about target. Target can be any file
             on the remote system.
"""

from lmi.scripts.common import command
from lmi.scripts.logicalfile import logicalfile


class Lister(command.LmiLister):
    # Backs the 'list' verb; each listed file produces one row of COLUMNS.
    COLUMNS = ('Type', 'Name', 'Mode', 'Current SELinux Context')
    OPT_NO_UNDERSCORES = True
    CALLABLE = logicalfile.lf_list


class Show(command.LmiLister):
    # Backs the 'show' verb; emits Name/Value pairs for a single target.
    COLUMNS = ('Name', 'Value')
    OPT_NO_UNDERSCORES = True
    CALLABLE = logicalfile.lf_show


class CreateDir(command.LmiCheckResult):
    # Backs 'createdir'; EXPECT = None means success is a None return.
    EXPECT = None
    CALLABLE = logicalfile.lf_createdir


class DeleteDir(command.LmiCheckResult):
    # Backs 'deletedir'; success is likewise a None return.
    EXPECT = None
    CALLABLE = logicalfile.lf_deletedir


# Top-level command multiplexer: maps CLI verbs to the classes above.
Lf = command.register_subcommands(
    'Lf', __doc__,
    {
        'list'      : Lister,
        'createdir' : CreateDir,
        'deletedir' : DeleteDir,
        'show'      : Show,
    },
)
# @author Avtandil Kikabidze
# @copyright Copyright (c) 2008-2015, Avtandil Kikabidze aka LONGMAN (akalongman@gmail.com)
# @link http://longman.me
# @license The MIT License (MIT)

# Sublime Text plugin entry module for CodeFormatter: command classes,
# an on-save hook, and the shared run_formatter() driver.

import os
import sys
import sublime
import sublime_plugin

# Detect Sublime Text major version (ST2 reports '' or a <3000 build number).
st_version = 2
if sublime.version() == '' or int(sublime.version()) > 3000:
    st_version = 3

reloader_name = 'codeformatter.reloader'

# ST3 loads each package as a module, so it needs an extra prefix
if st_version == 3:
    reloader_name = 'CodeFormatter.' + reloader_name
    from imp import reload

# Re-run the reloader if it is already loaded (plugin reload support).
if reloader_name in sys.modules:
    reload(sys.modules[reloader_name])


try:
    # Python 3
    from .codeformatter.formatter import Formatter
except (ValueError):
    # Python 2
    from codeformatter.formatter import Formatter

# fix for ST2
cprint = globals()['__builtins__']['print']

debug_mode = False


def plugin_loaded():
    """Called by ST3 once the API is ready; ST2 calls it manually below."""
    cprint('CodeFormatter: Plugin Initialized')

    # settings = sublime.load_settings('CodeFormatter.sublime-settings')
    # debug_mode = settings.get('codeformatter_debug', False)
    # if debug_mode:
    #     from pprint import pprint
    #     pprint(settings)
    #     debug_write('Debug mode enabled')
    #     debug_write('Platform ' + sublime.platform() + ' ' + sublime.arch())
    #     debug_write('Sublime Version ' + sublime.version())
    #     debug_write('Settings ' + pprint(settings))

    # The bundled PHP beautifier phar must be executable on POSIX systems.
    if (sublime.platform() != 'windows'):
        import stat
        path = (
            sublime.packages_path() +
            '/CodeFormatter/codeformatter/lib/phpbeautifier/fmt.phar'
        )
        st = os.stat(path)
        os.chmod(path, st.st_mode | stat.S_IEXEC)


if st_version == 2:
    plugin_loaded()


class CodeFormatterCommand(sublime_plugin.TextCommand):
    """Format the current view (bound to the 'code_formatter' command)."""

    def run(self, edit, syntax=None, saving=None):
        run_formatter(self.view, edit, syntax=syntax, saving=saving)


class CodeFormatterOpenTabsCommand(sublime_plugin.TextCommand):
    """Format every open view in the active window."""

    def run(self, edit, syntax=None):
        window = sublime.active_window()
        for view in window.views():
            # NOTE(review): `edit` belongs to self.view; passing it to other
            # views is presumably tolerated by the ST API in use here, but
            # confirm — edits are normally view-specific.
            run_formatter(view, edit, quiet=True)


class CodeFormatterEventListener(sublime_plugin.EventListener):
    """Triggers format-on-save via the 'code_formatter' command."""

    def on_pre_save(self, view):
        view.run_command('code_formatter', {'saving': True})


class CodeFormatterShowPhpTransformationsCommand(sublime_plugin.TextCommand):
    """Run the bundled PHP beautifier with --list and show the available
    transformations in an output panel."""

    def run(self, edit, syntax=False):
        import subprocess
        import re

        platform = sublime.platform()
        settings = sublime.load_settings('CodeFormatter.sublime-settings')
        opts = settings.get('codeformatter_php_options')

        # Resolve the PHP interpreter (user override or PATH lookup).
        php_path = 'php'
        if ('php_path' in opts and opts['php_path']):
            php_path = opts['php_path']

        php55_compat = False
        if ('php55_compat' in opts and opts['php55_compat']):
            php55_compat = opts['php55_compat']

        # Choose the phar matching the PHP compatibility mode.
        cmd = []
        cmd.append(str(php_path))

        if php55_compat:
            cmd.append(
                '{}/CodeFormatter/codeformatter/lib/phpbeautifier/fmt.phar'.format(
                    sublime.packages_path()))
        else:
            cmd.append(
                '{}/CodeFormatter/codeformatter/lib/phpbeautifier/phpf.phar'.format(
                    sublime.packages_path()))

        cmd.append('--list')
        #print(cmd)

        stderr = ''
        stdout = ''
        try:
            if (platform == 'windows'):
                # Suppress the console window that Popen would otherwise open.
                startupinfo = subprocess.STARTUPINFO()
                startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
                startupinfo.wShowWindow = subprocess.SW_HIDE
                p = subprocess.Popen(
                    cmd, stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                    startupinfo=startupinfo, shell=False,
                    creationflags=subprocess.SW_HIDE)
            else:
                p = subprocess.Popen(
                    cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()
        except Exception as e:
            stderr = str(e)

        if (not stderr and not stdout):
            stderr = 'Error while gethering list of php transformations'

        if len(stderr) == 0 and len(stdout) > 0:
            # Replace the tool's usage banner with a friendlier heading.
            text = stdout.decode('utf-8')
            text = re.sub(
                'Usage:.*?PASSNAME', 'Available PHP Tranformations:', text)
            window = self.view.window()
            pt = window.get_output_panel('paneltranformations')
            pt.set_read_only(False)
            pt.insert(edit, pt.size(), text)
            window.run_command(
                'show_panel', {'panel': 'output.paneltranformations'})
        else:
            show_error('Formatter error:\n' + stderr)


def run_formatter(view, edit, *args, **kwargs):
    """Format `view` in place using the Formatter matching its syntax.

    Keyword args: syntax (explicit syntax override), saving (invoked from
    on_pre_save), quiet (suppress error popups, used for bulk formatting).
    """
    if view.is_scratch():
        show_error('File is scratch')
        return

    # default parameters
    syntax = kwargs.get('syntax')
    saving = kwargs.get('saving', False)
    quiet = kwargs.get('quiet', False)

    formatter = Formatter(view, syntax)
    if not formatter.exists():
        if not quiet and not saving:
            show_error('Formatter for this file type ({}) not found.'.format(
                formatter.syntax))
        return

    if (saving and not formatter.format_on_save_enabled()):
        return

    # Feed the formatter the whole buffer as UTF-8 bytes.
    file_text = sublime.Region(0, view.size())
    file_text_utf = view.substr(file_text).encode('utf-8')
    if (len(file_text_utf) == 0):
        return

    stdout, stderr = formatter.format(file_text_utf)

    # Only replace the buffer on a clean run with non-empty output.
    if len(stderr) == 0 and len(stdout) > 0:
        view.replace(edit, file_text, stdout)
    elif not quiet:
        show_error('Format error:\n' + stderr)


def console_write(text, prefix=False):
    # Write to the ST console, optionally tagged with the plugin name.
    if prefix:
        sys.stdout.write('CodeFormatter: ')
    sys.stdout.write(text + '\n')


def debug_write(text, prefix=False):
    # NOTE(review): the `prefix` parameter is accepted but ignored — the
    # prefix is always forced on. Confirm whether that is intended.
    console_write(text, True)


def show_error(text):
    # Modal error popup, always branded with the plugin name.
    sublime.error_message(u'CodeFormatter\n\n%s' % text)
# Xandikos
# Copyright (C) 2017 Jelmer Vernooij <jelmer@jelmer.uk>, et al.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 3
# of the License or (at your option) any later version of
# the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.

"""VCard file handling.

"""

from .store import File, InvalidFileContents


class VCardFile(File):
    """A stored vCard, validated lazily via the vobject parser."""

    content_type = "text/vcard"

    def __init__(self, content, content_type):
        super(VCardFile, self).__init__(content, content_type)
        # Parsed vobject component, built on first access to `addressbook`.
        self._addressbook = None

    def validate(self):
        """Raise InvalidFileContents unless this looks like a valid vCard."""
        raw = b"".join(self.content).strip()
        # TODO(jelmer): Do more extensive checking of VCards
        has_header = raw.startswith((b"BEGIN:VCARD\r\n", b"BEGIN:VCARD\n"))
        has_trailer = raw.endswith(b"\nEND:VCARD")
        if not (has_header and has_trailer):
            raise InvalidFileContents(
                self.content_type,
                self.content,
                "Missing header and trailer lines",
            )
        if not self.addressbook.validate():
            # TODO(jelmer): Get data about what is invalid
            raise InvalidFileContents(
                self.content_type, self.content, "Invalid VCard file")

    @property
    def addressbook(self):
        """The vobject component for this file, parsed once and cached."""
        if self._addressbook is None:
            import vobject

            decoded = b"".join(self.content).decode('utf-8', 'surrogateescape')
            try:
                self._addressbook = vobject.readOne(decoded)
            except vobject.base.ParseError as exc:
                raise InvalidFileContents(self.content_type, self.content,
                                          str(exc))
        return self._addressbook
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Mox-based view tests for the data_processing job_executions panel.
# NOTE(review): `unicode` below makes this Python-2 only — consistent with
# the mox stack in use; confirm before porting to Python 3.

from django.core.urlresolvers import reverse
from django import http

from mox import IsA  # noqa

from openstack_dashboard import api
from openstack_dashboard.test import helpers as test


INDEX_URL = reverse('horizon:project:data_processing.job_executions:index')
DETAILS_URL = reverse(
    'horizon:project:data_processing.job_executions:details', args=['id'])


class DataProcessingJobExecutionTests(test.TestCase):
    @test.create_stubs({api.sahara: ('job_execution_list',)})
    def test_index(self):
        # Stub the API call, then verify the index template and content.
        api.sahara.job_execution_list(IsA(http.HttpRequest)) \
            .AndReturn(self.job_executions.list())
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(
            res, 'project/data_processing.job_executions/job_executions.html')
        self.assertContains(res, 'Executions')

    @test.create_stubs({api.sahara: ('job_execution_get',)})
    def test_details(self):
        # The details view fetches a single execution by (unicode) id.
        api.sahara.job_execution_get(IsA(http.HttpRequest), IsA(unicode)) \
            .AndReturn(self.job_executions.list()[0])
        self.mox.ReplayAll()
        res = self.client.get(DETAILS_URL)
        self.assertTemplateUsed(
            res, 'project/data_processing.job_executions/details.html')
        self.assertContains(res, 'RUNNING')

    @test.create_stubs({api.sahara: ('job_execution_list',
                                     'job_execution_delete')})
    def test_delete(self):
        # Post the table's delete action and expect a redirect back to index.
        job_exec = self.job_executions.first()
        api.sahara.job_execution_list(IsA(http.HttpRequest)) \
            .AndReturn(self.job_executions.list())
        api.sahara.job_execution_delete(IsA(http.HttpRequest), job_exec.id)
        self.mox.ReplayAll()

        form_data = {'action': 'job_executions__delete__%s' % job_exec.id}
        res = self.client.post(INDEX_URL, form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
        self.assertMessageCount(success=1)
# DaVinci options script: builds a DecayTreeTuple for
# B0 -> K*(892)0 eta' (with eta' -> pi pi eta(->gamma gamma)) from the
# B2XEtaB2etapKstarLine stripping line on 2012 MDST data.
from Gaudi.Configuration import *
from Configurables import GaudiSequencer
from Configurables import DaVinci

simulation=False

# Remove raw/pRec nodes so the (re)reconstruction is not confused by them.
from Configurables import EventNodeKiller
eventNodeKiller = EventNodeKiller('DAQkiller')
eventNodeKiller.Nodes = ['DAQ','pRec']
#MySequencer.Members+=[eventNodeKiller]

from Configurables import DecayTreeTuple
from DecayTreeTuple.Configuration import *

# NOTE: `tuple` shadows the Python builtin; kept as-is since the rest of
# the script refers to it by this name.
tuple=DecayTreeTuple()
# ^ marks the daughters whose branches are filled.
tuple.Decay="[B0 -> ^(K*(892)0 -> ^K+ ^pi-) ^(eta_prime -> ^pi- ^pi+ ^(eta -> ^gamma ^gamma))]CC"
tuple.Branches={"B0":"[B0 -> (K*(892)0 -> K+ pi-) (eta_prime -> pi- pi+ (eta -> gamma gamma))]CC"}
tuple.Inputs=["/Event/Bhadron/Phys/B2XEtaB2etapKstarLine/Particles"]

# Generic tuple tools applied to every branch.
tuple.ToolList += [
    "TupleToolGeometry"
    , "TupleToolDira"
    , "TupleToolAngles"
    , "TupleToolPid"
    , "TupleToolKinematic"
    , "TupleToolPropertime"
    , "TupleToolPrimaries"
    , "TupleToolEventInfo"
    , "TupleToolTrackInfo"
    , "TupleToolVtxIsoln"
    , "TupleToolPhotonInfo"
    #, "TupleToolMCTruth"
    #, "TupleToolMCBackgroundInfo"
    , "TupleToolCaloHypo"
    #, "TupleToolTrackIsolation"
    ]

tuple.addTool(TupleToolDecay,name="B0")

# DecayTreeFitter refit of the B0 head: constrain to the origin PV and to
# the K*0 and eta' masses.
from Configurables import TupleToolDecayTreeFitter
tuple.B0.addTool(TupleToolDecayTreeFitter("PVFit"))
tuple.B0.PVFit.Verbose=True
tuple.B0.PVFit.constrainToOriginVertex=True
tuple.B0.PVFit.daughtersToConstrain = ["K*(892)0","eta_prime"]
tuple.B0.ToolList+=["TupleToolDecayTreeFitter/PVFit"]

# Verbose TISTOS information for the trigger lines of interest.
from Configurables import TupleToolTISTOS
tistos = tuple.B0.addTupleTool(TupleToolTISTOS, name="TupleToolTISTOS")
tistos.VerboseL0=True
tistos.VerboseHlt1=True
tistos.VerboseHlt2=True
tistos.TriggerList=["L0PhotonDecision",
                    "L0ElectronDecision",
                    "Hlt1TrackPhotonDecision",
                    "Hlt1TrackAllL0Decision",
                    "Hlt1TrackMuonDecision",
                    "Hlt1TrackForwardPassThroughDecision",
                    "Hlt1TrackForwardPassThroughLooseDecision",
                    "Hlt1SingleElectronNoIPDecision",
                    "L0HadronDecision",
                    "L0LocalPi0Decision",
                    "L0GlobalPi0Decision",
                    "L0MuonDecision",
                    "Hlt2Topo2BodyBBDTDecision",
                    "Hlt2Topo3BodyBBDTDecision",
                    "Hlt2Topo4BodyBBDTDecision",
                    "Hlt2RadiativeTopoTrackTOSDecision",
                    "Hlt2RadiativeTopoPhotonL0Decision",
                    "Hlt2TopoRad2BodyBBDTDecision",
                    "Hlt2TopoRad2plus1BodyBBDTDecision",
                    "Hlt2Topo2BodySimpleDecision",
                    "Hlt2Topo3BodySimpleDecision",
                    "Hlt2Topo4BodySimpleDecision"]

# Main DaVinci job configuration (2012 real data, micro-DST input).
DaVinci().InputType='MDST'
#DaVinci().RootInTES='/Event/Bhadron/'
DaVinci().UserAlgorithms+=[eventNodeKiller,tuple]
DaVinci().TupleFile="Output.root"
DaVinci().HistogramFile="histos.root"
DaVinci().DataType='2012'
DaVinci().EvtMax=-1
DaVinci().PrintFreq=1000
DaVinci().MoniSequence=[tuple]
DaVinci().Simulation=False
import logging

from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import Http404

from apps.dasforms.forms import ContactForm
from apps.dasforms.utils import get_form

register = template.Library()
logger = logging.getLogger(__name__)


@register.inclusion_tag('dasforms/inline_form.html', takes_context=True)
def render_inline_form(context, formtype):
    """Render an inline, unbound form of the requested type.

    Falls back to a plain ContactForm when ``formtype`` does not resolve to
    a known form class, so the template always receives something renderable.
    """
    # NOTE(review): encode('utf8') returns bytes on Python 3; kept because
    # this module targets legacy Django (django.core.urlresolvers) — confirm
    # before porting.
    formtype = formtype.encode('utf8')
    try:
        formclass = get_form(formtype)
        # BUG FIX: the original called formclass(referer, topic) with two
        # undefined names, raising NameError on every request and silently
        # forcing the ContactForm fallback below. Instantiate unbound.
        form = formclass()
        action = reverse("dasform", kwargs={'formtype': formtype})
    except Exception:
        # Narrowed from a bare `except:`; log with lazy %-args.
        logger.warning('Form class could not be found: %s', formtype)
        form = ContactForm()
        action = reverse("dasform", kwargs={'formtype': 'ContactForm'})
    senturl = reverse("sent")
    return {
        'action': action,
        'form': form,
        'formtype': formtype,
        'senturl': senturl,
    }


@register.inclusion_tag('dasforms/honeypot_field.html')
def render_honeypot_field(field_name=None):
    """
    Renders honeypot field named field_name (defaults to HONEYPOT_FIELD_NAME).
    """
    if not field_name:
        field_name = settings.HONEYPOT_FIELD_NAME
    value = getattr(settings, 'HONEYPOT_VALUE', '')
    if callable(value):
        # HONEYPOT_VALUE may be a callable producing a per-render value.
        value = value()
    return {'fieldname': field_name, 'value': value}
#-*- coding:utf8 -*-
import os
from datetime import datetime


def crawl_folder(folder):
    """Return [(normalized_full_path, entry_name), ...] for the direct
    children of `folder` (no recursion), skipping duplicate paths.

    :param folder: directory whose entries are listed
    """
    entries = []
    # Seed with the folder itself so a normalized child path can never
    # re-introduce the root.
    seen = {folder}
    for entry_name in os.listdir(folder):
        full_path = os.path.normpath(os.path.join(folder, entry_name))
        if full_path not in seen:  # idiom fix: was `not full_path in seen`
            entries.append((full_path, entry_name,))
            seen.add(full_path)
    return entries


class TCustomCounter:
    """Progress counter that writes timestamped status lines to a stream.

    ``add()`` bumps the counter and, when ``verbosity`` is truthy, reports
    every ``interval`` increments; ``log_state()`` reports unconditionally.
    """

    def __init__(self, name, log_stream, verbosity, interval=10):
        self.name = name
        self.verbosity = verbosity
        self.log_stream = log_stream
        self.interval = interval
        self.value = 0

    def _report(self):
        # Shared formatting for add()/log_state(); the original duplicated
        # this write/flush pair verbatim in both methods.
        self.log_stream.write(
            "Logger: " + self.name + ", value: " + str(self.value)
            + ", time: " + str(datetime.now()) + "\n")
        self.log_stream.flush()

    def add(self):
        """Increment the counter, reporting every `interval` ticks when verbose."""
        self.value += 1
        if self.verbosity and self.value % self.interval == 0:
            self._report()

    def log_state(self):
        """Unconditionally report the current counter value."""
        self._report()
#!/usr/bin/python
# Example using an RGB character LCD wired directly to Raspberry Pi or BeagleBone Black.
import time

import Adafruit_CharLCD as LCD

# Raspberry Pi configuration:
lcd_rs = 27  # Change this to pin 21 on older revision Raspberry Pi's
lcd_en = 22
lcd_d4 = 25
lcd_d5 = 24
lcd_d6 = 23
lcd_d7 = 18
lcd_red = 4
lcd_green = 17
lcd_blue = 7  # Pin 7 is CE1

# Define LCD column and row size for 16x2 LCD.
lcd_columns = 16
lcd_rows = 2

# Initialize the LCD using the pins above.
lcd = LCD.Adafruit_RGBCharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7,
                              lcd_columns, lcd_rows, lcd_red, lcd_green,
                              lcd_blue)

# Show some basic colors: the demo is one (backlight colour, text) table
# instead of seven copy-pasted set_color/clear/message/sleep stanzas.
_DEMO_STEPS = (
    ((1.0, 0.0, 0.0), 'Joyeux'),
    ((0.0, 1.0, 0.0), 'Noel'),
    ((0.0, 0.0, 1.0), 'Je vais'),
    ((1.0, 1.0, 0.0), 'te faire'),
    ((0.0, 1.0, 1.0), 'des trucs'),
    ((1.0, 0.0, 1.0), 'de fou'),
    ((1.0, 1.0, 1.0), 'MOUAHHH'),
)
for (red, green, blue), text in _DEMO_STEPS:
    lcd.set_color(red, green, blue)
    lcd.clear()
    lcd.message(text)
    time.sleep(3.0)
# -*- coding: utf-8 -*-
import logging

from unittest import mock

import olympia.core.logger
from olympia.amo.tests import TestCase
from olympia.users.models import UserProfile


class LoggerTests(TestCase):
    """Checks for olympia.core.logger's adapter and formatter plumbing."""

    @staticmethod
    def _adapter_kwargs(addr, username):
        """Kwargs the logger adapter is expected to inject into a record."""
        return {'extra': {'REMOTE_ADDR': addr, 'USERNAME': username}}

    @mock.patch('olympia.core.get_remote_addr', lambda: '127.0.0.1')
    @mock.patch('olympia.core.get_user', lambda: UserProfile(username=u'fôo'))
    def test_get_logger_adapter(self):
        adapter = olympia.core.logger.getLogger('test')
        assert adapter.process('test msg', {}) == (
            'test msg', self._adapter_kwargs('127.0.0.1', u'fôo'))

    @mock.patch('olympia.core.get_remote_addr', lambda: '127.0.0.1')
    @mock.patch('olympia.core.get_user', lambda: None)
    def test_logger_adapter_user_is_none(self):
        # An anonymous (None) user is reported with the '<anon>' placeholder.
        adapter = olympia.core.logger.getLogger('test')
        assert adapter.process('test msg', {}) == (
            'test msg', self._adapter_kwargs('127.0.0.1', '<anon>'))

    @mock.patch('olympia.core.get_remote_addr', lambda: None)
    @mock.patch('olympia.core.get_user', lambda: UserProfile(username='bar'))
    def test_logger_adapter_addr_is_none(self):
        # A missing remote address degrades to an empty string, not None.
        adapter = olympia.core.logger.getLogger('test')
        assert adapter.process('test msg', {}) == (
            'test msg', self._adapter_kwargs('', 'bar'))

    def test_formatter(self):
        record = logging.makeLogRecord({})
        olympia.core.logger.Formatter().format(record)
        # Formatting must backfill both context fields on bare records.
        assert 'USERNAME' in record.__dict__
        assert 'REMOTE_ADDR' in record.__dict__

    def test_json_formatter(self):
        record = logging.makeLogRecord({})
        # These would be set by the adapter.
        record.__dict__['USERNAME'] = 'foo'
        record.__dict__['REMOTE_ADDR'] = '127.0.0.1'
        olympia.core.logger.JsonFormatter().format(record)
        assert record.__dict__['uid'] == 'foo'
        assert record.__dict__['remoteAddressChain'] == '127.0.0.1'
import json
import datetime

from django.core.files.base import ContentFile
from django.core.exceptions import ValidationError
from django.utils.timezone import utc

from ..models import AgentProfile
from ..exceptions import IDNotFoundError, ParamError
from ..utils import etag


class AgentProfileManager():
    """CRUD helper for xAPI agent-profile documents owned by one Agent."""

    def __init__(self, agent):
        self.Agent = agent

    @staticmethod
    def _stamp_updated(p, request_dict):
        """Set p.updated from the 'updated' header, else to 'now' in UTC.

        (Previously copy-pasted verbatim into three methods.)
        """
        if 'updated' in request_dict['headers'] and request_dict['headers']['updated']:
            p.updated = request_dict['headers']['updated']
        else:
            p.updated = datetime.datetime.utcnow().replace(tzinfo=utc)

    def save_non_json_profile(self, p, profile, request_dict):
        """Persist a non-JSON profile body as a file attachment on `p`."""
        p.content_type = request_dict['headers']['CONTENT_TYPE']
        p.etag = etag.create_tag(profile.read())
        self._stamp_updated(p, request_dict)
        # Go to beginning of file
        profile.seek(0)
        fn = "%s_%s" % (p.agent_id, request_dict.get('filename', p.id))
        p.profile.save(fn, profile)
        p.save()

    def post_profile(self, request_dict):
        """POST semantics: create the JSON profile, or merge into an
        existing one (top-level keys, incoming values win)."""
        # get/create profile
        p, created = AgentProfile.objects.get_or_create(
            profile_id=request_dict['params']['profileId'],
            agent=self.Agent)
        post_profile = request_dict['profile']
        # If incoming profile is application/json and if a profile didn't
        # already exist with the same agent and profileId
        if created:
            p.json_profile = post_profile
            p.content_type = "application/json"
            p.etag = etag.create_tag(post_profile)
        # If incoming profile is application/json and if a profile already
        # existed with the same agent and profileId
        else:
            orig_prof = json.loads(p.json_profile)
            post_profile = json.loads(post_profile)
            merged = json.dumps({**orig_prof, **post_profile})
            p.json_profile = merged
            p.etag = etag.create_tag(merged)
        self._stamp_updated(p, request_dict)
        p.save()

    def put_profile(self, request_dict):
        """PUT semantics: overwrite the stored profile entirely."""
        p, created = AgentProfile.objects.get_or_create(
            profile_id=request_dict['params']['profileId'],
            agent=self.Agent)
        # Profile being PUT is not json
        if "application/json" not in request_dict['headers']['CONTENT_TYPE']:
            # Coerce whatever arrived (file-like, raw bytes, or anything
            # str()-able) into a ContentFile. Narrowed from bare `except:`.
            try:
                profile = ContentFile(request_dict['profile'].read())
            except Exception:
                try:
                    profile = ContentFile(request_dict['profile'])
                except Exception:
                    profile = ContentFile(str(request_dict['profile']))
            etag.check_preconditions(request_dict, p, created)
            # If it already exists delete it
            if p.profile:
                try:
                    p.profile.delete()
                except OSError:
                    # probably was json before
                    p.json_profile = {}
            self.save_non_json_profile(p, profile, request_dict)
        # Profile being PUT is json
        else:
            # (overwrite existing profile data)
            etag.check_preconditions(request_dict, p, created)
            the_profile = request_dict['profile']
            p.json_profile = the_profile
            p.content_type = request_dict['headers']['CONTENT_TYPE']
            p.etag = etag.create_tag(the_profile)
            self._stamp_updated(p, request_dict)
            p.save()

    def get_profile(self, profile_id):
        """Return this agent's profile row or raise IDNotFoundError."""
        try:
            return self.Agent.agentprofile_set.get(profile_id=profile_id)
        except AgentProfile.DoesNotExist:
            # Narrowed from a bare `except:` so unrelated errors (DB outages,
            # typos) surface instead of masquerading as a missing id.
            err_msg = 'There is no agent profile associated with the id: %s' % profile_id
            raise IDNotFoundError(err_msg)

    def get_profile_ids(self, since=None):
        """List profile ids, optionally only those updated after `since`."""
        if since:
            try:
                # this expects iso6801 date/time format
                # "2013-02-15T12:00:00+00:00"
                profs = self.Agent.agentprofile_set.filter(updated__gt=since)
            except ValidationError:
                err_msg = 'Since field is not in correct format for retrieval of agent profiles'
                raise ParamError(err_msg)
            return [p.profile_id for p in profs]
        return self.Agent.agentprofile_set.values_list(
            'profile_id', flat=True)

    def delete_profile(self, profile_id):
        """Delete the profile if present; a missing profile is a no-op."""
        try:
            self.get_profile(profile_id).delete()  # we don't want it anyway
        except IDNotFoundError:
            # get_profile() maps DoesNotExist to IDNotFoundError, so the
            # original's extra `except AgentProfile.DoesNotExist` arm was dead.
            pass
# -*- coding: utf-8 -*- # © 2016 OpenSynergy
Indonesia # License AGPL-3.0 or later (htt
p://www.gnu.org/licenses/agpl.html). from . import test_hr_expense
# -*- coding: utf-8 -*- # © 201
6 ClearCorp # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). from . import account
_invoice_incoterm
import datetime  # kept for the commented-out date parsing in SaleRow


class Store:
    """A store record parsed from a TSV line: id <TAB> name."""

    def parse(self, line):
        parts = line.split('\t')
        self.id = parts[0]
        self.name = parts[1]
        return self  # fluent: allows Store().parse(line)

    def __repr__(self):
        return "Store: id={} \t name={}".format(self.id, self.name)


class Product:
    """A product record: id <TAB> name <TAB> category."""

    def parse(self, line):
        parts = line.split('\t')
        self.id = parts[0]
        self.name = parts[1]
        self.category = parts[2]
        return self

    def __repr__(self):
        return "Product: id={} \t name={}".format(self.id, self.name)


class SaleRow:
    """A sales record: day <TAB> store_id <TAB> product_id <TAB> quantity."""

    def parse(self, line):
        parts = line.split('\t')
        self.day = parts[0]
        # maybe parse as date? see below:)
        # self.day=datetime.datetime.strptime(fields[0],"%Y-%m-%d")
        self.store_id = parts[1]
        self.product_id = parts[2]
        self.quantity = int(parts[3])  # let's parse this
        return self

    def __repr__(self):
        return "SaleRow: day={} \t store_id={} \t product_id={} quantity={:d}".format(
            self.day, self.store_id, self.product_id, self.quantity)
# NOTE(review): this span is clipped by the chunk boundary on both sides: it
# opens inside the __init__ signature of the async repositories pager (whose
# class header lies above this view) and closes inside the docstring of
# ListVersionsPager.__init__. The fragments are reproduced verbatim.
se, *, metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.artifactregistry_v1beta2.types.ListRepositoriesRequest):
                The initial request object.
            response (google.cloud.artifactregistry_v1beta2.types.ListRepositoriesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
                the request as metadata.
        """
        self._method = method
        # Copy-construct so mutating page_token below never touches the
        # caller's request object.
        self._request = repository.ListRepositoriesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are proxied to the most recent response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[repository.ListRepositoriesResponse]:
        # Lazily re-issues the RPC with the next page_token until exhausted.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(self) -> AsyncIterator[repository.Repository]:
        async def async_generator():
            async for page in self.pages:
                for response in page.repositories:
                    yield response

        return async_generator()

    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(self.__class__.__name__, self._response)


class ListPackagesPager:
    """A pager for iterating through ``list_packages`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.artifactregistry_v1beta2.types.ListPackagesResponse` object, and
    provides an ``__iter__`` method to iterate through its
    ``packages`` field.

    If there are more pages, the ``__iter__`` method will make additional
    ``ListPackages`` requests and continue to iterate
    through the ``packages`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.artifactregistry_v1beta2.types.ListPackagesResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """

    def __init__(
        self,
        method: Callable[..., package.ListPackagesResponse],
        request: package.ListPackagesRequest,
        response: package.ListPackagesResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.artifactregistry_v1beta2.types.ListPackagesRequest):
                The initial request object.
            response (google.cloud.artifactregistry_v1beta2.types.ListPackagesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
                the request as metadata.
        """
        self._method = method
        self._request = package.ListPackagesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are proxied to the most recent response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[package.ListPackagesResponse]:
        # Lazily re-issues the RPC with the next page_token until exhausted.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterator[package.Package]:
        for page in self.pages:
            yield from page.packages

    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(self.__class__.__name__, self._response)


class ListPackagesAsyncPager:
    """A pager for iterating through ``list_packages`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.artifactregistry_v1beta2.types.ListPackagesResponse` object, and
    provides an ``__aiter__`` method to iterate through its
    ``packages`` field.

    If there are more pages, the ``__aiter__`` method will make additional
    ``ListPackages`` requests and continue to iterate
    through the ``packages`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.artifactregistry_v1beta2.types.ListPackagesResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """

    def __init__(
        self,
        method: Callable[..., Awaitable[package.ListPackagesResponse]],
        request: package.ListPackagesRequest,
        response: package.ListPackagesResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.artifactregistry_v1beta2.types.ListPackagesRequest):
                The initial request object.
            response (google.cloud.artifactregistry_v1beta2.types.ListPackagesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
                the request as metadata.
        """
        self._method = method
        self._request = package.ListPackagesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are proxied to the most recent response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[package.ListPackagesResponse]:
        # Lazily re-issues the RPC with the next page_token until exhausted.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(self) -> AsyncIterator[package.Package]:
        async def async_generator():
            async for page in self.pages:
                for response in page.packages:
                    yield response

        return async_generator()

    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(self.__class__.__name__, self._response)


class ListVersionsPager:
    """A pager for iterating through ``list_versions`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.artifactregistry_v1beta2.types.ListVersionsResponse` object, and
    provides an ``__iter__`` method to iterate through its
    ``versions`` field.

    If there are more pages, the ``__iter__`` method will make additional
    ``ListVersions`` requests and continue to iterate
    through the ``versions`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.artifactregistry_v1beta2.types.ListVersionsResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """

    def __init__(
        self,
        method: Callable[..., version.ListVersionsResponse],
        request: version.ListVersionsRequest,
        response: version.ListVersionsResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.artifactregistry_v1beta2.types.ListVersionsRequest):
                The initial request object.
            response (google.cloud.artifactregistry_v1beta2.types.ListVersionsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
from mock import MagicMock, patch
import pytest

from adbpy.adb import Adb
from adbpy import Target, AdbError


@pytest.fixture
def adb():
    # Adb wired to a mock socket so each test can assert the exact
    # protocol string written to the adb server.
    adb = Adb(())
    adb.socket = MagicMock()
    return adb


def test_get_transport():
    # Symbolic targets map to fixed "host:transport-*" service names; a
    # concrete serial is appended after "host:transport:".
    assert Adb._get_transport(Target.ANY) == "host:transport-any"
    assert Adb._get_transport(Target.USB) == "host:transport-usb"
    assert Adb._get_transport(Target.EMULATOR) == "host:transport-local"
    assert Adb._get_transport("950a8ad5") == "host:transport:950a8ad5"


def test_adb_version(adb):
    adb.version()
    adb.socket.send.assert_called_once_with("host:version")


def test_adb_get_serialno_any(adb):
    # ANY target uses the plain host-prefixed request.
    adb.get_serialno(Target.ANY)
    adb.socket.send.assert_called_once_with("host:get-serialno")


def test_adb_get_serialno_serial(adb):
    # A concrete serial is routed with the "host-serial:<serial>:" prefix.
    adb.get_serialno("6097191b")
    adb.socket.send.assert_called_once_with("host-serial:6097191b:get-serialno")


def test_adb_get_product(adb):
    adb.get_product("950a8ad5")
    adb.socket.send.assert_called_once_with("host-serial:950a8ad5:get-product")


def test_adb_get_devpath(adb):
    adb.get_devpath(Target.USB)
    adb.socket.send.assert_called_once_with("host-usb:get-devpath")


def test_adb_get_state(adb):
    adb.get_state(Target.EMULATOR)
    adb.socket.send.assert_called_once_with("host-local:get-state")


def test_shell(adb):
    # shell() must first select a transport (_setup_target) and then send
    # the "shell:<command>" request.
    with patch.object(Adb, "_setup_target"):
        adb.shell("ls -l")
        adb.socket.send.assert_called_once_with("shell:ls -l")
        adb._setup_target.assert_called_once()


def test_forward(adb):
    # Without norebind the forward spec is "forward:<local>;<remote>".
    device_id = "950a8ad5"
    adb.forward("tcp:6001", "tcp:36001", device_id, norebind=False)
    adb.socket.send.assert_called_once_with("host-serial:950a8ad5:"
                                            "forward:tcp:6001;"
                                            "tcp:36001")


def test_forward_rebind(adb):
    # norebind=True inserts the "norebind:" token before the spec.
    device_id = "950a8ad5"
    adb.forward("tcp:6001", "tcp:36001", device_id, norebind=True)
    adb.socket.send.assert_called_once_with("host-serial:950a8ad5:"
                                            "forward:norebind:"
                                            "tcp:6001;tcp:36001")


def test_devices(adb):
    # devices() parses the "<serial>\t<state>\n" wire format into tuples.
    adb.socket.receive = MagicMock(return_value="950a8ad5\tdevice\n")
    output = adb.devices()
    assert output == [("950a8ad5", "device")]


def test_start(adb):
    # start() must raise when the server process fails to come up.
    adb.process = MagicMock()
    adb.process.running = MagicMock(return_value=False)

    with pytest.raises(AdbError):
        adb.start()
from netools import nextIpInPool, ping, aliveHost, hostsUnDone


def main(ip_start="192.168.56.1", ip_end="192.168.56.5"):
    """Scan the ip_start..ip_end pool and report the routers that answer.

    The pool bounds are parameters with the original values as defaults,
    so callers can scan other ranges without editing the script.
    """
    # Ported from Python-2-only `print` statements (syntax errors on
    # Python 3) to the print() function; dead `aliveHosts = []` removed.
    print("Pools: ", ip_start + " -> " + ip_end)
    print("Scanning online Router on network...")
    alive_hosts = aliveHost(ip_start, ip_end)
    print("online Router:")
    print(alive_hosts)
    # print("New Hosts Alive in Pools:", hostsUnDone(alive_hosts, aliveHost(ip_start, ip_end)))


if __name__ == '__main__':
    main()
# This document is part of Acronym # https://github.com/geowurster/Acronym # =================================================================================== # # # New BSD License #
# Copyright (c) 2014, Kevin D. Wurster # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and
the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * The names of its contributors may not be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # =================================================================================== # """ Algorithms that can be easily ported from one datatype to another with little work """ from . import tileindex
# (clipped MIT license text — the header's opening lines are above this view)
# wing conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import os

import util
from bitcoin import *

# Largest admissible proof-of-work target (the "difficulty 1" target).
MAX_TARGET = 0x00000000FFFF0000000000000000000000000000000000000000000000000000


class Blockchain(util.PrintError):
    '''Manages blockchain headers and their verification'''
    # NOTE(review): Python-2 code throughout (str.encode('hex') /
    # .decode('hex'), integer `/` division). Headers are stored as fixed
    # 80-byte records in a single flat file; heights are derived from file
    # offsets. The final method (connect_chunk) is cut off by the chunk
    # boundary — its `except` arm is not visible here.

    def __init__(self, config, network):
        self.config = config
        self.network = network
        # Bootstrap source for the flat headers file.
        self.headers_url = "https://headers.electrum.org/blockchain_headers"
        self.local_height = 0
        self.set_local_height()

    def height(self):
        # Height of the best locally-stored header.
        return self.local_height

    def init(self):
        # Ensure the headers file exists, then derive our height from it.
        self.init_headers_file()
        self.set_local_height()
        self.print_error("%d blocks" % self.local_height)

    def verify_header(self, header, prev_header, bits, target):
        # A header is valid iff it links to prev_header's hash, carries the
        # expected difficulty bits, and its own hash satisfies the target.
        prev_hash = self.hash_header(prev_header)
        assert prev_hash == header.get('prev_block_hash'), "prev hash mismatch: %s vs %s" % (prev_hash, header.get('prev_block_hash'))
        assert bits == header.get('bits'), "bits mismatch: %s vs %s" % (bits, header.get('bits'))
        _hash = self.hash_header(header)
        assert int('0x' + _hash, 16) <= target, "insufficient proof of work: %s vs target %s" % (int('0x' + _hash, 16), target)

    def verify_chain(self, chain):
        # Verify an in-memory list of headers ordered by increasing height,
        # anchored on the stored header just below the first one.
        first_header = chain[0]
        prev_header = self.read_header(first_header.get('block_height') - 1)
        for header in chain:
            height = header.get('block_height')
            # Difficulty retargets every 2016 blocks (py2 integer division).
            bits, target = self.get_target(height / 2016, chain)
            self.verify_header(header, prev_header, bits, target)
            prev_header = header

    def verify_chunk(self, index, data):
        # Verify one downloaded chunk: `data` is a concatenation of raw
        # 80-byte headers covering retarget period `index`.
        num = len(data) / 80
        prev_header = None
        if index != 0:
            prev_header = self.read_header(index*2016 - 1)
        bits, target = self.get_target(index)
        for i in range(num):
            raw_header = data[i*80:(i+1) * 80]
            header = self.deserialize_header(raw_header)
            self.verify_header(header, prev_header, bits, target)
            prev_header = header

    def serialize_header(self, res):
        # Dict -> 80-byte header as a hex string (little-endian fields).
        s = int_to_hex(res.get('version'), 4) \
            + rev_hex(res.get('prev_block_hash')) \
            + rev_hex(res.get('merkle_root')) \
            + int_to_hex(int(res.get('timestamp')), 4) \
            + int_to_hex(int(res.get('bits')), 4) \
            + int_to_hex(int(res.get('nonce')), 4)
        return s

    def deserialize_header(self, s):
        # 80 raw bytes -> header dict (py2-only: str slices + encode('hex')).
        hex_to_int = lambda s: int('0x' + s[::-1].encode('hex'), 16)
        h = {}
        h['version'] = hex_to_int(s[0:4])
        h['prev_block_hash'] = hash_encode(s[4:36])
        h['merkle_root'] = hash_encode(s[36:68])
        h['timestamp'] = hex_to_int(s[68:72])
        h['bits'] = hex_to_int(s[72:76])
        h['nonce'] = hex_to_int(s[76:80])
        return h

    def hash_header(self, header):
        # Double-SHA256 of the serialized header; the genesis predecessor
        # (header is None) hashes to all zeros by convention.
        if header is None:
            return '0' * 64
        return hash_encode(Hash(self.serialize_header(header).decode('hex')))

    def path(self):
        # Location of the flat headers file.
        return util.get_headers_path(self.config)

    def init_headers_file(self):
        # Download a bootstrap headers file once; on failure fall back to an
        # empty file so sync can proceed header-by-header.
        filename = self.path()
        if os.path.exists(filename):
            return
        try:
            import urllib, socket
            socket.setdefaulttimeout(30)
            self.print_error("downloading ", self.headers_url)
            urllib.urlretrieve(self.headers_url, filename)
            self.print_error("done.")
        except Exception:
            self.print_error("download failed. creating file", filename)
            open(filename, 'wb+').close()

    def save_chunk(self, index, chunk):
        # Write a whole retarget period at its file offset (2016 headers).
        filename = self.path()
        f = open(filename, 'rb+')
        f.seek(index * 2016 * 80)
        h = f.write(chunk)
        f.close()
        self.set_local_height()

    def save_header(self, header):
        # Write a single 80-byte header at height * 80.
        data = self.serialize_header(header).decode('hex')
        assert len(data) == 80
        height = header.get('block_height')
        filename = self.path()
        f = open(filename, 'rb+')
        f.seek(height * 80)
        h = f.write(data)
        f.close()
        self.set_local_height()

    def set_local_height(self):
        # Height follows directly from file size: size/80 records, 0-based.
        name = self.path()
        if os.path.exists(name):
            h = os.path.getsize(name)/80 - 1
            if self.local_height != h:
                self.local_height = h

    def read_header(self, block_height):
        # Read and decode one header from the flat file; returns None when
        # the file is missing or the record is absent/short.
        name = self.path()
        if os.path.exists(name):
            f = open(name, 'rb')
            f.seek(block_height * 80)
            h = f.read(80)
            f.close()
            if len(h) == 80:
                h = self.deserialize_header(h)
                return h

    def get_target(self, index, chain=None):
        # Compute (bits, target) for retarget period `index` following the
        # Bitcoin rule: scale the previous target by the actual/desired
        # timespan ratio, clamped to [1/4, 4] and capped at MAX_TARGET.
        if index == 0:
            return 0x1d00ffff, MAX_TARGET
        first = self.read_header((index-1) * 2016)
        last = self.read_header(index*2016 - 1)
        if last is None:
            # Period boundary not on disk yet: look in the pending chain.
            for h in chain:
                if h.get('block_height') == index*2016 - 1:
                    last = h
        assert last is not None
        # bits to target
        bits = last.get('bits')
        bitsN = (bits >> 24) & 0xff
        assert bitsN >= 0x03 and bitsN <= 0x1d, "First part of bits should be in [0x03, 0x1d]"
        bitsBase = bits & 0xffffff
        assert bitsBase >= 0x8000 and bitsBase <= 0x7fffff, "Second part of bits should be in [0x8000, 0x7fffff]"
        target = bitsBase << (8 * (bitsN-3))
        # new target
        nActualTimespan = last.get('timestamp') - first.get('timestamp')
        nTargetTimespan = 14 * 24 * 60 * 60  # two weeks
        nActualTimespan = max(nActualTimespan, nTargetTimespan / 4)
        nActualTimespan = min(nActualTimespan, nTargetTimespan * 4)
        new_target = min(MAX_TARGET, (target*nActualTimespan) / nTargetTimespan)
        # convert new target to bits
        c = ("%064x" % new_target)[2:]
        while c[:2] == '00' and len(c) > 6:
            c = c[2:]
        bitsN, bitsBase = len(c) / 2, int('0x' + c[:6], 16)
        if bitsBase >= 0x800000:
            bitsN += 1
            bitsBase >>= 8
        new_bits = bitsN << 24 | bitsBase
        return new_bits, bitsBase << (8 * (bitsN-3))

    def connect_header(self, chain, header):
        '''Builds a header chain until it connects.  Returns True if it has
        successfully connected, False if verification failed, otherwise the
        height of the next header needed.'''
        chain.append(header)  # Ordered by decreasing height
        previous_height = header['block_height'] - 1
        previous_header = self.read_header(previous_height)

        # Missing header, request it
        if not previous_header:
            return previous_height

        # Does it connect to my chain?
        prev_hash = self.hash_header(previous_header)
        if prev_hash != header.get('prev_block_hash'):
            self.print_error("reorg")
            return previous_height

        # The chain is complete.  Reverse to order by increasing height
        chain.reverse()
        try:
            self.verify_chain(chain)
            self.print_error("new height:", previous_height + len(chain))
            for header in chain:
                self.save_header(header)
            return True
        except BaseException as e:
            self.print_error(str(e))
            return False

    def connect_chunk(self, idx, hexdata):
        # NOTE(review): truncated by the chunk boundary — the `except` arm
        # of this try block lies beyond the visible source.
        try:
            data = hexdata.decode('hex')
            self.verify_chunk(idx, data)
            self.print_error("validated chunk %d" % idx)
            self.save_chunk(idx, data)
#!/usr/bin/env python
"""Django command-line entry point for the django_website project."""
import os
import sys


def main():
    """Point Django at the project settings and dispatch argv to it."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_website.settings")

    # Imported lazily so merely importing this module never pulls in Django.
    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (C) 2013-2019: SCS Software

import bpy
from bpy.types import Panel

from io_scs_tools.ui import shared as _shared


class _MeshPanelBlDefs(_shared.HeaderIconPanel):
    # Shared bl_* defaults for SCS mesh panels in the Properties editor.
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_ui_units_x = 15

    @classmethod
    def poll(cls, context):
        # Only show the panel for an active object of mesh type.
        return hasattr(context, "active_object") and context.active_object and context.active_object.type == "MESH"

    def get_layout(self):
        """Returns layout depending where it's drawn into.
        If popover create extra box to make it distinguishable between different sub-panels."""
        if self.is_popover:
            layout = self.layout.box().column()
        else:
            layout = self.layout

        return layout


class SCS_TOOLS_PT_Mesh(_MeshPanelBlDefs, Panel):
    """
    Creates "SCS Mesh" panel in the Object properties window.
    """
    bl_label = "SCS Mesh"
    bl_context = "data"

    def draw(self, context):
        """UI draw function.

        :param context: Blender Context
        :type context: bpy.context
        """
        if not self.poll(context):
            self.layout.label(text="No active mesh object!", icon="INFO")
            return

        layout = self.get_layout()
        # Currently unused: there are no SCS mesh settings to draw yet
        # (see the commented-out register() below).
        mesh = context.active_object.data

        layout.use_property_split = True
        layout.use_property_decorate = False


classes = (
    SCS_TOOLS_PT_Mesh,
)


def register():
    # for cls in classes:
    #     bpy.utils.register_class(cls)
    #
    # from io_scs_tools import SCS_TOOLS_MT_MainMenu
    # SCS_TOOLS_MT_MainMenu.append_props_entry("Mesh Properties", SCS_TOOLS_PT_Mesh.__name__)

    # No mesh settings available currently, thus commented out and just passing
    pass


def unregister():
    # for cls in classes:
    #     bpy.utils.unregister_class(cls)

    # No mesh settings available currently, thus commented out and just passing
    pass
"""
Flask-Blogging is a Flask extension to add blog support to
your web application.

Author: Gouthaman Balaraman
Date: June 1, 2015
"""
# BUG FIX: this string literal previously appeared AFTER the imports, where
# Python evaluates and discards it — it never became the module docstring
# (``__doc__`` stayed None). The first statement of a module is its docstring,
# so it now lives at the top. Public exports below are unchanged.
from .engine import BloggingEngine
from .processor import PostProcessor
from .sqlastorage import SQLAStorage
from .storage import Storage

__author__ = 'Gouthaman Balaraman'
__version__ = '0.4.2'
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Request body validating middleware for OpenStack Identity resources."""

from keystone.common.validation import validators


def lazy_validate(request_body_schema, resource_to_validate):
    """A non-decorator way to validate a request, to be used inline.

    :param request_body_schema: a schema to validate the resource reference
    :param resource_to_validate: dictionary to validate
    :raises keystone.exception.ValidationError: if `resource_to_validate` is
            None. (see wrapper method below).
    :raises TypeError: at decoration time when the expected resource to
                       validate isn't found in the decorated method's
                       signature

    """
    schema_validator = validators.SchemaValidator(request_body_schema)
    schema_validator.validate(resource_to_validate)


def nullable(property_schema):
    """Clone a property schema into one that is nullable.

    The input schema is never modified; both the returned dict and any list
    values touched here ('type', 'enum') are fresh copies.

    :param dict property_schema: schema to clone into a nullable schema
    :returns: a new dict schema
    """
    new_schema = property_schema.copy()
    type_value = property_schema['type']
    if isinstance(type_value, list):
        # 'type' may already be a list of JSON Schema types; add 'null' only
        # if missing, and copy the list so the caller's schema is untouched.
        if 'null' in type_value:
            new_schema['type'] = list(type_value)
        else:
            new_schema['type'] = type_value + ['null']
    else:
        new_schema['type'] = [type_value, 'null']
    # NOTE(kmalloc): If enum is specified (such as our boolean case) ensure we
    # add null to the enum as well so that null can be passed/validated as
    # expected. Without adding to the enum, null will not validate as enum is
    # explicitly listing valid values. According to the JSON Schema
    # specification, the values must be unique in the enum array.
    if 'enum' in new_schema and None not in new_schema['enum']:
        # BUG FIX: dict.copy() is shallow, so appending in place would have
        # mutated the caller's enum list too; build a new list instead.
        # In the enum the 'null' is NoneType
        new_schema['enum'] = new_schema['enum'] + [None]
    return new_schema
#!/usr/bin/python3
import argparse
import os
import sys

import configuration
import shouter
from gitFunctions import Commiter
from gitFunctions import Initializer, Differ
from rtcFunctions import ImportHandler
from rtcFunctions import RTCInitializer
from rtcFunctions import RTCLogin
from rtcFunctions import WorkspaceHandler


def initialize():
    """Create a fresh work directory, init the git repo and the RTC workspace.

    Aborts via sys.exit if the configured work directory already exists,
    to avoid clobbering a previous migration.
    """
    config = configuration.get()
    directory = config.workDirectory
    if os.path.exists(directory):
        sys.exit("Configured directory '" + directory + "' already exists, please make sure to use a " +
                 "non-existing directory")
    shouter.shout("Migration will take place in " + directory)
    os.makedirs(directory)
    os.chdir(directory)
    config.deletelogfolder()
    git = Initializer()
    git.initalize()
    RTCInitializer.initialize()
    # Only commit if the freshly loaded RTC workspace actually produced files.
    if Differ.has_diff():
        git.initialcommit()
    Commiter.pushmaster()


def resume():
    """Continue a previously started migration inside the existing git clone.

    Aborts via sys.exit if the clone has uncommitted changes, since the
    importer relies on a clean working tree.
    """
    shouter.shout("Found existing git repo in work directory, resuming migration...")
    config = configuration.get()
    os.chdir(config.workDirectory)
    os.chdir(config.clonedGitRepoName)
    if Differ.has_diff():
        sys.exit("Your git repo has some uncommited changes, please add/remove them manually")
    RTCLogin.loginandcollectstreamuuid()
    Initializer.preparerepo()
    # previousstreamname set -> a previous stream was already migrated; pick up
    # from its branchpoint, otherwise start a fresh branch for this stream.
    if config.previousstreamname:
        prepare()
    else:
        Commiter.branch(config.streamname)
    WorkspaceHandler().load()


def existsrepo():
    """Return True if the git repo of a previous run exists in the work directory."""
    config = configuration.get()
    repodirectory = os.path.join(config.workDirectory, config.gitRepoName)
    return os.path.exists(repodirectory)


def migrate():
    """Drive the whole RTC-to-git migration for the configured stream."""
    rtc = ImportHandler()
    rtcworkspace = WorkspaceHandler()
    git = Commiter

    if existsrepo():
        resume()
    else:
        initialize()

    config = configuration.get()
    streamuuid = config.streamuuid
    streamname = config.streamname
    branchname = streamname + "_branchpoint"
    componentbaselineentries = rtc.getcomponentbaselineentriesfromstream(streamuuid)
    rtcworkspace.setnewflowtargets(streamuuid)
    history = rtc.readhistory(componentbaselineentries, streamname)
    changeentries = rtc.getchangeentriesofstreamcomponents(componentbaselineentries)

    # First replay everything up to the creation of the stream onto the
    # "<stream>_branchpoint" branch ...
    if len(changeentries) > 0:
        git.branch(branchname)
        rtc.acceptchangesintoworkspace(rtc.getchangeentriestoaccept(changeentries, history))
        shouter.shout("All changes until creation of stream '%s' accepted" % streamname)
        git.pushbranch(branchname)

    rtcworkspace.setcomponentstobaseline(componentbaselineentries, streamuuid)
    rtcworkspace.load()

    # ... then replay the stream's own changes onto the stream branch.
    git.branch(streamname)
    changeentries = rtc.getchangeentriesofstream(streamuuid)

    amountofacceptedchanges = rtc.acceptchangesintoworkspace(rtc.getchangeentriestoaccept(changeentries, history))
    if amountofacceptedchanges > 0:
        git.pushbranch(streamname)
        git.promotebranchtomaster(streamname)

    RTCLogin.logout()
    summary(streamname)


def prepare():
    """Reset the RTC workspace onto the branchpoint baselines of the previous stream."""
    config = configuration.get()
    rtc = ImportHandler()
    rtcworkspace = WorkspaceHandler()

    # git checkout branchpoint
    Commiter.checkout(config.previousstreamname + "_branchpoint")

    # list baselines of current workspace
    componentbaselineentries = rtc.getcomponentbaselineentriesfromstream(config.previousstreamuuid)

    # set components to that baselines
    rtcworkspace.setcomponentstobaseline(componentbaselineentries, config.previousstreamuuid)
    rtcworkspace.load()


def summary(streamname):
    """Print a completion message and list ignored files that still exist in RTC."""
    config = configuration.get()
    shouter.shout("\nAll changes accepted - Migration of stream '%s' is completed."
                  "\nYou can distribute the git-repo '%s'." % (streamname, config.gitRepoName))
    if len(config.ignorefileextensions) > 0:
        # determine and log the ignored but still present files
        os.chdir(config.workDirectory)
        os.chdir(config.clonedGitRepoName)
        pathtoclonedgitrepo = config.workDirectory + os.sep + config.clonedGitRepoName
        if pathtoclonedgitrepo[-1:] != os.sep:
            pathtoclonedgitrepo += os.sep
        ignoredbutexist = []
        with open('.gitignore', 'r') as gitignore:
            for line in gitignore.readlines():
                line = line.strip()
                # .jazz5/.metadata/.jazzShed are RTC bookkeeping dirs, always ignored.
                if line != ".jazz5" and line != ".metadata" and line != ".jazzShed":
                    pathtoignored = pathtoclonedgitrepo + line
                    if os.path.exists(pathtoignored):
                        ignoredbutexist.append(line)
        if len(ignoredbutexist) > 0:
            shouter.shout("\nThe following files have been ignored in the new git repository, " +
                          "but still exist in the actual RTC workspace:")
            ignoredbutexist.sort()
            for ignored in ignoredbutexist:
                shouter.shout("\t" + ignored)


def parsecommandline():
    """Parse CLI arguments and push them into the configuration module."""
    parser = argparse.ArgumentParser()
    configfiledefault = 'config.ini'
    configfilehelp = 'name of the config file, or full path to the config file; defaults to ' + configfiledefault
    parser.add_argument('-c', '--configfile', metavar='file', dest='configfile', help=configfilehelp,
                        default=configfiledefault)
    parser.add_argument('-u', '--user', metavar='user', dest='user', help='RTC user', default=None)
    parser.add_argument('-p', '--password', metavar='password', dest='password', help='RTC password', default=None)
    parser.add_argument('-s', '--stored', help='Use stored password for the repository connection',
                        action='store_true')
    arguments = parser.parse_args()
    configuration.setconfigfile(arguments.configfile)
    configuration.setUser(arguments.user)
    configuration.setPassword(arguments.password)
    configuration.setStored(arguments.stored)


def validate():
    """Exit early if any configured stream name is not a valid git branch name."""
    config = configuration.get()
    streamname = config.streamname
    branchname = streamname + "_branchpoint"
    previousstreamname = config.previousstreamname
    offendingbranchname = None
    if not Commiter.checkbranchname(streamname):
        offendingbranchname = streamname
    elif not Commiter.checkbranchname(branchname):
        offendingbranchname = branchname
    elif not Commiter.checkbranchname(previousstreamname):
        offendingbranchname = previousstreamname
    if offendingbranchname:
        sys.exit(offendingbranchname + " is not a valid git branch name - consider renaming the stream")


if __name__ == "__main__":
    parsecommandline()
    validate()
    migrate()
""" Lua pattern matcher based on a NFA
inspired by http://swtch.com/~rsc/regexp/regexp1.html
"""

from rpyre.interface.lua import compile_re
from rpyre.matching import find

# NOTE: this is RPython (translated by the PyPy toolchain), hence the
# Python-2 print statements and the restricted idioms throughout.

def main(args):
    """Compile the Lua pattern given as argv[1] and match it against stdin."""
    n = 20
    s = args[1]
    #s = "(a|b)*a%sa(a|b)*$"  % ("(a|b)" * n, )
    print s
    evilregex = compile_re(s)
    import os
    chunks = []
    # use os.read to be RPython compatible
    while True:
        s = os.read(0, 4096)
        if not s:
            break
        chunks.append(s)
    s = "".join(chunks)
    print len(s)
    print find(evilregex, s, 0)
    """
    for x in find2(evilregex, s, 0):
        print x
    """
    return 0

# needed for the PyPy translation toolchain
def target(*args):
    # Entry point descriptor expected by the translator: (main, annotation).
    return main, None

def jitpolicy(*args):
    # Import lazily: rpython is only available inside the translation toolchain.
    from rpython.jit.codewriter.policy import JitPolicy
    return JitPolicy()

if __name__ == '__main__':
    import sys
    sys.exit(main(sys.argv))
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Shared constants for Neutron advanced service plugins.

Defines the known service types, their extension-alias and URL-prefix
mappings, operational status values, and a few protocol / network-type
string constants used across plugins.
"""

# service type constants:
CORE = "CORE"
DUMMY = "DUMMY"
LOADBALANCER = "LOADBALANCER"
FIREWALL = "FIREWALL"
VPN = "VPN"
METERING = "METERING"
L3_ROUTER_NAT = "L3_ROUTER_NAT"


#maps extension alias to service type
EXT_TO_SERVICE_MAPPING = {
    'dummy': DUMMY,
    'lbaas': LOADBALANCER,
    'fwaas': FIREWALL,
    'vpnaas': VPN,
    'metering': METERING,
    'router': L3_ROUTER_NAT
}

# TODO(salvatore-orlando): Move these (or derive them) from conf file
ALLOWED_SERVICES = [CORE, DUMMY, LOADBALANCER, FIREWALL, VPN, METERING,
                    L3_ROUTER_NAT]

# URL path prefix each service's API resources are mounted under
# (CORE and L3 routing live at the API root, hence the empty strings).
COMMON_PREFIXES = {
    CORE: "",
    DUMMY: "/dummy_svc",
    LOADBALANCER: "/lb",
    FIREWALL: "/fw",
    VPN: "/vpn",
    METERING: "/metering",
    L3_ROUTER_NAT: "",
}

# Service operation status constants
ACTIVE = "ACTIVE"
DOWN = "DOWN"
PENDING_CREATE = "PENDING_CREATE"
PENDING_UPDATE = "PENDING_UPDATE"
PENDING_DELETE = "PENDING_DELETE"
INACTIVE = "INACTIVE"
ERROR = "ERROR"

# FWaaS firewall rule action
FWAAS_ALLOW = "allow"
FWAAS_DENY = "deny"

# L3 Protocol name constants
TCP = "tcp"
UDP = "udp"
ICMP = "icmp"

# Network Type constants
TYPE_FLAT = 'flat'
TYPE_GRE = 'gre'
TYPE_LOCAL = 'local'
TYPE_VXLAN = 'vxlan'
TYPE_VLAN = 'vlan'
TYPE_NONE = 'none'
# au
thor: Milan Kubi
k
#####################################################
#
# A library for getting match information for a given team at a given event
# out of the Blue Alliance API
#
# Authors: Andrew Merrill and Jacob Bendicksen (Fall 2014)
#
# Requires the blueapi.py library
######################################################

import blueapi

# Example values used when exercising this module interactively.
teamNumber = 1540
eventKey = '2014pncmp'


def _sorted_match_numbers(match_list, comp_level):
    """Return the sorted 'match_number' values of matches at one comp level.

    :param match_list: list of match dicts as returned by blueapi
    :param comp_level: one of 'qm', 'qf', 'sf', 'f'
    """
    numbers = [match['match_number'] for match in match_list
               if match['comp_level'] == comp_level]
    numbers.sort()
    return numbers


def _team_event_matches(teamNumber, eventKey):
    """Fetch the team's matches at an event exactly once.

    BUG FIX: the original code called blueapi.getTeamEventMatches with three
    different argument lists — (eventKey, teamNumber), (teamNumber, eventKey)
    and just (eventKey) — and re-fetched from the API on every loop iteration.
    We now fetch once with a single, consistent argument order.
    NOTE(review): (teamNumber, eventKey) matches these functions' own
    signatures; confirm against blueapi.py's actual signature.
    """
    return blueapi.getTeamEventMatches(teamNumber, eventKey)


#returns a list of qualification matches that the team played in
def getTeamQualMatches(teamNumber, eventKey):
    return _sorted_match_numbers(_team_event_matches(teamNumber, eventKey), 'qm')


#returns a list of qualification matches at the event
def getQualMatches(eventKey):
    return _sorted_match_numbers(blueapi.getEventMatches(eventKey), 'qm')


#returns a list of quarterfinal matches that the team played in
def getTeamQFMatches(teamNumber, eventKey):
    # BUG FIX: previously dropped teamNumber when calling blueapi.
    return _sorted_match_numbers(_team_event_matches(teamNumber, eventKey), 'qf')


#returns a list of quarterfinal matches at the event
def getQFMatches(eventKey):
    return _sorted_match_numbers(blueapi.getEventMatches(eventKey), 'qf')


#returns a list of semifinal matches that the team played in
def getTeamSFMatches(teamNumber, eventKey):
    # BUG FIX: previously dropped teamNumber when calling blueapi.
    return _sorted_match_numbers(_team_event_matches(teamNumber, eventKey), 'sf')


#returns a list of semifinal matches at the event
def getSFMatches(eventKey):
    return _sorted_match_numbers(blueapi.getEventMatches(eventKey), 'sf')


#returns a list of finals matches that the team played in
def getTeamFMatches(teamNumber, eventKey):
    # BUG FIX: previously dropped teamNumber when calling blueapi.
    return _sorted_match_numbers(_team_event_matches(teamNumber, eventKey), 'f')


#returns a list of finals matches at the event
def getFMatches(eventKey):
    return _sorted_match_numbers(blueapi.getEventMatches(eventKey), 'f')


def getMatchRedScore(matchNumber, eventKey):
    """Return the red alliance's score for the given match."""
    return blueapi.getEventMatches(eventKey)[matchNumber]['alliances']['red']['score']


def getMatchBlueScore(matchNumber, eventKey):
    """Return the blue alliance's score for the given match.

    BUG FIX: previously returned ['blue']['teams'] (the team list) instead of
    the score — an obvious copy-paste slip from getMatchBlueTeams below.
    """
    return blueapi.getEventMatches(eventKey)[matchNumber]['alliances']['blue']['score']


def getMatchRedTeams(matchNumber, eventKey):
    """Return the list of teams on the red alliance for the given match."""
    return blueapi.getEventMatches(eventKey)[matchNumber]['alliances']['red']['teams']


def getMatchBlueTeams(matchNumber, eventKey):
    """Return the list of teams on the blue alliance for the given match."""
    return blueapi.getEventMatches(eventKey)[matchNumber]['alliances']['blue']['teams']


def getMatchVideo(matchNumber, eventKey):
    """Return a link/key for the first youtube or TBA video of the match.

    Returns None if the match has no video. BUG FIX: previously iterated a
    hard-coded range(0, 5), which raised IndexError whenever a match had
    fewer than five videos.
    """
    videos = blueapi.getEventMatches(eventKey)[matchNumber]['videos']
    for video in videos:
        if video['type'] == 'youtube':
            return "youtu.be/" + video['key']
        elif video['type'] == 'tba':
            return video['key']


def getSetNumber(matchNumber, eventKey):
    """Return the elimination set number of the given match."""
    return blueapi.getEventMatches(eventKey)[matchNumber]['set_number']


def getTimeString(matchNumber, eventKey):
    """Return the human-readable scheduled time of the given match."""
    return blueapi.getEventMatches(eventKey)[matchNumber]['time_string']


def getMatchKey(matchNumber, eventKey):
    """Return the TBA key of the given match."""
    return blueapi.getEventMatches(eventKey)[matchNumber]['key']


def getMatchTime(matchNumber, eventKey):
    """Return the scheduled time of the given match."""
    return blueapi.getEventMatches(eventKey)[matchNumber]['time']


def getScoreBreakdown(matchNumber, eventKey):
    """Return the per-alliance score breakdown of the given match."""
    return blueapi.getEventMatches(eventKey)[matchNumber]['score_breakdown']
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tempest_lib.common.utils import data_utils

from tempest.api.network import base
from tempest import test


class ExtraDHCPOptionsTestJSON(base.BaseNetworkTest):
    """
    Tests the following operations with the Extra DHCP Options Neutron API
    extension:

        port create
        port list
        port show
        port update

    v2.0 of the Neutron API is assumed. It is also assumed that the Extra
    DHCP Options extension is enabled in the [network-feature-enabled]
    section of etc/tempest.conf
    """

    @classmethod
    def skip_checks(cls):
        # Skip the whole class when the extra_dhcp_opt extension is disabled.
        super(ExtraDHCPOptionsTestJSON, cls).skip_checks()
        if not test.is_extension_enabled('extra_dhcp_opt', 'network'):
            msg = "Extra DHCP Options extension not enabled."
            raise cls.skipException(msg)

    @classmethod
    def resource_setup(cls):
        # Shared fixtures: one network/subnet/port plus the option payloads.
        # The IPv6 subclass at the bottom flips _ip_version, so pick
        # address literals matching the version under test.
        super(ExtraDHCPOptionsTestJSON, cls).resource_setup()
        cls.network = cls.create_network()
        cls.subnet = cls.create_subnet(cls.network)
        cls.port = cls.create_port(cls.network)
        cls.ip_tftp = ('123.123.123.123' if cls._ip_version == 4
                       else '2015::dead')
        cls.ip_server = ('123.123.123.45' if cls._ip_version == 4
                         else '2015::badd')
        cls.extra_dhcp_opts = [
            {'opt_value': 'pxelinux.0', 'opt_name': 'bootfile-name'},
            {'opt_value': cls.ip_tftp, 'opt_name': 'tftp-server'},
            {'opt_value': cls.ip_server, 'opt_name': 'server-ip-address'}
        ]

    @test.idempotent_id('d2c17063-3767-4a24-be4f-a23dbfa133c9')
    def test_create_list_port_with_extra_dhcp_options(self):
        # Create a port with Extra DHCP Options
        body = self.client.create_port(
            network_id=self.network['id'],
            extra_dhcp_opts=self.extra_dhcp_opts)
        port_id = body['port']['id']
        self.addCleanup(self.client.delete_port, port_id)

        # Confirm port created has Extra DHCP Options
        body = self.client.list_ports()
        ports = body['ports']
        port = [p for p in ports if p['id'] == port_id]
        self.assertTrue(port)
        self._confirm_extra_dhcp_options(port[0], self.extra_dhcp_opts)

    @test.idempotent_id('9a6aebf4-86ee-4f47-b07a-7f7232c55607')
    def test_update_show_port_with_extra_dhcp_options(self):
        # Update port with extra dhcp options
        name = data_utils.rand_name('new-port-name')
        body = self.client.update_port(
            self.port['id'],
            name=name,
            extra_dhcp_opts=self.extra_dhcp_opts)
        # Confirm extra dhcp options were added to the port
        body = self.client.show_port(self.port['id'])
        self._confirm_extra_dhcp_options(body['port'], self.extra_dhcp_opts)

    def _confirm_extra_dhcp_options(self, port, extra_dhcp_opts):
        # Assert the port carries exactly the expected option set; the
        # for/else fails the test for any retrieved option with no match.
        retrieved = port['extra_dhcp_opts']
        self.assertEqual(len(retrieved), len(extra_dhcp_opts))
        for retrieved_option in retrieved:
            for option in extra_dhcp_opts:
                if (retrieved_option['opt_value'] == option['opt_value'] and
                        retrieved_option['opt_name'] == option['opt_name']):
                    break
            else:
                self.fail('Extra DHCP option not found in port %s' %
                          str(retrieved_option))


class ExtraDHCPOptionsIpV6TestJSON(ExtraDHCPOptionsTestJSON):
    # Re-run the whole suite against IPv6 addresses.
    _ip_version = 6
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)

    OpenAPI spec version: v1.6.1

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

# NOTE: auto-generated test stub — regenerate rather than hand-edit.

from __future__ import absolute_import

import os
import sys
import unittest

import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v2alpha1_horizontal_pod_autoscaler_spec import V2alpha1HorizontalPodAutoscalerSpec


class TestV2alpha1HorizontalPodAutoscalerSpec(unittest.TestCase):
    """ V2alpha1HorizontalPodAutoscalerSpec unit test stubs """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testV2alpha1HorizontalPodAutoscalerSpec(self):
        """
        Test V2alpha1HorizontalPodAutoscalerSpec
        """
        # Smoke test only: verifies the generated model can be constructed.
        model = kubernetes.client.models.v2alpha1_horizontal_pod_autoscaler_spec.V2alpha1HorizontalPodAutoscalerSpec()


if __name__ == '__main__':
    unittest.main()
from django import forms from django.utils.html import escape from django.forms.utils import ErrorList from django.core.exceptions import ValidationError from django.utils.translation import ugettext_lazy as _ from booktype.utils.misc import booktype_slugify from booki.editor.models import BookiGroup from booktype.utils import misc from booktype.apps.core.forms import BaseBooktypeForm from widgets import RemovableImageWidget class SpanErrorList(ErrorList): def __unicode__(self): return unicode(self.as_spans()) def as_spans(self): return "<span style='color: red'>%s</span>" % ( ",".join([e for e in self]) ) class BaseGroupForm(BaseBooktypeForm, forms.ModelForm): name = forms.CharField() description = forms.CharField( label=_('Description (250 characters)'), required=False, max_length=250, widget=forms.Textar
ea(attrs={'rows': '10', 'cols': '40'}) ) group_image = forms.FileField( label=_('Group image'), required=False, widget=RemovableImageWidget(attrs={ 'label_class': 'checkbox-inline', 'input_class': 'group-image-removable' } ) ) class Meta: model = BookiGroup fields = [
'name', 'description' ] def __init__(self, *args, **kwargs): kwargs.update({'error_class': SpanErrorList}) super(BaseGroupForm, self).__init__(*args, **kwargs) def clean_name(self): new_url_name = booktype_slugify(self.cleaned_data['name']) group_data_url_name = BookiGroup.objects.filter(url_name=new_url_name).exclude(pk=self.instance.pk) if len(group_data_url_name) > 0: raise ValidationError(_('Group name is already in use')) return self.cleaned_data.get('name', '') def clean_description(self): return escape(self.cleaned_data.get('description', '')) def set_group_image(self, group_id, group_image): try: filename = misc.set_group_image(group_id, group_image, 240, 240) if len(filename) == 0: raise ValidationError(_('Only JPEG file is allowed for group image.')) else: misc.set_group_image( "{}_small".format(group_id), group_image, 18, 18) except Exception as err: # TODO: we should do something here print err class GroupCreateForm(BaseGroupForm): pass class GroupUpdateForm(BaseGroupForm): def clean_group_image(self): group_image = self.files.get('group_image', None) group_id = str(self.instance.pk) if group_image: self.set_group_image(group_id, group_image) return group_image
# coding=utf-8
from __future__ import absolute_import

import gevent
from gevent.pywsgi import WSGIHandler
import sys
from webob import Request
from .response import Response
from .socket import Socket
from ..event_emitter import EventEmitter
from .transports import WebsocketTransport

import logging

logger = logging.getLogger(__name__)

__all__ = ['EngineHandler']


class EngineHandler(WSGIHandler, EventEmitter):
    """
    The WSGIHandler for EngineServer
    It filters out interested requests and process them, leave other requests to the WSGIHandler
    """
    # Default transports; overridden per-server via server_context.transports.
    transports = ('polling', 'websocket')

    def __init__(self, server_context, *args, **kwargs):
        super(EngineHandler, self).__init__(*args, **kwargs)
        EventEmitter.__init__(self)

        self.server_context = server_context
        if self.server_context.transports:
            self.transports = self.server_context.transports

    def bind_framework_info(self, socket):
        # Run framework's wsgi application to hook up framework specific info eg. request
        # This is why we define /socket.io url in web frameworks and points them to a view
        logger.debug("[EngineHandler] Bind the framework specific info to engine socket")
        self.environ['engine_socket'] = socket
        try:
            # Throw-away start_response: the framework response body is not
            # sent to the client, the call is only made for its side effects.
            def start_response(status, headers):
                logger.debug("[EngineHandler] [%s] [%s]" % (status, headers))
            res = self.application(self.environ, start_response)
            logger.debug("[EngineHandler] %s" % res)
        except Exception, e:
            logger.debug("[EngineHandler] bind framework info met exception %s" % e)
            self.handle_error(*sys.exc_info())

    def handle_one_response(self):
        """
        There are 3 situations we get a new request:

        1. Handshake.
        2. Upgrade.
        3. Polling Request.

        After the transport been upgraded, all data transferring handled by the WebSocketTransport
        """
        path = self.environ.get('PATH_INFO')
        # Anything outside the engine's resource path is plain WSGI traffic.
        if not path.lstrip('/').startswith(self.server_context.resource + '/'):
            return super(EngineHandler, self).handle_one_response()

        # Create a request and a response
        request = Request(self.get_environ())
        setattr(request, 'handler', self)
        setattr(request, 'response', Response())

        logger.debug("[EngineHandler] Incoming request with %s" % request.GET)

        # Upgrade the websocket if needed
        is_websocket = False
        if request.GET.get("transport", None) == "websocket":
            if 'Upgrade' in request.headers:
                logger.debug("[EngineHandler] It is a websocket upgrade request")
                # This is the ws upgrade request, here we handles the upgrade
                ws_handler = self.server_context.ws_handler_class(self.socket, self.client_address, self.server)
                ws_handler.__dict__.update(self.__dict__)
                ws_handler.prevent_wsgi_call = True
                ws_handler.handle_one_response()

                # Suppose here we have an websocket connection
                setattr(request, 'websocket', ws_handler.websocket)
                is_websocket = True
            else:
                logger.warning("[EngineHandler] Client fired a websocket but the 'Upgrade' Header loose somewhere, maybe your proxy")
                return

        sid = request.GET.get("sid", None)
        b64 = request.GET.get("b64", False)

        socket = self.server_context.engine_sockets.get(sid, None)

        # FIXME CHECK WHETHER WE NEED THIS?
        if socket and not is_websocket:
            # We spawn a new gevent here, let socket do its own business.
            # In current event loop, we will wait on request.response, which is set in socket.set_request
            logger.debug("[EngineHandler] Found existing socket")
            self.bind_framework_info(socket)
            gevent.spawn(socket.process_request, request)

        else:
            if socket is None:
                # No sid / unknown sid: this is a handshake request.
                logger.debug("[EngineHandler] No existing socket, handshake")
                socket = self._do_handshake(b64=b64, request=request)
                if not is_websocket:
                    logger.debug("[EngineHandler] The incoming request not websocket, bind framework info")
                    self.bind_framework_info(socket)

            if is_websocket and socket.transport.name != 'websocket':
                logger.debug("[EngineHandler] websocket, proceed as upgrade")
                # Here we have a upgrade
                ws_transport = WebsocketTransport(self, {})
                ws_transport.process_request(request)
                socket.maybe_upgrade(ws_transport)

        # wait till the response ends
        logger.debug("[EngineHandler] Waiting for the response signal")
        request.response.join()

        # The response object can be used as a wsgi application which will send out the buffer
        self.application = request.response
        # Call super handle_one_repsponse() to do timing, logging etc
        super(EngineHandler, self).handle_one_response()

        self.emit('cleanup')

    def _do_handshake(self, b64, request):
        """
        handshake with client to build a socket

        :param b64: truthy when the client asked for base64-encoded payloads
                    (i.e. it does not support binary frames)
        :param request: the webob Request carrying the handshake parameters
        :return: the newly created engine Socket, registered under its id
        """
        transport_name = request.GET.get('transport', None)
        if transport_name not in self.transports:
            raise ValueError("transport name [%s] not supported" % transport_name)

        socket = Socket(request, supports_binary=not bool(b64))
        self.server_context.engine_sockets[socket.id] = socket

        # Drop the registry entry as soon as the socket closes.
        def remove_socket(*args, **kwargs):
            self.server_context.engine_sockets.pop(socket.id)
        socket.on('close', remove_socket)

        request.response.headers['Set-Cookie'] = 'io=%s' % socket.id
        socket.open()
        self.emit('connection', socket)

        return socket
e': 'volume', 'device_name': '/dev/vdc'}) ])} bdm = bdi['block_device_mapping'][0] bdm['connection_info'] = conn_info disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta, bdi) mock_conf = mock.MagicMock(source_path='fake') with contextlib.nested( mock.patch.object(driver_block_device.DriverVolumeBlockDevice, 'save'), mock.patch.object(drvr, '_connect_volume'), mock.patch.object(drvr, '_get_volume_config', return_value=mock_conf), mock.patch.object(drvr, '_set_cache_mode') ) as (volume_save, connect_volume, get_volume_config, set_cache_mode): devices = drvr._get_guest_storage_config(instance, image_meta, disk_info, False, bdi, flavor, "hvm") self.assertEqual(3, len(devices)) self.assertEqual('/dev/vdb', instance.default_ephemeral_device) self.assertIsNone(instance.default_swap_device) connect_volume.assert_called_with(bdm['connection_info'], {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'}) get_volume_config.assert_called_with(bdm['connection_info'], {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'}) volume_save.assert_called_once_with() self.assertEqual(3, set_cache_mode.call_count) def test_get_neutron_events(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) network_info = [network_model.VIF(id='1'), network_model.VIF(id='2', active=True)] events = drvr._get_neutron_events(network_info) self.assertEqual([('network-vif-plugged', '1')], events) def test_unplug_vifs_ignores_errors(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) with mock.patch.object(drvr, 'vif_driver') as vif_driver: vif_driver.unplug.side_effect = exception.AgentError( method='unplug') drvr._unplug_vifs('inst', [1], ignore_errors=True) vif_driver.unplug.assert_called_once_with('inst', 1) def test_unplug_vifs_reports_errors(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) with mock.patch.object(drvr, 'vif_driver') as vif_driver: vif_driver.unplug.side_effect = exception.AgentError( method='unplug') 
self.assertRaises(exception.AgentError, drvr.unplug_vifs, 'inst', [1])
vif_driver.unplug.assert_called_once_with('inst', 1) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain') def test_cleanup_pass_with_no_mount_device(self, undefine, unp
lug): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) drvr.firewall_driver = mock.Mock() drvr._disconnect_volume = mock.Mock() fake_inst = {'name': 'foo'} fake_bdms = [{'connection_info': 'foo', 'mount_device': None}] with mock.patch('nova.virt.driver' '.block_device_info_get_mapping', return_value=fake_bdms): drvr.cleanup('ctxt', fake_inst, 'netinfo', destroy_disks=False) self.assertTrue(drvr._disconnect_volume.called) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain') def test_cleanup_wants_vif_errors_ignored(self, undefine, unplug): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) fake_inst = {'name': 'foo'} with mock.patch.object(drvr._conn, 'lookupByName') as lookup: lookup.return_value = fake_inst # NOTE(danms): Make unplug cause us to bail early, since # we only care about how it was called unplug.side_effect = test.TestingException self.assertRaises(test.TestingException, drvr.cleanup, 'ctxt', fake_inst, 'netinfo') unplug.assert_called_once_with(fake_inst, 'netinfo', True) @mock.patch('nova.virt.driver.block_device_info_get_mapping') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_get_serial_ports_from_instance') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain') def test_cleanup_serial_console_enabled( self, undefine, get_ports, block_device_info_get_mapping): self.flags(enabled="True", group='serial_console') instance = 'i1' network_info = {} bdm_info = {} firewall_driver = mock.MagicMock() get_ports.return_value = iter([('127.0.0.1', 10000)]) block_device_info_get_mapping.return_value = () # We want to ensure undefine_domain is called after # lookup_domain. 
def undefine_domain(instance): get_ports.side_effect = Exception("domain undefined") undefine.side_effect = undefine_domain drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) drvr.firewall_driver = firewall_driver drvr.cleanup( 'ctx', instance, network_info, block_device_info=bdm_info, destroy_disks=False, destroy_vifs=False) get_ports.assert_called_once_with(instance) undefine.assert_called_once_with(instance) firewall_driver.unfilter_instance.assert_called_once_with( instance, network_info=network_info) block_device_info_get_mapping.assert_called_once_with(bdm_info) @mock.patch('nova.virt.driver.block_device_info_get_mapping') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_get_serial_ports_from_instance') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain') def test_cleanup_serial_console_domain_gone( self, undefine, get_ports, block_device_info_get_mapping): self.flags(enabled="True", group='serial_console') instance = {'name': 'i1'} network_info = {} bdm_info = {} firewall_driver = mock.MagicMock() block_device_info_get_mapping.return_value = () # Ensure _get_serial_ports_from_instance raises same exception # that would have occurred if domain was gone. 
def exception_with_yield(instance): raise exception.InstanceNotFound("domain undefined") yield get_ports.side_effect = exception_with_yield drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) drvr.firewall_driver = firewall_driver drvr.cleanup( 'ctx', instance, network_info, block_device_info=bdm_info, destroy_disks=False, destroy_vifs=False) get_ports.assert_called_once_with(instance) undefine.assert_called_once_with(instance) firewall_driver.unfilter_instance.assert_called_once_with( instance, network_info=network_info) block_device_info_get_mapping.assert_called_once_with(bdm_info) def test_swap_volume(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) mock_dom = mock.MagicMock() guest = libvirt_guest.Guest(mock_dom) with mock.patch.object(drvr._conn, 'defineXML', create=True) as mock_define: xmldoc = "<domain/>" srcfile = "/first/path" dstfile = "/second/path" mock_dom.XMLDesc.return_value = xmldoc mock_dom.isPersistent.return_value = True mock_dom.blockJobInfo.return_value = {} drvr._swap_volume(guest, srcfile, dstfile, 1) mock_dom.XMLDesc.assert_called_once_with( flags=(fakelibvirt.VIR_DOMAIN_XML_INACTIVE | fakelibvirt.VIR_DOMAIN_XML_SECURE)) mock_dom.blockRebase.assert
import copy
from threading import Lock

from .metrics_core import Metric


class CollectorRegistry(object):
    """Metric collector registry.

    Collectors must have a no-argument method 'collect' that returns a list of
    Metric objects. The returned metrics should be consistent with the
    Prometheus exposition formats.
    """

    def __init__(self, auto_describe=False):
        # collector -> list of timeseries names it exposes, plus the reverse
        # index; both maps are only mutated while holding self._lock.
        self._collector_to_names = {}
        self._names_to_collectors = {}
        self._auto_describe = auto_describe
        self._lock = Lock()

    def register(self, collector):
        """Add a collector to the registry.

        Raises ValueError if any timeseries name produced by the collector is
        already claimed by a previously registered collector.
        """
        with self._lock:
            names = self._get_names(collector)
            duplicates = set(self._names_to_collectors).intersection(names)
            if duplicates:
                raise ValueError(
                    'Duplicated timeseries in CollectorRegistry: {0}'.format(
                        duplicates))
            for name in names:
                self._names_to_collectors[name] = collector
            self._collector_to_names[collector] = names

    def unregister(self, collector):
        """Remove a collector from the registry.

        Raises KeyError if the collector was never registered.
        """
        with self._lock:
            for name in self._collector_to_names[collector]:
                del self._names_to_collectors[name]
            del self._collector_to_names[collector]

    def _get_names(self, collector):
        """Get names of timeseries the collector produces.

        Prefers the collector's optional describe() method; falls back to
        collect() when auto_describe is enabled. Returns an empty list when
        neither is available, which disables duplicate detection for that
        collector.
        """
        desc_func = None
        # If there's a describe function, use it.
        try:
            desc_func = collector.describe
        except AttributeError:
            pass
        # Otherwise, if auto describe is enabled use the collect function.
        if not desc_func and self._auto_describe:
            desc_func = collector.collect

        if not desc_func:
            return []

        result = []
        # Each metric type implies a fixed set of suffixed sample series.
        type_suffixes = {
            'counter': ['_total', '_created'],
            'summary': ['', '_sum', '_count', '_created'],
            'histogram': ['_bucket', '_sum', '_count', '_created'],
            'gaugehistogram': ['_bucket', '_gsum', '_gcount'],
            'info': ['_info'],
        }
        for metric in desc_func():
            for suffix in type_suffixes.get(metric.type, ['']):
                result.append(metric.name + suffix)
        return result

    def collect(self):
        """Yields metrics from the collectors in the registry."""
        # Snapshot the collector map under the lock so collectors may be
        # registered/unregistered concurrently while we iterate.
        with self._lock:
            collectors = copy.copy(self._collector_to_names)
        for collector in collectors:
            for metric in collector.collect():
                yield metric

    def restricted_registry(self, names):
        """Returns object that only collects some metrics.

        Returns an object which upon collect() will return
        only samples with the given names.

        Intended usage is:
            generate_latest(REGISTRY.restricted_registry(['a_timeseries']))

        Experimental."""
        names = set(names)
        collectors = set()
        with self._lock:
            for name in names:
                if name in self._names_to_collectors:
                    collectors.add(self._names_to_collectors[name])
        metrics = []
        for collector in collectors:
            for metric in collector.collect():
                # Consistency fix: use the sample's .name field (as
                # get_sample_value below does) instead of positional s[0].
                samples = [s for s in metric.samples if s.name in names]
                if samples:
                    m = Metric(metric.name, metric.documentation, metric.type)
                    m.samples = samples
                    metrics.append(m)

        class RestrictedRegistry(object):
            def collect(self):
                return metrics

        return RestrictedRegistry()

    def get_sample_value(self, name, labels=None):
        """Returns the sample value, or None if not found.

        This is inefficient, and intended only for use in unittests.
        """
        if labels is None:
            labels = {}
        for metric in self.collect():
            for s in metric.samples:
                if s.name == name and s.labels == labels:
                    return s.value
        return None


REGISTRY = CollectorRegistry(auto_describe=True)
lType): def __init__(self, value2=None, value1=None): super(notEqualTypeSub, self).__init__(value2, value1, ) supermod.notEqualType.subclass = notEqualTypeSub # end class notEqualTypeSub class betweenTypeSub(supermod.betweenType): def __init__(self, max=None, value=None, min=None): super(betweenTypeSub, self).__init__(max, value, min, ) supermod.betweenType.subclass = betweenTypeSub # end class betweenTypeSub class notBetweenTypeSub(supermod.notBetweenType): def __init__(self, max=None
, value=None, min=None): super(notBetweenTypeSub, self).__init__(max, value, min, ) supermod.notBetween
Type.subclass = notBetweenTypeSub # end class notBetweenTypeSub class orTypeSub(supermod.orType): def __init__(self, clause=None): super(orTypeSub, self).__init__(clause, ) supermod.orType.subclass = orTypeSub # end class orTypeSub class andTypeSub(supermod.andType): def __init__(self, clause=None): super(andTypeSub, self).__init__(clause, ) supermod.andType.subclass = andTypeSub # end class andTypeSub class factTypeSub(supermod.factType): def __init__(self, name=None, extensiontype_=None): super(factTypeSub, self).__init__(name, extensiontype_, ) supermod.factType.subclass = factTypeSub # end class factTypeSub class predicateTypeSub(supermod.predicateType): def __init__(self, name=None, value=None): super(predicateTypeSub, self).__init__(name, value, ) supermod.predicateType.subclass = predicateTypeSub # end class predicateTypeSub class structTypeSub(supermod.structType): def __init__(self, name=None, comment=None, field=None): super(structTypeSub, self).__init__(name, comment, field, ) supermod.structType.subclass = structTypeSub # end class structTypeSub class instanceTypeSub(supermod.instanceType): def __init__(self, name=None, type_=None, comment=None, field=None): super(instanceTypeSub, self).__init__(name, type_, comment, field, ) supermod.instanceType.subclass = instanceTypeSub # end class instanceTypeSub class actionTypeSub(supermod.actionType): def __init__(self, extensiontype_=None): super(actionTypeSub, self).__init__(extensiontype_, ) supermod.actionType.subclass = actionTypeSub # end class actionTypeSub class setTypeSub(supermod.setType): def __init__(self, name=None, value=None): super(setTypeSub, self).__init__(name, value, ) supermod.setType.subclass = setTypeSub # end class setTypeSub class runRuleTypeSub(supermod.runRuleType): def __init__(self, name=None, argument=None): super(runRuleTypeSub, self).__init__(name, argument, ) supermod.runRuleType.subclass = runRuleTypeSub # end class runRuleTypeSub class ExpertSystemSub(supermod.ExpertSystem): 
def __init__(self, comment=None, fact=None, rule=None): super(ExpertSystemSub, self).__init__(comment, fact, rule, ) supermod.ExpertSystem.subclass = ExpertSystemSub # end class ExpertSystemSub class algorithmTypeSub(supermod.algorithmType): def __init__(self, RadialBasisFunctionNetwork=None, MultiLayerPerceptron=None): super(algorithmTypeSub, self).__init__(RadialBasisFunctionNetwork, MultiLayerPerceptron, ) supermod.algorithmType.subclass = algorithmTypeSub # end class algorithmTypeSub class algorithmType4Sub(supermod.algorithmType4): def __init__(self, RecurrentNeuralNetwork=None): super(algorithmType4Sub, self).__init__(RecurrentNeuralNetwork, ) supermod.algorithmType4.subclass = algorithmType4Sub # end class algorithmType4Sub class fieldTypeSub(supermod.fieldType): def __init__(self, initialValue=None, type_=None, name=None): super(fieldTypeSub, self).__init__(initialValue, type_, name, ) supermod.fieldType.subclass = fieldTypeSub # end class fieldTypeSub class fieldType5Sub(supermod.fieldType5): def __init__(self, name=None, value=None): super(fieldType5Sub, self).__init__(name, value, ) supermod.fieldType5.subclass = fieldType5Sub # end class fieldType5Sub class argumentTypeSub(supermod.argumentType): def __init__(self, name=None, value=None): super(argumentTypeSub, self).__init__(name, value, ) supermod.argumentType.subclass = argumentTypeSub # end class argumentTypeSub class ruleTypeSub(supermod.ruleType): def __init__(self, name=None, comment=None, parameter=None, if_=None, then=None, else_=None): super(ruleTypeSub, self).__init__(name, comment, parameter, if_, then, else_, ) supermod.ruleType.subclass = ruleTypeSub # end class ruleTypeSub class parameterTypeSub(supermod.parameterType): def __init__(self, type_=None, name=None): super(parameterTypeSub, self).__init__(type_, name, ) supermod.parameterType.subclass = parameterTypeSub # end class parameterTypeSub class ifTypeSub(supermod.ifType): def __init__(self, clause=None): super(ifTypeSub, 
self).__init__(clause, ) supermod.ifType.subclass = ifTypeSub # end class ifTypeSub class thenTypeSub(supermod.thenType): def __init__(self, action=None): super(thenTypeSub, self).__init__(action, ) supermod.thenType.subclass = thenTypeSub # end class thenTypeSub class elseTypeSub(supermod.elseType): def __init__(self, action=None): super(elseTypeSub, self).__init__(action, ) supermod.elseType.subclass = elseTypeSub # end class elseTypeSub def get_root_tag(node): tag = supermod.Tag_pattern_.match(node.tag).groups()[-1] rootClass = None rootClass = supermod.GDSClassesMapping.get(tag) if rootClass is None and hasattr(supermod, tag): rootClass = getattr(supermod, tag) return tag, rootClass def parse(inFilename, silence=False): parser = None doc = parsexml_(inFilename, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: rootTag = 'MeetCI' rootClass = supermod.MeetCI rootObj = rootClass.factory() rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None if not silence: sys.stdout.write('<?xml version="1.0" ?>\n') rootObj.export( sys.stdout, 0, name_=rootTag, namespacedef_='', pretty_print=True) return rootObj def parseEtree(inFilename, silence=False): parser = None doc = parsexml_(inFilename, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: rootTag = 'MeetCI' rootClass = supermod.MeetCI rootObj = rootClass.factory() rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. 
doc = None mapping = {} rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping) reverse_mapping = rootObj.gds_reverse_node_mapping(mapping) if not silence: content = etree_.tostring( rootElement, pretty_print=True, xml_declaration=True, encoding="utf-8") sys.stdout.write(content) sys.stdout.write('\n') return rootObj, rootElement, mapping, reverse_mapping def parseString(inString, silence=False): from StringIO import StringIO parser = None doc = parsexml_(StringIO(inString), parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: rootTag = 'MeetCI' rootClass = supermod.MeetCI rootObj = rootClass.factory() rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None if not silence: sys.stdout.write('<?xml version="1.0" ?>\n') rootObj.export( sys.stdout, 0, name_=rootTag, namespacedef_='') return rootObj def parseLiteral(inFilename, silence=False): parser = None doc = parsexml_(inFilename, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: rootTag = 'MeetCI' rootClass = supermod.MeetCI rootObj = rootClass.factory() rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None if not silence: sys.stdout.write('#from ??? import *\n\
ies. price_entries = prices.get_last_price_entries(entries, None) return used_open_entries + holdings_entries + price_entries def report_holdings(currency, relative, entries, options_map, aggregation_key=None, sort_key=None): """Generate a detailed list of all holdings. Args: currency: A string, a currency to convert to. If left to None, no conversion is carried out. relative: A boolean, true if we should reduce this to a relative value. entries: A list of directives. options_map: A dict of parsed options. aggregation_key: A callable use to generate aggregations. sort_key: A function to use to sort the holdings, if specified. Returns: A Table instance. """ holdings_list, _ = get_assets_holdings(entries, options_map, currency) if aggregation_key: holdings_list = holdings.aggregate_holdings_by(holdings_list, aggregation_key) if relative: holdings_list = holdings.reduce_relative(holdings_list) field_spec = RELATIVE_FIELD_SPEC else: field_spec = FIELD_SPEC if sort_key: holdings_list.sort(key=sort_key, reverse=True) return table.create_table(holdings_list, field_spec) def load_from_csv(fileobj): """Load a list of holdings from a CSV file. Args: fileobj: A file object. Yields: Instances of Holding, as read from the file. """ column_spec = [ ('Account', 'account', None), ('Units', 'number', D), ('Currency', 'currency', None), ('Cost Currency', 'cost_currency', None), ('Average Cost', 'cost_number', D), ('Price', 'price_number', D), ('Book Value', 'book_value', D), ('Market Value', 'market_value', D), ('Price Date', 'price_date', None), ] column_dict = {name: (attr, converter) for name, attr, converter in column_spec} klass = holdings.Holding # Create a set of default values for the namedtuple. defaults_dict = {attr: None for attr in klass._fields} # Start reading the file. reader = csv.reader(fileobj) # Check that the header is readable. 
header = next(reader) attr_converters = [] for header_name in header: try: attr_converter = column_dict[header_name] attr_converters.append(attr_converter) except KeyError: raise IOError("Invalid file contents for holdings") for line in reader: value_dict = defaults_dict.copy() for (attr, converter), value in zip(attr_converters, line): if converter: value = converter(value) value_dict[attr] = value yield holdings.Holding(**value_dict) class HoldingsReport(report.TableReport): """The full list of holdings for Asset and Liabilities accounts.""" names = ['holdings'] aggregations = { 'commodity': dict(aggregation_key=lambda holding: holding.currency), 'account': dict(aggregation_key=lambda holding: holding.account), 'root-account': dict( aggregation_key=lambda holding: account.root(3, holding.account), sort_key=lambda holding: holding.market_value or ZERO), 'currency': dict(aggregation_key=lambda holding: holding.cost_currency), } def __init__(self, *rest, **kwds): super().__init__(*rest, **kwds) if self.args.relative and not self.args.currency: self.parser.error("--relative needs to have --currency set") @classmethod def add_args(cls, parser): parser.add_argument('-c', '--currency', action='store', default=None, help="Which currency to convert all the holdings to") parser.add_argument('-r', '--relative', action='store_true', help="True if we should render as relative values only") parser.add_argument('-g', '--groupby', '--by', action='store', default=None, choices=cls.aggregations.keys(), help="How to group the holdings (default is: don't group)") def generate_table(self, entries, errors, options_map): keywords = self.aggregations[self.args.groupby] if self.args.groupby else {} return report_holdings(self.args.currency, self.args.relative, entries, options_map, **keywords) def render_beancount(self, entries, errors, options_map, file): # Don't allow any aggregations if we output as beancount format. 
for attribute in 'currency', 'relative', 'groupby': if getattr(self.args, attribute): self.parser.error( "'beancount' format does not support --{} option".format(attribute)) # Get the summarized entries and print them out. holdings_entries = get_holdings_entries(entries, options_map) dcontext = options_map['dcontext'] printer.print_entries(holdings_entries, dcontext, file=file) class CashReport(report.TableReport): """The list of cash holdings (defined as currency = cost-currency).""" names = ['cash'] @classmethod def add_args(cls, parser): parser.add_argument('-c', '--currency', action='store', default=None, help="Which currency to convert all the holdings to") parser.add_argument('-i', '--ignored', action='store_true', help="Report on ignored holdings instead
of included ones") parser.add_argument('-o',
'--operating-only', action='store_true', help="Only report on operating currencies") def generate_table(self, entries, errors, options_map): holdings_list, price_map = get_assets_holdings(entries, options_map) holdings_list_orig = holdings_list # Keep only the holdings where currency is the same as the cost-currency. holdings_list = [holding for holding in holdings_list if (holding.currency == holding.cost_currency or holding.cost_currency is None)] # Keep only those holdings held in one of the operating currencies. if self.args.operating_only: operating_currencies = set(options_map['operating_currency']) holdings_list = [holding for holding in holdings_list if holding.currency in operating_currencies] # Compute the list of ignored holdings and optionally report on them. if self.args.ignored: ignored_holdings = set(holdings_list_orig) - set(holdings_list) holdings_list = ignored_holdings # Convert holdings to a unified currency. if self.args.currency: holdings_list = holdings.convert_to_currency(price_map, self.args.currency, holdings_list) return table.create_table(holdings_list, FIELD_SPEC) class NetWorthReport(report.TableReport): """Generate a table of total net worth for each operating currency.""" names = ['networth', 'equity'] def generate_table(self, entries, errors, options_map): holdings_list, price_map = get_assets_holdings(entries, options_map) net_worths = [] for currency in options_map['operating_currency']: # Convert holdings to a unified currency. # # Note: It's entirely possible that the price map does not have all # the necessary rate conversions here. The resulting holdings will # simply have no cost when that is the case. We must handle this # gracefully below. currency_holdings_list = holdings.convert_to_currency(price_map, currency,
# -*- coding: utf-8 -*-
from . import app, db
from flask import request, g, session, redirect
from Lotus.model.user import User
from hashlib import md5
from Lotus.lib.msg_code import Msg
import json


def _json_msg(code, msg):
    """Serialize a {"code": ..., "msg": ...} response body as JSON.

    The original code called str.format() on a template containing %-style
    placeholders ('{"code":%d,"msg":$s}'), which performed no substitution
    and returned the literal template text.
    """
    return json.dumps({'code': code, 'msg': msg})


def _hash_password(raw_psw):
    """Return the md5 hexdigest used for stored passwords (see register)."""
    m = md5()
    # md5 requires bytes on Python 3.
    m.update(raw_psw.encode('utf-8'))
    return m.hexdigest()


@app.route('/user/login', methods=['POST'])
def user_login():
    """Log in by email + password; on success stores userid in the session."""
    email = request.form.get('email', None)
    psw = request.form.get('psw', None)
    if email is None or psw is None:
        return _json_msg(Msg['faild'], 'params not enougth')
    # Passwords are stored as md5 hexdigests (see user_register), so the
    # submitted password must be hashed before comparison — the old code
    # compared plaintext, which could never match a registered user.
    # Also: filter_by() returns a Query, which is always truthy, so the old
    # `if users:` check never detected a missing user; use .first() instead.
    user = User.query.filter_by(email=email, psw=_hash_password(psw)).first()
    if user is None:
        return _json_msg(Msg['faild'], 'user not exist')
    g.user = user
    session['userid'] = user.userid
    # The old success path fell off the end (returned None -> HTTP 500).
    return _json_msg(Msg['success'], 'login success')


@app.route('/user/register', methods=['POST'])
def user_register():
    """Register a new user from form fields; unset fields use defaults."""
    # TODO: validate required parameters up front instead of relying on the
    # exception handler (original note: what if the insert fails?).
    # TODO: forgotten-password flow.
    try:
        u = User()
        u.username = request.form.get('username', None)
        u.description = request.form.get('description', None)
        u.type = request.form.get('type', User.CONST_TYPE_USER)
        u.email = request.form.get('email', None)
        # Default password is applied when none was submitted.
        u.psw = _hash_password(
            request.form.get('psw', User.CONST_DEFAULT_PASSWORD))
        db.session.add(u)
        db.session.commit()
    except Exception:
        # Keep the session usable after a failed insert.
        db.session.rollback()
        return _json_msg(Msg['faild'], 'register faild')
    return _json_msg(Msg['success'], 'register success')


@app.route('/user/<int:userid>/avatar', methods=['GET', 'POST'])
def user_avatar(userid):
    """Avatar upload/download. TODO: support uploading an avatar."""
    if request.method == 'POST':
        pass
    else:
        pass


@app.route('/user/<int:userid>/profile', methods=['GET'])
def user_profile(userid):
    """Return the logged-in user's profile as JSON, or bounce to login."""
    userid_in_session = session.get('userid')
    if not userid_in_session:
        # The old code called redirect() without returning it, producing a
        # None response (HTTP 500) for anonymous visitors.
        return redirect('/user/login')
    # `g` is request-scoped, so g.user set during the login request is not
    # available here; reload the user from the session's userid.
    user = User.query.get(userid_in_session)
    if user is None:
        return redirect('/user/login')
    result = {
        'userid': user.userid,
        'username': user.username,
        'avatar': user.avatar,
        'description': user.description,
        'type': user.type,
        'email': user.email
    }
    return json.dumps(result)


@app.route('/user/<int:userid>/issue/sends/page/<int:page>', methods=['GET'])
def user_issues_send(userid, page):
    pass


@app.route('/user/<int:userid>/issue/favours/page/<int:page>', methods=['GET'])
def user_issues_favour(userid, page):
    pass


# NOTE(review): this rule is identical to user_issues_favour's — it looks
# like a copy-paste slip (presumably /user/<id>/message/page/<page> was
# intended). Left unchanged pending confirmation, since changing a URL is
# an interface change.
@app.route('/user/<int:userid>/issue/favours/page/<int:page>', methods=['GET'])
def user_messages(userid, page):
    pass
bj.dotted) # THEN self.assertEqual(code, actual_coap_obj.code) class TestCoapMessage(unittest.TestCase): def test_should_return_version_value_when_version_property_is_called(self): # GIVEN version = any_version() coap_message = coap.CoapMe
ssage(version, any_type(), any_code(), any_message_id(), any_token(), any_options(), any_payload()) # WHEN
actual_version = coap_message.version # THEN self.assertEqual(version, actual_version) def test_should_return_type_value_when_type_property_is_called(self): # GIVEN _type = any_type() coap_message = coap.CoapMessage(any_version(), _type, any_code(), any_message_id(), any_token(), any_options(), any_payload()) # WHEN actual_type = coap_message.type # THEN self.assertEqual(_type, actual_type) def test_should_return_code_value_when_code_property_is_called(self): # GIVEN code = any_code() coap_message = coap.CoapMessage(any_version(), any_type(), code, any_message_id(), any_token(), any_options(), any_payload()) # WHEN actual_code = coap_message.code # THEN self.assertEqual(code, actual_code) def test_should_return_message_id_value_when_message_id_property_is_called(self): # GIVEN message_id = any_message_id() coap_message = coap.CoapMessage(any_version(), any_type(), any_code(), message_id, any_token(), any_options(), any_payload()) # WHEN actual_message_id = coap_message.message_id # THEN self.assertEqual(message_id, actual_message_id) def test_should_return_token_value_when_token_property_is_called(self): # GIVEN token = any_token() coap_message = coap.CoapMessage(any_version(), any_type(), any_code(), any_message_id(), token, any_options(), any_payload()) # WHEN actual_token = coap_message.token # THEN self.assertEqual(token, actual_token) def test_should_return_tkl_value_when_tkl_property_is_called(self): # GIVEN token = any_token() coap_message = coap.CoapMessage(any_version(), any_type(), any_code(), any_message_id(), token, any_options(), any_payload()) # WHEN actual_tkl = coap_message.tkl # THEN self.assertEqual(len(token), actual_tkl) def test_should_return_options_value_when_options_property_is_called(self): # GIVEN options = any_options() coap_message = coap.CoapMessage(any_version(), any_type(), any_code(), any_message_id(), any_token(), options, any_payload()) # WHEN actual_options = coap_message.options # THEN self.assertEqual(options, actual_options) 
def test_should_return_payload_value_when_payload_property_is_called(self): # GIVEN payload = any_payload() coap_message = coap.CoapMessage(any_version(), any_type(), any_code(), any_message_id(), any_token(), any_options(), payload) # WHEN actual_payload = coap_message.payload # THEN self.assertEqual(payload, actual_payload) def test_should_return_uri_path_value_when_uri_path_property_is_called(self): # GIVEN uri_path = any_uri_path() coap_message = coap.CoapMessage(any_version(), any_type(), any_code(), any_message_id(), any_token(), any_options(), any_payload(), uri_path) # WHEN actual_uri_path = coap_message.uri_path # THEN self.assertEqual(uri_path, actual_uri_path) class TestCoapMessageIdToUriPathBinder(unittest.TestCase): def test_should_add_uri_path_to_binds_when_add_uri_path_for_method_is_called(self): # GIVEN message_id = any_message_id() token = any_token() uri_path = any_uri_path() binder = coap.CoapMessageIdToUriPathBinder() # WHEN binder.add_uri_path_for(message_id, token, uri_path) # THEN self.assertEqual(uri_path, binder.get_uri_path_for(message_id, token)) def test_should_raise_KeyError_when_get_uri_path_for_is_called_but_it_is_not_present_in_database(self): # GIVEN message_id = any_message_id() token = any_token() uri_path = any_uri_path() binder = coap.CoapMessageIdToUriPathBinder() # THEN self.assertRaises(RuntimeError, binder.get_uri_path_for, message_id, token) class TestCoapMessageFactory(unittest.TestCase): def _create_dummy_payload_factory(self): class DummyPayloadFactory: def parse(self, data, message_info): return data.read() return DummyPayloadFactory() def _create_coap_message_factory(self): return coap.CoapMessageFactory( options_factory=coap.CoapOptionsFactory(), uri_path_based_payload_factories={ "/a/as": self._create_dummy_payload_factory() }, message_id_to_uri_path_binder=coap.CoapMessageIdToUriPathBinder()) def test_should_create_CoapMessage_from_solicit_request_data_when_parse_method_is_called(self): # GIVEN data = 
bytearray([0x42, 0x02, 0x00, 0xbd, 0x65, 0xee, 0xb1, 0x61, 0x02, 0x61, 0x73, 0xff, 0x01, 0x08, 0x16, 0x6e, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x02, 0x04, 0x01, 0x02]) factory = self._create_coap_message_factory() # WHEN coap_message = factory.parse(io.BytesIO(data), None) # THEN self.assertEqual(1, coap_message.version) self.assertEqual(0, coap_message.type) self.assertEqual(2, coap_message.tkl) self.assertEqual(2, coap_message.code) self.assertEqual(189, coap_message.message_id) self.assertEqual(bytearray([0x65, 0xee]), coap_message.token) self.assertEqual("a", coap_message.options[0].value.decode("utf-8")) self.assertEqual("as", coap_message.options[1].value.decode("utf-8")) self.assertEqual("/a/as", coap_message.uri_path) self.assertEqual(bytearray([0x01, 0x08, 0x16, 0x6e, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x02, 0x04, 0x01, 0x02]), coap_message.payload) def test_should_create_CoapMessage_from_solicit_response_data_when_parse_method_is_called(self): # GIVEN data = bytearray([0x62, 0x44, 0x00, 0xbd, 0x65, 0xee, 0xff, 0x04, 0x01, 0x00, 0x02, 0x02, 0x00, 0x00, 0x07, 0x09, 0x76, 0x80, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00]) mid_binder = coap.CoapMessageIdToUriPathBinder() mid_binder.add_uri_path_for(189, bytearray([0x65, 0xee]), "/a/as") factory = coap.CoapMessageFactory( options_factory=coap.CoapOptionsFactory(), uri_path_based_payload_factories={ "/a/as": self._create_dummy_payload_factory() }, message_id_to_uri_path_binder=mid_binder) # WHEN coap_message = factory.parse(io.BytesIO(data), None) # THEN self.assertEqual(1, coap_message.version) self.assertEqual(2, coap_message.type) self.assertEqual(2, coap_message.tkl) self.assertEqual("2.04", coap_message.code) self.assertEqual(189, coap_message.message_id) self.assertEqual(bytearray([0x65, 0xee]), coap_message.token) self.assertEqual(None, coap_message.uri_path) self.assertEqual(bytearray([0x04, 0x01, 0x00, 0x02, 0x02, 0x00, 0x00, 0x07, 0x09, 0x76, 0x80, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00]), coap_message.payload) 
if __name__ == "__mai
: utf-8 """ Qc API Qc API # noqa: E501 The version of the OpenAPI document: 3.0.0 Contact: cloudsupport@telestream.net Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from telestream_cloud_qc.configuration import Configuration class PartitionStatusTest(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'closed_complete': 'bool', 'open_incomplete': 'bool', 'closed_incomplete': 'bool', 'open_complete': 'bool', 'not_present': 'bool', 'reject_on_error': 'bool', 'checked': 'bool' } attribute_map = { 'closed_complete': 'closed_complete', 'open_incomplete': 'open_incomplete', 'closed_incomplete': 'closed_incomplete', 'open_complete': 'open_complete', 'not_present': 'not_present', 'reject_on_error': 'reject_on_error', 'checked': 'checked' } def __init__(self, closed_complete=None, open_incomplete=None, closed_incomplete=None, open_complete=None, not_present=None, reject_on_error=None, checked=None, local_vars_configuration=None): # noqa: E501 """PartitionStatusTest - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._closed_complete = None self._open_incomplete = None self._closed_incomplete = None self._open_complete = None self._not_present = None self._reject_on_error = None self._checked = None self.discriminator = None if closed_complete is not None: self.closed_complete = closed_complete if open_incomplete is not None: self.open_incomplete = open_incomplete if closed_incomplete is not None: self.closed_incomplete = closed_incomplete if open_complete is not None: self.open_complete = 
open_complete if not_present is not None: self.not_present = not_present if reject_on_error is not None: self.reject_on_error = reject_on_error if checked is not None: self.checked = checked @property def closed_complete(self): """Gets the closed_complete of this PartitionStatusTest. # noqa: E501 :return: The closed_complete of this PartitionStatusTest. # noqa: E501 :rtype: bool """ return self._closed_complete @closed_complete.setter def closed_complete(self, closed_complete): """Sets the closed_complete of this PartitionStatusTest. :param closed_complete: The closed_complete of this PartitionStatusTest. # noqa: E501 :type: bool """ self._closed_complete = closed_complete @property def open_incomplete(self): """Gets the open_incomplete of this PartitionStatusTest. # noqa: E501 :return: The open_incomplete of this PartitionStatusTest. # noqa: E501 :rtype: bool """ return self._open_incomplete @open_incomplete.setter def open_incomplete(self, open_incomplete): """Sets the open_incomplete of this PartitionStatusTest. :param open_incomplete: The open_incomplete of this PartitionStatusTest. # noqa: E501 :type: bool """ self._open_incomplete = open_incomplete @property def closed_incomplete(self): """Gets the closed_incomplete of this PartitionStatusTest. # noqa: E501 :return: The closed_incomplete of this PartitionStatusTest. # noqa: E501 :rtype: bool """ return self._closed_incomplete @closed_incomplete.setter def closed_incomplete(self, closed_incomplete): """Sets the closed_incomplete of this PartitionStatusTest. :param closed_incomplete: The closed_incomplete of this PartitionStatusTest. # noqa: E501 :type: bool """ self._closed_incomplete = closed_incomplete @property def open_complete(self): """Gets the open_complete of this PartitionStatusTest. # noqa: E501 :return: The open_complete of this PartitionStatusTest. 
# noqa: E501 :rtype: bool """ return self._open_complete @open_complete.setter def open_complete(self, open_complete): """Sets the open_complete of this PartitionStatusTest. :param open_complete: The open_complete of this PartitionStatusTest. # noqa: E501 :type: bool """ self._open_complete = open_complete @property def not_present(self): """Gets the not_present of this PartitionStatusTest. # noqa: E501 :return: The not_present of this PartitionStatusTest. # noqa: E501 :rtype: bool """ return self._not_present @not_present.setter def not_present(self, not_present): """Sets the not_present of this PartitionStatusTest. :param not_present: The not_present of this PartitionStatusTest. # noqa: E501 :type: bool """ self._not_present = not_present @property def reject_on_error(self): """Gets the reject_on_error of this PartitionStatusTest. # noqa: E501 :return: The reject_on_error of this PartitionStatusTest. # noqa: E501 :rtype: bool """ return self._reject_on_error @reject_on_error.setter def reject_on_error(self, reject_on_error): """Sets the reject_on_error of this PartitionStatusTest. :param reject_on_error: The reject_on_error of this PartitionStatusTest. # noqa: E501 :type: bool """ self._reject_on_error = reject_on_error @property def checked(self): """Gets the checked of this PartitionStatusTest. # noqa: E501 :return: The checked of this PartitionStatusTest. # noqa: E501 :rtype: bool """ return self._checked @checked.setter def checked(self, checked): """Sets the checked of this PartitionStatusTest. :param checked: The checked of this PartitionStatusTest. 
# noqa: E501 :type: bool """ self._checked = checked def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return se
lf.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, PartitionStatusTest): return False return self.to_dict() == other.to_dict() def __ne__(self,
other): """Returns true if both objects are not equal""" if not isinstance(other, PartitionStatusTest): return True return self.to_dict()
# Django settings for the `nosedjangotests` project (legacy pre-1.2 style:
# flat DATABASE_* variables instead of the DATABASES dict).

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

MANAGERS = ADMINS

# Legacy single-database configuration (sqlite3 file `nosedjango`).
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = 'nosedjango'
DATABASE_USER = 'root'
DATABASE_PASSWORD = ''
DATABASE_HOST = ''   # empty = localhost
DATABASE_PORT = ''   # empty = default port

TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

USE_I18N = True

# Media serving is not configured for this test project.
MEDIA_ROOT = ''
MEDIA_URL = ''
ADMIN_MEDIA_PREFIX = '/media/'

# NOTE(review): hard-coded secret key checked into source — presumably
# acceptable because this is a test-fixture settings file; confirm it is
# never used in production.
SECRET_KEY = 'w9*+(qevfn*j2959ikv-_7kj7ivptt#8&n*gy0o&ktisx@%rzt'

TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)

ROOT_URLCONF = 'nosedjangotests.urls'

TEMPLATE_DIRS = (
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'nosedjangotests.polls',
)
"""Leetcode 100. Same Tree Easy URL: https://leetcode.com/problems/same-tree/ Given two binary trees, write a function to check if they are the same or not. Two binary trees are considered the same if they are structurally identical and the nodes have the same value. Example 1: Input: 1 1 / \ / \ 2 3 2 3 [1,2,3], [1,2,3] Output: true Example 2: Input: 1 1 / \ 2 2 [1,2], [1,null,2] Output: false Example 3: Input: 1 1 / \ / \ 2 1 1 2 [1,2,1], [1,1,2] Output: false """ # Definition for a binary tree node. class TreeNode(object): def __init__(self, val): self.val = val self.left = None self.right = None class SolutionPreorderRecur(object): def isSameTree(self, p, q): """ :type p: TreeNode :type q: TreeNode :rtype: bool Apply recursive preorder traversal to check same tree. Time complexity: O(n). Space complexity: O(n). """ # Check if both root don't exist. if not p and not q: return True # Check if just one of roots exits. if not p or not q: return False # If both exist, check their values are the same. if p.val != q.val: return False # Recursively check left/right subtrees. return (self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)) class SolutionPreorderIter(object): def isSameTree(self, p, q): """ :type p: TreeNode :type q: TreeNode :rtype: bool Apply iterative preorder traversal to check same tree. Time complexity: O(n). Space complexity: O(n). """ stack = [(p, q)] while stack: cur_p, cur_q = stack.pop() # Check if both root don't exist, continue, # since there may be other node pairs to check. if not cur_p and not cur_q: continue # Check if just one of roots exits. if not cur_p or not cur_q: return False # If both exist, check their values are the same. if cur_p.val != cur_q.val: return False # Add root's right and then left to stack, since stack is FILO. stack.append((cur_p.right, cur_q.right)) stack.append((cur_p.left, cur_q.left)) return True def main(): # Input: 1 1 # / \ / \ # 2 3 2 3 # [1,2,3], [1,2,3] # Output: true p = TreeNode(1) p.left
= TreeNode(2) p.right = TreeNode(3) q = TreeNode(1) q.left = TreeNode(2) q.right = TreeNode(3) print SolutionPreorderRecur().isSameTree(p, q) print SolutionPreorderIter().isSameTree(p, q) # Input: 1 1 # / \ # 2 2 # [1,2], [1,null,2] # Output: false p = TreeNode(1) p.left
= TreeNode(2) q = TreeNode(1) q.right = TreeNode(2) print SolutionPreorderRecur().isSameTree(p, q) print SolutionPreorderIter().isSameTree(p, q) # Input: 1 1 # / \ / \ # 2 1 1 2 # [1,2,1], [1,1,2] # Output: false p = TreeNode(1) p.left = TreeNode(2) p.right = TreeNode(1) q = TreeNode(1) q.left = TreeNode(1) q.right = TreeNode(2) print SolutionPreorderRecur().isSameTree(p, q) print SolutionPreorderIter().isSameTree(p, q) # Input: [10,5,15], [10,5,null,null,15] p = TreeNode(10) p.left = TreeNode(5) p.right = TreeNode(15) q = TreeNode(10) q.left = TreeNode(5) q.left.right = TreeNode(15) print SolutionPreorderRecur().isSameTree(p, q) print SolutionPreorderIter().isSameTree(p, q) if __name__ == '__main__': main()
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 11 17:55:48 2016

Use Euler's method to compute the speed of a bicycle starting from rest.
(Docstring translated from the original Chinese.)

@author: nightwing
"""
import matplotlib.pyplot as plt

DENSITY = 1.29   # air density (kg/m^3)
C = 1.0          # drag coefficient
A = 0.33         # frontal cross-section area (m^2)
M = 70.0         # combined mass of rider and bicycle (kg)
v = 7.0          # crossover speed (m/s): below it the power term uses v, above it the current speed
v1 = 0.0         # speed without air resistance (m/s)
v2 = 0.0         # speed with air resistance (m/s)
P = 400.0        # rider power output (W)
t = 0            # initial time
t_max = 200      # end time (s)
dt = 0.1         # time step (s)
time = []        # stores the time samples
velocity1 = []   # stores the speeds without air resistance
velocity2 = []   # stores the speeds with air resistance

# --- Euler integration of the bicycle's speed ---
while t <= t_max:
    velocity1.append(v1)
    velocity2.append(v2)
    time.append(t)
    # Below the crossover speed, acceleration = P/(M*v); above it, P/(M*v_current).
    # The v2 branches additionally subtract the drag term C*rho*A*v2^2/(2M).
    if v1 <= v:
        v1 += P/(M*v)*dt
    if v2 <= v:
        v2 += P/(M*v)*dt-C*DENSITY*A*v2**2/(2*M)*dt
    # NOTE(review): these are independent `if`s, not `elif`s — on the step
    # where v1/v2 first crosses v, both branches fire and the speed is
    # updated twice within a single dt. Possibly intended, but it looks
    # like it should be `else`/`elif`; confirm before relying on results.
    if v1 > v:
        v1 += P/(M*v1)*dt
    if v2 > v:
        v2 += P/(M*v2)*dt-C*DENSITY*A*v2**2/(2*M)*dt
    t += dt

# ------------ plotting ---------------
plt.title("Bicycling simulation: velocity vs. time")
plt.xlabel("time (s)")
plt.ylabel("velocity (m/s)")
plt.plot(time,velocity1,"k-",label="No air resistence")
plt.plot(time,velocity2,"k--",label="With air resistence")
plt.legend(loc=2)
plt.show()
ython.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.util import tf_inspect def assert_integer_form( x, data=None, summarize=None, message=None, int_dtype=None, name="assert_integer_form"): """Assert that x has integer components (or floats equal to integers). Args: x: Floating-point `Tensor` data: The tensors to print out if the condition is `False`. Defaults to error message and first few entries of `x` and `y`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. int_dtype: A `tf.dtype` used to cast the float to. The default (`None`) implies the smallest possible signed int will be used for casting. name: A name for this operation (optional). Returns:
Op raising `InvalidArgumentError` if `cast(x, int_dtype) != x`. """ with ops.name_scope(name, values=[x, data]): x = ops.convert_to_tensor(x, name="x") if x.dtype.is_integer: return control_flow_ops.no_op() message = message or "{} has non-integer components".format(x) if int_dtype is None: try: in
t_dtype = { dtypes.float16: dtypes.int16, dtypes.float32: dtypes.int32, dtypes.float64: dtypes.int64, }[x.dtype.base_dtype] except KeyError: raise TypeError("Unrecognized type {}".format(x.dtype.name)) return check_ops.assert_equal( x, math_ops.cast(math_ops.cast(x, int_dtype), x.dtype), data=data, summarize=summarize, message=message, name=name) def assert_symmetric(matrix): matrix_t = array_ops.matrix_transpose(matrix) return control_flow_ops.with_dependencies( [check_ops.assert_equal(matrix, matrix_t)], matrix) def embed_check_nonnegative_integer_form( x, name="embed_check_nonnegative_integer_form"): """Assert x is a non-negative tensor, and optionally of integers.""" with ops.name_scope(name, values=[x]): x = ops.convert_to_tensor(x, name="x") assertions = [ check_ops.assert_non_negative( x, message="'{}' must be non-negative.".format(x)), ] if not x.dtype.is_integer: assertions += [ assert_integer_form( x, message="'{}' cannot contain fractional components.".format( x)), ] return control_flow_ops.with_dependencies(assertions, x) def same_dynamic_shape(a, b): """Returns whether a and b have the same dynamic shape. Args: a: `Tensor` b: `Tensor` Returns: `bool` `Tensor` representing if both tensors have the same shape. """ a = ops.convert_to_tensor(a, name="a") b = ops.convert_to_tensor(b, name="b") # Here we can't just do math_ops.equal(a.shape, b.shape), since # static shape inference may break the equality comparison between # shape(a) and shape(b) in math_ops.equal. def all_shapes_equal(): return math_ops.reduce_all(math_ops.equal( array_ops.concat([array_ops.shape(a), array_ops.shape(b)], 0), array_ops.concat([array_ops.shape(b), array_ops.shape(a)], 0))) # One of the shapes isn't fully defined, so we need to use the dynamic # shape. 
return control_flow_ops.cond( math_ops.equal(array_ops.rank(a), array_ops.rank(b)), all_shapes_equal, lambda: constant_op.constant(False)) def maybe_get_static_value(x, dtype=None): """Helper which tries to return a static value. Given `x`, extract it's value statically, optionally casting to a specific dtype. If this is not possible, None is returned. Args: x: `Tensor` for which to extract a value statically. dtype: Optional dtype to cast to. Returns: Statically inferred value if possible, otherwise None. """ if x is None: return x try: # This returns an np.ndarray. x_ = tensor_util.constant_value(x) except TypeError: x_ = x if x_ is None or dtype is None: return x_ return np.array(x_, dtype) def get_logits_and_probs(logits=None, probs=None, multidimensional=False, validate_args=False, name="get_logits_and_probs", dtype=None): """Converts logit to probabilities (or vice-versa), and returns both. Args: logits: Floating-point `Tensor` representing log-odds. probs: Floating-point `Tensor` representing probabilities. multidimensional: Python `bool`, default `False`. If `True`, represents whether the last dimension of `logits` or `probs`, a `[N1, N2, ... k]` dimensional tensor, representing the logit or probability of `shape[-1]` classes. validate_args: Python `bool`, default `False`. When `True`, either assert `0 <= probs <= 1` (if not `multidimensional`) or that the last dimension of `probs` sums to one. name: A name for this operation (optional). dtype: `tf.DType` to prefer when converting args to `Tensor`s. Returns: logits, probs: Tuple of `Tensor`s. If `probs` has an entry that is `0` or `1`, then the corresponding entry in the returned logit will be `-Inf` and `Inf` respectively. Raises: ValueError: if neither `probs` nor `logits` were passed in, or both were. 
""" with ops.name_scope(name, values=[probs, logits]): if (probs is None) == (logits is None): raise ValueError("Must pass probs or logits, but not both.") if probs is None: logits = ops.convert_to_tensor(logits, name="logits", dtype=dtype) if not logits.dtype.is_floating: raise TypeError("logits must having floating type.") # We can early return since we constructed probs and therefore know # they're valid. if multidimensional: if validate_args: logits = embed_check_categorical_event_shape(logits) return logits, nn.softmax(logits, name="probs") return logits, math_ops.sigmoid(logits, name="probs") probs = ops.convert_to_tensor(probs, name="probs", dtype=dtype) if not probs.dtype.is_floating: raise TypeError("probs must having floating type.") if validate_args: with ops.name_scope("validate_probs"): one = constant_op.constant(1., probs.dtype) dependencies = [check_ops.assert_non_negative(probs)] if multidimensional: probs = embed_check_categorical_event_shape(probs) dependencies += [ check_ops.assert_near( math_ops.reduce_sum(probs, -1), one, message="probs does not sum to 1.") ] else: dependencies += [check_ops.assert_less_equal( probs, one, message="probs has components greater than 1.")] probs = control_flow_ops.with_dependencies(dependencies, probs) with ops.name_scope("logits"): if multidimensional: # Here we don't compute the multidimensional case, in a manner # consistent with respect to the unidimensional case. We do so # following the TF convention. Typically, you might expect to see # logits = log(probs) - log(probs[pivot]). A side-effect of # being consistent with the TF approach is that the unidimensional case # implicitly handles the second dimension but the multidimensional case # explicitly keeps the pivot dimension. return math_ops.log(probs), probs return math_ops.log(probs) - math_ops.log1p(-1. 
* probs), probs def _is_known_unsigned_by_dtype(dt): """Helper returning True if dtype is known to be unsigned.""" return { dtypes.bool: True, dtypes.uint8: True, dtypes.uint16: True, }.get(dt.base_dtype, False) def _is_known_signed_by_dtype(dt): """Helper returning True if dtype is known to be signed.""" return { dtypes.float16: True, dtypes.float32: True, dtypes.float64: True,
#!/usr/bin/env python
"""Summarize KEGG pathway flat files.

Reads every 'path:*' file in the directory given as the first command-line
argument (files downloadable with the 'make-wget_pathway.sh' script) and
writes three tab-separated outputs next to the current directory:

  <dir>.summary   - one line per pathway: id, name, #genes, #compounds
  <dir>.genes     - one line per (pathway, gene) entry
  <dir>.compounds - one line per (pathway, compound) entry

Pathways with no GENE entries are skipped and reported on stderr.

Changes vs. original: removed the unused `gene_total` accumulator and
commented-out debug prints; per-pathway input files are now opened with
`with` so they are closed even if parsing raises.
"""
import os
import sys

# Directory containing the 'path:...' files.
dir_name = sys.argv[1]

f_summary = open('%s.summary' % dir_name, 'w')
f_genes = open('%s.genes' % dir_name, 'w')
f_compounds = open('%s.compounds' % dir_name, 'w')

for filename in os.listdir(dir_name):
    if not filename.startswith('path:'):
        continue

    path_id = ''
    path_name = ''
    gene_list = []
    comp_list = []
    prev_tag = ''

    # KEGG flat files carry the record tag in the first columns; a blank
    # tag marks a continuation line belonging to the previous tag.
    with open(os.path.join(dir_name, filename), 'r') as f:
        for line in f:
            tmp_tag = line[:11].strip()
            if tmp_tag == 'ENTRY':
                path_id = line.strip().split()[1]
            if tmp_tag == 'NAME':
                # Pathway names look like "<name> - <organism>"; keep the name.
                path_name = line[11:].split(' - ')[0].strip()
            # COMPOUND records plus their continuation lines.
            if tmp_tag == 'COMPOUND' or (tmp_tag == '' and prev_tag == 'COMPOUND'):
                comp_list.append(line[11:].strip().split()[0])
                f_compounds.write('path:%s\t%s\n' % (path_id, line[11:].strip()))
            # GENE records plus their continuation lines.
            elif tmp_tag == 'GENE' or (tmp_tag == '' and prev_tag == 'GENE'):
                gene_list.append(line[11:].strip().split()[0])
                f_genes.write('path:%s\t%s\n' % (path_id, line[11:].strip()))
            if tmp_tag != '':
                prev_tag = tmp_tag

    # Skip pathways that have no gene annotations at all.
    if len(gene_list) == 0:
        sys.stderr.write('//SKIP// %s(%d) %s\n' % (path_id, len(gene_list), path_name))
        continue
    f_summary.write('path:%s\t%s\t%d\t%d\n' % (path_id, path_name, len(gene_list), len(comp_list)))

f_summary.close()
f_genes.close()
f_compounds.close()
License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # SPDX-License-Identifier: Apache-2.0 import re import argparse import json import functools from utils import parse_raw_timing, merged_dict # ============================================================================= NUMBER_RE = re.compile(r'\d+$') def get_sdf_type(type): """ Returns a SDF timing type for the given type plus information whether it is sequential or not. Returns None, None if the type is unknown """ # Known keywords and their SDF counterparts seq_keywords = { "Setup": "setup", "Hold": "hold", "Recov": "recovery", "Remov": "removal", } comb_keywords = { "Prop": "iopath", } # Sequential if type in seq_keywords: return seq_keywords[type], True # Combinational if type in comb_keywords: return comb_keywords[type], False # Unknown return None, None def parse_logical_names(phy_type, log_names, cell_pins): """ Parses logical cell names. Extracts the cell name, input pin name and output pin name. Uses dumped library cell definitions to achive that since a logical name string uses "_" as a separator and "_" can also occur in cell/pin name. Returns a list of tuples with (cell_name, src_pin, dst_pin) """ log_cells = [] # Process logical names that should correspond to bel timings for log_name in log_names.split(","): # Since both cell and pin names may also contain "_" the # logical name is split iteratively. 
# The timing type is the first, it has to equal the timing # type of the speed model if not log_name.startswith(phy_type): continue log_name = log_name[len(phy_type) + 1:] # Find the cell name in the library and strip it for c in cell_pins: if log_name.startswith(c): log_cell = c log_name = log_name[len(c) + 1:] break else: continue log_pins = cell_pins[log_cell] # Find the input pin in the library and strip it for p in log_pins: if log_name.startswith(p): log_src = p log_name = log_name[len(p) + 1:] break else: continue # Find the output pin in the library and strip it for p in log_pins: if log_name == p: log_dst = p break else: continue # Append log_cells.append(( log_cell, log_src, log_dst, )) return log_cells def read_raw_timings(fin, cell_pins): """ Reads and parses raw timings, converts them into data used for SDF generation. """ REGEX_CFG = re.compile(r".*__CFG([0-9]+)$") def inner(): raw = list(parse_raw_timing(fin)) for slice, site_name, bel_name, speed_model, properties in raw: # Check if we have a bel timing # TODO: There are other naming conventions for eg. BRAM and DSP if not speed_model.startswith("bel_"): continue # Get timings from properties timings = [(k, properties[k]) for k in [ "DELAY", "FAST_MAX", "FAST_MIN", "SLOW_MAX", "SLOW_MIN", ]] # Get edge from the model name if "RISING" in speed_model: edge = "rising" elif "FALLING" in speed_model: edge = "falling" else: # Supposedly means either edge edge = None # Get configuration. Look for "__CFG<n>" # FIXME: How to correlate that with a configuration name ? match = REGEX_CFG.match(speed_model) if match is not None: cfg = match.group(1) else: cfg = None # Process physical names for the timing model. These should # correspond to site timings phy_names = properties["NAME"].split(",") for phy_name in phy_names: # Extract data from the name. Each name field should hava the # format: "<type>_<bel>_<site>_<src_pin>_<dst_pin>". 
The split # has to be done in complex way as the bel name may have "_" # within. phy_type, phy_name = phy_name.split("_", maxsplit=1) if 'URAM288' in phy_name: uram_info = speed_model.split("__") site = 'URAM288' bel = uram_info[1] phy_src = uram_info[2] phy_dst = uram_info[3] else: phy_name, phy_src, phy_dst = phy_name.rsplit( "_", maxsplit=2) bel_site = phy_name.rsplit("_", maxsplit=1) if len(bel_site) == 2: bel, site = bel_site else: continue sdf_type, is_seq = get_sdf_type(phy_type) if sdf_type is None: continue # Process logical names that should correspond to bel timings log_cells = parse_logical_names( phy_type, properties["NAME_LOGICAL"], cell_pins) # If we have log cells then yield them for log_cell, log_src, log_dst in log_cells: # Format cell type cell_type = log_cell if edge is not None: cell_type += "_{}_{}".format(log_src, edge) # Format cell location location = "{}/{}".format(site, bel) # Yield stuff key = (site_name, location, cell_type, speed_model) yield (*key, "type"), cell_type.upper() yield (*key, "location"), location.upper() yield (*key, "model"), speed_model if is_seq: yield (*key, "clock"), log_src.upper() yield (*key, "input"), log_dst.
upper() else:
yield (*key, "input"), log_src.upper() yield (*key, "output"), log_dst.upper() if is_seq: yield (*key, "sequential"), sdf_type for t, v in timings: yield (*key, t), v # We don't have any logical cells, stick to the bel # # TODO: This can be modified so we always dump timing for the # bel regardless of if we can decode logical cells. This way # we may have SDFs with both bels and logical cells. if not len(log_cells): # Format cell type cell_type = bel if cfg is not None: cell_type += "_CFG{}".format(cfg) if edge is not None: cell_type += "_{}_{}".format(phy_src, edge) # Format cell location location = "{}/{}".format(site, bel) # Yield stuff key = (site_name, location, cell_type, speed_model) yield (*key, "type"), cell_type.upper() yield (*key, "location"), location.upper() yield (*key, "model"), speed_model if is_seq: yield (*key, "clock"), phy_src.upper() yield (*key, "input"),
"""Sliding-window evaluation of sampling algorithms.

Compares reservoir, hash, and priority sampling over fixed-size windows of a
population read from a CSV file, and returns the name of the algorithm with
the lowest (best) combined accuracy score.
"""
from SamplingAccuracyEvaluation import SamplingAlgorithm as SA
from SamplingAccuracyEvaluation import AccuracyEvaluation as AE
from SamplingAccuracyEvaluation import PrintGraph as PG
from SamplingAccuracyEvaluation import StatisticalCalculation as SC
import operator


def populationListGenerate(filePath, target):
    """Read a CSV file and return column `target` of every row as a list.

    Values are kept as raw strings (no strip/convert), so the last column
    retains its trailing newline — presumably handled downstream by
    AccuracyEvaluation; verify against the callers.
    NOTE(review): the file is not closed if an exception occurs mid-read
    (no `with` / `finally`).
    """
    print('Generate Population List')
    populationList = []
    populationFile = open(filePath, 'r')
    while True:
        line = populationFile.readline()
        if not line:
            break
        line_data = line.split(',')
        populationList.append(line_data[target])
    populationFile.close()
    return populationList


def calculateScore(evalList):
    """Fold the per-measure accuracy values into one scalar score.

    The first measure is weighted by 1/4 and every other measure by 1/3;
    absolute values are used, and a lower score is better (the caller in
    run() picks the minimum).
    """
    score = 0
    for i in range(len(evalList)):
        if i == 0:
            score = score + abs(evalList[i])/4
        else:
            score = score + abs(evalList[i])/3
    return score


def run(windowSize, sampleSize, filePath, target=0):
    """Evaluate the three sampling algorithms over sliding windows.

    For each full window of `windowSize` items from column `target` of
    `filePath`, draws a sample of `sampleSize` with each algorithm,
    accumulates the accuracy measures from AccuracyEvaluation, averages
    them over the trials, and returns the name of the best-scoring
    algorithm ('RESERVOIR_SAMPLING', 'HASH_SAMPLING' or
    'PRIORITY_SAMPLING').
    """
    print('############## Sampling Accuracy Evaluation ##############')
    count = 1            # position within the current window (1-based)
    numOfTrials = 1      # 1-based index of the trial about to run
    jSDPieceCount = 20   # histogram pieces for the Jensen-Shannon divergence
    pAAPieceCount = 20   # pieces for the PAA-based measure
    print('Window Size: ' ,windowSize)
    print('Sample Size: ' ,sampleSize)
    print('JSD Piece Count: ' ,jSDPieceCount)
    print('PAA Piece Count: ' ,pAAPieceCount)
    populationList = populationListGenerate(filePath, target)
    windowList = []
    accuracyMeasureCount = 3
    evalDic = {}
    # Running sums of the accuracy measures, one slot per measure.
    reservoirEvalList = [0.0 for _ in range(accuracyMeasureCount)]
    hashEvalList = [0.0 for _ in range(accuracyMeasureCount)]
    priorityEvalList = [0.0 for _ in range(accuracyMeasureCount)]
    print()
    for data in populationList:
        windowList.append(data)
        if count == windowSize:
            # A full window has been collected: run one evaluation trial.
            print('################## ' + str(numOfTrials) + ' Evaluation Start ####################')
            # if numOfTrials == 1: PG.printGraph(windowList, 'Population', numOfTrials)
            print()
            print(str(numOfTrials)+'_ReservoirSampling')
            sampleList = SA.sortedReservoirSam(sampleSize, windowList)
            tempEvalList = AE.run(windowList, sampleList, jSDPieceCount, pAAPieceCount)
            SC.sumPerIndex(reservoirEvalList, tempEvalList)
            # if numOfTrials == 1: PG.printGraph(sampleList, 'Reservoir', numOfTrials)
            print()
            print(str(numOfTrials)+'_HashSampling')
            sampleList = SA.hashSam(sampleSize, windowList)
            tempEvalList = AE.run(windowList, sampleList, jSDPieceCount, pAAPieceCount)
            SC.sumPerIndex(hashEvalList, tempEvalList)
            # if numOfTrials == 1: PG.printGraph(sampleList, 'Hash', numOfTrials)
            print()
            print(str(numOfTrials)+'_PrioritySampling')
            sampleList = SA.sortedPrioritySam(sampleSize, windowList)
            tempEvalList = AE.run(windowList, sampleList, jSDPieceCount, pAAPieceCount)
            SC.sumPerIndex(priorityEvalList, tempEvalList)
            # if numOfTrials == 1: PG.printGraph(sampleList, 'Priority', numOfTrials)
            print()
            numOfTrials = numOfTrials + 1
            count = 0
            windowList = []
        count = count + 1
    # NOTE(review): numOfTrials is incremented AFTER each trial, so here it
    # equals (trials run + 1); dividing by it under-weights the averages by
    # one trial. Looks like an off-by-one — confirm before fixing.
    for i in range(accuracyMeasureCount):
        reservoirEvalList[i] = reservoirEvalList[i] / numOfTrials
        hashEvalList[i] = hashEvalList[i] / numOfTrials
        priorityEvalList[i] = priorityEvalList[i] / numOfTrials
    evalDic['RESERVOIR_SAMPLING'] = calculateScore(reservoirEvalList)
    evalDic['HASH_SAMPLING'] = calculateScore(hashEvalList)
    evalDic['PRIORITY_SAMPLING'] = calculateScore(priorityEvalList)
    # Lowest combined score wins.
    sortedEvalList = sorted(evalDic.items(), key = operator.itemgetter(1))
    return sortedEvalList[0][0]
# OpenShot Video Editor is a program that creates, modifies, and edits video files.
# Copyright (C) 2009 Jonathan Thomas
#
# This file is part of OpenShot Video Editor (http://launchpad.net/openshot/).
#
# OpenShot Video Editor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenShot Video Editor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenShot Video Editor. If not, see <http://www.gnu.org/licenses/>.

# Import Blender's python API. This only works when the script is being
# run from the context of Blender. Blender contains it's own version of Python
# with this library pre-installed.
import bpy


# Load a font
def load_font(font_path):
    """Load a new TTF font into Blender, and return the font object.

    Returns None if loading the file added no new font. Detection works by
    diffing bpy.data.fonts before and after the open operation.
    """
    # get the original list of fonts (before we add a new one)
    original_fonts = bpy.data.fonts.keys()

    # load new font
    bpy.ops.font.open(filepath=font_path)

    # get the new list of fonts (after we added a new one)
    for font_name in bpy.data.fonts.keys():
        if font_name not in original_fonts:
            return bpy.data.fonts[font_name]

    # no new font was added
    return None


# Debug Info:
# ./blender -b test.blend -P demo.py
# -b = background mode
# -P = run a Python script within the context of the project file

# Init all of the variables needed by this script. Because Blender executes
# this script, OpenShot will inject a dictionary of the required parameters
# before this script is executed. Keys read below but absent from this
# default dict (e.g. 'sub_title', 'diffuse_color', 'specular_color',
# 'specular_intensity', 'animation_speed') must come from that injection.
params = {
    'title': 'Oh Yeah! OpenShot!',
    'extrude': 0.1,
    'bevel_depth': 0.02,
    'spacemode': 'CENTER',
    'text_size': 1.5,
    'width': 1.0,
    'fontname': 'Bfont',

    'color': [0.8, 0.8, 0.8],
    'alpha': 1.0,
    'alpha_mode': 'TRANSPARENT',

    'output_path': '/tmp/',
    'fps': 24,
    'quality': 90,
    'file_format': 'PNG',
    'color_mode': 'RGBA',
    'horizon_color': [0.57, 0.57, 0.57],
    'resolution_x': 1920,
    'resolution_y': 1080,
    'resolution_percentage': 100,
    'start_frame': 20,
    'end_frame': 25,
    'animation': True,
}

#INJECT_PARAMS_HERE

# The remainder of this script will modify the current Blender .blend project
# file, and adjust the settings. The .blend file is specified in the XML file
# that defines this template in OpenShot.
#----------------------------------------------------------------------------

# Modify Text / Curve settings
#print (bpy.data.curves.keys())
text_object = bpy.data.curves["Title"]
text_object.extrude = params["extrude"]
text_object.bevel_depth = params["bevel_depth"]
text_object.body = params["title"]
text_object.align = params["spacemode"]
text_object.size = params["text_size"]
text_object.space_character = params["width"]

# Get font object
font = None
if params["fontname"] != "Bfont":
    # Add font so it's available to Blender
    font = load_font(params["fontname"])
else:
    # Get default font
    font = bpy.data.fonts["Bfont"]
text_object.font = font

text_object = bpy.data.curves["Subtitle"]
text_object.extrude = params["extrude"]
text_object.bevel_depth = params["bevel_depth"]
text_object.body = params["sub_title"]
text_object.align = params["spacemode"]
text_object.size = params["text_size"]
text_object.space_character = params["width"]
# set the font
text_object.font = font

# Change the material settings (color, alpha, etc...)
material_object = bpy.data.materials["Text"]
material_object.diffuse_color = params["diffuse_color"]
material_object.specular_color = params["specular_color"]
material_object.specular_intensity = params["specular_intensity"]
material_object.alpha = params["alpha"]

# Set the render options. It is important that these are set
# to the same values as the current OpenShot project. These
# params are automatically set by OpenShot
bpy.context.scene.render.filepath = params["output_path"]
bpy.context.scene.render.fps = params["fps"]
#bpy.context.scene.render.quality = params["quality"]
try:
    bpy.context.scene.render.file_format = params["file_format"]
    bpy.context.scene.render.color_mode = params["color_mode"]
# Bug fix: the original used a bare `except:`, which also swallows
# SystemExit/KeyboardInterrupt; narrowed to Exception. On failure, fall
# back to the image_settings API (presumably newer Blender — confirm).
except Exception:
    bpy.context.scene.render.image_settings.file_format = params["file_format"]
    bpy.context.scene.render.image_settings.color_mode = params["color_mode"]
try:
    bpy.context.scene.render.alpha_mode = params["alpha_mode"]
# Bug fix: bare `except:` narrowed to Exception. alpha_mode does not
# exist in every Blender version; this is deliberately best-effort.
except Exception:
    pass
bpy.data.worlds[0].horizon_color = params["horizon_color"]
bpy.context.scene.render.resolution_x = params["resolution_x"]
bpy.context.scene.render.resolution_y = params["resolution_y"]
bpy.context.scene.render.resolution_percentage = params["resolution_percentage"]
bpy.context.scene.frame_start = params["start_frame"]
bpy.context.scene.frame_end = params["end_frame"]

# Animation Speed (use Blender's time remapping to slow or speed up animation)
animation_speed = int(params["animation_speed"])  # time remapping multiplier
new_length = int(params["end_frame"]) * animation_speed  # new length (in frames)
bpy.context.scene.frame_end = new_length
bpy.context.scene.render.frame_map_old = 1
bpy.context.scene.render.frame_map_new = animation_speed
if params["start_frame"] == params["end_frame"]:
    # Still frame: render just the single final frame.
    bpy.context.scene.frame_start = params["end_frame"]
    bpy.context.scene.frame_end = params["end_frame"]

# Render the current animation to the params["output_path"] folder
bpy.ops.render.render(animation=params["animation"])
evel project folder PROJECT_ROOT = SITE_ROOT.parent # Site name: SITE_NAME = SITE_ROOT.basename() # Id for the Sites framework SITE_ID = 1 ########## END PATH CONFIGURATION ########## DEBUG CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#debug DEBUG = bool(get_env_setting('DEBUG', False)) # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug TEMPLATE_DEBUG = DEBUG # Is this a development instance? Set this to True on development/master # instances and False on stage/prod. DEV = False DEBUG_DB = bool(get_env_setting('DEBUG_DB', False)) ########## END DEBUG CONFIGURATION ########## DATABASE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#databases DATABASES = { 'default': dj_database_url.config(default='sqlite:///%s' % (PROJECT_ROOT / 'development.sqlite')) } # enable utf8mb4 on mysql if DATABASES['default']['ENGINE'] == 'django.db.backends.mysql': # enable utf8mb4 on mysql DATABASES['default']['OPTIONS'] = { 'charset': 'utf8mb4', 'init_command': 'SET storage_engine=INNODB', } ########## END DATABASE CONFIGURATION ########## CACHE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#caches CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', } } if get_env_setting('MEMCACHED_LOCATION', '') is not '': CACHES['default'] = { 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': get_env_setting('MEMCACHED_LOCATION'), 'PREFIX': SITE_NAME + ':', } ########## END CACHE CONFIGURATION ########## GENERAL CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone TIME_ZONE = 'UTC' # See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code LANGUAGE_CODE = 'en-us' # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n USE_I18N = True # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n USE_L10N = True # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz USE_TZ 
= True # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-LOGIN_REDIRECT_URL LOGIN_REDIRECT_URL = "/" ########## END GENERAL CONFIGURATION ########## MEDIA CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root MEDIA_ROOT = (SITE_ROOT / 'media').normpath() # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url MEDIA_URL = '/media/' ########## END MEDIA CONFIGURATION ########## STATIC FILE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root STATIC_ROOT = get_env_setting('STATIC_ROOT', (PROJECT_ROOT / 'assets').normpath()) if not isinstance(STATIC_ROOT, path): STATIC_ROOT = path(STATIC_ROOT) # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url STATIC_URL = '/static/' # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS STATICFILES_DIRS = ( (SITE_ROOT / 'static').normpath(), ) # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'compressor.finders.CompressorFinder', ) ########## END STATIC FILE CONFIGURATION ########## FIXTURE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS FIXTURE_DIRS = ( (SITE_ROOT / 'fixtures').normpath(), ) ########## END FIXTURE CONFIGURATION ########## SECRET CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key SECRET_KEY = get_env_setting('SECRET_KEY', 'secret') ########## END SECRET CONFIGURATION ########## TEST CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#test-runner TEST_RUNNER = 'django.test.runner.DiscoverRunner' ########## END TEST CONFIGURATION ########## TEMPLATE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors TEMPLATE_CONTEXT_PROCESSORS = ( 
'django.contrib.auth.context_processors.auth', 'django.core.context_processors.debug', 'django.core.context_processors.media', 'django.core.context_processors.request', 'django.core.context_processors.i18n', 'django.core.context_processors.static', 'django.core.context_processors.csrf', 'django.core.context_processors.tz', 'django.contrib.messages.context_processors.messages', 'pyanalysis.apps.base.context_processors.google_analytics', ) # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ) # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs TEMPLATE_DIRS = ( (SITE_ROOT / 'templates').normpath(), ) ########## END TEMPLATE CONFIGURATION ########## MIDDLEWARE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes MIDDLEWARE_CLASSES = ( # Default Django middleware. 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.locale.LocaleMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ########## END MIDDLEWARE CONFIGURATION ########## URL CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf ROOT_URLCONF = '%s.urls' % SITE_NAME ########## END URL CONFIGURATION ########## APP CONFIGURATION DJANGO_APPS = ( # Default Django apps: 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # Utilities 'django_extensions', 'widget_tweaks', ) # Apps specific for this project go here. 
LOCAL_APPS = ( 'pyanalysis.apps.base', 'pyanalysis.apps.api', 'pyanalysis.apps.corpus', 'pyanalysis.apps.importer', 'pyanalysis.ap
ps.enhance', ) # See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps INSTALLED_APPS = DJANGO_APPS + LOCAL_APPS ########## END APP CONFIGURATION ########## LOGGING CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#logging # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/t
opics/logging for # more details on how to customize your logging configuration. LOGS_ROOT = get_env_setting('LOGS_ROOT', PROJECT_ROOT / 'logs') if not isinstance(LOGS_ROOT, path): LOGS_ROOT = path(LOGS_ROOT) if not LOGS_ROOT.exists(): LOGS_ROOT.mkdir() LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' }, 'console': { # logging handler that outputs log messages to terminal 'class': 'logging.StreamHandler', 'level': 'DEBUG', }, 'db_handler': { 'class': 'logging.FileHandler', 'filename': LOGS_ROOT / 'django.db.log', 'level': 'DEBUG', }, }, 'loggers': { 'pyanalysis': { 'handlers': ['console'], 'level': 'WARNING', }, 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', }, } } if DEBUG: LOGGING['loggers']['pyanalysis']['level'] = 'DEBUG' if DEBUG_DB: LOGGING['loggers']['django.db'] = { 'handlers': ['db_handler'], 'l
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2013 Martijn Kaijser
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Module-level metadata constants for the XBMC "Artwork Downloader" addon.
# The rest of the addon imports these names for ids, paths and localization.

import xbmc
import xbmcaddon

### get addon info
# Handle to this addon's registration inside XBMC; all metadata below is
# read through it.
__addon__ = xbmcaddon.Addon(id='script.artwork.downloader')
__addonid__ = __addon__.getAddonInfo('id')
__addonname__ = __addon__.getAddonInfo('name')
__author__ = __addon__.getAddonInfo('author')
__version__ = __addon__.getAddonInfo('version')
__addonpath__ = __addon__.getAddonInfo('path')
# The profile path uses XBMC's special:// scheme and must be translated to a
# real filesystem path; .decode('utf-8') marks this as Python 2 (str -> unicode).
__addonprofile__ = xbmc.translatePath(__addon__.getAddonInfo('profile')).decode('utf-8')
__icon__ = __addon__.getAddonInfo('icon')
# Bound method, used as a shorthand: __localize__(string_id) -> unicode.
__localize__ = __addon__.getLocalizedString
# -*
- coding: utf-
8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from . import note
import unittest

from mopidy_tunein import Extension


class ExtensionTest(unittest.TestCase):
    """Smoke tests for the Mopidy-TuneIn extension's config plumbing."""

    def test_get_default_config(self):
        # The shipped default config must declare its section and be enabled.
        default_conf = Extension().get_default_config()
        for expected in ("[tunein]", "enabled = true"):
            self.assertIn(expected, default_conf)

    def test_get_config_schema(self):
        # The schema must expose the extension's tunable options.
        conf_schema = Extension().get_config_schema()
        for option in ("timeout", "filter"):
            self.assertIn(option, conf_schema)
#!/usr/bin/python # -*- coding: utf-8 -*- import re COST_AUTO = 5 class Appartment(object): """Appartment class consists of features that have all appartments""" def __init__(self, address, metro, transportation, rooms, space, price, floor, addInfo): super(Appartment, self).__init__() self.address = self.setAddress(address) self.metro = self.setMetro(metro) self.transportation = self.setTransportation(transportation) self.rooms = self.setRooms(rooms) self.space = self.setSpace(spac
e) self.pric
e = self.setPrice(price) self.floor = self.setFloor(floor) self.addInfo = self.setAddInfo(addInfo) # Getter methods def getAddress(self): return self.address def getMetro(self): return self.metro def getTransportation(self): return self.transportation def getRooms(self): return self.rooms def getSpace(self): return self.space def getPrice(self): return self.price def getFloor(self): return self.floor def getAddInfo(self): return self.addInfo # Setter methods def setAddress(self, address): if (type(address) is str) or (type(address) is unicode): self.address = address else: print "Can't set proper address, type is not string" self.address = None return self.address def setMetro(self, metro): if (type(metro) is str) or (type(metro) is unicode): self.metro = metro else: print "Can't set metro, type is not string" self.metro = None return self.metro def setTransportation(self, transportation): if type(transportation) is dict: self.transportation = transportation elif (type(transportation) is str) or (type(transportation) is unicode): time = re.search(u'\d+', transportation) auto = re.search(u'авто', transportation) foot = re.search(u'пешком', transportation) if time and auto: time = int(time.group()) d = {} d['auto'] = time self.transportation = d elif time and foot: time = int(time.group()) d = {} d['foot'] = time self.transportation = d else: self.transportation = None return self.transportation def setRooms(self, rooms): if type(rooms) is int: self.rooms = rooms elif (type(rooms) is str) or (type(rooms) is unicode): room = re.search(u'\d', rooms) if room: room = int(room.group()) self.rooms = room else: print "error, no match" self.rooms = None else: print "type error, current type is " + type(rooms) self.rooms = None return self.rooms def setSpace(self, space): if type(space) is dict: self.space = space elif type(space) is list: d = {} for typo in space: if re.search(u'кухня', typo): area = re.search(u'\d+', typo) area = int(area.group()) d['kitchen'] = area elif 
re.search(u'жилая', typo): area = re.search(u'\d+', typo) area = int(area.group()) d['dwelling'] = area elif re.search(u'общая', typo): area = re.search(u'\d+', typo) area = int(area.group()) d['full'] = area elif typo == "NULL": pass else: print "Error, no matching typo's. Current typo is " + typo self.space = d else: print "Error with setting space" self.space = None return self.space def setPrice(self, price): if (type(price) is int) or (type(price) is float): print "type is " + str(type(price)) self.price = int(price) elif (type(price) is str) or (type(price) is unicode): price = price.replace(u',', '') price = re.search(u'^\d+', price) if price: self.price = int(price.group()) else: print "No match of price in string" self.price = None else: print "Type error, current type is " + str(type(price)) self.price = None return self.price def setFloor(self, floor): if type(floor) is tuple: self.floor = floor elif (type(floor) is str) or (type(floor) is unicode): floor = floor.split("/") if len(floor) == 2: floor = (int(floor[0]), int(floor[1])) self.floor = floor else: print "length of floor array is not 2, len = " + len(floor) self.floor = None else: print "Type error, current type is " + type(floor) self.floor = None return self.floor def setAddInfo(self, addInfo): if type(addInfo) is list: self.addInfo = addInfo elif (type(addInfo) is str) or (type(addInfo) is unicode): addInfo = addInfo.split('|') self.addInfo = addInfo else: print "Type error, current type is " + type(addInfo) self.addInfo = None return self.addInfo # Helper methods to preprocess data def preprocessData1(self): line = [] address = self.address if address: line.append(address) metro = self.metro if metro: line.append(metro) transportation = self.transportation if transportation: if 'auto' in transportation: line.append(str(COST_AUTO*transportation['auto'])) elif 'foot' in transportation: line.append(str(transportation['foot'])) else: print "no line about transportation" rooms = self.rooms if 
rooms: line.append(str(rooms)) space = self.space if space: if 'kitchen' in space: line.append(str(space['kitchen'])) if 'dwelling' in space: line.append(str(space['dwelling'])) if 'full' in space: line.append(str(space['full'])) price = self.price if price: line.append(str(price)) floor = self.floor if floor and floor[1]!=0: num = round(float(floor[0])/float(floor[1]), 2) line.append(str(num)) return line
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model

from projects.models import Project

User = get_user_model()


class Command(BaseCommand):
    """Create a Project owned by an existing user and print its primary key."""

    help = 'Create a project for the given user; the new pk is written to stdout.'

    def add_arguments(self, parser):
        # Username of the project owner; must already exist.
        parser.add_argument('user')
        # nargs='+' lets multi-word names be passed without shell quoting.
        parser.add_argument('name', nargs='+')
        parser.add_argument('--description', nargs='*', default='')

    def handle(self, *args, **options):
        name = ' '.join(options['name'])
        description = ' '.join(options['description'])
        owner = User.objects.get(username=options['user'])
        # FIX: Project.objects.create() already persists the row, so the
        # previous extra save() call issued a redundant UPDATE and is removed.
        project = Project.objects.create(user=owner, name=name,
                                         description=description)
        self.stdout.write(str(project.pk))
# -*- coding: utf-8 -*-
import traceback
from datetime import timedelta

from django.core import mail
from django.core.mail import EmailMultiAlternatives, mail_admins
from django.core.management.base import BaseCommand
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.contrib.sites.models import Site
from django.conf import settings
from django.utils import translation
from django.utils import timezone

from cert.models import Attendee


class Command(BaseCommand):
    help = u'Send certificate e-mails'

    def get_email(self, attendee):
        """Build the certificate e-mail (plain text + HTML alternative) for
        one attendee."""
        translation.activate(settings.LANGUAGE_CODE)
        subject = _(u'Certificado de participação | PUG-PE')
        from_email = settings.DEFAULT_FROM_EMAIL
        ctx = {
            'site': Site.objects.get_current().domain,
            'event': attendee.event,
            'attendee': attendee,
        }
        text_content = render_to_string('cert/cert_email.txt', ctx)
        html_content = render_to_string('cert/cert_email.html', ctx)
        msg = EmailMultiAlternatives(
            subject, text_content, from_email, [attendee.email],
        )
        msg.attach_alternative(html_content, "text/html")
        return msg

    def handle(self, *args, **options):
        """Send certificates to attendees not yet notified; on failure mail
        the admins and leave sent_date unset so the attendee is retried."""
        connection = mail.get_connection()
        num_emails = 0
        attendees = Attendee.objects.filter(sent_date__isnull=True)
        # Evitar envio para eventos muito antigos
        # (skip events published more than 10 days ago).
        attendees = attendees.filter(
            pub_date__gte=timezone.now() - timedelta(days=10),
        )
        for attendee in attendees:
            msg = self.get_email(attendee)
            try:
                num_emails += connection.send_messages([msg])
            except Exception as exc:
                subject = _(u'PUG-PE: Problema envio certificado')
                # BUG FIX: the format string had no placeholder
                # ('except: '.format(exc)), so the exception text was
                # silently dropped from the admin notification.
                body = 'except: {0}'.format(exc)
                body += traceback.format_exc()
                mail_admins(subject, body)
            else:
                attendee.sent_date = timezone.now()
                attendee.save()
        self.stdout.write(
            unicode(_(u'Foram enviados {0} emails\n'.format(num_emails))),
        )
import sys import glob def read_fileb(filename, mode='rb'): f = open(filename, mode) try: return f.read() finall
y: f.close() def write_fileb(filename, value, mode='wb'): f = open(filename, mode) try: f.write(value) finally: f.close() for filename in glob.glob(sys.argv[1]): data1 = read_fileb(filename) write_fileb(
filename + '.bak2', data1) data2lines = read_fileb(filename).strip().split('\n') data2 = '\n'.join([line.rstrip( ).replace('\t', ' ' * 2) for line in data2lines]) + '\n' write_fileb(filename, data2) print filename, len(data1) - len(data2)
from hypothesis.utils.conventions import not_set


def accept(f):
    """Return a zero-argument wrapper around *f*.

    NOTE(review): this looks like hypothesis-generated signature-matching
    code; the inner function's name (``complex_numbers``) appears to be
    significant to the caller, so it must not be renamed -- confirm before
    refactoring. ``not_set`` is imported for the surrounding generated code
    and is not used in this snippet.
    """
    def complex_numbers():
        # Delegate directly to the accepted callable.
        return f()
    return complex_numbers
#
# Created by DraX on 2005.08.08
#
# Updated by ElgarL on 28.09.2005
#
# Village-master dialog script (Jython, L2J game server) for Grand Master
# Bitz (NPC 7026): offers the first occupation change to human characters.

print "importing village master data: Talking Island Village ...done"

import sys
from net.sf.l2j.gameserver.model.quest import State
from net.sf.l2j.gameserver.model.quest import QuestState
from net.sf.l2j.gameserver.model.quest.jython import QuestJython as JQuest

# NPC id of Grand Master Bitz; doubles as the html file prefix ("7026-NN.htm").
GRAND_MASTER_BITZ = 7026

class Quest (JQuest) :

    def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)

    def onEvent (self,event,st):
        # Dialog bypass events carry the html page name; echo it back for the
        # known pages, otherwise fall through to "No Quest".
        htmltext = "No Quest"
        if event == "7026-01.htm":
            htmltext = event
        if event == "7026-02.htm":
            htmltext = event
        if event == "7026-03.htm":
            htmltext = event
        if event == "7026-04.htm":
            htmltext = event
        if event == "7026-05.htm":
            htmltext = event
        if event == "7026-06.htm":
            htmltext = event
        if event == "7026-07.htm":
            htmltext = event
        return htmltext

    # NOTE(review): the self parameter is spelled "Self"; Jython accepts any
    # name, but it deviates from convention.
    def onTalk (Self,npc,st):
        npcId = npc.getNpcId()
        # "Race" holds the player's race value; Race.human etc. below are
        # attributes looked up on that value.
        Race = st.getPlayer().getRace()
        pcId = st.getPlayer().getClassId().getId()
        # Humans got accepted
        if npcId == GRAND_MASTER_BITZ and Race in [Race.human]:
            #fighter
            if pcId == 0x00:
                htmltext = "7026-01.htm"
            #warrior, knight, rogue
            if pcId == 0x01 or pcId == 0x04 or pcId == 0x07:
                htmltext = "7026-08.htm"
            #warlord, paladin, treasureHunter
            if pcId == 0x03 or pcId == 0x05 or pcId == 0x08:
                htmltext = "7026-09.htm"
            #gladiator, darkAvenger, hawkeye
            if pcId == 0x02 or pcId == 0x06 or pcId == 0x09:
                htmltext = "7026-09.htm"
            #mage, wizard, cleric]:
            if pcId == 0x0a or pcId == 0x0b or pcId == 0x0f:
                htmltext = "7026-10.htm"
            #sorceror, necromancer, warlock, bishop, prophet
            if pcId == 0x0c or pcId == 0x0d or pcId == 0x0e or pcId == 0x10 or pcId == 0x11:
                htmltext = "7026-10.htm"
            # NOTE(review): if pcId matches none of the ids above (class ids
            # > 0x11), htmltext is never assigned and the return below raises
            # UnboundLocalError -- confirm whether that can occur in practice.
            st.setState(STARTED)
            return htmltext
        # All other Races must be out
        if npcId == GRAND_MASTER_BITZ and Race in [Race.dwarf, Race.darkelf, Race.elf, Race.orc]:
            st.setState(COMPLETED)
            st.exitQuest(1)
            return "7026-10.htm"
        # NOTE(review): for any other npcId this method implicitly returns
        # None; presumably the engine treats that as "no dialog" -- verify.

# Module-level quest registration: STARTED/COMPLETED are defined after the
# class but before any onTalk call, so the references above resolve at runtime.
QUEST = Quest(7026,"7026_bitz_occupation_change","village_master")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)

QUEST.setInitialState(CREATED)
QUEST.addStartNpc(7026)

STARTED.addTalkId(7026)
ib2 from collections import namedtuple from urllib import urlencode from urllib import quote API_BASE = 'https://api.github.com/' REPO_TYPE_CHOICES = ('all', 'public', 'private', 'forks', 'sources', 'member') @contextlib.contextmanager def chdir(dirname=None): curdir = os.getcwd() try: if dirname is not None: os.chdir(dirname) yield finally: os.chdir(curdir) class Helpers(object): def __init__(self, args): self.args = args def exec_cmd(self, command): """ Executes an external command taking into account errors and logging. """ print "Executing command: %s" % self.redact(command) resp = os.system(command) if resp != 0: raise Exception(self.redact("Command [%s] failed (%s)" % (command, resp))) def https_url_with_auth(self, base_url): _, suffix = base_url.split('https://') return 'https://%s:%s@%s' % (quote(self.args.username), quote(self.args.password), suffix) def redact(self, s): if hasattr(self.args, 'password'): s = s.replace(self.args.password, 'REDACTED') if hasattr(self.args, 'username'): s = s.replace(self.args.username, 'REDACTED') return s Pagination = namedtuple('Pagination', 'first prev next last') def get_pagination(raw_link_header): link_map = {} for link, rel in (lh.split(';') for lh in raw_link_header.split(',')): link_map[rel.split('=')[1].strip('"')] = link.strip(' <>') return Pagination(*(link_map.get(f) for f in Pagination._fields)) def add_https_basic_auth(request, username, password): base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '') request.add_header("Authorization", "Basic %s" % base64string) def get_repos(org, repo_type, access_token=None, username=None, password=None, per_page=25): """ Paginates through all of the repositories using github's Link header. https://developer.github.com/v3/#link-header """ url = API_BASE + 'orgs/%s/repos?' 
% org qs_params = {'type': repo_type, 'per_page': per_page} if access_token: qs_params.update({'access_token': args.access_token}) url += urlencode(qs_params) request = urllib2.Request(url) elif username and password: url += urlencode(qs_params) request = urllib2.Request(url) add_https_basic_auth(request, username, password) else: raise ValueError('unworkable combination of authentication inputs') response = urllib2.urlopen(request) try: pagination = get_pagination(response.headers['Link']) except KeyError: print 'no Link header, nothing to paginate through.' pagination = Pagination(None, None, None, None) repos = json.loads(response.read()) for r in repos: if not r.get('archived'): yield r # so, this isn't the DRYest code ;-) while pagination.next: request = urllib2.Request(pagination.next) if username and password: add_https_basic_auth(request, username, password) response = urllib2.urlopen(request) pagination = get_pagination(response.headers['Link']) repos = json.loads(response.read()) for r in repos: if not r.get('archived'): yield r # Github API call, can authenticate via access token, or username and password # git cloning/pulling, can authenticate via ssh key, or username & password via https def repocsv(string): """ >>> repocsv('org1/repo1, org2/repo2,org3/repo3 ,org4/repo4') ['org1/repo1', 'org2/repo2', 'org3/repo3', 'org4/repo4'] """ try: repos = [r.strip() for r in string.split(',')] return set(repos) except Exception as exc: raise argparse.ArgumentTypeError(exc.message) if __name__ == '__main__': parser = argparse.ArgumentParser(description='backup github repositories for an organization') subparsers = parser.add_subparsers(dest='authtype') # uses an access token to fetch repositories names from github's API, # but then assu
mes you have SSH keys setup for cloning/pulling ssh_parser = subparsers.add_parser('ssh', help='use ssh for
cloning/pulling, and use access token for github api access') ssh_parser.add_argument('-d', '--dir', type=str, dest='directory', required=True, help='full or relative path to store backed up repositories') ssh_parser.add_argument('-o', '--org', type=str, required=True, help='github organization name') ssh_parser.add_argument('-t', '--type', type=str, dest='rtype', nargs='?', default='all', choices=REPO_TYPE_CHOICES, help='repository types to backup') ssh_parser.add_argument('-a', '--access-token', type=str, help='personal access token or oauth access token') ssh_parser.add_argument('-f', '--forks', action='store_true', help='add this arg if you want to backup fork repositories also') ssh_parser.add_argument('-i', '--ignore-list', type=repocsv, default=set(), help='add repos you dont want to fetch/index, e.g. --ignore-list org1/repo1,org2/repo2') # uses a username and password for fetching repositories names from # github's API, and uses same username and password for # cloning/updating via HTTPS as well. # # note: you can also use your personal access token as a password for https # basic auth when talking to github's api or cloning https_parser = subparsers.add_parser('https', help='use https for cloning/pulling, and use username and password (https basic auth) for github api access. 
note that github also allows using a personal access token as a password via this method') https_parser.add_argument('-d', '--dir', type=str, dest='directory', required=True, help='full or relative path to store backed up repositories') https_parser.add_argument('-o', '--org', type=str, required=True, help='github organization name') https_parser.add_argument('-t', '--type', type=str, dest='rtype', nargs='?', default='all', choices=REPO_TYPE_CHOICES, help='repository types to backup') https_parser.add_argument('-u', '--username', dest='username', type=str, required=True, help='github username') https_parser.add_argument('-p', '--password', dest='password', type=str, required=True, help='github password or github personal access token') https_parser.add_argument('-f', '--forks', action='store_true', help='add this arg if you want to backup fork repositories also') https_parser.add_argument('-i', '--ignore-list', type=repocsv, default=set(), help='add repos you dont want to fetch/index, e.g. --ignore-list org1/repo1,org2/repo2') args = parser.parse_args() if not os.path.exists(args.directory): os.makedirs(args.directory) if args.authtype == 'ssh': org_repos = get_repos(args.org, args.rtype, args.access_token) else: org_repos = get_repos(args.org, args.rtype, username=args.username, password=args.password) h = Helpers(args) for repo in org_repos: # skip ignored repos if repo['full_name'] in args.ignore_list: print 'skipping ignored repository %s' % repo['full_name'] continue # skip forks unless asked not to if not args.forks and repo['fork']: print 'skipping fork repository %s' % repo['full_name'] continue destdir = os.path.join(args.directory, repo['name']) if args.authtype == 'ssh': repo_path = repo['ssh_url'] else: repo_path = h.https_url_with_auth(repo['clone_url']) if os.path.exists(destdir): # pull in new commits to an already tracked repository print '*** updating %s... 
***' % h.redact(repo_path) with chdir(destdir): try: h.exec_cmd('git pull origin %s' % repo['default_branch']) continue except Exception as e: print 'error: %s (repo=%s); will re-clone!' % (e, repo['name']) # clone the repo fresh, deleting if it already existed print '*** back
""" Return the index by key into the list of LogEntries. """ return self._index @property def prev_hash(self): """ Return the content hash of the previous Log. """ return self._prev_hash @property def prev_master(self): """ Return the ID of the master of the previous Log. """ return self._prev_master @property def timestamp(self): """ Return the timestamp for this Log. """ return self._timestamp class BoundLog(Log): """ A fult tolerant log bound to a file. """ def __init__(self, reader, hashtype=HashTypes.SHA2, u_path=None, base_name='L'): super(). __init__(reader, hashtype) self.fd_ = None self.is_open = False # for appending overwriting = False if u_path: self.u_path = u_path self.base_name = base_name overwriting = True else: if isinstance(reader, FileReader): self.u_path = reader.u_path self.base_name = reader.base_name overwriting = False else: msg = "no target uPath/baseName specified" raise UpaxError(msg) self.path_to_log = "%s/%s" % (self.u_path, self.base_name) if overwriting: with open(self.path_to_log, 'w') as file: log_contents = super(BoundLog, self).__str__() file.write(log_contents) file.close() self.fd_ = open(self.path_to_log, 'a') self.is_open = True def add_entry(self, tstamp, key, node_id, src, path): if not self.is_open: msg = "log file %s is not open for appending" % self.path_to_log raise UpaxError(msg) # XXX NEED TO THINK ABOUT THE ORDER OF OPERATIONS HERE entry = super( BoundLog, self).add_entry(tstamp, key, node_id, src, path) stringified = str(entry) self.fd_.write(stringified) return entry def flush(self): """ Flush the log. This should write the contents of any internal buffers to disk, but no particular behavior is guaranteed. """ self.fd_.flush() def close(self): """ Close the log. """ self.fd_.close() self.is_open = False # ------------------------------------------------------------------- class LogEntry(): """ The entry made upon adding a file to the Upax content-keyed data store. 
This consists of a timestamp; an SHA content key, the hash of the contents of the file, the NodeID identifying the contributor, its source (which may be a program name, and a UNIX/POSIX path associated with the file. The path will normally be relative. """ __slots__ = ['_timestamp', '_key', '_node_id', '_src', '_path', ] def __init__(self, timestamp, key, node_id, source, pathToDoc): self._timestamp = timestamp # seconds from epoch if key is None: raise UpaxError('LogEntry key may not be None') hashtype = len(key) == 40 self._key = key # 40 or 64 hex digits, content hash if hashtype == HashTypes.SHA1: check_hex_node_id_160(self._key) else: check_hex_node_id_256(self._key) if node_id is None: raise UpaxError('LogEntry nodeID may not be None') self._node_id = node_id # 40/64 digits, node providing entry # XXX This is questionable. Why can't a node with a SHA1 id store # a datum with a SHA3 key? if hashtype == HashTypes.SHA1: check_hex_node_id_160(self._node_id) else: check_hex_node_id_256(self._node_id) self._src = source # tool or person responsible self._path = pathToDoc # file name @property def key(self): """ Return the 40- or 64-byte SHA hash associated with the entry. This is an SHA content hash. """ return self._key @property def node_id(self): """ Return the 40- or 64-byte NodeID associated with the entry. """ return self._node_id @property def path(self): """ Return the POSIX path associated with the LogEntry. """ return self._path @property def src(self): """ Return the 'src' associated with the LogEntry. """ return self._src @property def timestamp(self): """ Return the time at which the LogEntry was created. """ return self._timestamp @property def hashtype(self): """ XXX WRONG should return key length, allowing 64 or 40. """ return len(self._key) == 40 # used in serialization, so newli
nes are intended def __str__(self): if self.hashtype == HashTypes.SHA1: fmt = '%013u %40s %40s "%s" %s\n' else: fmt = '%013u %64s %64s "%s" %s\n' return fmt % (self._timestamp, self._key, self._node_id, self._src, self._path) def __eq__(self, other): return isinstance(other, LogEntry) and\ self._timestamp
== other.timestamp and\ self._key == other.key and\ self._node_id == other.node_id and\ self._src == other.src and\ self._path == other.path def __ne__(self, other): return not self.__eq__(other) def equals(self, other): """ The function usualy known as __eq__. XXX DEPRECATED """ return self.__eq__(other) # ------------------------------------------------------------------- # CLASS READER AND SUBCLASSES # ------------------------------------------------------------------- class Reader(object): """ Would prefer to be able to handle this through something like a Java Reader, so that we could test with a StringReader but then use a FileReader in production. If it is a file, file.readlines(sizeHint) supposedly has very good preformance for larger sizeHint, say 100KB It appears that lines returned need to be rstripped, which wastefully requires copying For our purposes, string input can just be split on newlines, which has the benefit of effectively chomping at the same time """ # __slots__ = ['_entries', '_index', '_lines', '_hashtype', # 'FIRST_LINE_RE', ] def __init__(self, lines, hashtype): check_hashtype(hashtype) self._hashtype = hashtype if hashtype == HashTypes.SHA1: first_line_pat = r'^(\d{13}) ([0-9a-f]{40}) ([0-9a-f]{40})$' else: first_line_pat = r'^(\d{13}) ([0-9a-f]{64}) ([0-9a-f]{64})$' self.first_line_re = re.compile(first_line_pat, re.I) # XXX verify that argument is an array of strings self._lines = lines ndx_last = len(self._lines) - 1 # strip newline from last line if present if ndx_last >= 1: self._lines[ndx_last] = self._lines[ndx_last].rstrip('\n') # Entries are a collection, a list. We also need a dictionary # that accesses each log entry using its hash. self._entries = [] # the empty list self._index = dict() # mapping hash => entry @property def hashtype(self): """ Return the type of SHA hash used. """ return self._hashtype def read(self): """ The first line contains timestamp, hash, nodeID for previous Log. 
Succeeding lines look like timestamp hash nodeID src path In both cases timestamp is an unsigned int, the number of milliseconds since the epoch. It can be printed with %13u. The current value (April 2011) is about 1.3 trillion (1301961973000). """ first_line = None if self._lines: first_line = self._lines[0] if first_line: match = re.match(self.first_line_re, first_line) if not match: print("NO MATCH, FIRST LINE; hashtype = %s" % self.hashtype)
#!/usr/bin/env python import sys import re from helpers import * PROGRAM_USAGE = """ SeqAn invalid identifiers detection script. USAGE: invalid_identifiers.py BASE_PATH BASE_PATH is the root path of all the folders to be searched. This script generates a list of invalid identifiers found in the code base, paired with their suggested replacement string in the format ``"old: new"``, one identifier per line. The result is written to the standard output. """.strip() INVALID_IDENTIFIER = re.compile(r'\b_[A-Z_]\w*\b') REPLACEMENT_ID = re.compile(r'\b(__?)(\w*)\b') # The following IDs are exempted from replacement since they are either defined # by some compiler (-specific library) or are solely used within a string. VALID_IDENTIFIERS = map( lambda rx: re.compile(rx), [ '___+', '^__$', '_N', '_L', '_H', '__u?int64(_t)?', '_A123456', '__OPTIMIZE__', '__gnu_cxx', '_Resize_String', # will be done manually '_Fill_String', # '_Transcript_', '_Confidence_99', '_PARSER_H', '_POSIX_TIMERS', '__GNUC_MINOR__', '_S_IREAD', '_S_IWRITE', '_O_BINARY', '_O_CREAT',
'_O_RDONLY', '_O_RDWR', '_O_TEMPORARY', '_O_TRUNC', '_O_WRONLY', '_KMER_H', '_MSC_EXTENSIONS', '_GLIBCXX_PARALLEL', '_FILE_OFFSET_BITS', '_POSIX_SYNCHRONIZED_IO', '__cplusplus', '__(force)?inline(__)?', '__alignof(__)?', '__attribute__', '__GLOBAL__', '_DELETIO
NS____', '_INSERTS______', '_REPLACEMENTS_', '__int128', '__SSE2__', '__m128i', '__VA_ARGS__', '__FILE__', '__LINE__', '__GET_OPT_H__', '_OPENMP', '__SINIX__', '__sgi', '__BEOS__', '__aix__', '__ICC', '__WATCOMC__', '__ADSPBLACKFIN__', '_BEOS', '__SUNPRO_CC?', '__tru64', '__FreeBSD__', '__ultrix', '__OPENBSD', '_MPRAS', '_HAIKU', '_SGI_COMPILER_VERSION', '_POSIX_C_SOURCE', '_XOPEN_SOURCE', '__OpenBSD__', '__AIX__', '__ADSP21000__', '__HAIKU__', '__riscos__', '__hpux', '__HP_aCC', '__riscos', '__hpua', '__GNUC__', '_ULTRIX', '_SCO_SV', '__DECCXX', '_XENIX', '__sgi__', '_WIN32', '__PGI', '__QNX__', '__APPLE__', '__AIX', '_SGI', '_AIX', '__XENIX__', '__INTEL_COMPILER', '__osf', '__linux__', '__sinix__', '__bsdos__', '__ADSPTS__', '__sun', '__sinix', '__NetBSD', '__FreeBSD', '__osf__', '__ultrix__', '__COMPILER_VER__', '__COMO__', '__linux', '__UNIX_SV__', '__HAIKU', '__WIN32__', '__NetBSD__', '__CYGWIN__', '_COMPILER_VERSION', '__BORLANDC__', '__TRU64__', '__MINGW32__', '__aix', '__BeOS', '__QNXNTO__', '__hpux__', '__IBMCPP__', '__IAR_SYSTEMS_ICC__', '__18CXX', '__HP_cc', '__SUNPRO_C', '__DECC', '__IBMC__', '_MSC_VER' ]) def valid(id): """ Returns whether the given ``id`` is in fact valid and shouldn't be replaced. """ return any(VALID_ID.match(id) for VALID_ID in VALID_IDENTIFIERS) def find_all(file): """ Returns all invalid identifiers found in a given ``file``. """ f = open(file, 'r') result = [] for line in f: matches = INVALID_IDENTIFIER.findall(line) invalids = [match for match in matches if not valid(match)] result += invalids return result def replacement(orig): """ Returns the replacement string for a given invalid identifier. """ return REPLACEMENT_ID.sub(r'\2\1', orig) def generate_replacements(ids): """ Generates a dictionary of replacement strings for a list of invalid identifiers. """ return dict([(original, replacement(original)) for original in ids]) def main(): if len(sys.argv) != 2: print >>sys.stderr, 'ERROR: Invalid number of arguments.' 
print >>sys.stderr, PROGRAM_USAGE return 1 results = {} project_path = sys.argv[1] for file in all_files(project_path): results[file] = set(find_all(file)) all_ids = set() for ids in results.values(): all_ids |= ids replacements = generate_replacements(all_ids) for id in sorted(all_ids): print '%s: %s' % (id, replacements[id]) #for file in sorted(results.keys()): # for id in results[file]: # print '%s: %s' % (file, id) return 0 if __name__ == '__main__': sys.exit(main())
# Foris - we
b administration interface for OpenWrt based on NETCONF # Copyright (C) 2017 CZ.NIC, z.s.p.o. <http://www.nic.cz> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any late
r version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from .base import BaseConfigHandler __all__ = ["BaseConfigHandler"]