repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
chokribr/inveniotest
modules/websearch/lib/websearch_webinterface.py
9
47533
## This file is part of Invenio. ## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """WebSearch URL handler.""" __revision__ = "$Id$" import cgi import os import datetime import time import sys from urllib import quote from invenio import webinterface_handler_config as apache import threading #maximum number of collaborating authors etc shown in GUI MAX_COLLAB_LIST = 10 MAX_KEYWORD_LIST = 10 MAX_VENUE_LIST = 10 if sys.hexversion < 0x2040000: # pylint: disable=W0622 from sets import Set as set # pylint: enable=W0622 from invenio.config import \ CFG_SITE_URL, \ CFG_SITE_NAME, \ CFG_CACHEDIR, \ CFG_SITE_LANG, \ CFG_SITE_SECURE_URL, \ CFG_BIBRANK_SHOW_DOWNLOAD_STATS, \ CFG_WEBSEARCH_INSTANT_BROWSE_RSS, \ CFG_WEBSEARCH_RSS_TTL, \ CFG_WEBSEARCH_RSS_MAX_CACHED_REQUESTS, \ CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE, \ CFG_WEBSEARCH_ENABLED_SEARCH_INTERFACES, \ CFG_WEBDIR, \ CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS, \ CFG_WEBSEARCH_MAX_RECORDS_IN_GROUPS, \ CFG_WEBSEARCH_USE_ALEPH_SYSNOS, \ CFG_WEBSEARCH_RSS_I18N_COLLECTIONS, \ CFG_INSPIRE_SITE, \ CFG_WEBSEARCH_WILDCARD_LIMIT, \ CFG_SITE_RECORD from invenio.dbquery import Error from invenio.webinterface_handler import wash_urlargd, WebInterfaceDirectory from invenio.urlutils import redirect_to_url, make_canonical_urlargd, 
drop_default_urlargd from invenio.htmlutils import get_mathjax_header from invenio.htmlutils import nmtoken_from_string from invenio.webuser import getUid, page_not_authorized, get_user_preferences, \ collect_user_info, logoutUser, isUserSuperAdmin from invenio.webcomment_webinterface import WebInterfaceCommentsPages from invenio.weblinkback_webinterface import WebInterfaceRecordLinkbacksPages from invenio.bibcirculation_webinterface import WebInterfaceHoldingsPages from invenio.webpage import page, pageheaderonly, create_error_box from invenio.messages import gettext_set_language from invenio.search_engine import check_user_can_view_record, \ collection_reclist_cache, \ collection_restricted_p, \ create_similarly_named_authors_link_box, \ get_colID, \ get_coll_i18nname, \ get_most_popular_field_values, \ get_mysql_recid_from_aleph_sysno, \ guess_primary_collection_of_a_record, \ page_end, \ page_start, \ perform_request_cache, \ perform_request_log, \ perform_request_search, \ restricted_collection_cache, \ get_coll_normalised_name, \ EM_REPOSITORY from invenio.websearch_webcoll import perform_display_collection from invenio.search_engine_utils import get_fieldvalues, \ get_fieldvalues_alephseq_like from invenio.access_control_engine import acc_authorize_action from invenio.access_control_config import VIEWRESTRCOLL from invenio.access_control_mailcookie import mail_cookie_create_authorize_action from invenio.bibformat import format_records from invenio.bibformat_engine import get_output_formats from invenio.websearch_webcoll import get_collection from invenio.intbitset import intbitset from invenio.bibupload import find_record_from_sysno from invenio.bibrank_citation_searcher import get_cited_by_list from invenio.bibrank_downloads_indexer import get_download_weight_total from invenio.search_engine_summarizer import summarize_records from invenio.errorlib import register_exception from invenio.bibedit_webinterface import WebInterfaceEditPages from 
def wash_search_urlargd(form):
    """
    Create canonical search arguments from those passed via web form.

    :param form: raw web-form mapping submitted by the client.
    :return: washed dict of search arguments (``argd``).
    """
    argd = wash_urlargd(form, search_results_default_urlargd)

    # Legacy parameter spelling: `as' was renamed to `aas' (`as' is a
    # Python keyword); accept the old name and normalise it.
    # NOTE: `in` replaces the deprecated dict.has_key() (removed in Py3).
    if 'as' in argd:
        argd['aas'] = argd['as']
        del argd['as']
    if argd.get('aas', CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE) not in CFG_WEBSEARCH_ENABLED_SEARCH_INTERFACES:
        argd['aas'] = CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE

    # Sometimes, users pass ot=245,700 instead of
    # ot=245&ot=700. Normalize that.
    ots = []
    for ot in argd['ot']:
        ots += ot.split(',')
    argd['ot'] = ots

    # We can either get the mode of function as
    # action=<browse|search>, or by setting action_browse or
    # action_search.
    if argd['action_browse']:
        argd['action'] = 'browse'
    elif argd['action_search']:
        argd['action'] = 'search'
    else:
        if argd['action'] not in ('browse', 'search'):
            argd['action'] = 'search'
    del argd['action_browse']
    del argd['action_search']

    if argd['em'] != "":
        argd['em'] = argd['em'].split(",")

    return argd
class WebInterfaceUnAPIPages(WebInterfaceDirectory):
    """ Handle /unapi set of pages."""
    _exports = ['']

    def __call__(self, req, form):
        """Serve the unAPI format list, or redirect to a record export URL."""
        argd = wash_urlargd(form, {'id': (int, 0), 'format': (str, '')})

        # Map of publicly visible output formats -> their content type.
        formats_dict = get_output_formats(True)
        formats = {}
        for f in formats_dict.values():
            if f['attrs']['visibility']:
                formats[f['attrs']['code'].lower()] = f['attrs']['content_type']
        del formats_dict

        if argd['id'] and argd['format']:
            ## Translate back common format names
            f = {'nlm': 'xn',
                 'marcxml': 'xm',
                 'dc': 'xd',
                 'endnote': 'xe',
                 'mods': 'xo'}.get(argd['format'], argd['format'])
            if f in formats:
                redirect_to_url(req, '%s/%s/%s/export/%s' % (CFG_SITE_URL, CFG_SITE_RECORD, argd['id'], f))
            else:
                # Parenthesised raise form: equivalent to the Py2-only
                # `raise E, arg' but also valid on Python 3.
                raise apache.SERVER_RETURN(apache.HTTP_NOT_ACCEPTABLE)
        elif argd['id']:
            return websearch_templates.tmpl_unapi(formats, identifier=argd['id'])
        else:
            return websearch_templates.tmpl_unapi(formats)

    index = __call__
self.export = WebInterfaceRecordExport(self.recid, self.format) self.edit = WebInterfaceEditPages(self.recid) self.merge = WebInterfaceMergePages(self.recid) self.linkbacks = WebInterfaceRecordLinkbacksPages(self.recid) return def __call__(self, req, form): argd = wash_search_urlargd(form) argd['recid'] = self.recid argd['tab'] = self.tab # do we really enter here ? if self.format is not None: argd['of'] = self.format req.argd = argd uid = getUid(req) if uid == -1: return page_not_authorized(req, "../", text="You are not authorized to view this record.", navmenuid='search') elif uid > 0: pref = get_user_preferences(uid) try: if not form.has_key('rg'): # fetch user rg preference only if not overridden via URL argd['rg'] = int(pref['websearch_group_records']) except (KeyError, ValueError): pass user_info = collect_user_info(req) (auth_code, auth_msg) = check_user_can_view_record(user_info, self.recid) if argd['rg'] > CFG_WEBSEARCH_MAX_RECORDS_IN_GROUPS and acc_authorize_action(req, 'runbibedit')[0] != 0: argd['rg'] = CFG_WEBSEARCH_MAX_RECORDS_IN_GROUPS #check if the user has rights to set a high wildcard limit #if not, reduce the limit set by user, with the default one if CFG_WEBSEARCH_WILDCARD_LIMIT > 0 and (argd['wl'] > CFG_WEBSEARCH_WILDCARD_LIMIT or argd['wl'] == 0): if acc_authorize_action(req, 'runbibedit')[0] != 0: argd['wl'] = CFG_WEBSEARCH_WILDCARD_LIMIT # only superadmins can use verbose parameter for obtaining debug information if not isUserSuperAdmin(user_info): argd['verbose'] = 0 if auth_code and user_info['email'] == 'guest': cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)}) target = CFG_SITE_SECURE_URL + '/youraccount/login' + \ make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : CFG_SITE_SECURE_URL + req.unparsed_uri}, {}) return redirect_to_url(req, target, norobot=True) elif auth_code: return page_not_authorized(req, "../", \ text=auth_msg, \ 
navmenuid='search') from invenio.search_engine import record_exists, get_merged_recid # check if the current record has been deleted # and has been merged, case in which the deleted record # will be redirect to the new one record_status = record_exists(argd['recid']) merged_recid = get_merged_recid(argd['recid']) if record_status == -1 and merged_recid: url = CFG_SITE_URL + '/' + CFG_SITE_RECORD + '/%s?ln=%s' url %= (str(merged_recid), argd['ln']) redirect_to_url(req, url) elif record_status == -1: req.status = apache.HTTP_GONE ## The record is gone! # mod_python does not like to return [] in case when of=id: out = perform_request_search(req, **argd) if isinstance(out, intbitset): return out.fastdump() elif out == []: return str(out) else: return out # Return the same page wether we ask for /CFG_SITE_RECORD/123 or /CFG_SITE_RECORD/123/ index = __call__ class WebInterfaceRecordRestrictedPages(WebInterfaceDirectory): """ Handling of a /record-restricted/<recid> URL fragment """ _exports = ['', 'files', 'reviews', 'comments', 'usage', 'references', 'export', 'citations', 'holdings', 'edit', 'keywords', 'multiedit', 'merge', 'plots', 'linkbacks', 'hepdata'] #_exports.extend(output_formats) def __init__(self, recid, tab, format=None): self.recid = recid self.tab = tab self.format = format self.files = WebInterfaceFilesPages(self.recid) self.reviews = WebInterfaceCommentsPages(self.recid, reviews=1) self.comments = WebInterfaceCommentsPages(self.recid) self.usage = self self.references = self self.keywords = self self.holdings = WebInterfaceHoldingsPages(self.recid) self.citations = self self.plots = self self.export = WebInterfaceRecordExport(self.recid, self.format) self.edit = WebInterfaceEditPages(self.recid) self.merge = WebInterfaceMergePages(self.recid) self.linkbacks = WebInterfaceRecordLinkbacksPages(self.recid) self.hepdata = self def __call__(self, req, form): argd = wash_search_urlargd(form) argd['recid'] = self.recid if self.format is not None: argd['of'] = 
self.format req.argd = argd uid = getUid(req) user_info = collect_user_info(req) if uid == -1: return page_not_authorized(req, "../", text="You are not authorized to view this record.", navmenuid='search') elif uid > 0: pref = get_user_preferences(uid) try: if not form.has_key('rg'): # fetch user rg preference only if not overridden via URL argd['rg'] = int(pref['websearch_group_records']) except (KeyError, ValueError): pass if argd['rg'] > CFG_WEBSEARCH_MAX_RECORDS_IN_GROUPS and acc_authorize_action(req, 'runbibedit')[0] != 0: argd['rg'] = CFG_WEBSEARCH_MAX_RECORDS_IN_GROUPS #check if the user has rights to set a high wildcard limit #if not, reduce the limit set by user, with the default one if CFG_WEBSEARCH_WILDCARD_LIMIT > 0 and (argd['wl'] > CFG_WEBSEARCH_WILDCARD_LIMIT or argd['wl'] == 0): if acc_authorize_action(req, 'runbibedit')[0] != 0: argd['wl'] = CFG_WEBSEARCH_WILDCARD_LIMIT # only superadmins can use verbose parameter for obtaining debug information if not isUserSuperAdmin(user_info): argd['verbose'] = 0 record_primary_collection = guess_primary_collection_of_a_record(self.recid) if collection_restricted_p(record_primary_collection): (auth_code, dummy) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=record_primary_collection) if auth_code: return page_not_authorized(req, "../", text="You are not authorized to view this record.", navmenuid='search') # Keep all the arguments, they might be reused in the # record page itself to derivate other queries req.argd = argd # mod_python does not like to return [] in case when of=id: out = perform_request_search(req, **argd) if isinstance(out, intbitset): return out.fastdump() elif out == []: return str(out) else: return out # Return the same page wether we ask for /CFG_SITE_RECORD/123 or /CFG_SITE_RECORD/123/ index = __call__ class WebInterfaceSearchResultsPages(WebInterfaceDirectory): """ Handling of the /search URL and its sub-pages. 
""" _exports = ['', 'authenticate', 'cache', 'log'] def __call__(self, req, form): """ Perform a search. """ argd = wash_search_urlargd(form) _ = gettext_set_language(argd['ln']) if req.method == 'POST': raise apache.SERVER_RETURN, apache.HTTP_METHOD_NOT_ALLOWED uid = getUid(req) user_info = collect_user_info(req) if uid == -1: return page_not_authorized(req, "../", text=_("You are not authorized to view this area."), navmenuid='search') elif uid > 0: pref = get_user_preferences(uid) try: if not form.has_key('rg'): # fetch user rg preference only if not overridden via URL argd['rg'] = int(pref['websearch_group_records']) except (KeyError, ValueError): pass if argd['rg'] > CFG_WEBSEARCH_MAX_RECORDS_IN_GROUPS and acc_authorize_action(req, 'runbibedit')[0] != 0: argd['rg'] = CFG_WEBSEARCH_MAX_RECORDS_IN_GROUPS involved_collections = set() involved_collections.update(argd['c']) involved_collections.add(argd['cc']) if argd['id'] > 0: argd['recid'] = argd['id'] if argd['idb'] > 0: argd['recidb'] = argd['idb'] if argd['sysno']: tmp_recid = find_record_from_sysno(argd['sysno']) if tmp_recid: argd['recid'] = tmp_recid if argd['sysnb']: tmp_recid = find_record_from_sysno(argd['sysnb']) if tmp_recid: argd['recidb'] = tmp_recid if argd['recid'] > 0: if argd['recidb'] > argd['recid']: # Hack to check if among the restricted collections # at least a record of the range is there and # then if the user is not authorized for that # collection. 
recids = intbitset(xrange(argd['recid'], argd['recidb'])) restricted_collection_cache.recreate_cache_if_needed() for collname in restricted_collection_cache.cache: (auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=collname) if auth_code and user_info['email'] == 'guest': coll_recids = get_collection(collname).reclist if coll_recids & recids: cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : collname}) target = CFG_SITE_SECURE_URL + '/youraccount/login' + \ make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : CFG_SITE_SECURE_URL + req.unparsed_uri}, {}) return redirect_to_url(req, target, norobot=True) elif auth_code: return page_not_authorized(req, "../", \ text=auth_msg, \ navmenuid='search') else: involved_collections.add(guess_primary_collection_of_a_record(argd['recid'])) # If any of the collection requires authentication, redirect # to the authentication form. for coll in involved_collections: if collection_restricted_p(coll): (auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=coll) if auth_code and user_info['email'] == 'guest': cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : coll}) target = CFG_SITE_SECURE_URL + '/youraccount/login' + \ make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : CFG_SITE_SECURE_URL + req.unparsed_uri}, {}) return redirect_to_url(req, target, norobot=True) elif auth_code: return page_not_authorized(req, "../", \ text=auth_msg, \ navmenuid='search') #check if the user has rights to set a high wildcard limit #if not, reduce the limit set by user, with the default one if CFG_WEBSEARCH_WILDCARD_LIMIT > 0 and (argd['wl'] > CFG_WEBSEARCH_WILDCARD_LIMIT or argd['wl'] == 0): auth_code, auth_message = acc_authorize_action(req, 'runbibedit') if auth_code != 0: argd['wl'] = CFG_WEBSEARCH_WILDCARD_LIMIT # only superadmins can use verbose parameter for obtaining debug information if not 
isUserSuperAdmin(user_info): argd['verbose'] = 0 # Keep all the arguments, they might be reused in the # search_engine itself to derivate other queries req.argd = argd # mod_python does not like to return [] in case when of=id: out = perform_request_search(req, **argd) if isinstance(out, intbitset): return out.fastdump() elif out == []: return str(out) else: return out def cache(self, req, form): """Search cache page.""" argd = wash_urlargd(form, {'action': (str, 'show')}) return perform_request_cache(req, action=argd['action']) def log(self, req, form): """Search log page.""" argd = wash_urlargd(form, {'date': (str, '')}) return perform_request_log(req, date=argd['date']) def authenticate(self, req, form): """Restricted search results pages.""" argd = wash_search_urlargd(form) user_info = collect_user_info(req) for coll in argd['c'] + [argd['cc']]: if collection_restricted_p(coll): (auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=coll) if auth_code and user_info['email'] == 'guest': cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : coll}) target = CFG_SITE_SECURE_URL + '/youraccount/login' + \ make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : CFG_SITE_SECURE_URL + req.unparsed_uri}, {}) return redirect_to_url(req, target, norobot=True) elif auth_code: return page_not_authorized(req, "../", \ text=auth_msg, \ navmenuid='search') #check if the user has rights to set a high wildcard limit #if not, reduce the limit set by user, with the default one if CFG_WEBSEARCH_WILDCARD_LIMIT > 0 and (argd['wl'] > CFG_WEBSEARCH_WILDCARD_LIMIT or argd['wl'] == 0): auth_code, auth_message = acc_authorize_action(req, 'runbibedit') if auth_code != 0: argd['wl'] = CFG_WEBSEARCH_WILDCARD_LIMIT # only superadmins can use verbose parameter for obtaining debug information if not isUserSuperAdmin(user_info): argd['verbose'] = 0 # Keep all the arguments, they might be reused in the # search_engine itself to 
class WebInterfaceLegacySearchPages(WebInterfaceDirectory):
    """ Handling of the /search.py URL and its sub-pages. """

    _exports = ['', ('authenticate', 'index')]

    def __call__(self, req, form):
        """ Perform a search. """
        argd = wash_search_urlargd(form)

        # We either jump into the generic search form, or the specific
        # /CFG_SITE_RECORD/... display if a recid is requested
        if argd['recid'] != -1:
            target = '/%s/%d' % (CFG_SITE_RECORD, argd['recid'])
            del argd['recid']
        else:
            target = '/search'

        target += make_canonical_urlargd(argd, search_results_default_urlargd)
        return redirect_to_url(req, target, apache.HTTP_MOVED_PERMANENTLY)

    index = __call__
It logouts the user from Invenio and stream back the expected picture.""" logoutUser(req) req.content_type = 'image/gif' req.encoding = None req.filename = 'wsignout.gif' req.headers_out["Content-Disposition"] = "inline; filename=wsignout.gif" req.set_content_length(os.path.getsize('%s/img/wsignout.gif' % CFG_WEBDIR)) req.send_http_header() req.sendfile('%s/img/wsignout.gif' % CFG_WEBDIR) def _lookup(self, component, path): """ This handler is invoked for the dynamic URLs (for collections and records)""" if component == 'collection': c = '/'.join(path) def answer(req, form): """Accessing collections cached pages.""" # Accessing collections: this is for accessing the # cached page on top of each collection. argd = wash_urlargd(form, search_interface_default_urlargd) # We simply return the cached page of the collection argd['c'] = c if not argd['c']: # collection argument not present; display # home collection by default argd['c'] = CFG_SITE_NAME # Treat `as' argument specially: if argd.has_key('as'): argd['aas'] = argd['as'] del argd['as'] if argd.get('aas', CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE) not in CFG_WEBSEARCH_ENABLED_SEARCH_INTERFACES: argd['aas'] = CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE return display_collection(req, **argd) return answer, [] elif component == CFG_SITE_RECORD and path and path[0] == 'merge': return WebInterfaceMergePages(), path[1:] elif component == CFG_SITE_RECORD and path and path[0] == 'edit': return WebInterfaceEditPages(), path[1:] elif component == CFG_SITE_RECORD and path and path[0] == 'multiedit': return WebInterfaceMultiEditPages(), path[1:] elif component == CFG_SITE_RECORD and path and path[0] in ('managedocfiles', 'managedocfilesasync'): return WebInterfaceManageDocFilesPages(), path elif component == CFG_SITE_RECORD or component == 'record-restricted': try: if CFG_WEBSEARCH_USE_ALEPH_SYSNOS: # let us try to recognize /<CFG_SITE_RECORD>/<SYSNO> style of URLs: # check for SYSNOs with an embedded slash; needed for [ARXIVINV-15] 
if len(path) > 1 and get_mysql_recid_from_aleph_sysno(path[0] + "/" + path[1]): path[0] = path[0] + "/" + path[1] del path[1] x = get_mysql_recid_from_aleph_sysno(path[0]) if x: recid = x else: recid = int(path[0]) else: recid = int(path[0]) except IndexError: # display record #1 for URL /CFG_SITE_RECORD without a number recid = 1 except ValueError: if path[0] == '': # display record #1 for URL /CFG_SITE_RECORD/ without a number recid = 1 else: # display page not found for URLs like /CFG_SITE_RECORD/foo return None, [] from invenio.intbitset import __maxelem__ if recid <= 0 or recid > __maxelem__: # __maxelem__ = 2147483647 # display page not found for URLs like /CFG_SITE_RECORD/-5 or /CFG_SITE_RECORD/0 or /CFG_SITE_RECORD/2147483649 return None, [] format = None tab = '' try: if path[1] in ['', 'files', 'reviews', 'comments', 'usage', 'references', 'citations', 'holdings', 'edit', 'keywords', 'multiedit', 'merge', 'plots', 'linkbacks', 'hepdata']: tab = path[1] elif path[1] == 'export': tab = '' format = path[2] # format = None # elif path[1] in output_formats: # tab = '' # format = path[1] else: # display page not found for URLs like /CFG_SITE_RECORD/references # for a collection where 'references' tabs is not visible return None, [] except IndexError: # Keep normal url if tabs is not specified pass #if component == 'record-restricted': #return WebInterfaceRecordRestrictedPages(recid, tab, format), path[1:] #else: return WebInterfaceRecordPages(recid, tab, format), path[1:] elif component == 'sslredirect': ## Fallback solution for sslredirect special path that should ## be rather implemented as an Apache level redirection def redirecter(req, form): real_url = "http://" + '/'.join(path) redirect_to_url(req, real_url) return redirecter, [] return None, [] def openurl(self, req, form): """ OpenURL Handler.""" argd = wash_urlargd(form, websearch_templates.tmpl_openurl_accepted_args) ret_url = websearch_templates.tmpl_openurl2invenio(argd) if ret_url: return 
redirect_to_url(req, ret_url) else: return redirect_to_url(req, CFG_SITE_URL) def opensearchdescription(self, req, form): """OpenSearch description file""" req.content_type = "application/opensearchdescription+xml" req.send_http_header() argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG), 'verbose': (int, 0) }) return websearch_templates.tmpl_opensearch_description(ln=argd['ln']) def legacy_collection(self, req, form): """Collection URL backward compatibility handling.""" accepted_args = dict(legacy_collection_default_urlargd) argd = wash_urlargd(form, accepted_args) # Treat `as' argument specially: if argd.has_key('as'): argd['aas'] = argd['as'] del argd['as'] if argd.get('aas', CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE) not in (0, 1): argd['aas'] = CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE # If we specify no collection, then we don't need to redirect # the user, so that accessing <http://yoursite/> returns the # default collection. if not form.has_key('c'): return display_collection(req, **argd) # make the collection an element of the path, and keep the # other query elements as is. If the collection is CFG_SITE_NAME, # however, redirect to the main URL. 
c = argd['c'] del argd['c'] if c == CFG_SITE_NAME: target = '/' else: target = '/collection/' + quote(c) # Treat `as' argument specially: # We are going to redirect, so replace `aas' by `as' visible argument: if argd.has_key('aas'): argd['as'] = argd['aas'] del argd['aas'] target += make_canonical_urlargd(argd, legacy_collection_default_urlargd) return redirect_to_url(req, target) def display_collection(req, c, aas, verbose, ln, em=""): """Display search interface page for collection c by looking in the collection cache.""" _ = gettext_set_language(ln) req.argd = drop_default_urlargd({'aas': aas, 'verbose': verbose, 'ln': ln, 'em' : em}, search_interface_default_urlargd) if em != "": em = em.split(",") # get user ID: try: uid = getUid(req) user_preferences = {} if uid == -1: return page_not_authorized(req, "../", text="You are not authorized to view this collection", navmenuid='search') elif uid > 0: user_preferences = get_user_preferences(uid) except Error: register_exception(req=req, alert_admin=True) return page(title=_("Internal Error"), body=create_error_box(req, verbose=verbose, ln=ln), description="%s - Internal Error" % CFG_SITE_NAME, keywords="%s, Internal Error" % CFG_SITE_NAME, language=ln, req=req, navmenuid='search') # deduce collection id: normalised_name = get_coll_normalised_name(c) colID = get_colID(normalised_name) if type(colID) is not int: page_body = '<p>' + (_("Sorry, collection %s does not seem to exist.") % ('<strong>' + str(c) + '</strong>')) + '</p>' page_body = '<p>' + (_("You may want to start browsing from %s.") % ('<a href="' + CFG_SITE_URL + '?ln=' + ln + '">' + get_coll_i18nname(CFG_SITE_NAME, ln) + '</a>')) + '</p>' if req.header_only: raise apache.SERVER_RETURN, apache.HTTP_NOT_FOUND return page(title=_("Collection %s Not Found") % cgi.escape(c), body=page_body, description=(CFG_SITE_NAME + ' - ' + _("Not found") + ': ' + cgi.escape(str(c))), keywords="%s" % CFG_SITE_NAME, uid=uid, language=ln, req=req, navmenuid='search') if 
normalised_name != c: redirect_to_url(req, normalised_name, apache.HTTP_MOVED_PERMANENTLY) # start display: req.content_type = "text/html" req.send_http_header() c_body, c_navtrail, c_portalbox_lt, c_portalbox_rt, c_portalbox_tp, c_portalbox_te, \ c_last_updated = perform_display_collection(colID, c, aas, ln, em, user_preferences.get('websearch_helpbox', 1)) if em == "" or EM_REPOSITORY["body"] in em: try: title = get_coll_i18nname(c, ln) except: title = "" else: title = "" show_title_p = True body_css_classes = [] if c == CFG_SITE_NAME: # Do not display title on home collection show_title_p = False body_css_classes.append('home') if len(collection_reclist_cache.cache.keys()) == 1: # if there is only one collection defined, do not print its # title on the page as it would be displayed repetitively. show_title_p = False if aas == -1: show_title_p = False if CFG_INSPIRE_SITE == 1: # INSPIRE should never show title, but instead use css to # style collections show_title_p = False body_css_classes.append(nmtoken_from_string(c)) # RSS: rssurl = CFG_SITE_URL + '/rss' rssurl_params = [] if c != CFG_SITE_NAME: rssurl_params.append('cc=' + quote(c)) if ln != CFG_SITE_LANG and \ c in CFG_WEBSEARCH_RSS_I18N_COLLECTIONS: rssurl_params.append('ln=' + ln) if rssurl_params: rssurl += '?' 
+ '&amp;'.join(rssurl_params) if 'hb' in CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS: metaheaderadd = get_mathjax_header(req.is_https()) else: metaheaderadd = '' return page(title=title, body=c_body, navtrail=c_navtrail, description="%s - %s" % (CFG_SITE_NAME, c), keywords="%s, %s" % (CFG_SITE_NAME, c), metaheaderadd=metaheaderadd, uid=uid, language=ln, req=req, cdspageboxlefttopadd=c_portalbox_lt, cdspageboxrighttopadd=c_portalbox_rt, titleprologue=c_portalbox_tp, titleepilogue=c_portalbox_te, lastupdated=c_last_updated, navmenuid='search', rssurl=rssurl, body_css_classes=body_css_classes, show_title_p=show_title_p, show_header=em == "" or EM_REPOSITORY["header"] in em, show_footer=em == "" or EM_REPOSITORY["footer"] in em) class WebInterfaceRSSFeedServicePages(WebInterfaceDirectory): """RSS 2.0 feed service pages.""" def __call__(self, req, form): """RSS 2.0 feed service.""" # Keep only interesting parameters for the search default_params = websearch_templates.rss_default_urlargd # We need to keep 'jrec' and 'rg' here in order to have # 'multi-page' RSS. These parameters are not kept be default # as we don't want to consider them when building RSS links # from search and browse pages. 
default_params.update({'jrec':(int, 1), 'rg': (int, CFG_WEBSEARCH_INSTANT_BROWSE_RSS)}) argd = wash_urlargd(form, default_params) user_info = collect_user_info(req) for coll in argd['c'] + [argd['cc']]: if collection_restricted_p(coll): (auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=coll) if auth_code and user_info['email'] == 'guest': cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : coll}) target = CFG_SITE_SECURE_URL + '/youraccount/login' + \ make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : CFG_SITE_SECURE_URL + req.unparsed_uri}, {}) return redirect_to_url(req, target, norobot=True) elif auth_code: return page_not_authorized(req, "../", \ text=auth_msg, \ navmenuid='search') # Create a standard filename with these parameters current_url = websearch_templates.build_rss_url(argd) cache_filename = current_url.split('/')[-1] # In the same way as previously, add 'jrec' & 'rg' req.content_type = "application/rss+xml" req.send_http_header() try: # Try to read from cache path = "%s/rss/%s.xml" % (CFG_CACHEDIR, cache_filename) # Check if cache needs refresh filedesc = open(path, "r") last_update_time = datetime.datetime.fromtimestamp(os.stat(os.path.abspath(path)).st_mtime) assert(datetime.datetime.now() < last_update_time + datetime.timedelta(minutes=CFG_WEBSEARCH_RSS_TTL)) c_rss = filedesc.read() filedesc.close() req.write(c_rss) return except Exception, e: # do it live and cache previous_url = None if argd['jrec'] > 1: prev_jrec = argd['jrec'] - argd['rg'] if prev_jrec < 1: prev_jrec = 1 previous_url = websearch_templates.build_rss_url(argd, jrec=prev_jrec) #check if the user has rights to set a high wildcard limit #if not, reduce the limit set by user, with the default one if CFG_WEBSEARCH_WILDCARD_LIMIT > 0 and (argd['wl'] > CFG_WEBSEARCH_WILDCARD_LIMIT or argd['wl'] == 0): if acc_authorize_action(req, 'runbibedit')[0] != 0: argd['wl'] = CFG_WEBSEARCH_WILDCARD_LIMIT req.argd = 
argd recIDs = perform_request_search(req, of="id", c=argd['c'], cc=argd['cc'], p=argd['p'], f=argd['f'], p1=argd['p1'], f1=argd['f1'], m1=argd['m1'], op1=argd['op1'], p2=argd['p2'], f2=argd['f2'], m2=argd['m2'], op2=argd['op2'], p3=argd['p3'], f3=argd['f3'], m3=argd['m3'], wl=argd['wl']) nb_found = len(recIDs) next_url = None if len(recIDs) >= argd['jrec'] + argd['rg']: next_url = websearch_templates.build_rss_url(argd, jrec=(argd['jrec'] + argd['rg'])) first_url = websearch_templates.build_rss_url(argd, jrec=1) last_url = websearch_templates.build_rss_url(argd, jrec=nb_found - argd['rg'] + 1) recIDs = recIDs[-argd['jrec']:(-argd['rg'] - argd['jrec']):-1] rss_prologue = '<?xml version="1.0" encoding="UTF-8"?>\n' + \ websearch_templates.tmpl_xml_rss_prologue(current_url=current_url, previous_url=previous_url, next_url=next_url, first_url=first_url, last_url=last_url, nb_found=nb_found, jrec=argd['jrec'], rg=argd['rg'], cc=argd['cc']) + '\n' req.write(rss_prologue) rss_body = format_records(recIDs, of='xr', ln=argd['ln'], user_info=user_info, record_separator="\n", req=req, epilogue="\n") rss_epilogue = websearch_templates.tmpl_xml_rss_epilogue() + '\n' req.write(rss_epilogue) # update cache dirname = "%s/rss" % (CFG_CACHEDIR) mymkdir(dirname) fullfilename = "%s/rss/%s.xml" % (CFG_CACHEDIR, cache_filename) try: # Remove the file just in case it already existed # so that a bit of space is created os.remove(fullfilename) except OSError: pass # Check if there's enough space to cache the request. if len(os.listdir(dirname)) < CFG_WEBSEARCH_RSS_MAX_CACHED_REQUESTS: try: os.umask(022) f = open(fullfilename, "w") f.write(rss_prologue + rss_body + rss_epilogue) f.close() except IOError, v: if v[0] == 36: # URL was too long. 
Never mind, don't cache pass else: raise repr(v) index = __call__ class WebInterfaceRecordExport(WebInterfaceDirectory): """ Handling of a /<CFG_SITE_RECORD>/<recid>/export/<format> URL fragment """ _exports = output_formats def __init__(self, recid, format=None): self.recid = recid self.format = format for output_format in output_formats: self.__dict__[output_format] = self return def __call__(self, req, form): argd = wash_search_urlargd(form) argd['recid'] = self.recid if self.format is not None: argd['of'] = self.format req.argd = argd uid = getUid(req) if uid == -1: return page_not_authorized(req, "../", text="You are not authorized to view this record.", navmenuid='search') elif uid > 0: pref = get_user_preferences(uid) try: if not form.has_key('rg'): # fetch user rg preference only if not overridden via URL argd['rg'] = int(pref['websearch_group_records']) except (KeyError, ValueError): pass # Check if the record belongs to a restricted primary # collection. If yes, redirect to the authenticated URL. 
user_info = collect_user_info(req) (auth_code, auth_msg) = check_user_can_view_record(user_info, self.recid) if argd['rg'] > CFG_WEBSEARCH_MAX_RECORDS_IN_GROUPS and acc_authorize_action(req, 'runbibedit')[0] != 0: argd['rg'] = CFG_WEBSEARCH_MAX_RECORDS_IN_GROUPS #check if the user has rights to set a high wildcard limit #if not, reduce the limit set by user, with the default one if CFG_WEBSEARCH_WILDCARD_LIMIT > 0 and (argd['wl'] > CFG_WEBSEARCH_WILDCARD_LIMIT or argd['wl'] == 0): if acc_authorize_action(req, 'runbibedit')[0] != 0: argd['wl'] = CFG_WEBSEARCH_WILDCARD_LIMIT # only superadmins can use verbose parameter for obtaining debug information if not isUserSuperAdmin(user_info): argd['verbose'] = 0 if auth_code and user_info['email'] == 'guest': cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)}) target = CFG_SITE_SECURE_URL + '/youraccount/login' + \ make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : CFG_SITE_SECURE_URL + req.unparsed_uri}, {}) return redirect_to_url(req, target, norobot=True) elif auth_code: return page_not_authorized(req, "../", \ text=auth_msg, \ navmenuid='search') # mod_python does not like to return [] in case when of=id: out = perform_request_search(req, **argd) if isinstance(out, intbitset): return out.fastdump() elif out == []: return str(out) else: return out # Return the same page wether we ask for /CFG_SITE_RECORD/123/export/xm or /CFG_SITE_RECORD/123/export/xm/ index = __call__
gpl-2.0
peterbraden/tensorflow
tensorflow/contrib/learn/python/learn/tests/test_early_stopping.py
5
2501
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Smoke test for early stopping via a ValidationMonitor on the Iris data."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import random

from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split


class EarlyStoppingTest(tf.test.TestCase):
    """Trains two identical DNNs; the second one with a validation monitor."""

    def testIrisES(self):
        """Fit with and without a ValidationMonitor and score both on a test split."""
        random.seed(42)

        iris = datasets.load_iris()
        # Hold out 20% of the data as the final test set...
        X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                            iris.target,
                                                            test_size=0.2,
                                                            random_state=42)

        # ...then carve a validation split out of the remaining training data.
        # Note this second split has no random_state, so it is not fully
        # reproducible across runs.
        X_train, X_val, y_train, y_val = train_test_split(
            X_train, y_train, test_size=0.2)
        val_monitor = learn.monitors.ValidationMonitor(X_val, y_val, n_classes=3)

        # classifier without early stopping - overfitting
        classifier1 = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
                                                    n_classes=3, steps=1000)
        classifier1.fit(X_train, y_train)
        score1 = accuracy_score(y_test, classifier1.predict(X_test))

        # classifier with early stopping - improved accuracy on testing set
        classifier2 = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
                                                    n_classes=3, steps=1000)
        classifier2.fit(X_train, y_train, val_monitor)
        score2 = accuracy_score(y_test, classifier2.predict(X_test))

        # NOTE(review): the actual accuracy comparison is commented out —
        # presumably because the improvement was flaky.  As written, the test
        # only verifies that training with a ValidationMonitor runs without
        # raising.
        # self.assertGreater(score2, score1, "No improvement using early stopping.")


if __name__ == "__main__":
    tf.test.main()
apache-2.0
kaltsimon/youtube-dl
youtube_dl/extractor/wdr.py
110
10166
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import itertools
import re

from .common import InfoExtractor
from ..compat import (
    compat_parse_qs,
    compat_urlparse,
)
from ..utils import (
    determine_ext,
    unified_strdate,
)


class WDRIE(InfoExtractor):
    """Extractor for WDR / Funkhaus Europa pages.

    Handles three kinds of URLs, distinguished by the optional ``player``
    group of _VALID_URL:

    * a player page (URL ends in ``-videoplayer``/``-audioplayer``) — extract
      the single clip from the Flash ``flashvars``;
    * a playlist page — the page links directly to player pages;
    * an overview page — paginated lists of ``/mediathek/video/`` links.
    """
    _PLAYER_REGEX = '-(?:video|audio)player(?:_size-[LMS])?'
    _VALID_URL = r'(?P<url>https?://www\d?\.(?:wdr\d?|funkhauseuropa)\.de/)(?P<id>.+?)(?P<player>%s)?\.html' % _PLAYER_REGEX

    _TESTS = [
        {
            'url': 'http://www1.wdr.de/mediathek/video/sendungen/servicezeit/videoservicezeit560-videoplayer_size-L.html',
            'info_dict': {
                'id': 'mdb-362427',
                'ext': 'flv',
                'title': 'Servicezeit',
                'description': 'md5:c8f43e5e815eeb54d0b96df2fba906cb',
                'upload_date': '20140310',
                'is_live': False
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            'url': 'http://www1.wdr.de/themen/av/videomargaspiegelisttot101-videoplayer.html',
            'info_dict': {
                'id': 'mdb-363194',
                'ext': 'flv',
                'title': 'Marga Spiegel ist tot',
                'description': 'md5:2309992a6716c347891c045be50992e4',
                'upload_date': '20140311',
                'is_live': False
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            'url': 'http://www1.wdr.de/themen/kultur/audioerlebtegeschichtenmargaspiegel100-audioplayer.html',
            'md5': '83e9e8fefad36f357278759870805898',
            'info_dict': {
                'id': 'mdb-194332',
                'ext': 'mp3',
                'title': 'Erlebte Geschichten: Marga Spiegel (29.11.2009)',
                'description': 'md5:2309992a6716c347891c045be50992e4',
                'upload_date': '20091129',
                'is_live': False
            },
        },
        {
            'url': 'http://www.funkhauseuropa.de/av/audioflaviacoelhoamaramar100-audioplayer.html',
            'md5': '99a1443ff29af19f6c52cf6f4dc1f4aa',
            'info_dict': {
                'id': 'mdb-478135',
                'ext': 'mp3',
                'title': 'Flavia Coelho: Amar é Amar',
                'description': 'md5:7b29e97e10dfb6e265238b32fa35b23a',
                'upload_date': '20140717',
                'is_live': False
            },
        },
        {
            'url': 'http://www1.wdr.de/mediathek/video/sendungen/quarks_und_co/filterseite-quarks-und-co100.html',
            'playlist_mincount': 146,
            'info_dict': {
                'id': 'mediathek/video/sendungen/quarks_und_co/filterseite-quarks-und-co100',
            }
        },
        {
            'url': 'http://www1.wdr.de/mediathek/video/livestream/index.html',
            'info_dict': {
                'id': 'mdb-103364',
                'title': 're:^WDR Fernsehen [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
                'description': 'md5:ae2ff888510623bf8d4b115f95a9b7c9',
                'ext': 'flv',
                'upload_date': '20150212',
                'is_live': True
            },
            'params': {
                'skip_download': True,
            },
        }
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        page_url = mobj.group('url')
        page_id = mobj.group('id')

        webpage = self._download_webpage(url, page_id)

        # No player suffix in the URL: this is a playlist or overview page,
        # not a single clip.
        if mobj.group('player') is None:
            entries = [
                self.url_result(page_url + href, 'WDR')
                for href in re.findall(r'<a href="/?(.+?%s\.html)" rel="nofollow"' % self._PLAYER_REGEX, webpage)
            ]
            if entries:  # Playlist page
                return self.playlist_result(entries, page_id)

            # Overview page: walk the "nextToLast" pagination link until it
            # disappears, collecting /mediathek/video/ entries from each page.
            entries = []
            for page_num in itertools.count(2):
                hrefs = re.findall(
                    r'<li class="mediathekvideo"\s*>\s*<img[^>]*>\s*<a href="(/mediathek/video/[^"]+)"',
                    webpage)
                entries.extend(
                    self.url_result(page_url + href, 'WDR')
                    for href in hrefs)
                next_url_m = re.search(
                    r'<li class="nextToLast">\s*<a href="([^"]+)"', webpage)
                if not next_url_m:
                    break
                next_url = page_url + next_url_m.group(1)
                webpage = self._download_webpage(
                    next_url, page_id,
                    note='Downloading playlist page %d' % page_num)
            return self.playlist_result(entries, page_id)

        # Single clip: all metadata lives in the Flash player's flashvars
        # query string.
        flashvars = compat_parse_qs(
            self._html_search_regex(r'<param name="flashvars" value="([^"]+)"', webpage, 'flashvars'))

        page_id = flashvars['trackerClipId'][0]
        video_url = flashvars['dslSrc'][0]
        title = flashvars['trackerClipTitle'][0]
        thumbnail = flashvars['startPicture'][0] if 'startPicture' in flashvars else None
        is_live = flashvars.get('isLive', ['0'])[0] == '1'

        if is_live:
            title = self._live_title(title)

        # Prefer the air time from flashvars; fall back to the DC.Date meta tag.
        if 'trackerClipAirTime' in flashvars:
            upload_date = flashvars['trackerClipAirTime'][0]
        else:
            upload_date = self._html_search_meta('DC.Date', webpage, 'upload date')

        if upload_date:
            upload_date = unified_strdate(upload_date)

        # HDS manifests (.f4m) and SMIL playlists need the Adobe HDS plugin
        # parameters appended to be downloadable.
        if video_url.endswith('.f4m'):
            video_url += '?hdcore=3.2.0&plugin=aasp-3.2.0.77.18'
            ext = 'flv'
        elif video_url.endswith('.smil'):
            fmt = self._extract_smil_formats(video_url, page_id)[0]
            video_url = fmt['url']
            sep = '&' if '?' in video_url else '?'
            video_url += sep
            video_url += 'hdcore=3.3.0&plugin=aasp-3.3.0.99.43'
            ext = fmt['ext']
        else:
            ext = determine_ext(video_url)

        description = self._html_search_meta('Description', webpage, 'description')

        return {
            'id': page_id,
            'url': video_url,
            'ext': ext,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'is_live': is_live
        }


class WDRMobileIE(InfoExtractor):
    """Extractor for WDR mobile on-demand URLs.

    The URL itself carries all metadata (id, title number, FSK age limit),
    so no page download is needed.
    """
    _VALID_URL = r'''(?x)
        https?://mobile-ondemand\.wdr\.de/
        .*?/fsk(?P<age_limit>[0-9]+)
        /[0-9]+/[0-9]+/
        (?P<id>[0-9]+)_(?P<title>[0-9]+)'''
    IE_NAME = 'wdr:mobile'
    _TEST = {
        'url': 'http://mobile-ondemand.wdr.de/CMS2010/mdb/ondemand/weltweit/fsk0/42/421735/421735_4283021.mp4',
        'info_dict': {
            'title': '4283021',
            'id': '421735',
            'ext': 'mp4',
            'age_limit': 0,
        },
        'skip': 'Problems with loading data.'
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        # The server requires a mobile User-Agent to serve the file.
        return {
            'id': mobj.group('id'),
            'title': mobj.group('title'),
            'age_limit': int(mobj.group('age_limit')),
            'url': url,
            'http_headers': {
                'User-Agent': 'mobile',
            },
        }


class WDRMausIE(InfoExtractor):
    """Extractor for "Sendung mit der Maus" pages on wdrmaus.de."""
    _VALID_URL = 'http://(?:www\.)?wdrmaus\.de/(?:[^/]+/){,2}(?P<id>[^/?#]+)(?:/index\.php5|(?<!index)\.php5|/(?:$|[?#]))'
    IE_DESC = 'Sendung mit der Maus'
    _TESTS = [{
        'url': 'http://www.wdrmaus.de/aktuelle-sendung/index.php5',
        'info_dict': {
            'id': 'aktuelle-sendung',
            'ext': 'mp4',
            'thumbnail': 're:^http://.+\.jpg',
            'upload_date': 're:^[0-9]{8}$',
            'title': 're:^[0-9.]{10} - Aktuelle Sendung$',
        }
    }, {
        'url': 'http://www.wdrmaus.de/sachgeschichten/sachgeschichten/40_jahre_maus.php5',
        'md5': '3b1227ca3ed28d73ec5737c65743b2a3',
        'info_dict': {
            'id': '40_jahre_maus',
            'ext': 'mp4',
            'thumbnail': 're:^http://.+\.jpg',
            'upload_date': '20131007',
            'title': '12.03.2011 - 40 Jahre Maus',
        }
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)
        # The player parameters are embedded as a query string in the
        # "start video" link.
        param_code = self._html_search_regex(
            r'<a href="\?startVideo=1&amp;([^"]+)"', webpage, 'parameters')

        title_date = self._search_regex(
            r'<div class="sendedatum"><p>Sendedatum:\s*([0-9\.]+)</p>',
            webpage, 'air date')
        title_str = self._html_search_regex(
            r'<h1>(.*?)</h1>', webpage, 'title')
        title = '%s - %s' % (title_date, title_str)
        upload_date = unified_strdate(
            self._html_search_meta('dc.date', webpage))

        fields = compat_parse_qs(param_code)
        video_url = fields['firstVideo'][0]
        thumbnail = compat_urlparse.urljoin(url, fields['startPicture'][0])

        formats = [{
            'format_id': 'rtmp',
            'url': video_url,
        }]

        # The site's JS holds a translation table mapping RTMP stream URL
        # prefixes to plain HTTP download URLs; use it to add an HTTP format
        # when possible (best effort — fatal=False).
        jscode = self._download_webpage(
            'http://www.wdrmaus.de/codebase/js/extended-medien.min.js',
            video_id, fatal=False,
            note='Downloading URL translation table',
            errnote='Could not download URL translation table')
        if jscode:
            for m in re.finditer(
                    r"stream:\s*'dslSrc=(?P<stream>[^']+)',\s*download:\s*'(?P<dl>[^']+)'\s*\}",
                    jscode):
                if video_url.startswith(m.group('stream')):
                    http_url = video_url.replace(
                        m.group('stream'), m.group('dl'))
                    formats.append({
                        'format_id': 'http',
                        'url': http_url,
                    })
                    break

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
        }
unlicense
apple/swift-clang
utils/token-delta.py
35
8282
#!/usr/bin/env python

"""Token-level delta debugging driver for clang test cases.

Reduces one or more input files, token by token, to the smallest set of
tokens for which a user-supplied test program still succeeds.
"""

from __future__ import absolute_import, division, print_function
import os
import re
import subprocess
import sys
import tempfile

###

class DeltaAlgorithm(object):
    """Generic delta-debugging (ddmin-style) reduction over a set of changes.

    Subclasses must override test(); run() returns a minimal passing subset.
    """
    def __init__(self):
        # Cache of changesets already known to fail, keyed by frozenset.
        self.cache = set()

    def test(self, changes):
        # Deliberate placeholder: referencing the undefined name 'abstract'
        # raises NameError if a subclass forgets to override this method.
        abstract

    ###

    def getTestResult(self, changes):
        # There is no reason to cache successful tests because we will
        # always reduce the changeset when we see one.

        changeset = frozenset(changes)
        if changeset in self.cache:
            return False
        elif not self.test(changes):
            self.cache.add(changeset)
            return False
        else:
            return True

    def run(self, changes, force=False):
        """Reduce *changes* to a minimal subset that still passes test()."""
        # Make sure the initial test passes, if not then (a) either
        # the user doesn't expect monotonicity, and we may end up
        # doing O(N^2) tests, or (b) the test is wrong. Avoid the
        # O(N^2) case unless user requests it.
        if not force:
            if not self.getTestResult(changes):
                raise ValueError('Initial test passed to delta fails.')

        # Check empty set first to quickly find poor test functions.
        if self.getTestResult(set()):
            return set()
        else:
            return self.delta(changes, self.split(changes))

    def split(self, S):
        """split(set) -> [sets]

        Partition a set into one or two pieces.
        """

        # There are many ways to split, we could do a better job with more
        # context information (but then the API becomes grosser).

        L = list(S)
        mid = len(L)//2
        if mid==0:
            # Single-element (or empty) input: a one-tuple containing L.
            return L,
        else:
            return L[:mid],L[mid:]

    def delta(self, c, sets):
        # assert(reduce(set.union, sets, set()) == c)

        # If there is nothing left we can remove, we are done.
        if len(sets) <= 1:
            return c

        # Look for a passing subset.
        res = self.search(c, sets)
        if res is not None:
            return res

        # Otherwise, partition sets if possible; if not we are done.
        refined = sum(map(list, map(self.split, sets)), [])
        if len(refined) == len(sets):
            return c

        return self.delta(c, refined)

    def search(self, c, sets):
        # Returns a reduced changeset, or None (implicitly) if neither any
        # subset nor any complement passes.
        for i,S in enumerate(sets):
            # If test passes on this subset alone, recurse.
            if self.getTestResult(S):
                return self.delta(S, self.split(S))

            # Otherwise if we have more than two sets, see if test
            # pases without this subset.
            if len(sets) > 2:
                complement = sum(sets[:i] + sets[i+1:],[])
                if self.getTestResult(complement):
                    return self.delta(complement, sets[:i] + sets[i+1:])

###

class Token(object):
    """One raw token as reported by 'clang -dump-raw-tokens'."""
    def __init__(self, type, data, flags, file, line, column):
        self.type = type
        self.data = data
        self.flags = flags
        self.file = file
        self.line = line
        self.column = column

kTokenRE = re.compile(r"""([a-z_]+) '(.*)'\t(.*)\tLoc=<(.*):(.*):(.*)>""",
                      re.DOTALL | re.MULTILINE)

def getTokens(path):
    """Run clang -dump-raw-tokens on *path* and parse stderr into Tokens."""
    p = subprocess.Popen(['clang','-dump-raw-tokens',path],
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    out,err = p.communicate()

    tokens = []
    collect = None
    # A token's dump may span multiple physical lines (e.g. string literals
    # with embedded newlines), so accumulate until the 'Loc=<...>' suffix
    # closes the record.
    for ln in err.split('\n'):
        # Silly programmers refuse to print in simple machine readable
        # formats. Whatever.
        if collect is None:
            collect = ln
        else:
            collect = collect + '\n' + ln
        if 'Loc=<' in ln and ln.endswith('>'):
            ln,collect = collect,None
            tokens.append(Token(*kTokenRE.match(ln).groups()))

    return tokens

###

class TMBDDelta(DeltaAlgorithm):
    """Token-based multi-file delta: a change is an (fileIndex, tokenIndex) pair.

    Candidate reductions are written to '<name>.tmp' files and checked by
    running *testProgram*; the best passing reduction is kept in '<name>.ok'.
    """
    def __init__(self, testProgram, tokenLists, log):
        def patchName(name, suffix):
            # foo.c -> foo.<suffix>.c
            base,ext = os.path.splitext(name)
            return base + '.' + suffix + ext
        super(TMBDDelta, self).__init__()
        self.testProgram = testProgram
        self.tokenLists = tokenLists
        self.tempFiles = [patchName(f,'tmp')
                          for f,_ in self.tokenLists]
        self.targetFiles = [patchName(f,'ok')
                            for f,_ in self.tokenLists]
        self.log = log
        self.numTests = 0

    def writeFiles(self, changes, fileNames):
        """Materialize the selected tokens into *fileNames*, one per input."""
        assert len(fileNames) == len(self.tokenLists)
        byFile = [[] for i in self.tokenLists]
        for i,j in changes:
            byFile[i].append(j)

        for i,(file,tokens) in enumerate(self.tokenLists):
            f = open(fileNames[i],'w')
            for j in byFile[i]:
                f.write(tokens[j])
            f.close()

        return byFile

    def test(self, changes):
        """Write the candidate to temp files and run the test program."""
        self.numTests += 1

        byFile = self.writeFiles(changes, self.tempFiles)

        if self.log:
            print('TEST - ', end=' ', file=sys.stderr)
            if self.log > 1:
                # Verbose mode: print the selected token index ranges per file.
                for i,(file,_) in enumerate(self.tokenLists):
                    indices = byFile[i]
                    if i:
                        sys.stderr.write('\n      ')
                    sys.stderr.write('%s:%d tokens: [' % (file,len(byFile[i])))
                    prev = None
                    for j in byFile[i]:
                        if prev is None or j != prev + 1:
                            if prev:
                                sys.stderr.write('%d][' % prev)
                            sys.stderr.write(str(j))
                            sys.stderr.write(':')
                        prev = j
                    if byFile[i]:
                        sys.stderr.write(str(byFile[i][-1]))
                    sys.stderr.write('] ')
            else:
                print(', '.join(['%s:%d tokens' % (file, len(byFile[i]))
                                 for i,(file,_) in enumerate(self.tokenLists)]),
                      end=' ', file=sys.stderr)

        p = subprocess.Popen([self.testProgram] + self.tempFiles)
        res = p.wait() == 0

        if res:
            # Passing candidate: snapshot it as the current best reduction.
            self.writeFiles(changes, self.targetFiles)

        if self.log:
            print('=> %s' % res, file=sys.stderr)
        else:
            if res:
                print('\nSUCCESS (%d tokens)' % len(changes))
            else:
                sys.stderr.write('.')

        return res

    def run(self):
        """Reduce starting from the full set of (file, token) pairs."""
        res = super(TMBDDelta, self).run([(i,j)
                                          for i,(file,tokens) in enumerate(self.tokenLists)
                                          for j in range(len(tokens))])
        self.writeFiles(res, self.targetFiles)
        if not self.log:
            print(file=sys.stderr)
        return res

def tokenBasedMultiDelta(program, files, log):
    """Tokenize *files* with clang and delta-reduce them against *program*."""
    # Read in the lists of tokens.
    tokenLists = [(file, [t.data for t in getTokens(file)])
                  for file in files]

    numTokens = sum([len(tokens) for _,tokens in tokenLists])
    print("Delta on %s with %d tokens." % (', '.join(files), numTokens))

    tbmd = TMBDDelta(program, tokenLists, log)

    res = tbmd.run()

    print("Finished %s with %d tokens (in %d tests)." % (', '.join(tbmd.targetFiles),
                                                         len(res),
                                                         tbmd.numTests))

def main():
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("%prog <test program> {files+}")
    parser.add_option("", "--debug", dest="debugLevel",
                      help="set debug level [default %default]",
                      action="store", type=int, default=0)
    (opts, args) = parser.parse_args()

    if len(args) <= 1:
        parser.error('Invalid number of arguments.')

    program,files = args[0],args[1:]

    md = tokenBasedMultiDelta(program, files, log=opts.debugLevel)

if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        print('Interrupted.', file=sys.stderr)
        os._exit(1) # Avoid freeing our giant cache.
apache-2.0
M157q/pelican-plugins
liquid_tags/test_flickr.py
278
2466
"""Tests for the flickr liquid tag: syntax regex, URL building, HTML output."""
from . import flickr
try:
    from unittest.mock import patch
except ImportError:
    from mock import patch
import os
import pytest
import re


PLUGIN_DIR = os.path.dirname(__file__)
TEST_DATA_DIR = os.path.join(PLUGIN_DIR, 'test_data')


@pytest.mark.parametrize('input,expected', [
    ('18873146680 large "test 1"',
     dict(photo_id='18873146680', size='large', alt='test 1')),
    ('18873146680 large \'test 1\'',
     dict(photo_id='18873146680', size='large', alt='test 1')),
    ('18873143536360 medium "test number two"',
     dict(photo_id='18873143536360', size='medium', alt='test number two')),
    ('18873143536360 small "test number 3"',
     dict(photo_id='18873143536360', size='small', alt='test number 3')),
    ('18873143536360 "test 4"',
     dict(photo_id='18873143536360', size=None, alt='test 4')),
    ('18873143536360',
     dict(photo_id='18873143536360', size=None, alt=None)),
    ('123456 small',
     dict(photo_id='123456', size='small', alt=None))
])
def test_regex(input, expected):
    """PARSE_SYNTAX must accept every supported tag-argument combination."""
    assert re.match(flickr.PARSE_SYNTAX, input).groupdict() == expected


@pytest.mark.parametrize('input,expected', [
    (['1', 'server1', '1', 'secret1', 'small'],
     'https://farm1.staticflickr.com/server1/1_secret1_n.jpg'),
    (['2', 'server2', '2', 'secret2', 'medium'],
     'https://farm2.staticflickr.com/server2/2_secret2_c.jpg'),
    (['3', 'server3', '3', 'secret3', 'large'],
     'https://farm3.staticflickr.com/server3/3_secret3_b.jpg')
])
def test_source_url(input, expected):
    """source_url must map each size keyword to the right Flickr suffix."""
    assert flickr.source_url(
        input[0], input[1], input[2], input[3], input[4]) == expected


@patch('liquid_tags.flickr.urlopen')
def test_generate_html(mock_urlopen):
    """generate_html must render an anchor-wrapped <img> from the API data.

    Renamed from the original ``test_generage_html`` typo; pytest still
    collects it because the ``test_`` prefix is preserved.
    """
    # mock the return to deliver the flickr.json file instead
    with open(TEST_DATA_DIR + '/flickr.json', 'rb') as f:
        mock_urlopen.return_value.read.return_value = f.read()

    attrs = dict(
        photo_id='1234567',
        size='large',
        alt='this is a test'
    )

    expected = ('<a href="https://www.flickr.com/photos/'
                'marvinxsteadfast/18841055371/">'
                '<img src="https://farm6.staticflickr.com/5552/1234567_'
                '17ac287217_b.jpg" alt="this is a test"></a>')

    assert flickr.generate_html(attrs, 'abcdef') == expected
agpl-3.0
MoonShineVFX/core
avalon/vendor/requests/packages/chardet/gb2312prober.py
289
1754
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is mozilla.org code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .mbcharsetprober import MultiByteCharSetProber from .codingstatemachine import CodingStateMachine from .chardistribution import GB2312DistributionAnalysis from .mbcssm import GB2312_SM_MODEL class GB2312Prober(MultiByteCharSetProber): def __init__(self): super(GB2312Prober, self).__init__() self.coding_sm = CodingStateMachine(GB2312_SM_MODEL) self.distribution_analyzer = GB2312DistributionAnalysis() self.reset() @property def charset_name(self): return "GB2312" @property def language(self): return "Chinese"
mit
xiaozhen/p2pool
wstools/XMLname.py
291
2479
"""Translate strings to and from SOAP 1.2 XML name encoding Implements rules for mapping application defined name to XML names specified by the w3 SOAP working group for SOAP version 1.2 in Appendix A of "SOAP Version 1.2 Part 2: Adjuncts", W3C Working Draft 17, December 2001, <http://www.w3.org/TR/soap12-part2/#namemap> Also see <http://www.w3.org/2000/xp/Group/xmlp-issues>. Author: Gregory R. Warnes <Gregory.R.Warnes@Pfizer.com> Date:: 2002-04-25 Version 0.9.0 """ ident = "$Id$" from re import * def _NCNameChar(x): return x.isalpha() or x.isdigit() or x=="." or x=='-' or x=="_" def _NCNameStartChar(x): return x.isalpha() or x=="_" def _toUnicodeHex(x): hexval = hex(ord(x[0]))[2:] hexlen = len(hexval) # Make hexval have either 4 or 8 digits by prepending 0's if (hexlen==1): hexval = "000" + hexval elif (hexlen==2): hexval = "00" + hexval elif (hexlen==3): hexval = "0" + hexval elif (hexlen==4): hexval = "" + hexval elif (hexlen==5): hexval = "000" + hexval elif (hexlen==6): hexval = "00" + hexval elif (hexlen==7): hexval = "0" + hexval elif (hexlen==8): hexval = "" + hexval else: raise Exception, "Illegal Value returned from hex(ord(x))" return "_x"+ hexval + "_" def _fromUnicodeHex(x): return eval( r'u"\u'+x[2:-1]+'"' ) def toXMLname(string): """Convert string to a XML name.""" if string.find(':') != -1 : (prefix, localname) = string.split(':',1) else: prefix = None localname = string T = unicode(localname) N = len(localname) X = []; for i in range(N) : if i< N-1 and T[i]==u'_' and T[i+1]==u'x': X.append(u'_x005F_') elif i==0 and N >= 3 and \ ( T[0]==u'x' or T[0]==u'X' ) and \ ( T[1]==u'm' or T[1]==u'M' ) and \ ( T[2]==u'l' or T[2]==u'L' ): X.append(u'_xFFFF_' + T[0]) elif (not _NCNameChar(T[i])) or (i==0 and not _NCNameStartChar(T[i])): X.append(_toUnicodeHex(T[i])) else: X.append(T[i]) if prefix: return "%s:%s" % (prefix, u''.join(X)) return u''.join(X) def fromXMLname(string): """Convert XML name to unicode string.""" retval = sub(r'_xFFFF_','', string ) def 
fun( matchobj ): return _fromUnicodeHex( matchobj.group(0) ) retval = sub(r'_x[0-9A-Za-z]+_', fun, retval ) return retval
gpl-3.0
nexiles/odoo
addons/mrp/report/bom_structure.py
297
2405
## -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import osv from openerp.report import report_sxw class bom_structure(report_sxw.rml_parse): def __init__(self, cr, uid, name, context): super(bom_structure, self).__init__(cr, uid, name, context=context) self.localcontext.update({ 'get_children': self.get_children, }) def get_children(self, object, level=0): result = [] def _get_rec(object, level): for l in object: res = {} res['pname'] = l.product_id.name res['pcode'] = l.product_id.default_code res['pqty'] = l.product_qty res['uname'] = l.product_uom.name res['level'] = level res['code'] = l.bom_id.code result.append(res) if l.child_line_ids: if level<6: level += 1 _get_rec(l.child_line_ids,level) if level>0 and level<6: level -= 1 return result children = _get_rec(object,level) return children class report_mrpbomstructure(osv.AbstractModel): _name = 'report.mrp.report_mrpbomstructure' _inherit = 'report.abstract_report' _template = 'mrp.report_mrpbomstructure' _wrapped_report_class = bom_structure # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
Kenneth-Posey/kens-old-projects
smokin-goldshop/handler/paypalipn.py
1
1157
import os, logging, re, cgi, urllib from public import mechanize from google.appengine.api import memcache, users, urlfetch, taskqueue from google.appengine.ext import db, webapp from google.appengine.ext.webapp.template import render from google.appengine.ext.webapp.util import run_wsgi_app from models.paypalipn import PaypalIPN from base import BaseHandler class PaypalIpnHistory(BaseHandler): LOCATION = "../views/paypalipn.html" def GetContext(self): tContext = {} tActionDic = {} tActionDic['signup'] = 'subscr_signup' tActionDic['payment'] = 'subscr_payment' tActionDic['cancel'] = 'subscr_cancel' tActionDic['fail'] = 'subscr_failed' tIpnList = [] tPaypalIpn = PaypalIPN().all() #variable filter tAction = str(self.request.get("action")) if(tAction in tActionDic.keys()): tPaypalIpn.filter('txn_type', tActionDic[tAction]) tPaypalIpn.order('-ipnMessageSent') tIpnList = tPaypalIpn.fetch(limit=150) tContext['ipnlist'] = tIpnList return tContext
gpl-2.0
TeamTwisted/external_chromium_org
third_party/tlslite/tlslite/utils/python_rsakey.py
60
4405
# Author: Trevor Perrin # See the LICENSE file for legal information regarding use of this file. """Pure-Python RSA implementation.""" from .cryptomath import * from .asn1parser import ASN1Parser from .rsakey import * from .pem import * class Python_RSAKey(RSAKey): def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0): if (n and not e) or (e and not n): raise AssertionError() self.n = n self.e = e self.d = d self.p = p self.q = q self.dP = dP self.dQ = dQ self.qInv = qInv self.blinder = 0 self.unblinder = 0 def hasPrivateKey(self): return self.d != 0 def _rawPrivateKeyOp(self, m): #Create blinding values, on the first pass: if not self.blinder: self.unblinder = getRandomNumber(2, self.n) self.blinder = powMod(invMod(self.unblinder, self.n), self.e, self.n) #Blind the input m = (m * self.blinder) % self.n #Perform the RSA operation c = self._rawPrivateKeyOpHelper(m) #Unblind the output c = (c * self.unblinder) % self.n #Update blinding values self.blinder = (self.blinder * self.blinder) % self.n self.unblinder = (self.unblinder * self.unblinder) % self.n #Return the output return c def _rawPrivateKeyOpHelper(self, m): #Non-CRT version #c = powMod(m, self.d, self.n) #CRT version (~3x faster) s1 = powMod(m, self.dP, self.p) s2 = powMod(m, self.dQ, self.q) h = ((s1 - s2) * self.qInv) % self.p c = s2 + self.q * h return c def _rawPublicKeyOp(self, c): m = powMod(c, self.e, self.n) return m def acceptsPassword(self): return False def generate(bits): key = Python_RSAKey() p = getRandomPrime(bits//2, False) q = getRandomPrime(bits//2, False) t = lcm(p-1, q-1) key.n = p * q key.e = 65537 key.d = invMod(key.e, t) key.p = p key.q = q key.dP = key.d % (p-1) key.dQ = key.d % (q-1) key.qInv = invMod(q, p) return key generate = staticmethod(generate) def parsePEM(s, passwordCallback=None): """Parse a string containing a <privateKey> or <publicKey>, or PEM-encoded key.""" if pemSniff(s, "PRIVATE KEY"): bytes = dePem(s, "PRIVATE KEY") return 
Python_RSAKey._parsePKCS8(bytes) elif pemSniff(s, "RSA PRIVATE KEY"): bytes = dePem(s, "RSA PRIVATE KEY") return Python_RSAKey._parseSSLeay(bytes) else: raise SyntaxError("Not a PEM private key file") parsePEM = staticmethod(parsePEM) def _parsePKCS8(bytes): p = ASN1Parser(bytes) version = p.getChild(0).value[0] if version != 0: raise SyntaxError("Unrecognized PKCS8 version") rsaOID = p.getChild(1).value if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]: raise SyntaxError("Unrecognized AlgorithmIdentifier") #Get the privateKey privateKeyP = p.getChild(2) #Adjust for OCTET STRING encapsulation privateKeyP = ASN1Parser(privateKeyP.value) return Python_RSAKey._parseASN1PrivateKey(privateKeyP) _parsePKCS8 = staticmethod(_parsePKCS8) def _parseSSLeay(bytes): privateKeyP = ASN1Parser(bytes) return Python_RSAKey._parseASN1PrivateKey(privateKeyP) _parseSSLeay = staticmethod(_parseSSLeay) def _parseASN1PrivateKey(privateKeyP): version = privateKeyP.getChild(0).value[0] if version != 0: raise SyntaxError("Unrecognized RSAPrivateKey version") n = bytesToNumber(privateKeyP.getChild(1).value) e = bytesToNumber(privateKeyP.getChild(2).value) d = bytesToNumber(privateKeyP.getChild(3).value) p = bytesToNumber(privateKeyP.getChild(4).value) q = bytesToNumber(privateKeyP.getChild(5).value) dP = bytesToNumber(privateKeyP.getChild(6).value) dQ = bytesToNumber(privateKeyP.getChild(7).value) qInv = bytesToNumber(privateKeyP.getChild(8).value) return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv) _parseASN1PrivateKey = staticmethod(_parseASN1PrivateKey)
bsd-3-clause
frank10704/DF_GCS_W
MissionPlanner-master/packages/IronPython.StdLib.2.7.5-beta1/content/Lib/formatter.py
67
15356
"""Generic output formatting. Formatter objects transform an abstract flow of formatting events into specific output events on writer objects. Formatters manage several stack structures to allow various properties of a writer object to be changed and restored; writers need not be able to handle relative changes nor any sort of ``change back'' operation. Specific writer properties which may be controlled via formatter objects are horizontal alignment, font, and left margin indentations. A mechanism is provided which supports providing arbitrary, non-exclusive style settings to a writer as well. Additional interfaces facilitate formatting events which are not reversible, such as paragraph separation. Writer objects encapsulate device interfaces. Abstract devices, such as file formats, are supported as well as physical devices. The provided implementations all work with abstract devices. The interface makes available mechanisms for setting the properties which formatter objects manage and inserting data into the output. """ import sys AS_IS = None class NullFormatter: """A formatter which does nothing. If the writer parameter is omitted, a NullWriter instance is created. No methods of the writer are called by NullFormatter instances. Implementations should inherit from this class if implementing a writer interface but don't need to inherit any implementation. 
""" def __init__(self, writer=None): if writer is None: writer = NullWriter() self.writer = writer def end_paragraph(self, blankline): pass def add_line_break(self): pass def add_hor_rule(self, *args, **kw): pass def add_label_data(self, format, counter, blankline=None): pass def add_flowing_data(self, data): pass def add_literal_data(self, data): pass def flush_softspace(self): pass def push_alignment(self, align): pass def pop_alignment(self): pass def push_font(self, x): pass def pop_font(self): pass def push_margin(self, margin): pass def pop_margin(self): pass def set_spacing(self, spacing): pass def push_style(self, *styles): pass def pop_style(self, n=1): pass def assert_line_data(self, flag=1): pass class AbstractFormatter: """The standard formatter. This implementation has demonstrated wide applicability to many writers, and may be used directly in most circumstances. It has been used to implement a full-featured World Wide Web browser. """ # Space handling policy: blank spaces at the boundary between elements # are handled by the outermost context. "Literal" data is not checked # to determine context, so spaces in literal data are handled directly # in all circumstances. def __init__(self, writer): self.writer = writer # Output device self.align = None # Current alignment self.align_stack = [] # Alignment stack self.font_stack = [] # Font state self.margin_stack = [] # Margin state self.spacing = None # Vertical spacing state self.style_stack = [] # Other state, e.g. color self.nospace = 1 # Should leading space be suppressed self.softspace = 0 # Should a space be inserted self.para_end = 1 # Just ended a paragraph self.parskip = 0 # Skipped space between paragraphs? 
self.hard_break = 1 # Have a hard break self.have_label = 0 def end_paragraph(self, blankline): if not self.hard_break: self.writer.send_line_break() self.have_label = 0 if self.parskip < blankline and not self.have_label: self.writer.send_paragraph(blankline - self.parskip) self.parskip = blankline self.have_label = 0 self.hard_break = self.nospace = self.para_end = 1 self.softspace = 0 def add_line_break(self): if not (self.hard_break or self.para_end): self.writer.send_line_break() self.have_label = self.parskip = 0 self.hard_break = self.nospace = 1 self.softspace = 0 def add_hor_rule(self, *args, **kw): if not self.hard_break: self.writer.send_line_break() self.writer.send_hor_rule(*args, **kw) self.hard_break = self.nospace = 1 self.have_label = self.para_end = self.softspace = self.parskip = 0 def add_label_data(self, format, counter, blankline = None): if self.have_label or not self.hard_break: self.writer.send_line_break() if not self.para_end: self.writer.send_paragraph((blankline and 1) or 0) if isinstance(format, str): self.writer.send_label_data(self.format_counter(format, counter)) else: self.writer.send_label_data(format) self.nospace = self.have_label = self.hard_break = self.para_end = 1 self.softspace = self.parskip = 0 def format_counter(self, format, counter): label = '' for c in format: if c == '1': label = label + ('%d' % counter) elif c in 'aA': if counter > 0: label = label + self.format_letter(c, counter) elif c in 'iI': if counter > 0: label = label + self.format_roman(c, counter) else: label = label + c return label def format_letter(self, case, counter): label = '' while counter > 0: counter, x = divmod(counter-1, 26) # This makes a strong assumption that lowercase letters # and uppercase letters form two contiguous blocks, with # letters in order! 
s = chr(ord(case) + x) label = s + label return label def format_roman(self, case, counter): ones = ['i', 'x', 'c', 'm'] fives = ['v', 'l', 'd'] label, index = '', 0 # This will die of IndexError when counter is too big while counter > 0: counter, x = divmod(counter, 10) if x == 9: label = ones[index] + ones[index+1] + label elif x == 4: label = ones[index] + fives[index] + label else: if x >= 5: s = fives[index] x = x-5 else: s = '' s = s + ones[index]*x label = s + label index = index + 1 if case == 'I': return label.upper() return label def add_flowing_data(self, data): if not data: return prespace = data[:1].isspace() postspace = data[-1:].isspace() data = " ".join(data.split()) if self.nospace and not data: return elif prespace or self.softspace: if not data: if not self.nospace: self.softspace = 1 self.parskip = 0 return if not self.nospace: data = ' ' + data self.hard_break = self.nospace = self.para_end = \ self.parskip = self.have_label = 0 self.softspace = postspace self.writer.send_flowing_data(data) def add_literal_data(self, data): if not data: return if self.softspace: self.writer.send_flowing_data(" ") self.hard_break = data[-1:] == '\n' self.nospace = self.para_end = self.softspace = \ self.parskip = self.have_label = 0 self.writer.send_literal_data(data) def flush_softspace(self): if self.softspace: self.hard_break = self.para_end = self.parskip = \ self.have_label = self.softspace = 0 self.nospace = 1 self.writer.send_flowing_data(' ') def push_alignment(self, align): if align and align != self.align: self.writer.new_alignment(align) self.align = align self.align_stack.append(align) else: self.align_stack.append(self.align) def pop_alignment(self): if self.align_stack: del self.align_stack[-1] if self.align_stack: self.align = align = self.align_stack[-1] self.writer.new_alignment(align) else: self.align = None self.writer.new_alignment(None) def push_font(self, font): size, i, b, tt = font if self.softspace: self.hard_break = self.para_end = 
self.softspace = 0 self.nospace = 1 self.writer.send_flowing_data(' ') if self.font_stack: csize, ci, cb, ctt = self.font_stack[-1] if size is AS_IS: size = csize if i is AS_IS: i = ci if b is AS_IS: b = cb if tt is AS_IS: tt = ctt font = (size, i, b, tt) self.font_stack.append(font) self.writer.new_font(font) def pop_font(self): if self.font_stack: del self.font_stack[-1] if self.font_stack: font = self.font_stack[-1] else: font = None self.writer.new_font(font) def push_margin(self, margin): self.margin_stack.append(margin) fstack = filter(None, self.margin_stack) if not margin and fstack: margin = fstack[-1] self.writer.new_margin(margin, len(fstack)) def pop_margin(self): if self.margin_stack: del self.margin_stack[-1] fstack = filter(None, self.margin_stack) if fstack: margin = fstack[-1] else: margin = None self.writer.new_margin(margin, len(fstack)) def set_spacing(self, spacing): self.spacing = spacing self.writer.new_spacing(spacing) def push_style(self, *styles): if self.softspace: self.hard_break = self.para_end = self.softspace = 0 self.nospace = 1 self.writer.send_flowing_data(' ') for style in styles: self.style_stack.append(style) self.writer.new_styles(tuple(self.style_stack)) def pop_style(self, n=1): del self.style_stack[-n:] self.writer.new_styles(tuple(self.style_stack)) def assert_line_data(self, flag=1): self.nospace = self.hard_break = not flag self.para_end = self.parskip = self.have_label = 0 class NullWriter: """Minimal writer interface to use in testing & inheritance. A writer which only provides the interface definition; no actions are taken on any methods. This should be the base class for all writers which do not need to inherit any implementation methods. 
""" def __init__(self): pass def flush(self): pass def new_alignment(self, align): pass def new_font(self, font): pass def new_margin(self, margin, level): pass def new_spacing(self, spacing): pass def new_styles(self, styles): pass def send_paragraph(self, blankline): pass def send_line_break(self): pass def send_hor_rule(self, *args, **kw): pass def send_label_data(self, data): pass def send_flowing_data(self, data): pass def send_literal_data(self, data): pass class AbstractWriter(NullWriter): """A writer which can be used in debugging formatters, but not much else. Each method simply announces itself by printing its name and arguments on standard output. """ def new_alignment(self, align): print "new_alignment(%r)" % (align,) def new_font(self, font): print "new_font(%r)" % (font,) def new_margin(self, margin, level): print "new_margin(%r, %d)" % (margin, level) def new_spacing(self, spacing): print "new_spacing(%r)" % (spacing,) def new_styles(self, styles): print "new_styles(%r)" % (styles,) def send_paragraph(self, blankline): print "send_paragraph(%r)" % (blankline,) def send_line_break(self): print "send_line_break()" def send_hor_rule(self, *args, **kw): print "send_hor_rule()" def send_label_data(self, data): print "send_label_data(%r)" % (data,) def send_flowing_data(self, data): print "send_flowing_data(%r)" % (data,) def send_literal_data(self, data): print "send_literal_data(%r)" % (data,) class DumbWriter(NullWriter): """Simple writer class which writes output on the file object passed in as the file parameter or, if file is omitted, on standard output. The output is simply word-wrapped to the number of columns specified by the maxcol parameter. This class is suitable for reflowing a sequence of paragraphs. 
""" def __init__(self, file=None, maxcol=72): self.file = file or sys.stdout self.maxcol = maxcol NullWriter.__init__(self) self.reset() def reset(self): self.col = 0 self.atbreak = 0 def send_paragraph(self, blankline): self.file.write('\n'*blankline) self.col = 0 self.atbreak = 0 def send_line_break(self): self.file.write('\n') self.col = 0 self.atbreak = 0 def send_hor_rule(self, *args, **kw): self.file.write('\n') self.file.write('-'*self.maxcol) self.file.write('\n') self.col = 0 self.atbreak = 0 def send_literal_data(self, data): self.file.write(data) i = data.rfind('\n') if i >= 0: self.col = 0 data = data[i+1:] data = data.expandtabs() self.col = self.col + len(data) self.atbreak = 0 def send_flowing_data(self, data): if not data: return atbreak = self.atbreak or data[0].isspace() col = self.col maxcol = self.maxcol write = self.file.write for word in data.split(): if atbreak: if col + len(word) >= maxcol: write('\n') col = 0 else: write(' ') col = col + 1 write(word) col = col + len(word) atbreak = 1 self.col = col self.atbreak = data[-1].isspace() def test(file = None): w = DumbWriter() f = AbstractFormatter(w) if file is not None: fp = open(file) elif sys.argv[1:]: fp = open(sys.argv[1]) else: fp = sys.stdin for line in fp: if line == '\n': f.end_paragraph(1) else: f.add_flowing_data(line) f.end_paragraph(0) if __name__ == '__main__': test()
gpl-3.0
KhalidGit/flask
Work/Trivia - Module 5/env/Lib/site-packages/werkzeug/testsuite/internal.py
146
2940
# -*- coding: utf-8 -*- """ werkzeug.testsuite.internal ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Internal tests. :copyright: (c) 2014 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ import unittest from datetime import datetime from warnings import filterwarnings, resetwarnings from werkzeug.testsuite import WerkzeugTestCase from werkzeug.wrappers import Request, Response from werkzeug import _internal as internal from werkzeug.test import create_environ class InternalTestCase(WerkzeugTestCase): def test_date_to_unix(self): assert internal._date_to_unix(datetime(1970, 1, 1)) == 0 assert internal._date_to_unix(datetime(1970, 1, 1, 1, 0, 0)) == 3600 assert internal._date_to_unix(datetime(1970, 1, 1, 1, 1, 1)) == 3661 x = datetime(2010, 2, 15, 16, 15, 39) assert internal._date_to_unix(x) == 1266250539 def test_easteregg(self): req = Request.from_values('/?macgybarchakku') resp = Response.force_type(internal._easteregg(None), req) assert b'About Werkzeug' in resp.get_data() assert b'the Swiss Army knife of Python web development' in resp.get_data() def test_wrapper_internals(self): req = Request.from_values(data={'foo': 'bar'}, method='POST') req._load_form_data() assert req.form.to_dict() == {'foo': 'bar'} # second call does not break req._load_form_data() assert req.form.to_dict() == {'foo': 'bar'} # check reprs assert repr(req) == "<Request 'http://localhost/' [POST]>" resp = Response() assert repr(resp) == '<Response 0 bytes [200 OK]>' resp.set_data('Hello World!') assert repr(resp) == '<Response 12 bytes [200 OK]>' resp.response = iter(['Test']) assert repr(resp) == '<Response streamed [200 OK]>' # unicode data does not set content length response = Response([u'Hällo Wörld']) headers = response.get_wsgi_headers(create_environ()) assert u'Content-Length' not in headers response = Response([u'Hällo Wörld'.encode('utf-8')]) headers = response.get_wsgi_headers(create_environ()) assert u'Content-Length' in headers # check for internal warnings 
filterwarnings('error', category=Warning) response = Response() environ = create_environ() response.response = 'What the...?' self.assert_raises(Warning, lambda: list(response.iter_encoded())) self.assert_raises(Warning, lambda: list(response.get_app_iter(environ))) response.direct_passthrough = True self.assert_raises(Warning, lambda: list(response.iter_encoded())) self.assert_raises(Warning, lambda: list(response.get_app_iter(environ))) resetwarnings() def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(InternalTestCase)) return suite
apache-2.0
Yannig/ansible-modules-core
cloud/openstack/quantum_floating_ip_associate.py
9
8149
#!/usr/bin/python #coding: utf-8 -*- # (c) 2013, Benno Joy <benno@ansible.com> # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. import time try: from novaclient.v1_1 import client as nova_client try: from neutronclient.neutron import client except ImportError: from quantumclient.quantum import client from keystoneclient.v2_0 import client as ksclient HAVE_DEPS = True except ImportError: HAVE_DEPS = False DOCUMENTATION = ''' --- module: quantum_floating_ip_associate version_added: "1.2" short_description: Associate or disassociate a particular floating IP with an instance description: - Associates or disassociates a specific floating IP with a particular instance options: login_username: description: - login username to authenticate to keystone required: true default: admin login_password: description: - password of login user required: true default: 'yes' login_tenant_name: description: - the tenant name of the login user required: true default: true auth_url: description: - the keystone url for authentication required: false default: 'http://127.0.0.1:35357/v2.0/' region_name: description: - name of the region required: false default: None state: description: - indicates the desired state of the resource choices: ['present', 'absent'] default: present instance_name: description: - name of the instance to which the public IP should be assigned required: true default: None ip_address: 
description: - floating ip that should be assigned to the instance required: true default: None requirements: - "python >= 2.6" - "python-novaclient" - "python-neutronclient or python-quantumclient" - "python-keystoneclient" ''' EXAMPLES = ''' # Associate a specific floating IP with an Instance - quantum_floating_ip_associate: state=present login_username=admin login_password=admin login_tenant_name=admin ip_address=1.1.1.1 instance_name=vm1 ''' def _get_ksclient(module, kwargs): try: kclient = ksclient.Client(username=kwargs.get('login_username'), password=kwargs.get('login_password'), tenant_name=kwargs.get('login_tenant_name'), auth_url=kwargs.get('auth_url')) except Exception, e: module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message) global _os_keystone _os_keystone = kclient return kclient def _get_endpoint(module, ksclient): try: endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') except Exception, e: module.fail_json(msg = "Error getting network endpoint: %s" % e.message) return endpoint def _get_neutron_client(module, kwargs): _ksclient = _get_ksclient(module, kwargs) token = _ksclient.auth_token endpoint = _get_endpoint(module, _ksclient) kwargs = { 'token': token, 'endpoint_url': endpoint } try: neutron = client.Client('2.0', **kwargs) except Exception, e: module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) return neutron def _get_server_state(module, nova): server_info = None server = None try: for server in nova.servers.list(): if server: info = server._info if info['name'] == module.params['instance_name']: if info['status'] != 'ACTIVE' and module.params['state'] == 'present': module.fail_json(msg="The VM is available but not Active. 
state:" + info['status']) server_info = info break except Exception, e: module.fail_json(msg = "Error in getting the server list: %s" % e.message) return server_info, server def _get_port_id(neutron, module, instance_id): kwargs = dict(device_id = instance_id) try: ports = neutron.list_ports(**kwargs) except Exception, e: module.fail_json( msg = "Error in listing ports: %s" % e.message) if not ports['ports']: return None return ports['ports'][0]['id'] def _get_floating_ip_id(module, neutron): kwargs = { 'floating_ip_address': module.params['ip_address'] } try: ips = neutron.list_floatingips(**kwargs) except Exception, e: module.fail_json(msg = "error in fetching the floatingips's %s" % e.message) if not ips['floatingips']: module.fail_json(msg = "Could find the ip specified in parameter, Please check") ip = ips['floatingips'][0]['id'] if not ips['floatingips'][0]['port_id']: state = "detached" else: state = "attached" return state, ip def _update_floating_ip(neutron, module, port_id, floating_ip_id): kwargs = { 'port_id': port_id } try: result = neutron.update_floatingip(floating_ip_id, {'floatingip': kwargs}) except Exception, e: module.fail_json(msg = "There was an error in updating the floating ip address: %s" % e.message) module.exit_json(changed = True, result = result, public_ip=module.params['ip_address']) def main(): argument_spec = openstack_argument_spec() argument_spec.update(dict( ip_address = dict(required=True), instance_name = dict(required=True), state = dict(default='present', choices=['absent', 'present']) )) module = AnsibleModule(argument_spec=argument_spec) if not HAVE_DEPS: module.fail_json(msg='python-novaclient, python-keystoneclient, and either python-neutronclient or python-quantumclient are required') try: nova = nova_client.Client(module.params['login_username'], module.params['login_password'], module.params['login_tenant_name'], module.params['auth_url'], service_type='compute') except Exception, e: module.fail_json( msg = " Error in 
authenticating to nova: %s" % e.message) neutron = _get_neutron_client(module, module.params) state, floating_ip_id = _get_floating_ip_id(module, neutron) if module.params['state'] == 'present': if state == 'attached': module.exit_json(changed = False, result = 'attached', public_ip=module.params['ip_address']) server_info, server_obj = _get_server_state(module, nova) if not server_info: module.fail_json(msg = " The instance name provided cannot be found") port_id = _get_port_id(neutron, module, server_info['id']) if not port_id: module.fail_json(msg = "Cannot find a port for this instance, maybe fixed ip is not assigned") _update_floating_ip(neutron, module, port_id, floating_ip_id) if module.params['state'] == 'absent': if state == 'detached': module.exit_json(changed = False, result = 'detached') if state == 'attached': _update_floating_ip(neutron, module, None, floating_ip_id) module.exit_json(changed = True, result = "detached") # this is magic, see lib/ansible/module.params['common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * if __name__ == '__main__': main()
gpl-3.0
mikkylok/mikky.lu
venv/lib/python2.7/site-packages/setuptools/glob.py
242
5207
""" Filename globbing utility. Mostly a copy of `glob` from Python 3.5. Changes include: * `yield from` and PEP3102 `*` removed. * `bytes` changed to `six.binary_type`. * Hidden files are not ignored. """ import os import re import fnmatch from setuptools.extern.six import binary_type __all__ = ["glob", "iglob", "escape"] def glob(pathname, recursive=False): """Return a list of paths matching a pathname pattern. The pattern may contain simple shell-style wildcards a la fnmatch. However, unlike fnmatch, filenames starting with a dot are special cases that are not matched by '*' and '?' patterns. If recursive is true, the pattern '**' will match any files and zero or more directories and subdirectories. """ return list(iglob(pathname, recursive=recursive)) def iglob(pathname, recursive=False): """Return an iterator which yields the paths matching a pathname pattern. The pattern may contain simple shell-style wildcards a la fnmatch. However, unlike fnmatch, filenames starting with a dot are special cases that are not matched by '*' and '?' patterns. If recursive is true, the pattern '**' will match any files and zero or more directories and subdirectories. """ it = _iglob(pathname, recursive) if recursive and _isrecursive(pathname): s = next(it) # skip empty string assert not s return it def _iglob(pathname, recursive): dirname, basename = os.path.split(pathname) if not has_magic(pathname): if basename: if os.path.lexists(pathname): yield pathname else: # Patterns ending with a slash should match only directories if os.path.isdir(dirname): yield pathname return if not dirname: if recursive and _isrecursive(basename): for x in glob2(dirname, basename): yield x else: for x in glob1(dirname, basename): yield x return # `os.path.split()` returns the argument itself as a dirname if it is a # drive or UNC path. Prevent an infinite recursion if a drive or UNC path # contains magic characters (i.e. r'\\?\C:'). 
if dirname != pathname and has_magic(dirname): dirs = _iglob(dirname, recursive) else: dirs = [dirname] if has_magic(basename): if recursive and _isrecursive(basename): glob_in_dir = glob2 else: glob_in_dir = glob1 else: glob_in_dir = glob0 for dirname in dirs: for name in glob_in_dir(dirname, basename): yield os.path.join(dirname, name) # These 2 helper functions non-recursively glob inside a literal directory. # They return a list of basenames. `glob1` accepts a pattern while `glob0` # takes a literal basename (so it only has to check for its existence). def glob1(dirname, pattern): if not dirname: if isinstance(pattern, binary_type): dirname = os.curdir.encode('ASCII') else: dirname = os.curdir try: names = os.listdir(dirname) except OSError: return [] return fnmatch.filter(names, pattern) def glob0(dirname, basename): if not basename: # `os.path.split()` returns an empty basename for paths ending with a # directory separator. 'q*x/' should match only directories. if os.path.isdir(dirname): return [basename] else: if os.path.lexists(os.path.join(dirname, basename)): return [basename] return [] # This helper function recursively yields relative pathnames inside a literal # directory. def glob2(dirname, pattern): assert _isrecursive(pattern) yield pattern[:0] for x in _rlistdir(dirname): yield x # Recursively yields relative pathnames inside a literal directory. 
def _rlistdir(dirname): if not dirname: if isinstance(dirname, binary_type): dirname = binary_type(os.curdir, 'ASCII') else: dirname = os.curdir try: names = os.listdir(dirname) except os.error: return for x in names: yield x path = os.path.join(dirname, x) if dirname else x for y in _rlistdir(path): yield os.path.join(x, y) magic_check = re.compile('([*?[])') magic_check_bytes = re.compile(b'([*?[])') def has_magic(s): if isinstance(s, binary_type): match = magic_check_bytes.search(s) else: match = magic_check.search(s) return match is not None def _isrecursive(pattern): if isinstance(pattern, binary_type): return pattern == b'**' else: return pattern == '**' def escape(pathname): """Escape all special characters. """ # Escaping is done by wrapping any of "*?[" between square brackets. # Metacharacters do not work in the drive part and shouldn't be escaped. drive, pathname = os.path.splitdrive(pathname) if isinstance(pathname, binary_type): pathname = magic_check_bytes.sub(br'[\1]', pathname) else: pathname = magic_check.sub(r'[\1]', pathname) return drive + pathname
mit
EmadMokhtar/Django
tests/postgres_tests/test_citext.py
89
2781
""" The citext PostgreSQL extension supports indexing of case-insensitive text strings and thus eliminates the need for operations such as iexact and other modifiers to enforce use of an index. """ from django.db import IntegrityError from django.test.utils import modify_settings from . import PostgreSQLTestCase from .models import CITestModel @modify_settings(INSTALLED_APPS={'append': 'django.contrib.postgres'}) class CITextTestCase(PostgreSQLTestCase): case_sensitive_lookups = ('contains', 'startswith', 'endswith', 'regex') @classmethod def setUpTestData(cls): cls.john = CITestModel.objects.create( name='JoHn', email='joHn@johN.com', description='Average Joe named JoHn', array_field=['JoE', 'jOhn'], ) def test_equal_lowercase(self): """ citext removes the need for iexact as the index is case-insensitive. """ self.assertEqual(CITestModel.objects.filter(name=self.john.name.lower()).count(), 1) self.assertEqual(CITestModel.objects.filter(email=self.john.email.lower()).count(), 1) self.assertEqual(CITestModel.objects.filter(description=self.john.description.lower()).count(), 1) def test_fail_citext_primary_key(self): """ Creating an entry for a citext field used as a primary key which clashes with an existing value isn't allowed. 
""" with self.assertRaises(IntegrityError): CITestModel.objects.create(name='John') def test_array_field(self): instance = CITestModel.objects.get() self.assertEqual(instance.array_field, self.john.array_field) self.assertTrue(CITestModel.objects.filter(array_field__contains=['joe']).exists()) def test_lookups_name_char(self): for lookup in self.case_sensitive_lookups: with self.subTest(lookup=lookup): query = {'name__{}'.format(lookup): 'john'} self.assertSequenceEqual(CITestModel.objects.filter(**query), [self.john]) def test_lookups_description_text(self): for lookup, string in zip(self.case_sensitive_lookups, ('average', 'average joe', 'john', 'Joe.named')): with self.subTest(lookup=lookup, string=string): query = {'description__{}'.format(lookup): string} self.assertSequenceEqual(CITestModel.objects.filter(**query), [self.john]) def test_lookups_email(self): for lookup, string in zip(self.case_sensitive_lookups, ('john', 'john', 'john.com', 'john.com')): with self.subTest(lookup=lookup, string=string): query = {'email__{}'.format(lookup): string} self.assertSequenceEqual(CITestModel.objects.filter(**query), [self.john])
mit
MrNuggles/HeyBoet-Telegram-Bot
temboo/Library/Amazon/S3/PutBucketNotification.py
5
4521
# -*- coding: utf-8 -*-

###############################################################################
#
# PutBucketNotification
# Enables Amazon SNS notifications of specified events for a bucket.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################

# NOTE: this module is Temboo-generated SDK code; the structure below
# (Choreography / InputSet / ResultSet / ChoreographyExecution subclasses)
# is the standard pattern shared by all Temboo choreos.

from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution

import json


class PutBucketNotification(Choreography):

    def __init__(self, temboo_session):
        """
        Create a new instance of the PutBucketNotification Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        # The path identifies this choreo on the Temboo service.
        super(PutBucketNotification, self).__init__(temboo_session, '/Library/Amazon/S3/PutBucketNotification')

    def new_input_set(self):
        # Factory for the strongly-typed input container used by execute().
        return PutBucketNotificationInputSet()

    def _make_result_set(self, result, path):
        return PutBucketNotificationResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        return PutBucketNotificationChoreographyExecution(session, exec_id, path)


class PutBucketNotificationInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the PutBucketNotification
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    def set_AWSAccessKeyId(self, value):
        """
        Set the value of the AWSAccessKeyId input for this Choreo. ((required, string) The Access Key ID provided by Amazon Web Services.)
        """
        super(PutBucketNotificationInputSet, self)._set_input('AWSAccessKeyId', value)

    def set_AWSSecretKeyId(self, value):
        """
        Set the value of the AWSSecretKeyId input for this Choreo. ((required, string) The Secret Key ID provided by Amazon Web Services.)
        """
        super(PutBucketNotificationInputSet, self)._set_input('AWSSecretKeyId', value)

    def set_BucketName(self, value):
        """
        Set the value of the BucketName input for this Choreo. ((required, string) The name of the bucket to create a notification for.)
        """
        super(PutBucketNotificationInputSet, self)._set_input('BucketName', value)

    def set_Event(self, value):
        """
        Set the value of the Event input for this Choreo. ((optional, string) A bucket event for which to send notifications. Valid value: "s3:ReducedRedundancyLostObject" (The default and currently only supported notification event).)
        """
        super(PutBucketNotificationInputSet, self)._set_input('Event', value)

    def set_Topic(self, value):
        """
        Set the value of the Topic input for this Choreo. ((conditional, string) The Amazon SNS topic arn that Amazon S3 will publish a message to report the specified events for the bucket. If this is not supplied, notifications will be turned off.)
        """
        super(PutBucketNotificationInputSet, self)._set_input('Topic', value)


class PutBucketNotificationResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the PutBucketNotification Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    # NOTE(review): the parameter name `str` shadows the builtin; kept as-is
    # because this is generated code and the signature is part of the SDK API.
    def getJSONFromString(self, str):
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((string) Stores the response from Amazon. Note that for a successful execution, no content is returned and this output variable should be empty.)
        """
        return self._output.get('Response', None)


class PutBucketNotificationChoreographyExecution(ChoreographyExecution):

    def _make_result_set(self, response, path):
        return PutBucketNotificationResultSet(response, path)
gpl-3.0
openstack/ironic
ironic/tests/unit/db/base.py
4
2239
# Copyright (c) 2012 NTT DOCOMO, INC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Ironic DB test base class.""" import fixtures from oslo_config import cfg from oslo_db.sqlalchemy import enginefacade from ironic.db import api as dbapi from ironic.db.sqlalchemy import migration from ironic.db.sqlalchemy import models from ironic.tests import base CONF = cfg.CONF _DB_CACHE = None class Database(fixtures.Fixture): def __init__(self, engine, db_migrate, sql_connection): self.sql_connection = sql_connection self.engine = engine self.engine.dispose() conn = self.engine.connect() self.setup_sqlite(db_migrate) self.post_migrations() self._DB = "".join(line for line in conn.connection.iterdump()) self.engine.dispose() def setup_sqlite(self, db_migrate): if db_migrate.version(): return models.Base.metadata.create_all(self.engine) db_migrate.stamp('head') def setUp(self): super(Database, self).setUp() conn = self.engine.connect() conn.connection.executescript(self._DB) self.addCleanup(self.engine.dispose) def post_migrations(self): """Any addition steps that are needed outside of the migrations.""" class DbTestCase(base.TestCase): def setUp(self): super(DbTestCase, self).setUp() self.dbapi = dbapi.get_instance() global _DB_CACHE if not _DB_CACHE: engine = enginefacade.writer.get_engine() _DB_CACHE = Database(engine, migration, sql_connection=CONF.database.connection) self.useFixture(_DB_CACHE)
apache-2.0
home-assistant/home-assistant
tests/components/meteo_france/test_config_flow.py
2
7772
"""Tests for the Meteo-France config flow.""" from unittest.mock import patch from meteofrance_api.model import Place import pytest from homeassistant import data_entry_flow from homeassistant.components.meteo_france.const import ( CONF_CITY, DOMAIN, FORECAST_MODE_DAILY, FORECAST_MODE_HOURLY, ) from homeassistant.config_entries import SOURCE_IMPORT, SOURCE_USER from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_MODE from homeassistant.core import HomeAssistant from tests.common import MockConfigEntry CITY_1_POSTAL = "74220" CITY_1_NAME = "La Clusaz" CITY_1_LAT = 45.90417 CITY_1_LON = 6.42306 CITY_1_COUNTRY = "FR" CITY_1_ADMIN = "Rhône-Alpes" CITY_1_ADMIN2 = "74" CITY_1 = Place( { "name": CITY_1_NAME, "lat": CITY_1_LAT, "lon": CITY_1_LON, "country": CITY_1_COUNTRY, "admin": CITY_1_ADMIN, "admin2": CITY_1_ADMIN2, } ) CITY_2_NAME = "Auch" CITY_2_LAT = 43.64528 CITY_2_LON = 0.58861 CITY_2_COUNTRY = "FR" CITY_2_ADMIN = "Midi-Pyrénées" CITY_2_ADMIN2 = "32" CITY_2 = Place( { "name": CITY_2_NAME, "lat": CITY_2_LAT, "lon": CITY_2_LON, "country": CITY_2_COUNTRY, "admin": CITY_2_ADMIN, "admin2": CITY_2_ADMIN2, } ) CITY_3_NAME = "Auchel" CITY_3_LAT = 50.50833 CITY_3_LON = 2.47361 CITY_3_COUNTRY = "FR" CITY_3_ADMIN = "Nord-Pas-de-Calais" CITY_3_ADMIN2 = "62" CITY_3 = Place( { "name": CITY_3_NAME, "lat": CITY_3_LAT, "lon": CITY_3_LON, "country": CITY_3_COUNTRY, "admin": CITY_3_ADMIN, "admin2": CITY_3_ADMIN2, } ) @pytest.fixture(name="client_single") def mock_controller_client_single(): """Mock a successful client.""" with patch( "homeassistant.components.meteo_france.config_flow.MeteoFranceClient", update=False, ) as service_mock: service_mock.return_value.search_places.return_value = [CITY_1] yield service_mock @pytest.fixture(autouse=True) def mock_setup(): """Prevent setup.""" with patch( "homeassistant.components.meteo_france.async_setup", return_value=True, ), patch( "homeassistant.components.meteo_france.async_setup_entry", return_value=True, ): yield 
@pytest.fixture(name="client_multiple") def mock_controller_client_multiple(): """Mock a successful client.""" with patch( "homeassistant.components.meteo_france.config_flow.MeteoFranceClient", update=False, ) as service_mock: service_mock.return_value.search_places.return_value = [CITY_2, CITY_3] yield service_mock @pytest.fixture(name="client_empty") def mock_controller_client_empty(): """Mock a successful client.""" with patch( "homeassistant.components.meteo_france.config_flow.MeteoFranceClient", update=False, ) as service_mock: service_mock.return_value.search_places.return_value = [] yield service_mock async def test_user(hass, client_single): """Test user config.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_USER} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" # test with all provided with search returning only 1 place result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_USER}, data={CONF_CITY: CITY_1_POSTAL}, ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["result"].unique_id == f"{CITY_1_LAT}, {CITY_1_LON}" assert result["title"] == f"{CITY_1}" assert result["data"][CONF_LATITUDE] == str(CITY_1_LAT) assert result["data"][CONF_LONGITUDE] == str(CITY_1_LON) async def test_user_list(hass, client_multiple): """Test user config.""" # test with all provided with search returning more than 1 place result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_USER}, data={CONF_CITY: CITY_2_NAME}, ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "cities" result = await hass.config_entries.flow.async_configure( result["flow_id"], user_input={CONF_CITY: f"{CITY_3};{CITY_3_LAT};{CITY_3_LON}"}, ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["result"].unique_id == f"{CITY_3_LAT}, {CITY_3_LON}" assert result["title"] == 
f"{CITY_3}" assert result["data"][CONF_LATITUDE] == str(CITY_3_LAT) assert result["data"][CONF_LONGITUDE] == str(CITY_3_LON) async def test_import(hass, client_multiple): """Test import step.""" # import with all result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_IMPORT}, data={CONF_CITY: CITY_2_NAME}, ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["result"].unique_id == f"{CITY_2_LAT}, {CITY_2_LON}" assert result["title"] == f"{CITY_2}" assert result["data"][CONF_LATITUDE] == str(CITY_2_LAT) assert result["data"][CONF_LONGITUDE] == str(CITY_2_LON) async def test_search_failed(hass, client_empty): """Test error displayed if no result in search.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_USER}, data={CONF_CITY: CITY_1_POSTAL}, ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["errors"] == {CONF_CITY: "empty"} async def test_abort_if_already_setup(hass, client_single): """Test we abort if already setup.""" MockConfigEntry( domain=DOMAIN, data={CONF_LATITUDE: CITY_1_LAT, CONF_LONGITUDE: CITY_1_LON}, unique_id=f"{CITY_1_LAT}, {CITY_1_LON}", ).add_to_hass(hass) # Should fail, same CITY same postal code (import) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_IMPORT}, data={CONF_CITY: CITY_1_POSTAL}, ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" # Should fail, same CITY same postal code (flow) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_USER}, data={CONF_CITY: CITY_1_POSTAL}, ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_options_flow(hass: HomeAssistant): """Test config flow options.""" config_entry = MockConfigEntry( domain=DOMAIN, data={CONF_LATITUDE: CITY_1_LAT, CONF_LONGITUDE: CITY_1_LON}, 
unique_id=f"{CITY_1_LAT}, {CITY_1_LON}", ) config_entry.add_to_hass(hass) assert config_entry.options == {} result = await hass.config_entries.options.async_init(config_entry.entry_id) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "init" # Default result = await hass.config_entries.options.async_configure( result["flow_id"], user_input={}, ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert config_entry.options[CONF_MODE] == FORECAST_MODE_DAILY # Manual result = await hass.config_entries.options.async_init(config_entry.entry_id) result = await hass.config_entries.options.async_configure( result["flow_id"], user_input={CONF_MODE: FORECAST_MODE_HOURLY}, ) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert config_entry.options[CONF_MODE] == FORECAST_MODE_HOURLY
apache-2.0
harshilasu/LinkurApp
y/google-cloud-sdk/lib/googlecloudsdk/gcloud/sdktools/auth/print_refresh_token.py
11
1051
# Copyright 2013 Google Inc. All Rights Reserved.

"""A hidden command that prints refresh tokens.
"""

from oauth2client import client
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as c_exc
from googlecloudsdk.core import log
from googlecloudsdk.core.credentials import store as c_store


@base.Hidden
class PrintRefreshToken(base.Command):
  """A command that prints the refresh token for the current account."""

  @staticmethod
  def Args(parser):
    parser.add_argument(
        'account', nargs='?',
        help=('The account to get the refresh token for. Leave empty for the '
              'active account.'))

  # Translate low-level credential-store and oauth2client errors into
  # user-facing tool exceptions.
  @c_exc.RaiseToolExceptionInsteadOf(c_store.Error, client.Error)
  def Run(self, args):
    """Run the helper command.

    Loads the stored credentials for the requested (or active) account and
    returns its refresh token.

    Raises:
      c_exc.ToolException: if the stored credentials have no refresh token.
    """
    # BUG FIX: the docstrings, help text and error message previously said
    # "access token", but this command loads and prints cred.refresh_token.
    cred = c_store.Load(args.account)
    if not cred.refresh_token:
      raise c_exc.ToolException(
          'No refresh token could be obtained from the current credentials.')
    return cred.refresh_token

  def Display(self, args, token):
    # Print the raw token returned by Run() to stdout.
    log.Print(token)
gpl-3.0
MarcJoan/django
django/contrib/staticfiles/utils.py
335
1976
import fnmatch
import os

from django.conf import settings
from django.core.exceptions import ImproperlyConfigured


def matches_patterns(path, patterns=None):
    """
    Return True or False depending on whether the ``path`` should be
    ignored (if it matches any pattern in ``ignore_patterns``).
    """
    return any(
        fnmatch.fnmatchcase(path, pattern) for pattern in (patterns or [])
    )


def get_files(storage, ignore_patterns=None, location=''):
    """
    Recursively walk the storage directories yielding the paths
    of all files that should be copied.
    """
    if ignore_patterns is None:
        ignore_patterns = []
    directories, files = storage.listdir(location)
    # Yield the non-ignored files at this level first, prefixed with the
    # current location when one was given.
    for filename in files:
        if matches_patterns(filename, ignore_patterns):
            continue
        yield os.path.join(location, filename) if location else filename
    # Then recurse into each non-ignored subdirectory.
    for directory in directories:
        if matches_patterns(directory, ignore_patterns):
            continue
        subdir = os.path.join(location, directory) if location else directory
        for filename in get_files(storage, ignore_patterns, subdir):
            yield filename


def check_settings(base_url=None):
    """
    Checks if the staticfiles settings have sane values.
    """
    if base_url is None:
        base_url = settings.STATIC_URL
    # STATIC_URL must be configured before staticfiles can be used at all.
    if not base_url:
        raise ImproperlyConfigured(
            "You're using the staticfiles app "
            "without having set the required STATIC_URL setting.")
    # Serving media and static files from the same URL/root would let one
    # overwrite the other, so both pairs must differ.
    if settings.MEDIA_URL == base_url:
        raise ImproperlyConfigured(
            "The MEDIA_URL and STATIC_URL settings must have different values")
    if (settings.MEDIA_ROOT and settings.STATIC_ROOT
            and settings.MEDIA_ROOT == settings.STATIC_ROOT):
        raise ImproperlyConfigured(
            "The MEDIA_ROOT and STATIC_ROOT settings must have different values")
bsd-3-clause
jnealtowns/bigcode
tools/infra.py
7
1316
#!/usr/bin/env python ################################################################ # # Copyright 2013, Big Switch Networks, Inc. # # Licensed under the Eclipse Public License, Version 1.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.eclipse.org/legal/epl-v10.html # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the # License. # ################################################################ # # Resolve dependencies on the infra repository. # ################################################################ import os import sys # The root of the repository ROOT = os.path.realpath("%s/.." % (os.path.dirname(__file__))) SUBMODULE_INFRA = os.getenv("SUBMODULE_INFRA") if SUBMODULE_INFRA is None: SUBMODULE_INFRA = "%s/submodules/infra" % ROOT if not os.path.exists("%s/builder/unix/tools" % SUBMODULE_INFRA): raise Exception("This script requires the infra repository.") sys.path.append("%s/builder/unix/tools" % SUBMODULE_INFRA)
epl-1.0
junhuac/MQUIC
depot_tools/ENV/lib/python2.7/site-packages/setuptools/compat.py
456
2094
"""Python 2/3 compatibility shims.

Re-exports a uniform set of names (builtins, stdlib modules whose layout
changed, and small helper lambdas) so the rest of the package can import
from here regardless of interpreter version.
"""

import sys
import itertools

# Version flags used throughout to pick the right spellings.
PY3 = sys.version_info >= (3,)
PY2 = not PY3

if PY2:
    basestring = basestring
    import __builtin__ as builtins
    import ConfigParser
    from StringIO import StringIO
    BytesIO = StringIO
    # Accessors for function internals; attribute names differ between 2 and 3.
    func_code = lambda o: o.func_code
    func_globals = lambda o: o.func_globals
    im_func = lambda o: o.im_func
    from htmlentitydefs import name2codepoint
    import httplib
    from BaseHTTPServer import HTTPServer
    from SimpleHTTPServer import SimpleHTTPRequestHandler
    from BaseHTTPServer import BaseHTTPRequestHandler
    iteritems = lambda o: o.iteritems()
    long_type = long
    maxsize = sys.maxint
    unichr = unichr
    unicode = unicode
    bytes = str
    from urllib import url2pathname, splittag, pathname2url
    import urllib2
    from urllib2 import urlopen, HTTPError, URLError, unquote, splituser
    from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit
    filterfalse = itertools.ifilterfalse

    # The three-argument raise is a syntax error on Python 3, so it must be
    # hidden inside exec() to keep this module importable there.
    exec("""def reraise(tp, value, tb=None):
    raise tp, value, tb""")

if PY3:
    basestring = str
    import builtins
    import configparser as ConfigParser
    from io import StringIO, BytesIO
    func_code = lambda o: o.__code__
    func_globals = lambda o: o.__globals__
    im_func = lambda o: o.__func__
    from html.entities import name2codepoint
    import http.client as httplib
    from http.server import HTTPServer, SimpleHTTPRequestHandler
    from http.server import BaseHTTPRequestHandler
    iteritems = lambda o: o.items()
    long_type = int
    maxsize = sys.maxsize
    unichr = chr
    unicode = str
    bytes = bytes
    from urllib.error import HTTPError, URLError
    import urllib.request as urllib2
    from urllib.request import urlopen, url2pathname, pathname2url
    from urllib.parse import (
        urlparse, urlunparse, unquote, splituser, urljoin, urlsplit,
        urlunsplit, splittag,
    )
    filterfalse = itertools.filterfalse

    def reraise(tp, value, tb=None):
        # Re-raise preserving the original traceback (PEP 3109 style).
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
mit
alexandrucoman/vbox-neutron-agent
neutron/tests/api/test_metering_extensions.py
47
6880
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging
from tempest_lib.common.utils import data_utils

from neutron.tests.api import base
from neutron.tests.tempest import test

LOG = logging.getLogger(__name__)


class MeteringTestJSON(base.BaseAdminNetworkTest):

    """
    Tests the following operations in the Neutron API using the REST client
    for Neutron:

        List, Show, Create, Delete Metering labels
        List, Show, Create, Delete Metering labels rules
    """

    @classmethod
    def resource_setup(cls):
        super(MeteringTestJSON, cls).resource_setup()
        # Skip the whole class when the metering extension is not deployed.
        if not test.is_extension_enabled('metering', 'network'):
            msg = "metering extension not enabled."
            raise cls.skipException(msg)
        # Shared fixtures: one label and one ingress rule whose prefix
        # depends on the address family under test (_ip_version).
        description = "metering label created by tempest"
        name = data_utils.rand_name("metering-label")
        cls.metering_label = cls.create_metering_label(name, description)
        remote_ip_prefix = ("10.0.0.0/24" if cls._ip_version == 4
                            else "fd02::/64")
        direction = "ingress"
        cls.metering_label_rule = cls.create_metering_label_rule(
            remote_ip_prefix, direction,
            metering_label_id=cls.metering_label['id'])

    def _delete_metering_label(self, metering_label_id):
        # Deletes a label and verifies if it is deleted or not
        self.admin_client.delete_metering_label(metering_label_id)
        # Asserting that the label is not found in list after deletion
        labels = self.admin_client.list_metering_labels(id=metering_label_id)
        self.assertEqual(len(labels['metering_labels']), 0)

    def _delete_metering_label_rule(self, metering_label_rule_id):
        # Deletes a rule and verifies if it is deleted or not
        self.admin_client.delete_metering_label_rule(
            metering_label_rule_id)
        # Asserting that the rule is not found in list after deletion
        rules = (self.admin_client.list_metering_label_rules(
            id=metering_label_rule_id))
        self.assertEqual(len(rules['metering_label_rules']), 0)

    @test.attr(type='smoke')
    @test.idempotent_id('e2fb2f8c-45bf-429a-9f17-171c70444612')
    def test_list_metering_labels(self):
        # Verify label filtering
        # id=33 is a deliberately nonexistent id; the filter must match nothing.
        body = self.admin_client.list_metering_labels(id=33)
        metering_labels = body['metering_labels']
        self.assertEqual(0, len(metering_labels))

    @test.attr(type='smoke')
    @test.idempotent_id('ec8e15ff-95d0-433b-b8a6-b466bddb1e50')
    def test_create_delete_metering_label_with_filters(self):
        # Creates a label
        name = data_utils.rand_name('metering-label-')
        description = "label created by tempest"
        body = self.admin_client.create_metering_label(name=name,
                                                       description=description)
        metering_label = body['metering_label']
        self.addCleanup(self._delete_metering_label,
                        metering_label['id'])
        # Assert whether created labels are found in labels list or fail
        # if created labels are not found in labels list
        labels = (self.admin_client.list_metering_labels(
            id=metering_label['id']))
        self.assertEqual(len(labels['metering_labels']), 1)

    @test.attr(type='smoke')
    @test.idempotent_id('30abb445-0eea-472e-bd02-8649f54a5968')
    def test_show_metering_label(self):
        # Verifies the details of a label
        body = self.admin_client.show_metering_label(self.metering_label['id'])
        metering_label = body['metering_label']
        self.assertEqual(self.metering_label['id'], metering_label['id'])
        self.assertEqual(self.metering_label['tenant_id'],
                         metering_label['tenant_id'])
        self.assertEqual(self.metering_label['name'], metering_label['name'])
        self.assertEqual(self.metering_label['description'],
                         metering_label['description'])

    @test.attr(type='smoke')
    @test.idempotent_id('cc832399-6681-493b-9d79-0202831a1281')
    def test_list_metering_label_rules(self):
        # Verify rule filtering
        body = self.admin_client.list_metering_label_rules(id=33)
        metering_label_rules = body['metering_label_rules']
        self.assertEqual(0, len(metering_label_rules))

    @test.attr(type='smoke')
    @test.idempotent_id('f4d547cd-3aee-408f-bf36-454f8825e045')
    def test_create_delete_metering_label_rule_with_filters(self):
        # Creates a rule
        # Use a different prefix from the class fixture to avoid clashes.
        remote_ip_prefix = ("10.0.1.0/24" if self._ip_version == 4
                            else "fd03::/64")
        body = (self.admin_client.create_metering_label_rule(
            remote_ip_prefix=remote_ip_prefix,
            direction="ingress",
            metering_label_id=self.metering_label['id']))
        metering_label_rule = body['metering_label_rule']
        self.addCleanup(self._delete_metering_label_rule,
                        metering_label_rule['id'])
        # Assert whether created rules are found in rules list or fail
        # if created rules are not found in rules list
        rules = (self.admin_client.list_metering_label_rules(
            id=metering_label_rule['id']))
        self.assertEqual(len(rules['metering_label_rules']), 1)

    @test.attr(type='smoke')
    @test.idempotent_id('b7354489-96ea-41f3-9452-bace120fb4a7')
    def test_show_metering_label_rule(self):
        # Verifies the details of a rule
        body = (self.admin_client.show_metering_label_rule(
            self.metering_label_rule['id']))
        metering_label_rule = body['metering_label_rule']
        self.assertEqual(self.metering_label_rule['id'],
                         metering_label_rule['id'])
        self.assertEqual(self.metering_label_rule['remote_ip_prefix'],
                         metering_label_rule['remote_ip_prefix'])
        self.assertEqual(self.metering_label_rule['direction'],
                         metering_label_rule['direction'])
        self.assertEqual(self.metering_label_rule['metering_label_id'],
                         metering_label_rule['metering_label_id'])
        self.assertFalse(metering_label_rule['excluded'])


class MeteringIpV6TestJSON(MeteringTestJSON):
    # Re-runs every test above with IPv6 remote prefixes.
    _ip_version = 6
apache-2.0
ychaim/gest
xcode/Scripts/versiongenerate.py
3088
4536
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# NOTE(review): this script uses the Python 2 `print` statement and will not
# run under Python 3 as-is.

"""A script to prepare version informtion for use the gtest Info.plist file.

  This script extracts the version information from the configure.ac file and
  uses it to generate a header file containing the same information. The
  #defines in this header file will be included in during the generation of
  the Info.plist of the framework, giving the correct value to the version
  shown in the Finder.

  This script makes the following assumptions (these are faults of the script,
  not problems with the Autoconf):
    1. The AC_INIT macro will be contained within the first 1024 characters
       of configure.ac
    2. The version string will be 3 integers separated by periods and will be
       surrounded by squre brackets, "[" and "]" (e.g. [1.0.1]). The first
       segment represents the major version, the second represents the minor
       version and the third represents the fix version.
    3. No ")" character exists between the opening "(" and closing ")" of
       AC_INIT, including in comments and character strings.
"""

import sys
import re

# Read the command line argument (the output directory for Version.h)
if (len(sys.argv) < 3):
  print "Usage: versiongenerate.py input_dir output_dir"
  sys.exit(1)
else:
  input_dir = sys.argv[1]
  output_dir = sys.argv[2]

# Read the first 1024 characters of the configure.ac file
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()

# Extract the version string from the AC_INIT macro
#   The following init_expression means:
#     Extract three integers separated by periods and surrounded by squre
#     brackets(e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
#     (*? is the non-greedy flag) since that would pull in everything between
#     the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
                                re.DOTALL)
version_values = version_expression.search(opening_string)
# NOTE(review): assumption 1 above means `version_values` is presumed non-None
# here; a configure.ac without AC_INIT in the first 1024 bytes would raise
# AttributeError on the next line.
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)

# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not not restricted to C-syntax nor are we using include guards.
//

#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s

""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
bsd-3-clause
nikgr95/scrapy
scrapy/core/spidermw.py
80
3374
""" Spider Middleware manager See documentation in docs/topics/spider-middleware.rst """ import six from twisted.python.failure import Failure from scrapy.middleware import MiddlewareManager from scrapy.utils.defer import mustbe_deferred from scrapy.utils.conf import build_component_list def _isiterable(possible_iterator): return hasattr(possible_iterator, '__iter__') class SpiderMiddlewareManager(MiddlewareManager): component_name = 'spider middleware' @classmethod def _get_mwlist_from_settings(cls, settings): return build_component_list(settings['SPIDER_MIDDLEWARES_BASE'], \ settings['SPIDER_MIDDLEWARES']) def _add_middleware(self, mw): super(SpiderMiddlewareManager, self)._add_middleware(mw) if hasattr(mw, 'process_spider_input'): self.methods['process_spider_input'].append(mw.process_spider_input) if hasattr(mw, 'process_spider_output'): self.methods['process_spider_output'].insert(0, mw.process_spider_output) if hasattr(mw, 'process_spider_exception'): self.methods['process_spider_exception'].insert(0, mw.process_spider_exception) if hasattr(mw, 'process_start_requests'): self.methods['process_start_requests'].insert(0, mw.process_start_requests) def scrape_response(self, scrape_func, response, request, spider): fname = lambda f:'%s.%s' % ( six.get_method_self(f).__class__.__name__, six.get_method_function(f).__name__) def process_spider_input(response): for method in self.methods['process_spider_input']: try: result = method(response=response, spider=spider) assert result is None, \ 'Middleware %s must returns None or ' \ 'raise an exception, got %s ' \ % (fname(method), type(result)) except: return scrape_func(Failure(), request, spider) return scrape_func(response, request, spider) def process_spider_exception(_failure): exception = _failure.value for method in self.methods['process_spider_exception']: result = method(response=response, exception=exception, spider=spider) assert result is None or _isiterable(result), \ 'Middleware %s must returns None, or 
an iterable object, got %s ' % \ (fname(method), type(result)) if result is not None: return result return _failure def process_spider_output(result): for method in self.methods['process_spider_output']: result = method(response=response, result=result, spider=spider) assert _isiterable(result), \ 'Middleware %s must returns an iterable object, got %s ' % \ (fname(method), type(result)) return result dfd = mustbe_deferred(process_spider_input, response) dfd.addErrback(process_spider_exception) dfd.addCallback(process_spider_output) return dfd def process_start_requests(self, start_requests, spider): return self._process_chain('process_start_requests', start_requests, spider)
bsd-3-clause
klahnakoski/ActiveData-ETL
activedata_etl/look_at_queue.py
2
1664
# encoding: utf-8 # # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. # # Contact: Kyle Lahnakoski (kyle@lahnakoski.com) # from __future__ import division from __future__ import unicode_literals from mo_logs import Log from mo_logs import startup from pyLibrary import aws def list_queue(settings, num): queue = aws.Queue(settings) for i in range(num): content = queue.pop() Log.note("\n{{content|json}}", content=content) queue.rollback() def scrub_queue(settings): queue = aws.Queue(settings) existing = set() for i in range(120000): content = queue.pop() try: if (content.key, content.bucket) not in existing: existing.add((content.key, content.bucket)) queue.add(content) Log.note("KEEP {{content|json}}", content=content) else: Log.note("remove {{content|json}}", content=content) finally: queue.commit() def main(): try: settings = startup.read_settings(defs={ "name": ["--num"], "help": "number to show", "type": int, "dest": "num", "default": '10', "required": False }) Log.start(settings.debug) # scrub_queue(settings.source) list_queue(settings.source, settings.args.num) except Exception as e: Log.error("Problem with etl", e) finally: Log.stop() if __name__ == "__main__": main()
mpl-2.0
ITURO/ituro
ituro/sumo/migrations/0002_auto_20150409_0841.py
2
1137
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('sumo', '0001_initial'), ] operations = [ migrations.AddField( model_name='sumogroupmatch', name='order', field=models.PositiveSmallIntegerField(default=0, verbose_name='Order'), preserve_default=True, ), migrations.AlterField( model_name='sumogroupteam', name='average', field=models.IntegerField(default=0, verbose_name='Average'), preserve_default=True, ), migrations.AlterField( model_name='sumogroupteam', name='order', field=models.PositiveSmallIntegerField(default=0, verbose_name='Order'), preserve_default=True, ), migrations.AlterField( model_name='sumogroupteam', name='point', field=models.PositiveSmallIntegerField(default=0, verbose_name='Point'), preserve_default=True, ), ]
mit
teltek/edx-platform
common/lib/symmath/symmath/formula.py
17
21112
#!/usr/bin/python # -*- coding: utf-8 -*- """ Flexible python representation of a symbolic mathematical formula. Acceptes Presentation MathML, Content MathML (and could also do OpenMath). Provides sympy representation. """ # # File: formula.py # Date: 04-May-12 (creation) # Author: I. Chuang <ichuang@mit.edu> # import logging import operator import os import re import string import unicodedata #import subprocess from copy import deepcopy from xml.sax.saxutils import unescape import sympy from lxml import etree from sympy import latex, sympify from sympy.physics.quantum.qubit import Qubit from sympy.physics.quantum.state import Ket from sympy.printing.latex import LatexPrinter from sympy.printing.str import StrPrinter log = logging.getLogger(__name__) log.warning("Dark code. Needs review before enabling in prod.") os.environ['PYTHONIOENCODING'] = 'utf-8' #----------------------------------------------------------------------------- class dot(sympy.operations.LatticeOp): # pylint: disable=invalid-name, no-member """my dot product""" zero = sympy.Symbol('dotzero') identity = sympy.Symbol('dotidentity') def _print_dot(_self, expr): """Print statement used for LatexPrinter""" return r'{((%s) \cdot (%s))}' % (expr.args[0], expr.args[1]) LatexPrinter._print_dot = _print_dot # pylint: disable=protected-access #----------------------------------------------------------------------------- # unit vectors (for 8.02) def _print_hat(_self, expr): """Print statement used for LatexPrinter""" return '\\hat{%s}' % str(expr.args[0]).lower() LatexPrinter._print_hat = _print_hat # pylint: disable=protected-access StrPrinter._print_hat = _print_hat # pylint: disable=protected-access #----------------------------------------------------------------------------- # helper routines def to_latex(expr): """ Convert expression to latex mathjax format """ if expr is None: return '' expr_s = latex(expr) expr_s = expr_s.replace(r'\XI', 'XI') # workaround for strange greek # substitute back into 
latex form for scripts # literally something of the form # 'scriptN' becomes '\\mathcal{N}' # note: can't use something akin to the _print_hat method above because we # sometimes get 'script(N)__B' or more complicated terms expr_s = re.sub( r'script([a-zA-Z0-9]+)', '\\mathcal{\\1}', expr_s ) #return '<math>%s{}{}</math>' % (xs[1:-1]) if expr_s[0] == '$': return '[mathjax]%s[/mathjax]<br>' % (expr_s[1:-1]) # for sympy v6 return '[mathjax]%s[/mathjax]<br>' % (expr_s) # for sympy v7 def my_evalf(expr, chop=False): """ Enhanced sympy evalf to handle lists of expressions and catch eval failures without dropping out. """ if isinstance(expr, list): try: return [x.evalf(chop=chop) for x in expr] except Exception: # pylint: disable=broad-except return expr try: return expr.evalf(chop=chop) except Exception: # pylint: disable=broad-except return expr def my_sympify(expr, normphase=False, matrix=False, abcsym=False, do_qubit=False, symtab=None): """ Version of sympify to import expression into sympy """ # make all lowercase real? if symtab: varset = symtab else: varset = { 'p': sympy.Symbol('p'), 'g': sympy.Symbol('g'), 'e': sympy.E, # for exp 'i': sympy.I, # lowercase i is also sqrt(-1) 'Q': sympy.Symbol('Q'), # otherwise it is a sympy "ask key" 'I': sympy.Symbol('I'), # otherwise it is sqrt(-1) 'N': sympy.Symbol('N'), # or it is some kind of sympy function 'ZZ': sympy.Symbol('ZZ'), # otherwise it is the PythonIntegerRing 'XI': sympy.Symbol('XI'), # otherwise it is the capital \XI 'hat': sympy.Function('hat'), # for unit vectors (8.02) } if do_qubit: # turn qubit(...) 
into Qubit instance varset.update({ 'qubit': Qubit, 'Ket': Ket, 'dot': dot, 'bit': sympy.Function('bit'), }) if abcsym: # consider all lowercase letters as real symbols, in the parsing for letter in string.lowercase: if letter in varset: # exclude those already done continue varset.update({letter: sympy.Symbol(letter, real=True)}) sexpr = sympify(expr, locals=varset) if normphase: # remove overall phase if sexpr is a list if isinstance(sexpr, list): if sexpr[0].is_number: ophase = sympy.sympify('exp(-I*arg(%s))' % sexpr[0]) sexpr = [sympy.Mul(x, ophase) for x in sexpr] def to_matrix(expr): """ Convert a list, or list of lists to a matrix. """ # if expr is a list of lists, and is rectangular, then return Matrix(expr) if not isinstance(expr, list): return expr for row in expr: if not isinstance(row, list): return expr rdim = len(expr[0]) for row in expr: if not len(row) == rdim: return expr return sympy.Matrix(expr) if matrix: sexpr = to_matrix(sexpr) return sexpr #----------------------------------------------------------------------------- # class for symbolic mathematical formulas class formula(object): """ Representation of a mathematical formula object. Accepts mathml math expression for constructing, and can produce sympy translation. The formula may or may not include an assignment (=). """ def __init__(self, expr, asciimath='', options=None): self.expr = expr.strip() self.asciimath = asciimath self.the_cmathml = None self.the_sympy = None self.options = options def is_presentation_mathml(self): """ Check if formula is in mathml presentation format. """ return '<mstyle' in self.expr def is_mathml(self): """ Check if formula is in mathml format. """ return '<math ' in self.expr def fix_greek_in_mathml(self, xml): """ Recursively fix greek letters in passed in xml. 
""" def gettag(expr): return re.sub('{http://[^}]+}', '', expr.tag) for k in xml: tag = gettag(k) if tag == 'mi' or tag == 'ci': usym = unicode(k.text) try: udata = unicodedata.name(usym) except Exception: # pylint: disable=broad-except udata = None # print "usym = %s, udata=%s" % (usym,udata) if udata: # eg "GREEK SMALL LETTER BETA" if 'GREEK' in udata: usym = udata.split(' ')[-1] if 'SMALL' in udata: usym = usym.lower() #print "greek: ",usym k.text = usym self.fix_greek_in_mathml(k) return xml def preprocess_pmathml(self, xml): r""" Pre-process presentation MathML from ASCIIMathML to make it more acceptable for SnuggleTeX, and also to accomodate some sympy conventions (eg hat(i) for \hat{i}). This method would be a good spot to look for an integral and convert it, if possible... """ if isinstance(xml, (str, unicode)): xml = etree.fromstring(xml) # TODO: wrap in try xml = self.fix_greek_in_mathml(xml) # convert greek utf letters to greek spelled out in ascii def gettag(expr): return re.sub('{http://[^}]+}', '', expr.tag) def fix_pmathml(xml): """ f and g are processed as functions by asciimathml, eg "f-2" turns into "<mrow><mi>f</mi><mo>-</mo></mrow><mn>2</mn>" this is really terrible for turning into cmathml. undo this here. 
""" for k in xml: tag = gettag(k) if tag == 'mrow': if len(k) == 2: if gettag(k[0]) == 'mi' and k[0].text in ['f', 'g'] and gettag(k[1]) == 'mo': idx = xml.index(k) xml.insert(idx, deepcopy(k[0])) # drop the <mrow> container xml.insert(idx + 1, deepcopy(k[1])) xml.remove(k) fix_pmathml(k) fix_pmathml(xml) def fix_hat(xml): """ hat i is turned into <mover><mi>i</mi><mo>^</mo></mover> ; mangle this into <mi>hat(f)</mi> hat i also somtimes turned into <mover><mrow> <mi>j</mi> </mrow><mo>^</mo></mover> """ for k in xml: tag = gettag(k) if tag == 'mover': if len(k) == 2: if gettag(k[0]) == 'mi' and gettag(k[1]) == 'mo' and str(k[1].text) == '^': newk = etree.Element('mi') newk.text = 'hat(%s)' % k[0].text xml.replace(k, newk) if gettag(k[0]) == 'mrow' and gettag(k[0][0]) == 'mi' and \ gettag(k[1]) == 'mo' and str(k[1].text) == '^': newk = etree.Element('mi') newk.text = 'hat(%s)' % k[0][0].text xml.replace(k, newk) fix_hat(k) fix_hat(xml) def flatten_pmathml(xml): """ Give the text version of certain PMathML elements Sometimes MathML will be given with each letter separated (it doesn't know if its implicit multiplication or what). From an xml node, find the (text only) variable name it represents. So it takes <mrow> <mi>m</mi> <mi>a</mi> <mi>x</mi> </mrow> and returns 'max', for easier use later on. """ tag = gettag(xml) if tag == 'mn': return xml.text elif tag == 'mi': return xml.text elif tag == 'mrow': return ''.join([flatten_pmathml(y) for y in xml]) raise Exception('[flatten_pmathml] unknown tag %s' % tag) def fix_mathvariant(parent): """ Fix certain kinds of math variants Literally replace <mstyle mathvariant="script"><mi>N</mi></mstyle> with 'scriptN'. 
There have been problems using script_N or script(N) """ for child in parent: if gettag(child) == 'mstyle' and child.get('mathvariant') == 'script': newchild = etree.Element('mi') newchild.text = 'script%s' % flatten_pmathml(child[0]) parent.replace(child, newchild) fix_mathvariant(child) fix_mathvariant(xml) # find "tagged" superscripts # they have the character \u200b in the superscript # replace them with a__b so snuggle doesn't get confused def fix_superscripts(xml): """ Look for and replace sup elements with 'X__Y' or 'X_Y__Z' In the javascript, variables with '__X' in them had an invisible character inserted into the sup (to distinguish from powers) E.g. normal: <msubsup> <mi>a</mi> <mi>b</mi> <mi>c</mi> </msubsup> to be interpreted '(a_b)^c' (nothing done by this method) And modified: <msubsup> <mi>b</mi> <mi>x</mi> <mrow> <mo>&#x200B;</mo> <mi>d</mi> </mrow> </msubsup> to be interpreted 'a_b__c' also: <msup> <mi>x</mi> <mrow> <mo>&#x200B;</mo> <mi>B</mi> </mrow> </msup> to be 'x__B' """ for k in xml: tag = gettag(k) # match things like the last example-- # the second item in msub is an mrow with the first # character equal to \u200b if ( tag == 'msup' and len(k) == 2 and gettag(k[1]) == 'mrow' and gettag(k[1][0]) == 'mo' and k[1][0].text == u'\u200b' # whew ): # replace the msup with 'X__Y' k[1].remove(k[1][0]) newk = etree.Element('mi') newk.text = '%s__%s' % (flatten_pmathml(k[0]), flatten_pmathml(k[1])) xml.replace(k, newk) # match things like the middle example- # the third item in msubsup is an mrow with the first # character equal to \u200b if ( tag == 'msubsup' and len(k) == 3 and gettag(k[2]) == 'mrow' and gettag(k[2][0]) == 'mo' and k[2][0].text == u'\u200b' # whew ): # replace the msubsup with 'X_Y__Z' k[2].remove(k[2][0]) newk = etree.Element('mi') newk.text = '%s_%s__%s' % (flatten_pmathml(k[0]), flatten_pmathml(k[1]), flatten_pmathml(k[2])) xml.replace(k, newk) fix_superscripts(k) fix_superscripts(xml) def fix_msubsup(parent): """ Snuggle 
returns an error when it sees an <msubsup> replace such elements with an <msup>, except the first element is of the form a_b. I.e. map a_b^c => (a_b)^c """ for child in parent: # fix msubsup if gettag(child) == 'msubsup' and len(child) == 3: newchild = etree.Element('msup') newbase = etree.Element('mi') newbase.text = '%s_%s' % (flatten_pmathml(child[0]), flatten_pmathml(child[1])) newexp = child[2] newchild.append(newbase) newchild.append(newexp) parent.replace(child, newchild) fix_msubsup(child) fix_msubsup(xml) self.xml = xml # pylint: disable=attribute-defined-outside-init return self.xml def get_content_mathml(self): if self.the_cmathml: return self.the_cmathml # pre-process the presentation mathml before sending it to snuggletex to convert to content mathml try: xml = self.preprocess_pmathml(self.expr) except Exception as err: # pylint: disable=broad-except log.warning('Err %s while preprocessing; expr=%s', err, self.expr) return "<html>Error! Cannot process pmathml</html>" pmathml = etree.tostring(xml, pretty_print=True) self.the_pmathml = pmathml # pylint: disable=attribute-defined-outside-init return self.the_pmathml cmathml = property(get_content_mathml, None, None, 'content MathML representation') def make_sympy(self, xml=None): """ Return sympy expression for the math formula. The math formula is converted to Content MathML then that is parsed. This is a recursive function, called on every CMML node. 
Support for more functions can be added by modifying opdict, abould halfway down """ if self.the_sympy: return self.the_sympy if xml is None: # root if not self.is_mathml(): return my_sympify(self.expr) if self.is_presentation_mathml(): cmml = None try: cmml = self.cmathml xml = etree.fromstring(str(cmml)) except Exception, err: if 'conversion from Presentation MathML to Content MathML was not successful' in cmml: msg = "Illegal math expression" else: msg = 'Err %s while converting cmathml to xml; cmml=%s' % (err, cmml) raise Exception(msg) xml = self.fix_greek_in_mathml(xml) self.the_sympy = self.make_sympy(xml[0]) else: xml = etree.fromstring(self.expr) xml = self.fix_greek_in_mathml(xml) self.the_sympy = self.make_sympy(xml[0]) return self.the_sympy def gettag(expr): return re.sub('{http://[^}]+}', '', expr.tag) def op_plus(*args): return args[0] if len(args) == 1 else op_plus(*args[:-1]) + args[-1] def op_times(*args): return reduce(operator.mul, args) def op_minus(*args): if len(args) == 1: return -args[0] if not len(args) == 2: raise Exception('minus given wrong number of arguments!') #return sympy.Add(args[0],-args[1]) return args[0] - args[1] opdict = { 'plus': op_plus, 'divide': operator.div, 'times': op_times, 'minus': op_minus, 'root': sympy.sqrt, 'power': sympy.Pow, 'sin': sympy.sin, 'cos': sympy.cos, 'tan': sympy.tan, 'cot': sympy.cot, 'sinh': sympy.sinh, 'cosh': sympy.cosh, 'coth': sympy.coth, 'tanh': sympy.tanh, 'asin': sympy.asin, 'acos': sympy.acos, 'atan': sympy.atan, 'atan2': sympy.atan2, 'acot': sympy.acot, 'asinh': sympy.asinh, 'acosh': sympy.acosh, 'atanh': sympy.atanh, 'acoth': sympy.acoth, 'exp': sympy.exp, 'log': sympy.log, 'ln': sympy.ln, } def parse_presentation_symbol(xml): """ Parse <msub>, <msup>, <mi>, and <mn> """ tag = gettag(xml) if tag == 'mn': return xml.text elif tag == 'mi': return xml.text elif tag == 'msub': return '_'.join([parse_presentation_symbol(y) for y in xml]) elif tag == 'msup': return 
'^'.join([parse_presentation_symbol(y) for y in xml]) raise Exception('[parse_presentation_symbol] unknown tag %s' % tag) # parser tree for Content MathML tag = gettag(xml) # first do compound objects if tag == 'apply': # apply operator opstr = gettag(xml[0]) if opstr in opdict: op = opdict[opstr] # pylint: disable=invalid-name args = [self.make_sympy(expr) for expr in xml[1:]] try: res = op(*args) except Exception, err: self.args = args # pylint: disable=attribute-defined-outside-init self.op = op # pylint: disable=attribute-defined-outside-init, invalid-name raise Exception('[formula] error=%s failed to apply %s to args=%s' % (err, opstr, args)) return res else: raise Exception('[formula]: unknown operator tag %s' % (opstr)) elif tag == 'list': # square bracket list if gettag(xml[0]) == 'matrix': return self.make_sympy(xml[0]) else: return [self.make_sympy(expr) for expr in xml] elif tag == 'matrix': return sympy.Matrix([self.make_sympy(expr) for expr in xml]) elif tag == 'vector': return [self.make_sympy(expr) for expr in xml] # atoms are below elif tag == 'cn': # number return sympy.sympify(xml.text) elif tag == 'ci': # variable (symbol) if len(xml) > 0 and (gettag(xml[0]) == 'msub' or gettag(xml[0]) == 'msup'): # subscript or superscript usym = parse_presentation_symbol(xml[0]) sym = sympy.Symbol(str(usym)) else: usym = unicode(xml.text) if 'hat' in usym: sym = my_sympify(usym) else: if usym == 'i' and self.options is not None and 'imaginary' in self.options: # i = sqrt(-1) sym = sympy.I else: sym = sympy.Symbol(str(usym)) return sym else: # unknown tag raise Exception('[formula] unknown tag %s' % tag) sympy = property(make_sympy, None, None, 'sympy representation')
agpl-3.0
mezz64/home-assistant
script/scaffold/templates/config_flow/integration/config_flow.py
8
2944
"""Config flow for NEW_NAME integration.""" import logging import voluptuous as vol from homeassistant import config_entries, core, exceptions from .const import DOMAIN # pylint:disable=unused-import _LOGGER = logging.getLogger(__name__) # TODO adjust the data schema to the data that you need STEP_USER_DATA_SCHEMA = vol.Schema({"host": str, "username": str, "password": str}) class PlaceholderHub: """Placeholder class to make tests pass. TODO Remove this placeholder class and replace with things from your PyPI package. """ def __init__(self, host): """Initialize.""" self.host = host async def authenticate(self, username, password) -> bool: """Test if we can authenticate with the host.""" return True async def validate_input(hass: core.HomeAssistant, data): """Validate the user input allows us to connect. Data has the keys from STEP_USER_DATA_SCHEMA with values provided by the user. """ # TODO validate the data can be used to set up a connection. # If your PyPI package is not built with async, pass your methods # to the executor: # await hass.async_add_executor_job( # your_validate_func, data["username"], data["password"] # ) hub = PlaceholderHub(data["host"]) if not await hub.authenticate(data["username"], data["password"]): raise InvalidAuth # If you cannot connect: # throw CannotConnect # If the authentication is wrong: # InvalidAuth # Return info that you want to store in the config entry. 
return {"title": "Name of the device"} class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): """Handle a config flow for NEW_NAME.""" VERSION = 1 # TODO pick one of the available connection classes in homeassistant/config_entries.py CONNECTION_CLASS = config_entries.CONN_CLASS_UNKNOWN async def async_step_user(self, user_input=None): """Handle the initial step.""" if user_input is None: return self.async_show_form( step_id="user", data_schema=STEP_USER_DATA_SCHEMA ) errors = {} try: info = await validate_input(self.hass, user_input) except CannotConnect: errors["base"] = "cannot_connect" except InvalidAuth: errors["base"] = "invalid_auth" except Exception: # pylint: disable=broad-except _LOGGER.exception("Unexpected exception") errors["base"] = "unknown" else: return self.async_create_entry(title=info["title"], data=user_input) return self.async_show_form( step_id="user", data_schema=STEP_USER_DATA_SCHEMA, errors=errors ) class CannotConnect(exceptions.HomeAssistantError): """Error to indicate we cannot connect.""" class InvalidAuth(exceptions.HomeAssistantError): """Error to indicate there is invalid auth."""
apache-2.0
DiCarloLab-Delft/PycQED_py3
pycqed/measurement/mc_parameter_wrapper.py
1
4827
'''
Module containing functions that wrap a QCodes parameter into a sweep or
detector function.
'''
import qcodes as qc
from pycqed.measurement import sweep_functions as swf
from pycqed.measurement import detector_functions as det


def wrap_par_to_swf(parameter, retrieve_value=False):
    '''
    Wrap a single QCoDeS Parameter into a PycQED soft sweep function.

    Args:
        parameter: QCoDeS Parameter instance to wrap.
        retrieve_value (bool): if True, read the parameter back after every
            set (useful when the instrument rounds or coerces set values).

    Returns:
        swf.Sweep_function: soft sweep function delegating to the parameter.
    '''
    sweep_function = swf.Sweep_function()
    sweep_function.sweep_control = 'soft'
    sweep_function.name = parameter.name
    sweep_function.parameter_name = parameter.label
    sweep_function.unit = parameter.unit
    # A bare parameter needs no hardware preparation or cleanup.
    sweep_function.prepare = pass_function
    sweep_function.finish = pass_function
    if retrieve_value:
        def set_par(val):
            parameter.set(val)
            parameter.get()  # read back so the cached value is current
        sweep_function.set_parameter = set_par
    else:
        sweep_function.set_parameter = parameter.set
    sweep_function.get = parameter.get
    return sweep_function


def wrap_pars_to_swf(parameters, retrieve_value=False):
    '''
    Wrap a list of QCoDeS Parameters into one soft sweep function that sets
    all of them to the same value on each sweep point.

    The sweep function's name/label/unit are taken from the first parameter.

    Note: an earlier, dead duplicate of this function (which referenced the
    undefined names ``parameter`` and ``index`` and was shadowed by this
    definition) has been removed.
    '''
    sweep_function = swf.Sweep_function()
    sweep_function.sweep_control = 'soft'
    sweep_function.name = parameters[0].name
    sweep_function.parameter_name = parameters[0].label
    sweep_function.unit = parameters[0].unit
    sweep_function.prepare = pass_function
    sweep_function.finish = pass_function

    def set_par(val):
        for par in parameters:
            par.set(val)
            if retrieve_value:
                par.get()

    sweep_function.set_parameter = set_par
    return sweep_function


def wrap_par_to_det(parameter, control='soft'):
    '''
    Takes in a QCoDeS Parameter instance and returns a PycQED
    DetectorFunction that wraps around the Parameter.

    The following attributes of the QCoDes parameter are used
        par.name    ->  detector.name
        par.label   ->  detector.value_names (either string or list of strings)
        par.unit    ->  detector.value_units
        par.get     ->  detector.acquire_data_point
                    ->  detector.get_values

    The following attributes are not taken from the parameter
        det.prepare             <- pass_function
        det.finish              <- pass_function
        det.detector_control    <- input argument of this function
    '''
    detector_function = det.Detector_Function()
    detector_function.detector_control = control
    detector_function.name = parameter.name
    # The label may describe one value or (as a list) several values.
    if isinstance(parameter.label, list):
        detector_function.value_names = parameter.label
        detector_function.value_units = parameter.unit
    else:
        detector_function.value_names = [parameter.label]
        detector_function.value_units = [parameter.unit]

    detector_function.prepare = pass_function
    detector_function.finish = pass_function
    detector_function.acquire_data_point = parameter.get
    detector_function.get_values = parameter.get
    return detector_function


def pass_function(**kw):
    '''No-op placeholder for prepare/finish hooks.'''
    pass


def wrap_func_to_det(func, name, value_names, units, control='soft', **kw):
    '''
    Wrap a plain callable into a PycQED detector function.

    Args:
        func: callable invoked (with **kw) to acquire a data point.
        name (str): detector name.
        value_names (list): names of the returned values.
        units (list): units of the returned values.
        control (str): detector control mode ('soft' by default).
        **kw: fixed keyword arguments passed to ``func`` on every call.
    '''
    detector_function = det.Detector_Function()
    detector_function.detector_control = control
    detector_function.name = name
    detector_function.value_names = value_names
    detector_function.value_units = units

    detector_function.prepare = pass_function
    detector_function.finish = pass_function

    def wrapped_func():
        return func(**kw)

    detector_function.acquire_data_point = wrapped_func
    detector_function.get_values = wrapped_func
    return detector_function


def wrap_par_remainder(par, remainder=1):
    '''
    Return a new Parameter whose set() wraps values modulo ``remainder``
    before delegating to ``par`` (and reads back after setting).
    '''
    new_par = qc.Parameter(name=par.name, label=par.label, unit=par.unit)

    def wrap_set(val):
        val = val % remainder
        par.set(val)
        par.get()
    new_par.set = wrap_set
    return new_par


def wrap_par_set_get(par):
    '''
    Return a new Parameter whose set() also issues a get(), forcing a
    read-back of the instrument value after every set.
    '''
    new_par = qc.Parameter(name=par.name, label=par.label, unit=par.unit)

    def wrap_set(val):
        par.set(val)
        par.get()
    new_par.set = wrap_set
    return new_par
mit
jamesblunt/edx-platform
common/lib/xmodule/xmodule/tests/xml/__init__.py
110
2464
""" Xml parsing tests for XModules """ import pprint from lxml import etree from mock import Mock from unittest import TestCase from xmodule.x_module import XMLParsingSystem, policy_key from xmodule.mako_module import MakoDescriptorSystem from xmodule.modulestore.xml import CourseLocationManager from opaque_keys.edx.locations import SlashSeparatedCourseKey, Location from xblock.runtime import KvsFieldData, DictKeyValueStore class InMemorySystem(XMLParsingSystem, MakoDescriptorSystem): # pylint: disable=abstract-method """ The simplest possible XMLParsingSystem """ def __init__(self, xml_import_data): self.course_id = SlashSeparatedCourseKey.from_deprecated_string(xml_import_data.course_id) self.default_class = xml_import_data.default_class self._descriptors = {} def get_policy(usage_id): """Return the policy data for the specified usage""" return xml_import_data.policy.get(policy_key(usage_id), {}) super(InMemorySystem, self).__init__( get_policy=get_policy, process_xml=self.process_xml, load_item=self.load_item, error_tracker=Mock(), resources_fs=xml_import_data.filesystem, mixins=xml_import_data.xblock_mixins, select=xml_import_data.xblock_select, render_template=lambda template, context: pprint.pformat((template, context)), field_data=KvsFieldData(DictKeyValueStore()), ) def process_xml(self, xml): # pylint: disable=method-hidden """Parse `xml` as an XBlock, and add it to `self._descriptors`""" descriptor = self.xblock_from_node( etree.fromstring(xml), None, CourseLocationManager(self.course_id), ) self._descriptors[descriptor.location.to_deprecated_string()] = descriptor return descriptor def load_item(self, location, for_parent=None): # pylint: disable=method-hidden, unused-argument """Return the descriptor loaded for `location`""" return self._descriptors[location.to_deprecated_string()] class XModuleXmlImportTest(TestCase): """Base class for tests that use basic XML parsing""" def process_xml(self, xml_import_data): """Use the `xml_import_data` to import an 
:class:`XBlock` from XML.""" system = InMemorySystem(xml_import_data) return system.process_xml(xml_import_data.xml_string)
agpl-3.0
CiscoSystems/tempest
tempest/api/image/v1/test_image_members.py
4
2033
# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.api.image import base from tempest import test class ImageMembersTest(base.BaseV1ImageMembersTest): @test.attr(type='gate') def test_add_image_member(self): image = self._create_image() self.client.add_member(self.alt_tenant_id, image) body = self.client.get_image_membership(image) members = body['members'] members = map(lambda x: x['member_id'], members) self.assertIn(self.alt_tenant_id, members) # get image as alt user self.alt_img_cli.get_image(image) @test.attr(type='gate') def test_get_shared_images(self): image = self._create_image() self.client.add_member(self.alt_tenant_id, image) share_image = self._create_image() self.client.add_member(self.alt_tenant_id, share_image) body = self.client.get_shared_images(self.alt_tenant_id) images = body['shared_images'] images = map(lambda x: x['image_id'], images) self.assertIn(share_image, images) self.assertIn(image, images) @test.attr(type='gate') def test_remove_member(self): image_id = self._create_image() self.client.add_member(self.alt_tenant_id, image_id) self.client.delete_member(self.alt_tenant_id, image_id) body = self.client.get_image_membership(image_id) members = body['members'] self.assertEqual(0, len(members), str(members))
apache-2.0
v1bri/gnuradio
gr-digital/python/digital/qa_simple_correlator.py
57
2151
#!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#

from gnuradio import gr, gr_unittest, blocks, filter, digital


class test_simple_correlator(gr_unittest.TestCase):
    """QA for digital.simple_correlator: frame, oversample, correlate, recover."""

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_00(self):
        expected_result = (
            0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
            0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff)

        # Interpolating filter taps: oversample by 8 using a root-raised-cosine
        # purely to give the expanded waveform some basic pulse shape.
        rrc_taps = filter.firdes.root_raised_cosine(8, 8, 1.0, 0.5, 21)

        # Transmit side: frame the bytes, unpack to bits, map to +/-1 floats,
        # then interpolate by 8.
        source = blocks.vector_source_b(expected_result)
        framer = digital.simple_framer(4)
        unpacker = blocks.packed_to_unpacked_bb(1, gr.GR_MSB_FIRST)
        to_float = blocks.char_to_float()
        doubler = blocks.multiply_const_ff(2)
        offset = blocks.add_const_ff(-1)
        interp = filter.interp_fir_filter_fff(8, rrc_taps)

        # Receive side: the correlator should recover the original payload.
        correlator = digital.simple_correlator(4)
        sink = blocks.vector_sink_b()

        self.tb.connect(source, framer, unpacker, to_float, doubler, offset, interp)
        self.tb.connect(interp, correlator, sink)
        self.tb.run()

        self.assertEqual(expected_result, sink.data())


if __name__ == '__main__':
    gr_unittest.run(test_simple_correlator, "test_simple_correlator.xml")
gpl-3.0
buntyke/GPy
GPy/core/parameterization/observable_array.py
8
4411
# Copyright (c) 2014, Max Zwiessele
# Licensed under the BSD 3-clause license (see LICENSE.txt)

import numpy as np
from .parameter_core import Pickleable
from .observable import Observable


class ObsAr(np.ndarray, Pickleable, Observable):
    """
    An ndarray which reports changes to its observers.

    The observers can add themselves with a callable, which
    will be called every time this array changes. The callable
    takes exactly one argument, which is this array itself.
    """
    # Lowest priority in mixed ufunc expressions, so results never come back
    # as ObsAr (see __array_wrap__ below).
    __array_priority__ = -1  # Never give back ObsAr

    def __new__(cls, input_array, *a, **kw):
        # always make a copy of input parameters, as we need it to be in C order:
        if not isinstance(input_array, ObsAr):
            # np.require forces a writable, C-contiguous float64 copy;
            # atleast_1d guards against 0-d scalars.
            obj = np.atleast_1d(np.require(input_array, dtype=np.float64, requirements=['W', 'C'])).view(cls)
        else: obj = input_array
        super(ObsAr, obj).__init__(*a, **kw)
        return obj

    def __array_finalize__(self, obj):
        # see InfoArray.__array_finalize__ for comments
        # (numpy subclassing hook: runs for views/slices too, where __new__ does not)
        if obj is None: return
        self.observers = getattr(obj, 'observers', None)

    def __array_wrap__(self, out_arr, context=None):
        # Results of ufuncs are demoted to plain ndarray — only explicit
        # in-place mutation of this array should notify observers.
        return out_arr.view(np.ndarray)

    def _setup_observers(self):
        # do not setup anything, as observable arrays do not have default observers
        pass

    @property
    def values(self):
        # Plain-ndarray view of the underlying data (no copy).
        return self.view(np.ndarray)

    def copy(self):
        # Deep copy that deliberately does NOT copy the observer list:
        # the copy starts with a fresh, empty ObserverList.
        from .lists_and_dicts import ObserverList
        memo = {}
        memo[id(self)] = self
        memo[id(self.observers)] = ObserverList()
        return self.__deepcopy__(memo)

    def __deepcopy__(self, memo):
        # Copy the raw data first, then deep-copy the Pickleable state into it.
        s = self.__new__(self.__class__, input_array=self.view(np.ndarray).copy())
        memo[id(self)] = s
        import copy
        Pickleable.__setstate__(s, copy.deepcopy(self.__getstate__(), memo))
        return s

    def __reduce__(self):
        # Pickle both the ndarray state and the Pickleable (observer) state.
        func, args, state = super(ObsAr, self).__reduce__()
        return func, args, (state, Pickleable.__getstate__(self))

    def __setstate__(self, state):
        # Mirror of __reduce__: state[0] is ndarray state, state[1] is Pickleable state.
        np.ndarray.__setstate__(self, state[0])
        Pickleable.__setstate__(self, state[1])

    def __setitem__(self, s, val):
        # Any item assignment notifies observers.
        super(ObsAr, self).__setitem__(s, val)
        self.notify_observers()

    # Legacy Python 2 slice protocol: route through __getitem__/__setitem__
    # so slice assignment also notifies observers.
    def __getslice__(self, start, stop):
        return self.__getitem__(slice(start, stop))

    def __setslice__(self, start, stop, val):
        return self.__setitem__(slice(start, stop), val)

    # Every in-place (augmented-assignment) operator mutates the array,
    # so each one delegates to ndarray and then notifies observers.
    def __ilshift__(self, *args, **kwargs):
        r = np.ndarray.__ilshift__(self, *args, **kwargs)
        self.notify_observers()
        return r

    def __irshift__(self, *args, **kwargs):
        r = np.ndarray.__irshift__(self, *args, **kwargs)
        self.notify_observers()
        return r

    def __ixor__(self, *args, **kwargs):
        r = np.ndarray.__ixor__(self, *args, **kwargs)
        self.notify_observers()
        return r

    def __ipow__(self, *args, **kwargs):
        r = np.ndarray.__ipow__(self, *args, **kwargs)
        self.notify_observers()
        return r

    def __ifloordiv__(self, *args, **kwargs):
        r = np.ndarray.__ifloordiv__(self, *args, **kwargs)
        self.notify_observers()
        return r

    def __isub__(self, *args, **kwargs):
        r = np.ndarray.__isub__(self, *args, **kwargs)
        self.notify_observers()
        return r

    def __ior__(self, *args, **kwargs):
        r = np.ndarray.__ior__(self, *args, **kwargs)
        self.notify_observers()
        return r

    def __itruediv__(self, *args, **kwargs):
        r = np.ndarray.__itruediv__(self, *args, **kwargs)
        self.notify_observers()
        return r

    def __idiv__(self, *args, **kwargs):
        r = np.ndarray.__idiv__(self, *args, **kwargs)
        self.notify_observers()
        return r

    def __iand__(self, *args, **kwargs):
        r = np.ndarray.__iand__(self, *args, **kwargs)
        self.notify_observers()
        return r

    def __imod__(self, *args, **kwargs):
        r = np.ndarray.__imod__(self, *args, **kwargs)
        self.notify_observers()
        return r

    def __iadd__(self, *args, **kwargs):
        r = np.ndarray.__iadd__(self, *args, **kwargs)
        self.notify_observers()
        return r

    def __imul__(self, *args, **kwargs):
        r = np.ndarray.__imul__(self, *args, **kwargs)
        self.notify_observers()
        return r
mit
Azure/azure-sdk-for-python
sdk/digitaltwins/azure-digitaltwins-core/azure/digitaltwins/core/_generated/operations/_digital_twin_models_operations.py
1
22385
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
# NOTE(review): AutoRest-generated operations class — do not hand-edit logic;
# regenerate from the service spec instead.
from typing import TYPE_CHECKING
import warnings

from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse

from .. import models as _models

if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar
    T = TypeVar('T')
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

class DigitalTwinModelsOperations(object):
    """DigitalTwinModelsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance
    that instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.digitaltwins.core.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def add(
        self,
        models=None,  # type: Optional[List[object]]
        digital_twin_models_add_options=None,  # type: Optional["_models.DigitalTwinModelsAddOptions"]
        **kwargs  # type: Any
    ):
        # type: (...) -> List["_models.DigitalTwinsModelData"]
        """Uploads one or more models. When any error occurs, no models are uploaded.

        Status codes:

        * 201 Created
        * 400 Bad Request

          * DTDLParserError - The models provided are not valid DTDL.
          * InvalidArgument - The model id is invalid.
          * LimitExceeded - The maximum number of model ids allowed in 'dependenciesFor' has been
            reached.
          * ModelVersionNotSupported - The version of DTDL used is not supported.

        * 409 Conflict

          * ModelAlreadyExists - The model provided already exists.

        :param models: An array of models to add.
        :type models: list[object]
        :param digital_twin_models_add_options: Parameter group.
        :type digital_twin_models_add_options: ~azure.digitaltwins.core.models.DigitalTwinModelsAddOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of DigitalTwinsModelData, or the result of cls(response)
        :rtype: list[~azure.digitaltwins.core.models.DigitalTwinsModelData]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.DigitalTwinsModelData"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Flatten the optional parameter group into the tracing headers.
        _traceparent = None
        _tracestate = None
        if digital_twin_models_add_options is not None:
            _traceparent = digital_twin_models_add_options.traceparent
            _tracestate = digital_twin_models_add_options.tracestate
        api_version = "2020-10-31"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.add.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if _traceparent is not None:
            header_parameters['traceparent'] = self._serialize.header("traceparent", _traceparent, 'str')
        if _tracestate is not None:
            header_parameters['tracestate'] = self._serialize.header("tracestate", _tracestate, 'str')
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        if models is not None:
            body_content = self._serialize.body(models, '[object]')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('[DigitalTwinsModelData]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    add.metadata = {'url': '/models'}  # type: ignore

    def list(
        self,
        dependencies_for=None,  # type: Optional[List[str]]
        include_model_definition=False,  # type: Optional[bool]
        digital_twin_models_list_options=None,  # type: Optional["_models.DigitalTwinModelsListOptions"]
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.PagedDigitalTwinsModelDataCollection"]
        """Retrieves model metadata and, optionally, model definitions.

        Status codes:

        * 200 OK
        * 400 Bad Request

          * InvalidArgument - The model id is invalid.
          * LimitExceeded - The maximum number of model ids allowed in 'dependenciesFor' has been
            reached.

        * 404 Not Found

          * ModelNotFound - The model was not found.

        :param dependencies_for: The set of the models which will have their dependencies retrieved. If
         omitted, all models are retrieved.
        :type dependencies_for: list[str]
        :param include_model_definition: When true the model definition will be returned as part of the
         result.
        :type include_model_definition: bool
        :param digital_twin_models_list_options: Parameter group.
        :type digital_twin_models_list_options: ~azure.digitaltwins.core.models.DigitalTwinModelsListOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PagedDigitalTwinsModelDataCollection or the result
         of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.digitaltwins.core.models.PagedDigitalTwinsModelDataCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PagedDigitalTwinsModelDataCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Flatten the optional parameter group (tracing headers + page size).
        _traceparent = None
        _tracestate = None
        _max_items_per_page = None
        if digital_twin_models_list_options is not None:
            _traceparent = digital_twin_models_list_options.traceparent
            _tracestate = digital_twin_models_list_options.tracestate
            _max_items_per_page = digital_twin_models_list_options.max_items_per_page
        api_version = "2020-10-31"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            if _traceparent is not None:
                header_parameters['traceparent'] = self._serialize.header("traceparent", _traceparent, 'str')
            if _tracestate is not None:
                header_parameters['tracestate'] = self._serialize.header("tracestate", _tracestate, 'str')
            if _max_items_per_page is not None:
                header_parameters['max-items-per-page'] = self._serialize.header("max_items_per_page", _max_items_per_page, 'int')
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # First page: build the URL and query string.
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                if dependencies_for is not None:
                    query_parameters['dependenciesFor'] = [self._serialize.query("dependencies_for", q, 'str') if q is not None else '' for q in dependencies_for]
                if include_model_definition is not None:
                    query_parameters['includeModelDefinition'] = self._serialize.query("include_model_definition", include_model_definition, 'bool')
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Subsequent pages: the continuation link already carries the query.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and hand back (continuation token, items).
            deserialized = self._deserialize('PagedDigitalTwinsModelDataCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/models'}  # type: ignore

    def get_by_id(
        self,
        id,  # type: str
        include_model_definition=False,  # type: Optional[bool]
        digital_twin_models_get_by_id_options=None,  # type: Optional["_models.DigitalTwinModelsGetByIdOptions"]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.DigitalTwinsModelData"
        """Retrieves model metadata and optionally the model definition.

        Status codes:

        * 200 OK
        * 400 Bad Request

          * InvalidArgument - The model id is invalid.
          * MissingArgument - The model id was not provided.

        * 404 Not Found

          * ModelNotFound - The model was not found.

        :param id: The id for the model. The id is globally unique and case sensitive.
        :type id: str
        :param include_model_definition: When true the model definition will be returned as part of the
         result.
        :type include_model_definition: bool
        :param digital_twin_models_get_by_id_options: Parameter group.
        :type digital_twin_models_get_by_id_options: ~azure.digitaltwins.core.models.DigitalTwinModelsGetByIdOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DigitalTwinsModelData, or the result of cls(response)
        :rtype: ~azure.digitaltwins.core.models.DigitalTwinsModelData
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DigitalTwinsModelData"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        _traceparent = None
        _tracestate = None
        if digital_twin_models_get_by_id_options is not None:
            _traceparent = digital_twin_models_get_by_id_options.traceparent
            _tracestate = digital_twin_models_get_by_id_options.tracestate
        api_version = "2020-10-31"
        accept = "application/json"

        # Construct URL
        url = self.get_by_id.metadata['url']  # type: ignore
        path_format_arguments = {
            'id': self._serialize.url("id", id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        if include_model_definition is not None:
            query_parameters['includeModelDefinition'] = self._serialize.query("include_model_definition", include_model_definition, 'bool')
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if _traceparent is not None:
            header_parameters['traceparent'] = self._serialize.header("traceparent", _traceparent, 'str')
        if _tracestate is not None:
            header_parameters['tracestate'] = self._serialize.header("tracestate", _tracestate, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('DigitalTwinsModelData', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_by_id.metadata = {'url': '/models/{id}'}  # type: ignore

    def update(
        self,
        id,  # type: str
        update_model,  # type: List[object]
        digital_twin_models_update_options=None,  # type: Optional["_models.DigitalTwinModelsUpdateOptions"]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Updates the metadata for a model.

        Status codes:

        * 204 No Content
        * 400 Bad Request

          * InvalidArgument - The model id is invalid.
          * JsonPatchInvalid - The JSON Patch provided is invalid.
          * MissingArgument - The model id was not provided.

        * 404 Not Found

          * ModelNotFound - The model was not found.

        * 409 Conflict

          * ModelReferencesNotDecommissioned - The model refers to models that are not decommissioned.

        :param id: The id for the model. The id is globally unique and case sensitive.
        :type id: str
        :param update_model: An update specification described by JSON Patch. Only the decommissioned
         property can be replaced.
        :type update_model: list[object]
        :param digital_twin_models_update_options: Parameter group.
        :type digital_twin_models_update_options: ~azure.digitaltwins.core.models.DigitalTwinModelsUpdateOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        _traceparent = None
        _tracestate = None
        if digital_twin_models_update_options is not None:
            _traceparent = digital_twin_models_update_options.traceparent
            _tracestate = digital_twin_models_update_options.tracestate
        api_version = "2020-10-31"
        # JSON Patch body, hence the json-patch media type.
        content_type = kwargs.pop("content_type", "application/json-patch+json")
        accept = "application/json"

        # Construct URL
        url = self.update.metadata['url']  # type: ignore
        path_format_arguments = {
            'id': self._serialize.url("id", id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if _traceparent is not None:
            header_parameters['traceparent'] = self._serialize.header("traceparent", _traceparent, 'str')
        if _tracestate is not None:
            header_parameters['tracestate'] = self._serialize.header("tracestate", _tracestate, 'str')
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(update_model, '[object]')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    update.metadata = {'url': '/models/{id}'}  # type: ignore

    def delete(
        self,
        id,  # type: str
        digital_twin_models_delete_options=None,  # type: Optional["_models.DigitalTwinModelsDeleteOptions"]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Deletes a model. A model can only be deleted if no other models reference it.

        Status codes:

        * 204 No Content
        * 400 Bad Request

          * InvalidArgument - The model id is invalid.
          * MissingArgument - The model id was not provided.

        * 404 Not Found

          * ModelNotFound - The model was not found.

        * 409 Conflict

          * ModelReferencesNotDeleted - The model refers to models that are not deleted.

        :param id: The id for the model. The id is globally unique and case sensitive.
        :type id: str
        :param digital_twin_models_delete_options: Parameter group.
        :type digital_twin_models_delete_options: ~azure.digitaltwins.core.models.DigitalTwinModelsDeleteOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        _traceparent = None
        _tracestate = None
        if digital_twin_models_delete_options is not None:
            _traceparent = digital_twin_models_delete_options.traceparent
            _tracestate = digital_twin_models_delete_options.tracestate
        api_version = "2020-10-31"
        accept = "application/json"

        # Construct URL
        url = self.delete.metadata['url']  # type: ignore
        path_format_arguments = {
            'id': self._serialize.url("id", id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if _traceparent is not None:
            header_parameters['traceparent'] = self._serialize.header("traceparent", _traceparent, 'str')
        if _tracestate is not None:
            header_parameters['tracestate'] = self._serialize.header("tracestate", _tracestate, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {'url': '/models/{id}'}  # type: ignore
mit
bjko/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/thread/messagepump.py
151
2482
# Copyright (c) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
class MessagePumpDelegate(object):
    """Interface a MessagePump client must implement to receive messages."""

    def schedule(self, interval, callback):
        """Arrange for `callback` to be invoked after `interval` seconds."""
        # Python 3 compatible raise syntax (the old
        # `raise NotImplementedError, "..."` form is a SyntaxError on Py3).
        raise NotImplementedError("subclasses must implement")

    def message_available(self, message):
        """Called once for each message delivered by the pump."""
        raise NotImplementedError("subclasses must implement")

    def final_message_delivered(self):
        """Called after the last message, when the queue reports it stopped."""
        raise NotImplementedError("subclasses must implement")


class MessagePump(object):
    """Periodically drains a message queue and delivers each message to a
    delegate, rescheduling itself until the queue reports it has stopped."""

    interval = 10  # seconds

    def __init__(self, delegate, message_queue):
        self._delegate = delegate
        self._message_queue = message_queue
        # Kick off the first poll immediately on construction.
        self._schedule()

    def _schedule(self):
        """Ask the delegate to call us back after `interval` seconds."""
        self._delegate.schedule(self.interval, self._callback)

    def _callback(self):
        """Drain the queue, deliver messages, and reschedule if still running."""
        (messages, is_running) = self._message_queue.take_all()
        for message in messages:
            self._delegate.message_available(message)
        if not is_running:
            # Queue has shut down: notify once and stop rescheduling.
            self._delegate.final_message_delivered()
            return
        self._schedule()
bsd-3-clause
virtmerlin/go-buildpack
vendor/virtualenv-1.11.6/virtualenv.py
33
98477
#!/usr/bin/env python """Create a "virtual" Python installation """ __version__ = "1.11.6" virtualenv_version = __version__ # legacy import base64 import sys import os import codecs import optparse import re import shutil import logging import tempfile import zlib import errno import glob import distutils.sysconfig from distutils.util import strtobool import struct import subprocess import tarfile if sys.version_info < (2, 6): print('ERROR: %s' % sys.exc_info()[1]) print('ERROR: this script requires Python 2.6 or greater.') sys.exit(101) try: set except NameError: from sets import Set as set try: basestring except NameError: basestring = str try: import ConfigParser except ImportError: import configparser as ConfigParser join = os.path.join py_version = 'python%s.%s' % (sys.version_info[0], sys.version_info[1]) is_jython = sys.platform.startswith('java') is_pypy = hasattr(sys, 'pypy_version_info') is_win = (sys.platform == 'win32') is_cygwin = (sys.platform == 'cygwin') is_darwin = (sys.platform == 'darwin') abiflags = getattr(sys, 'abiflags', '') user_dir = os.path.expanduser('~') if is_win: default_storage_dir = os.path.join(user_dir, 'virtualenv') else: default_storage_dir = os.path.join(user_dir, '.virtualenv') default_config_file = os.path.join(default_storage_dir, 'virtualenv.ini') if is_pypy: expected_exe = 'pypy' elif is_jython: expected_exe = 'jython' else: expected_exe = 'python' # Return a mapping of version -> Python executable # Only provided for Windows, where the information in the registry is used if not is_win: def get_installed_pythons(): return {} else: try: import winreg except ImportError: import _winreg as winreg def get_installed_pythons(): python_core = winreg.CreateKey(winreg.HKEY_LOCAL_MACHINE, "Software\\Python\\PythonCore") i = 0 versions = [] while True: try: versions.append(winreg.EnumKey(python_core, i)) i = i + 1 except WindowsError: break exes = dict() for ver in versions: path = winreg.QueryValue(python_core, "%s\\InstallPath" % 
ver) exes[ver] = join(path, "python.exe") winreg.CloseKey(python_core) # Add the major versions # Sort the keys, then repeatedly update the major version entry # Last executable (i.e., highest version) wins with this approach for ver in sorted(exes): exes[ver[0]] = exes[ver] return exes REQUIRED_MODULES = ['os', 'posix', 'posixpath', 'nt', 'ntpath', 'genericpath', 'fnmatch', 'locale', 'encodings', 'codecs', 'stat', 'UserDict', 'readline', 'copy_reg', 'types', 're', 'sre', 'sre_parse', 'sre_constants', 'sre_compile', 'zlib'] REQUIRED_FILES = ['lib-dynload', 'config'] majver, minver = sys.version_info[:2] if majver == 2: if minver >= 6: REQUIRED_MODULES.extend(['warnings', 'linecache', '_abcoll', 'abc']) if minver >= 7: REQUIRED_MODULES.extend(['_weakrefset']) if minver <= 3: REQUIRED_MODULES.extend(['sets', '__future__']) elif majver == 3: # Some extra modules are needed for Python 3, but different ones # for different versions. REQUIRED_MODULES.extend(['_abcoll', 'warnings', 'linecache', 'abc', 'io', '_weakrefset', 'copyreg', 'tempfile', 'random', '__future__', 'collections', 'keyword', 'tarfile', 'shutil', 'struct', 'copy', 'tokenize', 'token', 'functools', 'heapq', 'bisect', 'weakref', 'reprlib']) if minver >= 2: REQUIRED_FILES[-1] = 'config-%s' % majver if minver >= 3: import sysconfig platdir = sysconfig.get_config_var('PLATDIR') REQUIRED_FILES.append(platdir) # The whole list of 3.3 modules is reproduced below - the current # uncommented ones are required for 3.3 as of now, but more may be # added as 3.3 development continues. 
REQUIRED_MODULES.extend([ #"aifc", #"antigravity", #"argparse", #"ast", #"asynchat", #"asyncore", "base64", #"bdb", #"binhex", #"bisect", #"calendar", #"cgi", #"cgitb", #"chunk", #"cmd", #"codeop", #"code", #"colorsys", #"_compat_pickle", #"compileall", #"concurrent", #"configparser", #"contextlib", #"cProfile", #"crypt", #"csv", #"ctypes", #"curses", #"datetime", #"dbm", #"decimal", #"difflib", #"dis", #"doctest", #"dummy_threading", "_dummy_thread", #"email", #"filecmp", #"fileinput", #"formatter", #"fractions", #"ftplib", #"functools", #"getopt", #"getpass", #"gettext", #"glob", #"gzip", "hashlib", #"heapq", "hmac", #"html", #"http", #"idlelib", #"imaplib", #"imghdr", "imp", "importlib", #"inspect", #"json", #"lib2to3", #"logging", #"macpath", #"macurl2path", #"mailbox", #"mailcap", #"_markupbase", #"mimetypes", #"modulefinder", #"multiprocessing", #"netrc", #"nntplib", #"nturl2path", #"numbers", #"opcode", #"optparse", #"os2emxpath", #"pdb", #"pickle", #"pickletools", #"pipes", #"pkgutil", #"platform", #"plat-linux2", #"plistlib", #"poplib", #"pprint", #"profile", #"pstats", #"pty", #"pyclbr", #"py_compile", #"pydoc_data", #"pydoc", #"_pyio", #"queue", #"quopri", #"reprlib", "rlcompleter", #"runpy", #"sched", #"shelve", #"shlex", #"smtpd", #"smtplib", #"sndhdr", #"socket", #"socketserver", #"sqlite3", #"ssl", #"stringprep", #"string", #"_strptime", #"subprocess", #"sunau", #"symbol", #"symtable", #"sysconfig", #"tabnanny", #"telnetlib", #"test", #"textwrap", #"this", #"_threading_local", #"threading", #"timeit", #"tkinter", #"tokenize", #"token", #"traceback", #"trace", #"tty", #"turtledemo", #"turtle", #"unittest", #"urllib", #"uuid", #"uu", #"wave", #"weakref", #"webbrowser", #"wsgiref", #"xdrlib", #"xml", #"xmlrpc", #"zipfile", ]) if minver >= 4: REQUIRED_MODULES.extend([ 'operator', '_collections_abc', '_bootlocale', ]) if is_pypy: # these are needed to correctly display the exceptions that may happen # during the bootstrap 
    REQUIRED_MODULES.extend(['traceback', 'linecache'])


class Logger(object):

    """
    Logging object for use in command-line script.  Allows ranges of
    levels, to avoid some redundancy of displayed information.
    """

    DEBUG = logging.DEBUG
    INFO = logging.INFO
    NOTIFY = (logging.INFO+logging.WARN)/2
    WARN = WARNING = logging.WARN
    ERROR = logging.ERROR
    FATAL = logging.FATAL

    # Ordered list used by level_for_integer() to map a verbosity int to
    # one of the levels above.
    LEVELS = [DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]

    def __init__(self, consumers):
        # consumers: list of (level, consumer) pairs; a consumer is either a
        # writable file-like object or a callable receiving the rendered line.
        self.consumers = consumers
        self.indent = 0
        self.in_progress = None
        self.in_progress_hanging = False

    def debug(self, msg, *args, **kw):
        self.log(self.DEBUG, msg, *args, **kw)

    def info(self, msg, *args, **kw):
        self.log(self.INFO, msg, *args, **kw)

    def notify(self, msg, *args, **kw):
        self.log(self.NOTIFY, msg, *args, **kw)

    def warn(self, msg, *args, **kw):
        self.log(self.WARN, msg, *args, **kw)

    def error(self, msg, *args, **kw):
        self.log(self.ERROR, msg, *args, **kw)

    def fatal(self, msg, *args, **kw):
        self.log(self.FATAL, msg, *args, **kw)

    def log(self, level, msg, *args, **kw):
        """Render msg (with %-args or %-kwargs) and send it to every
        consumer whose configured level matches ``level``."""
        if args:
            if kw:
                raise TypeError(
                    "You may give positional or keyword arguments, not both")
        args = args or kw
        rendered = None
        for consumer_level, consumer in self.consumers:
            if self.level_matches(level, consumer_level):
                if (self.in_progress_hanging
                        and consumer in (sys.stdout, sys.stderr)):
                    # Close out a hanging "...'' progress line before writing.
                    self.in_progress_hanging = False
                    sys.stdout.write('\n')
                    sys.stdout.flush()
                if rendered is None:
                    # Render lazily, only once a consumer actually wants it.
                    if args:
                        rendered = msg % args
                    else:
                        rendered = msg
                    rendered = ' '*self.indent + rendered
                if hasattr(consumer, 'write'):
                    consumer.write(rendered+'\n')
                else:
                    consumer(rendered)

    def start_progress(self, msg):
        assert not self.in_progress, (
            "Tried to start_progress(%r) while in_progress %r"
            % (msg, self.in_progress))
        if self.level_matches(self.NOTIFY, self._stdout_level()):
            sys.stdout.write(msg)
            sys.stdout.flush()
            self.in_progress_hanging = True
        else:
            self.in_progress_hanging = False
        self.in_progress = msg

    def end_progress(self, msg='done.'):
        assert self.in_progress, (
            "Tried to end_progress without start_progress")
        if self.stdout_level_matches(self.NOTIFY):
            if not self.in_progress_hanging:
                # Some message has been printed out since start_progress
                sys.stdout.write('...' + self.in_progress + msg + '\n')
                sys.stdout.flush()
            else:
                sys.stdout.write(msg + '\n')
                sys.stdout.flush()
        self.in_progress = None
        self.in_progress_hanging = False

    def show_progress(self):
        """If we are in a progress scope, and no log messages have been
        shown, write out another '.'"""
        if self.in_progress_hanging:
            sys.stdout.write('.')
            sys.stdout.flush()

    def stdout_level_matches(self, level):
        """Returns true if a message at this level will go to stdout"""
        return self.level_matches(level, self._stdout_level())

    def _stdout_level(self):
        """Returns the level that stdout runs at"""
        for level, consumer in self.consumers:
            if consumer is sys.stdout:
                return level
        return self.FATAL

    def level_matches(self, level, consumer_level):
        """
        >>> l = Logger([])
        >>> l.level_matches(3, 4)
        False
        >>> l.level_matches(3, 2)
        True
        >>> l.level_matches(slice(None, 3), 3)
        False
        >>> l.level_matches(slice(None, 3), 2)
        True
        >>> l.level_matches(slice(1, 3), 1)
        True
        >>> l.level_matches(slice(2, 3), 1)
        False
        """
        if isinstance(level, slice):
            start, stop = level.start, level.stop
            if start is not None and start > consumer_level:
                return False
            if stop is not None and stop <= consumer_level:
                return False
            return True
        else:
            return level >= consumer_level

    #@classmethod  (pre-2.4 decorator syntax; see assignment below)
    def level_for_integer(cls, level):
        """Clamp an integer verbosity into LEVELS and return that level."""
        levels = cls.LEVELS
        if level < 0:
            return levels[0]
        if level >= len(levels):
            return levels[-1]
        return levels[level]

    level_for_integer = classmethod(level_for_integer)

# create a silent logger just to prevent this from being undefined;
# will be overridden with the requested verbosity when main() is called.
logger = Logger([(Logger.LEVELS[-1], sys.stdout)])

def mkdir(path):
    """Create ``path`` (and parents) if it does not already exist."""
    if not os.path.exists(path):
        logger.info('Creating %s', path)
        os.makedirs(path)
    else:
        logger.info('Directory %s already exists', path)

def copyfileordir(src, dest, symlink=True):
    """Copy a file or a whole directory tree from src to dest."""
    if os.path.isdir(src):
        shutil.copytree(src, dest, symlink)
    else:
        shutil.copy2(src, dest)

def copyfile(src, dest, symlink=True):
    """Copy (or, when possible and requested, symlink) src to dest,
    creating parent directories as needed.  Missing/broken sources are
    only warned about."""
    if not os.path.exists(src):
        # Some bad symlink in the src
        logger.warn('Cannot find file %s (bad symlink)', src)
        return
    if os.path.exists(dest):
        logger.debug('File %s already exists', dest)
        return
    if not os.path.exists(os.path.dirname(dest)):
        logger.info('Creating parent directories for %s',
                    os.path.dirname(dest))
        os.makedirs(os.path.dirname(dest))
    if not os.path.islink(src):
        srcpath = os.path.abspath(src)
    else:
        # Re-point at the symlink's target rather than the link itself.
        srcpath = os.readlink(src)
    if symlink and hasattr(os, 'symlink') and not is_win:
        logger.info('Symlinking %s', dest)
        try:
            os.symlink(srcpath, dest)
        except (OSError, NotImplementedError):
            logger.info('Symlinking failed, copying to %s', dest)
            copyfileordir(src, dest, symlink)
    else:
        logger.info('Copying to %s', dest)
        copyfileordir(src, dest, symlink)

def writefile(dest, content, overwrite=True):
    """Write ``content`` (a text string, stored UTF-8) to ``dest``,
    skipping the write when the file already holds identical content."""
    if not os.path.exists(dest):
        logger.info('Writing %s', dest)
        f = open(dest, 'wb')
        f.write(content.encode('utf-8'))
        f.close()
        return
    else:
        f = open(dest, 'rb')
        c = f.read()
        f.close()
        if c != content.encode("utf-8"):
            if not overwrite:
                logger.notify('File %s exists with different content; not overwriting', dest)
                return
            logger.notify('Overwriting %s with new content', dest)
            f = open(dest, 'wb')
            f.write(content.encode('utf-8'))
            f.close()
        else:
            logger.info('Content %s already in place', dest)

def rmtree(dir):
    """Delete the directory tree ``dir`` if it exists."""
    if os.path.exists(dir):
        logger.notify('Deleting tree %s', dir)
        shutil.rmtree(dir)
    else:
        logger.info('Do not need to delete %s; already gone', dir)

def make_exe(fn):
    """Add execute permission bits (0o555) to ``fn`` where chmod exists."""
    if hasattr(os, 'chmod'):
        oldmode = os.stat(fn).st_mode & 0xFFF # 0o7777
        newmode = (oldmode | 0x16D) & 0xFFF # 0o555, 0o7777
        os.chmod(fn, newmode)
        logger.info('Changed mode of %s to %s', fn, oct(newmode))

def _find_file(filename, dirs):
    """Search ``dirs`` (later entries take precedence) for a glob match;
    return (found, path)."""
    for dir in reversed(dirs):
        files = glob.glob(os.path.join(dir, filename))
        if files and os.path.isfile(files[0]):
            return True, files[0]
    return False, filename

def file_search_dirs():
    """Return the existing directories in which support files
    (wheels, etc.) are looked up."""
    here = os.path.dirname(os.path.abspath(__file__))
    dirs = ['.', here,
            join(here, 'virtualenv_support')]
    if os.path.splitext(os.path.dirname(__file__))[0] != 'virtualenv':
        # Probably some boot script; just in case virtualenv is installed...
        try:
            import virtualenv
        except ImportError:
            pass
        else:
            dirs.append(os.path.join(os.path.dirname(virtualenv.__file__),
                                     'virtualenv_support'))
    return [d for d in dirs if os.path.isdir(d)]


class UpdatingDefaultsHelpFormatter(optparse.IndentedHelpFormatter):
    """
    Custom help formatter for use in ConfigOptionParser that updates
    the defaults before expanding them, allowing them to show up correctly
    in the help listing
    """
    def expand_default(self, option):
        if self.parser is not None:
            self.parser.update_defaults(self.parser.defaults)
        return optparse.IndentedHelpFormatter.expand_default(self, option)


class ConfigOptionParser(optparse.OptionParser):
    """
    Custom option parser which updates its defaults by checking the
    configuration files and environmental variables
    """
    def __init__(self, *args, **kwargs):
        self.config = ConfigParser.RawConfigParser()
        self.files = self.get_config_files()
        self.config.read(self.files)
        optparse.OptionParser.__init__(self, *args, **kwargs)

    def get_config_files(self):
        """Return the list of config files to read; an explicit
        $VIRTUALENV_CONFIG_FILE wins over the default location."""
        config_file = os.environ.get('VIRTUALENV_CONFIG_FILE', False)
        if config_file and os.path.exists(config_file):
            return [config_file]
        return [default_config_file]

    def update_defaults(self, defaults):
        """
        Updates the given defaults with values from the config files and
        the environ. Does a little special handling for certain types of
        options (lists).
        """
        # Then go and look for the other sources of configuration:
        config = {}
        # 1. config files
        config.update(dict(self.get_config_section('virtualenv')))
        # 2. environmental variables
        config.update(dict(self.get_environ_vars()))
        # Then set the options with those values
        for key, val in config.items():
            key = key.replace('_', '-')
            if not key.startswith('--'):
                key = '--%s' % key  # only prefer long opts
            option = self.get_option(key)
            if option is not None:
                # ignore empty values
                if not val:
                    continue
                # handle multiline configs
                if option.action == 'append':
                    val = val.split()
                else:
                    option.nargs = 1
                if option.action == 'store_false':
                    val = not strtobool(val)
                elif option.action in ('store_true', 'count'):
                    val = strtobool(val)
                try:
                    val = option.convert_value(key, val)
                except optparse.OptionValueError:
                    e = sys.exc_info()[1]
                    print("An error occured during configuration: %s" % e)
                    sys.exit(3)
                defaults[option.dest] = val
        return defaults

    def get_config_section(self, name):
        """
        Get a section of a configuration
        """
        if self.config.has_section(name):
            return self.config.items(name)
        return []

    def get_environ_vars(self, prefix='VIRTUALENV_'):
        """
        Returns a generator with all environmental vars with prefix VIRTUALENV
        """
        for key, val in os.environ.items():
            if key.startswith(prefix):
                yield (key.replace(prefix, '').lower(), val)

    def get_default_values(self):
        """
        Overriding to make updating the defaults after instantiation of
        the option parser possible, update_defaults() does the dirty work.
        """
        if not self.process_default_values:
            # Old, pre-Optik 1.5 behaviour.
            return optparse.Values(self.defaults)

        defaults = self.update_defaults(self.defaults.copy())  # ours
        for option in self._get_all_options():
            default = defaults.get(option.dest)
            if isinstance(default, basestring):
                opt_str = option.get_opt_string()
                defaults[option.dest] = option.check_value(opt_str, default)
        return optparse.Values(defaults)


def main():
    """Command-line entry point: parse options, optionally re-exec under
    the interpreter given by -p/--python, then create (or relocate) the
    environment at DEST_DIR."""
    parser = ConfigOptionParser(
        version=virtualenv_version,
        usage="%prog [OPTIONS] DEST_DIR",
        formatter=UpdatingDefaultsHelpFormatter())

    parser.add_option(
        '-v', '--verbose',
        action='count',
        dest='verbose',
        default=0,
        help="Increase verbosity.")

    parser.add_option(
        '-q', '--quiet',
        action='count',
        dest='quiet',
        default=0,
        help='Decrease verbosity.')

    parser.add_option(
        '-p', '--python',
        dest='python',
        metavar='PYTHON_EXE',
        help='The Python interpreter to use, e.g., --python=python2.5 will use the python2.5 '
        'interpreter to create the new environment.  The default is the interpreter that '
        'virtualenv was installed with (%s)' % sys.executable)

    parser.add_option(
        '--clear',
        dest='clear',
        action='store_true',
        help="Clear out the non-root install and start from scratch.")

    parser.set_defaults(system_site_packages=False)
    parser.add_option(
        '--no-site-packages',
        dest='system_site_packages',
        action='store_false',
        help="DEPRECATED. Retained only for backward compatibility. "
             "Not having access to global site-packages is now the default behavior.")

    parser.add_option(
        '--system-site-packages',
        dest='system_site_packages',
        action='store_true',
        help="Give the virtual environment access to the global site-packages.")

    parser.add_option(
        '--always-copy',
        dest='symlink',
        action='store_false',
        default=True,
        help="Always copy files rather than symlinking.")

    parser.add_option(
        '--unzip-setuptools',
        dest='unzip_setuptools',
        action='store_true',
        help="Unzip Setuptools when installing it.")

    parser.add_option(
        '--relocatable',
        dest='relocatable',
        action='store_true',
        help='Make an EXISTING virtualenv environment relocatable. '
             'This fixes up scripts and makes all .pth files relative.')

    parser.add_option(
        '--no-setuptools',
        dest='no_setuptools',
        action='store_true',
        help='Do not install setuptools (or pip) in the new virtualenv.')

    parser.add_option(
        '--no-pip',
        dest='no_pip',
        action='store_true',
        help='Do not install pip in the new virtualenv.')

    default_search_dirs = file_search_dirs()
    parser.add_option(
        '--extra-search-dir',
        dest="search_dirs",
        action="append",
        metavar='DIR',
        default=default_search_dirs,
        help="Directory to look for setuptools/pip distributions in. "
             "This option can be used multiple times.")

    parser.add_option(
        '--never-download',
        dest="never_download",
        action="store_true",
        default=True,
        help="DEPRECATED. Retained only for backward compatibility. This option has no effect. "
             "Virtualenv never downloads pip or setuptools.")

    parser.add_option(
        '--prompt',
        dest='prompt',
        help='Provides an alternative prompt prefix for this environment.')

    parser.add_option(
        '--setuptools',
        dest='setuptools',
        action='store_true',
        help="DEPRECATED. Retained only for backward compatibility. This option has no effect.")

    parser.add_option(
        '--distribute',
        dest='distribute',
        action='store_true',
        help="DEPRECATED. Retained only for backward compatibility. This option has no effect.")

    # Bootstrap-script hook: a wrapper script may define extend_parser().
    if 'extend_parser' in globals():
        extend_parser(parser)

    options, args = parser.parse_args()

    global logger

    # Bootstrap-script hook: a wrapper script may define adjust_options().
    if 'adjust_options' in globals():
        adjust_options(options, args)

    verbosity = options.verbose - options.quiet
    logger = Logger([(Logger.level_for_integer(2 - verbosity), sys.stdout)])

    if options.python and not os.environ.get('VIRTUALENV_INTERPRETER_RUNNING'):
        # Re-exec this script under the requested interpreter; the env var
        # guards against re-exec recursion.
        env = os.environ.copy()
        interpreter = resolve_interpreter(options.python)
        if interpreter == sys.executable:
            logger.warn('Already using interpreter %s' % interpreter)
        else:
            logger.notify('Running virtualenv with interpreter %s' % interpreter)
            env['VIRTUALENV_INTERPRETER_RUNNING'] = 'true'
            file = __file__
            if file.endswith('.pyc'):
                file = file[:-1]
            popen = subprocess.Popen([interpreter, file] + sys.argv[1:],
                                     env=env)
            raise SystemExit(popen.wait())

    if not args:
        print('You must provide a DEST_DIR')
        parser.print_help()
        sys.exit(2)
    if len(args) > 1:
        print('There must be only one argument: DEST_DIR (you gave %s)' % (
            ' '.join(args)))
        parser.print_help()
        sys.exit(2)

    home_dir = args[0]

    if os.environ.get('WORKING_ENV'):
        logger.fatal('ERROR: you cannot run virtualenv while in a workingenv')
        logger.fatal('Please deactivate your workingenv, then re-run this script')
        sys.exit(3)

    if 'PYTHONHOME' in os.environ:
        logger.warn('PYTHONHOME is set. You *must* activate the virtualenv before using it')
        del os.environ['PYTHONHOME']

    if options.relocatable:
        make_environment_relocatable(home_dir)
        return

    if not options.never_download:
        logger.warn('The --never-download option is for backward compatibility only.')
        logger.warn('Setting it to false is no longer supported, and will be ignored.')

    create_environment(home_dir,
                       site_packages=options.system_site_packages,
                       clear=options.clear,
                       unzip_setuptools=options.unzip_setuptools,
                       prompt=options.prompt,
                       search_dirs=options.search_dirs,
                       never_download=True,
                       no_setuptools=options.no_setuptools,
                       no_pip=options.no_pip,
                       symlink=options.symlink)

    # Bootstrap-script hook: a wrapper script may define after_install().
    if 'after_install' in globals():
        after_install(options, home_dir)

def call_subprocess(cmd, show_stdout=True,
                    filter_stdout=None, cwd=None,
                    raise_on_returncode=True, extra_env=None,
                    remove_from_env=None):
    """Run ``cmd`` (a list), logging its output through ``logger``.

    show_stdout      -- pass output straight through instead of capturing
    filter_stdout    -- callable mapping a line to a log level (or a
                        (level, replacement_line) tuple)
    extra_env/remove_from_env -- adjust the child's environment
    raise_on_returncode -- raise OSError on non-zero exit, else just warn
    """
    # Build an abbreviated, quoted description of the command for logging.
    cmd_parts = []
    for part in cmd:
        if len(part) > 45:
            part = part[:20]+"..."+part[-20:]
        if ' ' in part or '\n' in part or '"' in part or "'" in part:
            part = '"%s"' % part.replace('"', '\\"')
        if hasattr(part, 'decode'):
            try:
                part = part.decode(sys.getdefaultencoding())
            except UnicodeDecodeError:
                part = part.decode(sys.getfilesystemencoding())
        cmd_parts.append(part)
    cmd_desc = ' '.join(cmd_parts)
    if show_stdout:
        stdout = None
    else:
        stdout = subprocess.PIPE
    logger.debug("Running command %s" % cmd_desc)
    if extra_env or remove_from_env:
        env = os.environ.copy()
        if extra_env:
            env.update(extra_env)
        if remove_from_env:
            for varname in remove_from_env:
                env.pop(varname, None)
    else:
        env = None
    try:
        proc = subprocess.Popen(
            cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
            cwd=cwd, env=env)
    except Exception:
        e = sys.exc_info()[1]
        logger.fatal(
            "Error %s while executing command %s" % (e, cmd_desc))
        raise
    all_output = []
    if stdout is not None:
        stdout = proc.stdout
        encoding = sys.getdefaultencoding()
        fs_encoding = sys.getfilesystemencoding()
        while 1:
            line = stdout.readline()
            try:
                line = line.decode(encoding)
            except UnicodeDecodeError:
                line = line.decode(fs_encoding)
            if not line:
                break
            line = line.rstrip()
            all_output.append(line)
            if filter_stdout:
                level = filter_stdout(line)
                if isinstance(level, tuple):
                    level, line = level
                logger.log(level, line)
                if not logger.stdout_level_matches(level):
                    logger.show_progress()
            else:
                logger.info(line)
    else:
        proc.communicate()
    proc.wait()
    if proc.returncode:
        if raise_on_returncode:
            if all_output:
                logger.notify('Complete output from command %s:' % cmd_desc)
                logger.notify('\n'.join(all_output) + '\n----------------------------------------')
            raise OSError(
                "Command %s failed with error code %s"
                % (cmd_desc, proc.returncode))
        else:
            logger.warn(
                "Command %s had error code %s"
                % (cmd_desc, proc.returncode))

def filter_install_output(line):
    """filter_stdout callback for install commands: only 'running ...'
    lines are INFO, everything else is DEBUG."""
    if line.strip().startswith('running'):
        return Logger.INFO
    return Logger.DEBUG

def find_wheels(projects, search_dirs):
    """Find wheels from which we can import PROJECTS.

    Scan through SEARCH_DIRS for a wheel for each PROJECT in turn. Return
    a list of the first wheel found for each PROJECT
    """

    wheels = []

    # Look through SEARCH_DIRS for the first suitable wheel. Don't bother
    # about version checking here, as this is simply to get something we can
    # then use to install the correct version.
    for project in projects:
        for dirname in search_dirs:
            # This relies on only having "universal" wheels available.
            # The pattern could be tightened to require -py2.py3-none-any.whl.
            files = glob.glob(os.path.join(dirname, project + '-*.whl'))
            if files:
                wheels.append(os.path.abspath(files[0]))
                break
        else:
            # We're out of luck, so quit with a suitable error
            logger.fatal('Cannot find a wheel for %s' % (project,))

    return wheels

def install_wheel(project_names, py_executable, search_dirs=None):
    """Install ``project_names`` into the new environment by running pip
    (imported from local wheels via PYTHONPATH) offline, with the search
    dirs as the only package source."""
    if search_dirs is None:
        search_dirs = file_search_dirs()

    wheels = find_wheels(['setuptools', 'pip'], search_dirs)
    pythonpath = os.pathsep.join(wheels)
    findlinks = ' '.join(search_dirs)

    cmd = [
        py_executable, '-c',
        'import sys, pip; sys.exit(pip.main(["install", "--ignore-installed"] + sys.argv[1:]))',
    ] + project_names
    logger.start_progress('Installing %s...' % (', '.join(project_names)))
    logger.indent += 2
    try:
        # PIP_NO_INDEX keeps the install fully offline; wheels on
        # PYTHONPATH provide the pip that performs it.
        call_subprocess(cmd, show_stdout=False,
            extra_env = {
                'PYTHONPATH': pythonpath,
                'PIP_FIND_LINKS': findlinks,
                'PIP_USE_WHEEL': '1',
                'PIP_PRE': '1',
                'PIP_NO_INDEX': '1'
            }
        )
    finally:
        logger.indent -= 2
        logger.end_progress()

def create_environment(home_dir, site_packages=False, clear=False,
                       unzip_setuptools=False,
                       prompt=None, search_dirs=None, never_download=False,
                       no_setuptools=False, no_pip=False, symlink=True):
    """
    Creates a new environment in ``home_dir``.

    If ``site_packages`` is true, then the global ``site-packages/``
    directory will be on the path.

    If ``clear`` is true (default False) then the environment will
    first be cleared.
    """
    home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)

    py_executable = os.path.abspath(install_python(
        home_dir, lib_dir, inc_dir, bin_dir,
        site_packages=site_packages, clear=clear, symlink=symlink))

    install_distutils(home_dir)

    if not no_setuptools:
        to_install = ['setuptools']
        if not no_pip:
            to_install.append('pip')
        install_wheel(to_install, py_executable, search_dirs)

    install_activate(home_dir, bin_dir, prompt)

def is_executable_file(fpath):
    """True if ``fpath`` is a regular file with the execute bit set."""
    return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

def path_locations(home_dir):
    """Return the path locations for the environment (where libraries are,
    where scripts go, etc)"""
    # XXX: We'd use distutils.sysconfig.get_python_inc/lib but its
    # prefix arg is broken: http://bugs.python.org/issue3386
    if is_win:
        # Windows has lots of problems with executables with spaces in
        # the name; this function will remove them (using the ~1
        # format):
        mkdir(home_dir)
        if ' ' in home_dir:
            import ctypes
            GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW
            size = max(len(home_dir)+1, 256)
            buf = ctypes.create_unicode_buffer(size)
            try:
                # Python 2 needs unicode(); Python 3 uses str.
                u = unicode
            except NameError:
                u = str
            ret = GetShortPathName(u(home_dir), buf, size)
            if not ret:
                print('Error: the path "%s" has a space in it' % home_dir)
                print('We could not determine the short pathname for it.')
                print('Exiting.')
                sys.exit(3)
            home_dir = str(buf.value)
        lib_dir = join(home_dir, 'Lib')
        inc_dir = join(home_dir, 'Include')
        bin_dir = join(home_dir, 'Scripts')
    if is_jython:
        lib_dir = join(home_dir, 'Lib')
        inc_dir = join(home_dir, 'Include')
        bin_dir = join(home_dir, 'bin')
    elif is_pypy:
        lib_dir = home_dir
        inc_dir = join(home_dir, 'include')
        bin_dir = join(home_dir, 'bin')
    elif not is_win:
        lib_dir = join(home_dir, 'lib', py_version)
        multiarch_exec = '/usr/bin/multiarch-platform'
        if is_executable_file(multiarch_exec):
            # In Mageia (2) and Mandriva distros the include dir must be like:
            # virtualenv/include/multiarch-x86_64-linux/python2.7
            # instead of being virtualenv/include/python2.7
            p = subprocess.Popen(multiarch_exec, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()
            # stdout.strip is needed to remove newline character
            inc_dir = join(home_dir, 'include', stdout.strip(), py_version + abiflags)
        else:
            inc_dir = join(home_dir, 'include', py_version + abiflags)
        bin_dir = join(home_dir, 'bin')
    return home_dir, lib_dir, inc_dir, bin_dir

def change_prefix(filename, dst_prefix):
    """Map ``filename`` (which must live under one of the known system
    prefixes) to the corresponding path under ``dst_prefix``."""
    prefixes = [sys.prefix]

    if is_darwin:
        prefixes.extend((
            os.path.join("/Library/Python", sys.version[:3], "site-packages"),
            os.path.join(sys.prefix, "Extras", "lib", "python"),
            os.path.join("~", "Library", "Python", sys.version[:3], "site-packages"),
            # Python 2.6 no-frameworks
            os.path.join("~", ".local", "lib","python", sys.version[:3], "site-packages"),
            # System Python 2.7 on OSX Mountain Lion
            os.path.join("~", "Library", "Python", sys.version[:3], "lib", "python", "site-packages")))

    if hasattr(sys, 'real_prefix'):
        prefixes.append(sys.real_prefix)
    if hasattr(sys, 'base_prefix'):
        prefixes.append(sys.base_prefix)
    prefixes = list(map(os.path.expanduser, prefixes))
    prefixes = list(map(os.path.abspath, prefixes))
    # Check longer prefixes first so we don't split in the middle of a filename
    prefixes = sorted(prefixes, key=len, reverse=True)
    filename = os.path.abspath(filename)
    for src_prefix in prefixes:
        if filename.startswith(src_prefix):
            _, relpath = filename.split(src_prefix, 1)
            if src_prefix != os.sep: # sys.prefix == "/"
                assert relpath[0] == os.sep
                relpath = relpath[1:]
            return join(dst_prefix, relpath)
    assert False, "Filename %s does not start with any of these prefixes: %s" % \
        (filename, prefixes)

def copy_required_modules(dst_prefix, symlink):
    """Copy/symlink every module in REQUIRED_MODULES into the new
    environment at ``dst_prefix``."""
    import imp
    # If we are running under -p, we need to remove the current
    # directory from sys.path temporarily here, so that we
    # definitely get the modules from the site directory of
    # the interpreter we are running under, not the one
    # virtualenv.py is installed under (which
    # might lead to py2/py3 incompatibility issues)
    _prev_sys_path = sys.path
    if os.environ.get('VIRTUALENV_INTERPRETER_RUNNING'):
        sys.path = sys.path[1:]
    try:
        for modname in REQUIRED_MODULES:
            if modname in sys.builtin_module_names:
                logger.info("Ignoring built-in bootstrap module: %s" % modname)
                continue
            try:
                f, filename, _ = imp.find_module(modname)
            except ImportError:
                logger.info("Cannot import bootstrap module: %s" % modname)
            else:
                if f is not None:
                    f.close()
                # special-case custom readline.so on OS X, but not for pypy:
                if modname == 'readline' and sys.platform == 'darwin' and not (
                        is_pypy or filename.endswith(join('lib-dynload', 'readline.so'))):
                    dst_filename = join(dst_prefix, 'lib', 'python%s' % sys.version[:3], 'readline.so')
                elif modname == 'readline' and sys.platform == 'win32':
                    # special-case for Windows, where readline is not a
                    # standard module, though it may have been installed in
                    # site-packages by a third-party package
                    pass
                else:
                    dst_filename = change_prefix(filename, dst_prefix)
                copyfile(filename, dst_filename, symlink)
                if filename.endswith('.pyc'):
                    # Also bring over the matching .py source if present.
                    pyfile = filename[:-1]
                    if os.path.exists(pyfile):
                        copyfile(pyfile, dst_filename[:-1], symlink)
    finally:
        sys.path = _prev_sys_path

def subst_path(prefix_path, prefix, home_dir):
    """Rebase ``prefix_path`` from ``prefix`` onto ``home_dir``; returns
    None (after a warning) when the path is not under ``prefix``."""
    prefix_path = os.path.normpath(prefix_path)
    prefix = os.path.normpath(prefix)
    home_dir = os.path.normpath(home_dir)
    if not prefix_path.startswith(prefix):
        logger.warn('Path not in prefix %r %r', prefix_path, prefix)
        return
    return prefix_path.replace(prefix, home_dir, 1)

def install_python(home_dir, lib_dir, inc_dir, bin_dir, site_packages, clear, symlink=True):
    """Install just the base environment, no distutils patches etc"""
    if sys.executable.startswith(bin_dir):
        print('Please use the *system* python to run this script')
        return

    if clear:
        rmtree(lib_dir)
        ## FIXME: why not delete it?
        ## Maybe it should delete everything with #!/path/to/venv/python in it
        logger.notify('Not deleting %s', bin_dir)

    if hasattr(sys, 'real_prefix'):
        # We are already inside a virtualenv; use its base interpreter.
        logger.notify('Using real prefix %r' % sys.real_prefix)
        prefix = sys.real_prefix
    elif hasattr(sys, 'base_prefix'):
        # PEP 405 venv (Python 3.3+).
        logger.notify('Using base prefix %r' % sys.base_prefix)
        prefix = sys.base_prefix
    else:
        prefix = sys.prefix
    mkdir(lib_dir)
    fix_lib64(lib_dir, symlink)
    stdlib_dirs = [os.path.dirname(os.__file__)]
    if is_win:
        stdlib_dirs.append(join(os.path.dirname(stdlib_dirs[0]), 'DLLs'))
    elif is_darwin:
        stdlib_dirs.append(join(stdlib_dirs[0], 'site-packages'))
    if hasattr(os, 'symlink'):
        logger.info('Symlinking Python bootstrap modules')
    else:
        logger.info('Copying Python bootstrap modules')
    logger.indent += 2
    try:
        # copy required files...
        for stdlib_dir in stdlib_dirs:
            if not os.path.isdir(stdlib_dir):
                continue
            for fn in os.listdir(stdlib_dir):
                bn = os.path.splitext(fn)[0]
                if fn != 'site-packages' and bn in REQUIRED_FILES:
                    copyfile(join(stdlib_dir, fn), join(lib_dir, fn), symlink)
        # ...and modules
        copy_required_modules(home_dir, symlink)
    finally:
        logger.indent -= 2
    mkdir(join(lib_dir, 'site-packages'))
    import site
    site_filename = site.__file__
    if site_filename.endswith('.pyc'):
        site_filename = site_filename[:-1]
    elif site_filename.endswith('$py.class'):
        # Jython compiled-class suffix.
        site_filename = site_filename.replace('$py.class', '.py')
    site_filename_dst = change_prefix(site_filename, home_dir)
    site_dir = os.path.dirname(site_filename_dst)
    # Install virtualenv's patched site.py plus the marker files it reads.
    writefile(site_filename_dst, SITE_PY)
    writefile(join(site_dir, 'orig-prefix.txt'), prefix)
    site_packages_filename = join(site_dir, 'no-global-site-packages.txt')
    if not site_packages:
        writefile(site_packages_filename, '')

    if is_pypy or is_win:
        stdinc_dir = join(prefix, 'include')
    else:
        stdinc_dir = join(prefix, 'include', py_version + abiflags)
    if os.path.exists(stdinc_dir):
        copyfile(stdinc_dir, inc_dir, symlink)
    else:
        logger.debug('No include dir %s' % stdinc_dir)

    platinc_dir = distutils.sysconfig.get_python_inc(plat_specific=1)
    if platinc_dir != stdinc_dir:
        platinc_dest = distutils.sysconfig.get_python_inc(
            plat_specific=1, prefix=home_dir)
        if platinc_dir == platinc_dest:
            # Do platinc_dest manually due to a CPython bug;
            # not http://bugs.python.org/issue3386 but a close cousin
            platinc_dest = subst_path(platinc_dir, prefix, home_dir)
        if platinc_dest:
            # PyPy's stdinc_dir and prefix are relative to the original binary
            # (traversing virtualenvs), whereas the platinc_dir is relative to
            # the inner virtualenv and ignores the prefix argument.
            # This seems more evolved than designed.
            copyfile(platinc_dir, platinc_dest, symlink)

    # pypy never uses exec_prefix, just ignore it
    if sys.exec_prefix != prefix and not is_pypy:
        if is_win:
            exec_dir = join(sys.exec_prefix, 'lib')
        elif is_jython:
            exec_dir = join(sys.exec_prefix, 'Lib')
        else:
            exec_dir = join(sys.exec_prefix, 'lib', py_version)
        for fn in os.listdir(exec_dir):
            copyfile(join(exec_dir, fn), join(lib_dir, fn), symlink)

    if is_jython:
        # Jython has either jython-dev.jar and javalib/ dir, or just
        # jython.jar
        for name in 'jython-dev.jar', 'javalib', 'jython.jar':
            src = join(prefix, name)
            if os.path.exists(src):
                copyfile(src, join(home_dir, name), symlink)
        # XXX: registry should always exist after Jython 2.5rc1
        src = join(prefix, 'registry')
        if os.path.exists(src):
            copyfile(src, join(home_dir, 'registry'), symlink=False)
        copyfile(join(prefix, 'cachedir'), join(home_dir, 'cachedir'),
                 symlink=False)

    mkdir(bin_dir)
    py_executable = join(bin_dir, os.path.basename(sys.executable))
    if 'Python.framework' in prefix:
        # OS X framework builds cause validation to break
        # https://github.com/pypa/virtualenv/issues/322
        if os.environ.get('__PYVENV_LAUNCHER__'):
            del os.environ["__PYVENV_LAUNCHER__"]
        if re.search(r'/Python(?:-32|-64)*$', py_executable):
            # The name of the python executable is not quite what
            # we want, rename it.
            py_executable = os.path.join(
                    os.path.dirname(py_executable), 'python')

    logger.notify('New %s executable in %s', expected_exe, py_executable)
    pcbuild_dir = os.path.dirname(sys.executable)
    pyd_pth = os.path.join(lib_dir, 'site-packages', 'virtualenv_builddir_pyd.pth')
    if is_win and os.path.exists(os.path.join(pcbuild_dir, 'build.bat')):
        logger.notify('Detected python running from build directory %s', pcbuild_dir)
        logger.notify('Writing .pth file linking to build directory for *.pyd files')
        writefile(pyd_pth, pcbuild_dir)
    else:
        pcbuild_dir = None
        if os.path.exists(pyd_pth):
            logger.info('Deleting %s (not Windows env or not build directory python)' % pyd_pth)
            os.unlink(pyd_pth)

    if sys.executable != py_executable:
        ## FIXME: could I just hard link?
        executable = sys.executable
        shutil.copyfile(executable, py_executable)
        make_exe(py_executable)
        if is_win or is_cygwin:
            pythonw = os.path.join(os.path.dirname(sys.executable), 'pythonw.exe')
            if os.path.exists(pythonw):
                logger.info('Also created pythonw.exe')
                shutil.copyfile(pythonw, os.path.join(os.path.dirname(py_executable), 'pythonw.exe'))
            # Debug-build interpreter: mirror its presence/absence.
            python_d = os.path.join(os.path.dirname(sys.executable), 'python_d.exe')
            python_d_dest = os.path.join(os.path.dirname(py_executable), 'python_d.exe')
            if os.path.exists(python_d):
                logger.info('Also created python_d.exe')
                shutil.copyfile(python_d, python_d_dest)
            elif os.path.exists(python_d_dest):
                logger.info('Removed python_d.exe as it is no longer at the source')
                os.unlink(python_d_dest)
            # we need to copy the DLL to enforce that windows will load the correct one.
            # may not exist if we are cygwin.
            py_executable_dll = 'python%s%s.dll' % (
                sys.version_info[0], sys.version_info[1])
            py_executable_dll_d = 'python%s%s_d.dll' % (
                sys.version_info[0], sys.version_info[1])
            pythondll = os.path.join(os.path.dirname(sys.executable), py_executable_dll)
            pythondll_d = os.path.join(os.path.dirname(sys.executable), py_executable_dll_d)
            pythondll_d_dest = os.path.join(os.path.dirname(py_executable), py_executable_dll_d)
            if os.path.exists(pythondll):
                logger.info('Also created %s' % py_executable_dll)
                shutil.copyfile(pythondll, os.path.join(os.path.dirname(py_executable), py_executable_dll))
            if os.path.exists(pythondll_d):
                logger.info('Also created %s' % py_executable_dll_d)
                shutil.copyfile(pythondll_d, pythondll_d_dest)
            elif os.path.exists(pythondll_d_dest):
                logger.info('Removed %s as the source does not exist' % pythondll_d_dest)
                os.unlink(pythondll_d_dest)
    if is_pypy:
        # make a symlink python --> pypy-c
        python_executable = os.path.join(os.path.dirname(py_executable), 'python')
        if sys.platform in ('win32', 'cygwin'):
            python_executable += '.exe'
        logger.info('Also created executable %s' % python_executable)
        copyfile(py_executable, python_executable, symlink)

        if is_win:
            for name in 'libexpat.dll', 'libpypy.dll', 'libpypy-c.dll', 'libeay32.dll', 'ssleay32.dll', 'sqlite.dll':
                src = join(prefix, name)
                if os.path.exists(src):
                    copyfile(src, join(bin_dir, name), symlink)

    if os.path.splitext(os.path.basename(py_executable))[0] != expected_exe:
        secondary_exe = os.path.join(os.path.dirname(py_executable),
                                     expected_exe)
        py_executable_ext = os.path.splitext(py_executable)[1]
        if py_executable_ext.lower() == '.exe':
            # python2.4 gives an extension of '.4' :P
            secondary_exe += py_executable_ext
        if os.path.exists(secondary_exe):
            logger.warn('Not overwriting existing %s script %s (you must use %s)'
                        % (expected_exe, secondary_exe, py_executable))
        else:
            logger.notify('Also creating executable in %s' % secondary_exe)
            shutil.copyfile(sys.executable, secondary_exe)
            make_exe(secondary_exe)

    if '.framework' in prefix:
        if 'Python.framework' in prefix:
            logger.debug('MacOSX Python framework detected')
            # Make sure we use the embedded interpreter inside
            # the framework, even if sys.executable points to
            # the stub executable in ${sys.prefix}/bin
            # See http://groups.google.com/group/python-virtualenv/
            #     browse_thread/thread/17cab2f85da75951
            original_python = os.path.join(
                prefix, 'Resources/Python.app/Contents/MacOS/Python')
        if 'EPD' in prefix:
            logger.debug('EPD framework detected')
            original_python = os.path.join(prefix, 'bin/python')
        shutil.copy(original_python, py_executable)

        # Copy the framework's dylib into the virtual
        # environment
        virtual_lib = os.path.join(home_dir, '.Python')

        if os.path.exists(virtual_lib):
            os.unlink(virtual_lib)
        copyfile(
            os.path.join(prefix, 'Python'),
            virtual_lib,
            symlink)

        # And then change the install_name of the copied python executable
        try:
            mach_o_change(py_executable,
                          os.path.join(prefix, 'Python'),
                          '@executable_path/../.Python')
        except:
            e = sys.exc_info()[1]
            logger.warn("Could not call mach_o_change: %s. "
                        "Trying to call install_name_tool instead." % e)
            try:
                call_subprocess(
                    ["install_name_tool", "-change",
                     os.path.join(prefix, 'Python'),
                     '@executable_path/../.Python',
                     py_executable])
            except:
                logger.fatal("Could not call install_name_tool -- you must "
                             "have Apple's development tools installed")
                raise

    if not is_win:
        # Ensure that 'python', 'pythonX' and 'pythonX.Y' all exist
        py_exe_version_major = 'python%s' % sys.version_info[0]
        py_exe_version_major_minor = 'python%s.%s' % (
            sys.version_info[0], sys.version_info[1])
        py_exe_no_version = 'python'
        required_symlinks = [ py_exe_no_version, py_exe_version_major,
                              py_exe_version_major_minor ]

        py_executable_base = os.path.basename(py_executable)

        if py_executable_base in required_symlinks:
            # Don't try to symlink to yourself.
            required_symlinks.remove(py_executable_base)

        for pth in required_symlinks:
            full_pth = join(bin_dir, pth)
            if os.path.exists(full_pth):
                os.unlink(full_pth)
            if symlink:
                os.symlink(py_executable_base, full_pth)
            else:
                copyfile(py_executable, full_pth, symlink)

    if is_win and ' ' in py_executable:
        # There's a bug with subprocess on Windows when using a first
        # argument that has a space in it.  Instead we have to quote
        # the value:
        py_executable = '"%s"' % py_executable
    # NOTE: keep this check as one line, cmd.exe doesn't cope with line breaks
    cmd = [py_executable, '-c', 'import sys;out=sys.stdout;'
        'getattr(out, "buffer", out).write(sys.prefix.encode("utf-8"))']
    logger.info('Testing executable with %s %s "%s"' % tuple(cmd))
    try:
        # Sanity check: the new interpreter must report home_dir as
        # its sys.prefix.
        proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE)
        proc_stdout, proc_stderr = proc.communicate()
    except OSError:
        e = sys.exc_info()[1]
        if e.errno == errno.EACCES:
            logger.fatal('ERROR: The executable %s could not be run: %s' % (py_executable, e))
            sys.exit(100)
        else:
            raise e

    proc_stdout = proc_stdout.strip().decode("utf-8")
    proc_stdout = os.path.normcase(os.path.abspath(proc_stdout))
    norm_home_dir = os.path.normcase(os.path.abspath(home_dir))
    if hasattr(norm_home_dir, 'decode'):
        norm_home_dir = norm_home_dir.decode(sys.getfilesystemencoding())
    if proc_stdout != norm_home_dir:
        logger.fatal(
            'ERROR: The executable %s is not functioning' % py_executable)
        logger.fatal(
            'ERROR: It thinks sys.prefix is %r (should be %r)' %
            (proc_stdout, norm_home_dir))
        logger.fatal(
            'ERROR: virtualenv is not compatible with this system or executable')
        if is_win:
            logger.fatal(
                'Note: some Windows users have reported this error when they '
                'installed Python for "Only this user" or have multiple '
                'versions of Python installed. Copying the appropriate '
                'PythonXX.dll to the virtualenv Scripts/ directory may fix '
                'this problem.')
        sys.exit(100)
    else:
        logger.info('Got sys.prefix result: %r' % proc_stdout)

    pydistutils = os.path.expanduser('~/.pydistutils.cfg')
    if os.path.exists(pydistutils):
        logger.notify('Please make sure you remove any previous custom paths from '
                      'your %s file.' % pydistutils)
    ## FIXME: really this should be calculated earlier

    fix_local_scheme(home_dir, symlink)

    if site_packages:
        if os.path.exists(site_packages_filename):
            logger.info('Deleting %s' % site_packages_filename)
            os.unlink(site_packages_filename)

    return py_executable

def install_activate(home_dir, bin_dir, prompt=None):
    """Write the per-shell activate/deactivate scripts into bin_dir.

    NOTE(review): this function continues past the end of this chunk;
    the visible portion is reproduced as found.
    """
    home_dir = os.path.abspath(home_dir)
    if is_win or is_jython and os._name == 'nt':
        files = {
            'activate.bat': ACTIVATE_BAT,
            'deactivate.bat': DEACTIVATE_BAT,
            'activate.ps1': ACTIVATE_PS,
        }

        # MSYS needs paths of the form /c/path/to/file
        drive, tail = os.path.splitdrive(home_dir.replace(os.sep, '/'))
        home_dir_msys = (drive and "/%s%s" or "%s%s") % (drive[:1], tail)

        # Run-time conditional enables (basic) Cygwin compatibility
        home_dir_sh = ("""$(if [ "$OSTYPE" "==" "cygwin" ]; then cygpath -u '%s'; else echo '%s'; fi;)""" %
                       (home_dir, home_dir_msys))
        files['activate'] = ACTIVATE_SH.replace('__VIRTUAL_ENV__', home_dir_sh)

    else:
        files = {'activate': ACTIVATE_SH}

        # supplying activate.fish in addition to, not instead of, the
        # bash script support.
        files['activate.fish'] = ACTIVATE_FISH

        # same for csh/tcsh support...
        # tail of install_activate(): csh/tcsh variant of the activation script
        files['activate.csh'] = ACTIVATE_CSH

    # Written on every platform; lets Python code activate the env in-process.
    files['activate_this.py'] = ACTIVATE_THIS
    if hasattr(home_dir, 'decode'):
        # Python 2: decode a byte-string path so template substitution works.
        home_dir = home_dir.decode(sys.getfilesystemencoding())
    vname = os.path.basename(home_dir)
    for name, content in files.items():
        # Substitute the placeholder tokens in each script template.
        content = content.replace('__VIRTUAL_PROMPT__', prompt or '')
        content = content.replace('__VIRTUAL_WINPROMPT__', prompt or '(%s)' % vname)
        content = content.replace('__VIRTUAL_ENV__', home_dir)
        content = content.replace('__VIRTUAL_NAME__', vname)
        content = content.replace('__BIN_NAME__', os.path.basename(bin_dir))
        writefile(os.path.join(bin_dir, name), content)

def install_distutils(home_dir):
    """Install a stub ``distutils`` package (``__init__.py`` plus a
    ``distutils.cfg``) into *home_dir*, shadowing the system distutils."""
    distutils_path = change_prefix(distutils.__path__[0], home_dir)
    mkdir(distutils_path)
    ## FIXME: maybe this prefix setting should only be put in place if
    ## there's a local distutils.cfg with a prefix setting?
    home_dir = os.path.abspath(home_dir)
    ## FIXME: this is breaking things, removing for now:
    #distutils_cfg = DISTUTILS_CFG + "\n[install]\nprefix=%s\n" % home_dir
    writefile(os.path.join(distutils_path, '__init__.py'), DISTUTILS_INIT)
    # overwrite=False: an existing user-provided distutils.cfg is kept.
    writefile(os.path.join(distutils_path, 'distutils.cfg'), DISTUTILS_CFG, overwrite=False)

def fix_local_scheme(home_dir, symlink=True):
    """
    Platforms that use the "posix_local" install scheme (like Ubuntu with
    Python 2.7) need to be given an additional "local" location, sigh.
    """
    try:
        import sysconfig
    except ImportError:
        pass
    else:
        # NOTE(review): _get_default_scheme() is a private sysconfig API —
        # confirm it still exists on every targeted Python version.
        if sysconfig._get_default_scheme() == 'posix_local':
            local_path = os.path.join(home_dir, 'local')
            if not os.path.exists(local_path):
                os.mkdir(local_path)
                # Mirror every top-level entry of the env under local/.
                for subdir_name in os.listdir(home_dir):
                    if subdir_name == 'local':
                        continue
                    copyfile(os.path.abspath(os.path.join(home_dir, subdir_name)), \
                        os.path.join(local_path, subdir_name), symlink)

def fix_lib64(lib_dir, symlink=True):
    """
    Some platforms (particularly Gentoo on x64) put things in
    lib64/pythonX.Y instead of lib/pythonX.Y.
    If this is such a platform we'll just create a symlink so lib64
    points to lib
    """
    # Only act when the build configuration actually mentions lib64.
    if [p for p in distutils.sysconfig.get_config_vars().values()
            if isinstance(p, basestring) and 'lib64' in p]:
        # PyPy's library path scheme is not affected by this.
        # Return early or we will die on the following assert.
        if is_pypy:
            logger.debug('PyPy detected, skipping lib64 symlinking')
            return

        logger.debug('This system uses lib64; symlinking lib64 to lib')

        assert os.path.basename(lib_dir) == 'python%s' % sys.version[:3], (
            "Unexpected python lib dir: %r" % lib_dir)
        lib_parent = os.path.dirname(lib_dir)
        top_level = os.path.dirname(lib_parent)
        lib_dir = os.path.join(top_level, 'lib')
        lib64_link = os.path.join(top_level, 'lib64')
        assert os.path.basename(lib_parent) == 'lib', (
            "Unexpected parent dir: %r" % lib_parent)
        # Don't clobber an existing lib64 entry (lexists also sees dead links).
        if os.path.lexists(lib64_link):
            return
        cp_or_ln = (os.symlink if symlink else copyfile)
        cp_or_ln('lib', lib64_link)

def resolve_interpreter(exe):
    """
    If the executable given isn't an absolute path, search $PATH for the interpreter
    """
    # If the "executable" is a version number, get the installed executable for
    # that version
    python_versions = get_installed_pythons()
    if exe in python_versions:
        exe = python_versions[exe]

    if os.path.abspath(exe) != exe:
        # Relative name: take the first $PATH entry that has it.
        paths = os.environ.get('PATH', '').split(os.pathsep)
        for path in paths:
            if os.path.exists(os.path.join(path, exe)):
                exe = os.path.join(path, exe)
                break
    if not os.path.exists(exe):
        logger.fatal('The executable %s (from --python=%s) does not exist' % (exe, exe))
        raise SystemExit(3)
    if not is_executable(exe):
        logger.fatal('The executable %s (from --python=%s) is not executable' % (exe, exe))
        raise SystemExit(3)
    return exe

def is_executable(exe):
    """Checks a file is executable"""
    return os.access(exe, os.X_OK)

############################################################
## Relocating the environment:

def make_environment_relocatable(home_dir):
    """
    Makes the already-existing environment use relative paths, and takes out
    the #!-based
    environment selection in scripts.
    """
    home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
    activate_this = os.path.join(bin_dir, 'activate_this.py')
    if not os.path.exists(activate_this):
        logger.fatal(
            'The environment doesn\'t have a file %s -- please re-run virtualenv '
            'on this environment to update it' % activate_this)
    fixup_scripts(home_dir, bin_dir)
    fixup_pth_and_egg_link(home_dir)
    ## FIXME: need to fix up distutils.cfg

# Scripts that legitimately keep an absolute #! line and are therefore
# not rewritten by fixup_scripts().
OK_ABS_SCRIPTS = ['python', 'python%s' % sys.version[:3],
                  'activate', 'activate.bat', 'activate_this.py',
                  'activate.fish', 'activate.csh']

def fixup_scripts(home_dir, bin_dir):
    """Rewrite each script in *bin_dir* so its shebang no longer hard-codes
    the environment's absolute interpreter path."""
    if is_win:
        new_shebang_args = (
            '%s /c' % os.path.normcase(os.environ.get('COMSPEC', 'cmd.exe')),
            '', '.exe')
    else:
        new_shebang_args = ('/usr/bin/env', sys.version[:3], '')

    # This is what we expect at the top of scripts:
    shebang = '#!%s' % os.path.normcase(os.path.join(
        os.path.abspath(bin_dir), 'python%s' % new_shebang_args[2]))
    # This is what we'll put:
    new_shebang = '#!%s python%s%s' % new_shebang_args

    for filename in os.listdir(bin_dir):
        filename = os.path.join(bin_dir, filename)
        if not os.path.isfile(filename):
            # ignore subdirs, e.g. .svn ones.
            continue
        f = open(filename, 'rb')
        try:
            try:
                lines = f.read().decode('utf-8').splitlines()
            except UnicodeDecodeError:
                # This is probably a binary program instead
                # of a script, so just ignore it.
                continue
        finally:
            f.close()

        if not lines:
            logger.warn('Script %s is an empty file' % filename)
            continue

        old_shebang = lines[0].strip()
        # normcase everything after '#!' so the comparison is
        # case-insensitive where the filesystem is.
        old_shebang = old_shebang[0:2] + os.path.normcase(old_shebang[2:])

        if not old_shebang.startswith(shebang):
            if os.path.basename(filename) in OK_ABS_SCRIPTS:
                logger.debug('Cannot make script %s relative' % filename)
            elif lines[0].strip() == new_shebang:
                logger.info('Script %s has already been made relative' % filename)
            else:
                logger.warn('Script %s cannot be made relative (it\'s not a normal script that starts with %s)'
                            % (filename, shebang))
            continue
        logger.notify('Making script %s relative' % filename)
        script = relative_script([new_shebang] + lines[1:])
        f = open(filename, 'wb')
        f.write('\n'.join(script).encode('utf-8'))
        f.close()

def relative_script(lines):
    "Return a script that'll work in a relocatable environment."
    activate = "import os; activate_this=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'activate_this.py'); exec(compile(open(activate_this).read(), activate_this, 'exec'), dict(__file__=activate_this)); del os, activate_this"
    # Find the last future statement in the script. If we insert the activation
    # line before a future statement, Python will raise a SyntaxError.
    activate_at = None
    for idx, line in reversed(list(enumerate(lines))):
        if line.split()[:3] == ['from', '__future__', 'import']:
            activate_at = idx + 1
            break
    if activate_at is None:
        # Activate after the shebang.
        activate_at = 1
    # Blank lines around the activation line keep the script readable.
    return lines[:activate_at] + ['', activate, ''] + lines[activate_at:]

def fixup_pth_and_egg_link(home_dir, sys_path=None):
    """Makes .pth and .egg-link files use relative paths"""
    home_dir = os.path.normcase(os.path.abspath(home_dir))
    if sys_path is None:
        sys_path = sys.path
    for path in sys_path:
        # An empty sys.path entry means the current directory.
        if not path:
            path = '.'
        if not os.path.isdir(path):
            continue
        path = os.path.normcase(os.path.abspath(path))
        if not path.startswith(home_dir):
            # Only touch files that live inside the environment itself.
            logger.debug('Skipping system (non-environment) directory %s' % path)
            continue
        for filename in os.listdir(path):
            filename = os.path.join(path, filename)
            if filename.endswith('.pth'):
                if not os.access(filename, os.W_OK):
                    logger.warn('Cannot write .pth file %s, skipping' % filename)
                else:
                    fixup_pth_file(filename)
            if filename.endswith('.egg-link'):
                if not os.access(filename, os.W_OK):
                    logger.warn('Cannot write .egg-link file %s, skipping' % filename)
                else:
                    fixup_egg_link(filename)

def fixup_pth_file(filename):
    """Rewrite absolute path entries in a .pth file as relative ones.

    Comment lines, ``import`` lines and already-relative entries are
    kept as-is.
    """
    lines = []
    prev_lines = []
    f = open(filename)
    prev_lines = f.readlines()
    f.close()
    for line in prev_lines:
        line = line.strip()
        if (not line or line.startswith('#') or line.startswith('import ')
                or os.path.abspath(line) != line):
            lines.append(line)
        else:
            new_value = make_relative_path(filename, line)
            if line != new_value:
                logger.debug('Rewriting path %s as %s (in %s)' % (line, new_value, filename))
            lines.append(new_value)
    # NOTE(review): prev_lines keeps trailing newlines while lines holds
    # stripped entries, so this equality looks like it can only hold for a
    # file with no newline at all — confirm whether the "no changes"
    # short-circuit ever fires.
    if lines == prev_lines:
        logger.info('No changes to .pth file %s' % filename)
        return
    logger.notify('Making paths in .pth file %s relative' % filename)
    f = open(filename, 'w')
    f.write('\n'.join(lines) + '\n')
    f.close()

def fixup_egg_link(filename):
    """Rewrite the first line of an .egg-link file as a relative path."""
    f = open(filename)
    link = f.readline().strip()
    f.close()
    if os.path.abspath(link) != link:
        logger.debug('Link in %s already relative' % filename)
        return
    new_link = make_relative_path(filename, link)
    logger.notify('Rewriting link %s in %s as %s' % (link, filename, new_link))
    f = open(filename, 'w')
    f.write(new_link)
    f.close()

def make_relative_path(source, dest, dest_is_directory=True):
    """
    Make a filename relative, where the filename is dest, and it is
    being referred to from the filename source.

        >>> make_relative_path('/usr/share/something/a-file.pth',
        ...
        ...                    '/usr/share/another-place/src/Directory')
        '../another-place/src/Directory'

        >>> make_relative_path('/usr/share/something/a-file.pth',
        ...                    '/home/user/src/Directory')
        '../../../home/user/src/Directory'

        >>> make_relative_path('/usr/share/a-file.pth', '/usr/share/')
        './'
    """
    source = os.path.dirname(source)
    if not dest_is_directory:
        # Keep the final component aside; relativize only the directory part.
        dest_filename = os.path.basename(dest)
        dest = os.path.dirname(dest)
    dest = os.path.normpath(os.path.abspath(dest))
    source = os.path.normpath(os.path.abspath(source))
    # Drop the common leading components, then climb out of what remains of
    # source ('..' per component) and descend into what remains of dest.
    dest_parts = dest.strip(os.path.sep).split(os.path.sep)
    source_parts = source.strip(os.path.sep).split(os.path.sep)
    while dest_parts and source_parts and dest_parts[0] == source_parts[0]:
        dest_parts.pop(0)
        source_parts.pop(0)
    full_parts = ['..']*len(source_parts) + dest_parts
    if not dest_is_directory:
        full_parts.append(dest_filename)
    if not full_parts:
        # Special case for the current directory (otherwise it'd be '')
        return './'
    return os.path.sep.join(full_parts)

############################################################
## Bootstrap script creation:

def create_bootstrap_script(extra_text, python_version=''):
    """
    Creates a bootstrap script, which is like this script but with
    extend_parser, adjust_options, and after_install hooks.

    This returns a string that (written to disk of course) can be used
    as a bootstrap script with your own customizations.  The script
    will be the standard virtualenv.py script, with your extra text
    added (your extra text should be Python code).

    If you include these functions, they will be called:

    ``extend_parser(optparse_parser)``:
        You can add or remove options from the parser here.

    ``adjust_options(options, args)``:
        You can change options here, or change the args (if you accept
        different kinds of arguments, be sure you modify ``args`` so it
        is only ``[DEST_DIR]``).

    ``after_install(options, home_dir)``:

        After everything is installed, this function is called.  This
        is probably the function you are most likely to use.
    An example would be::

        def after_install(options, home_dir):
            subprocess.call([join(home_dir, 'bin', 'easy_install'),
                             'MyPackage'])
            subprocess.call([join(home_dir, 'bin', 'my-package-script'),
                             'setup', home_dir])

    This example immediately installs a package, and runs a setup
    script from that package.

    If you provide something like ``python_version='2.5'`` then the
    script will start with ``#!/usr/bin/env python2.5`` instead of
    ``#!/usr/bin/env python``.  You can use this when the script must
    be run with a particular Python version.
    """
    filename = __file__
    if filename.endswith('.pyc'):
        # Read the .py source, not the compiled file.
        filename = filename[:-1]
    f = codecs.open(filename, 'r', encoding='utf-8')
    content = f.read()
    f.close()
    py_exe = 'python%s' % python_version
    content = (('#!/usr/bin/env %s\n' % py_exe)
               + '## WARNING: This file is generated\n'
               + content)
    # The marker is spelled as two adjacent literals so that this very
    # line is not itself matched and replaced.
    return content.replace('##EXT' 'END##', extra_text)

##EXTEND##

def convert(s):
    # Decode a base64-encoded, zlib-compressed payload back to text.
    b = base64.b64decode(s.encode('ascii'))
    return zlib.decompress(b).decode('utf-8')

##file site.py
SITE_PY = convert("""
nvUOgpNtVKGJ87f1MgeZzOKVFMuY+Qs5I/hOw3kdFdXyFXCDQjgVkErh4iCCCcIDkrg0G+aZFAWw WJpkchQAhabU1l9FYIUPebZPa93iBIBQBhm8dJ6NaMRMwkS7sF6hvjCNNzQz3SSw67zKS1IcwP/Z jHRRGmc3hKMihuJvU3mdZBkihLwQhHshDaxuEuDEeSTOqRXpBdNIhKy9uCWKRA28hEwHPCnv4lWR yjGLL+rW3WqEBpOVMGudMsdBy4rUK61aM9Ve3juMvrS4jtCslqUE4PXUE7pFno/FFHQ2YVPEKxav ap0T5wQ98kSdkCeoJfTF70DRE6XqlbQvkVdAsxBDBYs8TfM1kOwoCITYw0bGKPvMCW/hHfwLcPHf VFazZRA4I1nAGhQivw0UAgGTIDPN1RoJj9s0K7eVTJKxpsjLuSxpqIcR+4ARf2BjnGvwIa+0UePp 4irnq6RClTTVJjNhi5eFFevHVzxvmAZYbkU0M00bOq1wemmxjKfSuCRTuUBJ0Iv0yi47jBn0jEm2 uBIrtjLwDsgiE7Yg/YoFlc6ikuQEAAwWvjhLijqlRgoZTMQw0Kog+KsYTXqunSVgbzbLASokNt8z sD+A2z9AjNbLBOgzAwigYVBLwfJNk6pEB6HRR4Fv9E1/Hh849WyhbRMPuYiTVFv5OAvO6OFpWZL4 zmSBvcaaGApmmFXo2l1nQEcU88FgEATGHdoo8zVXQVVujoAVhBlnMpnWCRq+yQRNvf6hAh5FOAN7 3Ww7Cw80hOn0AajkdFmU+Qpf27l9AmUCY2GPYE9ckJaR7CB7nPgKyeeq9MI0RdvtsLNAPRRc/HT6 /uzL6SdxLC4blTZu67MrGPM0i4GtySIAU7WGbXQZtETFl6DuE+/BvBNTgD2j3iS+Mq5q4F1A/XNZ 02uYxsx7GZx+OHlzfjr5+dPpT5NPZ59PAUGwMzLYoymjeazBYVQRCAdw5VxF2r4GnR704M3JJ/sg mCRq8u03wG7wZHgtK2DicggzHotwFd8pYNBwTE1HiGOnAVjwcDQSr8Xh06cvDwlasSk2AAzMrtMU H060RZ8k2SIPR9T4V3bpj1lJaf/t8uibK3F8LMJf49s4DMCHapoyS/xI4vR5U0joWsGfYa5GQTCX CxC9G4kCOnxKfvGIO8CSQMtc2+lf8yQz75kr3SFIfwypB+AwmczSWClsPJmEQATq0POBDhE71yh1 Q+hYbNyuI40KfkoJC5thlzH+04NiPKV+iAaj6HYxjUBcV7NYSW5F04d+kwnqrMlkqAcEYSaJAYeL 1VAoTBPUWWUCfi1xHuqwqcpT/InwUQuQAOLWCrUkLpLeOkW3cVpLNXQmBUQcDltkREWbKOJHcFGG YImbpRuN2tQ0PAPNgHxpDlq0bFEOP3vg74C6Mps43Ojx3otphpj+mXcahAO4nCGqe6VaUFg7iovT C/Hy+eE+ujOw55xb6njN0UInWS3twwWslpEHRph7GXlx6bJAPYtPj3bDXEV2ZbqssNBLXMpVfivn gC0ysLPK4id6AztzmMcshlUEvU7+AKtQ4zfGuA/l2YO0oO8A1FsRFLP+Zun3OBggMwWKiDfWRGq9 62dTWJT5bYLOxnSjX4KtBGWJFtM4NoGzcB6ToUkEDQFecIaUWssQ1GFZs8NKeCNItBfzRrFGBO4c NfUVfb3J8nU24Z3wMSrd4ciyLgqWZl5s0CzBnngPVgiQzGFj1xCNoYDLL1C29gF5mD5MFyhLewsA BIZe0XbNgWW2ejRF3jXisAhj9EqQ8JYS/YVbMwRttQwxHEj0NrIPjJZASDA5q+CsatBMhrJmmsHA Dkl8rjuPeAvqA2hRMQKzOdTQuJGh3+URKGdx7iolpx9a5C9fvjDbqCXFVxCxKU4aXYgFGcuo2IBh 
TUAnGI+MozXEBmtwbgFMrTRriv1PIi/YG4P1vNCyDX4A7O6qqjg6OFiv15GOLuTl9YFaHPzxT99+ +6fnrBPnc+IfmI4jLTrUFh3QO/Roo++MBXptVq7Fj0nmcyPBGkryysgVRfy+r5N5Lo72R1Z/Ihc3 Zhr/Na4MKJCJGZSpDLQdNBg9UftPopdqIJ6QdbZthyP2S7RJtVbMt7rQo8rBEwC/ZZbXaKobTlDi GVg32KHP5bS+Du3gno00P2CqKKdDywP7L64QA58zDF8ZUzxBLUFsgRbfIf1PzDYxeUdaQyB50UR1 ds+bfi1miDt/uLxbX9MRGjPDRCF3oET4TR4sgLZxV3Lwo11btHuOa2s+niEwlj4wzKsdyyEKDuGC azF2pc7havR4QZrWrJpBwbiqERQ0OIlTprYGRzYyRJDo3ZjNPi+sbgF0akUOTXzArAK0cMfpWLs2 KzieEPLAsXhBTyS4yEedd895aes0pYBOi0c9qjBgb6HRTufAl0MDYCwG5c8Dbmm2KR9bi8Jr0AMs 5xgQMtiiw0z4xvUBB3uDHnbqWP1tvZnGfSBwkYYci3oQdEL5mEcoFUhTMfR7bmNxS9zuYDstDjGV WSYSabVFuNrKo1eodhqmRZKh7nUWKZqlOXjFVisSIzXvfWeB9kH4uM+YaQnUZGjI4TQ6Jm/PE8BQ t8Pw2XWNgQY3DoMYrRJF1g3JtIR/wK2g+AYFo4CWBM2CeaiU+RP7HWTOzld/2cIeltDIEG7TbW5I x2JoOOb9nkAy6mgMSEEGJOwKI7mOrA5S4DBngTzhhtdyq3QTjEiBnDkWhNQM4E4vvQ0OPonwBIQk FCHfVUoW4pkYwPK1RfVhuvt35VIThBg6DchV0NGLYzey4UQ1jltRDp+h/fgGnZUUOXDwFFweN9Dv srlhWht0AWfdV9wWKdDIFIcZjFxUrwxh3GDyH46dFg2xzCCGobyBvCMdM9IosMutQcOCGzDemrfH 0o/diAX2HYa5OpSrO9j/hWWiZrkKKWbSjl24H80VXdpYbM+T6QD+eAswGF15kGSq4xcYZfknBgk9 6GEfdG+yGBaZx+U6yUJSYJp+x/7SdPCwpPSM3MEn2k4dwEQx4nnwvgQBoaPPAxAn1ASwK5eh0m5/ F+zOKQ4sXO4+8Nzmy6OXV13ijrdFeOynf6lO76oyVrhaKS8aCwWuVteAo9KFycXZRh9e6sNt3CaU uYJdpPj46YtAQnBcdx1vHjf1huERm3vn5H0M6qDX7iVXa3bELoAIakVklIPw8Rz5cGQfO7kdE3sE kEcxzI5FMZA0n/wzcHYtFIyxP99kGEdrqwz8wOtvv5n0REZdJL/9ZnDPKC1i9In9sOUJ2pE5qWDX bEsZp+RqOH0oqJg1rGPbFCPW57T90zx21eNzarRs7Lu/BX4MFAypS/ARno8bsnWnih/fndoKT9up HcA6u1Xz2aNFgL19Pv0VdshKB9Vu4ySlcwWY/P4+Klezued4Rb/28CDtVDAOCfr2X+ryOXBDyNGE UXc62hk7MQHnnl2w+RSx6qKyp3MImiMwLy/APf7sQtUWzDDucz5eOOxRTd6M+5yJr1Gr+PldNJAF 5tFg0Ef2rez4/zHL5/+aST5wKubk+ne0ho8E9HvNhI0HQ9PGw4fVv+yu3TXAHmCetridO9zC7tB8 Vrkwzh2rJCWeou56KtaUrkCxVTwpAihz9vt64OAy6kPvt3VZ8tE1qcBClvt4HDsWmKllPL9eE7Mn Dj7ICjGxzWYUq3byevI+NRLq6LOdSdjsG/rlbJmbmJXMbpMS+oLCHYY/fPzxNOw3IRjHhU4PtyIP 9xsQ7iOYNtTECR/Thyn0mC7/vFS1ty4+QU1GgIkIa7L12gc/EGziCP1rcE9EyDuw5WN23KHPlnJ2 
M5GUOoBsil2doPhbfI2Y2IwCP/9LxQtKYoOZzNIaacWON2YfLupsRucjlQT/SqcKY+oQJQRw+G+R xtdiSJ3nGHrS3EjRqdu41N5nUeaYnCrqZH5wncyF/K2OU9zWy8UCcMHDK/0q4uEpAiXecU4DJy0q OavLpNoACWKV67M/Sn9wGk43PNGhhyQf8zABMSHiSHzCaeN7JtzckMsEB/wTD5wk7ruxg5OsENFz eJ/lExx1Qjm+Y0aqey5Pj4P2CDkAGABQmP9gpCN3/htJr9wDRlpzl6ioJT1SupGGnJwxhDIcYaSD f9NPnxFd3tqC5fV2LK93Y3ndxvK6F8trH8vr3Vi6IoELa4NWRhL6AlftY43efBs35sTDnMazJbfD 3E/M8QSIojAbbCNTnALtRbb4fI+AkNp2DpzpYZM/k3BSaZlzCFyDRO7HQyy9mTfJ605nysbRnXkq xp3dlkPk9z2IIkoVm1J3lrd5XMWRJxfXaT4FsbXojhsAY9FOJ+JYaXY7mXJ0t2WpBhf/9fmHjx+w OYIamPQG6oaLiIYFpzJ8GpfXqitNzeavAHakln4iDnXTAPceGFnjUfb4n3eU4YGMI9aUoZCLAjwA yuqyzdzcpzBsPddJUvo5MzkfNh2LQVYNmkltIdLJxcW7k88nAwr5Df534AqMoa0vHS4+poVt0PXf 3OaW4tgHhFrHthrj587Jo3XDEffbWAO248O3Hhw+xGD3hgn8Wf5LKQVLAoSKdPD3MYR68B7oq7YJ HfoYRuwk/7kna+ys2HeO7DkuiiP6fccO7QH8w07cY0yAANqFGpqdQbOZail9a153UNQB+kBf76u3 YO2tV3sn41PUTqLHAXQoa5ttd/+8cxo2ekpWb06/P/twfvbm4uTzD44LiK7cx08Hh+L0xy+C8kPQ gLFPFGNqRIWZSGBY3EInMc/hvxojP/O64iAx9Hp3fq5PalZY6oK5z2hzInjOaUwWGgfNOAptH+r8 I8Qo1Rskp6aI0nWo5gj3SyuuZ1G5zo+mUqUpOqu13nrpWjFTU0bn2hFIHzR2ScEgOMUMXlEWe2V2 hSWfAOo6qx6ktI22iSEpBQU76QLO+Zc5XfECpdQZnjSdtaK/DF1cw6tIFWkCO7lXoZUl3Q3TYxrG 0Q/tATfj1acBne4wsm7Is96KBVqtVyHPTfcfNYz2Ww0YNgz2DuadSUoPoQxsTG4TITbik5xQ3sFX u/R6DRQsGB70VbiIhukSmH0Mm2uxTGADATy5BOuL+wSA0FoJ/0DgyIkOyByzM8K3q/n+X0JNEL/1 L7/0NK/KdP9vooBdkOBUorCHmG7jd7DxiWQkTj++H4WMHKXmir/UWB4ADgkFQB1pp/wlPkGfDJVM Fzq/xNcH+EL7CfS61b2URam797vGIUrAEzUkr+GJMvQLMd3Lwh7jVEYt0Fj5YDHDCkI3DcF89sSn pUxTne9+9u78FHxHLMZACeJzt1MYjuMleISuk++4wrEFCg/Y4XWJbFyiC0tJFvPIa9YbtEaRo95e XoZdJwoMd3t1osBlnCgX7SFOm2GZcoIIWRnWwiwrs3arDVLYbUMUR5lhlphclJTA6vME8DI9jXlL BHslLPUwEXg+RU6yymQspskM9CioXFCoYxASJC7WMxLn5RnHwPNSmTIoeFhsyuR6WeHpBnSOqAQD m/948uX87AOVJRy+bLzuHuYc005gzEkkx5giiNEO+OKm/SFXTSZ9PKtfIQzUPvCn/YqzU455gE4/ Dizin/YrrkM7dnaCPANQUHXRFg/cADjd+uSmkQXG1e6D8eOmADaY+WAoFollLzrRw51flxNty5Yp obiPefmIA5xFYVPSdGc3Ja390XNcFHjONR/2N4K3fbJlPlPoetN5sy35zf10pBBLYgGjbmt/DJMd 
1mmqp+Mw2zZuoW2ttrG/ZE6s1Gk3y1CUgYhDt/PIZbJ+JaybMwd6adQdYOI7ja6RxF5VPvglG2gP w8PEEruzTzEdqYyFjABGMqSu/anBh0KLAAqEsn+HjuSOR08PvTk61uD+OWrdBbbxB1CEOheXajzy EjgRvvzGjiO/IrRQjx6J0PFUMpnlNk8MP+slepUv/Dn2ygAFMVHsyji7lkOGNTYwn/nE3hKCJW3r kfoyueozLOIMnNO7LRzelYv+gxODWosROu1u5KatjnzyYIPeUpCdBPPBl/EadH9RV0NeyS3n0L21 dNuh3g8Rsw+hqT59H4YYjvkt3LI+DeBeamhY6OH9tuUUltfGOLLWPraqmkL7QnuwsxK2ZpWiYxmn ONH4otYLaAzucWPyB/apThSyv3vqxJyYkAXKg7sgvbkNdINWOGHA5UpcOZpQOnxTTaPfzeWtTMFo gJEdYrXDr7baYRTZcEpvHthXY3exudj040ZvGsyOTDkGIkCFGL2Bnl0INTjgCv+idyJxdkPO8du/ no3F2w8/wb9v5EewoFjzOBZ/g9HF27yEbSUX7dJtCljAUfF+Ma8VFkYSNDqh4Isn0Fu78MiLpyG6 ssQvKbEKUmAybbni204ARZ4gFbI37oGpl4DfpqCr5YQaB7FvLQb6JdJge40L1oUc6JbRslqlaCac 4EiziJeD87O3px8+nUbVHTK2+Tlwgid+HhZORx8Nl3gMNhb2yazGJ1eOv/yDTIsed1nvNU29DO41 RQjbkcLuL/kmjdjuKeISAwai2MzzWYQtgdO5RK9ag/88craV99p3z7girOFIH541Tjw+BmqIX9r6 ZwANqY+eE/UkhOIp1orx42jQb4HHgiLa8OfpzXruBsR10Q9NsI1pM+uh392qwCXTWcOznER4Hdtl MHWgaRKr1XTm1gd+zIS+CAWUGx1vyEVcp5WQGWylaG9PN1KAgndL+lhCmFXYilGdG0Vn0nW8UU7u UazEAEcdUFE9nsNQoBC23j/GN2wGsNZQ1FwCDdAJUdo25U5XVc+WLMG8EyLq9eQbrJPspZvGoynM g/LGeNb4rzBP9BYZo2tZ6fnzg+Ho8kWT4EDB6JlX0DsrwNi5bLIHGrN4+vTpQPzH/U4PoxKleX4D 3hjA7nVWzun1FoOtJ2dXq+vQmzcR8ONsKS/hwRUFze3zOqOI5I6utCDS/jUwQlyb0DKjad8yxxyr K/l8mVvwOZU2GD9nCV13hBElicpW3xqF0SYjTcSSoBjCWM2SJOToBKzHJq+xFg+ji5pf5B1wfIJg xvgWD8Z4h71Ex5LyZi33WHSOxYAADyiljEejYmaqRgM8JxcbjebkLEuqpozkuXtmqq8AqOwtRpqv RLxGyTDzaBHDKev0WLVxrPOdLOptVPLZpRtnbM2SX9+HO7A2SFq+WBhM4aFZpFkuy5kxp7hiySyp HDCmHcLhznR5E1mfKOhBaQDqnazC3Eq0ffsHuy4uph/p+HjfjKSzhip7IRbHhOKslVcYRc34FH2y hLR8a76MYJQPFM3WnoA3lviDjqViDYF3b4dbzlhn+j4OTttoLukAOHQHlFWQlh09HeFcPGbhM9Nu uUUDP7QzJ9xuk7Kq43Sir32YoJ82sefpGk9bBrezwNN6K+Db5+D47uuMfXAcTHIN0hMzbk1FxrFY 6MhE5FaW+UVYRY5e3iH7SuBTIGXmE1MPbWJHl5ZdbaGpTtV0VDyCemaKl7Y45KZqplNw4mI+pvQm U+6wxXn2M0fp6grxWgxfjsVha+czKzZ4kxMg+2Qe+q4YdYOpOMEAM8f2vRji9bEYvhiLP+6AHm0Z 4OjQHaG9j21B2Ark5dWjyZgmUyJb2JfCfn9fncMImp5xHF21yd8l03dEpX9vUYkrBHWi8ot2onJr 
7K371s7HRzJcgeJYJHK+/0QhCTXSjW7ezuCEHxbQ79kcLV073lTUUOHcFDYj60YPOhrRuM12EFOU rtUX1++irmHDae8cMGkyrVRFe8scpjFq9FpEBQCTvqM0/IZ3u8B7TQrXP9t6xKqLACzYngiCrvTk A7OmYSOo9zqCj9IA9zCKCPEwtVEUrmQ9QkRCugeHmOhZ6xDb4fjfnXm4xGDbUWgHy2+/2YWnK5i9 RR09C7q70sITWVte0Sy3+fQH5jxG6ev6VQLjQGlEB5xVc1UluZlHmL3Md9DkNot5hZdB0sk0msRU um4Tb6X51i/0Yyh2QMlksBbgSdULPEi+pbstTxQlveEVNd8cvhibymAGpCfwMnr5TF8BSd3M5Qe+ jz3Wezd4qfsdRv/mAEsqv7d91dnN0LSOW3dB+YOFFD0bRRNLh8Yw3V8H0qxZLPDOxIaY7FvbC0De g7czBT/HXH6ag8MGG9KoD11XYzTSu021bRHg+03GNsl5UNdGkSLSu4Rtm/LcpTgfLQq6V78FwRAC cv4y5jfoCtbFkQ2xGZuCJ59DN5sTP9VNb90Z2xM0ttVNuGv63H/X3HWLwM7cJDN05u7Xl7o00H23 W9E+GnB4QxPiQSUSjcbvNyauHRjrHJr+CL3+IPndTjjTLWblPjAmYwfj/cSeGntj9lfxzP2OCWH7 fCGzW07c62y0pt2xGW2Of4inwMkv+NzeMEAZTXPNgbxfohv2JpwjO5HX12oS4+2OE9pkUz5XZ/dk tm3v6XI+GauN2W3hpUUAwnCTzrx1k+uBMUBX8i3TnA7l3E4jaGhKGnaykFUyZ5Ogt3YALuKIKfU3 gXhOIx6kEgPdqi6LEnbDA30XMefp9KU2N0BNAG8VqxuDuukx1lfTkmKl5DBTgsxx2laSDxCBjXjH NEwm9h3wyvPmmoVkbJlBZvVKlnHVXDHkZwQksOlqRqCic1xcJzzXSGWLS1zEEssbDlIYILPfn8HG 0ttU77hXYWS13cPZiXrokO9jrmxwjJHh4uTOXi/oXms1p6utXe/QNmu4zl6pBMtg7sojHaljZfxW 39/Fd8xyJB/9S4d/QN7dyks/C92qM/ZuLRrOM1chdC9swhsDyDj33cPY4YDujYutDbAd39cXllE6 HuaWxpaK2ifvVTjNaKMmgoQJo/dEkPyigEdGkDz4D4wg6VszwdBofLQe6C0TuCfUxOrBvYKyYQTo MwEi4QF26wJDYyqHbtJ9kavkbmAvlGZd6VTyGfOAHNm9m4xA8FWTys1Q9q6C2xVB8qWLHn9//vHN yTnRYnJx8vY/T76npCw8LmnZqgeH2LJ8n6m976V/u+E2nUjTN3iDbc8NsVzDpCF03ndyEHog9Ner 9S1oW5G5r7d16NT9dDsB4run3YK6TWX3Qu74ZbrGxE2faeVpB/opJ9WaX05mgnlkTupYHJqTOPO+ OTzRMtqJLW9bOCe9tatOtL+qbwHdEvce2SRrWgE8M0H+skcmpmLGBubZQWn/bz4oMxyrDc0NOiCF M+nc5EiXODKoyv//iZSg7GLc27GjOLZ3c1M7Ph5S9tJ5PPudycgQxCv3G3Tn5wr7XKZbqBAErPD0 PYWMiNF/+kDVph88UeJynwqL91HZXNlfuGbauf1rgkkGlb3vS3GCEh+zQuNFnbqJA7ZPpwM5fXQa lS+cShbQfAdA50Y8FbA3+kusEKcbEcLGUbtkmBxLdNSX9TnIo910sDe0ei72t5WdumWXQrzY3nDe quzUPQ65h7qnh6pNcZ9jgTFLc1s9qXhNkPk4U9AFX57zgWfoetsPX28vXxzZwwXkd3ztKBLKJhs4 hv3Sycbceamk052YpRxTuh7u1ZyQsG5x5UBln2Db3qZTkrJl/2PyHBjSwHvfHzIzPbyr9wdtTC3r 
HcGUxPCJGtG0nCIejbt9MupOt1FbXSBckPQAIB0VCLAQTEc3OgmiG87yHj7Xu8FpTdfxuidMoSMV lCzmcwT3ML5fg1+7OxUSP6g7o2j6c4M2B+olB+Fm34FbjbxQyHaT0J56wwdbXACuye7v/+IB/btp jLb74S6/2rZ62VsHyL4sZr5iZlCLROZxBEYG9OaQtDWWSxhBx2toGjq6DNXMDfkCHT/KpsXLtmmD Qc7sRHsA1igE/wfVIOdx """) ##file activate.sh ACTIVATE_SH = convert(""" eJytVVFvokAQfudXTLEPtTlLeo9tvMSmJpq02hSvl7u2wRUG2QR2DSxSe7n/frOACEVNLlceRHa+ nfl25pvZDswCnoDPQ4QoTRQsENIEPci4CsBMZBq7CAsuLOYqvmYKTTj3YxnBgiXBudGBjUzBZUJI BXEqgCvweIyuCjeG4eF2F5x14bcB9KQiQQWrjSddI1/oQIx6SYYeoFjzWIoIhYI1izlbhJjkKO7D M/QEmKfO9O7WeRo/zr4P7pyHwWxkwitcgwpQ5Ej96OX+PmiFwLeVjFUOrNYKaq1Nud3nR2n8nI2m k9H0friPTGVsUdptaxGrTEfpNVFEskxpXtUkkCkl1UNF9cgLBkx48J4EXyALuBtAwNYIjF5kcmUU abMKmMq1ULoiRbgsDEkTSsKSGFCJ6Z8vY/2xYiSacmtyAfCDdCNTVZoVF8vSTQOoEwSnOrngBkws MYGMBMg8/bMBLSYKS7pYEXP0PqT+ZmBT0Xuy+Pplj5yn4aM9nk72JD8/Wi+Gr98sD9eWSMOwkapD BbUv91XSvmyVkICt2tmXR4tWmrcUCsjWOpw87YidEC8i0gdTSOFhouJUNxR+4NYBG0MftoCTD9F7 2rTtxG3oPwY1b2HncYwhrlmj6Wq924xtGDWqfdNxap+OYxplEurnMVo9RWks+rH8qKEtx7kZT5zJ 4H7oOFclrN6uFe+d+nW2aIUsSgs/42EIPuOhXq+jEo3S6tX6w2ilNkDnIpHCWdEQhFgwj9pkk7FN l/y5eQvRSIQ5+TrL05lewxWpt/Lbhes5cJF3mLET1MGhcKCF+40tNWnUulxrpojwDo2sObdje3Bz N3QeHqf3D7OjEXMVV8LN3ZlvuzoWHqiUcNKHtwNd0IbvPGKYYM31nPKCgkUILw3KL+Y8l7aO1ArS Ad37nIU0fCj5NE5gQCuC5sOSu+UdI2NeXg/lFkQIlFpdWVaWZRfvqGiirC9o6liJ9FXGYrSY9mI1 D/Ncozgn13vJvsznr7DnkJWXsyMH7e42ljdJ+aqNDF1bFnKWFLdj31xtaJYK6EXFgqmV/ymD/ROG +n8O9H8f5vsGOWXsL1+1k3g= """) ##file activate.fish ACTIVATE_FISH = convert(""" eJydVW2P2jgQ/s6vmAZQoVpA9/WkqqJaTou0u6x2uZVOVWWZZEKsS+yc7UDpr+84bziQbauLxEvs eXnsZ56ZIWwTYSAWKUJWGAs7hMJgBEdhEwiMKnSIsBNywUMrDtziPBYmCeBDrFUG7v8HmCTW5n8u Fu7NJJim81Bl08EQTqqAkEupLOhCgrAQCY2hTU+DQVxIiqgkRNiEBphFEKy+kd1BaFvwFOUBuIxA oy20BKtAKp3xFMo0QNtCK5mhtMEA6BmSpUELKo38TThwLfguRVNaiRgs0llnEoIR29zfstf18/bv 5T17Wm7vAiiN3ONCzfbfwC3DtWXXDqHfAGX0q6z/bO82j3ebh1VwnbrduwTQbvwcRtesAfMGor/W L3fs6Xnz8LRlm9fV8/P61sM0LDNwCZjl9gSpCokJRzpryGQ5t8kNGFUt51QjOZGu0Mj35FlYlXEr yC09EVOp4lEXfF84Lz1qbhBsgl59vDedXI3rTV03xipduSgt9kLytI3XmBp3aV6MPoMQGNUU62T6 
uQdeefTy1Hfj10zVHg2pq8fXDoHBiOv94csfXwN49xECqWREy7pwukKfvxdMY2j23vXDPuuxxeE+ JOdCOhxCE3N44B1ZeSLuZh8Mmkr2wEPAmPfKWHA2uxIRjEopdbQYjDz3BWOf14/scfmwoki1eQvX ExBdF60Mqh+Y/QcX4uiH4Amwzx79KOVFtbL63sXJbtcvy8/3q5rupmO5CnE91wBviQAhjUUegYpL vVEbpLt2/W+PklRgq5Ku6mp+rpMhhCo/lXthQTxJ2ysO4Ka0ad97S7VT/n6YXus6fzk3fLnBZW5C KDC6gSO62QDqgFqLCCtPmjegjnLeAdArtSE8VYGbAJ/aLb+vnQutFhk768E9uRbSxhCMzdgEveYw IZ5ZqFKl6+kz7UR4U+buqQZXu9SIujrAfD7f0FXpozB4Q0gwp31H9mVTZGGC4b871/wm7lvyDLu1 FUyvTj/yvD66k3UPTs08x1AQQaGziOl0S1qRkPG9COtBTSTWM9NzQ4R64B+Px/l3tDzCgxv5C6Ni e+QaF9xFWrxx0V/G5uvYQOdiZzvYpQUVQSIsTr1TTghI33GnPbTA7/GCqcE3oE3GZurq4HeQXQD6 32XS1ITj/qLjN72ob0hc5C9bzw8MhfmL """) ##file activate.csh ACTIVATE_CSH = convert(""" eJx9VG1P2zAQ/u5fcYQKNgTNPtN1WxlIQ4KCUEGaxuQ6yYVYSuzKdhqVX7+zk3bpy5YPUXL3PPfc ne98DLNCWshliVDV1kGCUFvMoJGugMjq2qQIiVSxSJ1cCofD1BYRnOVGV0CfZ0N2DD91DalQSjsw tQLpIJMGU1euvPe7QeJlkKzgWixlhnAt4aoUVsLnLBiy5NtbJWQ5THX1ZciYKKWwkOFaE04dUm6D r/zh7pq/3D7Nnid3/HEy+wFHY/gEJydg0aFaQrBFgz1c5DG1IhTs+UZgsBC2GMFBlaeH+8dZXwcW VPvCjXdlAvCfQsE7al0+07XjZvrSCUevR5dnkVeKlFYZmUztG4BdzL2u9KyLVabTU0bdfg7a0hgs cSmUg6UwUiQl2iHrcbcVGNvPCiLOe7+cRwG13z9qRGgx2z6DHjfm/Op2yqeT+xvOLzs0PTKHDz2V tkckFHoQfQRXoGJAj9el0FyJCmEMhzgMS4sB7KPOE2ExoLcSieYwDvR+cP8cg11gKkVJc2wRcm1g QhYFlXiTaTfO2ki0fQoiFM4tLuO4aZrhOzqR4dIPcWx17hphMBY+Srwh7RTyN83XOWkcSPh1Pg/k TXX/jbJTbMtUmcxZ+/bbqOsy82suFQg/BhdSOTRhMNBHlUarCpU7JzBhmkKmRejKOQzayQe6MWoa n1wqWmuh6LZAaHxcdeqIlVLhIBJdO9/kbl0It2oEXQj+eGjJOuvOIR/YGRqvFhttUB2XTvLXYN2H 37CBdbW2W7j2r2+VsCn0doVWcFG1/4y1VwBjfwAyoZhD """) ##file activate.bat ACTIVATE_BAT = convert(""" eJx9UdEKgjAUfW6wfxjiIH+hEDKUFHSKLCMI7kNOEkIf9P9pTJ3OLJ/03HPPPed4Es9XS9qqwqgT PbGKKOdXL4aAFS7A4gvAwgijuiKlqOpGlATS2NeMLE+TjJM9RkQ+SmqAXLrBo1LLIeLdiWlD6jZt r7VNubWkndkXaxg5GO3UaOOKS6drO3luDDiO5my3iA0YAKGzPRV1ack8cOdhysI0CYzIPzjSiH5X 0QcvC8Lfaj0emsVKYF2rhL5L3fCkVjV76kShi59NHwDniAHzkgDgqBcwOgTMx+gDQQqXCw== """) ##file deactivate.bat DEACTIVATE_BAT = convert(""" 
eJxzSE3OyFfIT0vj4ipOLVEI8wwKCXX0iXf1C7Pl4spMU0hJTcvMS01RiPf3cYmHyQYE+fsGhCho cCkAAUibEkTEVhWLMlUlLk6QGixStlyaeCyJDPHw9/Pw93VFsQguim4ZXAJoIUw5DhX47XUM8UCx EchHtwsohN1bILUgw61c/Vy4AJYPYm4= """) ##file activate.ps1 ACTIVATE_PS = convert(""" eJylWdmS40Z2fVeE/oHT6rCloNUEAXDThB6wAyQAEjsB29GBjdgXYiWgmC/zgz/Jv+AEWNVd3S2N xuOKYEUxM+/Jmzfvcm7W//zXf/+wUMOoXtyi1F9kbd0sHH/hFc2iLtrK9b3FrSqyxaVQwr8uhqJd uHaeg9mqzRdR8/13Pyy8qPLdJh0+LMhi0QCoXxYfFh9WtttEnd34H8p6/f1300KauwrULws39e18 0ZaLNm9rgN/ZVf3h++/e124Vlc0vKsspHy+Yyi5+XbzPhijvCtduoiL/kA1ukWV27n0o7Sb8LIFj CvWR5GQgUJdp1Pw8TS9+rPy6SDv/+e3d+0+4qw8f3v20+PliV37efEYBAB9FTKC+RHn/Cfxn3rdv 00Fube5O+iyCtHDs9BfPfz3q4sfFv9d91Ljhfy7ei0VO+nVTtdOkv/jpt0l2AX6iG1jXgKnnDuD4 ke2k/i8fzzz5UedkVcP4pwF+Wvz2FJl+3vt598urXf5Y6LNA5WcFOP7r0sW7b9a+W/xcu0Xpv5zk Kfq3P9Dz9di/fCxS72MXVU1rpx9L4Bxl85Wmn5a+zP76Zuh3pL9ROWr87PN+//GHIl+oOtvn9XSU qH+p0gQBFnx1uV+JLH5O5zv+PXW+WepXVVHZT0+oQezkIATcIm+ivPV/z5J/+cYj3ir4w0Lx09vC e5n/y5/Y5LPPfdrqb88ga/PabxZRVfmp39l588m/6u+/e+OpP+dF7n1WZpJ9//Z4v372fDDz9eHB 7Juvs/BLMHzrxL9+9twXpJfhd1/DrpQ5Euu/vlss3wp9HXC/54C/Ld69m6zwdx3tC0d8daSv0V8B n4b9YYF53sJelJV/ix6LZspw/sJtqyl5LJ5r/23htA1Imfm/gt9R7dqVB1LjhydAX4Gb+zksQF59 9+P7H//U+376afFuvh2/T6P85Xr/5c8C6OXyFY4BGuN+EE0+GeR201b+wkkLN5mmBY5TfMw8ngqL CztXxCSXKMCYrRIElWkEJlEPYsSOeKBVZCAQTKBhApMwRFQzmCThE0YQu2CdEhgjbgmk9GluHpfR /hhwJCZhGI5jt5FsAkOrObVyE6g2y1snyhMGFlDY1x+BoHpCMulTj5JYWNAYJmnKpvLxXgmQ8az1 4fUGxxcitMbbhDFcsiAItg04E+OSBIHTUYD1HI4FHH4kMREPknuYRMyhh3AARWMkfhCketqD1CWJ mTCo/nhUScoQcInB1hpFhIKoIXLo5jLpwFCgsnLCx1QlEMlz/iFEGqzH3vWYcpRcThgWnEKm0QcS rA8ek2a2IYYeowUanOZOlrbWSJUC4c7y2EMI3uJPMnMF/SSXdk6E495VLhzkWHps0rOhKwqk+xBI DhJirhdUCTamMfXz2Hy303hM4DFJ8QL21BcPBULR+gcdYxoeiDqOFSqpi5B5PUISfGg46gFZBPo4 jdh8lueaWuVSMTURfbAUnLINr/QYuuYoMQV6l1aWxuZVTjlaLC14UzqZ+ziTGDzJzhiYoPLrt3uI tXkVR47kAo09lo5BD76CH51cTt1snVpMOttLhY93yxChCQPI4OBecS7++h4p4Bdn4H97bJongtPk s9gQnXku1vzsjjmX4/o4YUDkXkjHwDg5FXozU0fW4y5kyeYW0uJWlh536BKr0kMGjtzTkng6Ep62 
uTWnQtiIqKnEsx7e1hLtzlXs7Upw9TwEnp0t9yzCGgUJIZConx9OHJArLkRYW0dW42G9OeR5Nzwk yk1mX7du5RGHT7dka7N3AznmSif7y6tuKe2N1Al/1TUPRqH6E2GLVc27h9IptMLkCKQYRqPQJgzV 2m6WLsSipS3v3b1/WmXEYY1meLEVIU/arOGVkyie7ZsH05ZKpjFW4cpY0YkjySpSExNG2TS8nnJx nrQmWh2WY3cP1eISP9wbaVK35ZXc60yC3VN/j9n7UFoK6zvjSTE2+Pvz6Mx322rnftfP8Y0XKIdv Qd7AfK0nexBTMqRiErvCMa3Hegpfjdh58glW2oNMsKeAX8x6YJLZs9K8/ozjJkWL+JmECMvhQ54x 9rsTHwcoGrDi6Y4I+H7yY4/rJVPAbYymUH7C2D3uiUS3KQ1nrCAUkE1dJMneDQIJMQQx5SONxoEO OEn1/Ig1eBBUeEDRuOT2WGGGE4bNypBLFh2PeIg3bEbg44PHiqNDbGIQm50LW6MJU62JHCGBrmc9 2F7WBJrrj1ssnTAK4sxwRgh5LLblhwNAclv3Gd+jC/etCfyfR8TMhcWQz8TBIbG8IIyAQ81w2n/C mHWAwRzxd3WoBY7BZnsqGOWrOCKwGkMMNfO0Kci/joZgEocLjNnzgcmdehPHJY0FudXgsr+v44TB I3jnMGnsK5veAhgi9iXGifkHMOC09Rh9cAw9sQ0asl6wKMk8mpzFYaaDSgG4F0wisQDDBRpjCINg FIxhlhQ31xdSkkk6odXZFpTYOQpOOgw9ugM2cDQ+2MYa7JsEirGBrOuxsQy5nPMRdYjsTJ/j1iNw FeSt1jY2+dd5yx1/pzZMOQXUIDcXeAzR7QlDRM8AMkUldXOmGmvYXPABjxqkYKO7VAY6JRU7kpXr +Epu2BU3qFFXClFi27784LrDZsJwbNlDw0JzhZ6M0SMXE4iBHehCpHVkrQhpTFn2dsvsZYkiPEEB GSEAwdiur9LS1U6P2U9JhGp4hnFpJo4FfkdJHcwV6Q5dV1Q9uNeeu7rV8PAjwdFg9RLtroifOr0k uOiRTo/obNPhQIf42Fr4mtThWoSjitEdAmFW66UCe8WFjPk1YVNpL9srFbond7jrLg8tqAasIMpy zkH0SY/6zVAwJrEc14zt14YRXdY+fcJ4qOd2XKB0/Kghw1ovd11t2o+zjt+txndo1ZDZ2T+uMVHT VSXhedBAHoJIID9xm6wPQI3cXY+HR7vxtrJuCKh6kbXaW5KkVeJsdsjqsYsOwYSh0w5sMbu7LF8J 5T7U6LJdiTx+ca7RKlulGgS5Z1JSU2Llt32cHFipkaurtBrvNX5UtvNZjkufZ/r1/XyLl6yOpytL Km8Fn+y4wkhlqZP5db0rooqy7xdL4wxzFVTX+6HaxuQJK5E5B1neSSovZ9ALB8091dDbbjVxhWNY Ve5hn1VnI9OF0wpvaRm7SZuC1IRczwC7GnkhPt3muHV1YxUJfo+uh1sYnJy+vI0ZwuPV2uqWJYUH bmBsi1zmFSxHrqwA+WIzLrHkwW4r+bad7xbOzJCnKIa3S3YvrzEBK1Dc0emzJW+SqysQfdEDorQG 9ZJlbQzEHQV8naPaF440YXzJk/7vHGK2xwuP+Gc5xITxyiP+WQ4x18oXHjFzCBy9kir1EFTAm0Zq LYwS8MpiGhtfxiBRDXpxDWxk9g9Q2fzPPAhS6VFDAc/aiNGatUkPtZIStZFQ1qD0IlJa/5ZPAi5J ySp1ETDomZMnvgiysZSBfMikrSDte/K5lqV6iwC5q7YN9I1dBZXUytDJNqU74MJsUyNNLAPopWK3 tzmLkCiDyl7WQnj9sm7Kd5kzgpoccdNeMw/6zPVB3pUwMgi4C7hj4AMFAf4G27oXH8NNT9zll/sK 
S6wVlQwazjxWKWy20ZzXb9ne8ngGalPBWSUSj9xkc1drsXkZ8oOyvYT3e0rnYsGwx85xZB9wKeKg cJKZnamYwiaMymZvzk6wtDUkxmdUg0mPad0YHtvzpjEfp2iMxvORhnx0kCVLf5Qa43WJsVoyfEyI pzmf8ruM6xBr7dnBgzyxpqXuUPYaKahOaz1LrxNkS/Q3Ae5AC+xl6NbxAqXXlzghZBZHmOrM6Y6Y ctAkltwlF7SKEsShjVh7QHuxMU0a08/eiu3x3M+07OijMcKFFltByXrpk8w+JNnZpnp3CfgjV1Ax gUYCnWwYow42I5wHCcTzLXK0hMZN2DrPM/zCSqe9jRSlJnr70BPE4+zrwbk/xVIDHy2FAQyHoomT Tt5jiM68nBQut35Y0qLclLiQrutxt/c0OlSqXAC8VrxW97lGoRWzhOnifE2zbF05W4xuyhg7JTUL aqJ7SWDywhjlal0b+NLTpERBgnPW0+Nw99X2Ws72gOL27iER9jgzj7Uu09JaZ3n+hmCjjvZpjNst vOWWTbuLrg+/1ltX8WpPauEDEvcunIgTxuMEHweWKCx2KQ9DU/UKdO/3za4Szm2iHYL+ss9AAttm gZHq2pkUXFbV+FiJCKrpBms18zH75vax5jSo7FNunrVWY3Chvd8KKnHdaTt/6ealwaA1x17yTlft 8VBle3nAE+7R0MScC3MJofNCCkA9PGKBgGMYEwfB2QO5j8zUqa8F/EkWKCzGQJ5EZ05HTly1B01E z813G5BY++RZ2sxbQS8ZveGPJNabp5kXAeoign6Tlt5+L8i5ZquY9+S+KEUHkmYMRFBxRrHnbl2X rVemKnG+oB1yd9+zT+4c43jQ0wWmQRR6mTCkY1q3VG05Y120ZzKOMBe6Vy7I5Vz4ygPB3yY4G0FP 8RxiMx985YJPXsgRU58EuHj75gygTzejP+W/zKGe78UQN3yOJ1aMQV9hFH+GAfLRsza84WlPLAI/ 9G/5JdcHftEfH+Y3/fHUG7/o8bv98dzzy3e8S+XCvgqB+VUf7sH0yDHpONdbRE8tAg9NWOzcTJ7q TuAxe/AJ07c1Rs9okJvl1/0G60qvbdDzz5zO0FuPFQIHNp9y9Bd1CufYVx7dB26mAxwa8GMNrN/U oGbNZ3EQ7inLzHy5tRg9AXJrN8cB59cCUBeCiVO7zKM0jU0MamhnRThkg/NMmBOGb6StNeD9tDfA 7czsAWopDdnGoXUHtA+s/k0vNPkBcxEI13jVd/axp85va3LpwGggXXWw12Gwr/JGAH0b8CPboiZd QO1l0mk/UHukud4C+w5uRoNzpCmoW6GbgbMyaQNkga2pQINB18lOXOCJzSWPFOhZcwzdgrsQnne7 nvjBi+7cP2BbtBeDOW5uOLGf3z94FasKIguOqJl+8ss/6Kumns4cuWbqq5592TN/RNIbn5Qo6qbi O4F0P9txxPAwagqPlftztO8cWBzdN/jz3b7GD6JHYP/Zp4ToAMaA74M+EGSft3hEGMuf8EwjnTk/ nz/P7SLipB/ogQ6xNX0fDqNncMCfHqGLCMM0ZzFa+6lPJYQ5p81vW4HkCvidYf6kb+P/oB965g8K C6uR0rdjX1DNKc5pOSTquI8uQ6KXxYaKBn+30/09tK4kMpJPgUIQkbENEPbuezNPPje2Um83SgyX GTCJb6MnGVIpgncdQg1qz2bvPfxYD9fewCXDomx9S+HQJuX6W3VAL+v5WZMudRQZk9ZdOk6GIUtC PqEb/uwSIrtR7/edzqgEdtpEwq7p2J5OQV+RLrmtTvFwFpf03M/VrRyTZ73qVod7v7Jh2Dwe5J25 JqFOU2qEu1sP+CRotklediycKfLjeIZzjJQsvKmiGSNQhxuJpKa+hoWUizaE1PuIRGzJqropwgVB 
oo1hr870MZLgnXF5ZIpr6mF0L8aSy2gVnTAuoB4WEd4d5NPVC9TMotYXERKlTcwQ2KiB/C48AEfH Qbyq4CN8xTFnTvf/ebOc3isnjD95s0QF0nx9s+y+zMmz782xL0SgEmRpA3x1w1Ff9/74xcxKEPdS IEFTz6GgU0+BK/UZ5Gwbl4gZwycxEw+Kqa5QmMkh4OzgzEVPnDAiAOGBFaBW4wkDmj1G4RyElKgj NlLCq8zsp085MNh/+R4t1Q8yxoSv8PUpTt7izZwf2BTHZZ3pIZpUIpuLkL1nNL6sYcHqcKm237wp T2+RCjgXweXd2Zp7ZM8W6dG5bZsqo0nrJBTx8EC0+CQQdzEGnabTnkzofu1pYkWl4E7XSniECdxy vLYavPMcL9LW5SToJFNnos+uqweOHriUZ1ntIYZUonc7ltEQ6oTRtwOHNwez2sVREskHN+bqG3ua eaEbJ8XpyO8CeD9QJc8nbLP2C2R3A437ISUNyt5Yd0TbDNcl11/DSsOzdbi/VhCC0KE6v1vqVNkq 45ZnG6fiV2NwzInxCNth3BwL0+8814jE6+1W1EeWtpWbSZJOJNYXmWRXa7vLnAljE692eHjZ4y5u y1u63De0IzKca7As48Z3XshVF+3XiLNz0JIMh/JOpbiNLlMi672uO0wYzOCZjRxcxj3D+gVenGIE MvFUGGXuRps2RzMcgWIRolHXpGUP6sMsQt1hspUBnVKUn/WQj2u6j3SXd9Xz0QtEzoM7qTu5y7gR q9gNNsrlEMLdikBt9bFvBnfbUIh6voTw7eDsyTmPKUvF0bHqWLbHe3VRHyRZnNeSGKsB73q66Vsk taxWYmwz1tYVFG/vOQhlM0gUkyvIab3nv2caJ1udU1F3pDMty7stubTE4OJqm0i0ECfrJIkLtraC HwRWKzlqpfhEIqYH09eT9WrOhQyt8YEoyBlnXtAT37WHIQ03TIuEHbnRxZDdLun0iok9PUC79prU m5beZzfQUelEXnhzb/pIROKx3F7qCttYIFGh5dXNzFzID7u8vKykA8Uejf7XXz//S4nKvW//ofS/ QastYw== """) ##file distutils-init.py DISTUTILS_INIT = convert(""" eJytV1uL4zYUfvevOE0ottuMW9q3gVDa3aUMXXbLMlDKMBiNrSTqOJKRlMxkf33PkXyRbGe7Dw2E UXTu37lpxLFV2oIyifAncxmOL0xLIfcG+gv80x9VW6maw7o/CANSWWBwFtqeWMPlGY6qPjV8A0bB C4eKSTgZ5LRgFeyErMEeOBhbN+Ipgeizhjtnhkn7DdyjuNLPoCS0l/ayQTG0djwZC08cLXozeMss aG5EzQ0IScpnWtHSTXuxByV/QCmxE7y+eS0uxWeoheaVVfqSJHiU7Mhhi6gULbOHorshkrEnKxpT 0n3A8Y8SMpuwZx6aoix3ouFlmW8gHRSkeSJ2g7hU+kiHLDaQw3bmRDaTGfTnty7gPm0FHbIBg9U9 oh1kZzAFLaue2R6htPCtAda2nGlDSUJ4PZBgCJBGVcwKTAMz/vJiLD+Oin5Z5QlvDPdulC6EsiyE NFzb7McNTKJzbJqzphx92VKRFY1idenzmq3K0emRcbWBD0ryqc4NZGmKOOOX9Pz5x+/l27tP797c f/z0d+4NruGNai8uAM0bfsYaw8itFk8ny41jsfpyO+BWlpqfhcG4yxLdi/0tQqoT4a8Vby382mt8 p7XSo7aWGdPBc+b6utaBmCQ7rQKQoWtAuthQCiold2KfJIPTT8xwg9blPumc+YDZC/wYGdAyHpJk vUbHbHWAp5No6pK/WhhLEWrFjUwtPEv1Agf8YmnsuXUQYkeZoHm8ogP16gt2uHoxcEMdf2C6pmbw hUMsWGhanboh4IzzmsIpWs134jVPqD/c74bZHdY69UKKSn/+KfVhxLgUlToemayLMYQOqfEC61bh 
cbhwaqoGUzIyZRFHPmau5juaWqwRn3mpWmoEA5nhzS5gog/5jbcFQqOZvmBasZtwYlG93k5GEiyw buHhMWLjDarEGpMGB2LFs5nIJkhp/nUmZneFaRth++lieJtHepIvKgx6PJqIlD9X2j6pG1i9x3pZ 5bHuCPFiirGHeO7McvoXkz786GaKVzC9DSpnOxJdc4xm6NSVq7lNEnKdVlnpu9BNYoKX2Iq3wvgh gGEUM66kK6j4NiyoneuPLSwaCWDxczgaolEWpiMyDVDb7dNuLAbriL8ig8mmeju31oNvQdpnvEPC 1vAXbWacGRVrGt/uXN/gU0CDDwgooKRrHfTBb1/s9lYZ8ZqOBU0yLvpuP6+K9hLFsvIjeNhBi0KL MlOuWRn3FRwx5oHXjl0YImUx0+gLzjGchrgzca026ETmYJzPD+IpuKzNi8AFn048Thd63OdD86M6 84zE8yQm0VqXdbbgvub2pKVnS76icBGdeTHHXTKspUmr4NYo/furFLKiMdQzFjHJNcdAnMhltBJK 0/IKX3DVFqvPJ2dLE7bDBkH0l/PJ29074+F0CsGYOxsb7U3myTUncYfXqnLLfa6sJybX4g+hmcjO kMRBfA1JellfRRKJcyRpxdS4rIl6FdmQCWjo/o9Qz7yKffoP4JHjOvABcRn4CZIT2RH4jnxmfpVG qgLaAvQBNfuO6X0/Ux02nb4FKx3vgP+XnkX0QW9pLy/NsXgdN24dD3LxO2Nwil7Zlc1dqtP3d7/h kzp1/+7hGBuY4pk0XD/0Ao/oTe/XGrfyM773aB7iUhgkpy+dwAMalxMP0DrBcsVw/6p25+/hobP9 GBknrWExDhLJ1bwt1NcCNblaFbMKCyvmX0PeRaQ= """) ##file distutils.cfg DISTUTILS_CFG = convert(""" eJxNj00KwkAMhfc9xYNuxe4Ft57AjYiUtDO1wXSmNJnK3N5pdSEEAu8nH6lxHVlRhtDHMPATA4uH xJ4EFmGbvfJiicSHFRzUSISMY6hq3GLCRLnIvSTnEefN0FIjw5tF0Hkk9Q5dRunBsVoyFi24aaLg 9FDOlL0FPGluf4QjcInLlxd6f6rqkgPu/5nHLg0cXCscXoozRrP51DRT3j9QNl99AP53T2Q= """) ##file activate_this.py ACTIVATE_THIS = convert(""" eJyNU01v2zAMvetXEB4K21jmDOstQA4dMGCHbeihlyEIDMWmG62yJEiKE//7kXKdpN2KzYBt8euR fKSyLPs8wiEo8wh4wqZTGou4V6Hm0wJa1cSiTkJdr8+GsoTRHuCotBayiWqQEYGtMCgfD1KjGYBe 5a3p0cRKiAe2NtLADikftnDco0ko/SFEVgEZ8aRC5GLux7i3BpSJ6J1H+i7A2CjiHq9z7JRZuuQq siwTIvpxJYCeuWaBpwZdhB+yxy/eWz+ZvVSU8C4E9FFZkyxFsvCT/ZzL8gcz9aXVE14Yyp2M+2W0 y7n5mp0qN+avKXvbsyyzUqjeWR8hjGE+2iCE1W1tQ82hsCZN9UzlJr+/e/iab8WfqsmPI6pWeUPd FrMsd4H/55poeO9n54COhUs+sZNEzNtg/wanpjpuqHJaxs76HtZryI/K3H7KJ/KDIhqcbJ7kI4ar XL+sMgXnX0D+Te2Iy5xdP8yueSlQB/x/ED2BTAtyE3K4SYUN6AMNfbO63f4lBW3bUJPbTL+mjSxS PyRfJkZRgj+VbFv+EzHFi5pKwUEepa4JslMnwkowSRCXI+m5XvEOvtuBrxHdhLalG0JofYBok6qj YdN2dEngUlbC4PG60M1WEN0piu7Nq7on0mgyyUw3iV1etLo6r/81biWdQ9MWHFaePWZYaq+nmp+t s3az+sj7eA0jfgPfeoN1 """) MH_MAGIC = 0xfeedface MH_CIGAM = 0xcefaedfe 
MH_MAGIC_64 = 0xfeedfacf
MH_CIGAM_64 = 0xcffaedfe
FAT_MAGIC = 0xcafebabe
BIG_ENDIAN = '>'
LITTLE_ENDIAN = '<'
LC_LOAD_DYLIB = 0xc

# Largest representable int: sys.maxsize on Python 3, sys.maxint on Python 2.
# getattr() keeps the attribute missing on the "other" major version from
# being touched at parse time.  (`majver` is defined earlier in this module.)
maxint = getattr(sys, 'maxsize') if majver == 3 else getattr(sys, 'maxint')


class fileview(object):
    """
    A proxy for file-like objects that exposes a given view of a file.
    Modified from macholib.

    All seeks/reads/writes are confined to the window
    [start, start + size) of the underlying file object.
    """

    def __init__(self, fileobj, start=0, size=maxint):
        if isinstance(fileobj, fileview):
            self._fileobj = fileobj._fileobj
        else:
            self._fileobj = fileobj
        self._start = start
        self._end = start + size
        self._pos = 0

    def __repr__(self):
        return '<fileview [%d, %d] %r>' % (
            self._start, self._end, self._fileobj)

    def tell(self):
        return self._pos

    def _checkwindow(self, seekto, op):
        # Refuse any operation that would escape the [start, end] window.
        if not (self._start <= seekto <= self._end):
            raise IOError("%s to offset %d is outside window [%d, %d]" % (
                op, seekto, self._start, self._end))

    def seek(self, offset, whence=0):
        seekto = offset
        if whence == os.SEEK_SET:
            seekto += self._start
        elif whence == os.SEEK_CUR:
            seekto += self._start + self._pos
        elif whence == os.SEEK_END:
            seekto += self._end
        else:
            raise IOError("Invalid whence argument to seek: %r" % (whence,))
        self._checkwindow(seekto, 'seek')
        self._fileobj.seek(seekto)
        self._pos = seekto - self._start

    def write(self, bytes):
        here = self._start + self._pos
        # Both ends of the write must stay inside the window.
        self._checkwindow(here, 'write')
        self._checkwindow(here + len(bytes), 'write')
        self._fileobj.seek(here, os.SEEK_SET)
        self._fileobj.write(bytes)
        self._pos += len(bytes)

    def read(self, size=maxint):
        assert size >= 0
        here = self._start + self._pos
        self._checkwindow(here, 'read')
        # Clamp the read so it never crosses the end of the window.
        size = min(size, self._end - here)
        self._fileobj.seek(here, os.SEEK_SET)
        bytes = self._fileobj.read(size)
        self._pos += len(bytes)
        return bytes


def read_data(file, endian, num=1):
    """
    Read a given number of 32-bits unsigned integers from the given file
    with the given endianness.

    Returns a single int when num == 1, otherwise a tuple of ints.
    """
    res = struct.unpack(endian + 'L' * num, file.read(num * 4))
    if len(res) == 1:
        return res[0]
    return res


def mach_o_change(path, what, value):
    """
    Replace a given name (what) in any LC_LOAD_DYLIB command found in
    the given binary with a new name (value), provided it's shorter.
    """

    def do_macho(file, bits, endian):
        # Read Mach-O header (the magic number is assumed read by the caller)
        cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags = read_data(file, endian, 6)
        # 64-bits header has one more field (reserved); skip it.
        if bits == 64:
            read_data(file, endian)
        # The header is followed by ncmds commands
        for n in range(ncmds):
            where = file.tell()
            # Read command header
            cmd, cmdsize = read_data(file, endian, 2)
            if cmd == LC_LOAD_DYLIB:
                # The first data field in LC_LOAD_DYLIB commands is the
                # offset of the name, starting from the beginning of the
                # command.
                name_offset = read_data(file, endian)
                file.seek(where + name_offset, os.SEEK_SET)
                # Read the NUL terminated string
                load = file.read(cmdsize - name_offset).decode()
                load = load[:load.index('\0')]
                # If the string is what is being replaced, overwrite it.
                # The caller guarantees len(value) <= len(what), so the
                # new NUL-terminated name always fits in place.
                if load == what:
                    file.seek(where + name_offset, os.SEEK_SET)
                    file.write(value.encode() + '\0'.encode())
            # Seek to the next command
            file.seek(where + cmdsize, os.SEEK_SET)

    def do_file(file, offset=0, size=maxint):
        file = fileview(file, offset, size)
        # Read magic number
        magic = read_data(file, BIG_ENDIAN)
        if magic == FAT_MAGIC:
            # Fat binaries contain nfat_arch Mach-O binaries
            nfat_arch = read_data(file, BIG_ENDIAN)
            for n in range(nfat_arch):
                # Read arch header
                cputype, cpusubtype, offset, size, align = read_data(file, BIG_ENDIAN, 5)
                do_file(file, offset, size)
        elif magic == MH_MAGIC:
            do_macho(file, 32, BIG_ENDIAN)
        elif magic == MH_CIGAM:
            do_macho(file, 32, LITTLE_ENDIAN)
        elif magic == MH_MAGIC_64:
            do_macho(file, 64, BIG_ENDIAN)
        elif magic == MH_CIGAM_64:
            do_macho(file, 64, LITTLE_ENDIAN)

    # The replacement name must fit in place of the old one.
    assert(len(what) >= len(value))

    # Fix: use a context manager so the binary is closed even on error,
    # instead of leaking the handle from do_file(open(path, 'r+b')).
    with open(path, 'r+b') as f:
        do_file(f)


if __name__ == '__main__':
    main()

## TODO:
## Copy python.exe.manifest
## Monkeypatch distutils.sysconfig
mit
catch/postal
src/drivers/twisted/txpostal.py
1
6155
#!/usr/bin/env python # # Copyright (C) 2012 Catch.com # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # """ Twisted Python module for Postal Push Notification Server. """ import json from twisted.application import service from twisted.internet import defer from twisted.web import client import types class Service(service.Service): name = 'PostalService' def __init__(self, host='localhost', port=5300): assert isinstance(host, basestring) assert isinstance(port, int) self.host = host self.port = port self.urlBase = 'http://%s:%d' % (host, port) def addDevice(self, user, device): """ Asynchronously creates a device in Postal. @param user: a stringable value representing the user id. @type user: C{basestring} or coercable to string. @param device: A dictionary containing the device parameters. 
@type device: C{dict} Returns: L{defer.Deferred} """ assert isinstance(device, types.DictType) url = '/'.join([self.urlBase, 'v1/users', str(user), 'devices', str(device['device_token'])]) postdata = json.dumps(device) headers = {'Content-Type': 'application/json'} df = client.getPage(url, method='PUT', postdata=postdata, agent='txpostal', timeout=60, headers=headers) df.addCallback(lambda d: json.loads(d)) return df def removeDevice(self, user, device_or_token): """ Asynchronously remove a device in Postal. @param user: a stringable value representing the user id. @type user: C{basestring} @param device_or_token: a string containing the device id. @type device_or_token: C{basestring} """ if isinstance(device_or_token, dict): device_or_token = device_or_token.get('device_token') assert isinstance(user, basestring) assert isinstance(device_or_token, basestring) user = user.encode('ascii') url = '/'.join([self.urlBase, 'v1/users', str(user), 'devices', str(device_or_token)]) def onErrback(failure): if failure.value.status == '204': return None failure.raiseException() df = client.getPage(url, method='DELETE', agent='txpostal', timeout=60) df.addErrback(onErrback) return df def getDevice(self, user, device_token): """ Asynchronously fetch a device from Postal. @param user: a stringable value representing the user id. @type user: C{basestring} @param device_token: a stringable value representing the device. @type user: C{basestring} Returns: L{defer.Deferred} to a dict of the device. """ assert isinstance(user, basestring) user = user.encode('utf-8') device_token = device_token.encode('utf-8') url = '/'.join([self.urlBase, 'v1/users', str(user), 'devices', str(device_token)]) headers = {'Accept': 'application/json'} df = client.getPage(url, method='GET', agent='txpostal', timeout=60, headers=headers) df.addCallback(lambda d: json.loads(d)) return df def getDevices(self, user): """ Asynchronously fetch devices for a user from Postal. 
@param user: a stringable value representing the user id. @type user: C{basestring} Returns: L{defer.Deferred} to a list of devices. """ assert isinstance(user, basestring) user = user.encode('utf-8') url = '%s/v1/users/%s/devices' % (self.urlBase, user) headers = {'Accept': 'application/json'} df = client.getPage(url, method='GET', agent='txpostal', timeout=60, headers=headers) df.addCallback(lambda d: json.loads(d)) return df def notify(self, notif, users=None, devices=None): pass if __name__ == '__main__': from twisted.internet import reactor svc = Service() user = '000011110000111100001111' device = {'device_type': 'aps', 'device_token': '123123123123123123'} def p(r): print r return r df = svc.addDevice(user, device) df.addBoth(p) df.addCallback(lambda d: svc.removeDevice(user, d['device_token'])) df.addBoth(p) df.addCallback(lambda d: svc.getDevices(user)) df.addBoth(p) df.addCallback(lambda d: svc.getDevice(user, d[0]['device_token'])) df.addBoth(p) df.addBoth(lambda *_: reactor.stop()) reactor.run()
agpl-3.0
citrix-openstack-build/tempest
tempest/api/volume/test_volumes_list.py
2
4162
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from tempest.api.volume import base
from tempest.common.utils.data_utils import rand_name
from tempest.openstack.common import log as logging
from tempest.test import attr

LOG = logging.getLogger(__name__)


class VolumesListTest(base.BaseVolumeTest):

    """
    This test creates a number of 1G volumes. To run successfully,
    ensure that the backing file for the volume group that Nova uses
    has space for at least 3 1G volumes!

    If you are running a Devstack environment, ensure that the
    VOLUME_BACKING_FILE_SIZE is at least 4G in your localrc
    """

    _interface = 'json'

    def assertVolumesIn(self, fetched_list, expected_list):
        """Fail unless every expected volume appears in fetched_list."""
        missing = [vol for vol in expected_list if vol not in fetched_list]
        if not missing:
            return

        def str_vol(vol):
            return "%s:%s" % (vol['id'], vol['display_name'])

        raw_msg = "Could not find volumes %s in expected list %s; fetched %s"
        self.fail(raw_msg % ([str_vol(v) for v in missing],
                             [str_vol(v) for v in expected_list],
                             [str_vol(v) for v in fetched_list]))

    @classmethod
    def setUpClass(cls):
        super(VolumesListTest, cls).setUpClass()
        cls.client = cls.volumes_client

        # Create 3 test volumes
        cls.volume_list = []
        cls.volume_id_list = []
        for _ in range(3):
            vol_name = rand_name('volume')
            try:
                resp, volume = cls.client.create_volume(
                    size=1, display_name=vol_name, metadata={'Type': 'work'})
                cls.client.wait_for_volume_status(volume['id'], 'available')
                resp, volume = cls.client.get_volume(volume['id'])
                cls.volume_list.append(volume)
                cls.volume_id_list.append(volume['id'])
            except Exception as exc:
                LOG.exception(exc)
                if cls.volume_list:
                    # We could not create all the volumes, though we were
                    # able to create *some* of them. This is typically
                    # because the backing file size of the volume group is
                    # too small. Tear down what was created before failing.
                    for volid in cls.volume_id_list:
                        cls.client.delete_volume(volid)
                        cls.client.wait_for_resource_deletion(volid)
                raise exc

    @classmethod
    def tearDownClass(cls):
        # Delete the created volumes
        for volid in cls.volume_id_list:
            resp, _ = cls.client.delete_volume(volid)
            cls.client.wait_for_resource_deletion(volid)
        super(VolumesListTest, cls).tearDownClass()

    @attr(type='smoke')
    def test_volume_list(self):
        # Get a list of Volumes
        # Fetch all volumes
        resp, fetched_list = self.client.list_volumes()
        self.assertEqual(200, resp.status)
        self.assertVolumesIn(fetched_list, self.volume_list)

    @attr(type='gate')
    def test_volume_list_with_details(self):
        # Get a list of Volumes with details
        # Fetch all Volumes
        resp, fetched_list = self.client.list_volumes_with_detail()
        self.assertEqual(200, resp.status)
        self.assertVolumesIn(fetched_list, self.volume_list)


class VolumeListTestXML(VolumesListTest):
    _interface = 'xml'
apache-2.0
adamncasey/servo
tests/wpt/css-tests/tools/html5lib/html5lib/treewalkers/etree.py
658
4613
from __future__ import absolute_import, division, unicode_literals

# OrderedDict preserves attribute order; fall back gracefully on very old
# Pythons (plain dict loses ordering but keeps the walker functional).
try:
    from collections import OrderedDict
except ImportError:
    try:
        from ordereddict import OrderedDict
    except ImportError:
        OrderedDict = dict

import gettext
_ = gettext.gettext

import re

from six import text_type

from . import _base
from ..utils import moduleFactoryFactory

# Splits an ElementTree "{namespace}tag" name into (namespace, tag).
tag_regexp = re.compile("{([^}]*)}(.*)")


def getETreeBuilder(ElementTreeImplementation):
    """Build a TreeWalker class bound to a specific ElementTree module.

    Returns locals() so moduleFactoryFactory can expose the generated
    TreeWalker as a per-implementation module attribute.
    """
    ElementTree = ElementTreeImplementation
    # Comment nodes are identified by the tag of a throwaway Comment.
    ElementTreeCommentType = ElementTree.Comment("asd").tag

    class TreeWalker(_base.NonRecursiveTreeWalker):
        """Given the particular ElementTree representation, this
        implementation, to avoid using recursion, returns "nodes" as tuples
        with the following content:

        1. The current element

        2. The index of the element relative to its parent

        3. A stack of ancestor elements

        4. A flag "text", "tail" or None to indicate if the current node is a
           text node; either the text or tail of the current element (1)
        """
        def getNodeDetails(self, node):
            # Unwrap tuple-form nodes; text/tail flags short-circuit to TEXT.
            if isinstance(node, tuple):  # It might be the root Element
                elt, key, parents, flag = node
                if flag in ("text", "tail"):
                    return _base.TEXT, getattr(elt, flag)
                else:
                    node = elt

            # An ElementTree (not an Element) was passed in: use its root.
            if not(hasattr(node, "tag")):
                node = node.getroot()

            if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"):
                return (_base.DOCUMENT,)

            elif node.tag == "<!DOCTYPE>":
                return (_base.DOCTYPE, node.text,
                        node.get("publicId"), node.get("systemId"))

            elif node.tag == ElementTreeCommentType:
                return _base.COMMENT, node.text

            else:
                assert type(node.tag) == text_type, type(node.tag)
                # This is assumed to be an ordinary element
                match = tag_regexp.match(node.tag)
                if match:
                    namespace, tag = match.groups()
                else:
                    namespace = None
                    tag = node.tag
                attrs = OrderedDict()
                for name, value in list(node.attrib.items()):
                    # Attribute names may also be namespaced "{ns}name".
                    match = tag_regexp.match(name)
                    if match:
                        attrs[(match.group(1), match.group(2))] = value
                    else:
                        attrs[(None, name)] = value
                # Last field: truthy when the element has children or text.
                return (_base.ELEMENT, namespace, tag,
                        attrs, len(node) or node.text)

        def getFirstChild(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                element, key, parents, flag = node, None, [], None

            if flag in ("text", "tail"):
                # Text nodes have no children.
                return None
            else:
                if element.text:
                    # The element's leading text comes before any child.
                    return element, key, parents, "text"
                elif len(element):
                    parents.append(element)
                    return element[0], 0, parents, None
                else:
                    return None

        def getNextSibling(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                return None

            if flag == "text":
                # After the leading text, the first child (if any) follows.
                if len(element):
                    parents.append(element)
                    return element[0], 0, parents, None
                else:
                    return None
            else:
                # After an element (or its tail): emit the tail text once,
                # then the next sibling in the parent, if there is one.
                if element.tail and flag != "tail":
                    return element, key, parents, "tail"
                elif key < len(parents[-1]) - 1:
                    return parents[-1][key + 1], key + 1, parents, None
                else:
                    return None

        def getParentNode(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                return None

            if flag == "text":
                # The parent of leading text is the element itself.
                if not parents:
                    return element
                else:
                    return element, key, parents, None
            else:
                # Pop the ancestor stack; recompute the parent's own index.
                parent = parents.pop()
                if not parents:
                    return parent
                else:
                    return parent, list(parents[-1]).index(parent), parents, None

    return locals()

getETreeModule = moduleFactoryFactory(getETreeBuilder)
mpl-2.0
benjaminjkraft/django
tests/gis_tests/test_geoforms.py
292
14830
from unittest import skipUnless

from django.contrib.gis import forms
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geos import GEOSGeometry
from django.forms import ValidationError
from django.test import SimpleTestCase, skipUnlessDBFeature
from django.utils import six
from django.utils.html import escape


@skipUnless(HAS_GDAL, "GeometryFieldTest needs GDAL support")
@skipUnlessDBFeature("gis_enabled")
class GeometryFieldTest(SimpleTestCase):

    def test_init(self):
        "Testing GeometryField initialization with defaults."
        fld = forms.GeometryField()
        for bad_default in ('blah', 3, 'FoO', None, 0):
            self.assertRaises(ValidationError, fld.clean, bad_default)

    def test_srid(self):
        "Testing GeometryField with a SRID set."
        # Input that doesn't specify the SRID is assumed to be in the SRID
        # of the input field.
        fld = forms.GeometryField(srid=4326)
        geom = fld.clean('POINT(5 23)')
        self.assertEqual(4326, geom.srid)
        # Making the field in a different SRID from that of the geometry, and
        # asserting it transforms.
        fld = forms.GeometryField(srid=32140)
        tol = 0.0000001
        xform_geom = GEOSGeometry('POINT (951640.547328465 4219369.26171664)', srid=32140)
        # The cleaned geometry should be transformed to 32140.
        cleaned_geom = fld.clean('SRID=4326;POINT (-95.363151 29.763374)')
        self.assertTrue(xform_geom.equals_exact(cleaned_geom, tol))

    def test_null(self):
        "Testing GeometryField's handling of null (None) geometries."
        # Form fields, by default, are required (`required=True`)
        fld = forms.GeometryField()
        with six.assertRaisesRegex(self, forms.ValidationError,
                                   "No geometry value provided."):
            fld.clean(None)
        # This will clean None as a geometry (See #10660).
        fld = forms.GeometryField(required=False)
        self.assertIsNone(fld.clean(None))

    def test_geom_type(self):
        "Testing GeometryField's handling of different geometry types."
        # By default, all geometry types are allowed.
        fld = forms.GeometryField()
        for wkt in ('POINT(5 23)',
                    'MULTIPOLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))',
                    'LINESTRING(0 0, 1 1)'):
            self.assertEqual(GEOSGeometry(wkt), fld.clean(wkt))

        pnt_fld = forms.GeometryField(geom_type='POINT')
        self.assertEqual(GEOSGeometry('POINT(5 23)'), pnt_fld.clean('POINT(5 23)'))
        # a WKT for any other geom_type will be properly transformed by `to_python`
        self.assertEqual(GEOSGeometry('LINESTRING(0 0, 1 1)'),
                         pnt_fld.to_python('LINESTRING(0 0, 1 1)'))
        # but rejected by `clean`
        self.assertRaises(forms.ValidationError, pnt_fld.clean, 'LINESTRING(0 0, 1 1)')

    def test_to_python(self):
        """
        Testing to_python returns a correct GEOSGeometry object or
        a ValidationError
        """
        fld = forms.GeometryField()
        # to_python returns the same GEOSGeometry for a WKT
        for wkt in ('POINT(5 23)',
                    'MULTIPOLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))',
                    'LINESTRING(0 0, 1 1)'):
            self.assertEqual(GEOSGeometry(wkt), fld.to_python(wkt))
        # but raises a ValidationError for any other string
        for wkt in ('POINT(5)',
                    'MULTI POLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))',
                    'BLAH(0 0, 1 1)'):
            self.assertRaises(forms.ValidationError, fld.to_python, wkt)

    def test_field_with_text_widget(self):
        class PointForm(forms.Form):
            pt = forms.PointField(srid=4326, widget=forms.TextInput)

        form = PointForm()
        cleaned_pt = form.fields['pt'].clean('POINT(5 23)')
        self.assertEqual(cleaned_pt, GEOSGeometry('POINT(5 23)'))
        self.assertEqual(4326, cleaned_pt.srid)

        point = GEOSGeometry('SRID=4326;POINT(5 23)')
        form = PointForm(data={'pt': 'POINT(5 23)'}, initial={'pt': point})
        self.assertFalse(form.has_changed())


@skipUnless(HAS_GDAL, "SpecializedFieldTest needs GDAL support")
@skipUnlessDBFeature("gis_enabled")
class SpecializedFieldTest(SimpleTestCase):
    def setUp(self):
        self.geometries = {
            'point': GEOSGeometry("SRID=4326;POINT(9.052734375 42.451171875)"),
            'multipoint': GEOSGeometry("SRID=4326;MULTIPOINT("
                                       "(13.18634033203125 14.504356384277344),"
                                       "(13.207969665527 14.490966796875),"
                                       "(13.177070617675 14.454917907714))"),
            'linestring': GEOSGeometry("SRID=4326;LINESTRING("
                                       "-8.26171875 -0.52734375,"
                                       "-7.734375 4.21875,"
                                       "6.85546875 3.779296875,"
                                       "5.44921875 -3.515625)"),
            'multilinestring': GEOSGeometry("SRID=4326;MULTILINESTRING("
                                            "(-16.435546875 -2.98828125,"
                                            "-17.2265625 2.98828125,"
                                            "-0.703125 3.515625,"
                                            "-1.494140625 -3.33984375),"
                                            "(-8.0859375 -5.9765625,"
                                            "8.525390625 -8.7890625,"
                                            "12.392578125 -0.87890625,"
                                            "10.01953125 7.646484375))"),
            'polygon': GEOSGeometry("SRID=4326;POLYGON("
                                    "(-1.669921875 6.240234375,"
                                    "-3.8671875 -0.615234375,"
                                    "5.9765625 -3.955078125,"
                                    "18.193359375 3.955078125,"
                                    "9.84375 9.4921875,"
                                    "-1.669921875 6.240234375))"),
            'multipolygon': GEOSGeometry("SRID=4326;MULTIPOLYGON("
                                         "((-17.578125 13.095703125,"
                                         "-17.2265625 10.8984375,"
                                         "-13.974609375 10.1953125,"
                                         "-13.359375 12.744140625,"
                                         "-15.732421875 13.7109375,"
                                         "-17.578125 13.095703125)),"
                                         "((-8.525390625 5.537109375,"
                                         "-8.876953125 2.548828125,"
                                         "-5.888671875 1.93359375,"
                                         "-5.09765625 4.21875,"
                                         "-6.064453125 6.240234375,"
                                         "-8.525390625 5.537109375)))"),
            'geometrycollection': GEOSGeometry("SRID=4326;GEOMETRYCOLLECTION("
                                               "POINT(5.625 -0.263671875),"
                                               "POINT(6.767578125 -3.603515625),"
                                               "POINT(8.525390625 0.087890625),"
                                               "POINT(8.0859375 -2.13134765625),"
                                               "LINESTRING("
                                               "6.273193359375 -1.175537109375,"
                                               "5.77880859375 -1.812744140625,"
                                               "7.27294921875 -2.230224609375,"
                                               "7.657470703125 -1.25244140625))"),
        }

    def assertMapWidget(self, form_instance):
        """
        Make sure the MapWidget js is passed in the form media and a MapWidget
        is actually created
        """
        self.assertTrue(form_instance.is_valid())
        rendered = form_instance.as_p()
        self.assertIn('new MapWidget(options);', rendered)
        self.assertIn('gis/js/OLMapWidget.js', str(form_instance.media))

    def assertTextarea(self, geom, rendered):
        """Makes sure the wkt and a textarea are in the content"""
        self.assertIn('<textarea ', rendered)
        self.assertIn('required', rendered)
        self.assertIn(geom.wkt, rendered)

    def test_pointfield(self):
        class PointForm(forms.Form):
            p = forms.PointField()

        geom = self.geometries['point']
        form = PointForm(data={'p': geom})
        self.assertTextarea(geom, form.as_p())
        self.assertMapWidget(form)
        self.assertFalse(PointForm().is_valid())
        invalid = PointForm(data={'p': 'some invalid geom'})
        self.assertFalse(invalid.is_valid())
        self.assertIn('Invalid geometry value', str(invalid.errors))

        for invalid in [geo for key, geo in self.geometries.items() if key != 'point']:
            self.assertFalse(PointForm(data={'p': invalid.wkt}).is_valid())

    def test_multipointfield(self):
        class PointForm(forms.Form):
            p = forms.MultiPointField()

        geom = self.geometries['multipoint']
        form = PointForm(data={'p': geom})
        self.assertTextarea(geom, form.as_p())
        self.assertMapWidget(form)
        self.assertFalse(PointForm().is_valid())

        for invalid in [geo for key, geo in self.geometries.items() if key != 'multipoint']:
            self.assertFalse(PointForm(data={'p': invalid.wkt}).is_valid())

    def test_linestringfield(self):
        class LineStringForm(forms.Form):
            l = forms.LineStringField()

        geom = self.geometries['linestring']
        form = LineStringForm(data={'l': geom})
        self.assertTextarea(geom, form.as_p())
        self.assertMapWidget(form)
        self.assertFalse(LineStringForm().is_valid())

        for invalid in [geo for key, geo in self.geometries.items() if key != 'linestring']:
            # Fixed: the field is named 'l'; using 'p' here made the form
            # invalid only because the required field was missing, so the
            # wrong-geometry rejection was never actually exercised.
            self.assertFalse(LineStringForm(data={'l': invalid.wkt}).is_valid())

    def test_multilinestringfield(self):
        class LineStringForm(forms.Form):
            l = forms.MultiLineStringField()

        geom = self.geometries['multilinestring']
        form = LineStringForm(data={'l': geom})
        self.assertTextarea(geom, form.as_p())
        self.assertMapWidget(form)
        self.assertFalse(LineStringForm().is_valid())

        for invalid in [geo for key, geo in self.geometries.items() if key != 'multilinestring']:
            # Fixed: same field-name mismatch as test_linestringfield.
            self.assertFalse(LineStringForm(data={'l': invalid.wkt}).is_valid())

    def test_polygonfield(self):
        class PolygonForm(forms.Form):
            p = forms.PolygonField()

        geom = self.geometries['polygon']
        form = PolygonForm(data={'p': geom})
        self.assertTextarea(geom, form.as_p())
        self.assertMapWidget(form)
        self.assertFalse(PolygonForm().is_valid())

        for invalid in [geo for key, geo in self.geometries.items() if key != 'polygon']:
            self.assertFalse(PolygonForm(data={'p': invalid.wkt}).is_valid())

    def test_multipolygonfield(self):
        class PolygonForm(forms.Form):
            p = forms.MultiPolygonField()

        geom = self.geometries['multipolygon']
        form = PolygonForm(data={'p': geom})
        self.assertTextarea(geom, form.as_p())
        self.assertMapWidget(form)
        self.assertFalse(PolygonForm().is_valid())

        for invalid in [geo for key, geo in self.geometries.items() if key != 'multipolygon']:
            self.assertFalse(PolygonForm(data={'p': invalid.wkt}).is_valid())

    def test_geometrycollectionfield(self):
        class GeometryForm(forms.Form):
            g = forms.GeometryCollectionField()

        geom = self.geometries['geometrycollection']
        form = GeometryForm(data={'g': geom})
        self.assertTextarea(geom, form.as_p())
        self.assertMapWidget(form)
        self.assertFalse(GeometryForm().is_valid())

        for invalid in [geo for key, geo in self.geometries.items() if key != 'geometrycollection']:
            self.assertFalse(GeometryForm(data={'g': invalid.wkt}).is_valid())


@skipUnless(HAS_GDAL, "OSMWidgetTest needs GDAL support")
@skipUnlessDBFeature("gis_enabled")
class OSMWidgetTest(SimpleTestCase):
    def setUp(self):
        self.geometries = {
            'point': GEOSGeometry("SRID=4326;POINT(9.052734375 42.451171875)"),
        }

    def test_osm_widget(self):
        class PointForm(forms.Form):
            p = forms.PointField(widget=forms.OSMWidget)

        geom = self.geometries['point']
        form = PointForm(data={'p': geom})
        rendered = form.as_p()

        self.assertIn("OpenStreetMap (Mapnik)", rendered)
        self.assertIn("id: 'id_p',", rendered)

    def test_default_lat_lon(self):
        class PointForm(forms.Form):
            p = forms.PointField(
                widget=forms.OSMWidget(attrs={
                    'default_lon': 20, 'default_lat': 30}),
            )

        form = PointForm()
        rendered = form.as_p()

        self.assertIn("options['default_lon'] = 20;", rendered)
        self.assertIn("options['default_lat'] = 30;", rendered)
        if forms.OSMWidget.default_lon != 20:
            self.assertNotIn(
                "options['default_lon'] = %d;" % forms.OSMWidget.default_lon,
                rendered)
        if forms.OSMWidget.default_lat != 30:
            self.assertNotIn(
                "options['default_lat'] = %d;" % forms.OSMWidget.default_lat,
                rendered)


@skipUnless(HAS_GDAL, "CustomGeometryWidgetTest needs GDAL support")
@skipUnlessDBFeature("gis_enabled")
class CustomGeometryWidgetTest(SimpleTestCase):

    def test_custom_serialization_widget(self):
        class CustomGeometryWidget(forms.BaseGeometryWidget):
            template_name = 'gis/openlayers.html'
            deserialize_called = 0

            def serialize(self, value):
                return value.json if value else ''

            def deserialize(self, value):
                self.deserialize_called += 1
                return GEOSGeometry(value)

        class PointForm(forms.Form):
            p = forms.PointField(widget=CustomGeometryWidget)

        point = GEOSGeometry("SRID=4326;POINT(9.052734375 42.451171875)")
        form = PointForm(data={'p': point})
        self.assertIn(escape(point.json), form.as_p())

        CustomGeometryWidget.called = 0
        widget = form.fields['p'].widget
        # Force deserialize use due to a string value
        self.assertIn(escape(point.json), widget.render('p', point.json))
        self.assertEqual(widget.deserialize_called, 1)

        form = PointForm(data={'p': point.json})
        self.assertTrue(form.is_valid())
        # Ensure that resulting geometry has srid set
        self.assertEqual(form.cleaned_data['p'].srid, 4326)
bsd-3-clause
berendkleinhaneveld/VTK
ThirdParty/Twisted/twisted/lore/default.py
60
1876
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Default processing factory plugin.
"""

from xml.dom import minidom as dom

from twisted.lore import tree, latex, lint, process
from twisted.web import sux

# Defaults merged under caller-supplied options by generate_html.
htmlDefault = {'template': 'template.tpl', 'baseurl': '%s', 'ext': '.html'}


class ProcessingFunctionFactory:
    """
    Factory producing per-output-format document processing callables.

    Each generate_* method returns a callable of (file, linkrel) that
    processes one input document.
    """

    def getDoFile(self):
        """
        Return the HTML per-file processing function.
        """
        return tree.doFile

    def generate_html(self, options, filenameGenerator=tree.getOutputFileName):
        """
        Build an HTML processor using the template named in C{options}.

        Raises L{process.NoProcessorError} if the template file cannot be
        opened or parsed.
        """
        n = htmlDefault.copy()
        n.update(options)
        options = n
        try:
            fp = open(options['template'])
            # Fix: close the template file handle on every path; it was
            # previously leaked (never closed, even on a parse error).
            try:
                templ = dom.parse(fp)
            finally:
                fp.close()
        except IOError as e:
            raise process.NoProcessorError(e.filename+": "+e.strerror)
        except sux.ParseError as e:
            raise process.NoProcessorError(str(e))
        df = lambda file, linkrel: self.getDoFile()(file, linkrel,
                                                    options['ext'],
                                                    options['baseurl'],
                                                    templ, options,
                                                    filenameGenerator)
        return df

    # Maps the latex sub-format option name to its spitter class; None is
    # the default whole-document spitter.
    latexSpitters = {None: latex.LatexSpitter,
                     'section': latex.SectionLatexSpitter,
                     'chapter': latex.ChapterLatexSpitter,
                     'book': latex.BookLatexSpitter,
                     }

    def generate_latex(self, options, filenameGenerator=None):
        """
        Build a LaTeX processor; the last truthy sub-format option in
        C{options} selects the spitter class.
        """
        spitter = self.latexSpitters[None]
        for (key, value) in self.latexSpitters.items():
            if key and options.get(key):
                spitter = value
        df = lambda file, linkrel: latex.convertFile(file, spitter)
        return df

    def getLintChecker(self):
        """
        Return the default lint checker.
        """
        return lint.getDefaultChecker()

    def generate_lint(self, options, filenameGenerator=None):
        """
        Build a lint processor bound to one checker instance.
        """
        checker = self.getLintChecker()
        return lambda file, linkrel: lint.doFile(file, checker)


factory = ProcessingFunctionFactory()
bsd-3-clause
akarki15/mozillians
vendor-local/lib/python/dateutil/relativedelta.py
257
17135
"""
Copyright (c) 2003-2010  Gustavo Niemeyer <gustavo@niemeyer.net>

This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"

import datetime
import calendar

__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]


class weekday(object):
    """A weekday, optionally qualified with an occurrence count n (e.g. MO(+2))."""
    __slots__ = ["weekday", "n"]

    def __init__(self, weekday, n=None):
        self.weekday = weekday
        self.n = n

    def __call__(self, n):
        # MO(n) returns a new qualified instance (or self if unchanged).
        if n == self.n:
            return self
        else:
            return self.__class__(self.weekday, n)

    def __eq__(self, other):
        try:
            if self.weekday != other.weekday or self.n != other.n:
                return False
        except AttributeError:
            return False
        return True

    def __repr__(self):
        s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
        if not self.n:
            return s
        else:
            return "%s(%+d)" % (s, self.n)

MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])


class relativedelta:
    """
The relativedelta type is based on the specification of the excelent
work done by M.-A. Lemburg in his mx.DateTime extension. However,
notice that this type does *NOT* implement the same algorithm as
his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.

There's two different ways to build a relativedelta instance. The
first one is passing it two date/datetime classes:

    relativedelta(datetime1, datetime2)

And the other way is to use the following keyword arguments:

    year, month, day, hour, minute, second, microsecond:
        Absolute information.

    years, months, weeks, days, hours, minutes, seconds, microseconds:
        Relative information, may be negative.

    weekday:
        One of the weekday instances (MO, TU, etc). These instances may
        receive a parameter N, specifying the Nth weekday, which could
        be positive or negative (like MO(+1) or MO(-2). Not specifying
        it is the same as specifying +1. You can also use an integer,
        where 0=MO.

    leapdays:
        Will add given days to the date found, if year is a leap
        year, and the date found is post 28 of february.

    yearday, nlyearday:
        Set the yearday or the non-leap year day (jump leap days).
        These are converted to day/month/leapdays information.

Here is the behavior of operations with relativedelta:

1) Calculate the absolute year, using the 'year' argument, or the
   original datetime year, if the argument is not present.

2) Add the relative 'years' argument to the absolute year.

3) Do steps 1 and 2 for month/months.

4) Calculate the absolute day, using the 'day' argument, or the
   original datetime day, if the argument is not present. Then,
   subtract from the day until it fits in the year and month
   found after their operations.

5) Add the relative 'days' argument to the absolute day. Notice
   that the 'weeks' argument is multiplied by 7 and added to
   'days'.

6) Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds,
   microsecond/microseconds.

7) If the 'weekday' argument is present, calculate the weekday,
   with the given (wday, nth) tuple. wday is the index of the
   weekday (0-6, 0=Mon), and nth is the number of weeks to add
   forward or backward, depending on its signal. Notice that if
   the calculated date is already Monday, for example, using
   (0, 1) or (0, -1) won't change the day.
    """

    def __init__(self, dt1=None, dt2=None,
                 years=0, months=0, days=0, leapdays=0, weeks=0,
                 hours=0, minutes=0, seconds=0, microseconds=0,
                 year=None, month=None, day=None, weekday=None,
                 yearday=None, nlyearday=None,
                 hour=None, minute=None, second=None, microsecond=None):
        if dt1 and dt2:
            # Difference mode: compute the relativedelta between two dates.
            if not isinstance(dt1, datetime.date) or \
               not isinstance(dt2, datetime.date):
                raise TypeError("relativedelta only diffs datetime/date")
            if type(dt1) is not type(dt2):
                # Promote the date to a datetime so both compare consistently.
                if not isinstance(dt1, datetime.datetime):
                    dt1 = datetime.datetime.fromordinal(dt1.toordinal())
                elif not isinstance(dt2, datetime.datetime):
                    dt2 = datetime.datetime.fromordinal(dt2.toordinal())
            self.years = 0
            self.months = 0
            self.days = 0
            self.leapdays = 0
            self.hours = 0
            self.minutes = 0
            self.seconds = 0
            self.microseconds = 0
            self.year = None
            self.month = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self.second = None
            self.microsecond = None
            self._has_time = 0

            # Start from the raw month difference, then adjust until
            # dt2 + self lands on the correct side of dt1.
            months = (dt1.year*12+dt1.month)-(dt2.year*12+dt2.month)
            self._set_months(months)
            dtm = self.__radd__(dt2)
            if dt1 < dt2:
                while dt1 > dtm:
                    months += 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            else:
                while dt1 < dtm:
                    months -= 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            delta = dt1 - dtm
            self.seconds = delta.seconds+delta.days*86400
            self.microseconds = delta.microseconds
        else:
            # Keyword mode: store relative and absolute components.
            self.years = years
            self.months = months
            self.days = days+weeks*7
            self.leapdays = leapdays
            self.hours = hours
            self.minutes = minutes
            self.seconds = seconds
            self.microseconds = microseconds
            self.year = year
            self.month = month
            self.day = day
            self.hour = hour
            self.minute = minute
            self.second = second
            self.microsecond = microsecond

            if type(weekday) is int:
                self.weekday = weekdays[weekday]
            else:
                self.weekday = weekday

            # Translate yearday/nlyearday into month/day (+leapdays).
            yday = 0
            if nlyearday:
                yday = nlyearday
            elif yearday:
                yday = yearday
                if yearday > 59:
                    self.leapdays = -1
            if yday:
                ydayidx = [31, 59, 90, 120, 151, 181, 212,
                           243, 273, 304, 334, 366]
                for idx, ydays in enumerate(ydayidx):
                    if yday <= ydays:
                        self.month = idx+1
                        if idx == 0:
                            self.day = yday
                        else:
                            self.day = yday-ydayidx[idx-1]
                        break
                else:
                    raise ValueError("invalid year day (%d)" % yday)

        self._fix()

    def _fix(self):
        # Normalize each relative component into its canonical range,
        # carrying the overflow into the next larger unit.
        if abs(self.microseconds) > 999999:
            s = self.microseconds//abs(self.microseconds)
            div, mod = divmod(self.microseconds*s, 1000000)
            self.microseconds = mod*s
            self.seconds += div*s
        if abs(self.seconds) > 59:
            s = self.seconds//abs(self.seconds)
            div, mod = divmod(self.seconds*s, 60)
            self.seconds = mod*s
            self.minutes += div*s
        if abs(self.minutes) > 59:
            s = self.minutes//abs(self.minutes)
            div, mod = divmod(self.minutes*s, 60)
            self.minutes = mod*s
            self.hours += div*s
        if abs(self.hours) > 23:
            s = self.hours//abs(self.hours)
            div, mod = divmod(self.hours*s, 24)
            self.hours = mod*s
            self.days += div*s
        if abs(self.months) > 11:
            s = self.months//abs(self.months)
            div, mod = divmod(self.months*s, 12)
            self.months = mod*s
            self.years += div*s
        if (self.hours or self.minutes or self.seconds or self.microseconds
                or self.hour is not None or self.minute is not None or
                self.second is not None or self.microsecond is not None):
            self._has_time = 1
        else:
            self._has_time = 0

    def _set_months(self, months):
        self.months = months
        if abs(self.months) > 11:
            s = self.months//abs(self.months)
            div, mod = divmod(self.months*s, 12)
            self.months = mod*s
            self.years = div*s
        else:
            self.years = 0

    def __radd__(self, other):
        # date/datetime + relativedelta: apply absolute fields first,
        # then relative offsets, then the weekday adjustment.
        if not isinstance(other, datetime.date):
            raise TypeError("unsupported type for add operation")
        elif self._has_time and not isinstance(other, datetime.datetime):
            other = datetime.datetime.fromordinal(other.toordinal())
        year = (self.year or other.year)+self.years
        month = self.month or other.month
        if self.months:
            assert 1 <= abs(self.months) <= 12
            month += self.months
            if month > 12:
                year += 1
                month -= 12
            elif month < 1:
                year -= 1
                month += 12
        # Clamp the day so it fits in the resulting month.
        day = min(calendar.monthrange(year, month)[1],
                  self.day or other.day)
        repl = {"year": year, "month": month, "day": day}
        for attr in ["hour", "minute", "second", "microsecond"]:
            value = getattr(self, attr)
            if value is not None:
                repl[attr] = value
        days = self.days
        if self.leapdays and month > 2 and calendar.isleap(year):
            days += self.leapdays
        ret = (other.replace(**repl)
               + datetime.timedelta(days=days,
                                    hours=self.hours,
                                    minutes=self.minutes,
                                    seconds=self.seconds,
                                    microseconds=self.microseconds))
        if self.weekday:
            weekday, nth = self.weekday.weekday, self.weekday.n or 1
            jumpdays = (abs(nth)-1)*7
            if nth > 0:
                jumpdays += (7-ret.weekday()+weekday) % 7
            else:
                jumpdays += (ret.weekday()-weekday) % 7
                jumpdays *= -1
            ret += datetime.timedelta(days=jumpdays)
        return ret

    def __rsub__(self, other):
        return self.__neg__().__radd__(other)

    def __add__(self, other):
        if not isinstance(other, relativedelta):
            raise TypeError("unsupported type for add operation")
        return relativedelta(years=other.years+self.years,
                             months=other.months+self.months,
                             days=other.days+self.days,
                             hours=other.hours+self.hours,
                             minutes=other.minutes+self.minutes,
                             seconds=other.seconds+self.seconds,
                             microseconds=other.microseconds+self.microseconds,
                             leapdays=other.leapdays or self.leapdays,
                             year=other.year or self.year,
                             month=other.month or self.month,
                             day=other.day or self.day,
                             weekday=other.weekday or self.weekday,
                             hour=other.hour or self.hour,
                             minute=other.minute or self.minute,
                             second=other.second or self.second,
                             # Fixed: was `other.second or ...`, which
                             # silently dropped the absolute microsecond.
                             microsecond=other.microsecond or self.microsecond)

    def __sub__(self, other):
        # Fixed: previously computed other - self (operands reversed),
        # matching the correction applied upstream in python-dateutil.
        if not isinstance(other, relativedelta):
            raise TypeError("unsupported type for sub operation")
        return relativedelta(years=self.years-other.years,
                             months=self.months-other.months,
                             days=self.days-other.days,
                             hours=self.hours-other.hours,
                             minutes=self.minutes-other.minutes,
                             seconds=self.seconds-other.seconds,
                             microseconds=self.microseconds-other.microseconds,
                             leapdays=self.leapdays or other.leapdays,
                             year=self.year or other.year,
                             month=self.month or other.month,
                             day=self.day or other.day,
                             weekday=self.weekday or other.weekday,
                             hour=self.hour or other.hour,
                             minute=self.minute or other.minute,
                             second=self.second or other.second,
                             microsecond=self.microsecond or other.microsecond)

    def __neg__(self):
        return relativedelta(years=-self.years,
                             months=-self.months,
                             days=-self.days,
                             hours=-self.hours,
                             minutes=-self.minutes,
                             seconds=-self.seconds,
                             microseconds=-self.microseconds,
                             leapdays=self.leapdays,
                             year=self.year,
                             month=self.month,
                             day=self.day,
                             weekday=self.weekday,
                             hour=self.hour,
                             minute=self.minute,
                             second=self.second,
                             microsecond=self.microsecond)

    def __nonzero__(self):
        return not (not self.years and
                    not self.months and
                    not self.days and
                    not self.hours and
                    not self.minutes and
                    not self.seconds and
                    not self.microseconds and
                    not self.leapdays and
                    self.year is None and
                    self.month is None and
                    self.day is None and
                    self.weekday is None and
                    self.hour is None and
                    self.minute is None and
                    self.second is None and
                    self.microsecond is None)

    # Python 3 name for truth testing; identical behavior.
    __bool__ = __nonzero__

    def __mul__(self, other):
        f = float(other)
        return relativedelta(years=self.years*f,
                             months=self.months*f,
                             days=self.days*f,
                             hours=self.hours*f,
                             minutes=self.minutes*f,
                             seconds=self.seconds*f,
                             microseconds=self.microseconds*f,
                             leapdays=self.leapdays,
                             year=self.year,
                             month=self.month,
                             day=self.day,
                             weekday=self.weekday,
                             hour=self.hour,
                             minute=self.minute,
                             second=self.second,
                             microsecond=self.microsecond)

    def __eq__(self, other):
        if not isinstance(other, relativedelta):
            return False
        if self.weekday or other.weekday:
            if not self.weekday or not other.weekday:
                return False
            if self.weekday.weekday != other.weekday.weekday:
                return False
            n1, n2 = self.weekday.n, other.weekday.n
            # n absent and n == +1 are equivalent.
            if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
                return False
        return (self.years == other.years and
                self.months == other.months and
                self.days == other.days and
                self.hours == other.hours and
                self.minutes == other.minutes and
                self.seconds == other.seconds and
                # Fixed: the relative microseconds field was not compared,
                # so deltas differing only in microseconds compared equal.
                self.microseconds == other.microseconds and
                self.leapdays == other.leapdays and
                self.year == other.year and
                self.month == other.month and
                self.day == other.day and
                self.hour == other.hour and
                self.minute == other.minute and
                self.second == other.second and
                self.microsecond == other.microsecond)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __div__(self, other):
        return self.__mul__(1/float(other))

    # Python 3 name for the division operator; identical behavior.
    __truediv__ = __div__

    def __repr__(self):
        l = []
        for attr in ["years", "months", "days", "leapdays",
                     "hours", "minutes", "seconds", "microseconds"]:
            value = getattr(self, attr)
            if value:
                l.append("%s=%+d" % (attr, value))
        for attr in ["year", "month", "day", "weekday",
                     "hour", "minute", "second", "microsecond"]:
            value = getattr(self, attr)
            if value is not None:
                l.append("%s=%s" % (attr, repr(value)))
        return "%s(%s)" % (self.__class__.__name__, ", ".join(l))

# vim:ts=4:sw=4:et
bsd-3-clause
gaddman/ansible
test/units/modules/network/dellos6/test_dellos6_config.py
68
6262
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from units.compat.mock import patch
from ansible.modules.network.dellos6 import dellos6_config
from units.modules.utils import set_module_args
from .dellos6_module import TestDellos6Module, load_fixture


class TestDellos6ConfigModule(TestDellos6Module):
    """Unit tests for the dellos6_config module, with device I/O mocked out."""

    module = dellos6_config

    def setUp(self):
        super(TestDellos6ConfigModule, self).setUp()

        self.mock_get_config = patch('ansible.modules.network.dellos6.dellos6_config.get_config')
        self.get_config = self.mock_get_config.start()

        self.mock_load_config = patch('ansible.modules.network.dellos6.dellos6_config.load_config')
        self.load_config = self.mock_load_config.start()

        self.mock_run_commands = patch('ansible.modules.network.dellos6.dellos6_config.run_commands')
        self.run_commands = self.mock_run_commands.start()

    def tearDown(self):
        super(TestDellos6ConfigModule, self).tearDown()
        self.mock_get_config.stop()
        self.mock_load_config.stop()
        self.mock_run_commands.stop()

    def load_fixtures(self, commands=None):
        # Every test sees the same running-config fixture as device state.
        config_file = 'dellos6_config_config.cfg'
        self.get_config.return_value = load_fixture(config_file)
        self.load_config.return_value = None

    def test_dellos6_config_unchanged(self):
        src = load_fixture('dellos6_config_config.cfg')
        set_module_args(dict(src=src))
        self.execute_module()

    def test_dellos6_config_src(self):
        src = load_fixture('dellos6_config_src.cfg')
        set_module_args(dict(src=src))
        commands = ['hostname foo', 'exit', 'interface Te1/0/2',
                    'shutdown', 'exit']
        self.execute_module(changed=True, commands=commands)

    def test_dellos6_config_backup(self):
        set_module_args(dict(backup=True))
        result = self.execute_module()
        self.assertIn('__backup__', result)

    def test_dellos6_config_save(self):
        set_module_args(dict(save=True))
        self.execute_module(changed=True)
        self.assertEqual(self.run_commands.call_count, 1)
        self.assertEqual(self.get_config.call_count, 0)
        self.assertEqual(self.load_config.call_count, 0)
        args = self.run_commands.call_args[0][1]
        self.assertDictContainsSubset({'command': 'copy running-config startup-config'}, args[0])
#        self.assertIn('copy running-config startup-config\r', args)

    def test_dellos6_config_lines_wo_parents(self):
        set_module_args(dict(lines=['hostname foo']))
        commands = ['hostname foo']
        self.execute_module(changed=True, commands=commands)

    def test_dellos6_config_lines_w_parents(self):
        set_module_args(dict(lines=['description "teest"', 'exit'], parents=['interface Te1/0/2']))
        commands = ['interface Te1/0/2', 'description "teest"', 'exit']
        self.execute_module(changed=True, commands=commands)

    def test_dellos6_config_before(self):
        set_module_args(dict(lines=['hostname foo'], before=['snmp-server contact bar']))
        commands = ['snmp-server contact bar', 'hostname foo']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_dellos6_config_after(self):
        set_module_args(dict(lines=['hostname foo'], after=['snmp-server contact bar']))
        commands = ['hostname foo', 'snmp-server contact bar']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_dellos6_config_before_after_no_change(self):
        set_module_args(dict(lines=['hostname router'],
                             before=['snmp-server contact bar'],
                             after=['snmp-server location chennai']))
        self.execute_module()

    def test_dellos6_config_config(self):
        config = 'hostname localhost'
        set_module_args(dict(lines=['hostname router'], config=config))
        commands = ['hostname router']
        self.execute_module(changed=True, commands=commands)

    def test_dellos6_config_replace_block(self):
        lines = ['description test string', 'shutdown']
        parents = ['interface Te1/0/2']
        set_module_args(dict(lines=lines, replace='block', parents=parents))
        commands = parents + lines
        self.execute_module(changed=True, commands=commands)

    def test_dellos6_config_match_none(self):
        lines = ['hostname router']
        set_module_args(dict(lines=lines, match='none'))
        self.execute_module(changed=True, commands=lines)

    def test_dellos6_config_match_none_parents(self):
        # Fixed: this method was also named test_dellos6_config_match_none,
        # which shadowed the previous test so it never ran.
        lines = ['description test string', 'shutdown']
        parents = ['interface Te1/0/2']
        set_module_args(dict(lines=lines, parents=parents, match='none'))
        commands = parents + lines
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_dellos6_config_match_strict(self):
        lines = ['description "test_string"', 'shutdown']
        parents = ['interface Te1/0/1']
        set_module_args(dict(lines=lines, parents=parents, match='strict'))
        commands = parents + ['shutdown']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_dellos6_config_match_exact(self):
        lines = ['description test_string', 'shutdown']
        parents = ['interface Te1/0/1']
        set_module_args(dict(lines=lines, parents=parents, match='exact'))
        commands = parents + lines
        self.execute_module(changed=True, commands=commands, sort=False)
gpl-3.0
orbitfp7/nova
nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py
70
6931
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Unit tests for nova.virt.xenapi.image.vdi_through_dev, driven with mox
# record/replay mocks. NOTE(review): mox expectations are strictly
# order-sensitive, so the call sequences below mirror the implementation.

import contextlib
import tarfile

import eventlet

from nova.image import glance
from nova import test
from nova.virt.xenapi.client import session as xenapi_session
from nova.virt.xenapi.image import vdi_through_dev


@contextlib.contextmanager
def fake_context(result=None):
    # Stand-in for any context manager the code under test enters;
    # yields the supplied result as the as-clause value.
    yield result


class TestDelegatingToCommand(test.NoDBTestCase):
    # Verifies the store facade delegates wholesale to the command object.
    def test_upload_image_is_delegated_to_command(self):
        command = self.mox.CreateMock(vdi_through_dev.UploadToGlanceAsRawTgz)
        self.mox.StubOutWithMock(vdi_through_dev, 'UploadToGlanceAsRawTgz')
        vdi_through_dev.UploadToGlanceAsRawTgz(
            'ctx', 'session', 'instance', 'image_id', 'vdis').AndReturn(
                command)
        command.upload_image().AndReturn('result')
        self.mox.ReplayAll()

        store = vdi_through_dev.VdiThroughDevStore()
        result = store.upload_image(
            'ctx', 'session', 'instance', 'image_id', 'vdis')

        self.assertEqual('result', result)


class TestUploadToGlanceAsRawTgz(test.NoDBTestCase):
    # Exercises each private step of the upload command in isolation.
    def test_upload_image(self):
        store = vdi_through_dev.UploadToGlanceAsRawTgz(
            'context', 'session', 'instance', 'id', ['vdi0', 'vdi1'])
        self.mox.StubOutWithMock(store, '_perform_upload')
        self.mox.StubOutWithMock(store, '_get_vdi_ref')
        self.mox.StubOutWithMock(vdi_through_dev, 'glance')
        self.mox.StubOutWithMock(vdi_through_dev, 'vm_utils')
        self.mox.StubOutWithMock(vdi_through_dev, 'utils')

        # Expected sequence: resolve VDI, attach read-only, chown the
        # device path, then stream the upload.
        store._get_vdi_ref().AndReturn('vdi_ref')
        vdi_through_dev.vm_utils.vdi_attached_here(
            'session', 'vdi_ref', read_only=True).AndReturn(
                fake_context('dev'))
        vdi_through_dev.utils.make_dev_path('dev').AndReturn('devpath')
        vdi_through_dev.utils.temporary_chown('devpath').AndReturn(
            fake_context())
        store._perform_upload('devpath')

        self.mox.ReplayAll()

        store.upload_image()

    def test__perform_upload(self):
        producer = self.mox.CreateMock(vdi_through_dev.TarGzProducer)
        consumer = self.mox.CreateMock(glance.UpdateGlanceImage)
        pool = self.mox.CreateMock(eventlet.GreenPool)
        store = vdi_through_dev.UploadToGlanceAsRawTgz(
            'context', 'session', 'instance', 'id', ['vdi0', 'vdi1'])
        self.mox.StubOutWithMock(store, '_create_pipe')
        self.mox.StubOutWithMock(store, '_get_virtual_size')
        self.mox.StubOutWithMock(producer, 'get_metadata')
        self.mox.StubOutWithMock(vdi_through_dev, 'TarGzProducer')
        self.mox.StubOutWithMock(glance, 'UpdateGlanceImage')
        self.mox.StubOutWithMock(vdi_through_dev, 'eventlet')

        # Producer/consumer are wired through an in-process pipe and run
        # concurrently on a green thread pool.
        producer.get_metadata().AndReturn('metadata')
        store._get_virtual_size().AndReturn('324')
        store._create_pipe().AndReturn(('readfile', 'writefile'))
        vdi_through_dev.TarGzProducer(
            'devpath', 'writefile', '324', 'disk.raw').AndReturn(
                producer)
        glance.UpdateGlanceImage('context', 'id', 'metadata',
                                 'readfile').AndReturn(consumer)
        vdi_through_dev.eventlet.GreenPool().AndReturn(pool)
        pool.spawn(producer.start)
        pool.spawn(consumer.start)
        pool.waitall()

        self.mox.ReplayAll()

        store._perform_upload('devpath')

    def test__get_vdi_ref(self):
        session = self.mox.CreateMock(xenapi_session.XenAPISession)
        store = vdi_through_dev.UploadToGlanceAsRawTgz(
            'context', session, 'instance', 'id', ['vdi0', 'vdi1'])
        # Only the first VDI in the list is resolved.
        session.call_xenapi('VDI.get_by_uuid', 'vdi0').AndReturn('vdi_ref')

        self.mox.ReplayAll()

        self.assertEqual('vdi_ref', store._get_vdi_ref())

    def test__get_virtual_size(self):
        session = self.mox.CreateMock(xenapi_session.XenAPISession)
        store = vdi_through_dev.UploadToGlanceAsRawTgz(
            'context', session, 'instance', 'id', ['vdi0', 'vdi1'])
        self.mox.StubOutWithMock(store, '_get_vdi_ref')
        store._get_vdi_ref().AndReturn('vdi_ref')
        session.call_xenapi('VDI.get_virtual_size', 'vdi_ref')

        self.mox.ReplayAll()

        store._get_virtual_size()

    def test__create_pipe(self):
        store = vdi_through_dev.UploadToGlanceAsRawTgz(
            'context', 'session', 'instance', 'id', ['vdi0', 'vdi1'])
        self.mox.StubOutWithMock(vdi_through_dev, 'os')
        self.mox.StubOutWithMock(vdi_through_dev, 'greenio')
        # OS pipe ends are wrapped into unbuffered green file objects.
        vdi_through_dev.os.pipe().AndReturn(('rpipe', 'wpipe'))
        vdi_through_dev.greenio.GreenPipe('rpipe', 'rb', 0).AndReturn('rfile')
        vdi_through_dev.greenio.GreenPipe('wpipe', 'wb', 0).AndReturn('wfile')

        self.mox.ReplayAll()

        result = store._create_pipe()
        self.assertEqual(('rfile', 'wfile'), result)


class TestTarGzProducer(test.NoDBTestCase):
    def test_constructor(self):
        producer = vdi_through_dev.TarGzProducer('devpath', 'writefile',
                                                 '100', 'fname')

        self.assertEqual('devpath', producer.fpath)
        self.assertEqual('writefile', producer.output)
        self.assertEqual('100', producer.size)
        # NOTE(review): this repeats the `output` assertion; it presumably
        # was meant to check producer.fname — confirm against TarGzProducer.
        self.assertEqual('writefile', producer.output)

    def test_start(self):
        # NOTE(review): `file` is the Python 2 built-in file type; this
        # mock target does not exist on Python 3.
        outf = self.mox.CreateMock(file)
        producer = vdi_through_dev.TarGzProducer('fpath', outf,
                                                 '100', 'fname')

        tfile = self.mox.CreateMock(tarfile.TarFile)
        tinfo = self.mox.CreateMock(tarfile.TarInfo)

        inf = self.mox.CreateMock(file)

        self.mox.StubOutWithMock(vdi_through_dev, 'tarfile')
        self.mox.StubOutWithMock(producer, '_open_file')

        vdi_through_dev.tarfile.TarInfo(name='fname').AndReturn(tinfo)
        vdi_through_dev.tarfile.open(fileobj=outf, mode='w|gz').AndReturn(
            fake_context(tfile))
        producer._open_file('fpath', 'rb').AndReturn(fake_context(inf))
        tfile.addfile(tinfo, fileobj=inf)
        outf.close()

        self.mox.ReplayAll()

        producer.start()

        self.assertEqual(100, tinfo.size)

    def test_get_metadata(self):
        producer = vdi_through_dev.TarGzProducer('devpath', 'writefile',
                                                 '100', 'fname')

        self.assertEqual({
            'disk_format': 'raw',
            'container_format': 'tgz'},
            producer.get_metadata())
apache-2.0
tescande/linux-nfc-next-stable
tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
120
22731
#!/usr/bin/python # -*- coding: utf-8 -*- # """ This utility can be used to debug and tune the performance of the intel_pstate driver. This utility can be used in two ways: - If there is Linux trace file with pstate_sample events enabled, then this utility can parse the trace file and generate performance plots. - If user has not specified a trace file as input via command line parameters, then this utility enables and collects trace data for a user specified interval and generates performance plots. Prerequisites: Python version 2.7.x gnuplot 5.0 or higher gnuplot-py 1.8 (Most of the distributions have these required packages. They may be called gnuplot-py, phython-gnuplot. ) HWP (Hardware P-States are disabled) Kernel config for Linux trace is enabled see print_help(): for Usage and Output details """ from __future__ import print_function from datetime import datetime import subprocess import os import time import re import sys import getopt import Gnuplot from numpy import * from decimal import * __author__ = "Srinivas Pandruvada" __copyright__ = " Copyright (c) 2017, Intel Corporation. 
" __license__ = "GPL version 2" MAX_CPUS = 256 # Define the csv file columns C_COMM = 18 C_GHZ = 17 C_ELAPSED = 16 C_SAMPLE = 15 C_DURATION = 14 C_LOAD = 13 C_BOOST = 12 C_FREQ = 11 C_TSC = 10 C_APERF = 9 C_MPERF = 8 C_TO = 7 C_FROM = 6 C_SCALED = 5 C_CORE = 4 C_USEC = 3 C_SEC = 2 C_CPU = 1 global sample_num, last_sec_cpu, last_usec_cpu, start_time, testname # 11 digits covers uptime to 115 days getcontext().prec = 11 sample_num =0 last_sec_cpu = [0] * MAX_CPUS last_usec_cpu = [0] * MAX_CPUS def print_help(): print('intel_pstate_tracer.py:') print(' Usage:') print(' If the trace file is available, then to simply parse and plot, use (sudo not required):') print(' ./intel_pstate_tracer.py [-c cpus] -t <trace_file> -n <test_name>') print(' Or') print(' ./intel_pstate_tracer.py [--cpu cpus] ---trace_file <trace_file> --name <test_name>') print(' To generate trace file, parse and plot, use (sudo required):') print(' sudo ./intel_pstate_tracer.py [-c cpus] -i <interval> -n <test_name>') print(' Or') print(' sudo ./intel_pstate_tracer.py [--cpu cpus] --interval <interval> --name <test_name>') print(' Optional argument:') print(' cpus: comma separated list of CPUs') print(' Output:') print(' If not already present, creates a "results/test_name" folder in the current working directory with:') print(' cpu.csv - comma seperated values file with trace contents and some additional calculations.') print(' cpu???.csv - comma seperated values file for CPU number ???.') print(' *.png - a variety of PNG format plot files created from the trace contents and the additional calculations.') print(' Notes:') print(' Avoid the use of _ (underscore) in test names, because in gnuplot it is a subscript directive.') print(' Maximum number of CPUs is {0:d}. If there are more the script will abort with an error.'.format(MAX_CPUS)) print(' Off-line CPUs cause the script to list some warnings, and create some empty files. 
Use the CPU mask feature for a clean run.') print(' Empty y range warnings for autoscaled plots can occur and can be ignored.') def plot_perf_busy_with_sample(cpu_index): """ Plot method to per cpu information """ file_name = 'cpu{:0>3}.csv'.format(cpu_index) if os.path.exists(file_name): output_png = "cpu%03d_perf_busy_vs_samples.png" % cpu_index g_plot = common_all_gnuplot_settings(output_png) g_plot('set yrange [0:40]') g_plot('set y2range [0:200]') g_plot('set y2tics 0, 10') g_plot('set title "{} : cpu perf busy vs. sample : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now())) # Override common g_plot('set xlabel "Samples"') g_plot('set ylabel "P-State"') g_plot('set y2label "Scaled Busy/performance/io-busy(%)"') set_4_plot_linestyles(g_plot) g_plot('plot "' + file_name + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y2 title "performance",\\'.format(C_SAMPLE, C_CORE)) g_plot('"' + file_name + '" using {:d}:{:d} with linespoints linestyle 2 axis x1y2 title "scaled-busy",\\'.format(C_SAMPLE, C_SCALED)) g_plot('"' + file_name + '" using {:d}:{:d} with linespoints linestyle 3 axis x1y2 title "io-boost",\\'.format(C_SAMPLE, C_BOOST)) g_plot('"' + file_name + '" using {:d}:{:d} with linespoints linestyle 4 axis x1y1 title "P-State"'.format(C_SAMPLE, C_TO)) def plot_perf_busy(cpu_index): """ Plot some per cpu information """ file_name = 'cpu{:0>3}.csv'.format(cpu_index) if os.path.exists(file_name): output_png = "cpu%03d_perf_busy.png" % cpu_index g_plot = common_all_gnuplot_settings(output_png) g_plot('set yrange [0:40]') g_plot('set y2range [0:200]') g_plot('set y2tics 0, 10') g_plot('set title "{} : perf busy : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now())) g_plot('set ylabel "P-State"') g_plot('set y2label "Scaled Busy/performance/io-busy(%)"') set_4_plot_linestyles(g_plot) g_plot('plot "' + file_name + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y2 title "performance",\\'.format(C_ELAPSED, C_CORE)) 
g_plot('"' + file_name + '" using {:d}:{:d} with linespoints linestyle 2 axis x1y2 title "scaled-busy",\\'.format(C_ELAPSED, C_SCALED)) g_plot('"' + file_name + '" using {:d}:{:d} with linespoints linestyle 3 axis x1y2 title "io-boost",\\'.format(C_ELAPSED, C_BOOST)) g_plot('"' + file_name + '" using {:d}:{:d} with linespoints linestyle 4 axis x1y1 title "P-State"'.format(C_ELAPSED, C_TO)) def plot_durations(cpu_index): """ Plot per cpu durations """ file_name = 'cpu{:0>3}.csv'.format(cpu_index) if os.path.exists(file_name): output_png = "cpu%03d_durations.png" % cpu_index g_plot = common_all_gnuplot_settings(output_png) # Should autoscale be used here? Should seconds be used here? g_plot('set yrange [0:5000]') g_plot('set ytics 0, 500') g_plot('set title "{} : durations : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now())) g_plot('set ylabel "Timer Duration (MilliSeconds)"') # override common g_plot('set key off') set_4_plot_linestyles(g_plot) g_plot('plot "' + file_name + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y1'.format(C_ELAPSED, C_DURATION)) def plot_loads(cpu_index): """ Plot per cpu loads """ file_name = 'cpu{:0>3}.csv'.format(cpu_index) if os.path.exists(file_name): output_png = "cpu%03d_loads.png" % cpu_index g_plot = common_all_gnuplot_settings(output_png) g_plot('set yrange [0:100]') g_plot('set ytics 0, 10') g_plot('set title "{} : loads : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now())) g_plot('set ylabel "CPU load (percent)"') # override common g_plot('set key off') set_4_plot_linestyles(g_plot) g_plot('plot "' + file_name + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y1'.format(C_ELAPSED, C_LOAD)) def plot_pstate_cpu_with_sample(): """ Plot all cpu information """ if os.path.exists('cpu.csv'): output_png = 'all_cpu_pstates_vs_samples.png' g_plot = common_all_gnuplot_settings(output_png) g_plot('set yrange [0:40]') # override common g_plot('set xlabel "Samples"') g_plot('set ylabel 
"P-State"') g_plot('set title "{} : cpu pstate vs. sample : {:%F %H:%M}"'.format(testname, datetime.now())) title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_SAMPLE, C_TO) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) def plot_pstate_cpu(): """ Plot all cpu information from csv files """ output_png = 'all_cpu_pstates.png' g_plot = common_all_gnuplot_settings(output_png) g_plot('set yrange [0:40]') g_plot('set ylabel "P-State"') g_plot('set title "{} : cpu pstates : {:%F %H:%M}"'.format(testname, datetime.now())) # the following command is really cool, but doesn't work with the CPU masking option because it aborts on the first missing file. # plot_str = 'plot for [i=0:*] file=sprintf("cpu%03d.csv",i) title_s=sprintf("cpu%03d",i) file using 16:7 pt 7 ps 1 title title_s' # title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_TO) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) def plot_load_cpu(): """ Plot all cpu loads """ output_png = 'all_cpu_loads.png' g_plot = common_all_gnuplot_settings(output_png) g_plot('set yrange [0:100]') g_plot('set ylabel "CPU load (percent)"') g_plot('set title "{} : cpu loads : {:%F %H:%M}"'.format(testname, datetime.now())) title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_LOAD) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) def plot_frequency_cpu(): """ Plot all cpu frequencies """ output_png = 'all_cpu_frequencies.png' g_plot = common_all_gnuplot_settings(output_png) g_plot('set yrange [0:4]') g_plot('set ylabel "CPU Frequency 
(GHz)"') g_plot('set title "{} : cpu frequencies : {:%F %H:%M}"'.format(testname, datetime.now())) title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_FREQ) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) def plot_duration_cpu(): """ Plot all cpu durations """ output_png = 'all_cpu_durations.png' g_plot = common_all_gnuplot_settings(output_png) g_plot('set yrange [0:5000]') g_plot('set ytics 0, 500') g_plot('set ylabel "Timer Duration (MilliSeconds)"') g_plot('set title "{} : cpu durations : {:%F %H:%M}"'.format(testname, datetime.now())) title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_DURATION) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) def plot_scaled_cpu(): """ Plot all cpu scaled busy """ output_png = 'all_cpu_scaled.png' g_plot = common_all_gnuplot_settings(output_png) # autoscale this one, no set y range g_plot('set ylabel "Scaled Busy (Unitless)"') g_plot('set title "{} : cpu scaled busy : {:%F %H:%M}"'.format(testname, datetime.now())) title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_SCALED) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) def plot_boost_cpu(): """ Plot all cpu IO Boosts """ output_png = 'all_cpu_boost.png' g_plot = common_all_gnuplot_settings(output_png) g_plot('set yrange [0:100]') g_plot('set ylabel "CPU IO Boost (percent)"') g_plot('set title "{} : cpu io boost : {:%F %H:%M}"'.format(testname, datetime.now())) title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') 
plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_BOOST) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) def plot_ghz_cpu(): """ Plot all cpu tsc ghz """ output_png = 'all_cpu_ghz.png' g_plot = common_all_gnuplot_settings(output_png) # autoscale this one, no set y range g_plot('set ylabel "TSC Frequency (GHz)"') g_plot('set title "{} : cpu TSC Frequencies (Sanity check calculation) : {:%F %H:%M}"'.format(testname, datetime.now())) title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_GHZ) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) def common_all_gnuplot_settings(output_png): """ common gnuplot settings for multiple CPUs one one graph. """ g_plot = common_gnuplot_settings() g_plot('set output "' + output_png + '"') return(g_plot) def common_gnuplot_settings(): """ common gnuplot settings. """ g_plot = Gnuplot.Gnuplot(persist=1) # The following line is for rigor only. It seems to be assumed for .csv files g_plot('set datafile separator \",\"') g_plot('set ytics nomirror') g_plot('set xtics nomirror') g_plot('set xtics font ", 10"') g_plot('set ytics font ", 10"') g_plot('set tics out scale 1.0') g_plot('set grid') g_plot('set key out horiz') g_plot('set key bot center') g_plot('set key samplen 2 spacing .8 font ", 9"') g_plot('set term png size 1200, 600') g_plot('set title font ", 11"') g_plot('set ylabel font ", 10"') g_plot('set xlabel font ", 10"') g_plot('set xlabel offset 0, 0.5') g_plot('set xlabel "Elapsed Time (Seconds)"') return(g_plot) def set_4_plot_linestyles(g_plot): """ set the linestyles used for 4 plots in 1 graphs. 
""" g_plot('set style line 1 linetype 1 linecolor rgb "green" pointtype -1') g_plot('set style line 2 linetype 1 linecolor rgb "red" pointtype -1') g_plot('set style line 3 linetype 1 linecolor rgb "purple" pointtype -1') g_plot('set style line 4 linetype 1 linecolor rgb "blue" pointtype -1') def store_csv(cpu_int, time_pre_dec, time_post_dec, core_busy, scaled, _from, _to, mperf, aperf, tsc, freq_ghz, io_boost, common_comm, load, duration_ms, sample_num, elapsed_time, tsc_ghz): """ Store master csv file information """ global graph_data_present if cpu_mask[cpu_int] == 0: return try: f_handle = open('cpu.csv', 'a') string_buffer = "CPU_%03u, %05u, %06u, %u, %u, %u, %u, %u, %u, %u, %.4f, %u, %.2f, %.3f, %u, %.3f, %.3f, %s\n" % (cpu_int, int(time_pre_dec), int(time_post_dec), int(core_busy), int(scaled), int(_from), int(_to), int(mperf), int(aperf), int(tsc), freq_ghz, int(io_boost), load, duration_ms, sample_num, elapsed_time, tsc_ghz, common_comm) f_handle.write(string_buffer); f_handle.close() except: print('IO error cpu.csv') return graph_data_present = True; def split_csv(): """ seperate the all csv file into per CPU csv files. 
""" global current_max_cpu if os.path.exists('cpu.csv'): for index in range(0, current_max_cpu + 1): if cpu_mask[int(index)] != 0: os.system('grep -m 1 common_cpu cpu.csv > cpu{:0>3}.csv'.format(index)) os.system('grep CPU_{:0>3} cpu.csv >> cpu{:0>3}.csv'.format(index, index)) def fix_ownership(path): """Change the owner of the file to SUDO_UID, if required""" uid = os.environ.get('SUDO_UID') gid = os.environ.get('SUDO_GID') if uid is not None: os.chown(path, int(uid), int(gid)) def cleanup_data_files(): """ clean up existing data files """ if os.path.exists('cpu.csv'): os.remove('cpu.csv') f_handle = open('cpu.csv', 'a') f_handle.write('common_cpu, common_secs, common_usecs, core_busy, scaled_busy, from, to, mperf, aperf, tsc, freq, boost, load, duration_ms, sample_num, elapsed_time, tsc_ghz, common_comm') f_handle.write('\n') f_handle.close() def clear_trace_file(): """ Clear trace file """ try: f_handle = open('/sys/kernel/debug/tracing/trace', 'w') f_handle.close() except: print('IO error clearing trace file ') quit() def enable_trace(): """ Enable trace """ try: open('/sys/kernel/debug/tracing/events/power/pstate_sample/enable' , 'w').write("1") except: print('IO error enabling trace ') quit() def disable_trace(): """ Disable trace """ try: open('/sys/kernel/debug/tracing/events/power/pstate_sample/enable' , 'w').write("0") except: print('IO error disabling trace ') quit() def set_trace_buffer_size(): """ Set trace buffer size """ try: open('/sys/kernel/debug/tracing/buffer_size_kb' , 'w').write("10240") except: print('IO error setting trace buffer size ') quit() def read_trace_data(filename): """ Read and parse trace data """ global current_max_cpu global sample_num, last_sec_cpu, last_usec_cpu, start_time try: data = open(filename, 'r').read() except: print('Error opening ', filename) quit() for line in data.splitlines(): search_obj = \ 
re.search(r'(^(.*?)\[)((\d+)[^\]])(.*?)(\d+)([.])(\d+)(.*?core_busy=)(\d+)(.*?scaled=)(\d+)(.*?from=)(\d+)(.*?to=)(\d+)(.*?mperf=)(\d+)(.*?aperf=)(\d+)(.*?tsc=)(\d+)(.*?freq=)(\d+)' , line) if search_obj: cpu = search_obj.group(3) cpu_int = int(cpu) cpu = str(cpu_int) time_pre_dec = search_obj.group(6) time_post_dec = search_obj.group(8) core_busy = search_obj.group(10) scaled = search_obj.group(12) _from = search_obj.group(14) _to = search_obj.group(16) mperf = search_obj.group(18) aperf = search_obj.group(20) tsc = search_obj.group(22) freq = search_obj.group(24) common_comm = search_obj.group(2).replace(' ', '') # Not all kernel versions have io_boost field io_boost = '0' search_obj = re.search(r'.*?io_boost=(\d+)', line) if search_obj: io_boost = search_obj.group(1) if sample_num == 0 : start_time = Decimal(time_pre_dec) + Decimal(time_post_dec) / Decimal(1000000) sample_num += 1 if last_sec_cpu[cpu_int] == 0 : last_sec_cpu[cpu_int] = time_pre_dec last_usec_cpu[cpu_int] = time_post_dec else : duration_us = (int(time_pre_dec) - int(last_sec_cpu[cpu_int])) * 1000000 + (int(time_post_dec) - int(last_usec_cpu[cpu_int])) duration_ms = Decimal(duration_us) / Decimal(1000) last_sec_cpu[cpu_int] = time_pre_dec last_usec_cpu[cpu_int] = time_post_dec elapsed_time = Decimal(time_pre_dec) + Decimal(time_post_dec) / Decimal(1000000) - start_time load = Decimal(int(mperf)*100)/ Decimal(tsc) freq_ghz = Decimal(freq)/Decimal(1000000) # Sanity check calculation, typically anomalies indicate missed samples # However, check for 0 (should never occur) tsc_ghz = Decimal(0) if duration_ms != Decimal(0) : tsc_ghz = Decimal(tsc)/duration_ms/Decimal(1000000) store_csv(cpu_int, time_pre_dec, time_post_dec, core_busy, scaled, _from, _to, mperf, aperf, tsc, freq_ghz, io_boost, common_comm, load, duration_ms, sample_num, elapsed_time, tsc_ghz) if cpu_int > current_max_cpu: current_max_cpu = cpu_int # End of for each trace line loop # Now seperate the main overall csv file into per CPU csv 
files. split_csv() interval = "" filename = "" cpu_list = "" testname = "" graph_data_present = False; valid1 = False valid2 = False cpu_mask = zeros((MAX_CPUS,), dtype=int) try: opts, args = getopt.getopt(sys.argv[1:],"ht:i:c:n:",["help","trace_file=","interval=","cpu=","name="]) except getopt.GetoptError: print_help() sys.exit(2) for opt, arg in opts: if opt == '-h': print() sys.exit() elif opt in ("-t", "--trace_file"): valid1 = True location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) filename = os.path.join(location, arg) elif opt in ("-i", "--interval"): valid1 = True interval = arg elif opt in ("-c", "--cpu"): cpu_list = arg elif opt in ("-n", "--name"): valid2 = True testname = arg if not (valid1 and valid2): print_help() sys.exit() if cpu_list: for p in re.split("[,]", cpu_list): if int(p) < MAX_CPUS : cpu_mask[int(p)] = 1 else: for i in range (0, MAX_CPUS): cpu_mask[i] = 1 if not os.path.exists('results'): os.mkdir('results') # The regular user needs to own the directory, not root. fix_ownership('results') os.chdir('results') if os.path.exists(testname): print('The test name directory already exists. Please provide a unique test name. Test re-run not supported, yet.') sys.exit() os.mkdir(testname) # The regular user needs to own the directory, not root. fix_ownership(testname) os.chdir(testname) # Temporary (or perhaps not) cur_version = sys.version_info print('python version (should be >= 2.7):') print(cur_version) # Left as "cleanup" for potential future re-run ability. 
cleanup_data_files() if interval: filename = "/sys/kernel/debug/tracing/trace" clear_trace_file() set_trace_buffer_size() enable_trace() print('Sleeping for ', interval, 'seconds') time.sleep(int(interval)) disable_trace() current_max_cpu = 0 read_trace_data(filename) if graph_data_present == False: print('No valid data to plot') sys.exit(2) for cpu_no in range(0, current_max_cpu + 1): plot_perf_busy_with_sample(cpu_no) plot_perf_busy(cpu_no) plot_durations(cpu_no) plot_loads(cpu_no) plot_pstate_cpu_with_sample() plot_pstate_cpu() plot_load_cpu() plot_frequency_cpu() plot_duration_cpu() plot_scaled_cpu() plot_boost_cpu() plot_ghz_cpu() # It is preferrable, but not necessary, that the regular user owns the files, not root. for root, dirs, files in os.walk('.'): for f in files: fix_ownership(f) os.chdir('../../')
gpl-2.0
rwl/PyCIM
CIM15/IEC61970/Generation/GenerationDynamics/__init__.py
1
2662
# Copyright (C) 2010-2011 Richard Lincoln # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. """The Generation Dynamics package contains prime movers, such as turbines and boilers, which are needed for simulation and educational purposes. 
""" from CIM15.IEC61970.Generation.GenerationDynamics.BWRSteamSupply import BWRSteamSupply from CIM15.IEC61970.Generation.GenerationDynamics.HydroTurbine import HydroTurbine from CIM15.IEC61970.Generation.GenerationDynamics.SteamTurbine import SteamTurbine from CIM15.IEC61970.Generation.GenerationDynamics.SteamSupply import SteamSupply from CIM15.IEC61970.Generation.GenerationDynamics.FossilSteamSupply import FossilSteamSupply from CIM15.IEC61970.Generation.GenerationDynamics.Subcritical import Subcritical from CIM15.IEC61970.Generation.GenerationDynamics.PWRSteamSupply import PWRSteamSupply from CIM15.IEC61970.Generation.GenerationDynamics.PrimeMover import PrimeMover from CIM15.IEC61970.Generation.GenerationDynamics.CombustionTurbine import CombustionTurbine from CIM15.IEC61970.Generation.GenerationDynamics.HeatRecoveryBoiler import HeatRecoveryBoiler from CIM15.IEC61970.Generation.GenerationDynamics.Supercritical import Supercritical from CIM15.IEC61970.Generation.GenerationDynamics.DrumBoiler import DrumBoiler from CIM15.IEC61970.Generation.GenerationDynamics.CTTempActivePowerCurve import CTTempActivePowerCurve nsURI = "http://iec.ch/TC57/2010/CIM-schema-cim15#GenerationDynamics" nsPrefix = "cimGenerationDynamics" class TurbineType(str): """Values are: pelton, kaplan, francis """ pass class BoilerControlMode(str): """Values are: following, coordinated """ pass
mit
pdamodaran/yellowbrick
yellowbrick/features/importances.py
1
13346
# yellowbrick.features.importances # Feature importance visualizer # # Author: Benjamin Bengfort <benjamin@bengfort.com> # Created: Fri Mar 02 15:21:36 2018 -0500 # Author: Rebecca Bilbro <rbilbro@districtdatalabs.com> # Updated: Sun Jun 24 10:53:36 2018 -0500 # # Copyright (C) 2018 District Data Labs # For license information, see LICENSE.txt # # ID: importances.py [] benjamin@bengfort.com $ """ Implementation of a feature importances visualizer. This visualizer sits in kind of a weird place since it is technically a model scoring visualizer, but is generally used for feature engineering. """ ########################################################################## ## Imports ########################################################################## import warnings import numpy as np import matplotlib.pyplot as plt from yellowbrick.base import ModelVisualizer from yellowbrick.utils import is_dataframe, is_classifier from yellowbrick.exceptions import YellowbrickTypeError, NotFitted, YellowbrickWarning from ..draw import bar_stack ########################################################################## ## Feature Visualizer ########################################################################## class FeatureImportances(ModelVisualizer): """ Displays the most informative features in a model by showing a bar chart of features ranked by their importances. Although primarily a feature engineering mechanism, this visualizer requires a model that has either a ``coef_`` or ``feature_importances_`` parameter after fit. Note: Some classification models such as ``LogisticRegression``, return ``coef_`` as a multidimensional array of shape ``(n_classes, n_features)``. In this case, the ``FeatureImportances`` visualizer computes the mean of the ``coefs_`` by class for each feature. Parameters ---------- model : Estimator A Scikit-Learn estimator that learns feature importances. Must support either ``coef_`` or ``feature_importances_`` parameters. 
ax : matplotlib Axes, default: None The axis to plot the figure on. If None is passed in the current axes will be used (or generated if required). labels : list, default: None A list of feature names to use. If a DataFrame is passed to fit and features is None, feature names are selected as the column names. relative : bool, default: True If true, the features are described by their relative importance as a percentage of the strongest feature component; otherwise the raw numeric description of the feature importance is shown. absolute : bool, default: False Make all coeficients absolute to more easily compare negative coeficients with positive ones. xlabel : str, default: None The label for the X-axis. If None is automatically determined by the underlying model and options provided. stack : bool, default: False If true and the classifier returns multi-class feature importance, then a stacked bar plot is plotted; otherwise the mean of the feature importance across classes are plotted. kwargs : dict Keyword arguments that are passed to the base class and may influence the visualization as defined in other Visualizers. Attributes ---------- features_ : np.array The feature labels ranked according to their importance feature_importances_ : np.array The numeric value of the feature importance computed by the model classes_ : np.array The classees labeled. Is not None only for classifier. 
Examples -------- >>> from sklearn.ensemble import GradientBoostingClassifier >>> visualizer = FeatureImportances(GradientBoostingClassifier()) >>> visualizer.fit(X, y) >>> visualizer.poof() """ def __init__(self, model, ax=None, labels=None, relative=True, absolute=False, xlabel=None, stack=False, **kwargs): super(FeatureImportances, self).__init__(model, ax, **kwargs) # Data Parameters self.set_params( labels=labels, relative=relative, absolute=absolute, xlabel=xlabel, stack=stack ) def fit(self, X, y=None, **kwargs): """ Fits the estimator to discover the feature importances described by the data, then draws those importances as a bar plot. Parameters ---------- X : ndarray or DataFrame of shape n x m A matrix of n instances with m features y : ndarray or Series of length n An array or series of target or class values kwargs : dict Keyword arguments passed to the fit method of the estimator. Returns ------- self : visualizer The fit method must always return self to support pipelines. """ super(FeatureImportances, self).fit(X, y, **kwargs) # Get the feature importances from the model self.feature_importances_ = self._find_importances_param() # Get the classes from the model if is_classifier(self): self.classes_ = self._find_classes_param() else: self.classes_ = None self.stack = False # If self.stack = True and feature importances is a multidim array, # we're expecting a shape of (n_classes, n_features) # therefore we flatten by taking the average by # column to get shape (n_features,) (see LogisticRegression) if not self.stack and self.feature_importances_.ndim > 1: self.feature_importances_ = np.mean(self.feature_importances_, axis=0) warnings.warn(( "detected multi-dimensional feature importances but stack=False, " "using mean to aggregate them." 
), YellowbrickWarning) # Apply absolute value filter before normalization if self.absolute: self.feature_importances_ = np.abs(self.feature_importances_) # Normalize features relative to the maximum if self.relative: maxv = np.abs(self.feature_importances_).max() self.feature_importances_ /= maxv self.feature_importances_ *= 100.0 # Create labels for the feature importances # NOTE: this code is duplicated from MultiFeatureVisualizer if self.labels is None: # Use column names if a dataframe if is_dataframe(X): self.features_ = np.array(X.columns) # Otherwise use the column index as the labels else: _, ncols = X.shape self.features_ = np.arange(0, ncols) else: self.features_ = np.array(self.labels) # Sort the features and their importances if self.stack: sort_idx = np.argsort(np.mean(self.feature_importances_, 0)) self.features_ = self.features_[sort_idx] self.feature_importances_ = self.feature_importances_[:, sort_idx] else: sort_idx = np.argsort(self.feature_importances_) self.features_ = self.features_[sort_idx] self.feature_importances_ = self.feature_importances_[sort_idx] # Draw the feature importances self.draw() return self def draw(self, **kwargs): """ Draws the feature importances as a bar chart; called from fit. """ # Quick validation for param in ('feature_importances_', 'features_'): if not hasattr(self, param): raise NotFitted("missing required param '{}'".format(param)) # Find the positions for each bar pos = np.arange(self.features_.shape[0]) + 0.5 # Plot the bar chart if self.stack: legend_kws = {'bbox_to_anchor':(1.04, 0.5), 'loc':"center left"} bar_stack(self.feature_importances_, ax=self.ax, labels=list(self.classes_), ticks=self.features_, orientation='h', legend_kws=legend_kws) else: self.ax.barh(pos, self.feature_importances_, align='center') # Set the labels for the bars self.ax.set_yticks(pos) self.ax.set_yticklabels(self.features_) return self.ax def finalize(self, **kwargs): """ Finalize the drawing setting labels and title. 
""" # Set the title self.set_title('Feature Importances of {} Features using {}'.format( len(self.features_), self.name)) # Set the xlabel self.ax.set_xlabel(self._get_xlabel()) # Remove the ygrid self.ax.grid(False, axis='y') # Ensure we have a tight fit plt.tight_layout() def _find_classes_param(self): """ Searches the wrapped model for the classes_ parameter. """ for attr in ["classes_"]: try: return getattr(self.estimator, attr) except AttributeError: continue raise YellowbrickTypeError( "could not find classes_ param on {}".format( self.estimator.__class__.__name__ ) ) def _find_importances_param(self): """ Searches the wrapped model for the feature importances parameter. """ for attr in ("feature_importances_", "coef_"): try: return getattr(self.estimator, attr) except AttributeError: continue raise YellowbrickTypeError( "could not find feature importances param on {}".format( self.estimator.__class__.__name__ ) ) def _get_xlabel(self): """ Determines the xlabel based on the underlying data structure """ # Return user-specified label if self.xlabel: return self.xlabel # Label for coefficients if hasattr(self.estimator, "coef_"): if self.relative: return "relative coefficient magnitude" return "coefficient value" # Default label for feature_importances_ if self.relative: return "relative importance" return "feature importance" def _is_fitted(self): """ Returns true if the visualizer has been fit. """ return hasattr(self, 'feature_importances_') and hasattr(self, 'features_') ########################################################################## ## Quick Method ########################################################################## def feature_importances(model, X, y=None, ax=None, labels=None, relative=True, absolute=False, xlabel=None, stack=False, **kwargs): """ Displays the most informative features in a model by showing a bar chart of features ranked by their importances. 
Although primarily a feature engineering mechanism, this visualizer requires a model that has either a ``coef_`` or ``feature_importances_`` parameter after fit. Parameters ---------- model : Estimator A Scikit-Learn estimator that learns feature importances. Must support either ``coef_`` or ``feature_importances_`` parameters. X : ndarray or DataFrame of shape n x m A matrix of n instances with m features y : ndarray or Series of length n, optional An array or series of target or class values ax : matplotlib Axes, default: None The axis to plot the figure on. If None is passed in the current axes will be used (or generated if required). labels : list, default: None A list of feature names to use. If a DataFrame is passed to fit and features is None, feature names are selected as the column names. relative : bool, default: True If true, the features are described by their relative importance as a percentage of the strongest feature component; otherwise the raw numeric description of the feature importance is shown. absolute : bool, default: False Make all coeficients absolute to more easily compare negative coeficients with positive ones. xlabel : str, default: None The label for the X-axis. If None is automatically determined by the underlying model and options provided. stack : bool, default: False If true and the classifier returns multi-class feature importance, then a stacked bar plot is plotted; otherwise the mean of the feature importance across classes are plotted. kwargs : dict Keyword arguments that are passed to the base class and may influence the visualization as defined in other Visualizers. Returns ------- ax : matplotlib axes Returns the axes that the parallel coordinates were drawn on. 
""" # Instantiate the visualizer visualizer = FeatureImportances( model, ax, labels, relative, absolute, xlabel, stack, **kwargs) # Fit and transform the visualizer (calls draw) visualizer.fit(X, y) visualizer.finalize() # Return the axes object on the visualizer return visualizer.ax
apache-2.0
mintoo/NetDim
pyNMS/graph_algorithms/shortest_path_window.py
2
2947
# Copyright (C) 2017 Antoine Fourmy <antoine dot fourmy at gmail dot com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from miscellaneous.decorators import update_paths
from PyQt5.QtWidgets import (
    QComboBox,
    QGridLayout,
    QLabel,
    QLineEdit,
    QPushButton,
    QWidget,
)


class ShortestPathWindow(QWidget):
    """Dialog to run a shortest-path algorithm between two named nodes."""

    # GUI labels of the algorithms offered in the drop-down list
    algorithms = (
        'Constrained A*',
        'Bellman-Ford algorithm',
        'Floyd-Warshall algorithm',
        'Linear programming'
    )

    def __init__(self, controller):
        super().__init__()
        self.controller = controller
        self.setWindowTitle('Shortest path algorithms')

        # algorithm selection drop-down
        algorithm_label = QLabel('Algorithm')
        self.sp_list = QComboBox()
        self.sp_list.addItems(self.algorithms)

        # free-text fields for the source and destination node names
        source_label = QLabel('Source')
        self.source_edit = QLineEdit()
        destination_label = QLabel('Destination')
        self.destination_edit = QLineEdit()

        # confirmation button
        button_compute = QPushButton()
        button_compute.setText('Compute')
        button_compute.clicked.connect(self.compute_sp)

        # position in the grid
        grid = QGridLayout()
        grid.addWidget(algorithm_label, 0, 0, 1, 1)
        grid.addWidget(self.sp_list, 0, 1, 1, 1)
        grid.addWidget(source_label, 1, 0, 1, 1)
        grid.addWidget(self.source_edit, 1, 1, 1, 1)
        grid.addWidget(destination_label, 2, 0, 1, 1)
        grid.addWidget(self.destination_edit, 2, 1, 1, 1)
        grid.addWidget(button_compute, 3, 0, 1, 2)
        self.setLayout(grid)

    @update_paths
    def compute_sp(self, _):
        # resolve the node objects from the names typed by the user
        src = self.network.nf(name=self.source_edit.text())
        dest = self.network.nf(name=self.destination_edit.text())

        # map the selected GUI label to the network-level implementation
        dispatch = {
            'Constrained A*': self.network.A_star,
            'Bellman-Ford algorithm': self.network.bellman_ford,
            'Floyd-Warshall algorithm': self.network.floyd_warshall,
            'Linear programming': self.network.LP_SP_formulation,
        }
        algorithm = dispatch[self.sp_list.currentText()]

        # run the algorithm and highlight the resulting path in the view
        nodes, physical_links = algorithm(src, dest)
        self.view.select(*(nodes + physical_links))
gpl-3.0
gregkorte/Python-Koans
python2/libs/colorama/ansitowin32.py
287
6621
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import re
import sys

from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style
from .winterm import WinTerm, WinColor, WinStyle
from .win32 import windll


if windll is not None:
    winterm = WinTerm()


def is_a_tty(stream):
    """Return True if *stream* exposes isatty() and it reports a tty."""
    return hasattr(stream, 'isatty') and stream.isatty()


class StreamWrapper(object):
    '''
    Wraps a stream (such as stdout), acting as a transparent proxy for all
    attribute access apart from method 'write()', which is delegated to our
    Converter instance.
    '''
    def __init__(self, wrapped, converter):
        # double-underscore everything to prevent clashes with names of
        # attributes on the wrapped stream object.
        self.__wrapped = wrapped
        self.__convertor = converter

    def __getattr__(self, name):
        return getattr(self.__wrapped, name)

    def write(self, text):
        self.__convertor.write(text)


class AnsiToWin32(object):
    '''
    Implements a 'write()' method which, on Windows, will strip ANSI character
    sequences from the text, and if outputting to a tty, will convert them into
    win32 function calls.
    '''
    # matches '\033[<params><letter>' ANSI CSI sequences
    ANSI_RE = re.compile('\033\[((?:\d|;)*)([a-zA-Z])')

    def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
        # The wrapped stream (normally sys.stdout or sys.stderr)
        self.wrapped = wrapped

        # should we reset colors to defaults after every .write()
        self.autoreset = autoreset

        # create the proxy wrapping our output stream
        self.stream = StreamWrapper(wrapped, self)

        on_windows = sys.platform.startswith('win')

        # should we strip ANSI sequences from our output?
        if strip is None:
            strip = on_windows
        self.strip = strip

        # should we should convert ANSI sequences into win32 calls?
        if convert is None:
            convert = on_windows and is_a_tty(wrapped)
        self.convert = convert

        # dict of ansi codes to win32 functions and parameters
        self.win32_calls = self.get_win32_calls()

        # are we wrapping stderr?
        self.on_stderr = self.wrapped is sys.stderr

    def should_wrap(self):
        '''
        True if this class is actually needed. If false, then the output
        stream will not be affected, nor will win32 calls be issued, so
        wrapping stdout is not actually required. This will generally be
        False on non-Windows platforms, unless optional functionality like
        autoreset has been requested using kwargs to init()
        '''
        return self.convert or self.strip or self.autoreset

    def get_win32_calls(self):
        # Returns None when conversion is disabled; only the 'm' branch of
        # call_win32 reads this dict, and it is guarded by self.convert.
        if self.convert and winterm:
            return {
                AnsiStyle.RESET_ALL: (winterm.reset_all, ),
                AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
                AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
                AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
                AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
                AnsiFore.RED: (winterm.fore, WinColor.RED),
                AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
                AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
                AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
                AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
                AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
                AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
                AnsiFore.RESET: (winterm.fore, ),
                AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
                AnsiBack.RED: (winterm.back, WinColor.RED),
                AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
                AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
                AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
                AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
                AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
                AnsiBack.WHITE: (winterm.back, WinColor.GREY),
                AnsiBack.RESET: (winterm.back, ),
            }

    def write(self, text):
        if self.strip or self.convert:
            self.write_and_convert(text)
        else:
            self.wrapped.write(text)
            self.wrapped.flush()
        if self.autoreset:
            self.reset_all()

    def reset_all(self):
        if self.convert:
            self.call_win32('m', (0,))
        elif is_a_tty(self.wrapped):
            self.wrapped.write(Style.RESET_ALL)

    def write_and_convert(self, text):
        '''
        Write the given text to our wrapped stream, stripping any ANSI
        sequences from the text, and optionally converting them into win32
        calls.
        '''
        cursor = 0
        for match in self.ANSI_RE.finditer(text):
            start, end = match.span()
            self.write_plain_text(text, cursor, start)
            self.convert_ansi(*match.groups())
            cursor = end
        self.write_plain_text(text, cursor, len(text))

    def write_plain_text(self, text, start, end):
        if start < end:
            self.wrapped.write(text[start:end])
            self.wrapped.flush()

    def convert_ansi(self, paramstring, command):
        if self.convert:
            params = self.extract_params(paramstring)
            self.call_win32(command, params)

    def extract_params(self, paramstring):
        # '31;1' -> (31, 1); an empty paramstring yields the empty tuple
        def split(paramstring):
            for p in paramstring.split(';'):
                if p != '':
                    yield int(p)

        return tuple(split(paramstring))

    def call_win32(self, command, params):
        if command == 'm':
            # BUG FIX: extract_params returns a *tuple*, so the previous
            # sentinel check `params == []` could never be true and a bare
            # '\033[m' (reset) was silently ignored; an empty parameter list
            # defaults to SGR code 0 (reset), per the ANSI spec.
            if not params:
                params = (0,)
            for param in params:
                if param in self.win32_calls:
                    func_args = self.win32_calls[param]
                    func = func_args[0]
                    args = func_args[1:]
                    kwargs = dict(on_stderr=self.on_stderr)
                    func(*args, **kwargs)
        elif command in ('H', 'f'):     # set cursor position
            func = winterm.set_cursor_position
            func(params, on_stderr=self.on_stderr)
        # was `command in ('J')`, which is a substring test on the string 'J';
        # it happened to work for the single character but reads as a tuple test
        elif command == 'J':            # erase data
            func = winterm.erase_data
            func(params, on_stderr=self.on_stderr)
        elif command == 'A':            # cursor up; default is one row
            num_rows = params[0] if params else 1
            func = winterm.cursor_up
            func(num_rows, on_stderr=self.on_stderr)
mit
tianweizhang/nova
nova/spice/__init__.py
38
1705
#!/usr/bin/env python # Copyright (c) 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module for SPICE Proxying.""" from oslo.config import cfg spice_opts = [ cfg.StrOpt('html5proxy_base_url', default='http://127.0.0.1:6082/spice_auto.html', help='Location of spice HTML5 console proxy, in the form ' '"http://127.0.0.1:6082/spice_auto.html"'), cfg.StrOpt('server_listen', default='127.0.0.1', help='IP address on which instance spice server should listen'), cfg.StrOpt('server_proxyclient_address', default='127.0.0.1', help='The address to which proxy clients ' '(like nova-spicehtml5proxy) should connect'), cfg.BoolOpt('enabled', default=False, help='Enable spice related features'), cfg.BoolOpt('agent_enabled', default=True, help='Enable spice guest agent support'), cfg.StrOpt('keymap', default='en-us', help='Keymap for spice'), ] CONF = cfg.CONF CONF.register_opts(spice_opts, group='spice')
apache-2.0
joychugh/pgoapi
examples/spiral_poi_search.py
1
8280
#!/usr/bin/env python
"""
pgoapi - Pokemon Go API
Copyright (c) 2016 tjado <https://github.com/tejado>

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

Author: tjado <https://github.com/tejado>
"""

import os
import re
import sys
import json
import time
import struct
import random
import logging
import requests
import argparse
import pprint

from pgoapi import PGoApi
from pgoapi.utilities import f2i, h2f
from pgoapi import utilities as util

from google.protobuf.internal import encoder
from geopy.geocoders import GoogleV3
from s2sphere import Cell, CellId, LatLng

log = logging.getLogger(__name__)


def get_pos_by_name(location_name):
    """Geocode *location_name* and return a (latitude, longitude, altitude) tuple."""
    geolocator = GoogleV3()
    loc = geolocator.geocode(location_name)

    log.info('Your given location: %s', loc.address.encode('utf-8'))
    log.info('lat/long/alt: %s %s %s', loc.latitude, loc.longitude, loc.altitude)

    return (loc.latitude, loc.longitude, loc.altitude)


def get_cell_ids(lat, long, radius=10):
    """Return the sorted list of S2 cell ids (level 15) around the position.

    NOTE: the parameter name ``long`` shadows the Python 2 builtin; it is kept
    for backward compatibility with positional/keyword callers.
    """
    origin = CellId.from_lat_lng(LatLng.from_degrees(lat, long)).parent(15)
    walk = [origin.id()]
    right = origin.next()
    left = origin.prev()

    # Search around provided radius
    for i in range(radius):
        walk.append(right.id())
        walk.append(left.id())
        right = right.next()
        left = left.prev()

    # Return everything
    return sorted(walk)


def encode(cellid):
    """Varint-encode *cellid* as used by the protobuf wire format."""
    output = []
    encoder._VarintEncoder()(output.append, cellid)
    return ''.join(output)


def init_config():
    """Build the runtime configuration from CLI args and optional config.json.

    Returns the parsed namespace, or None when the auth service is invalid.
    """
    parser = argparse.ArgumentParser()
    config_file = "config.json"

    # If config file exists, load variables from json
    load = {}
    if os.path.isfile(config_file):
        with open(config_file) as data:
            load.update(json.load(data))

    # Read passed in Arguments; only require a flag that config.json
    # does not already provide  (was: `lambda x: not x in load`)
    required = lambda x: x not in load
    parser.add_argument("-a", "--auth_service",
                        help="Auth Service ('ptc' or 'google')",
                        required=required("auth_service"))
    parser.add_argument("-u", "--username", help="Username",
                        required=required("username"))
    parser.add_argument("-p", "--password", help="Password",
                        required=required("password"))
    parser.add_argument("-l", "--location", help="Location",
                        required=required("location"))
    parser.add_argument("-d", "--debug", help="Debug Mode", action='store_true')
    parser.add_argument("-t", "--test", help="Only parse the specified location",
                        action='store_true')
    # FIX: the defaults must target the argparse dests 'debug'/'test';
    # the previous DEBUG/TEST merely set unused uppercase attributes
    # (harmless because store_true already defaults to False, but wrong).
    parser.set_defaults(debug=False, test=False)
    config = parser.parse_args()

    # Passed in arguments should trump config-file values
    for key in config.__dict__:
        if key in load and config.__dict__[key] is None:
            config.__dict__[key] = load[key]

    if config.auth_service not in ['ptc', 'google']:
        log.error("Invalid Auth service specified! ('ptc' or 'google')")
        return None

    return config


def main():
    # log settings
    # log format
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s [%(module)10s] [%(levelname)5s] %(message)s')
    # log level for http request class
    logging.getLogger("requests").setLevel(logging.WARNING)
    # log level for main pgoapi class
    logging.getLogger("pgoapi").setLevel(logging.INFO)
    # log level for internal pgoapi class
    logging.getLogger("rpc_api").setLevel(logging.INFO)

    config = init_config()
    if not config:
        return

    if config.debug:
        logging.getLogger("requests").setLevel(logging.DEBUG)
        logging.getLogger("pgoapi").setLevel(logging.DEBUG)
        logging.getLogger("rpc_api").setLevel(logging.DEBUG)

    position = get_pos_by_name(config.location)
    if config.test:
        return

    # instantiate pgoapi
    api = PGoApi()

    # provide player position on the earth
    api.set_position(*position)

    if not api.login(config.auth_service, config.username, config.password):
        return

    # chain subrequests (methods) into one RPC call

    # get player profile call
    # ----------------------
    api.get_player()

    # execute the RPC call
    response_dict = api.call()
    # apparently new dict has binary data in it, so formatting it with
    # json.dumps no longer works; pprint works here but there are alternatives
    # print('Response dictionary: \n\r{}'.format(json.dumps(response_dict, indent=2)))
    print('Response dictionary: \n\r{}'.format(
        pprint.PrettyPrinter(indent=4).pformat(response_dict)))

    find_poi(api, position[0], position[1])


def find_poi(api, lat, lng):
    """Walk a spiral around (lat, lng) and collect wild pokemon sightings."""
    poi = {'pokemons': {}, 'forts': []}
    step_size = 0.0015
    step_limit = 49
    coords = generate_spiral(lat, lng, step_size, step_limit)
    for coord in coords:
        lat = coord['lat']
        lng = coord['lng']
        api.set_position(lat, lng, 0)

        # get_cellid was buggy -> replaced through get_cell_ids from pokecli
        # timestamp gets computed a different way:
        cell_ids = get_cell_ids(lat, lng)
        timestamps = [0, ] * len(cell_ids)
        api.get_map_objects(latitude=util.f2i(lat), longitude=util.f2i(lng),
                            since_timestamp_ms=timestamps, cell_id=cell_ids)
        response_dict = api.call()
        if 'status' in response_dict['responses']['GET_MAP_OBJECTS']:
            if response_dict['responses']['GET_MAP_OBJECTS']['status'] == 1:
                for map_cell in response_dict['responses']['GET_MAP_OBJECTS']['map_cells']:
                    if 'wild_pokemons' in map_cell:
                        for pokemon in map_cell['wild_pokemons']:
                            pokekey = get_key_from_pokemon(pokemon)
                            pokemon['hides_at'] = time.time() + pokemon['time_till_hidden_ms'] / 1000
                            poi['pokemons'][pokekey] = pokemon

        # time.sleep(0.51)

    # new dict, binary data
    # print('POI dictionary: \n\r{}'.format(json.dumps(poi, indent=2)))
    print('POI dictionary: \n\r{}'.format(
        pprint.PrettyPrinter(indent=4).pformat(poi)))
    print('Open this in a browser to see the path the spiral search took:')
    print_gmaps_dbug(coords)


def get_key_from_pokemon(pokemon):
    """Build a unique '<spawnpoint>-<pokemon_id>' key for a sighting."""
    return '{}-{}'.format(pokemon['spawnpoint_id'],
                          pokemon['pokemon_data']['pokemon_id'])


def print_gmaps_dbug(coords):
    """Print a static Google Maps URL showing the spiral path walked."""
    url_string = 'http://maps.googleapis.com/maps/api/staticmap?size=400x400&path='
    for coord in coords:
        url_string += '{},{}|'.format(coord['lat'], coord['lng'])
    print(url_string[:-1])


def generate_spiral(starting_lat, starting_lng, step_size, step_limit):
    """Generate *step_limit* jittered coordinates spiralling out from the start."""
    coords = [{'lat': starting_lat, 'lng': starting_lng}]
    steps, x, y, d, m = 1, 0, 0, 1, 1
    # small random jitter so consecutive runs do not repeat the exact path
    rlow = 0.0
    rhigh = 0.0005

    while steps < step_limit:
        while 2 * x * d < m and steps < step_limit:
            x = x + d
            steps += 1
            lat = x * step_size + starting_lat + random.uniform(rlow, rhigh)
            lng = y * step_size + starting_lng + random.uniform(rlow, rhigh)
            coords.append({'lat': lat, 'lng': lng})
        while 2 * y * d < m and steps < step_limit:
            y = y + d
            steps += 1
            lat = x * step_size + starting_lat + random.uniform(rlow, rhigh)
            lng = y * step_size + starting_lng + random.uniform(rlow, rhigh)
            coords.append({'lat': lat, 'lng': lng})
        # flip direction and widen the ring for the next pair of legs
        d = -1 * d
        m = m + 1
    return coords


if __name__ == '__main__':
    main()
mit
daikeren/opbeat_python
tests/instrumentation/psycopg2_tests.py
2
5144
# -*- coding: utf-8 -*-
import os

import pytest

from opbeat.instrumentation import control
from opbeat.instrumentation.packages.psycopg2 import extract_signature
from tests.contrib.django.django_tests import get_client

try:
    import psycopg2
    has_psycopg2 = True
except ImportError:
    has_psycopg2 = False

# skip the live-database test unless running on Travis with psycopg2 installed
travis_and_psycopg2 = 'TRAVIS' not in os.environ or not has_psycopg2


def test_insert():
    sql = """INSERT INTO mytable (id, name) VALUE ('2323', 'Ron')"""
    signature = extract_signature(sql)
    assert signature == "INSERT INTO mytable"


def test_update():
    sql = """UPDATE "my table" set name='Ron' WHERE id = 2323"""
    signature = extract_signature(sql)
    assert signature == "UPDATE my table"


def test_delete_simple():
    sql = 'DELETE FROM "mytable"'
    signature = extract_signature(sql)
    assert signature == "DELETE FROM mytable"


def test_delete():
    sql = """DELETE FROM "my table" WHERE id = 2323"""
    signature = extract_signature(sql)
    assert signature == "DELETE FROM my table"


def test_select_simple():
    sql = """SELECT id, name FROM my_table WHERE id = 2323"""
    signature = extract_signature(sql)
    assert signature == "SELECT FROM my_table"


def test_select_with_entity_quotes():
    sql = """SELECT id, name FROM "mytable" WHERE id = 2323"""
    signature = extract_signature(sql)
    assert signature == "SELECT FROM mytable"


def test_select_with_difficult_values():
    sql = """SELECT id, 'some name' + '" from Denmark' FROM "mytable" WHERE id = 2323"""
    signature = extract_signature(sql)
    assert signature == "SELECT FROM mytable"


def test_select_with_dollar_quotes():
    sql = """SELECT id, $$some single doubles ' $$ + '" from Denmark' FROM "mytable" WHERE id = 2323"""
    signature = extract_signature(sql)
    assert signature == "SELECT FROM mytable"


def test_select_with_invalid_dollar_quotes():
    sql = """SELECT id, $fish$some single doubles ' $$ + '" from Denmark' FROM "mytable" WHERE id = 2323"""
    signature = extract_signature(sql)
    assert signature == "SELECT FROM"


def test_select_with_dollar_quotes_custom_token():
    sql = """SELECT id, $token $FROM $ FROM $ FROM single doubles ' $token $ + '" from Denmark' FROM "mytable" WHERE id = 2323"""
    signature = extract_signature(sql)
    assert signature == "SELECT FROM mytable"


def test_select_with_difficult_table_name():
    sql = "SELECT id FROM \"myta\n-æøåble\" WHERE id = 2323"
    signature = extract_signature(sql)
    assert signature == "SELECT FROM myta\n-æøåble"


def test_select_subselect():
    sql = """SELECT id, name FROM (
            SELECT id, 'not a FROM ''value' FROM mytable WHERE id = 2323
    ) LIMIT 20"""
    signature = extract_signature(sql)
    assert signature == "SELECT FROM mytable"


def test_select_subselect_with_alias():
    sql = """
    SELECT count(*)
    FROM (
        SELECT count(id) AS some_alias, some_column
        FROM mytable
        GROUP BY some_colun
        HAVING count(id) > 1
    ) AS foo
    """
    signature = extract_signature(sql)
    assert signature == "SELECT FROM mytable"


def test_select_with_multiple_tables():
    sql = """SELECT count(table2.id)
        FROM table1, table2, table2
        WHERE table2.id = table1.table2_id
    """
    signature = extract_signature(sql)
    assert signature == "SELECT FROM table1"


def test_select_with_invalid_subselect():
    sql = "SELECT id FROM (SELECT * "
    signature = extract_signature(sql)
    assert signature == "SELECT FROM"


def test_select_with_invalid_literal():
    sql = "SELECT 'neverending literal FROM (SELECT * FROM ..."
    signature = extract_signature(sql)
    assert signature == "SELECT FROM"


def test_savepoint():
    sql = """SAVEPOINT x_asd1234"""
    signature = extract_signature(sql)
    assert signature == "SAVEPOINT"


def test_begin():
    sql = """BEGIN"""
    signature = extract_signature(sql)
    assert signature == "BEGIN"


def test_create_index_with_name():
    sql = """CREATE INDEX myindex ON mytable"""
    signature = extract_signature(sql)
    assert signature == "CREATE INDEX"


def test_create_index_without_name():
    sql = """CREATE INDEX ON mytable"""
    signature = extract_signature(sql)
    assert signature == "CREATE INDEX"


def test_drop_table():
    sql = """DROP TABLE mytable"""
    signature = extract_signature(sql)
    assert signature == "DROP TABLE"


def test_multi_statement_sql():
    sql = """CREATE TABLE mytable; SELECT * FROM mytable; DROP TABLE mytable"""
    signature = extract_signature(sql)
    assert signature == "CREATE TABLE"


@pytest.mark.skipif(travis_and_psycopg2,
                    reason="Requires postgres server. Only runs ontravisci.")
def test_psycopg2_register_type():
    import psycopg2.extras

    client = get_client()
    control.instrument(client)

    try:
        client.begin_transaction()
        conn = psycopg2.connect(database="opbeat_test", user="postgres")
        new_type = psycopg2.extras.register_uuid(None, conn)
        client.end_transaction(None, "test-transaction")
    finally:
        # make sure we've cleared out the traces for the other tests.
        client.instrumentation_store.get_all()

    assert new_type is not None
bsd-3-clause
pabloborrego93/edx-platform
cms/djangoapps/course_creators/tests/test_views.py
16
4629
""" Tests course_creators.views.py. """ from django.contrib.auth.models import User from django.core.exceptions import PermissionDenied from django.test import TestCase from django.core.urlresolvers import reverse from course_creators.views import add_user_with_status_unrequested, add_user_with_status_granted from course_creators.views import get_course_creator_status, update_course_creator_group, user_requested_access import mock from student.roles import CourseCreatorRole from student import auth class CourseCreatorView(TestCase): """ Tests for modifying the course creator table. """ def setUp(self): """ Test case setup """ super(CourseCreatorView, self).setUp() self.user = User.objects.create_user('test_user', 'test_user+courses@edx.org', 'foo') self.admin = User.objects.create_user('Mark', 'admin+courses@edx.org', 'foo') self.admin.is_staff = True def test_staff_permission_required(self): """ Tests that any method changing the course creator authz group must be called with staff permissions. """ with self.assertRaises(PermissionDenied): add_user_with_status_granted(self.user, self.user) with self.assertRaises(PermissionDenied): update_course_creator_group(self.user, self.user, True) def test_table_initially_empty(self): self.assertIsNone(get_course_creator_status(self.user)) def test_add_unrequested(self): add_user_with_status_unrequested(self.user) self.assertEqual('unrequested', get_course_creator_status(self.user)) # Calling add again will be a no-op (even if state is different). add_user_with_status_granted(self.admin, self.user) self.assertEqual('unrequested', get_course_creator_status(self.user)) def test_add_granted(self): with mock.patch.dict('django.conf.settings.FEATURES', {"ENABLE_CREATOR_GROUP": True}): # Calling add_user_with_status_granted impacts is_user_in_course_group_role. 
self.assertFalse(auth.user_has_role(self.user, CourseCreatorRole())) add_user_with_status_granted(self.admin, self.user) self.assertEqual('granted', get_course_creator_status(self.user)) # Calling add again will be a no-op (even if state is different). add_user_with_status_unrequested(self.user) self.assertEqual('granted', get_course_creator_status(self.user)) self.assertTrue(auth.user_has_role(self.user, CourseCreatorRole())) def test_update_creator_group(self): with mock.patch.dict('django.conf.settings.FEATURES', {"ENABLE_CREATOR_GROUP": True}): self.assertFalse(auth.user_has_role(self.user, CourseCreatorRole())) update_course_creator_group(self.admin, self.user, True) self.assertTrue(auth.user_has_role(self.user, CourseCreatorRole())) update_course_creator_group(self.admin, self.user, False) self.assertFalse(auth.user_has_role(self.user, CourseCreatorRole())) def test_user_requested_access(self): add_user_with_status_unrequested(self.user) self.assertEqual('unrequested', get_course_creator_status(self.user)) self.client.login(username=self.user.username, password='foo') # The user_requested_access function renders a template that requires # request-specific information. Use the django TestClient to supply # the appropriate request context. self.client.post(reverse('request_course_creator')) self.assertEqual('pending', get_course_creator_status(self.user)) def test_user_requested_already_granted(self): add_user_with_status_granted(self.admin, self.user) self.assertEqual('granted', get_course_creator_status(self.user)) # Will not "downgrade" to pending because that would require removing the # user from the authz course creator group (and that can only be done by an admin). user_requested_access(self.user) self.assertEqual('granted', get_course_creator_status(self.user)) def test_add_user_unrequested_staff(self): # Users marked as is_staff will not be added to the course creator table. 
add_user_with_status_unrequested(self.admin) self.assertIsNone(get_course_creator_status(self.admin)) def test_add_user_granted_staff(self): # Users marked as is_staff will not be added to the course creator table. add_user_with_status_granted(self.admin, self.admin) self.assertIsNone(get_course_creator_status(self.admin))
agpl-3.0
JanDintel/ansible
lib/ansible/inventory/script.py
27
6367
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ############################################# from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import subprocess import sys from collections import Mapping from six import iteritems from ansible import constants as C from ansible.errors import * from ansible.inventory.host import Host from ansible.inventory.group import Group from ansible.module_utils.basic import json_dict_bytes_to_unicode class InventoryScript: ''' Host inventory parser for ansible using external inventory scripts. ''' def __init__(self, loader, filename=C.DEFAULT_HOST_LIST): self._loader = loader # Support inventory scripts that are not prefixed with some # path information but happen to be in the current working # directory when '.' is not in PATH. 
self.filename = os.path.abspath(filename) cmd = [ self.filename, "--list" ] try: sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError as e: raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e)) (stdout, stderr) = sp.communicate() if sp.returncode != 0: raise AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr)) self.data = stdout # see comment about _meta below self.host_vars_from_top = None self.groups = self._parse(stderr) def _parse(self, err): all_hosts = {} # not passing from_remote because data from CMDB is trusted try: self.raw = self._loader.load(self.data) except Exception as e: sys.stderr.write(err + "\n") raise AnsibleError("failed to parse executable inventory script results from {0}: {1}".format(self.filename, str(e))) if not isinstance(self.raw, Mapping): sys.stderr.write(err + "\n") raise AnsibleError("failed to parse executable inventory script results from {0}: data needs to be formatted as a json dict".format(self.filename)) self.raw = json_dict_bytes_to_unicode(self.raw) all = Group('all') groups = dict(all=all) group = None for (group_name, data) in self.raw.items(): # in Ansible 1.3 and later, a "_meta" subelement may contain # a variable "hostvars" which contains a hash for each host # if this "hostvars" exists at all then do not call --host for each # host. This is for efficiency and scripts should still return data # if called with --host for backwards compat with 1.2 and earlier. 
if group_name == '_meta': if 'hostvars' in data: self.host_vars_from_top = data['hostvars'] continue if group_name != all.name: group = groups[group_name] = Group(group_name) else: group = all host = None if not isinstance(data, dict): data = {'hosts': data} # is not those subkeys, then simplified syntax, host with vars elif not any(k in data for k in ('hosts','vars')): data = {'hosts': [group_name], 'vars': data} if 'hosts' in data: if not isinstance(data['hosts'], list): raise AnsibleError("You defined a group \"%s\" with bad " "data for the host list:\n %s" % (group_name, data)) for hostname in data['hosts']: if not hostname in all_hosts: all_hosts[hostname] = Host(hostname) host = all_hosts[hostname] group.add_host(host) if 'vars' in data: if not isinstance(data['vars'], dict): raise AnsibleError("You defined a group \"%s\" with bad " "data for variables:\n %s" % (group_name, data)) for k, v in iteritems(data['vars']): if group.name == all.name: all.set_variable(k, v) else: group.set_variable(k, v) # Separate loop to ensure all groups are defined for (group_name, data) in self.raw.items(): if group_name == '_meta': continue if isinstance(data, dict) and 'children' in data: for child_name in data['children']: if child_name in groups: groups[group_name].add_child_group(groups[child_name]) for group in groups.values(): if group.depth == 0 and group.name != 'all': all.add_child_group(group) return groups def get_host_variables(self, host): """ Runs <script> --host <hostname> to determine additional host variables """ if self.host_vars_from_top is not None: got = self.host_vars_from_top.get(host.name, {}) return got cmd = [self.filename, "--host", host.name] try: sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError as e: raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e)) (out, err) = sp.communicate() if out.strip() == '': return dict() try: return json_dict_bytes_to_unicode(self._loader.load(out)) except 
ValueError: raise AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
gpl-3.0
cjdelisle/cjdns
node_build/dependencies/libuv/build/gyp/test/prune_targets/gyptest-prune-targets.py
53
2285
#!/usr/bin/env python # Copyright (c) 2013 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies --root-target removes the unnecessary targets. """ import TestGyp test = TestGyp.TestGyp() # The xcode-ninja generator has its own logic for which targets to include if test.format == 'xcode-ninja': test.skip_test() build_error_code = { 'cmake': 1, 'make': 2, 'msvs': 1, 'ninja': 1, 'xcode': 65, }[test.format] # By default, everything will be included. test.run_gyp('test1.gyp') test.build('test2.gyp', 'lib1') test.build('test2.gyp', 'lib2') test.build('test2.gyp', 'lib3') test.build('test2.gyp', 'lib_indirect') test.build('test1.gyp', 'program1') test.build('test1.gyp', 'program2') test.build('test1.gyp', 'program3') # With deep dependencies of program1 only. test.run_gyp('test1.gyp', '--root-target=program1') test.build('test2.gyp', 'lib1') test.build('test2.gyp', 'lib2', status=build_error_code, stderr=None) test.build('test2.gyp', 'lib3', status=build_error_code, stderr=None) test.build('test2.gyp', 'lib_indirect') test.build('test1.gyp', 'program1') test.build('test1.gyp', 'program2', status=build_error_code, stderr=None) test.build('test1.gyp', 'program3', status=build_error_code, stderr=None) # With deep dependencies of program2 only. test.run_gyp('test1.gyp', '--root-target=program2') test.build('test2.gyp', 'lib1', status=build_error_code, stderr=None) test.build('test2.gyp', 'lib2') test.build('test2.gyp', 'lib3', status=build_error_code, stderr=None) test.build('test2.gyp', 'lib_indirect') test.build('test1.gyp', 'program1', status=build_error_code, stderr=None) test.build('test1.gyp', 'program2') test.build('test1.gyp', 'program3', status=build_error_code, stderr=None) # With deep dependencies of program1 and program2. 
test.run_gyp('test1.gyp', '--root-target=program1', '--root-target=program2') test.build('test2.gyp', 'lib1') test.build('test2.gyp', 'lib2') test.build('test2.gyp', 'lib3', status=build_error_code, stderr=None) test.build('test2.gyp', 'lib_indirect') test.build('test1.gyp', 'program1') test.build('test1.gyp', 'program2') test.build('test1.gyp', 'program3', status=build_error_code, stderr=None) test.pass_test()
gpl-3.0
erkrishna9/odoo
addons/portal_project/tests/test_access_rights.py
65
15194
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (c) 2013-TODAY OpenERP S.A. <http://openerp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.addons.project.tests.test_project_base import TestProjectBase from openerp.exceptions import AccessError from openerp.osv.orm import except_orm from openerp.tools import mute_logger class TestPortalProjectBase(TestProjectBase): def setUp(self): super(TestPortalProjectBase, self).setUp() cr, uid = self.cr, self.uid # Find Portal group group_portal_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'group_portal') self.group_portal_id = group_portal_ref and group_portal_ref[1] or False # Find Public group group_public_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'group_public') self.group_public_id = group_public_ref and group_public_ref[1] or False # # Test users to use through the various tests self.user_portal_id = self.res_users.create(cr, uid, { 'name': 'Chell Portal', 'login': 'chell', 'alias_name': 'chell', 'groups_id': [(6, 0, [self.group_portal_id])] }) self.user_public_id = self.res_users.create(cr, uid, { 'name': 'Donovan Public', 'login': 'donovan', 
'alias_name': 'donovan', 'groups_id': [(6, 0, [self.group_public_id])] }) self.user_manager_id = self.res_users.create(cr, uid, { 'name': 'Eustache Manager', 'login': 'eustache', 'alias_name': 'eustache', 'groups_id': [(6, 0, [self.group_project_manager_id])] }) # Test 'Pigs' project self.project_pigs_id = self.project_project.create(cr, uid, { 'name': 'Pigs', 'privacy_visibility': 'public'}, {'mail_create_nolog': True}) # Various test tasks self.task_1_id = self.project_task.create(cr, uid, { 'name': 'Test1', 'user_id': False, 'project_id': self.project_pigs_id}, {'mail_create_nolog': True}) self.task_2_id = self.project_task.create(cr, uid, { 'name': 'Test2', 'user_id': False, 'project_id': self.project_pigs_id}, {'mail_create_nolog': True}) self.task_3_id = self.project_task.create(cr, uid, { 'name': 'Test3', 'user_id': False, 'project_id': self.project_pigs_id}, {'mail_create_nolog': True}) self.task_4_id = self.project_task.create(cr, uid, { 'name': 'Test4', 'user_id': self.user_projectuser_id, 'project_id': self.project_pigs_id}, {'mail_create_nolog': True}) self.task_5_id = self.project_task.create(cr, uid, { 'name': 'Test5', 'user_id': self.user_portal_id, 'project_id': self.project_pigs_id}, {'mail_create_nolog': True}) self.task_6_id = self.project_task.create(cr, uid, { 'name': 'Test6', 'user_id': self.user_public_id, 'project_id': self.project_pigs_id}, {'mail_create_nolog': True}) class TestPortalProject(TestPortalProjectBase): @mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models') def test_00_project_access_rights(self): """ Test basic project access rights, for project and portal_project """ cr, uid, pigs_id = self.cr, self.uid, self.project_pigs_id # ---------------------------------------- # CASE1: public project # ---------------------------------------- # Do: Alfred reads project -> ok (employee ok public) self.project_project.read(cr, self.user_projectuser_id, [pigs_id], ['state']) # Test: all project tasks visible task_ids = 
self.project_task.search(cr, self.user_projectuser_id, [('project_id', '=', pigs_id)]) test_task_ids = set([self.task_1_id, self.task_2_id, self.task_3_id, self.task_4_id, self.task_5_id, self.task_6_id]) self.assertEqual(set(task_ids), test_task_ids, 'access rights: project user cannot see all tasks of a public project') # Test: all project tasks readable self.project_task.read(cr, self.user_projectuser_id, task_ids, ['name']) # Test: all project tasks writable self.project_task.write(cr, self.user_projectuser_id, task_ids, {'description': 'TestDescription'}) # Do: Bert reads project -> crash, no group self.assertRaises(AccessError, self.project_project.read, cr, self.user_none_id, [pigs_id], ['state']) # Test: no project task visible self.assertRaises(AccessError, self.project_task.search, cr, self.user_none_id, [('project_id', '=', pigs_id)]) # Test: no project task readable self.assertRaises(AccessError, self.project_task.read, cr, self.user_none_id, task_ids, ['name']) # Test: no project task writable self.assertRaises(AccessError, self.project_task.write, cr, self.user_none_id, task_ids, {'description': 'TestDescription'}) # Do: Chell reads project -> ok (portal ok public) self.project_project.read(cr, self.user_portal_id, [pigs_id], ['state']) # Test: all project tasks visible task_ids = self.project_task.search(cr, self.user_portal_id, [('project_id', '=', pigs_id)]) self.assertEqual(set(task_ids), test_task_ids, 'access rights: project user cannot see all tasks of a public project') # Test: all project tasks readable self.project_task.read(cr, self.user_portal_id, task_ids, ['name']) # Test: no project task writable self.assertRaises(AccessError, self.project_task.write, cr, self.user_portal_id, task_ids, {'description': 'TestDescription'}) # Do: Donovan reads project -> ok (public) self.project_project.read(cr, self.user_public_id, [pigs_id], ['state']) # Test: all project tasks visible task_ids = self.project_task.search(cr, self.user_public_id, 
[('project_id', '=', pigs_id)]) self.assertEqual(set(task_ids), test_task_ids, 'access rights: public user cannot see all tasks of a public project') # Test: all project tasks readable self.project_task.read(cr, self.user_public_id, task_ids, ['name']) # Test: no project task writable self.assertRaises(AccessError, self.project_task.write, cr, self.user_public_id, task_ids, {'description': 'TestDescription'}) # ---------------------------------------- # CASE2: portal project # ---------------------------------------- self.project_project.write(cr, uid, [pigs_id], {'privacy_visibility': 'portal'}) self.project_project.invalidate_cache(cr, uid) # Do: Alfred reads project -> ok (employee ok public) self.project_project.read(cr, self.user_projectuser_id, [pigs_id], ['state']) # Test: all project tasks visible task_ids = self.project_task.search(cr, self.user_projectuser_id, [('project_id', '=', pigs_id)]) self.assertEqual(set(task_ids), test_task_ids, 'access rights: project user cannot see all tasks of a portal project') # Do: Bert reads project -> crash, no group self.assertRaises(AccessError, self.project_project.read, cr, self.user_none_id, [pigs_id], ['state']) # Test: no project task searchable self.assertRaises(AccessError, self.project_task.search, cr, self.user_none_id, [('project_id', '=', pigs_id)]) # Data: task follower self.project_task.message_subscribe_users(cr, self.user_projectuser_id, [self.task_1_id, self.task_3_id], [self.user_portal_id]) # Do: Chell reads project -> ok (portal ok public) self.project_project.read(cr, self.user_portal_id, [pigs_id], ['state']) # Test: only followed project tasks visible + assigned task_ids = self.project_task.search(cr, self.user_portal_id, [('project_id', '=', pigs_id)]) test_task_ids = set([self.task_1_id, self.task_3_id, self.task_5_id]) self.assertEqual(set(task_ids), test_task_ids, 'access rights: portal user should see the followed tasks of a portal project') # Do: Donovan reads project -> ko (public ko 
portal) self.assertRaises(except_orm, self.project_project.read, cr, self.user_public_id, [pigs_id], ['state']) # Test: no project task visible task_ids = self.project_task.search(cr, self.user_public_id, [('project_id', '=', pigs_id)]) self.assertFalse(task_ids, 'access rights: public user should not see tasks of a portal project') # Data: task follower cleaning self.project_task.message_unsubscribe_users(cr, self.user_projectuser_id, [self.task_1_id, self.task_3_id], [self.user_portal_id]) # ---------------------------------------- # CASE3: employee project # ---------------------------------------- self.project_project.write(cr, uid, [pigs_id], {'privacy_visibility': 'employees'}) self.project_project.invalidate_cache(cr, uid) # Do: Alfred reads project -> ok (employee ok employee) self.project_project.read(cr, self.user_projectuser_id, [pigs_id], ['state']) # Test: all project tasks visible task_ids = self.project_task.search(cr, self.user_projectuser_id, [('project_id', '=', pigs_id)]) test_task_ids = set([self.task_1_id, self.task_2_id, self.task_3_id, self.task_4_id, self.task_5_id, self.task_6_id]) self.assertEqual(set(task_ids), test_task_ids, 'access rights: project user cannot see all tasks of an employees project') # Do: Bert reads project -> crash, no group self.assertRaises(AccessError, self.project_project.read, cr, self.user_none_id, [pigs_id], ['state']) # Do: Chell reads project -> ko (portal ko employee) self.assertRaises(except_orm, self.project_project.read, cr, self.user_portal_id, [pigs_id], ['state']) # Test: no project task visible + assigned task_ids = self.project_task.search(cr, self.user_portal_id, [('project_id', '=', pigs_id)]) self.assertFalse(task_ids, 'access rights: portal user should not see tasks of an employees project, even if assigned') # Do: Donovan reads project -> ko (public ko employee) self.assertRaises(except_orm, self.project_project.read, cr, self.user_public_id, [pigs_id], ['state']) # Test: no project task visible 
task_ids = self.project_task.search(cr, self.user_public_id, [('project_id', '=', pigs_id)]) self.assertFalse(task_ids, 'access rights: public user should not see tasks of an employees project') # ---------------------------------------- # CASE4: followers project # ---------------------------------------- self.project_project.write(cr, uid, [pigs_id], {'privacy_visibility': 'followers'}) self.project_project.invalidate_cache(cr, uid) # Do: Alfred reads project -> ko (employee ko followers) self.assertRaises(except_orm, self.project_project.read, cr, self.user_projectuser_id, [pigs_id], ['state']) # Test: no project task visible task_ids = self.project_task.search(cr, self.user_projectuser_id, [('project_id', '=', pigs_id)]) test_task_ids = set([self.task_4_id]) self.assertEqual(set(task_ids), test_task_ids, 'access rights: employee user should not see tasks of a not-followed followers project, only assigned') # Do: Bert reads project -> crash, no group self.assertRaises(AccessError, self.project_project.read, cr, self.user_none_id, [pigs_id], ['state']) # Do: Chell reads project -> ko (portal ko employee) self.assertRaises(except_orm, self.project_project.read, cr, self.user_portal_id, [pigs_id], ['state']) # Test: no project task visible task_ids = self.project_task.search(cr, self.user_portal_id, [('project_id', '=', pigs_id)]) test_task_ids = set([self.task_5_id]) self.assertEqual(set(task_ids), test_task_ids, 'access rights: portal user should not see tasks of a not-followed followers project, only assigned') # Do: Donovan reads project -> ko (public ko employee) self.assertRaises(except_orm, self.project_project.read, cr, self.user_public_id, [pigs_id], ['state']) # Test: no project task visible task_ids = self.project_task.search(cr, self.user_public_id, [('project_id', '=', pigs_id)]) self.assertFalse(task_ids, 'access rights: public user should not see tasks of a followers project') # Data: subscribe Alfred, Chell and Donovan as follower 
self.project_project.message_subscribe_users(cr, uid, [pigs_id], [self.user_projectuser_id, self.user_portal_id, self.user_public_id]) self.project_task.message_subscribe_users(cr, self.user_manager_id, [self.task_1_id, self.task_3_id], [self.user_portal_id, self.user_projectuser_id]) # Do: Alfred reads project -> ok (follower ok followers) self.project_project.read(cr, self.user_projectuser_id, [pigs_id], ['state']) # Test: followed + assigned tasks visible task_ids = self.project_task.search(cr, self.user_projectuser_id, [('project_id', '=', pigs_id)]) test_task_ids = set([self.task_1_id, self.task_3_id, self.task_4_id]) self.assertEqual(set(task_ids), test_task_ids, 'access rights: employee user should not see followed + assigned tasks of a follower project') # Do: Chell reads project -> ok (follower ok follower) self.project_project.read(cr, self.user_portal_id, [pigs_id], ['state']) # Test: followed + assigned tasks visible task_ids = self.project_task.search(cr, self.user_portal_id, [('project_id', '=', pigs_id)]) test_task_ids = set([self.task_1_id, self.task_3_id, self.task_5_id]) self.assertEqual(set(task_ids), test_task_ids, 'access rights: employee user should not see followed + assigned tasks of a follower project') # Do: Donovan reads project -> ko (public ko follower even if follower) self.assertRaises(except_orm, self.project_project.read, cr, self.user_public_id, [pigs_id], ['state'])
agpl-3.0
named-data-ndnSIM/ns-3-dev
src/bridge/examples/csma-bridge.py
171
5153
# /* # * This program is free software; you can redistribute it and/or modify # * it under the terms of the GNU General Public License version 2 as # * published by the Free Software Foundation # * # * This program is distributed in the hope that it will be useful, # * but WITHOUT ANY WARRANTY; without even the implied warranty of # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # * GNU General Public License for more details. # * # * You should have received a copy of the GNU General Public License # * along with this program; if not, write to the Free Software # * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # */ # Network topology # # n0 n1 # | | # ---------- # | Switch | # ---------- # | | # n2 n3 # # # - CBR/UDP flows from n0 to n1 and from n3 to n0 # - DropTail queues # - Tracing of queues and packet receptions to file "csma-bridge.tr" import ns.applications import ns.bridge import ns.core import ns.csma import ns.internet import ns.network def main(argv): # # Allow the user to override any of the defaults and the above Bind() at # run-time, via command-line arguments # cmd = ns.core.CommandLine() cmd.Parse(argv) # # Explicitly create the nodes required by the topology(shown above). # #print "Create nodes." 
terminals = ns.network.NodeContainer() terminals.Create(4) csmaSwitch = ns.network.NodeContainer() csmaSwitch.Create(1) #print "Build Topology" csma = ns.csma.CsmaHelper() csma.SetChannelAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate(5000000))) csma.SetChannelAttribute("Delay", ns.core.TimeValue(ns.core.MilliSeconds(2))) # Create the csma links, from each terminal to the switch terminalDevices = ns.network.NetDeviceContainer() switchDevices = ns.network.NetDeviceContainer() for i in range(4): link = csma.Install(ns.network.NodeContainer(ns.network.NodeContainer(terminals.Get(i)), csmaSwitch)) terminalDevices.Add(link.Get(0)) switchDevices.Add(link.Get(1)) # Create the bridge netdevice, which will do the packet switching switchNode = csmaSwitch.Get(0) bridgeDevice = ns.bridge.BridgeNetDevice() switchNode.AddDevice(bridgeDevice) for portIter in range(switchDevices.GetN()): bridgeDevice.AddBridgePort(switchDevices.Get(portIter)) # Add internet stack to the terminals internet = ns.internet.InternetStackHelper() internet.Install(terminals) # We've got the "hardware" in place. Now we need to add IP addresses. # #print "Assign IP Addresses." ipv4 = ns.internet.Ipv4AddressHelper() ipv4.SetBase(ns.network.Ipv4Address("10.1.1.0"), ns.network.Ipv4Mask("255.255.255.0")) ipv4.Assign(terminalDevices) # # Create an OnOff application to send UDP datagrams from node zero to node 1. # #print "Create Applications." 
port = 9 # Discard port(RFC 863) onoff = ns.applications.OnOffHelper("ns3::UdpSocketFactory", ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.1.1.2"), port))) onoff.SetConstantRate (ns.network.DataRate ("500kb/s")) app = onoff.Install(ns.network.NodeContainer(terminals.Get(0))) # Start the application app.Start(ns.core.Seconds(1.0)) app.Stop(ns.core.Seconds(10.0)) # Create an optional packet sink to receive these packets sink = ns.applications.PacketSinkHelper("ns3::UdpSocketFactory", ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address.GetAny(), port))) app = sink.Install(ns.network.NodeContainer(terminals.Get(1))) app.Start(ns.core.Seconds(0.0)) # # Create a similar flow from n3 to n0, starting at time 1.1 seconds # onoff.SetAttribute("Remote", ns.network.AddressValue(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.1.1.1"), port))) app = onoff.Install(ns.network.NodeContainer(terminals.Get(3))) app.Start(ns.core.Seconds(1.1)) app.Stop(ns.core.Seconds(10.0)) app = sink.Install(ns.network.NodeContainer(terminals.Get(0))) app.Start(ns.core.Seconds(0.0)) # # Configure tracing of all enqueue, dequeue, and NetDevice receive events. # Trace output will be sent to the file "csma-bridge.tr" # #print "Configure Tracing." #ascii = ns.network.AsciiTraceHelper(); #csma.EnableAsciiAll(ascii.CreateFileStream ("csma-bridge.tr")); # # Also configure some tcpdump traces; each interface will be traced. # The output files will be named: # csma-bridge.pcap-<nodeId>-<interfaceId> # and can be read by the "tcpdump -r" command(use "-tt" option to # display timestamps correctly) # csma.EnablePcapAll("csma-bridge", False) # # Now, do the actual simulation. # #print "Run Simulation." ns.core.Simulator.Run() ns.core.Simulator.Destroy() #print "Done." if __name__ == '__main__': import sys main(sys.argv)
gpl-2.0
yjxtogo/horizon
openstack_dashboard/dashboards/settings/user/tests.py
73
1493
# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.conf import settings from django.core.urlresolvers import reverse from openstack_dashboard.test import helpers as test INDEX_URL = reverse("horizon:settings:user:index") class UserSettingsTest(test.TestCase): def test_timezone_offset_is_displayed(self): res = self.client.get(INDEX_URL) self.assertContains(res, "UTC +11:00: Australia (Melbourne) Time") self.assertContains(res, "UTC -03:00: Falkland Islands Time") self.assertContains(res, "UTC -10:00: United States (Honolulu) Time") def test_display_language(self): # Add an unknown language to LANGUAGES list settings.LANGUAGES += (('unknown', 'Unknown Language'),) res = self.client.get(INDEX_URL) # Known language self.assertContains(res, 'English') # Unknown language self.assertContains(res, 'Unknown Language')
apache-2.0
elmerdpadilla/iv
addons/hr_attendance/__openerp__.py
52
2119
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Attendances', 'version': '1.1', 'category': 'Human Resources', 'description': """ This module aims to manage employee's attendances. ================================================== Keeps account of the attendances of the employees on the basis of the actions(Sign in/Sign out) performed by them. """, 'author': 'OpenERP SA', 'website': 'https://www.odoo.com/page/employees', 'images': ['images/hr_attendances.jpeg'], 'depends': ['hr', 'report'], 'data': [ 'security/ir_rule.xml', 'security/ir.model.access.csv', 'hr_attendance_view.xml', 'hr_attendance_report.xml', 'wizard/hr_attendance_error_view.xml', 'res_config_view.xml', 'views/report_attendanceerrors.xml', 'views/hr_attendance.xml', ], 'demo': ['hr_attendance_demo.xml'], 'test': [ 'test/attendance_process.yml', 'test/hr_attendance_report.yml', ], 'installable': True, 'auto_install': False, #web 'qweb': ["static/src/xml/attendance.xml"], } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
myyc/pyhs2
pyhs2/TCLIService/TCLIService.py
8
88595
# # Autogenerated by Thrift Compiler (0.9.0) # # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING # # options string: py # from thrift.Thrift import TType, TMessageType, TException, TApplicationException from ttypes import * from thrift.Thrift import TProcessor from thrift.transport import TTransport from thrift.protocol import TBinaryProtocol, TProtocol try: from thrift.protocol import fastbinary except: fastbinary = None class Iface: def OpenSession(self, req): """ Parameters: - req """ pass def CloseSession(self, req): """ Parameters: - req """ pass def GetInfo(self, req): """ Parameters: - req """ pass def ExecuteStatement(self, req): """ Parameters: - req """ pass def GetTypeInfo(self, req): """ Parameters: - req """ pass def GetCatalogs(self, req): """ Parameters: - req """ pass def GetSchemas(self, req): """ Parameters: - req """ pass def GetTables(self, req): """ Parameters: - req """ pass def GetTableTypes(self, req): """ Parameters: - req """ pass def GetColumns(self, req): """ Parameters: - req """ pass def GetFunctions(self, req): """ Parameters: - req """ pass def GetOperationStatus(self, req): """ Parameters: - req """ pass def CancelOperation(self, req): """ Parameters: - req """ pass def CloseOperation(self, req): """ Parameters: - req """ pass def GetResultSetMetadata(self, req): """ Parameters: - req """ pass def FetchResults(self, req): """ Parameters: - req """ pass def GetLog(self, req): """ Parameters: - req """ pass class Client(Iface): def __init__(self, iprot, oprot=None): self._iprot = self._oprot = iprot if oprot is not None: self._oprot = oprot self._seqid = 0 def OpenSession(self, req): """ Parameters: - req """ self.send_OpenSession(req) return self.recv_OpenSession() def send_OpenSession(self, req): self._oprot.writeMessageBegin('OpenSession', TMessageType.CALL, self._seqid) args = OpenSession_args() args.req = req args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def 
recv_OpenSession(self, ): (fname, mtype, rseqid) = self._iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(self._iprot) self._iprot.readMessageEnd() raise x result = OpenSession_result() result.read(self._iprot) self._iprot.readMessageEnd() if result.success is not None: return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "OpenSession failed: unknown result"); def CloseSession(self, req): """ Parameters: - req """ self.send_CloseSession(req) return self.recv_CloseSession() def send_CloseSession(self, req): self._oprot.writeMessageBegin('CloseSession', TMessageType.CALL, self._seqid) args = CloseSession_args() args.req = req args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_CloseSession(self, ): (fname, mtype, rseqid) = self._iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(self._iprot) self._iprot.readMessageEnd() raise x result = CloseSession_result() result.read(self._iprot) self._iprot.readMessageEnd() if result.success is not None: return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "CloseSession failed: unknown result"); def GetInfo(self, req): """ Parameters: - req """ self.send_GetInfo(req) return self.recv_GetInfo() def send_GetInfo(self, req): self._oprot.writeMessageBegin('GetInfo', TMessageType.CALL, self._seqid) args = GetInfo_args() args.req = req args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_GetInfo(self, ): (fname, mtype, rseqid) = self._iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(self._iprot) self._iprot.readMessageEnd() raise x result = GetInfo_result() result.read(self._iprot) self._iprot.readMessageEnd() if result.success is not None: return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "GetInfo failed: unknown 
result"); def ExecuteStatement(self, req): """ Parameters: - req """ self.send_ExecuteStatement(req) return self.recv_ExecuteStatement() def send_ExecuteStatement(self, req): self._oprot.writeMessageBegin('ExecuteStatement', TMessageType.CALL, self._seqid) args = ExecuteStatement_args() args.req = req args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_ExecuteStatement(self, ): (fname, mtype, rseqid) = self._iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(self._iprot) self._iprot.readMessageEnd() raise x result = ExecuteStatement_result() result.read(self._iprot) self._iprot.readMessageEnd() if result.success is not None: return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "ExecuteStatement failed: unknown result"); def GetTypeInfo(self, req): """ Parameters: - req """ self.send_GetTypeInfo(req) return self.recv_GetTypeInfo() def send_GetTypeInfo(self, req): self._oprot.writeMessageBegin('GetTypeInfo', TMessageType.CALL, self._seqid) args = GetTypeInfo_args() args.req = req args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_GetTypeInfo(self, ): (fname, mtype, rseqid) = self._iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(self._iprot) self._iprot.readMessageEnd() raise x result = GetTypeInfo_result() result.read(self._iprot) self._iprot.readMessageEnd() if result.success is not None: return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "GetTypeInfo failed: unknown result"); def GetCatalogs(self, req): """ Parameters: - req """ self.send_GetCatalogs(req) return self.recv_GetCatalogs() def send_GetCatalogs(self, req): self._oprot.writeMessageBegin('GetCatalogs', TMessageType.CALL, self._seqid) args = GetCatalogs_args() args.req = req args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def 
recv_GetCatalogs(self, ): (fname, mtype, rseqid) = self._iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(self._iprot) self._iprot.readMessageEnd() raise x result = GetCatalogs_result() result.read(self._iprot) self._iprot.readMessageEnd() if result.success is not None: return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "GetCatalogs failed: unknown result"); def GetSchemas(self, req): """ Parameters: - req """ self.send_GetSchemas(req) return self.recv_GetSchemas() def send_GetSchemas(self, req): self._oprot.writeMessageBegin('GetSchemas', TMessageType.CALL, self._seqid) args = GetSchemas_args() args.req = req args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_GetSchemas(self, ): (fname, mtype, rseqid) = self._iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(self._iprot) self._iprot.readMessageEnd() raise x result = GetSchemas_result() result.read(self._iprot) self._iprot.readMessageEnd() if result.success is not None: return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "GetSchemas failed: unknown result"); def GetTables(self, req): """ Parameters: - req """ self.send_GetTables(req) return self.recv_GetTables() def send_GetTables(self, req): self._oprot.writeMessageBegin('GetTables', TMessageType.CALL, self._seqid) args = GetTables_args() args.req = req args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_GetTables(self, ): (fname, mtype, rseqid) = self._iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(self._iprot) self._iprot.readMessageEnd() raise x result = GetTables_result() result.read(self._iprot) self._iprot.readMessageEnd() if result.success is not None: return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "GetTables failed: unknown 
result"); def GetTableTypes(self, req): """ Parameters: - req """ self.send_GetTableTypes(req) return self.recv_GetTableTypes() def send_GetTableTypes(self, req): self._oprot.writeMessageBegin('GetTableTypes', TMessageType.CALL, self._seqid) args = GetTableTypes_args() args.req = req args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_GetTableTypes(self, ): (fname, mtype, rseqid) = self._iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(self._iprot) self._iprot.readMessageEnd() raise x result = GetTableTypes_result() result.read(self._iprot) self._iprot.readMessageEnd() if result.success is not None: return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "GetTableTypes failed: unknown result"); def GetColumns(self, req): """ Parameters: - req """ self.send_GetColumns(req) return self.recv_GetColumns() def send_GetColumns(self, req): self._oprot.writeMessageBegin('GetColumns', TMessageType.CALL, self._seqid) args = GetColumns_args() args.req = req args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_GetColumns(self, ): (fname, mtype, rseqid) = self._iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(self._iprot) self._iprot.readMessageEnd() raise x result = GetColumns_result() result.read(self._iprot) self._iprot.readMessageEnd() if result.success is not None: return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "GetColumns failed: unknown result"); def GetFunctions(self, req): """ Parameters: - req """ self.send_GetFunctions(req) return self.recv_GetFunctions() def send_GetFunctions(self, req): self._oprot.writeMessageBegin('GetFunctions', TMessageType.CALL, self._seqid) args = GetFunctions_args() args.req = req args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_GetFunctions(self, ): (fname, mtype, 
rseqid) = self._iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(self._iprot) self._iprot.readMessageEnd() raise x result = GetFunctions_result() result.read(self._iprot) self._iprot.readMessageEnd() if result.success is not None: return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "GetFunctions failed: unknown result"); def GetOperationStatus(self, req): """ Parameters: - req """ self.send_GetOperationStatus(req) return self.recv_GetOperationStatus() def send_GetOperationStatus(self, req): self._oprot.writeMessageBegin('GetOperationStatus', TMessageType.CALL, self._seqid) args = GetOperationStatus_args() args.req = req args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_GetOperationStatus(self, ): (fname, mtype, rseqid) = self._iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(self._iprot) self._iprot.readMessageEnd() raise x result = GetOperationStatus_result() result.read(self._iprot) self._iprot.readMessageEnd() if result.success is not None: return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "GetOperationStatus failed: unknown result"); def CancelOperation(self, req): """ Parameters: - req """ self.send_CancelOperation(req) return self.recv_CancelOperation() def send_CancelOperation(self, req): self._oprot.writeMessageBegin('CancelOperation', TMessageType.CALL, self._seqid) args = CancelOperation_args() args.req = req args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_CancelOperation(self, ): (fname, mtype, rseqid) = self._iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(self._iprot) self._iprot.readMessageEnd() raise x result = CancelOperation_result() result.read(self._iprot) self._iprot.readMessageEnd() if result.success is not None: return result.success raise 
TApplicationException(TApplicationException.MISSING_RESULT, "CancelOperation failed: unknown result"); def CloseOperation(self, req): """ Parameters: - req """ self.send_CloseOperation(req) return self.recv_CloseOperation() def send_CloseOperation(self, req): self._oprot.writeMessageBegin('CloseOperation', TMessageType.CALL, self._seqid) args = CloseOperation_args() args.req = req args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_CloseOperation(self, ): (fname, mtype, rseqid) = self._iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(self._iprot) self._iprot.readMessageEnd() raise x result = CloseOperation_result() result.read(self._iprot) self._iprot.readMessageEnd() if result.success is not None: return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "CloseOperation failed: unknown result"); def GetResultSetMetadata(self, req): """ Parameters: - req """ self.send_GetResultSetMetadata(req) return self.recv_GetResultSetMetadata() def send_GetResultSetMetadata(self, req): self._oprot.writeMessageBegin('GetResultSetMetadata', TMessageType.CALL, self._seqid) args = GetResultSetMetadata_args() args.req = req args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_GetResultSetMetadata(self, ): (fname, mtype, rseqid) = self._iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(self._iprot) self._iprot.readMessageEnd() raise x result = GetResultSetMetadata_result() result.read(self._iprot) self._iprot.readMessageEnd() if result.success is not None: return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "GetResultSetMetadata failed: unknown result"); def FetchResults(self, req): """ Parameters: - req """ self.send_FetchResults(req) return self.recv_FetchResults() def send_FetchResults(self, req): self._oprot.writeMessageBegin('FetchResults', 
TMessageType.CALL, self._seqid) args = FetchResults_args() args.req = req args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_FetchResults(self, ): (fname, mtype, rseqid) = self._iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(self._iprot) self._iprot.readMessageEnd() raise x result = FetchResults_result() result.read(self._iprot) self._iprot.readMessageEnd() if result.success is not None: return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "FetchResults failed: unknown result"); def GetLog(self, req): """ Parameters: - req """ self.send_GetLog(req) return self.recv_GetLog() def send_GetLog(self, req): self._oprot.writeMessageBegin('GetLog', TMessageType.CALL, self._seqid) args = GetLog_args() args.req = req args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() def recv_GetLog(self, ): (fname, mtype, rseqid) = self._iprot.readMessageBegin() if mtype == TMessageType.EXCEPTION: x = TApplicationException() x.read(self._iprot) self._iprot.readMessageEnd() raise x result = GetLog_result() result.read(self._iprot) self._iprot.readMessageEnd() if result.success is not None: return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "GetLog failed: unknown result"); class Processor(Iface, TProcessor): def __init__(self, handler): self._handler = handler self._processMap = {} self._processMap["OpenSession"] = Processor.process_OpenSession self._processMap["CloseSession"] = Processor.process_CloseSession self._processMap["GetInfo"] = Processor.process_GetInfo self._processMap["ExecuteStatement"] = Processor.process_ExecuteStatement self._processMap["GetTypeInfo"] = Processor.process_GetTypeInfo self._processMap["GetCatalogs"] = Processor.process_GetCatalogs self._processMap["GetSchemas"] = Processor.process_GetSchemas self._processMap["GetTables"] = Processor.process_GetTables 
self._processMap["GetTableTypes"] = Processor.process_GetTableTypes self._processMap["GetColumns"] = Processor.process_GetColumns self._processMap["GetFunctions"] = Processor.process_GetFunctions self._processMap["GetOperationStatus"] = Processor.process_GetOperationStatus self._processMap["CancelOperation"] = Processor.process_CancelOperation self._processMap["CloseOperation"] = Processor.process_CloseOperation self._processMap["GetResultSetMetadata"] = Processor.process_GetResultSetMetadata self._processMap["FetchResults"] = Processor.process_FetchResults self._processMap["GetLog"] = Processor.process_GetLog def process(self, iprot, oprot): (name, type, seqid) = iprot.readMessageBegin() if name not in self._processMap: iprot.skip(TType.STRUCT) iprot.readMessageEnd() x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name)) oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid) x.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() return else: self._processMap[name](self, seqid, iprot, oprot) return True def process_OpenSession(self, seqid, iprot, oprot): args = OpenSession_args() args.read(iprot) iprot.readMessageEnd() result = OpenSession_result() result.success = self._handler.OpenSession(args.req) oprot.writeMessageBegin("OpenSession", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_CloseSession(self, seqid, iprot, oprot): args = CloseSession_args() args.read(iprot) iprot.readMessageEnd() result = CloseSession_result() result.success = self._handler.CloseSession(args.req) oprot.writeMessageBegin("CloseSession", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_GetInfo(self, seqid, iprot, oprot): args = GetInfo_args() args.read(iprot) iprot.readMessageEnd() result = GetInfo_result() result.success = self._handler.GetInfo(args.req) oprot.writeMessageBegin("GetInfo", TMessageType.REPLY, seqid) 
result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_ExecuteStatement(self, seqid, iprot, oprot): args = ExecuteStatement_args() args.read(iprot) iprot.readMessageEnd() result = ExecuteStatement_result() result.success = self._handler.ExecuteStatement(args.req) oprot.writeMessageBegin("ExecuteStatement", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_GetTypeInfo(self, seqid, iprot, oprot): args = GetTypeInfo_args() args.read(iprot) iprot.readMessageEnd() result = GetTypeInfo_result() result.success = self._handler.GetTypeInfo(args.req) oprot.writeMessageBegin("GetTypeInfo", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_GetCatalogs(self, seqid, iprot, oprot): args = GetCatalogs_args() args.read(iprot) iprot.readMessageEnd() result = GetCatalogs_result() result.success = self._handler.GetCatalogs(args.req) oprot.writeMessageBegin("GetCatalogs", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_GetSchemas(self, seqid, iprot, oprot): args = GetSchemas_args() args.read(iprot) iprot.readMessageEnd() result = GetSchemas_result() result.success = self._handler.GetSchemas(args.req) oprot.writeMessageBegin("GetSchemas", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_GetTables(self, seqid, iprot, oprot): args = GetTables_args() args.read(iprot) iprot.readMessageEnd() result = GetTables_result() result.success = self._handler.GetTables(args.req) oprot.writeMessageBegin("GetTables", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_GetTableTypes(self, seqid, iprot, oprot): args = GetTableTypes_args() args.read(iprot) iprot.readMessageEnd() result = GetTableTypes_result() result.success = self._handler.GetTableTypes(args.req) oprot.writeMessageBegin("GetTableTypes", TMessageType.REPLY, 
seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_GetColumns(self, seqid, iprot, oprot): args = GetColumns_args() args.read(iprot) iprot.readMessageEnd() result = GetColumns_result() result.success = self._handler.GetColumns(args.req) oprot.writeMessageBegin("GetColumns", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_GetFunctions(self, seqid, iprot, oprot): args = GetFunctions_args() args.read(iprot) iprot.readMessageEnd() result = GetFunctions_result() result.success = self._handler.GetFunctions(args.req) oprot.writeMessageBegin("GetFunctions", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_GetOperationStatus(self, seqid, iprot, oprot): args = GetOperationStatus_args() args.read(iprot) iprot.readMessageEnd() result = GetOperationStatus_result() result.success = self._handler.GetOperationStatus(args.req) oprot.writeMessageBegin("GetOperationStatus", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_CancelOperation(self, seqid, iprot, oprot): args = CancelOperation_args() args.read(iprot) iprot.readMessageEnd() result = CancelOperation_result() result.success = self._handler.CancelOperation(args.req) oprot.writeMessageBegin("CancelOperation", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_CloseOperation(self, seqid, iprot, oprot): args = CloseOperation_args() args.read(iprot) iprot.readMessageEnd() result = CloseOperation_result() result.success = self._handler.CloseOperation(args.req) oprot.writeMessageBegin("CloseOperation", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_GetResultSetMetadata(self, seqid, iprot, oprot): args = GetResultSetMetadata_args() args.read(iprot) iprot.readMessageEnd() result = GetResultSetMetadata_result() result.success = 
self._handler.GetResultSetMetadata(args.req) oprot.writeMessageBegin("GetResultSetMetadata", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_FetchResults(self, seqid, iprot, oprot): args = FetchResults_args() args.read(iprot) iprot.readMessageEnd() result = FetchResults_result() result.success = self._handler.FetchResults(args.req) oprot.writeMessageBegin("FetchResults", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() def process_GetLog(self, seqid, iprot, oprot): args = GetLog_args() args.read(iprot) iprot.readMessageEnd() result = GetLog_result() result.success = self._handler.GetLog(args.req) oprot.writeMessageBegin("GetLog", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() oprot.trans.flush() # HELPER FUNCTIONS AND STRUCTURES class OpenSession_args: """ Attributes: - req """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'req', (TOpenSessionReq, TOpenSessionReq.thrift_spec), None, ), # 1 ) def __init__(self, req=None,): self.req = req def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.req = TOpenSessionReq() self.req.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('OpenSession_args') if self.req is not None: oprot.writeFieldBegin('req', 
# -----------------------------------------------------------------------------
# Request/response wrapper structs for the TCLIService RPCs.
#
# The auto-generated code defined two dozen near-identical classes:
#   <Op>_args    -- wraps a single T<Op>Req  in field 'req'     (field id 1)
#   <Op>_result  -- wraps a single T<Op>Resp in field 'success' (field id 0)
# The formatting of this region had been destroyed (all newlines collapsed),
# so the classes are rebuilt here from two factory templates that reproduce
# the generated code's behaviour exactly: same class names, same
# __init__ signatures, same thrift_spec layout, and identical
# read/write/validate/__repr__/__eq__/__ne__ semantics and wire strings.
#
# NOTE(review): OpenSession_args and CancelOperation_result are also emitted
# from the same templates because their original definitions were truncated
# at the boundaries of the mangled region -- the visible fragments match the
# template exactly, but confirm against a pristine generated TCLIService.py.
# -----------------------------------------------------------------------------


def _make_args_class(op_name, req_cls):
  """Build the <op_name>_args struct class.

  op_name: RPC name, e.g. 'OpenSession' (used for the wire struct name
           '<op_name>_args' and the class __name__).
  req_cls: the T<Op>Req struct type carried in the 'req' field (id 1).

  Returns a class behaviourally identical to the thrift-generated one.
  """
  class _Args:
    """
    Attributes:
     - req
    """

    thrift_spec = (
      None, # 0
      (1, TType.STRUCT, 'req', (req_cls, req_cls.thrift_spec), None, ), # 1
    )

    def __init__(self, req=None,):
      self.req = req

    def read(self, iprot):
      # Fast path: let the accelerated C decoder consume the transport
      # directly when the protocol/transport/spec combination allows it.
      if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
        fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
        return
      # Slow path: generic field-by-field decoding; unknown fields are
      # skipped so newer peers remain wire-compatible.
      iprot.readStructBegin()
      while True:
        (fname, ftype, fid) = iprot.readFieldBegin()
        if ftype == TType.STOP:
          break
        if fid == 1:
          if ftype == TType.STRUCT:
            self.req = req_cls()
            self.req.read(iprot)
          else:
            iprot.skip(ftype)
        else:
          iprot.skip(ftype)
        iprot.readFieldEnd()
      iprot.readStructEnd()

    def write(self, oprot):
      # Fast path via the accelerated C encoder when available.
      if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
        oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
        return
      oprot.writeStructBegin(op_name + '_args')
      if self.req is not None:
        oprot.writeFieldBegin('req', TType.STRUCT, 1)
        self.req.write(oprot)
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
      oprot.writeStructEnd()

    def validate(self):
      # Generated structs with no required fields perform no validation.
      return

    def __repr__(self):
      L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
      return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
      return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
      return not (self == other)

  # Give the class its generated name so __repr__ and pickling behave
  # exactly like the original module-level class definition.
  _Args.__name__ = op_name + '_args'
  return _Args


def _make_result_class(op_name, resp_cls):
  """Build the <op_name>_result struct class.

  op_name:  RPC name, e.g. 'OpenSession' (used for the wire struct name
            '<op_name>_result' and the class __name__).
  resp_cls: the T<Op>Resp struct type carried in the 'success' field (id 0).

  Returns a class behaviourally identical to the thrift-generated one.
  """
  class _Result:
    """
    Attributes:
     - success
    """

    thrift_spec = (
      (0, TType.STRUCT, 'success', (resp_cls, resp_cls.thrift_spec), None, ), # 0
    )

    def __init__(self, success=None,):
      self.success = success

    def read(self, iprot):
      # Fast path: accelerated C decoder (see _make_args_class.read).
      if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
        fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
        return
      # Slow path: generic field-by-field decoding.
      iprot.readStructBegin()
      while True:
        (fname, ftype, fid) = iprot.readFieldBegin()
        if ftype == TType.STOP:
          break
        if fid == 0:
          if ftype == TType.STRUCT:
            self.success = resp_cls()
            self.success.read(iprot)
          else:
            iprot.skip(ftype)
        else:
          iprot.skip(ftype)
        iprot.readFieldEnd()
      iprot.readStructEnd()

    def write(self, oprot):
      # Fast path via the accelerated C encoder when available.
      if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
        oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
        return
      oprot.writeStructBegin(op_name + '_result')
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.STRUCT, 0)
        self.success.write(oprot)
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
      oprot.writeStructEnd()

    def validate(self):
      # Generated structs with no required fields perform no validation.
      return

    def __repr__(self):
      L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
      return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
      return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
      return not (self == other)

  # Give the class its generated name so __repr__ and pickling behave
  # exactly like the original module-level class definition.
  _Result.__name__ = op_name + '_result'
  return _Result


# Module-level bindings with the exact names the generated code exposed,
# so Client/Processor code and external callers are unaffected.
OpenSession_args = _make_args_class('OpenSession', TOpenSessionReq)
OpenSession_result = _make_result_class('OpenSession', TOpenSessionResp)
CloseSession_args = _make_args_class('CloseSession', TCloseSessionReq)
CloseSession_result = _make_result_class('CloseSession', TCloseSessionResp)
GetInfo_args = _make_args_class('GetInfo', TGetInfoReq)
GetInfo_result = _make_result_class('GetInfo', TGetInfoResp)
ExecuteStatement_args = _make_args_class('ExecuteStatement', TExecuteStatementReq)
ExecuteStatement_result = _make_result_class('ExecuteStatement', TExecuteStatementResp)
GetTypeInfo_args = _make_args_class('GetTypeInfo', TGetTypeInfoReq)
GetTypeInfo_result = _make_result_class('GetTypeInfo', TGetTypeInfoResp)
GetCatalogs_args = _make_args_class('GetCatalogs', TGetCatalogsReq)
GetCatalogs_result = _make_result_class('GetCatalogs', TGetCatalogsResp)
GetSchemas_args = _make_args_class('GetSchemas', TGetSchemasReq)
GetSchemas_result = _make_result_class('GetSchemas', TGetSchemasResp)
GetTables_args = _make_args_class('GetTables', TGetTablesReq)
GetTables_result = _make_result_class('GetTables', TGetTablesResp)
GetTableTypes_args = _make_args_class('GetTableTypes', TGetTableTypesReq)
GetTableTypes_result = _make_result_class('GetTableTypes', TGetTableTypesResp)
GetColumns_args = _make_args_class('GetColumns', TGetColumnsReq)
GetColumns_result = _make_result_class('GetColumns', TGetColumnsResp)
GetFunctions_args = _make_args_class('GetFunctions', TGetFunctionsReq)
GetFunctions_result = _make_result_class('GetFunctions', TGetFunctionsResp)
GetOperationStatus_args = _make_args_class('GetOperationStatus', TGetOperationStatusReq)
GetOperationStatus_result = _make_result_class('GetOperationStatus', TGetOperationStatusResp)
CancelOperation_args = _make_args_class('CancelOperation', TCancelOperationReq)
CancelOperation_result = _make_result_class('CancelOperation', TCancelOperationResp)
(self.__class__, self.thrift_spec))) return oprot.writeStructBegin('CancelOperation_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class CloseOperation_args: """ Attributes: - req """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'req', (TCloseOperationReq, TCloseOperationReq.thrift_spec), None, ), # 1 ) def __init__(self, req=None,): self.req = req def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.req = TCloseOperationReq() self.req.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('CloseOperation_args') if self.req is not None: oprot.writeFieldBegin('req', TType.STRUCT, 1) self.req.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % 
(self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class CloseOperation_result: """ Attributes: - success """ thrift_spec = ( (0, TType.STRUCT, 'success', (TCloseOperationResp, TCloseOperationResp.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): self.success = success def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.STRUCT: self.success = TCloseOperationResp() self.success.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('CloseOperation_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class GetResultSetMetadata_args: """ Attributes: - req """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'req', (TGetResultSetMetadataReq, TGetResultSetMetadataReq.thrift_spec), 
None, ), # 1 ) def __init__(self, req=None,): self.req = req def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.req = TGetResultSetMetadataReq() self.req.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('GetResultSetMetadata_args') if self.req is not None: oprot.writeFieldBegin('req', TType.STRUCT, 1) self.req.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class GetResultSetMetadata_result: """ Attributes: - success """ thrift_spec = ( (0, TType.STRUCT, 'success', (TGetResultSetMetadataResp, TGetResultSetMetadataResp.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): self.success = success def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return 
iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.STRUCT: self.success = TGetResultSetMetadataResp() self.success.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('GetResultSetMetadata_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class FetchResults_args: """ Attributes: - req """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'req', (TFetchResultsReq, TFetchResultsReq.thrift_spec), None, ), # 1 ) def __init__(self, req=None,): self.req = req def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.req = TFetchResultsReq() self.req.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated 
and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('FetchResults_args') if self.req is not None: oprot.writeFieldBegin('req', TType.STRUCT, 1) self.req.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class FetchResults_result: """ Attributes: - success """ thrift_spec = ( (0, TType.STRUCT, 'success', (TFetchResultsResp, TFetchResultsResp.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): self.success = success def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.STRUCT: self.success = TFetchResultsResp() self.success.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('FetchResults_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): 
return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class GetLog_args: """ Attributes: - req """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'req', (TGetLogReq, TGetLogReq.thrift_spec), None, ), # 1 ) def __init__(self, req=None,): self.req = req def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.req = TGetLogReq() self.req.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('GetLog_args') if self.req is not None: oprot.writeFieldBegin('req', TType.STRUCT, 1) self.req.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class GetLog_result: """ Attributes: - success """ thrift_spec = ( (0, TType.STRUCT, 'success', (TGetLogResp, TGetLogResp.thrift_spec), 
None, ), # 0 ) def __init__(self, success=None,): self.success = success def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.STRUCT: self.success = TGetLogResp() self.success.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('GetLog_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other)
mit
vismartltd/edx-platform
common/djangoapps/util/migrations/0001_initial.py
102
4641
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'RateLimitConfiguration' db.create_table('util_ratelimitconfiguration', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('change_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('changed_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.PROTECT)), ('enabled', self.gf('django.db.models.fields.BooleanField')(default=False)), )) db.send_create_signal('util', ['RateLimitConfiguration']) def backwards(self, orm): # Deleting model 'RateLimitConfiguration' db.delete_table('util_ratelimitconfiguration') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': 
('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'util.ratelimitconfiguration': { 'Meta': {'object_name': 'RateLimitConfiguration'}, 'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}), 'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], 
{'primary_key': 'True'}) } } complete_apps = ['util']
agpl-3.0
snnn/tensorflow
tensorflow/python/kernel_tests/distributions/laplace_test.py
44
13222
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import importlib import numpy as np from tensorflow.python.eager import backprop from tensorflow.python.framework import constant_op from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.ops import nn_ops from tensorflow.python.ops.distributions import laplace as laplace_lib from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging def try_import(name): # pylint: disable=invalid-name module = None try: module = importlib.import_module(name) except ImportError as e: tf_logging.warning("Could not import %s: %s" % (name, str(e))) return module stats = try_import("scipy.stats") @test_util.run_all_in_graph_and_eager_modes class LaplaceTest(test.TestCase): def testLaplaceShape(self): loc = constant_op.constant([3.0] * 5) scale = constant_op.constant(11.0) laplace = laplace_lib.Laplace(loc=loc, scale=scale) self.assertEqual(self.evaluate(laplace.batch_shape_tensor()), (5,)) self.assertEqual(laplace.batch_shape, tensor_shape.TensorShape([5])) self.assertAllEqual(self.evaluate(laplace.event_shape_tensor()), []) self.assertEqual(laplace.event_shape, tensor_shape.TensorShape([])) def 
testLaplaceLogPDF(self): batch_size = 6 loc = constant_op.constant([2.0] * batch_size) scale = constant_op.constant([3.0] * batch_size) loc_v = 2.0 scale_v = 3.0 x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32) laplace = laplace_lib.Laplace(loc=loc, scale=scale) log_pdf = laplace.log_prob(x) self.assertEqual(log_pdf.get_shape(), (6,)) if not stats: return expected_log_pdf = stats.laplace.logpdf(x, loc_v, scale=scale_v) self.assertAllClose(self.evaluate(log_pdf), expected_log_pdf) pdf = laplace.prob(x) self.assertEqual(pdf.get_shape(), (6,)) self.assertAllClose(self.evaluate(pdf), np.exp(expected_log_pdf)) def testLaplaceLogPDFMultidimensional(self): batch_size = 6 loc = constant_op.constant([[2.0, 4.0]] * batch_size) scale = constant_op.constant([[3.0, 4.0]] * batch_size) loc_v = np.array([2.0, 4.0]) scale_v = np.array([3.0, 4.0]) x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T laplace = laplace_lib.Laplace(loc=loc, scale=scale) log_pdf = laplace.log_prob(x) log_pdf_values = self.evaluate(log_pdf) self.assertEqual(log_pdf.get_shape(), (6, 2)) pdf = laplace.prob(x) pdf_values = self.evaluate(pdf) self.assertEqual(pdf.get_shape(), (6, 2)) if not stats: return expected_log_pdf = stats.laplace.logpdf(x, loc_v, scale=scale_v) self.assertAllClose(log_pdf_values, expected_log_pdf) self.assertAllClose(pdf_values, np.exp(expected_log_pdf)) def testLaplaceLogPDFMultidimensionalBroadcasting(self): batch_size = 6 loc = constant_op.constant([[2.0, 4.0]] * batch_size) scale = constant_op.constant(3.0) loc_v = np.array([2.0, 4.0]) scale_v = 3.0 x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T laplace = laplace_lib.Laplace(loc=loc, scale=scale) log_pdf = laplace.log_prob(x) log_pdf_values = self.evaluate(log_pdf) self.assertEqual(log_pdf.get_shape(), (6, 2)) pdf = laplace.prob(x) pdf_values = self.evaluate(pdf) self.assertEqual(pdf.get_shape(), (6, 2)) if not stats: return expected_log_pdf = stats.laplace.logpdf(x, loc_v, 
scale=scale_v) self.assertAllClose(log_pdf_values, expected_log_pdf) self.assertAllClose(pdf_values, np.exp(expected_log_pdf)) def testLaplaceCDF(self): batch_size = 6 loc = constant_op.constant([2.0] * batch_size) scale = constant_op.constant([3.0] * batch_size) loc_v = 2.0 scale_v = 3.0 x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32) laplace = laplace_lib.Laplace(loc=loc, scale=scale) cdf = laplace.cdf(x) self.assertEqual(cdf.get_shape(), (6,)) if not stats: return expected_cdf = stats.laplace.cdf(x, loc_v, scale=scale_v) self.assertAllClose(self.evaluate(cdf), expected_cdf) def testLaplaceLogCDF(self): batch_size = 6 loc = constant_op.constant([2.0] * batch_size) scale = constant_op.constant([3.0] * batch_size) loc_v = 2.0 scale_v = 3.0 x = np.array([-2.5, 2.5, -4.0, 0.1, 1.0, 2.0], dtype=np.float32) laplace = laplace_lib.Laplace(loc=loc, scale=scale) cdf = laplace.log_cdf(x) self.assertEqual(cdf.get_shape(), (6,)) if not stats: return expected_cdf = stats.laplace.logcdf(x, loc_v, scale=scale_v) self.assertAllClose(self.evaluate(cdf), expected_cdf) def testLaplaceLogSurvivalFunction(self): batch_size = 6 loc = constant_op.constant([2.0] * batch_size) scale = constant_op.constant([3.0] * batch_size) loc_v = 2.0 scale_v = 3.0 x = np.array([-2.5, 2.5, -4.0, 0.1, 1.0, 2.0], dtype=np.float32) laplace = laplace_lib.Laplace(loc=loc, scale=scale) sf = laplace.log_survival_function(x) self.assertEqual(sf.get_shape(), (6,)) if not stats: return expected_sf = stats.laplace.logsf(x, loc_v, scale=scale_v) self.assertAllClose(self.evaluate(sf), expected_sf) def testLaplaceMean(self): loc_v = np.array([1.0, 3.0, 2.5]) scale_v = np.array([1.0, 4.0, 5.0]) laplace = laplace_lib.Laplace(loc=loc_v, scale=scale_v) self.assertEqual(laplace.mean().get_shape(), (3,)) if not stats: return expected_means = stats.laplace.mean(loc_v, scale=scale_v) self.assertAllClose(self.evaluate(laplace.mean()), expected_means) def testLaplaceMode(self): loc_v = np.array([0.5, 3.0, 2.5]) 
scale_v = np.array([1.0, 4.0, 5.0]) laplace = laplace_lib.Laplace(loc=loc_v, scale=scale_v) self.assertEqual(laplace.mode().get_shape(), (3,)) self.assertAllClose(self.evaluate(laplace.mode()), loc_v) def testLaplaceVariance(self): loc_v = np.array([1.0, 3.0, 2.5]) scale_v = np.array([1.0, 4.0, 5.0]) laplace = laplace_lib.Laplace(loc=loc_v, scale=scale_v) self.assertEqual(laplace.variance().get_shape(), (3,)) if not stats: return expected_variances = stats.laplace.var(loc_v, scale=scale_v) self.assertAllClose(self.evaluate(laplace.variance()), expected_variances) def testLaplaceStd(self): loc_v = np.array([1.0, 3.0, 2.5]) scale_v = np.array([1.0, 4.0, 5.0]) laplace = laplace_lib.Laplace(loc=loc_v, scale=scale_v) self.assertEqual(laplace.stddev().get_shape(), (3,)) if not stats: return expected_stddev = stats.laplace.std(loc_v, scale=scale_v) self.assertAllClose(self.evaluate(laplace.stddev()), expected_stddev) def testLaplaceEntropy(self): loc_v = np.array([1.0, 3.0, 2.5]) scale_v = np.array([1.0, 4.0, 5.0]) laplace = laplace_lib.Laplace(loc=loc_v, scale=scale_v) self.assertEqual(laplace.entropy().get_shape(), (3,)) if not stats: return expected_entropy = stats.laplace.entropy(loc_v, scale=scale_v) self.assertAllClose(self.evaluate(laplace.entropy()), expected_entropy) def testLaplaceSample(self): loc_v = 4.0 scale_v = 3.0 loc = constant_op.constant(loc_v) scale = constant_op.constant(scale_v) n = 100000 laplace = laplace_lib.Laplace(loc=loc, scale=scale) samples = laplace.sample(n, seed=137) sample_values = self.evaluate(samples) self.assertEqual(samples.get_shape(), (n,)) self.assertEqual(sample_values.shape, (n,)) if not stats: return self.assertAllClose( sample_values.mean(), stats.laplace.mean(loc_v, scale=scale_v), rtol=0.05, atol=0.) self.assertAllClose( sample_values.var(), stats.laplace.var(loc_v, scale=scale_v), rtol=0.05, atol=0.) 
self.assertTrue(self._kstest(loc_v, scale_v, sample_values)) def testLaplaceFullyReparameterized(self): loc = constant_op.constant(4.0) scale = constant_op.constant(3.0) with backprop.GradientTape() as tape: tape.watch(loc) tape.watch(scale) laplace = laplace_lib.Laplace(loc=loc, scale=scale) samples = laplace.sample(100) grad_loc, grad_scale = tape.gradient(samples, [loc, scale]) self.assertIsNotNone(grad_loc) self.assertIsNotNone(grad_scale) def testLaplaceSampleMultiDimensional(self): loc_v = np.array([np.arange(1, 101, dtype=np.float32)]) # 1 x 100 scale_v = np.array([np.arange(1, 11, dtype=np.float32)]).T # 10 x 1 laplace = laplace_lib.Laplace(loc=loc_v, scale=scale_v) n = 10000 samples = laplace.sample(n, seed=137) sample_values = self.evaluate(samples) self.assertEqual(samples.get_shape(), (n, 10, 100)) self.assertEqual(sample_values.shape, (n, 10, 100)) zeros = np.zeros_like(loc_v + scale_v) # 10 x 100 loc_bc = loc_v + zeros scale_bc = scale_v + zeros if not stats: return self.assertAllClose( sample_values.mean(axis=0), stats.laplace.mean(loc_bc, scale=scale_bc), rtol=0.35, atol=0.) self.assertAllClose( sample_values.var(axis=0), stats.laplace.var(loc_bc, scale=scale_bc), rtol=0.105, atol=0.0) fails = 0 trials = 0 for ai, a in enumerate(np.reshape(loc_v, [-1])): for bi, b in enumerate(np.reshape(scale_v, [-1])): s = sample_values[:, bi, ai] trials += 1 fails += 0 if self._kstest(a, b, s) else 1 self.assertLess(fails, trials * 0.03) def _kstest(self, loc, scale, samples): # Uses the Kolmogorov-Smirnov test for goodness of fit. if not stats: return True # If scipy isn't available, return "True" for passing ks, _ = stats.kstest(samples, stats.laplace(loc, scale=scale).cdf) # Return True when the test passes. 
return ks < 0.02 def testLaplacePdfOfSampleMultiDims(self): laplace = laplace_lib.Laplace(loc=[7., 11.], scale=[[5.], [6.]]) num = 50000 samples = laplace.sample(num, seed=137) pdfs = laplace.prob(samples) sample_vals, pdf_vals = self.evaluate([samples, pdfs]) self.assertEqual(samples.get_shape(), (num, 2, 2)) self.assertEqual(pdfs.get_shape(), (num, 2, 2)) self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02) self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02) self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02) self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02) if not stats: return self.assertAllClose( stats.laplace.mean( [[7., 11.], [7., 11.]], scale=np.array([[5., 5.], [6., 6.]])), sample_vals.mean(axis=0), rtol=0.05, atol=0.) self.assertAllClose( stats.laplace.var([[7., 11.], [7., 11.]], scale=np.array([[5., 5.], [6., 6.]])), sample_vals.var(axis=0), rtol=0.05, atol=0.) def _assertIntegral(self, sample_vals, pdf_vals, err=1e-3): s_p = zip(sample_vals, pdf_vals) prev = (0, 0) total = 0 for k in sorted(s_p, key=lambda x: x[0]): pair_pdf = (k[1] + prev[1]) / 2 total += (k[0] - prev[0]) * pair_pdf prev = k self.assertNear(1., total, err=err) def testLaplaceNonPositiveInitializationParamsRaises(self): loc_v = constant_op.constant(0.0, name="loc") scale_v = constant_op.constant(-1.0, name="scale") with self.assertRaisesOpError("Condition x > 0 did not hold element-wise"): laplace = laplace_lib.Laplace( loc=loc_v, scale=scale_v, validate_args=True) self.evaluate(laplace.mean()) loc_v = constant_op.constant(1.0, name="loc") scale_v = constant_op.constant(0.0, name="scale") with self.assertRaisesOpError("Condition x > 0 did not hold element-wise"): laplace = laplace_lib.Laplace( loc=loc_v, scale=scale_v, validate_args=True) self.evaluate(laplace.mean()) def testLaplaceWithSoftplusScale(self): loc_v = constant_op.constant([0.0, 1.0], name="loc") scale_v = constant_op.constant([-1.0, 2.0], 
name="scale") laplace = laplace_lib.LaplaceWithSoftplusScale(loc=loc_v, scale=scale_v) self.assertAllClose( self.evaluate(nn_ops.softplus(scale_v)), self.evaluate(laplace.scale)) self.assertAllClose(self.evaluate(loc_v), self.evaluate(laplace.loc)) if __name__ == "__main__": test.main()
apache-2.0
labsanmartin/Bika-LIMS
bika/lims/browser/log.py
4
6206
from AccessControl.SecurityManagement import newSecurityManager
from Acquisition import aq_inner, aq_parent
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.browser.bika_listing import BikaListingView
from bika.lims.utils import to_utf8
from DateTime import DateTime
from operator import itemgetter
from plone.app.layout.globals.interfaces import IViewView
from plone.app.layout.viewlets.content import ContentHistoryView, ContentHistoryViewlet
from Products.Archetypes.config import REFERENCE_CATALOG
from Products.CMFCore.utils import getToolByName
from Products.CMFCore.WorkflowCore import WorkflowException
from Products.CMFPlone.utils import safe_unicode
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from zope.interface import implements
from zope.publisher.browser import TestRequest
import json


class LogView(BikaListingView):
    """Show log entries, workflow history and revision history details
    for an object.

    Two sources are merged into a single listing:
      * the CMF workflow ``review_history`` of the context, and
      * (if the object is versionable) the CMFEditions revision history.
    Rows are sorted newest-first by their raw timestamp.
    """
    implements(IViewView)
    template = ViewPageTemplateFile("templates/log.pt")

    def __init__(self, context, request):
        BikaListingView.__init__(self, context, request)
        # A log is read-only: hide all selection/workflow UI.
        self.show_sort_column = False
        self.show_select_row = False
        self.show_select_column = False
        self.show_workflow_action_buttons = False
        # Effectively "no pagination" -- show the full history on one page.
        self.pagesize = 999999
        self.icon = self.portal_url + "/++resource++bika.lims.images/%s_big.png" % \
            context.portal_type.lower()
        self.title = to_utf8(self.context.Title()) + " " + t(_("Log"))
        self.description = ""
        # Column definitions for the listing template; none are sortable
        # because folderitems() already emits rows in display order.
        self.columns = {
            'Version': {'title': _('Version'), 'sortable': False},
            'Date': {'title': _('Date'), 'sortable': False},
            'User': {'title': _('User'), 'sortable': False},
            'Action': {'title': _('Action'), 'sortable': False},
            'Description': {'title': _('Description'), 'sortable': False},
        }
        self.review_states = [
            {'id': 'default',
             'title': 'All',
             'contentFilter': {},
             'columns': ['Version', 'Date', 'User', 'Action', 'Description']},
        ]

    def folderitems(self):
        """Build the listing rows from workflow and revision history.

        Returns a list of row dicts in the shape the bika_listing
        template expects, sorted newest-first by 'sortable_date'.
        """
        # NOTE(review): ``rc`` is looked up but never used below.
        rc = getToolByName(self.context, REFERENCE_CATALOG)
        wf = getToolByName(self.context, 'portal_workflow')
        pr = getToolByName(self.context, 'portal_repository')
        isVersionable = pr.isVersionable(aq_inner(self.context))
        # Workflow history, newest entry first.  Objects without a
        # workflow chain raise WorkflowException -> empty history.
        try:
            review_history = wf.getInfoFor(self.context, 'review_history')
            review_history = list(review_history)
            review_history.reverse()
        except WorkflowException:
            review_history = []
        items = []
        for entry in review_history:
            # this folderitems doesn't subclass from the bika_listing.py
            # so we create items from scratch
            review_state = entry.get('review_state')
            state_title = wf.getTitleForStateOnType(review_state, self.context.portal_type)
            item = {
                'obj': self.context,
                'id': self.context.id,
                'uid': self.context.UID(),
                'title': self.context.title_or_id(),
                'type_class': '',
                'url': self.context.absolute_url(),
                'relative_url': self.context.absolute_url(),
                'view_url': self.context.absolute_url(),
                'path': "/".join(self.context.getPhysicalPath()),
                'replace': {},
                'before': {},
                'after': {},
                'choices': {},
                'class': {},
                'state_class': '',
                'allow_edit': [],
                'required': [],
                'Version': isVersionable and self.context.get('version_id', '') or '0',
                'Date': self.ulocalized_time(entry.get('time')),
                # raw timestamp kept for the final sort below
                'sortable_date': entry.get('time'),
                'User': entry.get('actor'),
                # entries without an explicit action are creation events
                'Action': entry.get('action') and entry.get('action') or 'Create',
                'Description': "review state: %s" % state_title,
            }
            items.append(item)
        if isVersionable:
            # ContentHistoryViewlet needs a request and site URLs to
            # render; a TestRequest with a dummy URL is sufficient here
            # because we only consume the raw revision data.
            # NOTE(review): hard-coded 'http://localhost:8080/bikas' --
            # presumably only used to satisfy the viewlet; verify links
            # in the rendered history are not derived from it.
            request = TestRequest()
            chv = ContentHistoryViewlet(self.context, request, None, None)
            chv.navigation_root_url = chv.site_url = 'http://localhost:8080/bikas'
            version_history = chv.revisionHistory()
        else:
            version_history = []
        for entry in version_history:
            # this folderitems doesn't subclass from the bika_listing.py
            # so we create items from scratch
            # disregard the first entry of version history, as it is
            # represented by the first entry in review_history
            if not entry.get('version_id'):
                continue
            item = {
                'obj': self.context,
                'id': self.context.id,
                'uid': self.context.UID(),
                'title': self.context.title_or_id(),
                'type_class': '',
                'url': self.context.absolute_url(),
                'relative_url': self.context.absolute_url(),
                'view_url': self.context.absolute_url(),
                'path': "/".join(self.context.getPhysicalPath()),
                'replace': {},
                'before': {},
                'after': {},
                'choices': {},
                'class': {},
                'state_class': '',
                'allow_edit': [],
                'required': [],
                'Version': entry.get('version_id'),
                # revision timestamps arrive as raw values; wrap in
                # DateTime before localizing (workflow entries above
                # are already DateTime instances)
                'Date': self.ulocalized_time(DateTime(entry.get('time'))),
                'sortable_date': entry.get('time'),
                'User': entry.get('actor').get('fullname'),
                'Action': entry.get('action') and entry.get('action') or 'Create',
                'Description': entry.get('comments'),
            }
            items.append(item)
        # Merge both histories chronologically, newest first.
        items = sorted(items, key=itemgetter('sortable_date'))
        items.reverse()
        return items
agpl-3.0
Buggaarde/youtube-dl
youtube_dl/extractor/airmozilla.py
78
3097
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    parse_duration,
    parse_iso8601,
)


class AirMozillaIE(InfoExtractor):
    """Extractor for event recordings hosted on air.mozilla.org.

    The event page embeds its video via the vid.ly service; the format
    list is parsed out of the embedded player's jwplayer configuration.
    """
    _VALID_URL = r'https?://air\.mozilla\.org/(?P<id>[0-9a-z-]+)/?'
    _TEST = {
        'url': 'https://air.mozilla.org/privacy-lab-a-meetup-for-privacy-minded-people-in-san-francisco/',
        'md5': '2e3e7486ba5d180e829d453875b9b8bf',
        'info_dict': {
            'id': '6x4q2w',
            'ext': 'mp4',
            'title': 'Privacy Lab - a meetup for privacy minded people in San Francisco',
            'thumbnail': 're:https?://vid\.ly/(?P<id>[0-9a-z-]+)/poster',
            'description': 'Brings together privacy professionals and others interested in privacy at for-profits, non-profits, and NGOs in an effort to contribute to the state of the ecosystem...',
            'timestamp': 1422487800,
            'upload_date': '20150128',
            'location': 'SFO Commons',
            'duration': 3780,
            'view_count': int,
            'categories': ['Main', 'Privacy'],
        }
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # Fix: the dot must be escaped so only the literal "vid.ly"
        # host matches (an unescaped "." matches any character).
        video_id = self._html_search_regex(r'//vid\.ly/(.*?)/embed', webpage, 'id')

        # The actual media info lives in the vid.ly embed page's
        # "var jwconfig = {...};" jwplayer configuration.
        embed_script = self._download_webpage('https://vid.ly/{0}/embed'.format(video_id), video_id)
        jwconfig = self._search_regex(r'\svar jwconfig = (\{.*?\});\s', embed_script, 'metadata')
        metadata = self._parse_json(jwconfig, video_id)

        # One format per playlist source; labels look like "720p", so the
        # height is the label with its trailing "p" stripped.
        # NOTE(review): 'ext' is taken from source['type'] -- presumably a
        # plain extension here rather than a MIME type; verify on a live page.
        formats = [{
            'url': source['file'],
            'ext': source['type'],
            'format_id': self._search_regex(r'&format=(.*)$', source['file'], 'video format'),
            'format': source['label'],
            'height': int(source['label'].rstrip('p')),
        } for source in metadata['playlist'][0]['sources']]
        self._sort_formats(formats)

        # Secondary metadata is scraped from the event page itself; all
        # of these are optional (fatal=False / default=None).
        view_count = int_or_none(self._html_search_regex(
            r'Views since archived: ([0-9]+)',
            webpage, 'view count', fatal=False))
        timestamp = parse_iso8601(self._html_search_regex(
            r'<time datetime="(.*?)"', webpage, 'timestamp', fatal=False))
        duration = parse_duration(self._search_regex(
            r'Duration:\s*(\d+\s*hours?\s*\d+\s*minutes?)',
            webpage, 'duration', fatal=False))

        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'formats': formats,
            'url': self._og_search_url(webpage),
            'display_id': display_id,
            'thumbnail': metadata['playlist'][0].get('image'),
            'description': self._og_search_description(webpage),
            'timestamp': timestamp,
            'location': self._html_search_regex(r'Location: (.*)', webpage, 'location', default=None),
            'duration': duration,
            'view_count': view_count,
            'categories': re.findall(r'<a href=".*?" class="channel">(.*?)</a>', webpage),
        }
unlicense
malishevg/edugraph
cms/djangoapps/contentstore/views/preview.py
10
7884
from __future__ import absolute_import

import logging
from functools import partial

from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseBadRequest
from django.contrib.auth.decorators import login_required

from edxmako.shortcuts import render_to_string
from xmodule_modifiers import replace_static_urls, wrap_xblock, wrap_fragment
from xmodule.error_module import ErrorDescriptor
from xmodule.exceptions import NotFoundError, ProcessingError
from xmodule.modulestore.django import modulestore, loc_mapper, ModuleI18nService
from xmodule.modulestore.locator import Locator
from xmodule.x_module import ModuleSystem
from xblock.runtime import KvsFieldData
from xblock.django.request import webob_to_django_response, django_to_webob_request
from xblock.exceptions import NoSuchHandlerError
from xblock.fragment import Fragment

from lms.lib.xblock.field_data import LmsFieldData
from lms.lib.xblock.runtime import quote_slashes, unquote_slashes
from cms.lib.xblock.runtime import local_resource_url
from util.sandboxing import can_execute_unsafe_code

import static_replace
from .session_kv_store import SessionKeyValueStore
from .helpers import render_from_lms
from ..utils import get_course_for_item

from contentstore.views.access import get_user_role

__all__ = ['preview_handler']

log = logging.getLogger(__name__)


@login_required
def preview_handler(request, usage_id, handler, suffix=''):
    """
    Dispatch an AJAX action to an xblock

    usage_id: The usage-id of the block to dispatch to, passed through `quote_slashes`
    handler: The handler to execute
    suffix: The remainder of the url to be passed to the handler

    Maps each xblock-level failure to an appropriate HTTP response:
    missing handler / missing resource -> 404, a processing error
    inside the block -> 400, anything else -> re-raised (500).
    """
    # Note: usage_id is currently the string form of a Location, but in the
    # future it will be the string representation of a Locator.
    location = unquote_slashes(usage_id)
    descriptor = modulestore().get_item(location)
    instance = _load_preview_module(request, descriptor)
    # Let the module handle the AJAX
    req = django_to_webob_request(request)
    try:
        resp = instance.handle(handler, req, suffix)
    except NoSuchHandlerError:
        log.exception("XBlock %s attempted to access missing handler %r", instance, handler)
        raise Http404
    except NotFoundError:
        log.exception("Module indicating to user that request doesn't exist")
        raise Http404
    except ProcessingError:
        log.warning("Module raised an error while processing AJAX request", exc_info=True)
        return HttpResponseBadRequest()
    except Exception:
        log.exception("error processing ajax call")
        raise
    # Translate the xblock's webob response back into a Django response.
    return webob_to_django_response(resp)


class PreviewModuleSystem(ModuleSystem):  # pylint: disable=abstract-method
    """
    An XModule ModuleSystem for use in Studio previews
    """
    # xmodules can check for this attribute during rendering to determine if
    # they are being rendered for preview (i.e. in Studio)
    is_author_mode = True

    def handler_url(self, block, handler_name, suffix='', query='', thirdparty=False):
        # Route all preview handler calls through the preview_handler view
        # above; the usage id is slash-quoted so it survives URL routing.
        return reverse('preview_handler', kwargs={
            'usage_id': quote_slashes(unicode(block.scope_ids.usage_id).encode('utf-8')),
            'handler': handler_name,
            'suffix': suffix,
        }) + '?' + query

    def local_resource_url(self, block, uri):
        # Delegate to the CMS runtime's resource-URL helper.
        return local_resource_url(block, uri)


def _preview_module_system(request, descriptor):
    """
    Returns a ModuleSystem for the specified descriptor that is specialized for
    rendering module previews.

    request: The active django request
    descriptor: An XModuleDescriptor
    """
    # The course id is derived differently depending on whether the
    # descriptor uses the new Locator addressing or an old-style Location.
    if isinstance(descriptor.location, Locator):
        course_location = loc_mapper().translate_locator_to_location(descriptor.location, get_course=True)
        course_id = course_location.course_id
    else:
        course_id = get_course_for_item(descriptor.location).location.course_id
    # Static tabs render with only their display name in the preview header.
    display_name_only = (descriptor.category == 'static_tab')

    wrappers = [
        # This wrapper wraps the module in the template specified above
        partial(wrap_xblock, 'PreviewRuntime', display_name_only=display_name_only),

        # This wrapper replaces urls in the output that start with /static
        # with the correct course-specific url for the static content
        partial(replace_static_urls, None, course_id=course_id),
        _studio_wrap_xblock,
    ]

    return PreviewModuleSystem(
        static_url=settings.STATIC_URL,
        # TODO (cpennington): Do we want to track how instructors are using the preview problems?
        track_function=lambda event_type, event: None,
        filestore=descriptor.runtime.resources_fs,
        get_module=partial(_load_preview_module, request),
        render_template=render_from_lms,
        debug=True,
        replace_urls=partial(static_replace.replace_static_urls, data_directory=None, course_id=course_id),
        user=request.user,
        can_execute_unsafe_code=(lambda: can_execute_unsafe_code(course_id)),
        mixins=settings.XBLOCK_MIXINS,
        course_id=course_id,
        anonymous_student_id='student',

        # Set up functions to modify the fragment produced by student_view
        wrappers=wrappers,
        error_descriptor_class=ErrorDescriptor,
        # get_user_role accepts a location or a CourseLocator.
        # If descriptor.location is a CourseLocator, course_id is unused.
        get_user_role=lambda: get_user_role(request.user, descriptor.location, course_id),
        descriptor_runtime=descriptor.runtime,
        services={
            "i18n": ModuleI18nService(),
        },
    )


def _load_preview_module(request, descriptor):
    """
    Return a preview XModule instantiated from the supplied descriptor.

    request: The active django request
    descriptor: An XModuleDescriptor

    Student state is backed by the session (not the real user state
    store), so preview interactions never touch persisted data.
    """
    student_data = KvsFieldData(SessionKeyValueStore(request))
    descriptor.bind_for_student(
        _preview_module_system(request, descriptor),
        LmsFieldData(descriptor._field_data, student_data),  # pylint: disable=protected-access
    )
    return descriptor


# pylint: disable=unused-argument
def _studio_wrap_xblock(xblock, view, frag, context, display_name_only=False):
    """
    Wraps the results of rendering an XBlock view in a div which adds a header and Studio action buttons.
    """
    # Only add the Studio wrapper when on the container page. The unit page will remain as is for now.
    if context.get('container_view', None) and view == 'student_view':
        locator = loc_mapper().translate_location(xblock.course_id, xblock.location, published=False)
        template_context = {
            'xblock_context': context,
            'xblock': xblock,
            'locator': locator,
            'content': frag.content,
        }
        # Pick the wrapper template by block kind: verticals, nested
        # container children, and everything else each get their own.
        if xblock.category == 'vertical':
            template = 'studio_vertical_wrapper.html'
        elif xblock.location != context.get('root_xblock').location and xblock.has_children:
            template = 'container_xblock_component.html'
        else:
            template = 'studio_xblock_wrapper.html'
        html = render_to_string(template, template_context)
        frag = wrap_fragment(frag, html)
    return frag


def get_preview_fragment(request, descriptor, context):
    """
    Returns the HTML returned by the XModule's student_view,
    specified by the descriptor and idx.

    Rendering failures are swallowed and replaced with an error
    fragment so a broken block cannot break the whole Studio page.
    """
    module = _load_preview_module(request, descriptor)
    try:
        fragment = module.render("student_view", context)
    except Exception as exc:                          # pylint: disable=W0703
        log.warning("Unable to render student_view for %r", module, exc_info=True)
        fragment = Fragment(render_to_string('html_error.html', {'message': str(exc)}))
    return fragment
alaunay/bigtop
bigtop-packages/src/charm/hadoop/layer-hadoop-slave/tests/01-basic-deployment.py
12
1341
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import amulet


class TestDeploy(unittest.TestCase):
    """
    Smoke-test deployment of the Apache Bigtop hadoop-slave charm.

    On its own this charm has no useful behavior to assert, so this
    test only checks that a single unit deploys and settles; real
    integration coverage lives in the bundle tests.
    """

    def test_deploy(self):
        # Stand up a one-unit deployment on xenial.
        self.d = amulet.Deployment(series='xenial')
        d = self.d
        d.add('slave', 'hadoop-slave')
        # Allow generous timeouts: charm build + unit settle are slow.
        d.setup(timeout=900)
        d.sentry.wait(timeout=1800)
        # Keep a handle on the deployed unit for any follow-up checks.
        self.unit = d.sentry['slave'][0]


if __name__ == '__main__':
    unittest.main()
apache-2.0
simleo/pydoop
pydoop/test_utils.py
2
5785
# BEGIN_COPYRIGHT
#
# Copyright 2009-2021 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT

"""
Utilities for unit tests.
"""

import sys
import os
import random
import uuid
import tempfile
import imp  # NOTE: deprecated; kept because get_module()'s contract exposes the imp API
import unittest
import shutil
import warnings
import subprocess

import pydoop
import pydoop.utils.jvm as jvm
from pydoop.utils.py3compat import StringIO

JAVA_HOME = jvm.get_java_home()
JAVA = os.path.join(JAVA_HOME, "bin", "java")
JAVAC = os.path.join(JAVA_HOME, "bin", "javac")

_RANDOM_DATA_SIZE = 32
# Default NameNode RPC port. 8020 for all versions except 3.0.0. See
# https://issues.apache.org/jira/browse/HDFS-12990
_DEFAULT_HDFS_PORT = 8020
_DEFAULT_BYTES_PER_CHECKSUM = 512


def _get_special_chr():
    """
    This is used to check unicode support. On some systems, depending on
    locale settings, we won't be able to use non-ASCII characters when
    interacting with system calls. Since in such cases it doesn't really
    make sense to run these tests we set UNI_CHR to a regular ASCII character.
    """
    # something outside the latin-1 range
    the_chr = u'\N{CYRILLIC CAPITAL LETTER O WITH DIAERESIS}'
    fd = None
    fname = None
    try:
        fd, fname = tempfile.mkstemp(suffix=the_chr)
    except UnicodeEncodeError:
        # Fix: a space was missing between the two implicitly
        # concatenated string literals ("charactersin filenames").
        msg = ("local file system doesn't support unicode characters "
               "in filenames, falling back to ASCII-only")
        warnings.warn(msg, UnicodeWarning)
        the_chr = u's'
    finally:
        # Fix: compare against None -- file descriptor 0 is valid and
        # would have been skipped by a plain truthiness check.
        if fd is not None:
            os.close(fd)
            os.remove(fname)
    return the_chr


# Computed once at import time; either the Cyrillic test character or
# an ASCII fallback on filesystems without unicode filename support.
UNI_CHR = _get_special_chr()

# Real fds behind the standard streams, used by silent_call() below.
_FD_MAP = {
    "stdout": sys.stdout.fileno(),
    "stderr": sys.stderr.fileno(),
}


class FSTree(object):
    """
    Minimal in-memory file-system tree, used to describe test fixtures.

    >>> t = FSTree('root')
    >>> d1 = t.add('d1')
    >>> f1 = t.add('f1', 0)
    >>> d2 = d1.add('d2')
    >>> f2 = d2.add('f2', 0)
    >>> for x in t.walk(): print(x.name, x.kind)
    ...
    root 1
    d1 1
    d2 1
    f2 0
    f1 0
    """
    def __init__(self, name, kind=1):
        assert kind in (0, 1)  # (file, dir)
        self.name = name
        self.kind = kind
        if self.kind:
            self.children = []

    def add(self, name, kind=1):
        """Append a child node (directory by default) and return it."""
        t = FSTree(name, kind)
        self.children.append(t)
        return t

    def walk(self):
        """Yield this node, then all descendants depth-first."""
        yield self
        if self.kind:
            for c in self.children:
                for t in c.walk():
                    yield t


def make_wd(fs, prefix="pydoop_test_"):
    """Create a unique working directory on ``fs``.

    On a remote (host-qualified) fs, create it through the fs API and
    return its full path; on the local fs, fall back to mkdtemp.
    """
    if fs.host:
        wd = "%s%s" % (prefix, uuid.uuid4().hex)
        fs.create_directory(wd)
        return fs.get_path_info(wd)['name']
    else:
        return tempfile.mkdtemp(prefix=prefix)


def make_random_data(size=_RANDOM_DATA_SIZE, printable=True):
    """Return ``size`` random bytes, restricted to printable ASCII by default."""
    randint = random.randint
    start, stop = (32, 126) if printable else (0, 255)
    return bytes(bytearray([randint(start, stop) for _ in range(size)]))


def get_bytes_per_checksum():
    """Read the HDFS checksum block size from the Hadoop config.

    Falls back from the new property name to the legacy one, then to
    the stock default.
    """
    params = pydoop.hadoop_params()
    return int(params.get('dfs.bytes-per-checksum',
                          params.get('io.bytes.per.checksum',
                                     _DEFAULT_BYTES_PER_CHECKSUM)))


def silent_call(func, *args, **kwargs):
    """Call ``func`` with stdout/stderr redirected to /dev/null.

    The original stream fds are saved and restored afterwards, even if
    the call raises.
    """
    with open(os.devnull, "w") as dev_null:
        cache = {}
        for s in "stdout", "stderr":
            cache[s] = os.dup(_FD_MAP[s])
            os.dup2(dev_null.fileno(), _FD_MAP[s])
        try:
            ret = func(*args, **kwargs)
        finally:
            for s in "stdout", "stderr":
                os.dup2(cache[s], _FD_MAP[s])
        return ret


def get_module(name, path=None):
    """Locate and load module ``name`` via imp, closing the source file."""
    fp, pathname, description = imp.find_module(name, path)
    try:
        module = imp.load_module(name, fp, pathname, description)
        return module
    finally:
        fp.close()


def compile_java(java_file, classpath, opts=None):
    """Compile ``java_file`` with javac if its .class file is missing or stale.

    ``opts`` are passed through to javac; a classpath option is added
    unless the caller already supplied one.
    """
    if opts is None:
        opts = []
    java_class_file = os.path.splitext(
        os.path.realpath(java_file)
    )[0] + '.class'
    if (not os.path.exists(java_class_file) or
            os.path.getmtime(java_file) > os.path.getmtime(java_class_file)):
        cmd = [JAVAC] + opts
        if not {"-cp", "-classpath"}.intersection(opts):
            cmd.extend(["-cp", classpath])
        cmd.append(java_file)
        try:
            subprocess.check_call(cmd, cwd=os.path.dirname(java_file))
        except subprocess.CalledProcessError as e:
            raise RuntimeError("Error compiling Java file %s\n%s" % (
                java_file, e))


def run_java(jclass, classpath, args, wd):
    """Run Java class ``jclass`` with ``args`` in working dir ``wd``."""
    try:
        subprocess.check_call([JAVA, '-cp', classpath, jclass] + args, cwd=wd)
    except subprocess.CalledProcessError as e:
        raise RuntimeError("Error running Java class %s\n%s" % (
            jclass, e))


def get_java_output_stream(jclass, classpath, args, wd):
    """Run a Java class and return its stdout wrapped in a StringIO.

    stderr is discarded.  Fix: use subprocess.DEVNULL instead of an
    open('/dev/null') handle that was never closed (and was not
    portable to non-POSIX platforms).
    """
    output = subprocess.check_output(
        [JAVA, '-cp', classpath, jclass] + args, cwd=wd,
        stderr=subprocess.DEVNULL)
    return StringIO(output)


class WDTestCase(unittest.TestCase):
    """TestCase with a per-test temporary working directory (self.wd)."""

    def setUp(self):
        self.wd = tempfile.mkdtemp(prefix='pydoop_test_')

    def tearDown(self):
        shutil.rmtree(self.wd)

    def _mkfn(self, basename):
        # Full path of a (not necessarily existing) file in the wd.
        return os.path.join(self.wd, basename)

    def _mkf(self, basename, mode='w'):
        # Open (by default for writing) a file in the wd.
        return open(self._mkfn(basename), mode)
apache-2.0
egetzel/wecrow
truehand2014/temboo/Library/SendGrid/NewsletterAPI/Recipients/AddRecipientList.py
1
3464
# -*- coding: utf-8 -*-

###############################################################################
#
# AddRecipientList
# Add one or more recipient lists to a newsletter.
#
# Python version 2.6
#
###############################################################################
# NOTE: Temboo SDK generated code; the Choreo path and input names below are
# part of the wire protocol and must not be changed.

from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution

import json


class AddRecipientList(Choreography):
    # Choreo entry point: binds a Temboo session to the SendGrid
    # "AddRecipientList" choreography and produces its input/result/
    # execution companion objects.

    def __init__(self, temboo_session):
        """
        Create a new instance of the AddRecipientList Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied.
        """
        Choreography.__init__(self, temboo_session, '/Library/SendGrid/NewsletterAPI/Recipients/AddRecipientList')

    def new_input_set(self):
        # Factory for the matching InputSet subclass.
        return AddRecipientListInputSet()

    def _make_result_set(self, result, path):
        return AddRecipientListResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        return AddRecipientListChoreographyExecution(session, exec_id, path)


class AddRecipientListInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the AddRecipientList
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """

    def set_APIKey(self, value):
        """
        Set the value of the APIKey input for this Choreo. ((required, string) The API Key obtained from SendGrid.)
        """
        InputSet._set_input(self, 'APIKey', value)

    def set_APIUser(self, value):
        """
        Set the value of the APIUser input for this Choreo. ((required, string) The username registered with SendGrid.)
        """
        InputSet._set_input(self, 'APIUser', value)

    def set_List(self, value):
        """
        Set the value of the List input for this Choreo. ((required, string) The recipient list to be added to the specified newsletter.)
        """
        InputSet._set_input(self, 'List', value)

    def set_Name(self, value):
        """
        Set the value of the Name input for this Choreo. ((required, string) The name of an existing newsletter to which a recipient list is being added.)
        """
        InputSet._set_input(self, 'Name', value)

    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format of the response from SendGrid, in either json, or xml. Default is set to json.)
        """
        InputSet._set_input(self, 'ResponseFormat', value)


class AddRecipientListResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the AddRecipientList Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    # NOTE(review): the parameter name "str" shadows the builtin; kept as-is
    # because this signature is SDK-generated.
    def getJSONFromString(self, str):
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from SendGrid. The format corresponds to the ResponseFormat input. Default is json.)
        """
        return self._output.get('Response', None)


class AddRecipientListChoreographyExecution(ChoreographyExecution):
    # Execution handle returned by async runs; only needs to know how
    # to wrap raw results in the right ResultSet class.

    def _make_result_set(self, response, path):
        return AddRecipientListResultSet(response, path)
apache-2.0
cuihantao/cvxopt
examples/doc/chap8/qcl1.py
4
4120
# The quadratically constrained 1-norm minimization example of section 8.7 # (Exploiting structure). from cvxopt import blas, lapack, solvers, matrix, mul, div, setseed, normal from math import sqrt def qcl1(A, b): """ Returns the solution u, z of (primal) minimize || u ||_1 subject to || A * u - b ||_2 <= 1 (dual) maximize b^T z - ||z||_2 subject to || A'*z ||_inf <= 1. Exploits structure, assuming A is m by n with m >= n. """ m, n = A.size # Solve equivalent cone LP with variables x = [u; v]: # # minimize [0; 1]' * x # subject to [ I -I ] * x <= [ 0 ] (componentwise) # [-I -I ] * x <= [ 0 ] (componentwise) # [ 0 0 ] * x <= [ 1 ] (SOC) # [-A 0 ] [ -b ]. # # maximize -t + b' * w # subject to z1 - z2 = A'*w # z1 + z2 = 1 # z1 >= 0, z2 >=0, ||w||_2 <= t. c = matrix(n*[0.0] + n*[1.0]) h = matrix( 0.0, (2*n + m + 1, 1)) h[2*n] = 1.0 h[2*n+1:] = -b def G(x, y, alpha = 1.0, beta = 0.0, trans = 'N'): y *= beta if trans=='N': # y += alpha * G * x y[:n] += alpha * (x[:n] - x[n:2*n]) y[n:2*n] += alpha * (-x[:n] - x[n:2*n]) y[2*n+1:] -= alpha * A*x[:n] else: # y += alpha * G'*x y[:n] += alpha * (x[:n] - x[n:2*n] - A.T * x[-m:]) y[n:] -= alpha * (x[:n] + x[n:2*n]) def Fkkt(W): # Returns a function f(x, y, z) that solves # # [ 0 G' ] [ x ] = [ bx ] # [ G -W'*W ] [ z ] [ bz ]. # First factor # # S = G' * W**-1 * W**-T * G # = [0; -A]' * W3^-2 * [0; -A] + 4 * (W1**2 + W2**2)**-1 # # where # # W1 = diag(d1) with d1 = W['d'][:n] = 1 ./ W['di'][:n] # W2 = diag(d2) with d2 = W['d'][n:] = 1 ./ W['di'][n:] # W3 = beta * (2*v*v' - J), W3^-1 = 1/beta * (2*J*v*v'*J - J) # with beta = W['beta'][0], v = W['v'][0], J = [1, 0; 0, -I]. 
# As = W3^-1 * [ 0 ; -A ] = 1/beta * ( 2*J*v * v' - I ) * [0; A] beta, v = W['beta'][0], W['v'][0] As = 2 * v * (v[1:].T * A) As[1:,:] *= -1.0 As[1:,:] -= A As /= beta # S = As'*As + 4 * (W1**2 + W2**2)**-1 S = As.T * As d1, d2 = W['d'][:n], W['d'][n:] d = 4.0 * (d1**2 + d2**2)**-1 S[::n+1] += d lapack.potrf(S) def f(x, y, z): # z := - W**-T * z z[:n] = -div( z[:n], d1 ) z[n:2*n] = -div( z[n:2*n], d2 ) z[2*n:] -= 2.0*v*( v[0]*z[2*n] - blas.dot(v[1:], z[2*n+1:]) ) z[2*n+1:] *= -1.0 z[2*n:] /= beta # x := x - G' * W**-1 * z x[:n] -= div(z[:n], d1) - div(z[n:2*n], d2) + As.T * z[-(m+1):] x[n:] += div(z[:n], d1) + div(z[n:2*n], d2) # Solve for x[:n]: # # S*x[:n] = x[:n] - (W1**2 - W2**2)(W1**2 + W2**2)^-1 * x[n:] x[:n] -= mul( div(d1**2 - d2**2, d1**2 + d2**2), x[n:]) lapack.potrs(S, x) # Solve for x[n:]: # # (d1**-2 + d2**-2) * x[n:] = x[n:] + (d1**-2 - d2**-2)*x[:n] x[n:] += mul( d1**-2 - d2**-2, x[:n]) x[n:] = div( x[n:], d1**-2 + d2**-2) # z := z + W^-T * G*x z[:n] += div( x[:n] - x[n:2*n], d1) z[n:2*n] += div( -x[:n] - x[n:2*n], d2) z[2*n:] += As*x[:n] return f dims = {'l': 2*n, 'q': [m+1], 's': []} sol = solvers.conelp(c, G, h, dims, kktsolver = Fkkt) if sol['status'] == 'optimal': return sol['x'][:n], sol['z'][-m:] else: return None, None setseed() m, n = 100, 100 A, b = normal(m,n), normal(m,1) x, z = qcl1(A, b) if x is None: print("infeasible")
gpl-3.0
dfannin/projectbox-layout
ProjectBox.py
1
6264
#!/usr/bin/env python
'''
Project Metal Box Template for BitX40
http://www.hfsigs.com/

This is a executable python program that generates
DXF (2-d CAD Files) that are templates for
a project metal box for the BitX40 radio (http://www.hfsigs.com/ ).
3 DXF files are created - Top pattern, Bottom pattern and Front Panel.
This program uses the python dxfwrite library ("pip install dxfwrite").
The project box dimensions can be modified by adjusting the
paramters below.
version 1.0

NOTE: written for Python 2 (print statements); all dimensions are in mm.
'''
import dxfwrite
from dxfwrite import DXFEngine as dxf

'''
circle_ch() draws a circle with a cross-hair.
It adds the circle to the default '0' layer, and a cross-hair
in the drill layer.
'''
# NOTE: operates on the module-global `drawing` that the script below
# rebinds for each output file (top, bottom, front panel).
def circle_ch(r, x , y , chl=1.0, layer='0', layerch='drill' ):
    ''' Circle with cross-hair center

    r      -- circle radius
    x, y   -- center coordinates
    chl    -- half-length of each cross-hair arm
    layer  -- layer receiving the circle outline
    layerch -- layer receiving the cross-hair lines
    '''
    drawing.add(dxf.circle( r , ( x, y) , layer=layer) )
    drawing.add(dxf.line( (x, y - chl ) , ( x , y + chl) , layer=layerch) )
    drawing.add(dxf.line( (x + chl , y ) , ( x - chl , y ) , layer=layerch) )

'''
Dimensions of Project box - set variables here
'''
# box dimensions
w = 140.0 # width of box
h = 60.0  # height of box
d = 160.0 # depth of box
t = 10.0  # tab width
ba = 1.790 # bend adjust
# other dimensions
chl = 1.5 # cross hair length
cr = 5    # calibration circle radius = 5mm radius/10mm diameter
# box hole sizes
tr = 1.13 # number 4 screw hole for AL is 0.089 inches, 2.25 mm dia
# lcd cutout dimensions and location
lw = 72.0 # lcd width
lh = 25.0 # lcd height
lx = 0.18 * h # lcd x
ly = t + (w * 0.40) + 2.0 # lcd y
lhr=2.0 # lcd drilling hole radius
lmh1x= lx - ( (31.0 - lh )/2.0) # lcd mnt hole 1 x
lmh1y= ly - ( (75.0 - lw )/2.0) # lcd mnt hole 1 y
lmh1r = 1.56 # lcd mnt hole 1 radius 1/8"
# volume pot size and location
volx=42.0
voly=20.0 + t
volr=3.5
# tuning pot size and location
tunx=18.0
tuny=voly
tunr=5.0 # tuning pot hole radius
# jack - mic
micx=50.0
micy=90.0 + t
micr=2.8
# jack - head
headx=50.0
heady=101.5 + t
headr=2.8
# anderson pp opening
ppw=17.5
pph=9.7
ppx= 4.0 + (1.5*h)+d-(pph/2.0)
ppy= t + (w*3.0/4.0) - (ppw/2.0)
pphr=2.0 # pp mnt holes - radius
pphd=17.3 # pp mnt holes - distance
# ant hole
antx= 4.0 + (1.5*h) + d
anty= t + (1.0*w/4.0)
antr= 6.35 # ant connector radius
# pcb mounting holes
pcbw = 117.5 #pcb hole width
pcbd = 105.0 #pcb hole width
pcbr = 1.75 # pcb hole radius
pcbx = h + ( (d - pcbw) / 2.0 ) + 7.0
pcby = t + ( (w - pcbd) / 2.0 )

'''
main
'''
print "Project box dxf file creator - bitx"
print "version 1.0"
print "box dimensions (mm) height: %6.2f width: %6.2f depth: %6.2f tab: %6.2f" % (h,w,d,t)
print "box dimensions (mm) bend adjustment: %6.2f " % (ba)

'''
Top + Sides
'''
# define top drawing
drawing = dxf.drawing('pb_top.dxf')
alen = w + ( 2.0*h ) + ( 2.0*ba)  # adjusted length
# main outline
drawing.add(dxf.rectangle( (0,0), d, alen ) )
# create layers
drawing.add_layer('bend',color=2)
drawing.add_layer('calibrate', color=3)
drawing.add_layer('drill', color=4)
# add bend lines
acen = alen / 2.0 # center of adjusted length
bendlen = ( w + ba/2.0 ) / 2.0 # bend length from center
drawing.add(dxf.line( ( 0, acen + bendlen ) , ( d, acen + bendlen), layer='bend' ) )
drawing.add(dxf.line( ( 0, acen - bendlen ),( d, acen - bendlen ), layer='bend' ) )
# add box screw holes (#4 screws)
# side 1
circle_ch(tr,t/2,h/2)
circle_ch(tr,d-(t/2),h/2)
circle_ch(tr,d/2,t/2)
# side 1
circle_ch(tr,t/2,alen-(h/2))
circle_ch(tr,d-(t/2),alen - (h/2))
circle_ch(tr,d/2,alen - (t/2))
# add calibration marks
circle_ch(cr,d/2.0,acen,chl,'calibrate','calibrate')
drawing.save()

'''
Box Bottom (Front/Back+tabs)
'''
drawing = dxf.drawing('pb_bottom.dxf')
# add layers
drawing.add_layer('bend',color=2)
drawing.add_layer('calibrate', color=3)
drawing.add_layer('drill', color=4)
# bottom outline
drawing.add(dxf.rectangle( (0,0), d + (2.0*h) , ( 2.0 * t ) + w ) )
# lcd hole
drawing.add(dxf.rectangle( (lx,ly), lh, lw ) )
# lcd corner holes
circle_ch(lhr, lx+lhr, ly+lhr)
circle_ch(lhr, lx+lh-lhr, ly+lw-lhr)
circle_ch(lhr, lx+lhr, ly+lw-lhr)
circle_ch(lhr, lx+lh-lhr, ly+lhr )
# lcd mnt holes
circle_ch(lmh1r, lmh1x, lmh1y)
circle_ch(lmh1r, lmh1x + 31.0, lmh1y)
circle_ch(lmh1r, lmh1x + 31.0, lmh1y + 75.0)
circle_ch(lmh1r, lmh1x , lmh1y + 75.0)
# pot holes
circle_ch(volr,volx,voly)
circle_ch(tunr,tunx,tuny)
# Kenwood-style 2 pin jack holes
circle_ch(micr,micx,micy)
circle_ch(headr,headx,heady)
# Anderson Powerpole opening
drawing.add(dxf.rectangle( (ppx,ppy),pph, ppw ) )
# antenna opening
circle_ch(antr,antx,anty)
# 4 PCB mounting holes
circle_ch(pcbr,pcbx,pcby)
circle_ch(pcbr,pcbx+pcbw,pcby)
circle_ch(pcbr,pcbx,pcby+pcbd)
circle_ch(pcbr,pcbx+pcbw,pcby+pcbd)
# calibration hole
circle_ch(cr, (d +(2.0*h))/2.0 , ((2.0*t)+w)/2.0,chl,'calibrate','calibrate')
# front bends
drawing.add(dxf.line( ( h, 0 ) , ( h, (2.0*t) + w ), layer='bend' ) )
drawing.add(dxf.line( ( d + h , 0 ),( d+h, (2.0*t) + w ) , layer='bend' ) )
# tab bends
drawing.add(dxf.line( ( 0, t + w ) , ( d+(h*2.0) , t + w ), layer='bend' ) )
drawing.add(dxf.line( ( 0, t ),( d+(h*2.0) , t ) , layer='bend' ) )
drawing.save()

'''
Front panel template
'''
drawing = dxf.drawing('pb_front.dxf')
drawing.add_layer('bend',color=2)
drawing.add_layer('calibrate', color=3)
drawing.add_layer('drill', color=4)
# rounded rectangle radius
rrr=2.0
# full panel outline
drawing.add(dxf.line( (rrr, 0.0) , ( h-rrr, 0.0) ) )
drawing.add(dxf.line( (rrr, w) , ( h-rrr, w) ) )
drawing.add(dxf.line( (0.0, rrr) , ( 0.0, w-rrr) ) )
drawing.add(dxf.line( (h, rrr) , ( h, w-rrr) ) )
# edge corners rounded
drawing.add(dxf.arc(rrr,center=(rrr,rrr),startangle=180.0,endangle=270.0))
drawing.add(dxf.arc(rrr,center=(rrr,w-rrr),startangle=90.0,endangle=180.0))
drawing.add(dxf.arc(rrr,center=(h-rrr,rrr),startangle=270.0,endangle=0.0))
drawing.add(dxf.arc(rrr,center=(h-rrr,w-rrr),startangle=0.0,endangle=90.0))
# lcd hole
drawing.add(dxf.rectangle( (lx,ly-t), lh, lw ) )
# pot holes
circle_ch(volr,volx,voly-t)
circle_ch(tunr,tunx,tuny-t)
# Kenwood-style 2 pin jack holes
circle_ch(micr,micx,micy-t)
circle_ch(headr,headx,heady-t)
drawing.save()
bsd-3-clause
xydinesh/youtube-dl
youtube_dl/extractor/wat.py
114
5370
# coding: utf-8
from __future__ import unicode_literals

import re
import hashlib

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    unified_strdate,
)


class WatIE(InfoExtractor):
    """Extractor for wat.tv video pages.

    Handles both single videos and multipart videos (several "chapters"):
    multipart pages are returned as a playlist whose entries are re-processed
    individually by this same extractor.
    """
    _VALID_URL = r'http://www\.wat\.tv/video/(?P<display_id>.*)-(?P<short_id>.*?)_.*?\.html'
    IE_NAME = 'wat.tv'
    _TESTS = [
        {
            'url': 'http://www.wat.tv/video/soupe-figues-l-orange-aux-epices-6z1uz_2hvf7_.html',
            'md5': 'ce70e9223945ed26a8056d413ca55dc9',
            'info_dict': {
                'id': '11713067',
                'display_id': 'soupe-figues-l-orange-aux-epices',
                'ext': 'mp4',
                'title': 'Soupe de figues à l\'orange et aux épices',
                'description': 'Retrouvez l\'émission "Petits plats en équilibre", diffusée le 18 août 2014.',
                'upload_date': '20140819',
                'duration': 120,
            },
        },
        {
            'url': 'http://www.wat.tv/video/gregory-lemarchal-voix-ange-6z1v7_6ygkj_.html',
            'md5': 'fbc84e4378165278e743956d9c1bf16b',
            'info_dict': {
                'id': '11713075',
                'display_id': 'gregory-lemarchal-voix-ange',
                'ext': 'mp4',
                'title': 'Grégory Lemarchal, une voix d\'ange depuis 10 ans (1/3)',
                'description': 'md5:b7a849cf16a2b733d9cd10c52906dee3',
                'upload_date': '20140816',
                'duration': 2910,
            },
            'skip': "Ce contenu n'est pas disponible pour l'instant.",
        },
    ]

    def download_video_info(self, real_id):
        """Fetch the JSON metadata for ``real_id`` and return its 'media' dict."""
        # 'contentv4' is used in the website, but it also returns the related
        # videos, we don't need them
        info = self._download_json('http://www.wat.tv/interface/contentv3/' + real_id, real_id)
        return info['media']

    def _real_extract(self, url):
        """Extract a single video or a playlist of chapters from a wat.tv page."""
        def real_id_for_chapter(chapter):
            # Chapter ids look like '<real_id>-<something>'; keep the real id.
            return chapter['tc_start'].split('-')[0]
        mobj = re.match(self._VALID_URL, url)
        short_id = mobj.group('short_id')
        display_id = mobj.group('display_id')
        webpage = self._download_webpage(url, display_id or short_id)
        # The numeric id embedded in the page's analytics tag.
        real_id = self._search_regex(r'xtpage = ".*-(.*?)";', webpage, 'real id')

        video_info = self.download_video_info(real_id)

        error_desc = video_info.get('error_desc')
        if error_desc:
            raise ExtractorError(
                '%s returned error: %s' % (self.IE_NAME, error_desc), expected=True)

        # First entry of the geo list (if any) is passed as the 'country'
        # parameter when requesting the video URL.
        geo_list = video_info.get('geoList')
        country = geo_list[0] if geo_list else ''

        chapters = video_info['chapters']
        first_chapter = chapters[0]
        files = video_info['files']
        first_file = files[0]

        if real_id_for_chapter(first_chapter) != real_id:
            # The first chapter's id differs from the page id: this is a
            # multipart video, so return a playlist of the chapter URLs.
            self.to_screen('Multipart video detected')
            chapter_urls = []
            for chapter in chapters:
                chapter_id = real_id_for_chapter(chapter)
                # NOTE: when each chapter is later processed by WatIE, it will
                # download this info again (redundant, but keeps the code simple).
                chapter_info = self.download_video_info(chapter_id)
                chapter_urls.append(chapter_info['url'])
            entries = [self.url_result(chapter_url) for chapter_url in chapter_urls]
            return self.playlist_result(entries, real_id, video_info['title'])

        upload_date = None
        if 'date_diffusion' in first_chapter:
            upload_date = unified_strdate(first_chapter['date_diffusion'])
        # Otherwise we can continue and extract just one part, we have to use
        # the short id for getting the video url

        # Mobile format is always available and needs no token.
        formats = [{
            'url': 'http://wat.tv/get/android5/%s.mp4' % real_id,
            'format_id': 'Mobile',
        }]

        fmts = [('SD', 'web')]
        if first_file.get('hasHD'):
            fmts.append(('HD', 'webhd'))

        def compute_token(param):
            """Build the time-limited access token required by /get URLs."""
            timestamp = '%08x' % int(self._download_webpage(
                'http://www.wat.tv/servertime', real_id,
                'Downloading server time').split('|')[0])
            magic = '9b673b13fa4682ed14c3cfa5af5310274b514c4133e9b3a81e6e3aba009l2564'
            return '%s/%s' % (hashlib.md5((magic + param + timestamp).encode('ascii')).hexdigest(), timestamp)

        for fmt in fmts:
            webid = '/%s/%s' % (fmt[1], real_id)
            # fatal=False (last positional arg): a missing format is skipped,
            # not an extraction failure.
            video_url = self._download_webpage(
                'http://www.wat.tv/get%s?token=%s&getURL=1&country=%s' % (webid, compute_token(webid), country),
                real_id,
                'Downloading %s video URL' % fmt[0],
                'Failed to download %s video URL' % fmt[0],
                False)
            if not video_url:
                continue
            formats.append({
                'url': video_url,
                'ext': 'mp4',
                'format_id': fmt[0],
            })

        return {
            'id': real_id,
            'display_id': display_id,
            'title': first_chapter['title'],
            'thumbnail': first_chapter['preview'],
            'description': first_chapter['description'],
            'view_count': video_info['views'],
            'upload_date': upload_date,
            'duration': first_file['duration'],
            'formats': formats,
        }
unlicense
hhkaos/awesome-arcgis
node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/dump_dependency_json.py
1534
3426
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""gyp generator that dumps the target dependency graph to dump.json.

Instead of producing build files, this generator walks the dependency
edges of every target and serializes the resulting adjacency map
(target -> list of direct dependencies) as JSON.
"""

import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys

generator_supports_multiple_toolsets = True

generator_wants_static_library_dependencies_adjusted = False

generator_filelist_paths = {
}

generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
                'LIB_DIR', 'SHARED_LIB_DIR']:
  # Some gyp steps fail if these are empty(!).
  generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
               'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
               'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
               'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
               'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
               'CONFIGURATION_NAME']:
  generator_default_variables[unused] = ''


def CalculateVariables(default_variables, params):
  """Seed default_variables from generator flags and the target platform."""
  generator_flags = params.get('generator_flags', {})
  for key, val in generator_flags.items():
    default_variables.setdefault(key, val)
  # Compute the flavor once; it is both the 'OS' default and the
  # Windows-specific switch below.
  flavor = gyp.common.GetFlavor(params)
  default_variables.setdefault('OS', flavor)
  if flavor == 'win':
    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])

    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)


def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  generator_flags = params.get('generator_flags', {})
  if generator_flags.get('adjust_static_libraries', False):
    global generator_wants_static_library_dependencies_adjusted
    generator_wants_static_library_dependencies_adjusted = True

  toplevel = params['options'].toplevel_dir
  generator_dir = os.path.relpath(params['options'].generator_output or '.')
  # output_dir: relative path from generator_dir to the build directory.
  output_dir = generator_flags.get('output_dir', 'out')
  qualified_out_dir = os.path.normpath(os.path.join(
      toplevel, generator_dir, output_dir, 'gypfiles'))
  global generator_filelist_paths
  generator_filelist_paths = {
      'toplevel': toplevel,
      'qualified_out_dir': qualified_out_dir,
  }


def GenerateOutput(target_list, target_dicts, data, params):
  """Walk the dependency graph of all targets and write it to dump.json.

  The output is a JSON object mapping each fully-qualified target name to
  the list of targets it directly depends on.
  """
  # Map of target -> list of targets it depends on.
  edges = {}

  # Queue of targets to visit.
  targets_to_visit = target_list[:]

  while len(targets_to_visit) > 0:
    target = targets_to_visit.pop()
    if target in edges:
      # Already visited; dependency lists are complete for this target.
      continue
    edges[target] = []

    for dep in target_dicts[target].get('dependencies', []):
      edges[target].append(dep)
      targets_to_visit.append(dep)

  try:
    filepath = params['generator_flags']['output_dir']
  except KeyError:
    filepath = '.'
  filename = os.path.join(filepath, 'dump.json')
  # 'with' guarantees the file is closed even if json.dump raises
  # (the original leaked the handle on error).
  with open(filename, 'w') as f:
    json.dump(edges, f)
  # Parenthesized form behaves identically on Python 2 and is valid Python 3.
  print('Wrote json to %s.' % filename)
gpl-3.0
tvtsoft/odoo8
addons/mass_mailing/__openerp__.py
2
1743
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

# Module manifest for the Mass Mailing Campaigns addon.
{
    'name': 'Mass Mailing Campaigns',
    'summary': 'Design, send and track emails',
    'description': """
Easily send mass mailing to your leads, opportunities or customers. Track
marketing campaigns performance to improve conversion rates. Design
professional emails and reuse templates in a few clicks.
""",
    'version': '2.0',
    'author': 'OpenERP',
    'website': 'https://www.odoo.com/page/mailing',
    'category': 'Marketing',
    'depends': [
        'mail',
        'marketing',
        'utm',
        'link_tracker',
        'web_editor',
        'web_kanban_gauge',
    ],
    'data': [
        'data/mail_data.xml',
        'data/mass_mailing_data.xml',
        'wizard/mail_compose_message_view.xml',
        'wizard/test_mailing.xml',
        'views/mass_mailing_report.xml',
        # 'views/mass_mailing.xml' was listed a second time further down;
        # the duplicate is removed so the file is only loaded once on
        # install/update.
        'views/mass_mailing.xml',
        'views/res_config.xml',
        'views/email_template.xml',
        'security/ir.model.access.csv',
        'views/editor_field_html.xml',
        'views/snippets_themes.xml',
        'views/snippets_themes_options.xml',
        'views/theme_list.xml',
        'views/theme/neopolitan_snippets.xml',
        'views/theme/skyline_snippets.xml',
        'views/theme/narrative_snippets.xml',
        'views/theme/sunday_snippets.xml',
        'views/theme/go_snippets.xml',
        'views/theme/airmail_snippets.xml',
        'views/theme/zenflat_snippets.xml',
        'views/theme/cleave_snippets.xml',
    ],
    'qweb': [],
    'demo': [
        'data/mass_mailing_demo.xml',
    ],
    'installable': True,
    'auto_install': False,
    'application': True,
}
agpl-3.0
CubicERP/odoo
addons/website/models/ir_ui_view.py
161
10904
# -*- coding: utf-8 -*-
import copy

from lxml import etree, html

from openerp import SUPERUSER_ID, api
from openerp.addons.website.models import website
from openerp.http import request
from openerp.osv import osv, fields


class view(osv.osv):
    """Website extension of ir.ui.view.

    Adds page/SEO metadata columns, website-aware QWeb rendering, and the
    in-place editing machinery (saving edited view sections and embedded
    record fields back to the database).
    """
    _inherit = "ir.ui.view"
    _columns = {
        # True when the view is a complete, standalone web page template.
        'page': fields.boolean("Whether this view is a web page template (complete)"),
        # SEO metadata rendered into the page <head>.
        'website_meta_title': fields.char("Website meta title", size=70, translate=True),
        'website_meta_description': fields.text("Website meta description", size=160, translate=True),
        'website_meta_keywords': fields.char("Website meta keywords", translate=True),
        # Whether this inheriting view appears in the "Customize" menu.
        'customize_show': fields.boolean("Show As Optional Inherit"),
    }
    _defaults = {
        'page': False,
        'customize_show': False,
    }

    def _view_obj(self, cr, uid, view_id, context=None):
        # Normalize an xml id (string), a database id (int/long) or an
        # already-browsed record into a browse record.
        if isinstance(view_id, basestring):
            return self.pool['ir.model.data'].xmlid_to_object(
                cr, uid, view_id, raise_if_not_found=True, context=context
            )
        elif isinstance(view_id, (int, long)):
            return self.browse(cr, uid, view_id, context=context)

        # assume it's already a view object (WTF?)
        return view_id

    # Returns all views (called and inherited) related to a view
    # Used by translation mechanism, SEO and optional templates
    def _views_get(self, cr, uid, view_id, options=True, context=None, root=True):
        """ For a given view ``view_id``, should return:
        * the view itself
        * all views inheriting from it, enabled or not
          - but not the optional children of a non-enabled child
        * all views called from it (via t-call)
        """
        try:
            view = self._view_obj(cr, uid, view_id, context=context)
        except ValueError:
            # Shall we log that ?
            return []

        # Walk up to the root of the inheritance chain before collecting.
        while root and view.inherit_id:
            view = view.inherit_id

        result = [view]

        # Recurse into every template referenced via t-call in the arch.
        node = etree.fromstring(view.arch)
        for child in node.xpath("//t[@t-call]"):
            try:
                called_view = self._view_obj(cr, uid, child.get('t-call'), context=context)
            except ValueError:
                # Unknown t-call target: ignore it rather than fail.
                continue
            if called_view not in result:
                result += self._views_get(cr, uid, called_view, options=options, context=context)

        extensions = view.inherit_children_ids
        if not options:
            # only active children
            extensions = (v for v in view.inherit_children_ids if v.active)

        # Keep options in a deterministic order regardless of their applicability
        for extension in sorted(extensions, key=lambda v: v.id):
            for r in self._views_get(
                    cr, uid, extension,
                    # only return optional grandchildren if this child is enabled
                    options=extension.active, context=context, root=False):
                if r not in result:
                    result.append(r)
        return result

    def extract_embedded_fields(self, cr, uid, arch, context=None):
        # Elements carrying a data-oe-model other than ir.ui.view embed a
        # record field edited inline in the page.
        return arch.xpath('//*[@data-oe-model != "ir.ui.view"]')

    def save_embedded_field(self, cr, uid, el, context=None):
        """Write an inline-edited field value back to its owning record."""
        Model = self.pool[el.get('data-oe-model')]
        field = el.get('data-oe-field')

        # Convert the HTML fragment back to the field's Python value.
        converter = self.pool['website.qweb'].get_converter_for(el.get('data-oe-type'))
        value = converter.from_html(cr, uid, Model, Model._fields[field], el)

        if value is not None:
            # TODO: batch writes?
            Model.write(cr, uid, [int(el.get('data-oe-id'))], {
                field: value
            }, context=context)

    def to_field_ref(self, cr, uid, el, context=None):
        """Turn an edited embedded-field element back into a t-field node."""
        # filter out meta-information inserted in the document
        attributes = dict((k, v) for k, v in el.items()
                          if not k.startswith('data-oe-'))
        attributes['t-field'] = el.get('data-oe-expression')

        out = html.html_parser.makeelement(el.tag, attrib=attributes)
        out.tail = el.tail
        return out

    def replace_arch_section(self, cr, uid, view_id, section_xpath, replacement, context=None):
        """Return the view's arch with the section at ``section_xpath``
        replaced (content only) by ``replacement``."""
        # the root of the arch section shouldn't actually be replaced as it's
        # not really editable itself, only the content truly is editable.
        [view] = self.browse(cr, uid, [view_id], context=context)
        arch = etree.fromstring(view.arch.encode('utf-8'))
        # => get the replacement root
        if not section_xpath:
            root = arch
        else:
            # ensure there's only one match
            [root] = arch.xpath(section_xpath)

        # Keep the root element itself; swap its text, tail and children.
        root.text = replacement.text
        root.tail = replacement.tail
        # replace all children
        del root[:]
        for child in replacement:
            root.append(copy.deepcopy(child))

        return arch

    @api.cr_uid_ids_context
    def render(self, cr, uid, id_or_xml_id, values=None, engine='ir.qweb', context=None):
        """Render the view; inside a website request, switch to the website
        QWeb engine and enrich the rendering context (company, menus,
        editability flags, branding)."""
        if request and getattr(request, 'website_enabled', False):
            engine='website.qweb'

            if isinstance(id_or_xml_id, list):
                id_or_xml_id = id_or_xml_id[0]

            if not context:
                context = {}

            company = self.pool['res.company'].browse(cr, SUPERUSER_ID, request.website.company_id.id, context=context)

            qcontext = dict(
                context.copy(),
                website=request.website,
                url_for=website.url_for,
                slug=website.slug,
                res_company=company,
                user_id=self.pool.get("res.users").browse(cr, uid, uid),
                translatable=context.get('lang') != request.website.default_lang_code,
                editable=request.website.is_publisher(),
                menu_data=self.pool['ir.ui.menu'].load_menus_root(cr, uid, context=context) if request.website.is_user() else None,
            )

            # add some values
            if values:
                qcontext.update(values)

            # in edit mode ir.ui.view will tag nodes
            if qcontext.get('editable'):
                context = dict(context, inherit_branding=True)
            elif request.registry['res.users'].has_group(cr, uid, 'base.group_website_publisher'):
                context = dict(context, inherit_branding_auto=True)

            view_obj = request.website.get_template(id_or_xml_id)
            if 'main_object' not in qcontext:
                qcontext['main_object'] = view_obj

            values = qcontext

        return super(view, self).render(cr, uid, id_or_xml_id, values=values, engine=engine, context=context)

    def _pretty_arch(self, arch):
        """Serialize an lxml arch to consistently pretty-printed unicode XML."""
        # remove_blank_string does not seem to work on HTMLParser, and
        # pretty-printing with lxml more or less requires stripping
        # whitespace: http://lxml.de/FAQ.html#why-doesn-t-the-pretty-print-option-reformat-my-xml-output
        # so serialize to XML, parse as XML (remove whitespace) then serialize
        # as XML (pretty print)
        arch_no_whitespace = etree.fromstring(
            etree.tostring(arch, encoding='utf-8'),
            parser=etree.XMLParser(encoding='utf-8', remove_blank_text=True))
        return etree.tostring(
            arch_no_whitespace, encoding='unicode', pretty_print=True)

    def save(self, cr, uid, res_id, value, xpath=None, context=None):
        """ Update a view section. The view section may embed fields to write

        :param str model:
        :param int res_id:
        :param str xpath: valid xpath to the tag to replace
        """
        res_id = int(res_id)

        arch_section = html.fromstring(
            value, parser=html.HTMLParser(encoding='utf-8'))

        if xpath is None:
            # value is an embedded field on its own, not a view section
            self.save_embedded_field(cr, uid, arch_section, context=context)
            return

        for el in self.extract_embedded_fields(cr, uid, arch_section, context=context):
            self.save_embedded_field(cr, uid, el, context=context)

            # transform embedded field back to t-field
            el.getparent().replace(el, self.to_field_ref(cr, uid, el, context=context))

        arch = self.replace_arch_section(cr, uid, res_id, xpath, arch_section, context=context)
        self.write(cr, uid, res_id, {
            'arch': self._pretty_arch(arch)
        }, context=context)

        # Mark the view noupdate so a module update won't overwrite the edit.
        view = self.browse(cr, SUPERUSER_ID, res_id, context=context)
        if view.model_data_id:
            view.model_data_id.write({'noupdate': True})

    def customize_template_get(self, cr, uid, xml_id, full=False, bundles=False, context=None):
        """ Get inherit view's informations of the template ``key``. By default, only
        returns ``customize_show`` templates (which can be active or not), if
        ``full=True`` returns inherit view's informations of the template ``key``.
        ``bundles=True`` returns also the asset bundles
        """
        # NOTE(review): the ``bundles`` parameter is accepted but never used in
        # this body — presumably handled by an override; confirm before removal.
        imd = request.registry['ir.model.data']
        view_model, view_theme_id = imd.get_object_reference(cr, uid, 'website', 'theme')
        user = request.registry['res.users'].browse(cr, uid, uid, context)
        user_groups = set(user.groups_id)
        views = self._views_get(cr, uid, xml_id, context=dict(context or {}, active_test=False))
        done = set()
        result = []
        for v in views:
            # Skip views restricted to groups the user doesn't fully have.
            if not user_groups.issuperset(v.groups_id):
                continue
            if full or (v.customize_show and v.inherit_id.id != view_theme_id):
                if v.inherit_id not in done:
                    # Emit one header entry per parent view, before its options.
                    result.append({
                        'name': v.inherit_id.name,
                        'id': v.id,
                        'xml_id': v.xml_id,
                        'inherit_id': v.inherit_id.id,
                        'header': True,
                        'active': False
                    })
                    done.add(v.inherit_id)
                result.append({
                    'name': v.name,
                    'id': v.id,
                    'xml_id': v.xml_id,
                    'inherit_id': v.inherit_id.id,
                    'header': False,
                    'active': v.active,
                })
        return result

    # NOTE(review): ``field`` is a mutable default argument; it is only read
    # here, so this is harmless, but callers must not mutate it.
    def get_view_translations(self, cr, uid, xml_id, lang, field=['id', 'res_id', 'value', 'state', 'gengo_translation'], context=None):
        # Collect translations (in ``lang``) for every active view related to
        # the template ``xml_id``.
        views = self.customize_template_get(cr, uid, xml_id, full=True, context=context)
        views_ids = [view.get('id') for view in views if view.get('active')]
        domain = [('type', '=', 'view'), ('res_id', 'in', views_ids), ('lang', '=', lang)]
        irt = request.registry.get('ir.translation')
        return irt.search_read(cr, uid, domain, field, context=context)
agpl-3.0
gongweibao/cloud
demo/word2vec/train.py
2
5940
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import pickle import glob import os import sys import paddle.v2 as paddle import paddle.v2.dataset.common as common embsize = 32 hiddensize = 256 N = 5 # NOTE: You need to generate and split dataset then put it under your cloud storage. # then you can use different size of embedding. # NOTE: must change this to your own username on paddlecloud. USERNAME = "your-username" DC = os.getenv("PADDLE_CLOUD_CURRENT_DATACENTER") common.DATA_HOME = "/pfs/%s/home/%s" % (DC, USERNAME) TRAIN_FILES_PATH = os.path.join(common.DATA_HOME, "imikolov", "imikolov_train-*") WORD_DICT_PATH = os.path.join(common.DATA_HOME, "imikolov/word_dict.pickle") TRAINER_ID = int(os.getenv("PADDLE_INIT_TRAINER_ID", "-1")) TRAINER_COUNT = int(os.getenv("PADDLE_INIT_NUM_GRADIENT_SERVERS", "-1")) def prepare_dataset(): word_dict = paddle.dataset.imikolov.build_dict() with open(WORD_DICT_PATH, "w") as fn: pickle.dump(word_dict, fn) # NOTE: convert should be done by other job. def cluster_reader_recordio(trainer_id, trainer_count): ''' read from cloud dataset which is stored as recordio format each trainer will read a subset of files of the whole dataset. 
''' import recordio def reader(): file_list = glob.glob(TRAIN_FILES_PATH) file_list.sort() my_file_list = [] # read files for current trainer_id for idx, f in enumerate(file_list): if idx % trainer_count == trainer_id: my_file_list.append(f) for f in my_file_list: print "processing ", f reader = recordio.reader(f) record_raw = reader.read() while record_raw: yield pickle.loads(record_raw) record_raw = reader.read() reader.close() return reader def wordemb(inlayer): wordemb = paddle.layer.table_projection( input=inlayer, size=embsize, param_attr=paddle.attr.Param( name="_proj", initial_std=0.001, learning_rate=1, l2_rate=0, )) return wordemb def main(): paddle.init(use_gpu=False, trainer_count=1) # load dict from cloud file with open(WORD_DICT_PATH) as fn: word_dict = pickle.load(fn) dict_size = len(word_dict) firstword = paddle.layer.data( name="firstw", type=paddle.data_type.integer_value(dict_size)) secondword = paddle.layer.data( name="secondw", type=paddle.data_type.integer_value(dict_size)) thirdword = paddle.layer.data( name="thirdw", type=paddle.data_type.integer_value(dict_size)) fourthword = paddle.layer.data( name="fourthw", type=paddle.data_type.integer_value(dict_size)) nextword = paddle.layer.data( name="fifthw", type=paddle.data_type.integer_value(dict_size)) Efirst = wordemb(firstword) Esecond = wordemb(secondword) Ethird = wordemb(thirdword) Efourth = wordemb(fourthword) contextemb = paddle.layer.concat(input=[Efirst, Esecond, Ethird, Efourth]) hidden1 = paddle.layer.fc(input=contextemb, size=hiddensize, act=paddle.activation.Sigmoid(), layer_attr=paddle.attr.Extra(drop_rate=0.5), bias_attr=paddle.attr.Param(learning_rate=2), param_attr=paddle.attr.Param( initial_std=1. 
/ math.sqrt(embsize * 8), learning_rate=1)) predictword = paddle.layer.fc(input=hidden1, size=dict_size, bias_attr=paddle.attr.Param(learning_rate=2), act=paddle.activation.Softmax()) def event_handler(event): if isinstance(event, paddle.event.EndIteration): if event.batch_id % 100 == 0: result = trainer.test( paddle.batch( # NOTE: if you're going to use cluster test files, # prepare them on the storage first paddle.dataset.imikolov.test(word_dict, N), 32)) print "Pass %d, Batch %d, Cost %f, %s, Testing metrics %s" % ( event.pass_id, event.batch_id, event.cost, event.metrics, result.metrics) cost = paddle.layer.classification_cost(input=predictword, label=nextword) parameters = paddle.parameters.create(cost) adam_optimizer = paddle.optimizer.Adam( learning_rate=3e-3, regularization=paddle.optimizer.L2Regularization(8e-4)) trainer = paddle.trainer.SGD(cost, parameters, adam_optimizer) trainer.train( paddle.batch(cluster_reader_recordio(TRAINER_ID, TRAINER_COUNT), 32), num_passes=30, event_handler=event_handler) if __name__ == '__main__': usage = "python train.py [prepare|train]" if len(sys.argv) != 2: print usage exit(1) if TRAINER_ID == -1 or TRAINER_COUNT == -1: print "no cloud environ found, must run on cloud" exit(1) if sys.argv[1] == "prepare": prepare_dataset() elif sys.argv[1] == "train": main()
apache-2.0
piotrek-golda/CivilHubIndependantCopy
postman/tests.py
6
92998
""" Test suite. - Do not put 'mailer' in INSTALLED_APPS, it disturbs the emails counting. - Make sure these templates are accessible: registration/login.html base.html 404.html To have a fast test session, set a minimal configuration as: DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'. 'NAME': ':memory:', # Or path to database file if using sqlite3. 'USER': '', # Not used with sqlite3. 'PASSWORD': '', # Not used with sqlite3. 'HOST': '', # Set to empty string for localhost. Not used with sqlite3. 'PORT': '', # Set to empty string for default. Not used with sqlite3. } } INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', # 'django.contrib.sites', # is optional 'django.contrib.admin', # 'pagination', # has to be before postman ; or use the mock # 'ajax_select', # is an option # 'notification', # is an option 'postman', ) """ from __future__ import unicode_literals import copy from datetime import datetime, timedelta import re import sys from django.conf import settings from django.contrib.auth import REDIRECT_FIELD_NAME try: from django.contrib.auth import get_user_model # Django 1.5 except ImportError: from postman.future_1_5 import get_user_model from django.contrib.auth.models import AnonymousUser from django.contrib.sites.models import Site from django.core import mail from django.core.exceptions import ValidationError from django.core.urlresolvers import reverse, clear_url_caches, get_resolver, get_urlconf from django.db.models import Q from django.http import QueryDict from django.template import Template, Context, TemplateSyntaxError, TemplateDoesNotExist from django.test import TestCase, TransactionTestCase from django.utils.encoding import force_text from django.utils.formats import localize from django.utils import six from django.utils.six.moves import reload_module try: from django.utils.timezone import now # Django 
1.4 aware datetimes except ImportError: now = datetime.now from django.utils.translation import activate, deactivate from . import OPTION_MESSAGES from .api import pm_broadcast, pm_write # because of reload()'s, do "from postman.fields import CommaSeparatedUserField" just before needs # because of reload()'s, do "from postman.forms import xxForm" just before needs from .models import ORDER_BY_KEY, ORDER_BY_MAPPER, Message, PendingMessage,\ STATUS_PENDING, STATUS_ACCEPTED, STATUS_REJECTED,\ get_order_by, get_user_representation # because of reload()'s, do "from postman.utils import notification" just before needs from .utils import format_body, format_subject class GenericTest(TestCase): """ Usual generic tests. """ def test_version(self): self.assertEqual(sys.modules['postman'].__version__, "3.2.2") class TransactionViewTest(TransactionTestCase): """ Test some transactional behavior. Can't use Django TestCase class, because it has a special treament for commit/rollback to speed up the database resetting. """ urls = 'postman.urls_for_tests' def setUp(self): self.user1 = get_user_model().objects.create_user('foo', 'foo@domain.com', 'pass') self.user2 = get_user_model().objects.create_user('bar', 'bar@domain.com', 'pass') def test(self): "Test possible clash between transaction.commit_on_success and transaction.atomic (Django 1.6)." url = reverse('postman_write') data = {'recipients': self.user2.get_username(), 'subject': 's'} self.assertTrue(self.client.login(username='foo', password='pass')) response = self.client.post(url, data) self.assertTrue(Message.objects.get()) class BaseTest(TestCase): """ Common configuration and helper functions for all tests. 
""" urls = 'postman.urls_for_tests' def setUp(self): deactivate() # necessary for 1.4 to consider a new settings.LANGUAGE_CODE; 1.3 is fine with or without settings.LANGUAGE_CODE = 'en' # do not bother about translation ; needed for the server side # added for 1.8, for the client side, to supersede the default language set as soon as the creation of auth's permissions, # initiated via a post_migrate signal. activate('en') for a in ( 'POSTMAN_DISALLOW_ANONYMOUS', 'POSTMAN_DISALLOW_MULTIRECIPIENTS', 'POSTMAN_DISALLOW_COPIES_ON_REPLY', 'POSTMAN_DISABLE_USER_EMAILING', 'POSTMAN_AUTO_MODERATE_AS', 'POSTMAN_NOTIFIER_APP', 'POSTMAN_SHOW_USER_AS', 'POSTMAN_QUICKREPLY_QUOTE_BODY', ): if hasattr(settings, a): delattr(settings, a) settings.POSTMAN_MAILER_APP = None settings.POSTMAN_AUTOCOMPLETER_APP = { 'arg_default': 'postman_single_as1-1', # no default, mandatory to enable the feature } self.reload_modules() self.user1 = get_user_model().objects.create_user('foo', 'foo@domain.com', 'pass') self.user2 = get_user_model().objects.create_user('bar', 'bar@domain.com', 'pass') self.user3 = get_user_model().objects.create_user('baz', 'baz@domain.com', 'pass') self.email = 'qux@domain.com' def check_now(self, dt): "Check that a date is now. Well... almost." delta = dt - now() seconds = delta.days * (24*60*60) + delta.seconds self.assertTrue(-2 <= seconds <= 1) # -1 is not enough for Mysql def check_status(self, m, status=STATUS_PENDING, is_new=True, is_replied=False, parent=None, thread=None, moderation_date=False, moderation_by=None, moderation_reason='', sender_archived=False, recipient_archived=False, sender_deleted_at=False, recipient_deleted_at=False): "Check a bunch of properties of a message." 
self.assertEqual(m.is_pending(), status==STATUS_PENDING) self.assertEqual(m.is_rejected(), status==STATUS_REJECTED) self.assertEqual(m.is_accepted(), status==STATUS_ACCEPTED) self.assertEqual(m.is_new, is_new) self.assertEqual(m.is_replied, is_replied) self.check_now(m.sent_at) self.assertEqual(m.parent, parent) self.assertEqual(m.thread, thread) self.assertEqual(m.sender_archived, sender_archived) self.assertEqual(m.recipient_archived, recipient_archived) if sender_deleted_at: if isinstance(sender_deleted_at, datetime): self.assertEqual(m.sender_deleted_at, sender_deleted_at) else: self.assertNotEqual(m.sender_deleted_at, None) else: self.assertEqual(m.sender_deleted_at, None) if recipient_deleted_at: if isinstance(recipient_deleted_at, datetime): self.assertEqual(m.recipient_deleted_at, recipient_deleted_at) else: self.assertNotEqual(m.recipient_deleted_at, None) else: self.assertEqual(m.recipient_deleted_at, None) if moderation_date: if isinstance(moderation_date, datetime): self.assertEqual(m.moderation_date, moderation_date) else: self.assertNotEqual(m.moderation_date, None) else: self.assertEqual(m.moderation_date, None) self.assertEqual(m.moderation_by, moderation_by) self.assertEqual(m.moderation_reason, moderation_reason) def create(self, *args, **kwargs): "Create a message." kwargs.update(subject='s') return Message.objects.create(*args, **kwargs) def create_accepted(self, *args, **kwargs): "Create a message with a default status as 'accepted'." 
kwargs.setdefault('moderation_status', STATUS_ACCEPTED) return self.create(*args, **kwargs) # set of message creations def c12(self, *args, **kwargs): kwargs.update(sender=self.user1, recipient=self.user2) return self.create_accepted(*args, **kwargs) def c13(self, *args, **kwargs): kwargs.update(sender=self.user1, recipient=self.user3) return self.create_accepted(*args, **kwargs) def c21(self, *args, **kwargs): kwargs.update(sender=self.user2, recipient=self.user1) return self.create_accepted(*args, **kwargs) def c23(self, *args, **kwargs): kwargs.update(sender=self.user2, recipient=self.user3) return self.create_accepted(*args, **kwargs) def c32(self, *args, **kwargs): kwargs.update(sender=self.user3, recipient=self.user2) return self.create_accepted(*args, **kwargs) def reload_modules(self): "Reload some modules after a change in settings." clear_url_caches() try: reload_module(sys.modules['postman.utils']) reload_module(sys.modules['postman.fields']) reload_module(sys.modules['postman.forms']) reload_module(sys.modules['postman.views']) reload_module(sys.modules['postman.urls']) except KeyError: # happens once at the setUp pass reload_module(get_resolver(get_urlconf()).urlconf_module) class ViewTest(BaseTest): """ Test the views. 
""" def test_home(self): response = self.client.get('/messages/') self.assertRedirects(response, reverse('postman_inbox'), status_code=301, target_status_code=302) def check_folder(self, folder): url = reverse('postman_' + folder, args=[OPTION_MESSAGES]) template = "postman/{0}.html".format(folder) # anonymous response = self.client.get(url) self.assertRedirects(response, "{0}?{1}={2}".format(settings.LOGIN_URL, REDIRECT_FIELD_NAME, url)) # authenticated self.assertTrue(self.client.login(username='foo', password='pass')) response = self.client.get(url) self.assertTemplateUsed(response, template) url = reverse('postman_' + folder) response = self.client.get(url) self.assertTemplateUsed(response, template) def test_inbox(self): self.check_folder('inbox') def test_sent(self): self.check_folder('sent') def test_archives(self): self.check_folder('archives') def test_trash(self): self.check_folder('trash') def check_template(self, action, args): # don't want to bother with additional templates; test only the parameter passing url = reverse('postman_' + action + '_template', args=args) self.assertRaises(TemplateDoesNotExist, self.client.get, url) def test_template(self): "Test the 'template_name' parameter." m1 = self.c12() m1.read_at, m1.thread = now(), m1 m2 = self.c21(parent=m1, thread=m1.thread) m1.replied_at = m2.sent_at; m1.save() self.assertTrue(self.client.login(username='foo', password='pass')) for actions, args in [ (('inbox', 'sent', 'archives', 'trash', 'write'), []), (('view', 'view_conversation'), [m1.pk]), (('reply',), [m2.pk]), ]: for action in actions: self.check_template(action, args) def test_write_authentication(self): "Test permission and what template & form are used." 
url = reverse('postman_write') template = "postman/write.html" # anonymous is allowed response = self.client.get(url) self.assertTemplateUsed(response, template) from postman.forms import AnonymousWriteForm self.assertTrue(isinstance(response.context['form'], AnonymousWriteForm)) # anonymous is not allowed settings.POSTMAN_DISALLOW_ANONYMOUS = True self.reload_modules() response = self.client.get(url) self.assertRedirects(response, "{0}?{1}={2}".format(settings.LOGIN_URL, REDIRECT_FIELD_NAME, url)) # authenticated self.assertTrue(self.client.login(username='foo', password='pass')) response = self.client.get(url) self.assertTemplateUsed(response, template) from postman.forms import WriteForm self.assertTrue(isinstance(response.context['form'], WriteForm)) def test_write_recipient(self): "Test the passing of recipient names in URL." template = "postman/write.html" url = reverse('postman_write', args=['foo']) response = self.client.get(url) self.assertContains(response, 'value="foo"') url = reverse('postman_write', args=['foo:bar']) response = self.client.get(url) self.assertContains(response, 'value="bar, foo"') url = reverse('postman_write', args=[':foo::intruder:bar:a-b+c@d.com:foo:']) response = self.client.get(url) self.assertContains(response, 'value="bar, foo"') # because of Custom User Model, do allow almost any character, not only '^[\w.@+-]+$' of the legacy django.contrib.auth.User model get_user_model().objects.create_user("Le Créac'h", 'foobar@domain.com', 'pass') # even: space, accentued, qootes url = reverse('postman_write', args=["Le Créac'h"]) response = self.client.get(url) self.assertContains(response, 'value="Le Créac&#39;h"') def test_write_auto_complete(self): "Test the 'autocomplete_channels' parameter." 
url = reverse('postman_write_auto_complete') # anonymous response = self.client.get(url) f = response.context['form'].fields['recipients'] if hasattr(f, 'channel'): # app may not be in INSTALLED_APPS self.assertEqual(f.channel, 'postman_single_as1-1') # authenticated self.assertTrue(self.client.login(username='foo', password='pass')) response = self.client.get(url) f = response.context['form'].fields['recipients'] if hasattr(f, 'channel'): self.assertEqual(f.channel, 'postman_multiple_as1-1') def check_init_by_query_string(self, action, args=[]): template = "postman/{0}.html".format(action) url = reverse('postman_' + action, args=args) response = self.client.get(url + '?subject=that%20is%20the%20subject') self.assertContains(response, 'value="that is the subject"') response = self.client.get(url + '?body=this%20is%20my%20body') # before Dj 1.5: 'name="body">this is my body' ; after: 'name="body">\r\nthis is my body' self.assertContains(response, 'this is my body</textarea>') def test_write_querystring(self): "Test the prefilling by query string." self.check_init_by_query_string('write') def check_message(self, m, is_anonymous=False, subject='s', body='b', recipient_username='bar'): "Check some message properties, status, and that no mail is sent." 
self.assertEqual(m.subject, subject) self.assertEqual(m.body, body) self.assertEqual(m.email, 'a@b.com' if is_anonymous else '') self.assertEqual(m.sender, self.user1 if not is_anonymous else None) self.assertEqual(m.recipient.get_username(), recipient_username) if is_anonymous: self.check_status(m, sender_deleted_at=True) self.assertEqual(len(mail.outbox), 0) def check_contrib_messages(self, response, text): if 'messages' in response.context: # contrib\messages\context_processors.py may be not there messages = response.context['messages'] if messages != []: # contrib\messages\middleware.py may be not there self.assertEqual(len(messages), 1) for message in messages: # can only be iterated self.assertEqual(str(message), text) def check_write_post(self, extra={}, is_anonymous=False): "Check message generation, redirection, and mandatory fields." url = reverse('postman_write') url_with_success_url = reverse('postman_write_with_success_url_to_sent') data = {'recipients': self.user2.get_username(), 'subject': 's', 'body': 'b'} data.update(extra) # default redirect is to the requestor page response = self.client.post(url, data, HTTP_REFERER=url, follow=True) self.assertRedirects(response, url) self.check_contrib_messages(response, 'Message successfully sent.') # no such check for the following posts, one is enough m = Message.objects.get() pk = m.pk self.check_message(m, is_anonymous) # fallback redirect is to inbox. 
        # So redirect again when login is required
        response = self.client.post(url, data)
        self.assertRedirects(response, reverse('postman_inbox'), target_status_code=302 if is_anonymous else 200)
        self.check_message(Message.objects.get(pk=pk+1), is_anonymous)
        # redirect url may be superseded
        response = self.client.post(url_with_success_url, data, HTTP_REFERER=url)
        self.assertRedirects(response, reverse('postman_sent'), target_status_code=302 if is_anonymous else 200)
        self.check_message(Message.objects.get(pk=pk+2), is_anonymous)
        # query string has highest precedence
        response = self.client.post(url_with_success_url + '?next=' + url, data, HTTP_REFERER='does not matter')
        self.assertRedirects(response, url)
        self.check_message(Message.objects.get(pk=pk+3), is_anonymous)
        # drop each field in turn and expect a 'required' form error ('body' is exempted)
        for f in data.keys():
            if f in ('body',):
                continue
            d = data.copy()
            del d[f]
            response = self.client.post(url, d, HTTP_REFERER=url)
            self.assertFormError(response, 'form', f, 'This field is required.')

    def test_write_post_anonymous(self):
        self.check_write_post({'email': 'a@b.com'}, is_anonymous=True)

    def test_write_post_authenticated(self):
        self.assertTrue(self.client.login(username='foo', password='pass'))
        self.check_write_post()

    def test_write_post_multirecipient(self):
        "Test number of recipients constraint."
        from postman.fields import CommaSeparatedUserField
        url = reverse('postman_write')
        data = {
            'email': 'a@b.com', 'subject': 's', 'body': 'b',
            'recipients': '{0}, {1}'.format(self.user2.get_username(), self.user3.get_username())}
        # anonymous: limited to a single recipient
        response = self.client.post(url, data, HTTP_REFERER=url)
        self.assertFormError(response, 'form', 'recipients', CommaSeparatedUserField.default_error_messages['max'].format(limit_value=1, show_value=2))
        # authenticated: both recipients accepted, one message each
        self.assertTrue(self.client.login(username='foo', password='pass'))
        del data['email']
        response = self.client.post(url, data, HTTP_REFERER=url)
        self.assertRedirects(response, url)
        msgs = list(Message.objects.all())
        self.check_message(msgs[0], recipient_username='baz')
        self.check_message(msgs[1])
        # a 'max' URL parameter caps the number of recipients
        url_with_max = reverse('postman_write_with_max')
        response = self.client.post(url_with_max, data, HTTP_REFERER=url)
        self.assertFormError(response, 'form', 'recipients', CommaSeparatedUserField.default_error_messages['max'].format(limit_value=1, show_value=2))
        # the setting caps it as well
        settings.POSTMAN_DISALLOW_MULTIRECIPIENTS = True
        response = self.client.post(url, data, HTTP_REFERER=url)
        self.assertFormError(response, 'form', 'recipients', CommaSeparatedUserField.default_error_messages['max'].format(limit_value=1, show_value=2))
        url = reverse('postman_write')
        data = {
            'subject': 's', 'body': 'b',
            'recipients': '{0}, {1}'.format(self.user2.get_username(), self.user3.get_username())}
        self.assertTrue(self.client.login(username='foo', password='pass'))
        # user filters: rejection messages vary with the filter's return value
        response = self.client.post(reverse('postman_write_with_user_filter_reason'), data, HTTP_REFERER=url)
        self.assertFormError(response, 'form', 'recipients', "Some usernames are rejected: bar (some reason).")
        response = self.client.post(reverse('postman_write_with_user_filter_no_reason'), data, HTTP_REFERER=url)
        self.assertFormError(response, 'form', 'recipients', "Some usernames are rejected: bar, baz.")
        response = self.client.post(reverse('postman_write_with_user_filter_false'), data, HTTP_REFERER=url)
        self.assertFormError(response, 'form', 'recipients', "Some usernames are rejected: bar, baz.")
        response = self.client.post(reverse('postman_write_with_user_filter_exception'), data, HTTP_REFERER=url)
        self.assertFormError(response, 'form', 'recipients', ['first good reason',"anyway, I don't like bar"])
        # exchange filters: same pattern, different wording
        response = self.client.post(reverse('postman_write_with_exch_filter_reason'), data, HTTP_REFERER=url)
        self.assertFormError(response, 'form', 'recipients', "Writing to some users is not possible: bar (some reason).")
        response = self.client.post(reverse('postman_write_with_exch_filter_no_reason'), data, HTTP_REFERER=url)
        self.assertFormError(response, 'form', 'recipients', "Writing to some users is not possible: bar, baz.")
        response = self.client.post(reverse('postman_write_with_exch_filter_false'), data, HTTP_REFERER=url)
        self.assertFormError(response, 'form', 'recipients', "Writing to some users is not possible: bar, baz.")
        response = self.client.post(reverse('postman_write_with_exch_filter_exception'), data, HTTP_REFERER=url)
        self.assertFormError(response, 'form', 'recipients', ['first good reason',"anyway, I don't like bar"])

    def test_write_post_moderate(self):
        "Test 'auto_moderators' parameter."
        url = reverse('postman_write')
        data = {'subject': 's', 'body': 'b', 'recipients': self.user2.get_username()}
        self.assertTrue(self.client.login(username='foo', password='pass'))
        response = self.client.post(reverse('postman_write_moderate'), data, HTTP_REFERER=url, follow=True)
        self.assertRedirects(response, url)
        self.check_contrib_messages(response, 'Message rejected for at least one recipient.')
        self.check_status(Message.objects.get(), status=STATUS_REJECTED, recipient_deleted_at=True, moderation_date=True, moderation_reason="some reason")

    def test_write_notification(self):
        "Test the fallback for the site name in the generation of a notification, when the django.contrib.sites app is not installed."
        settings.POSTMAN_AUTO_MODERATE_AS = True  # will generate an acceptance notification
        url = reverse('postman_write')
        data = {'subject': 's', 'body': 'b', 'recipients': self.user2.get_username()}
        self.assertTrue(self.client.login(username='foo', password='pass'))
        response = self.client.post(url, data, HTTP_REFERER=url)
        self.assertRedirects(response, url)
        self.check_status(Message.objects.get(), status=STATUS_ACCEPTED, moderation_date=True)
        self.assertEqual(len(mail.outbox), 1)
        # can't use get_current_site(response.request) because response.request is not an HttpRequest and doesn't have a get_host attribute
        if Site._meta.installed:
            sitename = Site.objects.get_current().name
        else:
            sitename = "testserver"  # the SERVER_NAME environment variable is not accessible here
        self.assertTrue(sitename in mail.outbox[0].subject)
template = "postman/reply.html" pk = self.c21(body="this is my body").pk url = reverse('postman_reply', args=[pk]) # anonymous response = self.client.get(url) self.assertRedirects(response, "{0}?{1}={2}".format(settings.LOGIN_URL, REDIRECT_FIELD_NAME, url)) # authenticated self.assertTrue(self.client.login(username='foo', password='pass')) response = self.client.get(url) self.assertTemplateUsed(response, template) from postman.forms import FullReplyForm self.assertTrue(isinstance(response.context['form'], FullReplyForm)) self.assertContains(response, 'value="Re: s"') self.assertContains(response, '\n\nbar wrote:\n&gt; this is my body\n</textarea>') self.assertEqual(response.context['recipient'], 'bar') settings.POSTMAN_QUICKREPLY_QUOTE_BODY = True # no influence here, acts only for Quick Reply self.reload_modules() response = self.client.get(url) self.assertContains(response, 'value="Re: s"') self.assertContains(response, '\n\nbar wrote:\n&gt; this is my body\n</textarea>') def test_reply_formatters(self): "Test the 'formatters' parameter." template = "postman/reply.html" pk = self.c21(body="this is my body").pk url = reverse('postman_reply_formatters', args=[pk]) self.assertTrue(self.client.login(username='foo', password='pass')) response = self.client.get(url) self.assertTemplateUsed(response, template) self.assertContains(response, 'value="Re_ s"') self.assertContains(response, 'bar _ this is my body</textarea>') # POSTMAN_QUICKREPLY_QUOTE_BODY setting is not involved def test_reply_auto_complete(self): "Test the 'autocomplete_channel' parameter." pk = self.c21().pk url = reverse('postman_reply_auto_complete', args=[pk]) self.assertTrue(self.client.login(username='foo', password='pass')) response = self.client.get(url) f = response.context['form'].fields['recipients'] if hasattr(f, 'channel'): self.assertEqual(f.channel, 'postman_multiple_as1-1') def check_404(self, view_name, pk): "Return is a 404 page." 
url = reverse(view_name, args=[pk]) response = self.client.get(url) self.assertEqual(response.status_code, 404) def check_reply_404(self, pk): self.check_404('postman_reply', pk) def test_reply_id(self): "Test all sort of failures." self.assertTrue(self.client.login(username='foo', password='pass')) # invalid message id self.check_reply_404(1000) # existent message but you are the sender, not the recipient self.check_reply_404(Message.objects.get(pk=self.c12().pk).pk) # create & verify really there # existent message but not yours at all self.check_reply_404(Message.objects.get(pk=self.c23().pk).pk) # existent message but not yet visible to you self.check_reply_404(Message.objects.get(pk=self.create(sender=self.user2, recipient=self.user1).pk).pk) # cannot reply to a deleted message self.check_reply_404(Message.objects.get(pk=self.c21(recipient_deleted_at=now()).pk).pk) def test_reply_querystring(self): "Test the prefilling by query string." self.assertTrue(self.client.login(username='foo', password='pass')) self.check_init_by_query_string('reply', [self.c21().pk]) def test_reply_post(self): "Test message generation and redirection." 
        pk = self.c21().pk
        url = reverse('postman_reply', args=[pk])
        url_with_success_url = reverse('postman_reply_with_success_url_to_sent', args=[pk])
        data = {'subject': 's', 'body': 'b'}
        self.assertTrue(self.client.login(username='foo', password='pass'))
        # default redirect is to the requestor page
        response = self.client.post(url, data, HTTP_REFERER=url)
        self.assertRedirects(response, url)  # the check_contrib_messages() in test_write_post() is enough
        self.check_message(Message.objects.get(pk=pk+1))
        # fallback redirect is to inbox
        response = self.client.post(url, data)
        self.assertRedirects(response, reverse('postman_inbox'))
        self.check_message(Message.objects.get(pk=pk+2))
        # redirect url may be superseded
        response = self.client.post(url_with_success_url, data, HTTP_REFERER=url)
        self.assertRedirects(response, reverse('postman_sent'))
        self.check_message(Message.objects.get(pk=pk+3))
        # query string has highest precedence
        response = self.client.post(url_with_success_url + '?next=' + url, data, HTTP_REFERER='does not matter')
        self.assertRedirects(response, url)
        self.check_message(Message.objects.get(pk=pk+4))
        # missing subject is valid, as in quick reply
        response = self.client.post(url, {}, HTTP_REFERER=url)
        self.assertRedirects(response, url)
        self.check_message(Message.objects.get(pk=pk+5), subject='Re: s', body='')

    def test_reply_post_copies(self):
        "Test number of recipients constraint."
        from postman.fields import CommaSeparatedUserField
        pk = self.c21().pk
        url = reverse('postman_reply', args=[pk])
        data = {'subject': 's', 'body': 'b', 'recipients': self.user3.get_username()}
        self.assertTrue(self.client.login(username='foo', password='pass'))
        # a reply goes to the original sender, plus one copy per extra recipient
        response = self.client.post(url, data, HTTP_REFERER=url)
        self.assertRedirects(response, url)
        self.check_message(Message.objects.get(pk=pk+1))
        self.check_message(Message.objects.get(pk=pk+2), recipient_username='baz')
        url_with_max = reverse('postman_reply_with_max', args=[pk])
        data.update(recipients='{0}, {1}'.format(self.user2.get_username(), self.user3.get_username()))
        response = self.client.post(url_with_max, data, HTTP_REFERER=url)
        self.assertFormError(response, 'form', 'recipients', CommaSeparatedUserField.default_error_messages['max'].format(limit_value=1, show_value=2))
        # with copies disallowed, extra recipients are silently ignored
        settings.POSTMAN_DISALLOW_COPIES_ON_REPLY = True
        self.reload_modules()
        response = self.client.post(url, data, HTTP_REFERER=url)
        self.assertRedirects(response, url)
        self.check_message(Message.objects.get(pk=pk+3))
        self.assertRaises(Message.DoesNotExist, Message.objects.get, pk=pk+4)
        pk = self.c21().pk
        url = reverse('postman_reply', args=[pk])
        data = {'subject': 's', 'body': 'b', 'recipients': '{0}, {1}'.format(self.user2.get_username(), self.user3.get_username())}
        self.assertTrue(self.client.login(username='foo', password='pass'))
        # user filters: rejection messages vary with the filter's return value
        response = self.client.post(reverse('postman_reply_with_user_filter_reason', args=[pk]), data, HTTP_REFERER=url)
        self.assertFormError(response, 'form', 'recipients', "Some usernames are rejected: bar (some reason).")
        response = self.client.post(reverse('postman_reply_with_user_filter_no_reason', args=[pk]), data, HTTP_REFERER=url)
        self.assertFormError(response, 'form', 'recipients', "Some usernames are rejected: bar, baz.")
        response = self.client.post(reverse('postman_reply_with_user_filter_false', args=[pk]), data, HTTP_REFERER=url)
        self.assertFormError(response, 'form', 'recipients', "Some usernames are rejected: bar, baz.")
        response = self.client.post(reverse('postman_reply_with_user_filter_exception', args=[pk]), data, HTTP_REFERER=url)
        self.assertFormError(response, 'form', 'recipients', ['first good reason',"anyway, I don't like bar"])
        # exchange filters: same pattern, different wording
        response = self.client.post(reverse('postman_reply_with_exch_filter_reason', args=[pk]), data, HTTP_REFERER=url)
        self.assertFormError(response, 'form', 'recipients', "Writing to some users is not possible: bar (some reason).")
        response = self.client.post(reverse('postman_reply_with_exch_filter_no_reason', args=[pk]), data, HTTP_REFERER=url)
        self.assertFormError(response, 'form', 'recipients', "Writing to some users is not possible: bar, baz.")
        response = self.client.post(reverse('postman_reply_with_exch_filter_false', args=[pk]), data, HTTP_REFERER=url)
        self.assertFormError(response, 'form', 'recipients', "Writing to some users is not possible: bar, baz.")
        response = self.client.post(reverse('postman_reply_with_exch_filter_exception', args=[pk]), data, HTTP_REFERER=url)
        self.assertFormError(response, 'form', 'recipients', ['first good reason',"anyway, I don't like bar"])

    def test_reply_post_moderate(self):
        "Test 'auto_moderators' parameter."
        m = self.c21()
        pk = m.pk
        url = reverse('postman_reply', args=[pk])
        data = {'subject': 's', 'body': 'b'}
        self.assertTrue(self.client.login(username='foo', password='pass'))
        response = self.client.post(reverse('postman_reply_moderate', args=[pk]), data, HTTP_REFERER=url)
        self.assertRedirects(response, url)  # the check_contrib_messages() in test_write_post_moderate() is enough
        self.check_status(Message.objects.get(pk=pk+1), status=STATUS_REJECTED, recipient_deleted_at=True, parent=m, thread=m, moderation_date=True, moderation_reason="some reason")

    def test_view_authentication(self):
        "Test permission, what template and form are used, set-as-read."
        template = "postman/view.html"
        pk1 = self.c12().pk
        pk2 = self.c21(body="this is my body").pk
        url = reverse('postman_view', args=[pk1])
        # anonymous
        response = self.client.get(url)
        self.assertRedirects(response, "{0}?{1}={2}".format(settings.LOGIN_URL, REDIRECT_FIELD_NAME, url))
        # authenticated; viewing a message you sent offers no quick-reply form
        self.assertTrue(self.client.login(username='foo', password='pass'))
        response = self.client.get(url)
        self.assertTemplateUsed(response, template)
        self.assertFalse(response.context['archived'])
        self.assertTrue(response.context['reply_to_pk'] is None)
        self.assertTrue(response.context['form'] is None)
        self.check_status(Message.objects.get(pk=pk1), status=STATUS_ACCEPTED)
        # viewing a received message offers the quick-reply form and marks it read
        url = reverse('postman_view', args=[pk2])
        response = self.client.get(url)
        self.assertFalse(response.context['archived'])
        self.assertEqual(response.context['reply_to_pk'], pk2)
        from postman.forms import QuickReplyForm
        self.assertTrue(isinstance(response.context['form'], QuickReplyForm))
        self.assertNotContains(response, 'value="Re: s"')
        self.assertContains(response, '>\r\n</textarea>')  # as in django\forms\widgets.py\Textarea
        self.check_status(Message.objects.get(pk=pk2), status=STATUS_ACCEPTED, is_new=False)
        settings.POSTMAN_QUICKREPLY_QUOTE_BODY = True
        self.reload_modules()
        response = self.client.get(url)
        self.assertContains(response, '\n\nbar wrote:\n&gt; this is my body\n</textarea>')

    def test_view_formatters(self):
        "Test the 'formatters' parameter."
        template = "postman/view.html"
        pk = self.c21(body="this is my body").pk
        url = reverse('postman_view_formatters', args=[pk])
        self.assertTrue(self.client.login(username='foo', password='pass'))
        response = self.client.get(url)
        self.assertTemplateUsed(response, template)
        # custom formatters replace ':' with '_' in subject and quote prefix
        self.assertNotContains(response, 'value="Re_ s"')
        self.assertContains(response, 'bar _ this is my body</textarea>')
        # POSTMAN_QUICKREPLY_QUOTE_BODY setting is not involved

    def check_view_404(self, pk):
        self.check_404('postman_view', pk)

    def test_view_id(self):
        "Test all sort of failures."
        self.assertTrue(self.client.login(username='foo', password='pass'))
        # invalid message id
        self.check_view_404(1000)
        # existent message but not yours
        self.check_view_404(Message.objects.get(pk=self.c23().pk).pk)  # create & verify really there
        # existent message but not yet visible to you
        self.check_view_404(Message.objects.get(pk=self.create(sender=self.user2, recipient=self.user1).pk).pk)

    def test_view_conversation_authentication(self):
        "Test permission, what template and form are used, number of messages in the conversation, set-as-read."
        template = "postman/view.html"
        m1 = self.c12()
        m1.read_at, m1.thread = now(), m1
        m2 = self.c21(parent=m1, thread=m1.thread, body="this is my body")
        m1.replied_at = m2.sent_at; m1.save()
        url = reverse('postman_view_conversation', args=[m1.pk])
        self.check_status(Message.objects.get(pk=m1.pk), status=STATUS_ACCEPTED, is_new=False, is_replied=True, thread=m1)
        # anonymous
        response = self.client.get(url)
        self.assertRedirects(response, "{0}?{1}={2}".format(settings.LOGIN_URL, REDIRECT_FIELD_NAME, url))
        # authenticated
        self.assertTrue(self.client.login(username='foo', password='pass'))
        response = self.client.get(url)
        self.assertTemplateUsed(response, template)
        self.assertFalse(response.context['archived'])
        self.assertEqual(response.context['reply_to_pk'], m2.pk)
        from postman.forms import QuickReplyForm
        self.assertTrue(isinstance(response.context['form'], QuickReplyForm))
        self.assertNotContains(response, 'value="Re: s"')
        self.assertContains(response, '>\r\n</textarea>')  # as in django\forms\widgets.py\Textarea
        self.assertEqual(len(response.context['pm_messages']), 2)
        self.check_status(Message.objects.get(pk=m2.pk), status=STATUS_ACCEPTED, is_new=False, parent=m1, thread=m1)
        settings.POSTMAN_QUICKREPLY_QUOTE_BODY = True
        self.reload_modules()
        response = self.client.get(url)
        self.assertContains(response, '\n\nbar wrote:\n&gt; this is my body\n</textarea>')

    def check_view_conversation_404(self, thread_id):
        self.check_404('postman_view_conversation', thread_id)

    def test_view_conversation_id(self):
        "Test all sort of failures."
        self.assertTrue(self.client.login(username='foo', password='pass'))
        # invalid conversation id
        self.check_view_conversation_404(1000)
        # existent conversation but not yours
        m1 = self.c23()
        m1.read_at, m1.thread = now(), m1
        m2 = self.c32(parent=m1, thread=m1.thread)
        m1.replied_at = m2.sent_at; m1.save()
        self.check_view_conversation_404(m1.thread_id)
        m1 = self.c12()
        m1.read_at, m1.thread = now(), m1
        m1.save()
        # a reply still pending moderation: part of the thread but not yet visible to the sender of m1
        m2 = self.create(sender=self.user2, recipient=self.user1, parent=m1, thread=m1.thread)
        url = reverse('postman_view_conversation', args=[m1.pk])
        self.check_status(Message.objects.get(pk=m1.pk), status=STATUS_ACCEPTED, is_new=False, thread=m1)
        # existent response but not yet visible to you
        self.assertTrue(self.client.login(username='foo', password='pass'))
        response = self.client.get(url)
        self.assertEqual(len(response.context['pm_messages']), 1)
        self.check_status(Message.objects.get(pk=m2.pk), parent=m1, thread=m1)
        # complete view on the other side
        self.assertTrue(self.client.login(username='bar', password='pass'))
        response = self.client.get(url)
        self.assertEqual(len(response.context['pm_messages']), 2)

    def check_update(self, view_name, success_msg, field_bit, pk, field_value=None):
        "Check permission, redirection, field updates, invalid cases."
        url = reverse(view_name)
        url_with_success_url = reverse(view_name + '_with_success_url_to_archives')
        data = {'pks': (str(pk), str(pk+1), str(pk+2))}
        # anonymous
        response = self.client.post(url, data)
        self.assertRedirects(response, "{0}?{1}={2}".format(settings.LOGIN_URL, REDIRECT_FIELD_NAME, url))
        # authenticated
        self.assertTrue(self.client.login(username='foo', password='pass'))
        # default redirect is to the requestor page
        redirect_url = reverse('postman_sent')
        response = self.client.post(url, data, HTTP_REFERER=redirect_url, follow=True)  # 'follow' to access messages
        self.assertRedirects(response, redirect_url)
        self.check_contrib_messages(response, success_msg)
        # the updated flag lives on the sender side or the recipient side,
        # depending on which party the logged-in user is for each message
        sender_kw = 'sender_{0}'.format(field_bit)
        recipient_kw = 'recipient_{0}'.format(field_bit)
        self.check_status(Message.objects.get(pk=pk), status=STATUS_ACCEPTED, **{sender_kw: field_value})
        self.check_status(Message.objects.get(pk=pk+1), status=STATUS_ACCEPTED, **{recipient_kw: field_value})
        self.check_status(Message.objects.get(pk=pk+2), status=STATUS_ACCEPTED, **{sender_kw: field_value})
        url = reverse(view_name)
        pk = root_msg.pk
        data = {'tpks': str(pk)}
        self.assertTrue(self.client.login(username='foo', password='pass'))
        response = self.client.post(url, data)
        self.assertRedirects(response, reverse('postman_inbox'))
        # contrib.messages are already tested with check_update()
        sender_kw = 'sender_{0}'.format(field_bit)
        recipient_kw = 'recipient_{0}'.format(field_bit)
        self.check_status(Message.objects.get(pk=pk), status=STATUS_ACCEPTED, is_new=False, is_replied=True, thread=root_msg, **{sender_kw: field_value})
        self.check_status(Message.objects.get(pk=pk+1), status=STATUS_ACCEPTED, parent=root_msg, thread=root_msg, **{recipient_kw: field_value})
        # missing payload
        response = self.client.post(url)
        self.assertRedirects(response, reverse('postman_inbox'))
        # not a POST
        response = self.client.get(url, data)
        self.assertEqual(response.status_code, 405)
        # not yours
        self.assertTrue(self.client.login(username='baz', password='pass'))
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 404)

    def test_archive(self):
        "Test archive action on messages."
        pk = self.c12().pk
        self.c21()
        self.c12()
        self.c13()
        self.check_update('postman_archive', 'Messages or conversations successfully archived.', 'archived', pk, True)

    def test_archive_conversation(self):
        "Test archive action on conversations."
        m1 = self.c12()
        m1.read_at, m1.thread = now(), m1
        m2 = self.c21(parent=m1, thread=m1.thread)
        m1.replied_at = m2.sent_at; m1.save()
        self.check_update_conversation('postman_archive', m1, 'archived', True)

    def test_delete(self):
        "Test delete action on messages."
        pk = self.c12().pk
        self.c21()
        self.c12()
        self.c13()
        self.check_update('postman_delete', 'Messages or conversations successfully deleted.', 'deleted_at', pk, True)

    def test_delete_conversation(self):
        "Test delete action on conversations."
        m1 = self.c12()
        m1.read_at, m1.thread = now(), m1
        m2 = self.c21(parent=m1, thread=m1.thread)
        m1.replied_at = m2.sent_at; m1.save()
        self.check_update_conversation('postman_delete', m1, 'deleted_at', True)

    def test_undelete(self):
        "Test undelete action on messages."
        pk = self.c12(sender_deleted_at=now()).pk
        self.c21(recipient_deleted_at=now())
        self.c12(sender_deleted_at=now())
        self.c13()
        self.check_update('postman_undelete', 'Messages or conversations successfully recovered.', 'deleted_at', pk)

    def test_undelete_conversation(self):
        "Test undelete action on conversations."
        m1 = self.c12(sender_deleted_at=now())
        m1.read_at, m1.thread = now(), m1
        m2 = self.c21(parent=m1, thread=m1.thread, recipient_deleted_at=now())
        m1.replied_at = m2.sent_at; m1.save()
        self.check_update_conversation('postman_undelete', m1, 'deleted_at')


class FieldTest(BaseTest):
    """
    Test the CommaSeparatedUserField.
    """
    def test_label(self):
        "Test the plural/singular of the label."
        from postman.fields import CommaSeparatedUserField
        # the label is a (plural, singular) pair, selected by the current 'max'
        f = CommaSeparatedUserField(label=('plural','singular'))
        self.assertEqual(f.label, 'plural')
        f.set_max(1)
        self.assertEqual(f.label, 'singular')
        f = CommaSeparatedUserField(label=('plural','singular'), max=1)
        self.assertEqual(f.label, 'singular')
        f.set_max(2)
        self.assertEqual(f.label, 'plural')
        f = CommaSeparatedUserField(label=('plural','singular'), max=2)
        self.assertEqual(f.label, 'plural')
        f.set_max(1)
        self.assertEqual(f.label, 'singular')

    def test_to_python(self):
        "Test the conversion to a python list."
        from postman.fields import CommaSeparatedUserField
        f = CommaSeparatedUserField()
        # splitting trims whitespace, drops empty items, and de-duplicates
        self.assertEqual(f.to_python(''), [])
        self.assertEqual(f.to_python('foo'), ['foo'])
        self.assertEqual(frozenset(f.to_python('foo, bar')), frozenset(['foo', 'bar']))
        self.assertEqual(frozenset(f.to_python('foo, bar,baz')), frozenset(['foo', 'bar', 'baz']))
        self.assertEqual(f.to_python(' foo , foo '), ['foo'])
        self.assertEqual(frozenset(f.to_python('foo,, bar,')), frozenset(['foo', 'bar']))
        self.assertEqual(frozenset(f.to_python(',foo, \t , bar')), frozenset(['foo', 'bar']))

    def test_clean(self):
        "Test the 'clean' validation."
        from postman.fields import CommaSeparatedUserField
        f = CommaSeparatedUserField(required=False)
        self.assertEqual(f.clean(''), [])
        self.assertEqual(f.clean('foo'), [self.user1])
        self.assertEqual(frozenset(f.clean('foo, bar')), frozenset([self.user1, self.user2]))
        # 'intruder' is not a username
        self.assertRaises(ValidationError, f.clean, 'foo, intruder, bar')
        # only active users are considered
        self.user1.is_active = False
        self.user1.save()
        self.assertRaises(ValidationError, f.clean, 'foo, bar')

    def test_user_filter(self):
        "Test the 'user_filter' argument."
        from postman.fields import CommaSeparatedUserField
        # filter returning None accepts the user
        f = CommaSeparatedUserField(user_filter=lambda u: None)
        self.assertEqual(frozenset(f.clean('foo, bar')), frozenset([self.user1, self.user2]))
        # no reason
        f = CommaSeparatedUserField(user_filter=lambda u: '' if u == self.user1 else None)
        self.assertRaises(ValidationError, f.clean, 'foo, bar')
        # with reason
        f = CommaSeparatedUserField(user_filter=lambda u: 'some reason' if u == self.user1 else None)
        self.assertRaises(ValidationError, f.clean, 'foo, bar')

    def test_min(self):
        "Test the 'min' argument."
        from postman.fields import CommaSeparatedUserField
        f = CommaSeparatedUserField(required=False, min=1)
        self.assertEqual(f.clean(''), [])  # 'min' is not enforced on an empty optional field
        f = CommaSeparatedUserField(min=1)
        self.assertEqual(f.clean('foo'), [self.user1])
        f = CommaSeparatedUserField(min=2)
        self.assertEqual(frozenset(f.clean('foo, bar')), frozenset([self.user1, self.user2]))
        self.assertRaises(ValidationError, f.clean, 'foo')

    def test_max(self):
        "Test the 'max' argument."
        from postman.fields import CommaSeparatedUserField
        f = CommaSeparatedUserField(max=1)
        self.assertEqual(f.clean('foo'), [self.user1])
        self.assertRaises(ValidationError, f.clean, 'foo, bar')


class MessageManagerTest(BaseTest):
    """
    Test the Message manager.
    """
    def test_num_queries(self):
        "Test the number of queries."
        # not available in django v1.2.3
        if not hasattr(self, 'assertNumQueries'):
            return
        pk = self.c12().pk
        self.c21()
        self.c12(sender_archived=True, recipient_deleted_at=now())
        self.c21(sender_archived=True, recipient_deleted_at=now())
        # accessing the related users inside the context proves they were
        # fetched by the single query (select_related), not lazily afterwards
        for u in (self.user1, self.user2):
            with self.assertNumQueries(1):
                msgs = list(Message.objects.sent(u, option=OPTION_MESSAGES))
                user = msgs[0].recipient
            with self.assertNumQueries(1):
                msgs = list(Message.objects.inbox(u, option=OPTION_MESSAGES))
                user = msgs[0].sender
            with self.assertNumQueries(1):
                msgs = list(Message.objects.archives(u, option=OPTION_MESSAGES))
                user = msgs[0].sender
                user = msgs[0].recipient
            with self.assertNumQueries(1):
                msgs = list(Message.objects.trash(u, option=OPTION_MESSAGES))
                user = msgs[0].sender
                user = msgs[0].recipient
            with self.assertNumQueries(1):
                msgs = list(Message.objects.thread(u, Q(pk=pk)))
                user = msgs[0].sender
                user = msgs[0].recipient

    def test(self):
        """
        user1       user2
        ----------- -----------
        read repl   arch del   arch del
---X x ------>| x x |<------| x x |------> ------> ------> x <------ ...--- x X--- """ m1 = self.c12(moderation_status=STATUS_PENDING) m2 = self.c12(moderation_status=STATUS_REJECTED, recipient_deleted_at=now()) m3 = self.c12() m3.read_at, m3.thread = now(), m3 m4 = self.c21(parent=m3, thread=m3.thread) m3.replied_at = m4.sent_at; m3.save() m4.read_at = now() m5 = self.c12(parent=m4, thread=m4.thread) m4.replied_at = m5.sent_at; m4.save() m6 = self.c12() m7 = self.c12() m7.read_at = now(); m7.save() m8 = self.c21() m9 = self.c21(moderation_status=STATUS_PENDING) m10 = self.c21(moderation_status=STATUS_REJECTED, recipient_deleted_at=now()) def pk(x): return x.pk def pk_cnt(x): return (x.pk, x.count) self.assertEqual(Message.objects.count(), 10) self.assertEqual(Message.objects.inbox_unread_count(self.user1), 1) self.assertEqual(Message.objects.inbox_unread_count(self.user2), 2) self.assertEqual(self.user1.sent_messages.count(), 6) self.assertEqual(self.user1.received_messages.count(), 4) self.assertEqual(self.user2.sent_messages.count(), 4) self.assertEqual(self.user2.received_messages.count(), 6) self.assertEqual(set(m3.child_messages.all()), set([m3,m4,m5])) self.assertEqual(list(m3.next_messages.all()), [m4]) self.assertEqual(m3.get_replies_count(), 1) self.assertEqual(list(m4.next_messages.all()), [m5]) self.assertEqual(m4.get_replies_count(), 1) self.assertEqual(m5.get_replies_count(), 0) # by messages self.assertQuerysetEqual(Message.objects.sent(self.user1, option=OPTION_MESSAGES), [m7.pk,m6.pk,m5.pk,m3.pk,m2.pk,m1.pk], transform=pk) self.assertQuerysetEqual(Message.objects.sent(self.user2, option=OPTION_MESSAGES), [m10.pk,m9.pk,m8.pk,m4.pk], transform=pk) self.assertQuerysetEqual(Message.objects.inbox(self.user1, option=OPTION_MESSAGES), [m8.pk,m4.pk], transform=pk) self.assertQuerysetEqual(Message.objects.inbox(self.user2, option=OPTION_MESSAGES), [m7.pk,m6.pk,m5.pk,m3.pk], transform=pk) self.assertQuerysetEqual(Message.objects.archives(self.user1, 
option=OPTION_MESSAGES), [], transform=pk) self.assertQuerysetEqual(Message.objects.archives(self.user2, option=OPTION_MESSAGES), [], transform=pk) self.assertQuerysetEqual(Message.objects.trash(self.user1, option=OPTION_MESSAGES), [], transform=pk) self.assertQuerysetEqual(Message.objects.trash(self.user2, option=OPTION_MESSAGES), [], transform=pk) # by conversations self.assertQuerysetEqual(Message.objects.sent(self.user1), [(m7.pk,0),(m6.pk,0),(m5.pk,2),(m2.pk,0),(m1.pk,0)], transform=pk_cnt) self.assertQuerysetEqual(Message.objects.sent(self.user2), [(m10.pk,0),(m9.pk,0),(m8.pk,0),(m4.pk,1)], transform=pk_cnt) self.assertQuerysetEqual(Message.objects.inbox(self.user1), [(m8.pk,0),(m4.pk,1)], transform=pk_cnt) self.assertQuerysetEqual(Message.objects.inbox(self.user2), [(m7.pk,0),(m6.pk,0),(m5.pk,2)], transform=pk_cnt) self.assertQuerysetEqual(Message.objects.thread(self.user1, Q(thread=m3.pk)), [m3.pk,m4.pk,m5.pk], transform=pk) self.assertQuerysetEqual(Message.objects.thread(self.user1, Q(pk=m4.pk)), [m4.pk], transform=pk) self.assertQuerysetEqual(Message.objects.thread(self.user2, Q(thread=m3.pk)), [m3.pk,m4.pk,m5.pk], transform=pk) self.assertQuerysetEqual(Message.objects.thread(self.user2, Q(pk=m4.pk)), [m4.pk], transform=pk) # mark as archived and deleted """ user1 user2 ----------- ----------- read repl arch del arch del X ---... 
X ---X x X X ------>| x x |<------| X X x x |------> X ------> X ------> X x X <------ ...--- X x X--- X """ m1.sender_archived = True; m1.save() m2.sender_deleted_at = now(); m2.save() m3.sender_archived, m3.sender_deleted_at = True, now(); m3.save() m4.sender_archived, m4.sender_deleted_at = True, now(); m4.save() m6.sender_archived, m6.recipient_archived = True, True; m6.save() m7.recipient_deleted_at = now(); m7.save() m8.recipient_deleted_at = now(); m8.save() m9.sender_deleted_at = now(); m9.save() m10.sender_archived = True; m10.save() self.assertEqual(Message.objects.inbox_unread_count(self.user1), 0) self.assertEqual(Message.objects.inbox_unread_count(self.user2), 1) # by messages self.assertQuerysetEqual(Message.objects.archives(self.user1, option=OPTION_MESSAGES), [m6.pk,m1.pk], transform=pk) self.assertQuerysetEqual(Message.objects.archives(self.user2, option=OPTION_MESSAGES), [m10.pk,m6.pk], transform=pk) self.assertQuerysetEqual(Message.objects.trash(self.user1, option=OPTION_MESSAGES), [m8.pk,m3.pk,m2.pk], transform=pk) self.assertQuerysetEqual(Message.objects.trash(self.user2, option=OPTION_MESSAGES), [m9.pk,m7.pk,m4.pk], transform=pk) self.assertQuerysetEqual(Message.objects.sent(self.user1, option=OPTION_MESSAGES), [m7.pk,m5.pk], transform=pk) self.assertQuerysetEqual(Message.objects.sent(self.user2, option=OPTION_MESSAGES), [m8.pk], transform=pk) self.assertQuerysetEqual(Message.objects.inbox(self.user1, option=OPTION_MESSAGES), [m4.pk], transform=pk) self.assertQuerysetEqual(Message.objects.inbox(self.user2, option=OPTION_MESSAGES), [m5.pk,m3.pk], transform=pk) # by conversations self.assertQuerysetEqual(Message.objects.sent(self.user1), [(m7.pk,0),(m5.pk,1)], transform=pk_cnt) self.assertQuerysetEqual(Message.objects.sent(self.user2), [(m8.pk,0)], transform=pk_cnt) self.assertQuerysetEqual(Message.objects.inbox(self.user1), [(m4.pk,1)], transform=pk_cnt) self.assertQuerysetEqual(Message.objects.inbox(self.user2), [(m5.pk,2)], transform=pk_cnt) 
self.assertQuerysetEqual(Message.objects.thread(self.user1, Q(thread=m3.pk)), [m3.pk,m4.pk,m5.pk], transform=pk) self.assertQuerysetEqual(Message.objects.thread(self.user1, Q(pk=m4.pk)), [m4.pk], transform=pk) self.assertQuerysetEqual(Message.objects.thread(self.user2, Q(thread=m3.pk)), [m3.pk,m4.pk,m5.pk], transform=pk) self.assertQuerysetEqual(Message.objects.thread(self.user2, Q(pk=m4.pk)), [m4.pk], transform=pk) # mark as read self.assertEqual(Message.objects.set_read(self.user2, Q(thread=m3.pk)), 1) m = Message.objects.get(pk=m5.pk) self.check_status(m, status=STATUS_ACCEPTED, is_new=False, parent=m4, thread=m3) self.check_now(m.read_at) self.assertEqual(Message.objects.set_read(self.user2, Q(pk=m6.pk)), 1) m = Message.objects.get(pk=m6.pk) self.check_status(m, status=STATUS_ACCEPTED, is_new=False, sender_archived=True, recipient_archived=True) self.check_now(m.read_at) self.assertEqual(Message.objects.set_read(self.user1, Q(pk=m8.pk)), 1) m = Message.objects.get(pk=m8.pk) self.check_status(m, status=STATUS_ACCEPTED, is_new=False, recipient_deleted_at=True) self.check_now(m.read_at) class MessageTest(BaseTest): """ Test the Message model. """ def check_parties(self, m, s=None, r=None, email=''): "Check party related properties." 
obfuscated_email_re = re.compile('^[0-9a-f]{4}..[0-9a-f]{4}@domain$') m.sender, m.recipient, m.email = s, r, email if s or email: m.clean() else: self.assertRaises(ValidationError, m.clean) self.assertEqual(m.admin_sender(), s.get_username() if s else '<'+email+'>') self.assertEqual(m.clear_sender, m.admin_sender()) if s: self.assertEqual(m.obfuscated_sender, s.get_username()) elif email: self.assertTrue(obfuscated_email_re.match(m.obfuscated_sender)) else: self.assertEqual(m.obfuscated_sender, '') self.assertEqual(m.admin_recipient(), r.get_username() if r else '<'+email+'>') self.assertEqual(m.clear_recipient, m.admin_recipient()) if r: self.assertEqual(m.obfuscated_recipient, r.get_username()) elif email: self.assertTrue(obfuscated_email_re.match(m.obfuscated_recipient)) else: self.assertEqual(m.obfuscated_recipient, '') def test_parties(self): "Test sender/recipient/email." m = Message() self.check_parties(m) self.check_parties(m, s=self.user1) self.check_parties(m, r=self.user2) self.check_parties(m, s=self.user1, r=self.user2) self.check_parties(m, s=self.user1, email=self.email ) self.check_parties(m, email=self.email, r=self.user2) def test_representation(self): "Test the message representation as text." m = Message(sender=self.user1, recipient=self.user2) m.subject = 'one two three four last' self.assertEqual(str(m), 'foo>bar:one two three four last') m.subject = 'one two three four last over' self.assertEqual(str(m), 'foo>bar:one two three four last...') def test_status(self): "Test status." 
m = Message.objects.create(subject='s') self.check_status(m) m = Message.objects.create(subject='s', moderation_status=STATUS_REJECTED) self.check_status(m, status=STATUS_REJECTED) m = Message.objects.create(subject='s', moderation_status=STATUS_ACCEPTED) self.check_status(m, status=STATUS_ACCEPTED) m = Message.objects.create(subject='s', read_at=now()) self.check_status(m, is_new=False) m = Message.objects.create(subject='s', replied_at=now()) self.check_status(m, is_replied=True) def test_moderated_count(self): "Test 'moderated_messages' count." msg = Message.objects.create(subject='s', moderation_status=STATUS_ACCEPTED, moderation_date=now(), moderation_by=self.user1) msg.save() self.assertEqual(list(self.user1.moderated_messages.all()), [msg]) def test_moderation_from_pending(self): "Test moderation management when leaving 'pending' status." msg = Message.objects.create(subject='s') # pending -> pending: nothing changes m = copy.copy(msg) m.clean_moderation(STATUS_PENDING, self.user1) self.check_status(m) # pending -> rejected m = copy.copy(msg) m.moderation_status = STATUS_REJECTED m.clean_moderation(STATUS_PENDING, self.user1) # one try with moderator self.check_status(m, status=STATUS_REJECTED, moderation_date=True, moderation_by=self.user1, recipient_deleted_at=True) self.check_now(m.moderation_date) self.check_now(m.recipient_deleted_at) # pending -> accepted m = copy.copy(msg) m.moderation_status = STATUS_ACCEPTED m.clean_moderation(STATUS_PENDING) # one try without moderator self.check_status(m, status=STATUS_ACCEPTED, moderation_date=True) self.check_now(m.moderation_date) def test_moderation_from_rejected(self): "Test moderation management when leaving 'rejected' status." 
date_in_past = now() - timedelta(days=2) # any value, just to avoid now() reason = 'some good reason' msg = Message.objects.create(subject='s', moderation_status=STATUS_REJECTED, moderation_date=date_in_past, moderation_by=self.user1, moderation_reason=reason, recipient_deleted_at=date_in_past) # rejected -> rejected: nothing changes m = copy.copy(msg) m.clean_moderation(STATUS_REJECTED, self.user2) self.check_status(m, status=STATUS_REJECTED, moderation_date=date_in_past, moderation_by=self.user1, moderation_reason=reason, recipient_deleted_at=date_in_past) # rejected -> pending m = copy.copy(msg) m.moderation_status = STATUS_PENDING m.clean_moderation(STATUS_REJECTED) # one try without moderator self.check_status(m, status=STATUS_PENDING, moderation_date=True, moderation_reason=reason, recipient_deleted_at=False) self.check_now(m.moderation_date) # rejected -> accepted m = copy.copy(msg) m.moderation_status = STATUS_ACCEPTED m.clean_moderation(STATUS_REJECTED, self.user2) # one try with moderator self.check_status(m, status=STATUS_ACCEPTED, moderation_date=True, moderation_by=self.user2, moderation_reason=reason, recipient_deleted_at=False) self.check_now(m.moderation_date) def test_moderation_from_accepted(self): "Test moderation management when leaving 'accepted' status." 
date_in_past = now() - timedelta(days=2) # any value, just to avoid now() msg = Message.objects.create(subject='s', moderation_status=STATUS_ACCEPTED, moderation_date=date_in_past, moderation_by=self.user1, recipient_deleted_at=date_in_past) # accepted -> accepted: nothing changes m = copy.copy(msg) m.clean_moderation(STATUS_ACCEPTED, self.user2) self.check_status(m, status=STATUS_ACCEPTED, moderation_date=date_in_past, moderation_by=self.user1, recipient_deleted_at=date_in_past) # accepted -> pending m = copy.copy(msg) m.moderation_status = STATUS_PENDING m.clean_moderation(STATUS_ACCEPTED, self.user2) # one try with moderator self.check_status(m, status=STATUS_PENDING, moderation_date=True, moderation_by=self.user2, recipient_deleted_at=date_in_past) self.check_now(m.moderation_date) # accepted -> rejected m = copy.copy(msg) m.moderation_status = STATUS_REJECTED m.clean_moderation(STATUS_ACCEPTED) # one try without moderator self.check_status(m, status=STATUS_REJECTED, moderation_date=True, recipient_deleted_at=True) self.check_now(m.moderation_date) self.check_now(m.recipient_deleted_at) def test_visitor(self): "Test clean_for_visitor()." 
date_in_past = now() - timedelta(days=2) # any value, just to avoid now() # as the sender m = Message.objects.create(subject='s', recipient=self.user1) m.clean_for_visitor() self.check_status(m, sender_deleted_at=True) self.check_now(m.sender_deleted_at) # as the recipient msg = Message.objects.create(subject='s', sender=self.user1) # pending m = copy.copy(msg) m.read_at=date_in_past m.recipient_deleted_at=date_in_past m.clean_for_visitor() self.check_status(m, recipient_deleted_at=False) # rejected m = copy.copy(msg) m.moderation_status = STATUS_REJECTED m.read_at=date_in_past m.recipient_deleted_at=date_in_past m.clean_for_visitor() self.check_status(m, status=STATUS_REJECTED, recipient_deleted_at=date_in_past) # accepted m = copy.copy(msg) m.moderation_status = STATUS_ACCEPTED m.clean_for_visitor() self.check_status(m, status=STATUS_ACCEPTED, is_new=False, recipient_deleted_at=True) self.check_now(m.read_at) self.check_now(m.recipient_deleted_at) def test_update_parent(self): "Test update_parent()." 
parent = Message.objects.create(subject='s', sender=self.user1, recipient=self.user2, moderation_status=STATUS_ACCEPTED) parent.thread = parent parent.save() # any previous rejected reply should not interfere rejected_reply = Message.objects.create(subject='s', sender=self.user2, recipient=self.user1, parent=parent, thread=parent.thread, moderation_status=STATUS_REJECTED) # any previous pending reply should not interfere pending_reply = Message.objects.create(subject='s', sender=self.user2, recipient=self.user1, parent=parent, thread=parent.thread, moderation_status=STATUS_PENDING) reply = Message.objects.create(subject='s', sender=self.user2, recipient=self.user1, parent=parent, thread=parent.thread) # the reply is accepted r = copy.deepcopy(reply) r.moderation_status = STATUS_ACCEPTED # accepted -> accepted: no change r.update_parent(STATUS_ACCEPTED) self.check_status(r.parent, status=STATUS_ACCEPTED, thread=parent) # pending -> accepted: parent is replied r.update_parent(STATUS_PENDING) p = Message.objects.get(pk=parent.pk) # better to ask the DB to check the save() self.check_status(p, status=STATUS_ACCEPTED, thread=parent, is_replied=True) self.assertEqual(p.replied_at.timetuple(), r.sent_at.timetuple()) # mysql doesn't store microseconds # rejected -> accepted: same as pending -> accepted # so check here the acceptance of an anterior date # note: use again the some object for convenience but another reply is more realistic r.sent_at = r.sent_at - timedelta(days=1) r.update_parent(STATUS_REJECTED) p = Message.objects.get(pk=parent.pk) self.check_status(p, status=STATUS_ACCEPTED, thread=parent, is_replied=True) self.assertEqual(p.replied_at.timetuple(), r.sent_at.timetuple()) # a reply is withdrawn and no other reply r = copy.deepcopy(reply) r.parent.replied_at = r.sent_at r.moderation_status = STATUS_REJECTED # could be STATUS_PENDING # rejected -> rejected: no change. 
In real case, parent.replied_at would be already empty r.update_parent(STATUS_REJECTED) self.check_status(r.parent, status=STATUS_ACCEPTED, thread=parent, is_replied=True) # pending -> rejected: no change. In real case, parent.replied_at would be already empty r.update_parent(STATUS_PENDING) self.check_status(r.parent, status=STATUS_ACCEPTED, thread=parent, is_replied=True) # accepted -> rejected: parent is no more replied r.update_parent(STATUS_ACCEPTED) p = Message.objects.get(pk=parent.pk) self.check_status(p, status=STATUS_ACCEPTED, thread=parent) # note: accepted -> rejected, with the existence of another suitable reply # is covered in the accepted -> pending case # a reply is withdrawn but there is another suitable reply other_reply = Message.objects.create(subject='s', sender=self.user2, recipient=self.user1, parent=parent, thread=parent.thread, moderation_status=STATUS_ACCEPTED) r = copy.deepcopy(reply) r.parent.replied_at = r.sent_at r.moderation_status = STATUS_PENDING # could be STATUS_REJECTED # pending -> pending: no change. In real case, parent.replied_at would be from another reply object r.update_parent(STATUS_PENDING) self.check_status(r.parent, status=STATUS_ACCEPTED, thread=parent, is_replied=True) # rejected -> pending: no change. 
In real case, parent.replied_at would be from another reply object r.update_parent(STATUS_REJECTED) self.check_status(r.parent, status=STATUS_ACCEPTED, thread=parent, is_replied=True) # accepted -> pending: parent is still replied but by another object r.update_parent(STATUS_ACCEPTED) p = Message.objects.get(pk=parent.pk) self.check_status(p, status=STATUS_ACCEPTED, thread=parent, is_replied=True) self.assertEqual(p.replied_at.timetuple(), other_reply.sent_at.timetuple()) # note: accepted -> pending, with no other suitable reply # is covered in the accepted -> rejected case def check_notification(self, m, mail_number, email=None, is_auto_moderated=True, notice_label=None): "Check number of mails, recipient, and notice creation." m.notify_users(STATUS_PENDING, Site.objects.get_current() if Site._meta.installed else None, is_auto_moderated) self.assertEqual(len(mail.outbox), mail_number) if mail_number: self.assertEqual(mail.outbox[0].to, [email]) from postman.utils import notification if notification and notice_label: if hasattr(notification, "Notice"): # exists for django-notification 0.2.0, but no more in 1.0 notice = notification.Notice.objects.get() self.assertEqual(notice.notice_type.label, notice_label) def test_notification_rejection_visitor(self): "Test notify_users() for rejection, sender is a visitor." m = Message.objects.create(subject='s', moderation_status=STATUS_REJECTED, email=self.email, recipient=self.user2) self.check_notification(m, 1, self.email) def test_notification_rejection_user(self): "Test notify_users() for rejection, sender is a User." m = Message.objects.create(subject='s', moderation_status=STATUS_REJECTED, sender=self.user1, recipient=self.user2) self.check_notification(m, 1, self.user1.email, is_auto_moderated=False, notice_label='postman_rejection') def test_notification_rejection_user_auto_moderated(self): "Test notify_users() for rejection, sender is a User, and is alerted online." 
m = Message.objects.create(subject='s', moderation_status=STATUS_REJECTED, sender=self.user1, recipient=self.user2) self.check_notification(m, 0, is_auto_moderated=True) def test_notification_rejection_user_inactive(self): "Test notify_users() for rejection, sender is a User, but must be active." m = Message.objects.create(subject='s', moderation_status=STATUS_REJECTED, sender=self.user1, recipient=self.user2) self.user1.is_active = False self.check_notification(m, 0, is_auto_moderated=False, notice_label='postman_rejection') def test_notification_rejection_user_disable(self): "Test notify_users() for rejection, sender is a User, but emailing is disabled." m = Message.objects.create(subject='s', moderation_status=STATUS_REJECTED, sender=self.user1, recipient=self.user2) settings.POSTMAN_DISABLE_USER_EMAILING = True settings.POSTMAN_NOTIFIER_APP = None self.reload_modules() self.check_notification(m, 0, is_auto_moderated=False) def test_notification_acceptance_visitor(self): "Test notify_users() for acceptance, recipient is a visitor." m = Message.objects.create(subject='s', moderation_status=STATUS_ACCEPTED, sender=self.user1, email=self.email) self.check_notification(m, 1, self.email) def test_notification_acceptance_user(self): "Test notify_users() for acceptance, recipient is a User." m = Message.objects.create(subject='s', moderation_status=STATUS_ACCEPTED, sender=self.user1, recipient=self.user2) self.check_notification(m, 1, self.user2.email, notice_label='postman_message') def test_notification_acceptance_user_inactive(self): "Test notify_users() for acceptance, recipient is a User, but must be active." m = Message.objects.create(subject='s', moderation_status=STATUS_ACCEPTED, sender=self.user1, recipient=self.user2) self.user2.is_active = False self.check_notification(m, 0, notice_label='postman_message') def test_notification_acceptance_user_disable(self): "Test notify_users() for acceptance, recipient is a User, but emailing is disabled." 
m = Message.objects.create(subject='s', moderation_status=STATUS_ACCEPTED, sender=self.user1, recipient=self.user2) settings.POSTMAN_DISABLE_USER_EMAILING = True settings.POSTMAN_NOTIFIER_APP = None self.reload_modules() self.check_notification(m, 0, notice_label='postman_message') def test_notification_acceptance_reply(self): "Test notify_users() for acceptance, for a reply, recipient is a User." p = Message.objects.create(subject='s', moderation_status=STATUS_ACCEPTED, sender=self.user2, recipient=self.user1) m = Message.objects.create(subject='s', moderation_status=STATUS_ACCEPTED, sender=self.user1, recipient=self.user2, parent=p, thread=p) self.check_notification(m, 1, self.user2.email, notice_label='postman_reply') def test_dates(self): "Test set_dates(), get_dates()." m = Message() set = now(), now(), now() m.set_dates(*set) get = m.get_dates() self.assertEqual(get, set) def test_moderation(self): "Test set_moderation(), get_moderation()." m = Message() set = STATUS_ACCEPTED, self.user1.pk, now(), 'some reason' m.set_moderation(*set) get = m.get_moderation() self.assertEqual(get, set) def check_auto_moderation(self, msg, seq, default): "Check auto-moderation results." for mod, result in seq: m = copy.copy(msg) m.auto_moderate(mod) changes = {} if result is True: changes['status'] = STATUS_ACCEPTED elif result is None: changes['status'] = default else: changes['status'] = STATUS_REJECTED changes['moderation_reason'] = result m.sent_at = now() # refresh, as we recycle the same base message self.check_status(m, **changes) def test_auto_moderation(self): "Test auto-moderation function combination." 
msg = Message.objects.create(subject='s') def moderate_as_none(m): return None def moderate_as_true(m): return True def moderate_as_false(m): return False def moderate_as_0(m): return 0 def moderate_as_100(m): return 100 def moderate_as_50(m): return 50 def moderate_as_49_default_reason(m): return 49 moderate_as_49_default_reason.default_reason = 'moderate_as_49 default_reason' def moderate_as_49_with_reason(m): return (49, 'moderate_as_49 with_reason') moderate_as_49_with_reason.default_reason = 'is not used' def moderate_as_1(m): return (1, 'moderate_as_1') def moderate_as_1_no_reason(m): return (1, ' ') def moderate_as_2(m): return (2, 'moderate_as_2') def moderate_as_98(m): return 98 moderate_as_98.default_reason = 'useless; never used' def moderate_badly_as_negative(m): return -1 def moderate_badly_as_too_high(m): return 101 def moderate_as_0_with_reason(m): return (0, 'moderate_as_0 with_reason') def invalid_moderator_1(m): return (0, ) def invalid_moderator_2(m): return (0, 'reason', 'extra') for mod in [invalid_moderator_1, invalid_moderator_2]: m = copy.copy(msg) self.assertRaises(ValueError, m.auto_moderate, mod) seq = ( # no moderator, no valid rating, or moderator is unable to state, default applies ([], None), (moderate_badly_as_negative, None), (moderate_badly_as_too_high, None), (moderate_as_none, None), # firm decision (moderate_as_false, ''), (moderate_as_0, ''), (moderate_as_true, True), (moderate_as_100, True), # round to up (moderate_as_50, True), # reasons (moderate_as_49_default_reason, moderate_as_49_default_reason.default_reason), (moderate_as_49_with_reason, 'moderate_as_49 with_reason'), # priority is left to right ([moderate_as_none, moderate_as_false, moderate_as_true], ''), ([moderate_as_none, moderate_as_true, moderate_as_false], True), # keep only reasons for ratings below 50, non empty or whitespace ([moderate_as_1, moderate_as_98], 'moderate_as_1'), ([moderate_as_1, moderate_as_2, moderate_as_50], 'moderate_as_1, moderate_as_2'), 
([moderate_as_1, moderate_as_1_no_reason, moderate_as_2], 'moderate_as_1, moderate_as_2'), # a firm reject imposes its reason ([moderate_as_1, moderate_as_2, moderate_as_50, moderate_as_0_with_reason], 'moderate_as_0 with_reason'), # neutral or invalid moderators do not count in the average ([moderate_as_50, moderate_as_none, moderate_badly_as_negative, moderate_badly_as_too_high], True), ) # no default auto moderation # settings.POSTMAN_AUTO_MODERATE_AS = None self.check_auto_moderation(msg, seq, STATUS_PENDING) # default is: accepted settings.POSTMAN_AUTO_MODERATE_AS = True self.check_auto_moderation(msg, seq, STATUS_ACCEPTED) # default is: rejected settings.POSTMAN_AUTO_MODERATE_AS = False self.check_auto_moderation(msg, seq, STATUS_REJECTED) class PendingMessageManagerTest(BaseTest): """ Test the PendingMessage manager. """ def test(self): msg1 = self.create() msg2 = self.create(moderation_status=STATUS_REJECTED) msg3 = self.create(moderation_status=STATUS_ACCEPTED) msg4 = self.create() self.assertQuerysetEqual(PendingMessage.objects.all(), [msg4.pk, msg1.pk], transform=lambda x: x.pk) class PendingMessageTest(BaseTest): """ Test the PendingMessage model. """ def test(self): m = PendingMessage() self.assertTrue(m.is_pending()) m.set_accepted() self.assertTrue(m.is_accepted()) m.set_rejected() self.assertTrue(m.is_rejected()) class FiltersTest(BaseTest): """ Test the filters. """ def check_sub(self, x, y, value): t = Template("{% load postman_tags %}{% with "+x+"|sub:"+y+" as var %}{{ var }}{% endwith %}") self.assertEqual(t.render(Context({})), value) def test_sub(self): "Test '|sub'." 
self.check_sub('6', '2', '4') self.check_sub('6', "'X'", '6') self.check_sub("'X'", '2', 'X') def check_or_me(self, x, value, user=None, m=None): t = Template("{% load postman_tags %}{{ "+x+"|or_me:user }}") # do not load i18n to be able to check the untranslated pattern self.assertEqual(t.render(Context({'user': user or AnonymousUser(), 'message': m})), value) def test_or_me(self): "Test '|or_me'." self.check_or_me("'foo'", 'foo') self.check_or_me("'foo'", '&lt;me&gt;', self.user1) self.check_or_me("'bar'", 'bar', self.user1) self.check_or_me("user", '&lt;me&gt;', self.user1) m = self.c12() self.check_or_me("message.obfuscated_sender", '&lt;me&gt;', self.user1, m=m) self.check_or_me("message.obfuscated_recipient", 'bar', self.user1, m=m) settings.POSTMAN_SHOW_USER_AS = 'email' self.check_or_me("message.obfuscated_sender", '&lt;me&gt;', self.user1, m=m) self.check_or_me("message.obfuscated_recipient", 'bar@domain.com', self.user1, m=m) def check_compact_date(self, date, value, format='H:i,d b,d/m/y'): # use 'H', 'd', 'm' instead of 'G', 'j', 'n' because no strftime equivalents t = Template('{% load postman_tags %}{{ date|compact_date:"'+format+'" }}') self.assertEqual(t.render(Context({'date': date})), value) def test_compact_date(self): "Test '|compact_date'." 
dt = now() try: from django.utils.timezone import localtime # Django 1.4 aware datetimes # (1.4) template/base.py/_render_value_in_context() dt = localtime(dt) except ImportError: pass # (1.2) template/__init__.py/_render_value_in_context() # (1.3) template/base.py/_render_value_in_context() # (1.6) template/base.py/render_value_in_context() default = force_text(localize(dt)) self.check_compact_date(dt, default, format='') self.check_compact_date(dt, default, format='one') self.check_compact_date(dt, default, format='one,two') self.check_compact_date(dt, dt.strftime('%H:%M')) dt2 = dt - timedelta(days=1) # little fail: do not work on Jan, 1st, because the year changes as well self.check_compact_date(dt2, dt2.strftime('%d %b').lower()) # filter's 'b' is lowercase dt2 = dt - timedelta(days=365) self.check_compact_date(dt2, dt2.strftime('%d/%m/%y')) class TagsTest(BaseTest): """ Test the template tags. """ def check_postman_unread(self, value, user=None, asvar=''): t = Template("{% load postman_tags %}{% postman_unread " + asvar +" %}") ctx = Context({'user': user} if user else {}) self.assertEqual(t.render(ctx), value) return ctx def test_postman_unread(self): "Test 'postman_unread'." 
self.check_postman_unread('') self.check_postman_unread('', AnonymousUser()) self.check_postman_unread('0', self.user1) Message.objects.create(subject='s', recipient=self.user1) self.check_postman_unread('0', self.user1) Message.objects.create(subject='s', recipient=self.user1, moderation_status=STATUS_ACCEPTED) self.check_postman_unread('1', self.user1) ctx = self.check_postman_unread('', self.user1, 'as var') self.assertEqual(ctx['var'], 1) self.assertRaises(TemplateSyntaxError, self.check_postman_unread, '', self.user1, 'as var extra') self.assertRaises(TemplateSyntaxError, self.check_postman_unread, '', self.user1, 'As var') def check_order_by(self, keyword, value_list, context=None): t = Template("{% load postman_tags %}{% postman_order_by " + keyword +" %}") r = t.render(Context({'gets': QueryDict(context)} if context else {})) self.assertEqual(r[0], '?') self.assertEqual(set(r[1:].split('&')), set([k+'='+v for k, v in value_list])) def test_order_by(self): "Test 'postman_order_by'." for k, v in ORDER_BY_MAPPER.items(): self.check_order_by(k, [(ORDER_BY_KEY, v)]) self.check_order_by('subject', [(ORDER_BY_KEY, 's')], ORDER_BY_KEY+'=foo') self.check_order_by('subject', [(ORDER_BY_KEY, 'S')], ORDER_BY_KEY+'=s') self.check_order_by('subject', [(ORDER_BY_KEY, 's'), ('page', '12')], 'page=12') self.check_order_by('subject', [('foo', 'bar'), (ORDER_BY_KEY, 's'), ('baz', 'qux')], 'foo=bar&'+ORDER_BY_KEY+'=S&baz=qux') self.assertRaises(TemplateSyntaxError, self.check_order_by, '', None) self.assertRaises(TemplateSyntaxError, self.check_order_by, 'subject extra', None) self.assertRaises(TemplateSyntaxError, self.check_order_by, 'unknown', None) class UtilsTest(BaseTest): """ Test helper functions. """ def test_format_body(self): "Test format_body()." 
header = "\n\nfoo wrote:\n" footer = "\n" self.assertEqual(format_body(self.user1, "foo bar"), header+"> foo bar"+footer) self.assertEqual(format_body(self.user1, "foo bar", indent='|_'), header+"|_foo bar"+footer) self.assertEqual(format_body(self.user1, width=10, body="34 67 90"), header+"> 34 67 90"+footer) self.assertEqual(format_body(self.user1, width=10, body="34 67 901"), header+"> 34 67\n> 901"+footer) self.assertEqual(format_body(self.user1, width=10, body="> 34 67 901"), header+"> > 34 67 901"+footer) self.assertEqual(format_body(self.user1, width=10, body= "34 67\n" "\n" " \n" " .\n" "End"), header+"> 34 67\n" "> \n" "> \n" "> .\n" "> End"+footer) def test_format_subject(self): "Test format_subject()." self.assertEqual(format_subject("foo bar"), "Re: foo bar") self.assertEqual(format_subject("Re: foo bar"), "Re: foo bar") self.assertEqual(format_subject("rE: foo bar"), "rE: foo bar") def test_get_order_by(self): "Test get_order_by()." self.assertEqual(get_order_by({}), None) self.assertEqual(get_order_by({ORDER_BY_KEY: 'f'}), 'sender__{0}'.format(get_user_model().USERNAME_FIELD)) self.assertEqual(get_order_by({ORDER_BY_KEY: 'D'}), '-sent_at') def test_get_user_representation(self): "Test get_user_representation()." 
# no setting self.assertEqual(get_user_representation(self.user1), "foo") # a wrong setting settings.POSTMAN_SHOW_USER_AS = 'unknown_attribute' self.assertEqual(get_user_representation(self.user1), "foo") # a valid setting but an empty attribute settings.POSTMAN_SHOW_USER_AS = 'first_name' self.assertEqual(get_user_representation(self.user1), "foo") # a property name settings.POSTMAN_SHOW_USER_AS = 'email' self.assertEqual(get_user_representation(self.user1), "foo@domain.com") if not six.PY3: # avoid six.PY2, not available in six 1.2.0 settings.POSTMAN_SHOW_USER_AS = b'email' # usage on PY3 is nonsense self.assertEqual(get_user_representation(self.user1), "foo@domain.com") # a method name # can't use get_full_name(), an empty string in our case # get_absolute_url() doesn't exist anymore since Django 1.7 settings.POSTMAN_SHOW_USER_AS = 'natural_key' # avoid get_username(), already used for the default representation self.assertEqual(get_user_representation(self.user1), "(u'foo',)" if not six.PY3 else "('foo',)") # a function settings.POSTMAN_SHOW_USER_AS = lambda u: u.natural_key() self.assertEqual(get_user_representation(self.user1), "(u'foo',)" if not six.PY3 else "('foo',)") class ApiTest(BaseTest): """ Test the API functions. """ def check_message(self, m, subject='s', body='b', recipient_username='bar'): "Check some message properties." self.assertEqual(m.subject, subject) self.assertEqual(m.body, body) self.assertEqual(m.email, '') self.assertEqual(m.sender, self.user1) self.assertEqual(m.recipient.get_username(), recipient_username) def test_pm_broadcast(self): "Test the case of a single recipient." 
pm_broadcast(sender=self.user1, recipients=self.user2, subject='s', body='b') m = Message.objects.get() self.check_status(m, status=STATUS_ACCEPTED, moderation_date=True, sender_archived=True, sender_deleted_at=True) self.check_now(m.sender_deleted_at) self.check_now(m.moderation_date) self.check_message(m) self.assertEqual(len(mail.outbox), 1) def test_pm_broadcast_skip_notification(self): "Test the notification skipping." pm_broadcast(sender=self.user1, recipients=self.user2, subject='s', skip_notification=True) self.assertEqual(len(mail.outbox), 0) def test_pm_broadcast_multi(self): "Test the case of more than a single recipient." pm_broadcast(sender=self.user1, recipients=[self.user2, self.user3], subject='s', body='b') msgs = list(Message.objects.all()) self.check_message(msgs[0], recipient_username='baz') self.check_message(msgs[1]) def test_pm_write(self): "Test the basic minimal use." pm_write(sender=self.user1, recipient=self.user2, subject='s', body='b') m = Message.objects.get() self.check_status(m, status=STATUS_ACCEPTED, moderation_date=True) self.check_now(m.moderation_date) self.check_message(m) self.assertEqual(len(mail.outbox), 1) # notify the recipient def test_pm_write_skip_notification(self): "Test the notification skipping." pm_write(sender=self.user1, recipient=self.user2, subject='s', skip_notification=True) self.assertEqual(len(mail.outbox), 0) def test_pm_write_auto_archive(self): "Test the auto_archive parameter." pm_write(sender=self.user1, recipient=self.user2, subject='s', auto_archive=True) m = Message.objects.get() self.check_status(m, status=STATUS_ACCEPTED, moderation_date=True, sender_archived=True) def test_pm_write_auto_delete(self): "Test the auto_delete parameter." 
pm_write(sender=self.user1, recipient=self.user2, subject='s', auto_delete=True) m = Message.objects.get() self.check_status(m, status=STATUS_ACCEPTED, moderation_date=True, sender_deleted_at=True) self.check_now(m.sender_deleted_at) def test_pm_write_auto_moderators_accepted(self): "Test the auto_moderators parameter, moderate as accepted." pm_write(sender=self.user1, recipient=self.user2, subject='s', auto_moderators=lambda m: True) m = Message.objects.get() self.check_status(m, status=STATUS_ACCEPTED, moderation_date=True) def test_pm_write_auto_moderators_pending(self): "Test the auto_moderators parameter, no moderation decision is taken. Test the parameter as a list." pm_write(sender=self.user1, recipient=self.user2, subject='s', auto_moderators=[lambda m: None]) m = Message.objects.get() self.check_status(m) self.assertEqual(len(mail.outbox), 0) # no one to notify def test_pm_write_auto_moderators_rejected(self): "Test the auto_moderators parameter, moderate as rejected. Test the parameter as a tuple." pm_write(sender=self.user1, recipient=self.user2, subject='s', auto_moderators=(lambda m: False, )) m = Message.objects.get() self.check_status(m, status=STATUS_REJECTED, moderation_date=True, recipient_deleted_at=True) self.check_now(m.moderation_date) self.check_now(m.recipient_deleted_at) self.assertEqual(len(mail.outbox), 0) # sender is not notified in the case of auto moderation
gpl-3.0
sajuptpm/neutron-ipam
neutron/tests/unit/services/vpn/test_vpnaas_extension.py
11
23551
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # (c) Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: Swaminathan Vasudevan, Hewlett-Packard. import copy import mock from webob import exc from neutron.extensions import vpnaas from neutron.openstack.common import uuidutils from neutron.plugins.common import constants from neutron.tests.unit import test_api_v2 from neutron.tests.unit import test_api_v2_extension _uuid = uuidutils.generate_uuid _get_path = test_api_v2._get_path class VpnaasExtensionTestCase(test_api_v2_extension.ExtensionTestCase): fmt = 'json' def setUp(self): super(VpnaasExtensionTestCase, self).setUp() plural_mappings = {'ipsecpolicy': 'ipsecpolicies', 'ikepolicy': 'ikepolicies', 'ipsec_site_connection': 'ipsec-site-connections'} self._setUpExtension( 'neutron.extensions.vpnaas.VPNPluginBase', constants.VPN, vpnaas.RESOURCE_ATTRIBUTE_MAP, vpnaas.Vpnaas, 'vpn', plural_mappings=plural_mappings, use_quota=True) def test_ikepolicy_create(self): """Test case to create an ikepolicy.""" ikepolicy_id = _uuid() data = {'ikepolicy': {'name': 'ikepolicy1', 'description': 'myikepolicy1', 'auth_algorithm': 'sha1', 'encryption_algorithm': 'aes-128', 'phase1_negotiation_mode': 'main', 'lifetime': { 'units': 'seconds', 'value': 3600}, 'ike_version': 'v1', 'pfs': 'group5', 'tenant_id': _uuid()}} return_value = copy.copy(data['ikepolicy']) return_value.update({'id': ikepolicy_id}) instance = 
self.plugin.return_value instance.create_ikepolicy.return_value = return_value res = self.api.post(_get_path('vpn/ikepolicies', fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) instance.create_ikepolicy.assert_called_with(mock.ANY, ikepolicy=data) self.assertEqual(res.status_int, exc.HTTPCreated.code) res = self.deserialize(res) self.assertIn('ikepolicy', res) self.assertEqual(res['ikepolicy'], return_value) def test_ikepolicy_list(self): """Test case to list all ikepolicies.""" ikepolicy_id = _uuid() return_value = [{'name': 'ikepolicy1', 'auth_algorithm': 'sha1', 'encryption_algorithm': 'aes-128', 'pfs': 'group5', 'ike_version': 'v1', 'id': ikepolicy_id}] instance = self.plugin.return_value instance.get_ikepolicies.return_value = return_value res = self.api.get(_get_path('vpn/ikepolicies', fmt=self.fmt)) instance.get_ikepolicies.assert_called_with(mock.ANY, fields=mock.ANY, filters=mock.ANY) self.assertEqual(res.status_int, exc.HTTPOk.code) def test_ikepolicy_update(self): """Test case to update an ikepolicy.""" ikepolicy_id = _uuid() update_data = {'ikepolicy': {'name': 'ikepolicy1', 'encryption_algorithm': 'aes-256'}} return_value = {'name': 'ikepolicy1', 'auth_algorithm': 'sha1', 'encryption_algorithm': 'aes-256', 'phase1_negotiation_mode': 'main', 'lifetime': { 'units': 'seconds', 'value': 3600}, 'ike_version': 'v1', 'pfs': 'group5', 'tenant_id': _uuid(), 'id': ikepolicy_id} instance = self.plugin.return_value instance.update_ikepolicy.return_value = return_value res = self.api.put(_get_path('vpn/ikepolicies', id=ikepolicy_id, fmt=self.fmt), self.serialize(update_data)) instance.update_ikepolicy.assert_called_with(mock.ANY, ikepolicy_id, ikepolicy=update_data) self.assertEqual(res.status_int, exc.HTTPOk.code) res = self.deserialize(res) self.assertIn('ikepolicy', res) self.assertEqual(res['ikepolicy'], return_value) def test_ikepolicy_get(self): """Test case to get or show an ikepolicy.""" ikepolicy_id = _uuid() return_value = 
{'name': 'ikepolicy1', 'auth_algorithm': 'sha1', 'encryption_algorithm': 'aes-128', 'phase1_negotiation_mode': 'main', 'lifetime': { 'units': 'seconds', 'value': 3600}, 'ike_version': 'v1', 'pfs': 'group5', 'tenant_id': _uuid(), 'id': ikepolicy_id} instance = self.plugin.return_value instance.get_ikepolicy.return_value = return_value res = self.api.get(_get_path('vpn/ikepolicies', id=ikepolicy_id, fmt=self.fmt)) instance.get_ikepolicy.assert_called_with(mock.ANY, ikepolicy_id, fields=mock.ANY) self.assertEqual(res.status_int, exc.HTTPOk.code) res = self.deserialize(res) self.assertIn('ikepolicy', res) self.assertEqual(res['ikepolicy'], return_value) def test_ikepolicy_delete(self): """Test case to delete an ikepolicy.""" self._test_entity_delete('ikepolicy') def test_ipsecpolicy_create(self): """Test case to create an ipsecpolicy.""" ipsecpolicy_id = _uuid() data = {'ipsecpolicy': {'name': 'ipsecpolicy1', 'description': 'myipsecpolicy1', 'auth_algorithm': 'sha1', 'encryption_algorithm': 'aes-128', 'encapsulation_mode': 'tunnel', 'lifetime': { 'units': 'seconds', 'value': 3600}, 'transform_protocol': 'esp', 'pfs': 'group5', 'tenant_id': _uuid()}} return_value = copy.copy(data['ipsecpolicy']) return_value.update({'id': ipsecpolicy_id}) instance = self.plugin.return_value instance.create_ipsecpolicy.return_value = return_value res = self.api.post(_get_path('vpn/ipsecpolicies', fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) instance.create_ipsecpolicy.assert_called_with(mock.ANY, ipsecpolicy=data) self.assertEqual(res.status_int, exc.HTTPCreated.code) res = self.deserialize(res) self.assertIn('ipsecpolicy', res) self.assertEqual(res['ipsecpolicy'], return_value) def test_ipsecpolicy_list(self): """Test case to list an ipsecpolicy.""" ipsecpolicy_id = _uuid() return_value = [{'name': 'ipsecpolicy1', 'auth_algorithm': 'sha1', 'encryption_algorithm': 'aes-128', 'pfs': 'group5', 'id': ipsecpolicy_id}] instance = self.plugin.return_value 
instance.get_ipsecpolicies.return_value = return_value res = self.api.get(_get_path('vpn/ipsecpolicies', fmt=self.fmt)) instance.get_ipsecpolicies.assert_called_with(mock.ANY, fields=mock.ANY, filters=mock.ANY) self.assertEqual(res.status_int, exc.HTTPOk.code) def test_ipsecpolicy_update(self): """Test case to update an ipsecpolicy.""" ipsecpolicy_id = _uuid() update_data = {'ipsecpolicy': {'name': 'ipsecpolicy1', 'encryption_algorithm': 'aes-256'}} return_value = {'name': 'ipsecpolicy1', 'auth_algorithm': 'sha1', 'encryption_algorithm': 'aes-128', 'encapsulation_mode': 'tunnel', 'lifetime': { 'units': 'seconds', 'value': 3600}, 'transform_protocol': 'esp', 'pfs': 'group5', 'tenant_id': _uuid(), 'id': ipsecpolicy_id} instance = self.plugin.return_value instance.update_ipsecpolicy.return_value = return_value res = self.api.put(_get_path('vpn/ipsecpolicies', id=ipsecpolicy_id, fmt=self.fmt), self.serialize(update_data)) instance.update_ipsecpolicy.assert_called_with(mock.ANY, ipsecpolicy_id, ipsecpolicy=update_data) self.assertEqual(res.status_int, exc.HTTPOk.code) res = self.deserialize(res) self.assertIn('ipsecpolicy', res) self.assertEqual(res['ipsecpolicy'], return_value) def test_ipsecpolicy_get(self): """Test case to get or show an ipsecpolicy.""" ipsecpolicy_id = _uuid() return_value = {'name': 'ipsecpolicy1', 'auth_algorithm': 'sha1', 'encryption_algorithm': 'aes-128', 'encapsulation_mode': 'tunnel', 'lifetime': { 'units': 'seconds', 'value': 3600}, 'transform_protocol': 'esp', 'pfs': 'group5', 'tenant_id': _uuid(), 'id': ipsecpolicy_id} instance = self.plugin.return_value instance.get_ipsecpolicy.return_value = return_value res = self.api.get(_get_path('vpn/ipsecpolicies', id=ipsecpolicy_id, fmt=self.fmt)) instance.get_ipsecpolicy.assert_called_with(mock.ANY, ipsecpolicy_id, fields=mock.ANY) self.assertEqual(res.status_int, exc.HTTPOk.code) res = self.deserialize(res) self.assertIn('ipsecpolicy', res) self.assertEqual(res['ipsecpolicy'], return_value) def 
test_ipsecpolicy_delete(self): """Test case to delete an ipsecpolicy.""" self._test_entity_delete('ipsecpolicy') def test_vpnservice_create(self): """Test case to create a vpnservice.""" vpnservice_id = _uuid() data = {'vpnservice': {'name': 'vpnservice1', 'description': 'descr_vpn1', 'subnet_id': _uuid(), 'router_id': _uuid(), 'admin_state_up': True, 'tenant_id': _uuid()}} return_value = copy.copy(data['vpnservice']) return_value.update({'status': "ACTIVE", 'id': vpnservice_id}) instance = self.plugin.return_value instance.create_vpnservice.return_value = return_value res = self.api.post(_get_path('vpn/vpnservices', fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) instance.create_vpnservice.assert_called_with(mock.ANY, vpnservice=data) self.assertEqual(res.status_int, exc.HTTPCreated.code) res = self.deserialize(res) self.assertIn('vpnservice', res) self.assertEqual(res['vpnservice'], return_value) def test_vpnservice_list(self): """Test case to list all vpnservices.""" vpnservice_id = _uuid() return_value = [{'name': 'vpnservice1', 'tenant_id': _uuid(), 'status': 'ACTIVE', 'id': vpnservice_id}] instance = self.plugin.return_value instance.get_vpnservice.return_value = return_value res = self.api.get(_get_path('vpn/vpnservices', fmt=self.fmt)) instance.get_vpnservices.assert_called_with(mock.ANY, fields=mock.ANY, filters=mock.ANY) self.assertEqual(res.status_int, exc.HTTPOk.code) def test_vpnservice_update(self): """Test case to update a vpnservice.""" vpnservice_id = _uuid() update_data = {'vpnservice': {'admin_state_up': False}} return_value = {'name': 'vpnservice1', 'admin_state_up': False, 'subnet_id': _uuid(), 'router_id': _uuid(), 'tenant_id': _uuid(), 'status': "ACTIVE", 'id': vpnservice_id} instance = self.plugin.return_value instance.update_vpnservice.return_value = return_value res = self.api.put(_get_path('vpn/vpnservices', id=vpnservice_id, fmt=self.fmt), self.serialize(update_data)) 
instance.update_vpnservice.assert_called_with(mock.ANY, vpnservice_id, vpnservice=update_data) self.assertEqual(res.status_int, exc.HTTPOk.code) res = self.deserialize(res) self.assertIn('vpnservice', res) self.assertEqual(res['vpnservice'], return_value) def test_vpnservice_get(self): """Test case to get or show a vpnservice.""" vpnservice_id = _uuid() return_value = {'name': 'vpnservice1', 'admin_state_up': True, 'subnet_id': _uuid(), 'router_id': _uuid(), 'tenant_id': _uuid(), 'status': "ACTIVE", 'id': vpnservice_id} instance = self.plugin.return_value instance.get_vpnservice.return_value = return_value res = self.api.get(_get_path('vpn/vpnservices', id=vpnservice_id, fmt=self.fmt)) instance.get_vpnservice.assert_called_with(mock.ANY, vpnservice_id, fields=mock.ANY) self.assertEqual(res.status_int, exc.HTTPOk.code) res = self.deserialize(res) self.assertIn('vpnservice', res) self.assertEqual(res['vpnservice'], return_value) def test_vpnservice_delete(self): """Test case to delete a vpnservice.""" self._test_entity_delete('vpnservice') def test_ipsec_site_connection_create(self): """Test case to create a ipsec_site_connection.""" ipsecsite_con_id = _uuid() ikepolicy_id = _uuid() ipsecpolicy_id = _uuid() data = { 'ipsec_site_connection': {'name': 'connection1', 'description': 'Remote-connection1', 'peer_address': '192.168.1.10', 'peer_id': '192.168.1.10', 'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'], 'mtu': 1500, 'psk': 'abcd', 'initiator': 'bi-directional', 'dpd': { 'action': 'hold', 'interval': 30, 'timeout': 120}, 'ikepolicy_id': ikepolicy_id, 'ipsecpolicy_id': ipsecpolicy_id, 'vpnservice_id': _uuid(), 'admin_state_up': True, 'tenant_id': _uuid()} } return_value = copy.copy(data['ipsec_site_connection']) return_value.update({'status': "ACTIVE", 'id': ipsecsite_con_id}) instance = self.plugin.return_value instance.create_ipsec_site_connection.return_value = return_value res = self.api.post(_get_path('vpn/ipsec-site-connections', fmt=self.fmt), 
self.serialize(data), content_type='application/%s' % self.fmt) instance.create_ipsec_site_connection.assert_called_with( mock.ANY, ipsec_site_connection=data ) self.assertEqual(res.status_int, exc.HTTPCreated.code) res = self.deserialize(res) self.assertIn('ipsec_site_connection', res) self.assertEqual(res['ipsec_site_connection'], return_value) def test_ipsec_site_connection_list(self): """Test case to list all ipsec_site_connections.""" ipsecsite_con_id = _uuid() return_value = [{'name': 'connection1', 'peer_address': '192.168.1.10', 'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'], 'route_mode': 'static', 'auth_mode': 'psk', 'tenant_id': _uuid(), 'status': 'ACTIVE', 'id': ipsecsite_con_id}] instance = self.plugin.return_value instance.get_ipsec_site_connections.return_value = return_value res = self.api.get( _get_path('vpn/ipsec-site-connections', fmt=self.fmt)) instance.get_ipsec_site_connections.assert_called_with( mock.ANY, fields=mock.ANY, filters=mock.ANY ) self.assertEqual(res.status_int, exc.HTTPOk.code) def test_ipsec_site_connection_update(self): """Test case to update a ipsec_site_connection.""" ipsecsite_con_id = _uuid() update_data = {'ipsec_site_connection': {'admin_state_up': False}} return_value = {'name': 'connection1', 'description': 'Remote-connection1', 'peer_address': '192.168.1.10', 'peer_id': '192.168.1.10', 'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'], 'mtu': 1500, 'psk': 'abcd', 'initiator': 'bi-directional', 'dpd': { 'action': 'hold', 'interval': 30, 'timeout': 120}, 'ikepolicy_id': _uuid(), 'ipsecpolicy_id': _uuid(), 'vpnservice_id': _uuid(), 'admin_state_up': False, 'tenant_id': _uuid(), 'status': 'ACTIVE', 'id': ipsecsite_con_id} instance = self.plugin.return_value instance.update_ipsec_site_connection.return_value = return_value res = self.api.put(_get_path('vpn/ipsec-site-connections', id=ipsecsite_con_id, fmt=self.fmt), self.serialize(update_data)) instance.update_ipsec_site_connection.assert_called_with( mock.ANY, 
ipsecsite_con_id, ipsec_site_connection=update_data ) self.assertEqual(res.status_int, exc.HTTPOk.code) res = self.deserialize(res) self.assertIn('ipsec_site_connection', res) self.assertEqual(res['ipsec_site_connection'], return_value) def test_ipsec_site_connection_get(self): """Test case to get or show a ipsec_site_connection.""" ipsecsite_con_id = _uuid() return_value = {'name': 'connection1', 'description': 'Remote-connection1', 'peer_address': '192.168.1.10', 'peer_id': '192.168.1.10', 'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'], 'mtu': 1500, 'psk': 'abcd', 'initiator': 'bi-directional', 'dpd': { 'action': 'hold', 'interval': 30, 'timeout': 120}, 'ikepolicy_id': _uuid(), 'ipsecpolicy_id': _uuid(), 'vpnservice_id': _uuid(), 'admin_state_up': True, 'tenant_id': _uuid(), 'status': 'ACTIVE', 'id': ipsecsite_con_id} instance = self.plugin.return_value instance.get_ipsec_site_connection.return_value = return_value res = self.api.get(_get_path('vpn/ipsec-site-connections', id=ipsecsite_con_id, fmt=self.fmt)) instance.get_ipsec_site_connection.assert_called_with( mock.ANY, ipsecsite_con_id, fields=mock.ANY ) self.assertEqual(res.status_int, exc.HTTPOk.code) res = self.deserialize(res) self.assertIn('ipsec_site_connection', res) self.assertEqual(res['ipsec_site_connection'], return_value) def test_ipsec_site_connection_delete(self): """Test case to delete a ipsec_site_connection.""" self._test_entity_delete('ipsec_site_connection') class VpnaasExtensionTestCaseXML(VpnaasExtensionTestCase): fmt = 'xml'
apache-2.0
astropy/astropy
astropy/io/misc/asdf/tags/table/tests/test_table.py
5
7179
# Licensed under a 3-clause BSD style license - see LICENSE.rst # -*- coding: utf-8 -*- import pytest asdf = pytest.importorskip('asdf') import numpy as np from packaging.version import Version import astropy.units as u from astropy import table from astropy.time import Time, TimeDelta from astropy.coordinates import SkyCoord, EarthLocation from astropy.io.misc.asdf.tags.helpers import skycoord_equal from asdf.tests import helpers from asdf.tags.core.ndarray import NDArrayType from astropy.io.misc.asdf.tags.tests.helpers import run_schema_example_test def test_table(tmpdir): data_rows = [(1, 2.0, 'x'), (4, 5.0, 'y'), (5, 8.2, 'z')] t = table.Table(rows=data_rows, names=('a', 'b', 'c'), dtype=('i4', 'f8', 'S1')) t.columns['a'].description = 'RA' t.columns['a'].unit = 'degree' t.columns['a'].meta = {'foo': 'bar'} t.columns['c'].description = 'Some description of some sort' def check(ff): assert len(ff.blocks) == 3 helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check) def test_array_columns(tmpdir): a = np.array([([[1, 2], [3, 4]], 2.0, 'x'), ([[5, 6], [7, 8]], 5.0, 'y'), ([[9, 10], [11, 12]], 8.2, 'z')], dtype=[('a', '<i4', (2, 2)), ('b', '<f8'), ('c', '|S1')]) t = table.Table(a, copy=False) assert t.columns['a'].shape == (3, 2, 2) def check(ff): assert len(ff.blocks) == 1 helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check) def test_structured_array_columns(tmpdir): a = np.array([((1, 'a'), 2.0, 'x'), ((4, 'b'), 5.0, 'y'), ((5, 'c'), 8.2, 'z')], dtype=[('a', [('a0', '<i4'), ('a1', '|S1')]), ('b', '<f8'), ('c', '|S1')]) t = table.Table(a, copy=False) def check(ff): assert len(ff.blocks) == 1 helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check) def test_table_row_order(tmpdir): a = np.array([(1, 2.0, 'x'), (4, 5.0, 'y'), (5, 8.2, 'z')], dtype=[('a', '<i4'), ('b', '<f8'), ('c', '|S1')]) t = table.Table(a, copy=False) t.columns['a'].description = 'RA' t.columns['a'].unit = 'degree' t.columns['a'].meta = 
{'foo': 'bar'} t.columns['c'].description = 'Some description of some sort' def check(ff): assert len(ff.blocks) == 1 helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check) def test_table_inline(tmpdir): data_rows = [(1, 2.0, 'x'), (4, 5.0, 'y'), (5, 8.2, 'z')] t = table.Table(rows=data_rows, names=('a', 'b', 'c'), dtype=('i4', 'f8', 'S1')) t.columns['a'].description = 'RA' t.columns['a'].unit = 'degree' t.columns['a'].meta = {'foo': 'bar'} t.columns['c'].description = 'Some description of some sort' def check(ff): assert len(list(ff.blocks.internal_blocks)) == 0 if Version(asdf.__version__) >= Version('2.8.0'): # The auto_inline argument is deprecated as of asdf 2.8.0. with asdf.config_context() as config: config.array_inline_threshold = 64 helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check) else: helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check, write_options={'auto_inline': 64}) def test_mismatched_columns(): yaml = """ table: !<tag:astropy.org:astropy/table/table-1.0.0> columns: - !core/column-1.0.0 data: !core/ndarray-1.0.0 data: [0, 1, 2] name: a - !core/column-1.0.0 data: !core/ndarray-1.0.0 data: [0, 1, 2, 3] name: b colnames: [a, b] """ buff = helpers.yaml_to_asdf(yaml) with pytest.raises(ValueError) as err: with asdf.open(buff) as ff: pass assert 'Inconsistent data column lengths' in str(err.value) def test_masked_table(tmpdir): data_rows = [(1, 2.0, 'x'), (4, 5.0, 'y'), (5, 8.2, 'z')] t = table.Table(rows=data_rows, names=('a', 'b', 'c'), dtype=('i4', 'f8', 'S1'), masked=True) t.columns['a'].description = 'RA' t.columns['a'].unit = 'degree' t.columns['a'].meta = {'foo': 'bar'} t.columns['a'].mask = [True, False, True] t.columns['c'].description = 'Some description of some sort' def check(ff): assert len(ff.blocks) == 4 helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check) def test_quantity_mixin(tmpdir): t = table.QTable() t['a'] = [1, 2, 3] t['b'] = ['x', 'y', 'z'] 
t['c'] = [2.0, 5.0, 8.2] * u.m def check(ff): assert isinstance(ff['table']['c'], u.Quantity) helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check) def test_time_mixin(tmpdir): t = table.Table() t['a'] = [1, 2] t['b'] = ['x', 'y'] t['c'] = Time(['2001-01-02T12:34:56', '2001-02-03T00:01:02']) def check(ff): assert isinstance(ff['table']['c'], Time) helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check) def test_timedelta_mixin(tmpdir): t = table.Table() t['a'] = [1, 2] t['b'] = ['x', 'y'] t['c'] = TimeDelta([1, 2] * u.day) def check(ff): assert isinstance(ff['table']['c'], TimeDelta) helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check) def test_skycoord_mixin(tmpdir): t = table.Table() t['a'] = [1, 2] t['b'] = ['x', 'y'] t['c'] = SkyCoord([1, 2], [3, 4], unit='deg,deg', frame='fk4', obstime='J1990.5') def check(ff): assert isinstance(ff['table']['c'], SkyCoord) def tree_match(old, new): NDArrayType.assert_equal(new['a'], old['a']) NDArrayType.assert_equal(new['b'], old['b']) assert skycoord_equal(new['c'], old['c']) helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check, tree_match_func=tree_match) def test_earthlocation_mixin(tmpdir): t = table.Table() t['a'] = [1, 2] t['b'] = ['x', 'y'] t['c'] = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km) def check(ff): assert isinstance(ff['table']['c'], EarthLocation) helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check) def test_ndarray_mixin(tmpdir): t = table.Table() t['a'] = [1, 2] t['b'] = ['x', 'y'] t['c'] = table.NdarrayMixin([5, 6]) helpers.assert_roundtrip_tree({'table': t}, tmpdir) def test_backwards_compat(): """ Make sure that we can continue to read tables that use the schema from the ASDF Standard. This test uses the examples in the table schema from the ASDF Standard, since these make no reference to Astropy's own table definition. 
""" def check(asdffile): assert isinstance(asdffile['example'], table.Table) run_schema_example_test('stsci.edu', 'asdf', 'core/table', '1.0.0', check)
bsd-3-clause
zaydhach/PyBotWarPro
libs/jython/Lib/compiler/visitor.py
112
3902
from compiler import ast # XXX should probably rename ASTVisitor to ASTWalker # XXX can it be made even more generic? class ASTVisitor: """Performs a depth-first walk of the AST The ASTVisitor will walk the AST, performing either a preorder or postorder traversal depending on which method is called. methods: preorder(tree, visitor) postorder(tree, visitor) tree: an instance of ast.Node visitor: an instance with visitXXX methods The ASTVisitor is responsible for walking over the tree in the correct order. For each node, it checks the visitor argument for a method named 'visitNodeType' where NodeType is the name of the node's class, e.g. Class. If the method exists, it is called with the node as its sole argument. The visitor method for a particular node type can control how child nodes are visited during a preorder walk. (It can't control the order during a postorder walk, because it is called _after_ the walk has occurred.) The ASTVisitor modifies the visitor argument by adding a visit method to the visitor; this method can be used to visit a child node of arbitrary type. """ VERBOSE = 0 def __init__(self): self.node = None self._cache = {} def default(self, node, *args): for child in node.getChildNodes(): self.dispatch(child, *args) def dispatch(self, node, *args): self.node = node klass = node.__class__ meth = self._cache.get(klass, None) if meth is None: className = klass.__name__ meth = getattr(self.visitor, 'visit' + className, self.default) self._cache[klass] = meth ## if self.VERBOSE > 0: ## className = klass.__name__ ## if self.VERBOSE == 1: ## if meth == 0: ## print "dispatch", className ## else: ## print "dispatch", className, (meth and meth.__name__ or '') return meth(node, *args) def preorder(self, tree, visitor, *args): """Do preorder walk of tree using visitor""" self.visitor = visitor visitor.visit = self.dispatch self.dispatch(tree, *args) # XXX *args make sense? 
class ExampleASTVisitor(ASTVisitor):
    """Prints examples of the nodes that aren't visited

    This visitor-driver is only useful for development, when it's
    helpful to develop a visitor incrementally, and get feedback on what
    you still have to do.
    """
    # Class-level registry of node classes already reported, so each
    # unhandled node type is printed at most once (shared by instances).
    examples = {}

    def dispatch(self, node, *args):
        self.node = node
        meth = self._cache.get(node.__class__, None)
        className = node.__class__.__name__
        if meth is None:
            # Unlike ASTVisitor.dispatch, cache 0 (not self.default) for a
            # missing handler so the reporting branch below can trigger.
            meth = getattr(self.visitor, 'visit' + className, 0)
            self._cache[node.__class__] = meth
        if self.VERBOSE > 1:
            print "dispatch", className, (meth and meth.__name__ or '')
        if meth:
            meth(node, *args)
        elif self.VERBOSE > 0:
            # No handler: dump the node's public attributes once per class
            # to show the developer which visitXXX methods are still missing.
            klass = node.__class__
            if not self.examples.has_key(klass):
                self.examples[klass] = klass
                print
                print self.visitor
                print klass
                for attr in dir(node):
                    if attr[0] != '_':
                        print "\t", "%-12.12s" % attr, getattr(node, attr)
                print
            return self.default(node, *args)

# XXX this is an API change
# Default walker class used by walk() when none is supplied.
_walker = ASTVisitor

def walk(tree, visitor, walker=None, verbose=None):
    # Convenience driver: preorder-walk `tree` with `visitor` and return
    # the visitor (so callers can read accumulated results off it).
    # Setting walker.VERBOSE here shadows the class attribute on that
    # one walker instance only.
    if walker is None:
        walker = _walker()
    if verbose is not None:
        walker.VERBOSE = verbose
    walker.preorder(tree, visitor)
    return walker.visitor

def dumpNode(node):
    # Debug helper: print a node's class and all public attributes.
    print node.__class__
    for attr in dir(node):
        if attr[0] != '_':
            print "\t", "%-10.10s" % attr, getattr(node, attr)
gpl-2.0
qianwenming/mapnik
scons/scons-local-2.3.1/SCons/Tool/MSCommon/arch.py
8
2045
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#

__revision__ = "src/engine/SCons/Tool/MSCommon/arch.py 2014/03/02 14:18:15 garyo"

__doc__ = """Module to define supported Windows chip architectures.
"""

import os

class ArchDefinition(object):
    """
    A class for defining architecture-specific settings and logic.
    """
    def __init__(self, arch, synonyms=None):
        """Create an architecture definition.

        arch:     canonical architecture name (e.g. 'x86_64').
        synonyms: optional iterable of alternative names that map to
                  the same architecture (e.g. 'AMD64', 'amd64').
        """
        self.arch = arch
        # A mutable default ([]) would be shared across all instances
        # created without synonyms; copy the iterable instead.
        self.synonyms = list(synonyms) if synonyms is not None else []

# BUGFIX: this list previously instantiated the undefined name
# 'ArchitectureDefinition', which raised NameError as soon as the
# module was imported; the class is named ArchDefinition.
SupportedArchitectureList = [
    ArchDefinition(
        'x86',
        ['i386', 'i486', 'i586', 'i686'],
    ),

    ArchDefinition(
        'x86_64',
        ['AMD64', 'amd64', 'em64t', 'EM64T', 'x86_64'],
    ),

    ArchDefinition(
        'ia64',
        ['IA64'],
    ),
]

# Map every canonical name AND every synonym to its ArchDefinition,
# so lookups like SupportedArchitectureMap['AMD64'] resolve to 'x86_64'.
SupportedArchitectureMap = {}
for a in SupportedArchitectureList:
    SupportedArchitectureMap[a.arch] = a
    for s in a.synonyms:
        SupportedArchitectureMap[s] = a
lgpl-2.1
RenderBroken/Victara-Stock-kernel
Documentation/networking/cxacru-cf.py
14668
1626
#!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.

import struct
import sys


def convert(data):
    """Convert packed little-endian u32 data to an adsl_config string.

    data:    bytes containing zero or more little-endian 32-bit values.
    Returns: space-separated "index=value" pairs, with the index in hex
             (matching the sysfs adsl_config attribute format).
    Raises:  ValueError if len(data) is not a multiple of 4; the message
             reports the length of the trailing short word (1-3 bytes).
    """
    remainder = len(data) % 4
    if remainder:
        raise ValueError("Error: read {0} not 4 bytes".format(remainder))
    values = struct.unpack("<{0}I".format(len(data) // 4), data)
    return " ".join("{0:x}={1}".format(i, v) for i, v in enumerate(values))


def main():
    """Read cxacru-cf.bin from stdin and print the adsl_config string."""
    # sys.stdin.buffer gives binary input on Python 3; plain sys.stdin
    # already yields byte strings on Python 2 (the original target).
    stream = getattr(sys.stdin, "buffer", sys.stdin)
    data = stream.read()
    try:
        line = convert(data)
    except ValueError as err:
        # Keep the original error protocol: a newline on stdout, the
        # diagnostic on stderr, and a non-zero exit status.  (Unlike the
        # original per-word loop, no partial values precede the newline.)
        sys.stdout.write("\n")
        sys.stderr.write("{0}\n".format(err))
        sys.exit(1)
    sys.stdout.write(line + "\n")


if __name__ == "__main__":
    main()
gpl-2.0
RPGOne/Skynet
node-master/deps/v8/tools/testrunner/server/presence_handler.py
123
4443
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
#       copyright notice, this list of conditions and the following
#       disclaimer in the documentation and/or other materials provided
#       with the distribution.
#     * Neither the name of Google Inc. nor the names of its
#       contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# UDP peer-discovery for the distributed V8 test runner (Python 2:
# uses the py2-only SocketServer module).

import socket
import SocketServer
import threading
try:
  # Prefer the faster C implementation when available; fall back to stdlib.
  # NOTE(review): bare except also hides unrelated import failures.
  import ujson as json
except:
  import json

from . import constants
from ..objects import peer


# Wire-protocol message tags. Each datagram is a JSON list whose first
# element is one of these strings.
STARTUP_REQUEST = "V8 test peer starting up"
STARTUP_RESPONSE = "Let's rock some tests!"
EXIT_REQUEST = "V8 testing peer going down"


def GetOwnIP():
  """Return this host's outbound IP address.

  Opens a UDP socket "connected" to a public address (8.8.8.8) purely so
  the OS picks the outbound interface; no packet is actually sent for a
  UDP connect. The socket's local address is then the usable IP.
  """
  s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
  s.connect(("8.8.8.8", 80))
  ip = s.getsockname()[0]
  s.close()
  return ip


class PresenceHandler(SocketServer.BaseRequestHandler):
  """Dispatches one incoming presence datagram.

  ``self.request[0]`` is the datagram payload (UDPServer convention);
  ``self.server`` is the PresenceDaemon below.
  """

  def handle(self):
    # Payload layout: [tag, jobs, relative_perf, pubkey_fingerprint, ...].
    data = json.loads(self.request[0].strip())

    if data[0] == STARTUP_REQUEST:
      jobs = data[1]
      relative_perf = data[2]
      pubkey_fingerprint = data[3]
      trusted = self.server.daemon.IsTrusted(pubkey_fingerprint)
      # Answer with our own capabilities plus whether we trust the sender.
      response = [STARTUP_RESPONSE, self.server.daemon.jobs,
                  self.server.daemon.relative_perf,
                  self.server.daemon.pubkey_fingerprint, trusted]
      response = json.dumps(response)
      self.server.SendTo(self.client_address[0], response)
      # Register the newly announced peer locally.
      p = peer.Peer(self.client_address[0], jobs, relative_perf,
                    pubkey_fingerprint)
      p.trusted = trusted
      self.server.daemon.AddPeer(p)

    elif data[0] == STARTUP_RESPONSE:
      jobs = data[1]
      perf = data[2]
      pubkey_fingerprint = data[3]
      p = peer.Peer(self.client_address[0], jobs, perf, pubkey_fingerprint)
      p.trusted = self.server.daemon.IsTrusted(pubkey_fingerprint)
      # data[4] is the remote side's trust verdict about us.
      p.trusting_me = data[4]
      self.server.daemon.AddPeer(p)

    elif data[0] == EXIT_REQUEST:
      self.server.daemon.DeletePeer(self.client_address[0])
      if self.client_address[0] == self.server.daemon.ip:
        # Our own broadcast exit packet looped back: unblock shutdown()
        # (see PresenceDaemon.shutdown's double-acquire handshake).
        self.server.shutdown_lock.release()


class PresenceDaemon(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
  """UDP server that announces this peer and tracks others on the /24."""

  def __init__(self, daemon):
    self.daemon = daemon
    address = (daemon.ip, constants.PRESENCE_PORT)
    SocketServer.UDPServer.__init__(self, address, PresenceHandler)
    self.shutdown_lock = threading.Lock()

  def shutdown(self):
    # Handshake: hold the lock, broadcast EXIT_REQUEST, then block on a
    # second acquire until our own exit datagram is received back by
    # PresenceHandler (which releases the lock). Guarantees the goodbye
    # went out before the server stops.
    self.shutdown_lock.acquire()
    self.SendToAll(json.dumps([EXIT_REQUEST]))
    self.shutdown_lock.acquire()
    self.shutdown_lock.release()
    SocketServer.UDPServer.shutdown(self)

  def SendTo(self, target, message):
    """Fire-and-forget a single datagram to ``target``."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.sendto(message, (target, constants.PRESENCE_PORT))
    sock.close()

  def SendToAll(self, message):
    """Send ``message`` to every host .1-.253 on our own /24 subnet.

    NOTE(review): assumes a /24 netmask and skips .254 — confirm this is
    intended for the deployment network.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ip = self.daemon.ip.split(".")
    for i in range(1, 254):
      ip[-1] = str(i)
      sock.sendto(message, (".".join(ip), constants.PRESENCE_PORT))
    sock.close()

  def FindPeers(self):
    """Broadcast our STARTUP_REQUEST so existing peers respond."""
    request = [STARTUP_REQUEST, self.daemon.jobs, self.daemon.relative_perf,
               self.daemon.pubkey_fingerprint]
    request = json.dumps(request)
    self.SendToAll(request)
bsd-3-clause
igboyes/virtool
virtool/samples/api.py
1
14520
"""aiohttp request handlers for the sample resource (/api/samples).

All handlers read the Motor/MongoDB handle from ``req.app["db"]`` and
enforce per-sample access rights before reading or mutating documents.
"""
import asyncio.tasks
from copy import deepcopy

from cerberus import Validator

import virtool.analyses.utils
import virtool.analyses.db
import virtool.api.utils
import virtool.files.db
import virtool.jobs.db
import virtool.samples.db
import virtool.db.utils
import virtool.errors
import virtool.http.routes
import virtool.samples.utils
import virtool.subtractions.db
import virtool.utils
import virtool.validators
from virtool.api.response import bad_request, insufficient_rights, invalid_query, \
    json_response, no_content, not_found

# Cerberus schema for the URL query accepted by ``find``; unknown keys are
# tolerated (allow_unknown=True below) so workflow filters pass through.
QUERY_SCHEMA = {
    "find": {
        "type": "string",
        "default": "",
        "coerce": (str, virtool.validators.strip)
    },
    "page": {
        "type": "integer",
        "coerce": int,
        "default": 1,
        "min": 1
    },
    "per_page": {
        "type": "integer",
        "coerce": int,
        "default": 15,
        "min": 1,
        "max": 100
    }
}

routes = virtool.http.routes.Routes()


@routes.get("/api/samples")
async def find(req):
    """
    Find samples, filtering by data passed as URL parameters.

    Results are paginated and restricted to samples the requesting client
    may read (owner, all_read flag, or owner-group membership).
    """
    db = req.app["db"]

    workflow_query = virtool.samples.db.compose_analysis_query(req.query)

    v = Validator(QUERY_SCHEMA, allow_unknown=True)

    if not v.validate(dict(req.query)):
        return invalid_query(v.errors)

    query = v.document

    rights_filter = [
        # The requesting user is the sample owner
        {"user.id": req["client"].user_id},

        # The sample rights allow all users to view the sample.
        {"all_read": True}
    ]

    if req["client"].groups:
        # The sample rights allow owner group members to view the sample and the requesting user is a member of
        # the owner group.
        rights_filter.append({
            "group_read": True,
            "group": {"$in": req["client"].groups}
        })

    base_query = {
        "$or": rights_filter
    }

    db_query = dict()

    term = query.get("find")

    if term:
        # Free-text search against sample name and owner id.
        db_query = virtool.api.utils.compose_regex_query(term, ["name", "user.id"])

    if workflow_query:
        if db_query:
            db_query = {
                "$and": [
                    db_query,
                    workflow_query
                ]
            }
        else:
            db_query = workflow_query

    data = await virtool.api.utils.paginate(
        db.samples,
        db_query,
        req.query,
        sort="created_at",
        projection=virtool.samples.db.LIST_PROJECTION,
        base_query=base_query,
        reverse=True
    )

    return json_response(data)


@routes.get("/api/samples/{sample_id}")
async def get(req):
    """
    Get a complete sample document.

    Responds 404 for unknown ids and 403 when the client lacks read
    rights. Attaches cache summaries and the subtraction sub-document.
    """
    db = req.app["db"]

    sample_id = req.match_info["sample_id"]

    document = await db.samples.find_one(sample_id)

    if not document:
        return not_found()

    if not virtool.samples.utils.get_sample_rights(document, req["client"])[0]:
        return insufficient_rights()

    caches = list()

    async for cache in db.caches.find({"sample.id": sample_id}):
        caches.append(virtool.utils.base_processor(cache))

    document["caches"] = caches

    if document["ready"] is True:
        # Only update file fields if sample creation is complete.
        for index, file in enumerate(document["files"]):
            # Rewrite generic "reads_*" file names/URLs to carry the
            # sample name (spaces converted to underscores).
            snake_case = document["name"].replace(" ", "_")

            file.update({
                "name": file["name"].replace("reads_", f"{snake_case}_"),
                "download_url": file["download_url"].replace("reads_", f"{snake_case}_"),
                "replace_url": f"/upload/samples/{sample_id}/files/{index + 1}"
            })

    await virtool.subtractions.db.attach_subtraction(db, document)

    return json_response(virtool.utils.base_processor(document))


@routes.post("/api/samples", permission="create_sample", schema={
    "name": {
        "type": "string",
        "coerce": virtool.validators.strip,
        "empty": False,
        "required": True
    },
    "host": {
        "type": "string",
        "coerce": virtool.validators.strip,
    },
    "isolate": {
        "type": "string",
        "coerce": virtool.validators.strip,
    },
    "group": {
        "type": "string"
    },
    "locale": {
        "type": "string",
        "coerce": virtool.validators.strip,
    },
    "library_type": {
        "type": "string",
        "allowed": [
            "normal",
            "srna",
            "amplicon"
        ],
        "default": "normal"
    },
    "subtraction": {
        "type": "string",
        "required": True
    },
    "files": {
        "type": "list",
        "minlength": 1,
        "maxlength": 2,
        "required": True
    }
})
async def create(req):
    """Create a sample document and enqueue the create_sample job.

    Validates name uniqueness, the subtraction host, and the uploaded
    file ids before inserting; responds 201 with a Location header.
    """
    db = req.app["db"]
    data = req["data"]
    user_id = req["client"].user_id
    settings = req.app["settings"]

    name_error_message = await virtool.samples.db.check_name(db, req.app["settings"], data["name"])

    if name_error_message:
        return bad_request(name_error_message)

    # Make sure a subtraction host was submitted and it exists.
    # NOTE(review): collection is addressed as ``db.subtraction`` (singular)
    # here while the module imported above is ``virtool.subtractions.db`` —
    # confirm the collection name is correct.
    if not await db.subtraction.count_documents({"_id": data["subtraction"], "is_host": True}):
        return bad_request("Subtraction does not exist")

    # Make sure all of the passed file ids exist.
    if not await virtool.db.utils.ids_exist(db.files, data["files"]):
        return bad_request("File does not exist")

    sample_id = await virtool.db.utils.get_new_id(db.samples)

    document = deepcopy(data)

    sample_group_setting = settings["sample_group"]

    # Require a valid ``group`` field if the ``sample_group`` setting is ``users_primary_group``.
    if sample_group_setting == "force_choice":
        force_choice_error_message = await virtool.samples.db.validate_force_choice_group(db, data)

        if force_choice_error_message:
            # NOTE(review): both branches return the same response; the
            # "not found" check is currently redundant.
            if "not found" in force_choice_error_message:
                return bad_request(force_choice_error_message)

            return bad_request(force_choice_error_message)

    # Assign the user"s primary group as the sample owner group if the setting is ``users_primary_group``.
    elif sample_group_setting == "users_primary_group":
        document["group"] = await virtool.db.utils.get_one_field(db.users, "primary_group", user_id)

    # Make the owner group none if the setting is none.
    elif sample_group_setting == "none":
        document["group"] = "none"

    # Fill in server-controlled fields; workflow flags start False and the
    # sample is held until the creation job completes.
    document.update({
        "_id": sample_id,
        "nuvs": False,
        "pathoscope": False,
        "created_at": virtool.utils.timestamp(),
        "format": "fastq",
        "ready": False,
        "quality": None,
        "hold": True,
        "group_read": settings["sample_group_read"],
        "group_write": settings["sample_group_write"],
        "all_read": settings["sample_all_read"],
        "all_write": settings["sample_all_write"],
        "library_type": data["library_type"],
        "subtraction": {
            "id": data["subtraction"]
        },
        "user": {
            "id": user_id
        },
        "paired": len(data["files"]) == 2
    })

    files = [await db.files.find_one(file_id, ["_id", "name", "size"]) for file_id in data["files"]]

    files = [virtool.utils.base_processor(file) for file in files]

    document["files"] = files

    await db.samples.insert_one(document)

    # Reserve the upload files so they are not garbage-collected.
    await virtool.files.db.reserve(db, data["files"])

    task_args = {
        "sample_id": sample_id,
        "files": files
    }

    # Create job document.
    job = await virtool.jobs.db.create(
        db,
        req.app["settings"],
        "create_sample",
        task_args,
        user_id
    )

    await req.app["jobs"].enqueue(job["_id"])

    headers = {
        "Location": "/api/samples/" + sample_id
    }

    return json_response(virtool.utils.base_processor(document), status=201, headers=headers)


@routes.patch("/api/samples/{sample_id}", schema={
    "name": {
        "type": "string",
        "coerce": virtool.validators.strip,
        "empty": False
    },
    "host": {
        "type": "string",
        "coerce": virtool.validators.strip,
    },
    "isolate": {
        "type": "string",
        "coerce": virtool.validators.strip,
    },
    "locale": {
        "type": "string",
        "coerce": virtool.validators.strip,
    }
})
async def edit(req):
    """
    Update specific fields in the sample document.

    Requires write rights; re-checks name uniqueness when renaming.
    """
    db = req.app["db"]
    data = req["data"]

    sample_id = req.match_info["sample_id"]

    if not await virtool.samples.db.check_rights(db, sample_id, req["client"]):
        return insufficient_rights()

    message = await virtool.samples.db.check_name(db, req.app["settings"], data["name"], sample_id=sample_id)

    if message:
        return bad_request(message)

    document = await db.samples.find_one_and_update({"_id": sample_id}, {
        "$set": data
    }, projection=virtool.samples.db.LIST_PROJECTION)

    processed = virtool.utils.base_processor(document)

    return json_response(processed)


@routes.put("/api/samples/{sample_id}/update_job")
async def replace(req):
    """Kick off a file-replacement job for the sample, then return it."""
    sample_id = req.match_info["sample_id"]

    await virtool.samples.db.attempt_file_replacement(
        req.app,
        sample_id,
        req["client"].user_id
    )

    document = await req.app["db"].samples.find_one(sample_id, virtool.samples.db.PROJECTION)

    return json_response(virtool.utils.base_processor(document))


@routes.patch("/api/samples/{sample_id}/rights", schema={
    "group": {
        "type": "string"
    },
    "all_read": {
        "type": "boolean"
    },
    "all_write": {
        "type": "boolean"
    },
    "group_read": {
        "type": "boolean"
    },
    "group_write": {
        "type": "boolean"
    }
})
async def set_rights(req):
    """
    Change rights settings for the specified sample document.

    Only the sample owner or an administrator may change rights; a new
    owner group must exist (or be the literal "none").
    """
    db = req.app["db"]
    data = req["data"]

    sample_id = req.match_info["sample_id"]

    if not await db.samples.count_documents({"_id": sample_id}):
        return not_found()

    user_id = req["client"].user_id

    # Only update the document if the connected user owns the samples or is an administrator.
    if not req["client"].administrator and user_id != await virtool.samples.db.get_sample_owner(db, sample_id):
        return insufficient_rights("Must be administrator or sample owner")

    group = data.get("group")

    if group:
        existing_group_ids = await db.groups.distinct("_id") + ["none"]

        if group not in existing_group_ids:
            return bad_request("Group does not exist")

    # Update the sample document with the new rights.
    document = await db.samples.find_one_and_update({"_id": sample_id}, {
        "$set": data
    }, projection=virtool.samples.db.RIGHTS_PROJECTION)

    return json_response(document)


@routes.delete("/api/samples/{sample_id}")
async def remove(req):
    """
    Remove a sample document and all associated analyses.
    """
    db = req.app["db"]

    sample_id = req.match_info["sample_id"]

    try:
        if not await virtool.samples.db.check_rights(db, sample_id, req["client"]):
            return insufficient_rights()
    except virtool.errors.DatabaseError as err:
        # check_rights signals a missing sample via DatabaseError.
        if "Sample does not exist" in str(err):
            return not_found()

        raise

    await virtool.samples.db.remove_samples(
        db,
        req.app["settings"],
        [sample_id]
    )

    return no_content()


@routes.get("/api/samples/{sample_id}/analyses")
async def find_analyses(req):
    """
    List the analyses associated with the given ``sample_id``.

    Read rights on the sample are sufficient (write=False).
    """
    db = req.app["db"]

    sample_id = req.match_info["sample_id"]

    try:
        if not await virtool.samples.db.check_rights(db, sample_id, req["client"], write=False):
            return insufficient_rights()
    except virtool.errors.DatabaseError as err:
        if "Sample does not exist" in str(err):
            return not_found()

        raise

    term = req.query.get("term")

    db_query = dict()

    if term:
        db_query.update(virtool.api.utils.compose_regex_query(term, ["reference.name", "user.id"]))

    base_query = {
        "sample.id": sample_id
    }

    data = await virtool.api.utils.paginate(
        db.analyses,
        db_query,
        req.query,
        base_query=base_query,
        projection=virtool.analyses.db.PROJECTION,
        sort=[("created_at", -1)]
    )

    # Attach subtraction details to every page item concurrently.
    await asyncio.tasks.gather(*[virtool.subtractions.db.attach_subtraction(db, d) for d in data["documents"]])

    return json_response(data)


@routes.post("/api/samples/{sample_id}/analyses", schema={
    "ref_id": {
        "type": "string",
        "required": True
    },
    "subtraction_id": {
        "type": "string"
    },
    "workflow": {
        "type": "string",
        "required": True,
        "allowed": virtool.analyses.utils.WORKFLOW_NAMES
    },
})
async def analyze(req):
    """
    Starts an analysis job for a given sample.

    Requires write rights on the sample, an existing reference with a
    ready index; falls back to the sample's own subtraction when no
    ``subtraction_id`` is supplied. Responds 201 with a Location header.
    """
    db = req.app["db"]
    data = req["data"]

    sample_id = req.match_info["sample_id"]

    ref_id = data["ref_id"]

    try:
        if not await virtool.samples.db.check_rights(db, sample_id, req["client"]):
            return insufficient_rights()
    except virtool.errors.DatabaseError as err:
        if "Sample does not exist" in str(err):
            return not_found()

        raise

    if not await db.references.count_documents({"_id": ref_id}):
        return bad_request("Reference does not exist")

    if not await db.indexes.count_documents({"reference.id": ref_id, "ready": True}):
        return bad_request("No ready index")

    subtraction_id = data.get("subtraction_id")

    if subtraction_id is None:
        # Default to the subtraction recorded on the sample itself.
        subtraction = await virtool.db.utils.get_one_field(db.samples, "subtraction", sample_id)
        subtraction_id = subtraction["id"]

    # Generate a unique _id for the analysis entry
    document = await virtool.analyses.db.new(
        req.app,
        sample_id,
        ref_id,
        subtraction_id,
        req["client"].user_id,
        data["workflow"]
    )

    document = virtool.utils.base_processor(document)

    # Recompute the sample's workflow tags and push the update to clients.
    sample = await virtool.samples.db.recalculate_workflow_tags(db, sample_id)
    await req.app["dispatcher"].dispatch("samples", "update", virtool.utils.base_processor(sample))

    analysis_id = document["id"]

    return json_response(
        document,
        status=201,
        headers={
            "Location": f"/api/analyses/{analysis_id}"
        }
    )
mit
jsternberg/ansible-modules-core
commands/script.py
125
2330
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# The `script` module is implemented as an action plugin on the controller;
# this file only carries the module documentation. The strings below are
# parsed as YAML by ansible-doc and the docs build — keep them valid YAML
# and do not change their content casually.

DOCUMENTATION = """
---
module: script
version_added: "0.9"
short_description: Runs a local script on a remote node after transferring it
description:
     - "The M(script) module takes the script name followed by a list of space-delimited arguments. "
     - "The local script at path will be transferred to the remote node and then executed. "
     - "The given script will be processed through the shell environment on the remote node. "
     - "This module does not require python on the remote system, much like the M(raw) module. "
options:
  free_form:
    description:
      - path to the local script file followed by optional arguments.
    required: true
    default: null
    aliases: []
  creates:
    description:
      - a filename, when it already exists, this step will B(not) be run.
    required: no
    default: null
    version_added: "1.5"
  removes:
    description:
      - a filename, when it does not exist, this step will B(not) be run.
    required: no
    default: null
    version_added: "1.5"
notes:
  - It is usually preferable to write Ansible modules than pushing scripts. Convert your script to an Ansible module for bonus points!
author:
    - Ansible Core Team
    - Michael DeHaan
"""

EXAMPLES = '''
# Example from Ansible Playbooks
- script: /some/local/script.sh --some-arguments 1234

# Run a script that creates a file, but only if the file is not yet created
- script: /some/local/create_file.sh --some-arguments 1234 creates=/the/created/file.txt

# Run a script that removes a file, but only if the file is not yet removed
- script: /some/local/remove_file.sh --some-arguments 1234 removes=/the/removed/file.txt
'''
gpl-3.0
janrain/lazydict
test.py
1
1588
from unittest import TestCase
import lazydict


class TestLazyDictionary(TestCase):
    """Behavioral tests for lazydict.LazyDictionary."""

    def test_circular_reference_error(self):
        # A value that looks itself up must be detected, not recurse forever.
        table = lazydict.LazyDictionary()
        table['foo'] = lambda s: s['foo']
        with self.assertRaises(lazydict.CircularReferenceError):
            table['foo']

    def test_constant_redefinition_error(self):
        # Once a key has been evaluated it is frozen: neither reassignment
        # nor deletion is permitted.
        table = lazydict.LazyDictionary()
        table['a'] = 1
        table['b'] = 2
        table['sum'] = lambda s: s['a'] + s['b']
        _ = table['sum']  # force evaluation, freezing 'a' and 'b'
        with self.assertRaises(lazydict.ConstantRedefinitionError):
            table['a'] = 'hotdog'
        with self.assertRaises(lazydict.ConstantRedefinitionError):
            del table['a']

    def test_lazy_evaluation(self):
        # Callables may reference keys defined only later; evaluation is
        # deferred until first access.
        table = lazydict.LazyDictionary()
        table['sum'] = lambda s: s['a'] + s['b']
        table['a'] = 1
        table['b'] = 2
        self.assertEqual(table['sum'], 3)

    def test_str(self):
        table = lazydict.LazyDictionary({'a': {'b': 1}})
        self.assertEqual(str(table), "{'a': {'b': 1}}")

    def test_repr(self):
        table = lazydict.LazyDictionary({'a': {'b': 1}})
        self.assertEqual(repr(table), "LazyDictionary({'a': {'b': 1}})")

    def test_atomic_evaluation(self):
        # A raising value stays in the 'error' state and keeps re-raising the
        # original exception rather than a spurious CircularReferenceError.
        table = lazydict.LazyDictionary()
        table['division'] = lambda: 1 / 0
        self.assertEqual(table.states['division'], 'defined')
        with self.assertRaises(ZeroDivisionError):
            table['division']
        # second call checks lazydict.CircularReferenceError is not raised.
        with self.assertRaises(ZeroDivisionError):
            table['division']
        self.assertEqual(table.states['division'], 'error')
mit
thresholdsoftware/asylum-v2.0
openerp/cli/server.py
20
9575
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

"""
OpenERP - Server
OpenERP is an ERP+CRM program for small and medium businesses.

The whole source code is distributed under the terms of the
GNU Public Licence.

(c) 2003-TODAY, Fabien Pinckaers - OpenERP SA
"""

# NOTE: this module is Python 2 only — it uses the py2 ``file()`` builtin
# and relies on eager ``map()`` for side effects.

import logging
import os
import signal
import sys
import threading
import traceback
import time

import openerp
from . import Command

__author__ = openerp.release.author
__version__ = openerp.release.version

# Also use the `openerp` logger for the main script.
_logger = logging.getLogger('openerp')

def check_root_user():
    """ Exit if the process's user is 'root' (on POSIX system)."""
    if os.name == 'posix':
        import pwd
        if pwd.getpwuid(os.getuid())[0] == 'root' :
            sys.stderr.write("Running as user 'root' is a security risk, aborting.\n")
            sys.exit(1)

def check_postgres_user():
    """ Exit if the configured database user is 'postgres'.

    This function assumes the configuration has been initialized.
    """
    config = openerp.tools.config
    if config['db_user'] == 'postgres':
        sys.stderr.write("Using the database user 'postgres' is a security risk, aborting.")
        sys.exit(1)

def report_configuration():
    """ Log the server version and some configuration values.

    This function assumes the configuration has been initialized.
    """
    config = openerp.tools.config
    _logger.info("OpenERP version %s", __version__)
    for name, value in [('addons paths', config['addons_path']),
                        ('database hostname', config['db_host'] or 'localhost'),
                        ('database port', config['db_port'] or '5432'),
                        ('database user', config['db_user'])]:
        _logger.info("%s: %s", name, value)

def setup_pid_file():
    """ Create a file with the process id written in it.

    This function assumes the configuration has been initialized.
    """
    config = openerp.tools.config
    if config['pidfile']:
        fd = open(config['pidfile'], 'w')
        pidtext = "%d" % (os.getpid())
        fd.write(pidtext)
        fd.close()

def preload_registry(dbname):
    """ Preload a registry, and start the cron.

    Returns True when module loading produced no assertion-report
    failures, False on any initialization error.
    """
    try:
        # Modules are (re)loaded when -i/--init or -u/--update was given.
        update_module = True if openerp.tools.config['init'] or openerp.tools.config['update'] else False
        db, registry = openerp.pooler.get_db_and_pool(dbname,update_module=update_module)
    except Exception:
        _logger.exception('Failed to initialize database `%s`.', dbname)
        return False
    return registry._assertion_report.failures == 0

def run_test_file(dbname, test_file):
    """ Preload a registry, possibly run a test file, and start the cron."""
    try:
        config = openerp.tools.config
        db, registry = openerp.pooler.get_db_and_pool(dbname, update_module=config['init'] or config['update'])
        cr = db.cursor()
        _logger.info('loading test file %s', test_file)
        # ``file`` is the Python 2 builtin alias for open().
        openerp.tools.convert_yaml_import(cr, 'base', file(test_file), 'test', {}, 'init')
        if config['test_commit']:
            _logger.info('test %s has been commited', test_file)
            cr.commit()
        else:
            _logger.info('test %s has been rollbacked', test_file)
            cr.rollback()
        cr.close()
    except Exception:
        _logger.exception('Failed to initialize database `%s` and run test file `%s`.', dbname, test_file)

def export_translation():
    """Write a translation (.po/.csv/...) export for the configured language."""
    config = openerp.tools.config
    dbname = config['db_name']

    if config["language"]:
        msg = "language %s" % (config["language"],)
    else:
        msg = "new language"
    _logger.info('writing translation file for %s to %s', msg,
        config["translate_out"])

    # Output format is inferred from the file extension.
    fileformat = os.path.splitext(config["translate_out"])[-1][1:].lower()
    buf = file(config["translate_out"], "w")
    cr = openerp.pooler.get_db(dbname).cursor()
    openerp.tools.trans_export(config["language"],
        config["translate_modules"] or ["all"], buf, fileformat, cr)
    cr.close()
    buf.close()

    _logger.info('translation file written successfully')

def import_translation():
    """Load a translation file into the configured database."""
    config = openerp.tools.config
    context = {'overwrite': config["overwrite_existing_translations"]}
    dbname = config['db_name']

    cr = openerp.pooler.get_db(dbname).cursor()
    openerp.tools.trans_load(
        cr, config["translate_in"], config["language"], context=context)
    cr.commit()
    cr.close()

# Variable keeping track of the number of calls to the signal handler defined
# below. This variable is monitored by ``quit_on_signals()``.
quit_signals_received = 0

def signal_handler(sig, frame):
    """ Signal handler: exit ungracefully on the second handled signal.

    :param sig: the signal number
    :param frame: the interrupted stack frame or None
    """
    global quit_signals_received
    quit_signals_received += 1
    if quit_signals_received > 1:
        # logging.shutdown was already called at this point.
        sys.stderr.write("Forced shutdown.\n")
        os._exit(0)

def dumpstacks(sig, frame):
    """ Signal handler: dump a stack trace for each existing thread."""
    # code from http://stackoverflow.com/questions/132058/getting-stack-trace-from-a-running-python-application#answer-2569696
    # modified for python 2.5 compatibility
    threads_info = dict([(th.ident, {'name': th.name,
                                     'uid': getattr(th,'uid','n/a')})
                                for th in threading.enumerate()])
    code = []
    for threadId, stack in sys._current_frames().items():
        thread_info = threads_info.get(threadId)
        code.append("\n# Thread: %s (id:%s) (uid:%s)" % \
                    (thread_info and thread_info['name'] or 'n/a',
                     threadId,
                     thread_info and thread_info['uid'] or 'n/a'))
        for filename, lineno, name, line in traceback.extract_stack(stack):
            code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
            if line:
                code.append("  %s" % (line.strip()))
    _logger.info("\n".join(code))

def setup_signal_handlers():
    """ Register the signal handler defined above. """
    SIGNALS = map(lambda x: getattr(signal, "SIG%s" % x), "INT TERM".split())
    if os.name == 'posix':
        # py2 eager map() used purely for its side effect of registering.
        map(lambda sig: signal.signal(sig, signal_handler), SIGNALS)
        # SIGQUIT (Ctrl-\) dumps all thread stacks instead of quitting.
        signal.signal(signal.SIGQUIT, dumpstacks)
    elif os.name == 'nt':
        import win32api
        win32api.SetConsoleCtrlHandler(lambda sig: signal_handler(sig, None), 1)

def quit_on_signals():
    """ Wait for one or two signals then shutdown the server.

    The first SIGINT or SIGTERM signal will initiate a graceful shutdown while
    a second one if any will force an immediate exit.
    """
    # Wait for a first signal to be handled. (time.sleep will be interrupted
    # by the signal handler.) The try/except is for the win32 case.
    try:
        while quit_signals_received == 0:
            time.sleep(60)
    except KeyboardInterrupt:
        pass

    config = openerp.tools.config
    openerp.service.stop_services()

    if getattr(openerp, 'phoenix', False):
        # like the phoenix, reborn from ashes...
        openerp.service._reexec()
        return

    if config['pidfile']:
        os.unlink(config['pidfile'])
    sys.exit(0)

def main(args):
    """Server entry point: parse config, start services, wait for signals."""
    check_root_user()
    openerp.tools.config.parse_config(args)

    check_postgres_user()
    openerp.netsvc.init_logger()
    report_configuration()

    config = openerp.tools.config

    setup_signal_handlers()

    # One-shot modes: run a test file or an i18n import/export, then exit.
    if config["test_file"]:
        run_test_file(config['db_name'], config['test_file'])
        sys.exit(0)

    if config["translate_out"]:
        export_translation()
        sys.exit(0)

    if config["translate_in"]:
        import_translation()
        sys.exit(0)

    if not config["stop_after_init"]:
        setup_pid_file()

    # Some module register themselves when they are loaded so we need the
    # services to be running before loading any registry.
    if config['workers']:
        openerp.service.start_services_workers()
    else:
        openerp.service.start_services()

    rc = 0
    if config['db_name']:
        for dbname in config['db_name'].split(','):
            if not preload_registry(dbname):
                rc += 1
    if config["stop_after_init"]:
        sys.exit(rc)

    _logger.info('OpenERP server is running, waiting for connections...')
    quit_on_signals()

class Server(Command):
    def run(self, args):
        main(args)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
imanmafi/High-Frequency-Trading-Model-with-IB
params/strategy_parameters.py
7
1981
""" Author: James Ma Email stuff here: jamesmawm@gmail.com """ from datetime import datetime import pandas as pd import datetime as dt class StrategyParameters: def __init__(self, evaluation_time_secs, resample_interval_secs): self.resample_interval_secs = resample_interval_secs self.__evaluation_time_secs = evaluation_time_secs self.__bootstrap_completed = False self.last_evaluation_time = datetime.now() self.__COL_BETA = 'beta' self.__COL_VOLATILITY_RATIO = 'volatility_ratio' self.indicators = pd.DataFrame(columns=[self.__COL_BETA, self.__COL_VOLATILITY_RATIO]) def add_indicators(self, beta, volatility_ratio): timestamp = dt.datetime.now() self.indicators.loc[timestamp] = [beta, volatility_ratio] self.indicators.sort_index(inplace=True) def trim_indicators_series(self, cutoff_timestamp): self.indicators = self.indicators[ self.indicators.index >= cutoff_timestamp] def get_volatility_ratio(self): return self.__get_latest_indicator_value(self.__COL_VOLATILITY_RATIO, 1) def get_beta(self): return self.__get_latest_indicator_value(self.__COL_BETA) def __get_latest_indicator_value(self, column_name, default_value=0): if len(self.indicators) > 0: return self.indicators[column_name].values[-1] return default_value def set_bootstrap_completed(self): self.__bootstrap_completed = True self.set_new_evaluation_time() def is_evaluation_time_elapsed(self): seconds_elapsed = (datetime.now() - self.last_evaluation_time).seconds return seconds_elapsed > self.__evaluation_time_secs def set_new_evaluation_time(self): self.last_evaluation_time = datetime.now() def is_bootstrap_completed(self): return self.__bootstrap_completed
mit
hubsaysnuaa/odoo
addons/mrp/wizard/change_production_qty.py
245
4852
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp

class change_production_qty(osv.osv_memory):
    """Transient wizard that changes the planned quantity of a running
    manufacturing order and rescales its consumption/production moves."""
    _name = 'change.production.qty'
    _description = 'Change Quantity of Products'

    _columns = {
        'product_qty': fields.float('Product Qty', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
    }

    def default_get(self, cr, uid, fields, context=None):
        """ To get default values for the object.

        Pre-fills ``product_qty`` with the current quantity of the
        production order the wizard was launched on (``active_id``).

        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param fields: List of fields for which we want default values
        @param context: A standard dictionary
        @return: A dictionary which of fields with values.
        """
        if context is None:
            context = {}
        res = super(change_production_qty, self).default_get(cr, uid, fields, context=context)
        prod_obj = self.pool.get('mrp.production')
        prod = prod_obj.browse(cr, uid, context.get('active_id'), context=context)
        if 'product_qty' in fields:
            res.update({'product_qty': prod.product_qty})
        return res

    def _update_product_to_produce(self, cr, uid, prod, qty, context=None):
        # Rescale the finished-product stock moves to the new quantity.
        move_lines_obj = self.pool.get('stock.move')
        for m in prod.move_created_ids:
            move_lines_obj.write(cr, uid, [m.id], {'product_uom_qty': qty})

    def change_prod_qty(self, cr, uid, ids, context=None):
        """ Changes the Quantity of Product.

        Writes the new quantity on the production order, recomputes it,
        then rescales each raw-material move from the (possibly newly
        resolved) bill of materials, the procurement move and the
        to-produce moves.

        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        @return:
        """
        record_id = context and context.get('active_id',False)
        assert record_id, _('Active Id not found')
        prod_obj = self.pool.get('mrp.production')
        bom_obj = self.pool.get('mrp.bom')
        move_obj = self.pool.get('stock.move')
        for wiz_qty in self.browse(cr, uid, ids, context=context):
            prod = prod_obj.browse(cr, uid, record_id, context=context)
            prod_obj.write(cr, uid, [prod.id], {'product_qty': wiz_qty.product_qty})
            prod_obj.action_compute(cr, uid, [prod.id])

            for move in prod.move_lines:
                bom_point = prod.bom_id
                bom_id = prod.bom_id.id
                if not bom_point:
                    # No BoM on the order yet: look one up by product and
                    # persist it on the production order.
                    bom_id = bom_obj._bom_find(cr, uid, product_id=prod.product_id.id, context=context)
                    if not bom_id:
                        raise osv.except_osv(_('Error!'), _("Cannot find bill of material for this product."))
                    prod_obj.write(cr, uid, [prod.id], {'bom_id': bom_id})
                    bom_point = bom_obj.browse(cr, uid, [bom_id])[0]

                # NOTE(review): this second check is unreachable as an error
                # path — the branch above already raised when bom_id is
                # falsy; kept as-is for behavior parity.
                if not bom_id:
                    raise osv.except_osv(_('Error!'), _("Cannot find bill of material for this product."))

                # Convert the order quantity into the BoM's unit of measure.
                factor = prod.product_qty * prod.product_uom.factor / bom_point.product_uom.factor
                product_details, workcenter_details = \
                    bom_obj._bom_explode(cr, uid, bom_point, prod.product_id, factor / bom_point.product_qty, [], context=context)
                for r in product_details:
                    if r['product_id'] == move.product_id.id:
                        move_obj.write(cr, uid, [move.id], {'product_uom_qty': r['product_qty']})
            if prod.move_prod_id:
                move_obj.write(cr, uid, [prod.move_prod_id.id], {'product_uom_qty' :  wiz_qty.product_qty})
            self._update_product_to_produce(cr, uid, prod, wiz_qty.product_qty, context=context)

        return {}

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
ahamilton55/ansible
lib/ansible/modules/system/runit.py
27
9062
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['stableinterface'],
                    'supported_by': 'community'}

# This is a modification of @bcoca's `svc` module

DOCUMENTATION = '''
---
module: runit
author: "James Sumners (@jsumners)"
version_added: "2.3"
short_description: Manage runit services.
description:
    - Controls runit services on remote hosts using the sv utility.
options:
    name:
        required: true
        description:
            - Name of the service to manage.
    state:
        required: false
        choices: [ started, stopped, restarted, killed, reloaded, once ]
        description:
            - C(started)/C(stopped) are idempotent actions that will not run
              commands unless necessary.  C(restarted) will always bounce the
              service (sv restart) and C(killed) will always bounce the service
              (sv force-stop). C(reloaded) will send a HUP (sv reload).
              C(once) will run a normally downed sv once (sv once), not really
              an idempotent operation.
    enabled:
        required: false
        choices: [ "yes", "no" ]
        description:
            - Whether the service is enabled or not, if disabled it also implies stopped.
    service_dir:
        required: false
        default: /var/service
        description:
            - directory runsv watches for services
    service_src:
        required: false
        default: /etc/sv
        description:
            - directory where services are defined, the source of symlinks to service_dir.
'''

EXAMPLES = '''
# Example action to start sv dnscache, if not running
- runit:
    name: dnscache
    state: started

# Example action to stop sv dnscache, if running
- runit:
    name: dnscache
    state: stopped

# Example action to kill sv dnscache, in all cases
- runit:
    name: dnscache
    state: killed

# Example action to restart sv dnscache, in all cases
- runit:
    name: dnscache
    state: restarted

# Example action to reload sv dnscache, in all cases
- runit:
    name: dnscache
    state: reloaded

# Example using alt sv directory location
- runit:
    name: dnscache
    state: reloaded
    service_dir: /run/service
'''

# NOTE(review): os and re were previously pulled in implicitly through the
# star import of module_utils.basic; make the dependency explicit.
import os
import platform
import re
import shlex

from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.basic import *


def _load_dist_subclass(cls, *args, **kwargs):
    '''
    Used for derivative implementations: pick the most specific Sv subclass
    registered for the requested distro, falling back to cls itself.
    '''
    subclass = None
    distro = kwargs['module'].params['distro']

    # get the most specific superclass for this platform
    if distro is not None:
        for sc in cls.__subclasses__():
            if sc.distro is not None and sc.distro == distro:
                subclass = sc
    if subclass is None:
        subclass = cls

    return super(cls, subclass).__new__(subclass)


class Sv(object):
    """
    Main class that handles daemontools, can be subclassed and overridden in
    case we want to use a 'derivative' like encore, s6, etc
    """

    #def __new__(cls, *args, **kwargs):
    #    return _load_dist_subclass(cls, args, kwargs)

    def __init__(self, module):
        self.extra_paths = []
        # Attributes exported by report(); keep in sync with the fields set below.
        self.report_vars = ['state', 'enabled', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']

        self.module = module

        self.name = module.params['name']
        self.service_dir = module.params['service_dir']
        self.service_src = module.params['service_src']
        self.enabled = None
        self.full_state = None
        self.state = None
        self.pid = None
        self.duration = None

        self.svc_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths)
        self.svstat_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths)

        # svc_full: the runsv-watched link; src_full: the service definition.
        self.svc_full = '/'.join([self.service_dir, self.name])
        self.src_full = '/'.join([self.service_src, self.name])

        # A service is "enabled" when the symlink in service_dir exists
        # (lexists: count a dangling symlink as enabled too).
        self.enabled = os.path.lexists(self.svc_full)
        if self.enabled:
            self.get_status()
        else:
            self.state = 'stopped'

    def enable(self):
        """Enable the service by symlinking its definition into service_dir."""
        if os.path.exists(self.src_full):
            try:
                os.symlink(self.src_full, self.svc_full)
            except OSError:
                e = get_exception()
                self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % str(e))
        else:
            self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)

    def disable(self):
        """Stop the supervised service and remove its symlink from service_dir."""
        # Operate on the supervised link (svc_full), consistent with kill();
        # the original passed src_full here, which sv does not supervise.
        self.execute_command([self.svc_cmd, 'force-stop', self.svc_full])
        try:
            os.unlink(self.svc_full)
        except OSError:
            e = get_exception()
            self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % str(e))

    def get_status(self):
        """Parse `sv status` output into state, pid and duration."""
        (rc, out, err) = self.execute_command([self.svstat_cmd, 'status', self.svc_full])

        if err is not None and err:
            self.full_state = self.state = err
        else:
            self.full_state = out

            # raw strings: '\(' and '\d' are regex escapes, not str escapes
            m = re.search(r'\(pid (\d+)\)', out)
            if m:
                self.pid = m.group(1)

            m = re.search(r' (\d+)s', out)
            if m:
                self.duration = m.group(1)

            if re.search(r'run:', out):
                self.state = 'started'
            elif re.search(r'down:', out):
                self.state = 'stopped'
            else:
                self.state = 'unknown'
        return

    def started(self):
        return self.start()

    def start(self):
        return self.execute_command([self.svc_cmd, 'start', self.svc_full])

    def stopped(self):
        return self.stop()

    def stop(self):
        return self.execute_command([self.svc_cmd, 'stop', self.svc_full])

    def once(self):
        return self.execute_command([self.svc_cmd, 'once', self.svc_full])

    def reloaded(self):
        return self.reload()

    def reload(self):
        return self.execute_command([self.svc_cmd, 'reload', self.svc_full])

    def restarted(self):
        return self.restart()

    def restart(self):
        return self.execute_command([self.svc_cmd, 'restart', self.svc_full])

    def killed(self):
        return self.kill()

    def kill(self):
        return self.execute_command([self.svc_cmd, 'force-stop', self.svc_full])

    def execute_command(self, cmd):
        """Run *cmd* (an argv list) and return (rc, stdout, stderr).

        The list is passed to run_command unmodified so arguments containing
        spaces are not re-split (the original joined with ' ' and lost quoting).
        """
        try:
            (rc, out, err) = self.module.run_command(cmd)
        except Exception:
            e = get_exception()
            self.module.fail_json(msg="failed to execute: %s" % str(e))
        return (rc, out, err)

    def report(self):
        """Refresh status and return the attributes listed in report_vars."""
        self.get_status()
        states = {}
        for k in self.report_vars:
            states[k] = self.__dict__[k]
        return states


# ===========================================
# Main control flow

def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            state=dict(choices=['started', 'stopped', 'restarted', 'killed', 'reloaded', 'once']),
            enabled=dict(required=False, type='bool'),
            dist=dict(required=False, default='runit'),
            service_dir=dict(required=False, default='/var/service'),
            service_src=dict(required=False, default='/etc/sv'),
        ),
        supports_check_mode=True,
    )

    # Force a C locale so sv output parsing in get_status() is stable.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    state = module.params['state']
    enabled = module.params['enabled']

    sv = Sv(module)
    changed = False
    orig_state = sv.report()

    if enabled is not None and enabled != sv.enabled:
        changed = True
        if not module.check_mode:
            try:
                if enabled:
                    sv.enable()
                else:
                    sv.disable()
            except (OSError, IOError):
                e = get_exception()
                module.fail_json(msg="Could not change service link: %s" % str(e))

    if state is not None and state != sv.state:
        changed = True
        if not module.check_mode:
            # state names map directly onto Sv methods (started, killed, ...)
            getattr(sv, state)()

    module.exit_json(changed=changed, sv=sv.report())


if __name__ == '__main__':
    main()
gpl-3.0
olgaivolga/google-python-exercises
basic/solution/list1.py
209
3656
#!/usr/bin/python -tt # Copyright 2010 Google Inc. # Licensed under the Apache License, Version 2.0 # http://www.apache.org/licenses/LICENSE-2.0 # Google's Python Class # http://code.google.com/edu/languages/google-python-class/ # Basic list exercises # Fill in the code for the functions below. main() is already set up # to call the functions with a few different inputs, # printing 'OK' when each function is correct. # The starter code for each function includes a 'return' # which is just a placeholder for your code. # It's ok if you do not complete all the functions, and there # are some additional functions to try in list2.py. # A. match_ends # Given a list of strings, return the count of the number of # strings where the string length is 2 or more and the first # and last chars of the string are the same. # Note: python does not have a ++ operator, but += works. def match_ends(words): # +++your code here+++ # LAB(begin solution) count = 0 for word in words: if len(word) >= 2 and word[0] == word[-1]: count = count + 1 return count # LAB(replace solution) # return # LAB(end solution) # B. front_x # Given a list of strings, return a list with the strings # in sorted order, except group all the strings that begin with 'x' first. # e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields # ['xanadu', 'xyz', 'aardvark', 'apple', 'mix'] # Hint: this can be done by making 2 lists and sorting each of them # before combining them. def front_x(words): # +++your code here+++ # LAB(begin solution) # Put each word into the x_list or the other_list. x_list = [] other_list = [] for w in words: if w.startswith('x'): x_list.append(w) else: other_list.append(w) return sorted(x_list) + sorted(other_list) # LAB(replace solution) # return # LAB(end solution) # LAB(begin solution) # Extract the last element from a tuple -- used for custom sorting below. def last(a): return a[-1] # LAB(end solution) # C. 
sort_last # Given a list of non-empty tuples, return a list sorted in increasing # order by the last element in each tuple. # e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields # [(2, 2), (1, 3), (3, 4, 5), (1, 7)] # Hint: use a custom key= function to extract the last element form each tuple. def sort_last(tuples): # +++your code here+++ # LAB(begin solution) return sorted(tuples, key=last) # LAB(replace solution) # return # LAB(end solution) # Simple provided test() function used in main() to print # what each function returns vs. what it's supposed to return. def test(got, expected): if got == expected: prefix = ' OK ' else: prefix = ' X ' print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected)) # Calls the above functions with interesting inputs. def main(): print 'match_ends' test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3) test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2) test(match_ends(['aaa', 'be', 'abc', 'hello']), 1) print print 'front_x' test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']), ['xaa', 'xzz', 'axx', 'bbb', 'ccc']) test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']), ['xaa', 'xcc', 'aaa', 'bbb', 'ccc']) test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']), ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']) print print 'sort_last' test(sort_last([(1, 3), (3, 2), (2, 1)]), [(2, 1), (3, 2), (1, 3)]) test(sort_last([(2, 3), (1, 2), (3, 1)]), [(3, 1), (1, 2), (2, 3)]) test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]), [(2, 2), (1, 3), (3, 4, 5), (1, 7)]) if __name__ == '__main__': main()
apache-2.0
python-ivi/python-ivi
ivi/tektronix/tektronixMDO3012.py
2
1642
""" Python Interchangeable Virtual Instrument Library Copyright (c) 2016-2017 Alex Forencich Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from .tektronixMDO3000 import * class tektronixMDO3012(tektronixMDO3000): "Tektronix MDO3012 IVI oscilloscope driver" def __init__(self, *args, **kwargs): self.__dict__.setdefault('_instrument_id', 'MDO3012') super(tektronixMDO3012, self).__init__(*args, **kwargs) self._analog_channel_count = 2 self._digital_channel_count = 16 self._bandwidth = 100e6 # AFG option self._output_count = 1 self._init_channels() self._init_outputs()
mit
mmbtba/odoo
addons/l10n_sg/__openerp__.py
331
2380
# -*- encoding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2014 Tech Receptives (<http://techreceptives.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

# OpenERP module manifest: Singapore accounting chart of accounts and
# tax localization. This file is pure metadata read by the addon loader.
{
    'name': 'Singapore - Accounting',
    'version': '1.0',
    'author': 'Tech Receptives',
    'website': 'http://www.techreceptives.com',
    'category': 'Localization/Account Charts',
    'description': """
Singapore accounting chart and localization.
=======================================================

After installing this module, the Configuration wizard for accounting is launched.
    * The Chart of Accounts consists of the list of all the general ledger accounts
      required to maintain the transactions of Singapore.
    * On that particular wizard, you will be asked to pass the name of the company,
      the chart template to follow, the no. of digits to generate, the code for your
      account and bank account, currency to create journals.

    * The Chart of Taxes would display the different types/groups of taxes such as
      Standard Rates, Zeroed, Exempted, MES and Out of Scope.
    * The tax codes are specified considering the Tax Group and for easy accessibility of
      submission of GST Tax Report.

    """,
    # Modules that must be installed before this one.
    'depends': ['base', 'account', 'account_chart'],
    # No demo data shipped with this localization.
    'demo': [
    ],
    # XML data files loaded in order: tax codes, chart template, taxes, wizard.
    'data': [
         'l10n_sg_chart_tax_code.xml',
         'l10n_sg_chart.xml',
         'l10n_sg_chart_tax.xml',
         'l10n_sg_wizard.xml',
    ],
    'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
bdang2012/taiga-back-casting
taiga/projects/tasks/serializers.py
1
4475
# Copyright (C) 2014-2015 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014-2015 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2015 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from taiga.base.api import serializers
from taiga.base.fields import TagsField
from taiga.base.fields import PgArrayField
from taiga.base.neighbors import NeighborsSerializerMixin

from taiga.mdrender.service import render as mdrender
from taiga.projects.validators import ProjectExistsValidator
from taiga.projects.milestones.validators import SprintExistsValidator
from taiga.projects.tasks.validators import TaskExistsValidator
from taiga.projects.notifications.validators import WatchersValidator
from taiga.projects.serializers import BasicTaskStatusSerializerSerializer
from taiga.projects.notifications.mixins import EditableWatchedResourceModelSerializer
from taiga.projects.votes.mixins.serializers import VoteResourceSerializerMixin
from taiga.users.serializers import UserBasicInfoSerializer

from . import models


class TaskSerializer(WatchersValidator, VoteResourceSerializerMixin, EditableWatchedResourceModelSerializer,
                     serializers.ModelSerializer):
    """Full REST serializer for Task objects: model fields plus derived,
    read-only presentation fields (rendered markdown, related-object info)."""

    tags = TagsField(required=False, default=[])
    external_reference = PgArrayField(required=False)
    # Write-only in practice: always serialized as "" (see get_comment).
    comment = serializers.SerializerMethodField("get_comment")
    milestone_slug = serializers.SerializerMethodField("get_milestone_slug")
    blocked_note_html = serializers.SerializerMethodField("get_blocked_note_html")
    description_html = serializers.SerializerMethodField("get_description_html")
    is_closed = serializers.SerializerMethodField("get_is_closed")
    # Expanded snapshots of related objects, serialized alongside the raw FKs.
    status_extra_info = BasicTaskStatusSerializerSerializer(source="status", required=False, read_only=True)
    assigned_to_extra_info = UserBasicInfoSerializer(source="assigned_to", required=False, read_only=True)
    owner_extra_info = UserBasicInfoSerializer(source="owner", required=False, read_only=True)

    class Meta:
        model = models.Task
        read_only_fields = ('id', 'ref', 'created_date', 'modified_date')

    def get_comment(self, obj):
        # Comments are accepted on write but never echoed back on read.
        return ""

    def get_milestone_slug(self, obj):
        """Slug of the task's sprint, or None when unassigned."""
        if obj.milestone:
            return obj.milestone.slug
        else:
            return None

    def get_blocked_note_html(self, obj):
        # Markdown-render the blocked note in the task's project context.
        return mdrender(obj.project, obj.blocked_note)

    def get_description_html(self, obj):
        # Markdown-render the description in the task's project context.
        return mdrender(obj.project, obj.description)

    def get_is_closed(self, obj):
        # Closed-ness is a property of the task's status, not the task itself.
        return obj.status.is_closed


class TaskListSerializer(TaskSerializer):
    """Lighter variant for list endpoints: omits the (potentially large)
    description fields."""

    class Meta:
        model = models.Task
        read_only_fields = ('id', 'ref', 'created_date', 'modified_date')
        exclude=("description", "description_html")


class TaskNeighborsSerializer(NeighborsSerializerMixin, TaskSerializer):
    """TaskSerializer plus previous/next neighbor references."""

    def serialize_neighbor(self, neighbor):
        return NeighborTaskSerializer(neighbor).data


class NeighborTaskSerializer(serializers.ModelSerializer):
    """Minimal task representation used for neighbor links."""

    class Meta:
        model = models.Task
        fields = ("id", "ref", "subject")
        depth = 0


class TasksBulkSerializer(ProjectExistsValidator, SprintExistsValidator, TaskExistsValidator,
                          serializers.Serializer):
    """Input serializer for bulk task creation; bulk_tasks is a newline
    separated text payload parsed downstream."""

    project_id = serializers.IntegerField()
    sprint_id = serializers.IntegerField(required=False)
    status_id = serializers.IntegerField(required=False)
    us_id = serializers.IntegerField(required=False)
    bulk_tasks = serializers.CharField()


## Order bulk serializers

class _TaskOrderBulkSerializer(TaskExistsValidator, serializers.Serializer):
    # One (task, new order) pair within an UpdateTasksOrderBulkSerializer.
    task_id = serializers.IntegerField()
    order = serializers.IntegerField()


class UpdateTasksOrderBulkSerializer(ProjectExistsValidator, serializers.Serializer):
    """Input serializer for reordering several tasks of a project at once."""

    project_id = serializers.IntegerField()
    bulk_tasks = _TaskOrderBulkSerializer(many=True)
agpl-3.0
SnakeJenny/TensorFlow
tensorflow/python/ops/tensor_array_grad.py
58
8687
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in tensor_array_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import ops
from tensorflow.python.ops import tensor_array_ops


# TODO(b/31222613): These ops may be differentiable, and there may be
# latent bugs here.
ops.NotDifferentiable("TensorArray")
ops.NotDifferentiable("TensorArrayGrad")
ops.NotDifferentiable("TensorArraySize")
ops.NotDifferentiable("TensorArrayClose")

ops.NotDifferentiable("TensorArrayV2")
ops.NotDifferentiable("TensorArrayGradV2")
ops.NotDifferentiable("TensorArraySizeV2")
ops.NotDifferentiable("TensorArrayCloseV2")

ops.NotDifferentiable("TensorArrayV3")
ops.NotDifferentiable("TensorArrayGradV3")
ops.NotDifferentiable("TensorArraySizeV3")
ops.NotDifferentiable("TensorArrayCloseV3")


def _GetGradSource(op_or_tensor):
  """Identify which call to tf.gradients created this gradient op or tensor.

  TensorArray gradient calls use an accumulator TensorArray object.  If
  multiple gradients are calculated and run in the same session, the multiple
  gradient nodes may accidentally flow through the same accumulator
  TensorArray.  This double counting breaks the TensorArray gradient flow.

  The solution is to identify which gradient call this particular
  TensorArray*Grad is being called in, by looking at the input gradient
  tensor's name, and create or lookup an accumulator gradient TensorArray
  associated with this specific call.  This solves any confusion and ensures
  different gradients from the same forward graph get their own accumulators.

  This function creates the unique label associated with the tf.gradients call
  that is used to create the gradient TensorArray.

  Args:
    op_or_tensor: `Tensor` or `Operation` which is an input to a
      TensorArray*Grad call.

  Returns:
    A python string, the unique label associated with this particular
    gradients calculation.

  Raises:
    ValueError: If not called within a gradients calculation.
  """
  # tf.gradients prefixes the ops it creates with a "gradients..." name
  # scope; the label is the name path up to and including the innermost
  # such scope component.
  name_tokens = op_or_tensor.name.split("/")
  grad_pos = [i for i, x in enumerate(name_tokens) if x.startswith("gradients")]
  if not grad_pos:
    raise ValueError(
        "Expected op/tensor name to start with gradients (excluding scope)"
        ", got: %s" % op_or_tensor.name)
  return "/".join(name_tokens[:grad_pos[-1] + 1])


@ops.RegisterGradient("TensorArrayRead")
@ops.RegisterGradient("TensorArrayReadV2")
@ops.RegisterGradient("TensorArrayReadV3")
def _TensorArrayReadGrad(op, grad):
  """Gradient for TensorArrayRead.

  Args:
    op: Forward TensorArrayRead op.
    grad: Gradient `Tensor` to TensorArrayRead.

  Returns:
    A flow `Tensor`, which can be used in control dependencies to
    force the write of `grad` to the gradient `TensorArray`.
  """
  # Note: the forward flow dependency in the call to grad() is necessary for
  # the case of dynamic sized TensorArrays.  When creating the gradient
  # TensorArray, the final size of the forward array must be known.
  # For this we need to wait until it has been created by depending on
  # the input flow of the original op.
  handle = op.inputs[0]
  index = op.inputs[1]
  flow = op.inputs[2]
  dtype = op.get_attr("dtype")
  grad_source = _GetGradSource(grad)
  g = tensor_array_ops.TensorArray(
      dtype=dtype, handle=handle, flow=flow).grad(
          source=grad_source, flow=flow)
  w_g = g.write(index, grad)
  # One entry per forward input (handle, index, flow); only flow is
  # differentiable.
  return [None, None, w_g.flow]


@ops.RegisterGradient("TensorArrayWrite")
@ops.RegisterGradient("TensorArrayWriteV2")
@ops.RegisterGradient("TensorArrayWriteV3")
def _TensorArrayWriteGrad(op, flow):
  """Gradient for TensorArrayWrite.

  Args:
    op: Forward TensorArrayWrite op.
    flow: Gradient `Tensor` flow to TensorArrayWrite.

  Returns:
    A grad `Tensor`, the gradient created in an upstream ReadGrad or PackGrad.
  """
  # handle is the output store_handle of TensorArrayReadGrad or
  # the handle output of TensorArrayWriteGrad.  we must use this one.
  handle = op.inputs[0]
  index = op.inputs[1]
  dtype = op.get_attr("T")
  grad_source = _GetGradSource(flow)
  g = tensor_array_ops.TensorArray(
      dtype=dtype, handle=handle, flow=flow).grad(
          source=grad_source, flow=flow)
  grad = g.read(index)
  # One entry per forward input (handle, index, value, flow); the value's
  # gradient is read back from the accumulator.
  return [None, None, grad, flow]


@ops.RegisterGradient("TensorArrayGather")
@ops.RegisterGradient("TensorArrayGatherV2")
@ops.RegisterGradient("TensorArrayGatherV3")
def _TensorArrayGatherGrad(op, grad):
  """Gradient for TensorArrayGather.

  Args:
    op: Forward TensorArrayGather op.
    grad: Gradient `Tensor` to TensorArrayGather.

  Returns:
    A flow `Tensor`, which can be used in control dependencies to
    force the write of `grad` to the gradient `TensorArray`.
  """
  # Note: the forward flow dependency in the call to grad() is necessary for
  # the case of dynamic sized TensorArrays.  When creating the gradient
  # TensorArray, the final size of the forward array must be known.
  # For this we need to wait until it has been created by depending on
  # the input flow of the original op.
  handle = op.inputs[0]
  indices = op.inputs[1]
  flow = op.inputs[2]
  dtype = op.get_attr("dtype")
  grad_source = _GetGradSource(grad)
  g = tensor_array_ops.TensorArray(
      dtype=dtype, handle=handle, flow=flow).grad(
          source=grad_source, flow=flow)
  # gather's adjoint is scatter: route each gathered gradient slice back to
  # its original index.
  u_g = g.scatter(indices, grad)
  return [None, None, u_g.flow]


@ops.RegisterGradient("TensorArrayScatter")
@ops.RegisterGradient("TensorArrayScatterV2")
@ops.RegisterGradient("TensorArrayScatterV3")
def _TensorArrayScatterGrad(op, flow):
  """Gradient for TensorArrayScatter.

  Args:
    op: Forward TensorArrayScatter op.
    flow: Gradient `Tensor` flow to TensorArrayScatter.

  Returns:
    A grad `Tensor`, the gradient created in upstream ReadGrads or PackGrad.
  """
  handle = op.inputs[0]
  indices = op.inputs[1]
  dtype = op.get_attr("T")
  grad_source = _GetGradSource(flow)
  g = tensor_array_ops.TensorArray(
      dtype=dtype, handle=handle, flow=flow).grad(
          source=grad_source, flow=flow)
  # scatter's adjoint is gather over the same indices.
  grad = g.gather(indices)
  return [None, None, grad, flow]


@ops.RegisterGradient("TensorArrayConcat")
@ops.RegisterGradient("TensorArrayConcatV2")
@ops.RegisterGradient("TensorArrayConcatV3")
def _TensorArrayConcatGrad(op, grad, unused_lengths_grad):
  """Gradient for TensorArrayConcat.

  Args:
    op: Forward TensorArrayConcat op.
    grad: Gradient `Tensor` to TensorArrayConcat.

  Returns:
    A flow `Tensor`, which can be used in control dependencies to
    force the write of `grad` to the gradient `TensorArray`.
  """
  # Note: the forward flow dependency in the call to grad() is necessary for
  # the case of dynamic sized TensorArrays.  When creating the gradient
  # TensorArray, the final size of the forward array must be known.
  # For this we need to wait until it has been created by depending on
  # the input flow of the original op.
  handle = op.inputs[0]
  flow = op.inputs[1]
  # The forward op's second output holds the per-element lengths, needed to
  # split the concatenated gradient back into elements.
  lengths = op.outputs[1]
  dtype = op.get_attr("dtype")
  grad_source = _GetGradSource(grad)
  g = tensor_array_ops.TensorArray(
      dtype=dtype, handle=handle, flow=flow).grad(
          source=grad_source, flow=flow)
  u_g = g.split(grad, lengths=lengths)
  # handle, flow_in
  return [None, u_g.flow]


@ops.RegisterGradient("TensorArraySplit")
@ops.RegisterGradient("TensorArraySplitV2")
@ops.RegisterGradient("TensorArraySplitV3")
def _TensorArraySplitGrad(op, flow):
  """Gradient for TensorArraySplit.

  Args:
    op: Forward TensorArraySplit op.
    flow: Gradient `Tensor` flow to TensorArraySplit.

  Returns:
    A grad `Tensor`, the gradient created in upstream ReadGrads or PackGrad.
  """
  handle = op.inputs[0]
  dtype = op.get_attr("T")
  grad_source = _GetGradSource(flow)
  g = tensor_array_ops.TensorArray(
      dtype=dtype, handle=handle, flow=flow).grad(
          source=grad_source, flow=flow)
  # split's adjoint is concat: stitch element gradients back together.
  grad = g.concat()
  # handle, value, lengths, flow_in
  return [None, grad, None, flow]
apache-2.0
rajat1994/scikit-learn
sklearn/decomposition/tests/test_dict_learning.py
85
8565
import numpy as np from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_raises from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import TempMemmap from sklearn.decomposition import DictionaryLearning from sklearn.decomposition import MiniBatchDictionaryLearning from sklearn.decomposition import SparseCoder from sklearn.decomposition import dict_learning_online from sklearn.decomposition import sparse_encode rng_global = np.random.RandomState(0) n_samples, n_features = 10, 8 X = rng_global.randn(n_samples, n_features) def test_dict_learning_shapes(): n_components = 5 dico = DictionaryLearning(n_components, random_state=0).fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_overcomplete(): n_components = 12 dico = DictionaryLearning(n_components, random_state=0).fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_reconstruction(): n_components = 12 dico = DictionaryLearning(n_components, transform_algorithm='omp', transform_alpha=0.001, random_state=0) code = dico.fit(X).transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X) dico.set_params(transform_algorithm='lasso_lars') code = dico.transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2) # used to test lars here too, but there's no guarantee the number of # nonzero atoms is right. 
def test_dict_learning_reconstruction_parallel(): # regression test that parallel reconstruction works with n_jobs=-1 n_components = 12 dico = DictionaryLearning(n_components, transform_algorithm='omp', transform_alpha=0.001, random_state=0, n_jobs=-1) code = dico.fit(X).transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X) dico.set_params(transform_algorithm='lasso_lars') code = dico.transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2) def test_dict_learning_lassocd_readonly_data(): n_components = 12 with TempMemmap(X) as X_read_only: dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd', transform_alpha=0.001, random_state=0, n_jobs=-1) code = dico.fit(X_read_only).transform(X_read_only) assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2) def test_dict_learning_nonzero_coefs(): n_components = 4 dico = DictionaryLearning(n_components, transform_algorithm='lars', transform_n_nonzero_coefs=3, random_state=0) code = dico.fit(X).transform(X[1]) assert_true(len(np.flatnonzero(code)) == 3) dico.set_params(transform_algorithm='omp') code = dico.transform(X[1]) assert_equal(len(np.flatnonzero(code)), 3) def test_dict_learning_unknown_fit_algorithm(): n_components = 5 dico = DictionaryLearning(n_components, fit_algorithm='<unknown>') assert_raises(ValueError, dico.fit, X) def test_dict_learning_split(): n_components = 5 dico = DictionaryLearning(n_components, transform_algorithm='threshold', random_state=0) code = dico.fit(X).transform(X) dico.split_sign = True split_code = dico.transform(X) assert_array_equal(split_code[:, :n_components] - split_code[:, n_components:], code) def test_dict_learning_online_shapes(): rng = np.random.RandomState(0) n_components = 8 code, dictionary = dict_learning_online(X, n_components=n_components, alpha=1, random_state=rng) assert_equal(code.shape, (n_samples, n_components)) assert_equal(dictionary.shape, (n_components, n_features)) 
assert_equal(np.dot(code, dictionary).shape, X.shape) def test_dict_learning_online_verbosity(): n_components = 5 # test verbosity from sklearn.externals.six.moves import cStringIO as StringIO import sys old_stdout = sys.stdout try: sys.stdout = StringIO() dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1, random_state=0) dico.fit(X) dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2, random_state=0) dico.fit(X) dict_learning_online(X, n_components=n_components, alpha=1, verbose=1, random_state=0) dict_learning_online(X, n_components=n_components, alpha=1, verbose=2, random_state=0) finally: sys.stdout = old_stdout assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_online_estimator_shapes(): n_components = 5 dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0) dico.fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_online_overcomplete(): n_components = 12 dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0).fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_online_initialization(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) dico = MiniBatchDictionaryLearning(n_components, n_iter=0, dict_init=V, random_state=0).fit(X) assert_array_equal(dico.components_, V) def test_dict_learning_online_partial_fit(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X), batch_size=1, alpha=1, shuffle=False, dict_init=V, random_state=0).fit(X) dict2 = MiniBatchDictionaryLearning(n_components, alpha=1, n_iter=1, dict_init=V, random_state=0) for i in range(10): for sample in X: dict2.partial_fit(sample) assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) 
== 0)) assert_array_almost_equal(dict1.components_, dict2.components_, decimal=2) def test_sparse_encode_shapes(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'): code = sparse_encode(X, V, algorithm=algo) assert_equal(code.shape, (n_samples, n_components)) def test_sparse_encode_error(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] code = sparse_encode(X, V, alpha=0.001) assert_true(not np.all(code == 0)) assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1) def test_sparse_encode_error_default_sparsity(): rng = np.random.RandomState(0) X = rng.randn(100, 64) D = rng.randn(2, 64) code = ignore_warnings(sparse_encode)(X, D, algorithm='omp', n_nonzero_coefs=None) assert_equal(code.shape, (100, 2)) def test_unknown_method(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>") def test_sparse_coder_estimator(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars', transform_alpha=0.001).transform(X) assert_true(not np.all(code == 0)) assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
bsd-3-clause
paolodedios/tensorflow
tensorflow/python/keras/backend_config_test.py
6
1963
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for backend_config.""" from tensorflow.python.keras import backend from tensorflow.python.keras import backend_config from tensorflow.python.keras import combinations from tensorflow.python.platform import test @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class BackendConfigTest(test.TestCase): def test_backend(self): self.assertEqual(backend.backend(), 'tensorflow') def test_epsilon(self): epsilon = 1e-2 backend_config.set_epsilon(epsilon) self.assertEqual(backend_config.epsilon(), epsilon) backend_config.set_epsilon(1e-7) self.assertEqual(backend_config.epsilon(), 1e-7) def test_floatx(self): floatx = 'float64' backend_config.set_floatx(floatx) self.assertEqual(backend_config.floatx(), floatx) backend_config.set_floatx('float32') self.assertEqual(backend_config.floatx(), 'float32') def test_image_data_format(self): image_data_format = 'channels_first' backend_config.set_image_data_format(image_data_format) self.assertEqual(backend_config.image_data_format(), image_data_format) backend_config.set_image_data_format('channels_last') self.assertEqual(backend_config.image_data_format(), 'channels_last') if __name__ == '__main__': test.main()
apache-2.0
AOSPU/external_chromium_org
third_party/jinja2/lexer.py
635
28393
# -*- coding: utf-8 -*- """ jinja2.lexer ~~~~~~~~~~~~ This module implements a Jinja / Python combination lexer. The `Lexer` class provided by this module is used to do some preprocessing for Jinja. On the one hand it filters out invalid operators like the bitshift operators we don't allow in templates. On the other hand it separates template code and python code in expressions. :copyright: (c) 2010 by the Jinja Team. :license: BSD, see LICENSE for more details. """ import re from operator import itemgetter from collections import deque from jinja2.exceptions import TemplateSyntaxError from jinja2.utils import LRUCache from jinja2._compat import next, iteritems, implements_iterator, text_type, \ intern # cache for the lexers. Exists in order to be able to have multiple # environments with the same lexer _lexer_cache = LRUCache(50) # static regular expressions whitespace_re = re.compile(r'\s+', re.U) string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'" r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S) integer_re = re.compile(r'\d+') # we use the unicode identifier rule if this python version is able # to handle unicode identifiers, otherwise the standard ASCII one. 
try: compile('föö', '<unknown>', 'eval') except SyntaxError: name_re = re.compile(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b') else: from jinja2 import _stringdefs name_re = re.compile(r'[%s][%s]*' % (_stringdefs.xid_start, _stringdefs.xid_continue)) float_re = re.compile(r'(?<!\.)\d+\.\d+') newline_re = re.compile(r'(\r\n|\r|\n)') # internal the tokens and keep references to them TOKEN_ADD = intern('add') TOKEN_ASSIGN = intern('assign') TOKEN_COLON = intern('colon') TOKEN_COMMA = intern('comma') TOKEN_DIV = intern('div') TOKEN_DOT = intern('dot') TOKEN_EQ = intern('eq') TOKEN_FLOORDIV = intern('floordiv') TOKEN_GT = intern('gt') TOKEN_GTEQ = intern('gteq') TOKEN_LBRACE = intern('lbrace') TOKEN_LBRACKET = intern('lbracket') TOKEN_LPAREN = intern('lparen') TOKEN_LT = intern('lt') TOKEN_LTEQ = intern('lteq') TOKEN_MOD = intern('mod') TOKEN_MUL = intern('mul') TOKEN_NE = intern('ne') TOKEN_PIPE = intern('pipe') TOKEN_POW = intern('pow') TOKEN_RBRACE = intern('rbrace') TOKEN_RBRACKET = intern('rbracket') TOKEN_RPAREN = intern('rparen') TOKEN_SEMICOLON = intern('semicolon') TOKEN_SUB = intern('sub') TOKEN_TILDE = intern('tilde') TOKEN_WHITESPACE = intern('whitespace') TOKEN_FLOAT = intern('float') TOKEN_INTEGER = intern('integer') TOKEN_NAME = intern('name') TOKEN_STRING = intern('string') TOKEN_OPERATOR = intern('operator') TOKEN_BLOCK_BEGIN = intern('block_begin') TOKEN_BLOCK_END = intern('block_end') TOKEN_VARIABLE_BEGIN = intern('variable_begin') TOKEN_VARIABLE_END = intern('variable_end') TOKEN_RAW_BEGIN = intern('raw_begin') TOKEN_RAW_END = intern('raw_end') TOKEN_COMMENT_BEGIN = intern('comment_begin') TOKEN_COMMENT_END = intern('comment_end') TOKEN_COMMENT = intern('comment') TOKEN_LINESTATEMENT_BEGIN = intern('linestatement_begin') TOKEN_LINESTATEMENT_END = intern('linestatement_end') TOKEN_LINECOMMENT_BEGIN = intern('linecomment_begin') TOKEN_LINECOMMENT_END = intern('linecomment_end') TOKEN_LINECOMMENT = intern('linecomment') TOKEN_DATA = intern('data') TOKEN_INITIAL = 
intern('initial') TOKEN_EOF = intern('eof') # bind operators to token types operators = { '+': TOKEN_ADD, '-': TOKEN_SUB, '/': TOKEN_DIV, '//': TOKEN_FLOORDIV, '*': TOKEN_MUL, '%': TOKEN_MOD, '**': TOKEN_POW, '~': TOKEN_TILDE, '[': TOKEN_LBRACKET, ']': TOKEN_RBRACKET, '(': TOKEN_LPAREN, ')': TOKEN_RPAREN, '{': TOKEN_LBRACE, '}': TOKEN_RBRACE, '==': TOKEN_EQ, '!=': TOKEN_NE, '>': TOKEN_GT, '>=': TOKEN_GTEQ, '<': TOKEN_LT, '<=': TOKEN_LTEQ, '=': TOKEN_ASSIGN, '.': TOKEN_DOT, ':': TOKEN_COLON, '|': TOKEN_PIPE, ',': TOKEN_COMMA, ';': TOKEN_SEMICOLON } reverse_operators = dict([(v, k) for k, v in iteritems(operators)]) assert len(operators) == len(reverse_operators), 'operators dropped' operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in sorted(operators, key=lambda x: -len(x)))) ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN, TOKEN_COMMENT, TOKEN_COMMENT_END, TOKEN_WHITESPACE, TOKEN_WHITESPACE, TOKEN_LINECOMMENT_BEGIN, TOKEN_LINECOMMENT_END, TOKEN_LINECOMMENT]) ignore_if_empty = frozenset([TOKEN_WHITESPACE, TOKEN_DATA, TOKEN_COMMENT, TOKEN_LINECOMMENT]) def _describe_token_type(token_type): if token_type in reverse_operators: return reverse_operators[token_type] return { TOKEN_COMMENT_BEGIN: 'begin of comment', TOKEN_COMMENT_END: 'end of comment', TOKEN_COMMENT: 'comment', TOKEN_LINECOMMENT: 'comment', TOKEN_BLOCK_BEGIN: 'begin of statement block', TOKEN_BLOCK_END: 'end of statement block', TOKEN_VARIABLE_BEGIN: 'begin of print statement', TOKEN_VARIABLE_END: 'end of print statement', TOKEN_LINESTATEMENT_BEGIN: 'begin of line statement', TOKEN_LINESTATEMENT_END: 'end of line statement', TOKEN_DATA: 'template data / text', TOKEN_EOF: 'end of template' }.get(token_type, token_type) def describe_token(token): """Returns a description of the token.""" if token.type == 'name': return token.value return _describe_token_type(token.type) def describe_token_expr(expr): """Like `describe_token` but for token expressions.""" if ':' in expr: type, value = 
expr.split(':', 1) if type == 'name': return value else: type = expr return _describe_token_type(type) def count_newlines(value): """Count the number of newline characters in the string. This is useful for extensions that filter a stream. """ return len(newline_re.findall(value)) def compile_rules(environment): """Compiles all the rules from the environment into a list of rules.""" e = re.escape rules = [ (len(environment.comment_start_string), 'comment', e(environment.comment_start_string)), (len(environment.block_start_string), 'block', e(environment.block_start_string)), (len(environment.variable_start_string), 'variable', e(environment.variable_start_string)) ] if environment.line_statement_prefix is not None: rules.append((len(environment.line_statement_prefix), 'linestatement', r'^[ \t\v]*' + e(environment.line_statement_prefix))) if environment.line_comment_prefix is not None: rules.append((len(environment.line_comment_prefix), 'linecomment', r'(?:^|(?<=\S))[^\S\r\n]*' + e(environment.line_comment_prefix))) return [x[1:] for x in sorted(rules, reverse=True)] class Failure(object): """Class that raises a `TemplateSyntaxError` if called. Used by the `Lexer` to specify known errors. """ def __init__(self, message, cls=TemplateSyntaxError): self.message = message self.error_class = cls def __call__(self, lineno, filename): raise self.error_class(self.message, lineno, filename) class Token(tuple): """Token class.""" __slots__ = () lineno, type, value = (property(itemgetter(x)) for x in range(3)) def __new__(cls, lineno, type, value): return tuple.__new__(cls, (lineno, intern(str(type)), value)) def __str__(self): if self.type in reverse_operators: return reverse_operators[self.type] elif self.type == 'name': return self.value return self.type def test(self, expr): """Test a token against a token expression. This can either be a token type or ``'token_type:token_value'``. This can only test against string values and types. 
""" # here we do a regular string equality check as test_any is usually # passed an iterable of not interned strings. if self.type == expr: return True elif ':' in expr: return expr.split(':', 1) == [self.type, self.value] return False def test_any(self, *iterable): """Test against multiple token expressions.""" for expr in iterable: if self.test(expr): return True return False def __repr__(self): return 'Token(%r, %r, %r)' % ( self.lineno, self.type, self.value ) @implements_iterator class TokenStreamIterator(object): """The iterator for tokenstreams. Iterate over the stream until the eof token is reached. """ def __init__(self, stream): self.stream = stream def __iter__(self): return self def __next__(self): token = self.stream.current if token.type is TOKEN_EOF: self.stream.close() raise StopIteration() next(self.stream) return token @implements_iterator class TokenStream(object): """A token stream is an iterable that yields :class:`Token`\s. The parser however does not iterate over it but calls :meth:`next` to go one token ahead. The current active token is stored as :attr:`current`. """ def __init__(self, generator, name, filename): self._iter = iter(generator) self._pushed = deque() self.name = name self.filename = filename self.closed = False self.current = Token(1, TOKEN_INITIAL, '') next(self) def __iter__(self): return TokenStreamIterator(self) def __bool__(self): return bool(self._pushed) or self.current.type is not TOKEN_EOF __nonzero__ = __bool__ # py2 eos = property(lambda x: not x, doc="Are we at the end of the stream?") def push(self, token): """Push a token back to the stream.""" self._pushed.append(token) def look(self): """Look at the next token.""" old_token = next(self) result = self.current self.push(result) self.current = old_token return result def skip(self, n=1): """Got n tokens ahead.""" for x in range(n): next(self) def next_if(self, expr): """Perform the token test and return the token if it matched. 
Otherwise the return value is `None`. """ if self.current.test(expr): return next(self) def skip_if(self, expr): """Like :meth:`next_if` but only returns `True` or `False`.""" return self.next_if(expr) is not None def __next__(self): """Go one token ahead and return the old one""" rv = self.current if self._pushed: self.current = self._pushed.popleft() elif self.current.type is not TOKEN_EOF: try: self.current = next(self._iter) except StopIteration: self.close() return rv def close(self): """Close the stream.""" self.current = Token(self.current.lineno, TOKEN_EOF, '') self._iter = None self.closed = True def expect(self, expr): """Expect a given token type and return it. This accepts the same argument as :meth:`jinja2.lexer.Token.test`. """ if not self.current.test(expr): expr = describe_token_expr(expr) if self.current.type is TOKEN_EOF: raise TemplateSyntaxError('unexpected end of template, ' 'expected %r.' % expr, self.current.lineno, self.name, self.filename) raise TemplateSyntaxError("expected token %r, got %r" % (expr, describe_token(self.current)), self.current.lineno, self.name, self.filename) try: return self.current finally: next(self) def get_lexer(environment): """Return a lexer which is probably cached.""" key = (environment.block_start_string, environment.block_end_string, environment.variable_start_string, environment.variable_end_string, environment.comment_start_string, environment.comment_end_string, environment.line_statement_prefix, environment.line_comment_prefix, environment.trim_blocks, environment.lstrip_blocks, environment.newline_sequence, environment.keep_trailing_newline) lexer = _lexer_cache.get(key) if lexer is None: lexer = Lexer(environment) _lexer_cache[key] = lexer return lexer class Lexer(object): """Class that implements a lexer for a given environment. Automatically created by the environment class, usually you don't have to do that. Note that the lexer is not automatically bound to an environment. 
Multiple environments can share the same lexer. """ def __init__(self, environment): # shortcuts c = lambda x: re.compile(x, re.M | re.S) e = re.escape # lexing rules for tags tag_rules = [ (whitespace_re, TOKEN_WHITESPACE, None), (float_re, TOKEN_FLOAT, None), (integer_re, TOKEN_INTEGER, None), (name_re, TOKEN_NAME, None), (string_re, TOKEN_STRING, None), (operator_re, TOKEN_OPERATOR, None) ] # assemble the root lexing rule. because "|" is ungreedy # we have to sort by length so that the lexer continues working # as expected when we have parsing rules like <% for block and # <%= for variables. (if someone wants asp like syntax) # variables are just part of the rules if variable processing # is required. root_tag_rules = compile_rules(environment) # block suffix if trimming is enabled block_suffix_re = environment.trim_blocks and '\\n?' or '' # strip leading spaces if lstrip_blocks is enabled prefix_re = {} if environment.lstrip_blocks: # use '{%+' to manually disable lstrip_blocks behavior no_lstrip_re = e('+') # detect overlap between block and variable or comment strings block_diff = c(r'^%s(.*)' % e(environment.block_start_string)) # make sure we don't mistake a block for a variable or a comment m = block_diff.match(environment.comment_start_string) no_lstrip_re += m and r'|%s' % e(m.group(1)) or '' m = block_diff.match(environment.variable_start_string) no_lstrip_re += m and r'|%s' % e(m.group(1)) or '' # detect overlap between comment and variable strings comment_diff = c(r'^%s(.*)' % e(environment.comment_start_string)) m = comment_diff.match(environment.variable_start_string) no_variable_re = m and r'(?!%s)' % e(m.group(1)) or '' lstrip_re = r'^[ \t]*' block_prefix_re = r'%s%s(?!%s)|%s\+?' % ( lstrip_re, e(environment.block_start_string), no_lstrip_re, e(environment.block_start_string), ) comment_prefix_re = r'%s%s%s|%s\+?' 
% ( lstrip_re, e(environment.comment_start_string), no_variable_re, e(environment.comment_start_string), ) prefix_re['block'] = block_prefix_re prefix_re['comment'] = comment_prefix_re else: block_prefix_re = '%s' % e(environment.block_start_string) self.newline_sequence = environment.newline_sequence self.keep_trailing_newline = environment.keep_trailing_newline # global lexing rules self.rules = { 'root': [ # directives (c('(.*?)(?:%s)' % '|'.join( [r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*(?:\-%s\s*|%s))' % ( e(environment.block_start_string), block_prefix_re, e(environment.block_end_string), e(environment.block_end_string) )] + [ r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, prefix_re.get(n,r)) for n, r in root_tag_rules ])), (TOKEN_DATA, '#bygroup'), '#bygroup'), # data (c('.+'), TOKEN_DATA, None) ], # comments TOKEN_COMMENT_BEGIN: [ (c(r'(.*?)((?:\-%s\s*|%s)%s)' % ( e(environment.comment_end_string), e(environment.comment_end_string), block_suffix_re )), (TOKEN_COMMENT, TOKEN_COMMENT_END), '#pop'), (c('(.)'), (Failure('Missing end of comment tag'),), None) ], # blocks TOKEN_BLOCK_BEGIN: [ (c('(?:\-%s\s*|%s)%s' % ( e(environment.block_end_string), e(environment.block_end_string), block_suffix_re )), TOKEN_BLOCK_END, '#pop'), ] + tag_rules, # variables TOKEN_VARIABLE_BEGIN: [ (c('\-%s\s*|%s' % ( e(environment.variable_end_string), e(environment.variable_end_string) )), TOKEN_VARIABLE_END, '#pop') ] + tag_rules, # raw block TOKEN_RAW_BEGIN: [ (c('(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % ( e(environment.block_start_string), block_prefix_re, e(environment.block_end_string), e(environment.block_end_string), block_suffix_re )), (TOKEN_DATA, TOKEN_RAW_END), '#pop'), (c('(.)'), (Failure('Missing end of raw directive'),), None) ], # line statements TOKEN_LINESTATEMENT_BEGIN: [ (c(r'\s*(\n|$)'), TOKEN_LINESTATEMENT_END, '#pop') ] + tag_rules, # line comments TOKEN_LINECOMMENT_BEGIN: [ (c(r'(.*?)()(?=\n|$)'), (TOKEN_LINECOMMENT, TOKEN_LINECOMMENT_END), '#pop') ] } def 
_normalize_newlines(self, value): """Called for strings and template data to normalize it to unicode.""" return newline_re.sub(self.newline_sequence, value) def tokenize(self, source, name=None, filename=None, state=None): """Calls tokeniter + tokenize and wraps it in a token stream. """ stream = self.tokeniter(source, name, filename, state) return TokenStream(self.wrap(stream, name, filename), name, filename) def wrap(self, stream, name=None, filename=None): """This is called with the stream as returned by `tokenize` and wraps every token in a :class:`Token` and converts the value. """ for lineno, token, value in stream: if token in ignored_tokens: continue elif token == 'linestatement_begin': token = 'block_begin' elif token == 'linestatement_end': token = 'block_end' # we are not interested in those tokens in the parser elif token in ('raw_begin', 'raw_end'): continue elif token == 'data': value = self._normalize_newlines(value) elif token == 'keyword': token = value elif token == 'name': value = str(value) elif token == 'string': # try to unescape string try: value = self._normalize_newlines(value[1:-1]) \ .encode('ascii', 'backslashreplace') \ .decode('unicode-escape') except Exception as e: msg = str(e).split(':')[-1].strip() raise TemplateSyntaxError(msg, lineno, name, filename) # if we can express it as bytestring (ascii only) # we do that for support of semi broken APIs # as datetime.datetime.strftime. On python 3 this # call becomes a noop thanks to 2to3 try: value = str(value) except UnicodeError: pass elif token == 'integer': value = int(value) elif token == 'float': value = float(value) elif token == 'operator': token = operators[value] yield Token(lineno, token, value) def tokeniter(self, source, name, filename=None, state=None): """This method tokenizes the text and returns the tokens in a generator. Use this method if you just want to tokenize a template. 
""" source = text_type(source) lines = source.splitlines() if self.keep_trailing_newline and source: for newline in ('\r\n', '\r', '\n'): if source.endswith(newline): lines.append('') break source = '\n'.join(lines) pos = 0 lineno = 1 stack = ['root'] if state is not None and state != 'root': assert state in ('variable', 'block'), 'invalid state' stack.append(state + '_begin') else: state = 'root' statetokens = self.rules[stack[-1]] source_length = len(source) balancing_stack = [] while 1: # tokenizer loop for regex, tokens, new_state in statetokens: m = regex.match(source, pos) # if no match we try again with the next rule if m is None: continue # we only match blocks and variables if braces / parentheses # are balanced. continue parsing with the lower rule which # is the operator rule. do this only if the end tags look # like operators if balancing_stack and \ tokens in ('variable_end', 'block_end', 'linestatement_end'): continue # tuples support more options if isinstance(tokens, tuple): for idx, token in enumerate(tokens): # failure group if token.__class__ is Failure: raise token(lineno, filename) # bygroup is a bit more complex, in that case we # yield for the current token the first named # group that matched elif token == '#bygroup': for key, value in iteritems(m.groupdict()): if value is not None: yield lineno, key, value lineno += value.count('\n') break else: raise RuntimeError('%r wanted to resolve ' 'the token dynamically' ' but no group matched' % regex) # normal group else: data = m.group(idx + 1) if data or token not in ignore_if_empty: yield lineno, token, data lineno += data.count('\n') # strings as token just are yielded as it. 
else: data = m.group() # update brace/parentheses balance if tokens == 'operator': if data == '{': balancing_stack.append('}') elif data == '(': balancing_stack.append(')') elif data == '[': balancing_stack.append(']') elif data in ('}', ')', ']'): if not balancing_stack: raise TemplateSyntaxError('unexpected \'%s\'' % data, lineno, name, filename) expected_op = balancing_stack.pop() if expected_op != data: raise TemplateSyntaxError('unexpected \'%s\', ' 'expected \'%s\'' % (data, expected_op), lineno, name, filename) # yield items if data or tokens not in ignore_if_empty: yield lineno, tokens, data lineno += data.count('\n') # fetch new position into new variable so that we can check # if there is a internal parsing error which would result # in an infinite loop pos2 = m.end() # handle state changes if new_state is not None: # remove the uppermost state if new_state == '#pop': stack.pop() # resolve the new state by group checking elif new_state == '#bygroup': for key, value in iteritems(m.groupdict()): if value is not None: stack.append(key) break else: raise RuntimeError('%r wanted to resolve the ' 'new state dynamically but' ' no group matched' % regex) # direct state name given else: stack.append(new_state) statetokens = self.rules[stack[-1]] # we are still at the same position and no stack change. # this means a loop without break condition, avoid that and # raise error elif pos2 == pos: raise RuntimeError('%r yielded empty string without ' 'stack change' % regex) # publish new function and start again pos = pos2 break # if loop terminated without break we haven't found a single match # either we are at the end of the file or we have a problem else: # end of text if pos >= source_length: return # something went wrong raise TemplateSyntaxError('unexpected char %r at %d' % (source[pos], pos), lineno, name, filename)
bsd-3-clause
supriyantomaftuh/django
django/contrib/gis/db/backends/oracle/introspection.py
539
1977
import sys

import cx_Oracle

from django.db.backends.oracle.introspection import DatabaseIntrospection
from django.utils import six


class OracleIntrospection(DatabaseIntrospection):
    """Introspection for GeoDjango's Oracle spatial backend.

    Maps Oracle OBJECT columns to ``GeometryField`` and reads SRID /
    dimension information from the ``USER_SDO_GEOM_METADATA`` view.
    """

    # Associating any OBJECTVAR instances with GeometryField. Of course,
    # this won't work right on Oracle objects that aren't MDSYS.SDO_GEOMETRY,
    # but it is the only object type supported within Django anyways.
    data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
    data_types_reverse[cx_Oracle.OBJECT] = 'GeometryField'

    def get_geometry_type(self, table_name, geo_col):
        """Return ``(field_type, field_params)`` for a geometry column.

        Args:
            table_name: name of the table holding the geometry column.
            geo_col: name of the geometry column.

        Returns:
            A 2-tuple of the Django field class name (always
            ``'GeometryField'``) and a dict of keyword parameters
            (``srid`` when it differs from 4326, ``dim`` when not 2-D).

        Raises:
            Exception: when no USER_SDO_GEOM_METADATA entry exists for the
                given table/column, or the lookup query itself fails.
        """
        cursor = self.connection.cursor()
        try:
            # Querying USER_SDO_GEOM_METADATA to get the SRID and dimension
            # information.
            try:
                cursor.execute(
                    'SELECT "DIMINFO", "SRID" FROM "USER_SDO_GEOM_METADATA" '
                    'WHERE "TABLE_NAME"=%s AND "COLUMN_NAME"=%s',
                    (table_name.upper(), geo_col.upper())
                )
                row = cursor.fetchone()
            except Exception as msg:
                new_msg = (
                    'Could not find entry in USER_SDO_GEOM_METADATA '
                    'corresponding to "%s"."%s"\n'
                    'Error message: %s.') % (table_name, geo_col, msg)
                six.reraise(Exception, Exception(new_msg), sys.exc_info()[2])

            # Fix: a successful query can still match no rows, in which case
            # fetchone() returns None and the tuple unpacking below used to
            # crash with an opaque TypeError. Fail with a clear message
            # instead.
            if row is None:
                raise Exception(
                    'Could not find entry in USER_SDO_GEOM_METADATA '
                    'corresponding to "%s"."%s"' % (table_name, geo_col))

            # TODO: Research way to find a more specific geometry field type
            # for the column's contents.
            field_type = 'GeometryField'

            # Getting the field parameters.
            field_params = {}
            dim, srid = row
            if srid != 4326:
                field_params['srid'] = srid
            # Length of object array ( SDO_DIM_ARRAY ) is number of
            # dimensions.
            dim = len(dim)
            if dim != 2:
                field_params['dim'] = dim
        finally:
            cursor.close()

        return field_type, field_params
bsd-3-clause
JohannesEbke/gtest
test/gtest_throw_on_failure_test.py
2917
5766
#!/usr/bin/env python # # Copyright 2009, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Tests Google Test's throw-on-failure mode with exceptions disabled. This script invokes gtest_throw_on_failure_test_ (a program written with Google Test) with different environments and command line flags. """ __author__ = 'wan@google.com (Zhanyong Wan)' import os import gtest_test_utils # Constants. # The command line flag for enabling/disabling the throw-on-failure mode. 
# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'

# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_throw_on_failure_test_')


# Utilities.


def SetEnvVar(env_var, value):
  """Sets an environment variable to a given value; unsets it when the
  given value is None.
  """

  name = env_var.upper()
  if value is None:
    if name in os.environ:
      del os.environ[name]
  else:
    os.environ[name] = value


def Run(command):
  """Runs a command; returns True/False if its exit code is/isn't 0."""

  print('Running "%s". . .' % ' '.join(command))
  process = gtest_test_utils.Subprocess(command)
  return process.exited and process.exit_code == 0


# The tests.  TODO(wan@google.com): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
  """Tests the throw-on-failure mode."""

  def RunAndVerify(self, env_var_value, flag_value, should_fail):
    """Runs gtest_throw_on_failure_test_ and verifies that it does
    (or does not) exit with a non-zero code.

    Args:
      env_var_value:    value of the GTEST_BREAK_ON_FAILURE environment
                        variable; None if the variable should be unset.
      flag_value:       value of the --gtest_break_on_failure flag;
                        None if the flag should not be present.
      should_fail:      True iff the program is expected to fail.
    """

    SetEnvVar(THROW_ON_FAILURE, env_var_value)

    # Build a human-readable description of the environment variable state.
    env_var_value_msg = (' is not set' if env_var_value is None
                         else '=' + env_var_value)

    # Translate the requested flag value into the actual command-line flag.
    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % THROW_ON_FAILURE
    else:
      flag = '--%s' % THROW_ON_FAILURE

    command = [EXE_PATH] + ([flag] if flag else [])

    should_or_not = 'should' if should_fail else 'should not'

    failed = not Run(command)

    # Restore a clean environment for the next invocation.
    SetEnvVar(THROW_ON_FAILURE, None)

    msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
           'exit code.'
           % (THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
              should_or_not))
    self.assert_(failed == should_fail, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""

    self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)

  def testThrowOnFailureEnvVar(self):
    """Tests using the GTEST_THROW_ON_FAILURE environment variable."""

    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      should_fail=True)

  def testThrowOnFailureFlag(self):
    """Tests using the --gtest_throw_on_failure flag."""

    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      should_fail=True)

  def testThrowOnFailureFlagOverridesEnvVar(self):
    """Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""

    # Every combination of env-var x flag; the flag always wins.
    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      should_fail=True)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      should_fail=True)


if __name__ == '__main__':
  gtest_test_utils.Main()
bsd-3-clause