repo_name
stringlengths
6
100
path
stringlengths
4
294
copies
stringlengths
1
5
size
stringlengths
4
6
content
stringlengths
606
896k
license
stringclasses
15 values
jgorgulho/installs
workstationInstall.py
1
8596
#!/bin/env python
"""Workstation installation script for a Fedora/GNOME 3 system.

Enables deltarpm in dnf, updates the system, installs RPM Fusion, the
Atom editor, every package listed in gnomeShell3Packages.txt and the
Caffeine GNOME Shell extension, then deletes every file in the current
folder that is not listed in filesToKeep.txt.

Must be run as root, or as a user that can acquire sudo rights.
"""
import os
import sys
import shlex
import subprocess
import shutil

#
# Constants
#
WELCOME_STRING = """
########################################################
#   Running Installation Script for Workstation        #
########################################################\n\n\n
"""
RAN_SCRIP_STRING = """\n
########################################################
# Finished running Installation Script for Workstation #
########################################################
"""
GNOME_SHELL_EXTENSIONS_FOLDER = "gnomeShellExtentionsToInstall"
# NOTE(review): this path assumes GNOME_SHELL_EXTENSIONS_FOLDER already
# exists in the working directory -- confirm, writeContentsToFile() does
# not create it.
caffeineInstallScriptFile = (GNOME_SHELL_EXTENSIONS_FOLDER + "/" +
                             "caffeineInstallScript.sh")
caffeineInstallScriptFileContents = """
#!/bin/env bash
ROOT=$(pwd)
mkdir tempGnomeExtensionsInstallFolder &&
cd tempGnomeExtensionsInstallFolder &&
rm -rf gnome-shell-extension-caffeine &&
git clone git://github.com/eonpatapon/gnome-shell-extension-caffeine.git &&
cd gnome-shell-extension-caffeine &&
./update-locale.sh &&
glib-compile-schemas --strict --targetdir=caffeine@patapon.info/schemas/ caffeine@patapon.info/schemas &&
cp -r caffeine@patapon.info ~/.local/share/gnome-shell/extensions
"""
RUN_SCRIPT_AS_ROOT_STRING = "\n\nPlease run this script as root or equivalent.\n\n"
DNF_CONST_FILE = "/etc/dnf/dnf.conf"
DNF_DELTARPM_CONFIG_STRING = "deltarpm=1"
OS_UPDATE_SYSTEM = "sudo dnf update -y"
SUDO_GET_PASSWORD = "sudo touch /tmp/tempFileForInstallationScript"
SUDO_FORGET_PASSWORD = "sudo -k"
SUDO_FORGET_PASSWORD_STRING = "\n\nForgetting sudo password.\n\n"
INSTALL_PACKAGE_CMD = "sudo dnf install -y "
# FIX: the original passed ['rpm', '-E %fedora'], which hands rpm the single
# fused argument "-E %fedora" instead of the flag and its macro; decode()
# keeps the value a text string on Python 3 as well.
FEDORA_VERSION_NUMBER = subprocess.check_output(
    ['rpm', '-E', '%fedora']).decode()
RPM_FUSION_FREE_DOWNLOAD_URL = ("\"https://download1.rpmfusion.org/free/fedora"
                                "/rpmfusion-free-release-" +
                                FEDORA_VERSION_NUMBER.strip() +
                                ".noarch.rpm\"")
RPM_FUSION_NONFREE_DOWNLOAD_URL = ("\"https://download1.rpmfusion.org/nonfree"
                                   "/fedora/rpmfusion-nonfree-release-" +
                                   FEDORA_VERSION_NUMBER.strip() +
                                   ".noarch.rpm\"")
ATOM_EDITOR_DOWNLOAD_URL = "https://atom.io/download/rpm"
PACKAGES_FILE = "gnomeShell3Packages.txt"
PACKAGE_TO_INSTALL_LIST = " "   # filled in by getListOfPackagesToInstall()
FILES_IN_FOLDER = " "           # filled in by getListOfFilesInFolder()
LIST_OF_FILES_TO_KEEP_AFTER_RUNNING_FILE = "filesToKeep.txt"
ERROR_OPENING_PACKAGES_TO_KEEP_FILE = ("\n\nPlease make sure that the file " +
                                       LIST_OF_FILES_TO_KEEP_AFTER_RUNNING_FILE +
                                       " exists.\n\n")
FILES_TO_KEEP_AFTER_RUNNING = " "  # filled in by getListOfFilesToKeepAfterRunning()
ERROR_OPENING_PACKAGES_FILE = ("\n\nPlease make sure that the file " +
                               PACKAGES_FILE + " exists.\n\n")
ERROR_GETTING_LIST_OF_FILES_IN_FOLDER = ("\n\nCouldn't get list of files from" +
                                         "folder.\n\n ")
ERROR_RUNNING_COMMAND = "\n\n Error running the command: "
ERROR_OPENING_FILE = "\n\n Error opening the command: "
COMMAND_GET_FILES_TO_KEEP = "cat filesToKeep.txt"
KEEP_PASSWORD = 0
TEMP_POST_INSTALL_SCRIPT_FILE = "tempPostInstallScript.sh"


#
# Functions
#
def getListOfFilesToKeepAfterRunning():
    """Load the file names that must survive cleanAfterInstall()."""
    global FILES_TO_KEEP_AFTER_RUNNING
    try:
        # FIX: 'with' guarantees the handle is closed; the narrowed except
        # no longer swallows SystemExit/KeyboardInterrupt like the original
        # bare 'except:' did.
        with open(LIST_OF_FILES_TO_KEEP_AFTER_RUNNING_FILE) as f:
            FILES_TO_KEEP_AFTER_RUNNING = [x.strip() for x in f.readlines()]
    except (IOError, OSError):
        print(ERROR_OPENING_PACKAGES_TO_KEEP_FILE)
        exitScript(KEEP_PASSWORD)
    print("Finished getting files to keep after install.")


def writeContentsToFile(localFileToWriteTo, localContentsToWrite):
    """Write localContentsToWrite into localFileToWriteTo, truncating it."""
    try:
        # FIX: 'with' replaces the original open/write/close triple and
        # closes the file even if write() raises.
        with open(localFileToWriteTo, "w") as localTempFileToWriteContents:
            localTempFileToWriteContents.write(localContentsToWrite)
    except (IOError, OSError):
        fileNotOpenSuccessfully(localFileToWriteTo)
        exitScript(KEEP_PASSWORD)


def executeFile(localFileToRun):
    """Run a script in the current folder through sh."""
    runCommand("sh ./" + localFileToRun)


def makeFileExecutable(localFileToTurnExecutable):
    """chmod +x the given file."""
    runCommand("chmod +x " + localFileToTurnExecutable)


def runCommand(localCommandToRun):
    """Split localCommandToRun shell-style and execute it.

    On any failure prints an error and terminates the script.
    """
    try:
        subprocess.call(shlex.split(localCommandToRun))
    except Exception:
        commandNotRanSuccessfully(localCommandToRun)
        exitScript(KEEP_PASSWORD)


def fileNotOpenSuccessfully(localFileNotOpen):
    """Report a file that could not be opened."""
    print(ERROR_OPENING_FILE + localFileNotOpen + " \n\n\n")


def commandNotRanSuccessfully(commandRan):
    """Report a command that failed to run."""
    print(ERROR_RUNNING_COMMAND + commandRan + " \n\n\n")


def exitScript(forgetPass):
    """Terminate the script; forget the cached sudo password when
    forgetPass == 0 (i.e. KEEP_PASSWORD)."""
    if forgetPass == 0:
        makeSudoForgetPass()
    printEndString()
    # FIX: sys.exit() instead of the interactive-only exit() builtin.
    sys.exit()


def setDeltaRpm():
    """Append deltarpm=1 to /etc/dnf/dnf.conf unless already present."""
    # FIX: 'with' closes the config file on every path; the original only
    # closed it on the not-yet-configured branch.
    with open(DNF_CONST_FILE) as fobj:
        dnfConfFile = fobj.read().strip().split()
    stringToSearch = DNF_DELTARPM_CONFIG_STRING
    if stringToSearch in dnfConfFile:
        print("Delta rpm already configured.\n")
    else:
        print('Setting delta rpm...\n')
        commandToRun = ("sudo sh -c 'echo " + DNF_DELTARPM_CONFIG_STRING +
                        " >> " + DNF_CONST_FILE + "'")
        runCommand(commandToRun)


def performUpdate():
    """Run a full dnf system update."""
    print("\nUpdating system...\n")
    runCommand(OS_UPDATE_SYSTEM)
    print("\nUpdated system.\n")


def performInstallFirstStage():
    """Stage 1: dnf configuration."""
    setDeltaRpm()


def installPackage(localPackageToInstall):
    """Install one package (name or rpm URL) with dnf."""
    commandToRun = INSTALL_PACKAGE_CMD + localPackageToInstall
    runCommand(commandToRun)


def installRpmFusion():
    """Install the RPM Fusion free and nonfree release packages."""
    print("\nInstalling rpmfusion...\n")
    installPackage(RPM_FUSION_FREE_DOWNLOAD_URL)
    installPackage(RPM_FUSION_NONFREE_DOWNLOAD_URL)
    print("\nInstaled rpmfusion.\n")


def installAtomEditor():
    """Install the Atom editor from its rpm download URL."""
    print("\nInstalling Atom editor...\n")
    installPackage(ATOM_EDITOR_DOWNLOAD_URL)
    print("\nInstaled Atom editor.\n")


def getListOfPackagesToInstall():
    """Read the package list file into PACKAGE_TO_INSTALL_LIST."""
    print("Getting list of packages to install from " + PACKAGES_FILE + " ...")
    global PACKAGE_TO_INSTALL_LIST
    try:
        # FIX: read the file directly instead of shelling out to 'cat'.
        with open(PACKAGES_FILE) as f:
            PACKAGE_TO_INSTALL_LIST = f.read()
    except (IOError, OSError):
        print(ERROR_OPENING_PACKAGES_FILE)
        exitScript(KEEP_PASSWORD)
    print("Finished getting package list.")


def installPackagesFromFile():
    """Install every package named in PACKAGE_TO_INSTALL_LIST."""
    print("Installing packages from list...")
    installPackage(PACKAGE_TO_INSTALL_LIST)
    print("Finished installing package list.")


def getListOfFilesInFolder():
    """List the current working directory into FILES_IN_FOLDER."""
    print("Getting list of files in folder ...")
    global FILES_IN_FOLDER
    tempCurrentFolder = os.getcwd()
    FILES_IN_FOLDER = os.listdir(tempCurrentFolder)
    print("Finished getting list of files in folder.")


def cleanAfterInstall():
    """Delete every file/folder here that is not in the keep list."""
    getListOfFilesToKeepAfterRunning()
    getListOfFilesInFolder()
    FILES_IN_FOLDER.sort()
    FILES_TO_KEEP_AFTER_RUNNING.sort()
    for fileInFolder in FILES_IN_FOLDER:
        if fileInFolder not in FILES_TO_KEEP_AFTER_RUNNING:
            print(fileInFolder + " is not in files to keep.")
            try:
                os.remove(fileInFolder)
            # FIX: Python-2-only 'except OSError, e' replaced with the
            # 'as e' form that also parses on Python 3.
            except OSError:
                # os.remove fails on directories; fall back to rmtree.
                try:
                    shutil.rmtree(fileInFolder)
                except OSError as e:
                    print("Error: %s - %s." % (e.filename, e.strerror))


def installCaffeineGnomeExtention():
    """Write, mark executable and run the Caffeine extension installer."""
    print("Installing Caffeine Gnome Shell Extensions...")
    writeContentsToFile(caffeineInstallScriptFile,
                        caffeineInstallScriptFileContents)
    makeFileExecutable(caffeineInstallScriptFile)
    executeFile(caffeineInstallScriptFile)
    print("Instaled Caffeine Gnome Shell Extensions.")


def performInstallFourthStage():
    """Stage 4: GNOME Shell extensions."""
    installCaffeineGnomeExtention()


def performInstallThirdStage():
    """Stage 3: packages from the list file."""
    getListOfPackagesToInstall()
    installPackagesFromFile()


def performInstallSecondtStage():
    """Stage 2: third-party repositories."""
    installRpmFusion()


def performInstall():
    """Run every installation stage, clean up, then the post-install hook."""
    performInstallFirstStage()
    performUpdate()
    performInstallSecondtStage()
    performUpdate()
    performInstallThirdStage()
    performInstallFourthStage()
    cleanAfterInstall()
    makeFileExecutable(TEMP_POST_INSTALL_SCRIPT_FILE)
    executeFile(TEMP_POST_INSTALL_SCRIPT_FILE)


def checkIfUserHasRootRights():
    """Return the effective uid (0 means root)."""
    return os.geteuid()


def printWelcomeString():
    print(WELCOME_STRING)


def printNeedRootRightsString():
    print(RUN_SCRIPT_AS_ROOT_STRING)


def printEndString():
    print(RAN_SCRIP_STRING)


def getSudoPass():
    """Prompt for (and cache) the sudo password via a no-op command."""
    runCommand(SUDO_GET_PASSWORD)


def makeSudoForgetPass():
    """Drop the cached sudo credentials."""
    print(SUDO_FORGET_PASSWORD_STRING)
    runCommand(SUDO_FORGET_PASSWORD)


def main():
    """Entry point: verify privileges, then run the full install."""
    printWelcomeString()
    if checkIfUserHasRootRights() == 0:
        performInstall()
    else:
        try:
            getSudoPass()
        except Exception:
            printNeedRootRightsString()
            exitScript(KEEP_PASSWORD)
        performInstall()
    exitScript(KEEP_PASSWORD)


#
# Run Main Script
#
# FIX: guarded so importing the module (e.g. for testing) does not start
# the installation; running the script directly behaves as before.
if __name__ == "__main__":
    main()
gpl-3.0
achang97/YouTunes
lib/python2.7/site-packages/pip/index.py
336
39950
"""Routines related to PyPI, indexes""" from __future__ import absolute_import import logging import cgi from collections import namedtuple import itertools import sys import os import re import mimetypes import posixpath import warnings from pip._vendor.six.moves.urllib import parse as urllib_parse from pip._vendor.six.moves.urllib import request as urllib_request from pip.compat import ipaddress from pip.utils import ( cached_property, splitext, normalize_path, ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS, ) from pip.utils.deprecation import RemovedInPip10Warning from pip.utils.logging import indent_log from pip.utils.packaging import check_requires_python from pip.exceptions import ( DistributionNotFound, BestVersionAlreadyInstalled, InvalidWheelFilename, UnsupportedWheel, ) from pip.download import HAS_TLS, is_url, path_to_url, url_to_path from pip.wheel import Wheel, wheel_ext from pip.pep425tags import get_supported from pip._vendor import html5lib, requests, six from pip._vendor.packaging.version import parse as parse_version from pip._vendor.packaging.utils import canonicalize_name from pip._vendor.packaging import specifiers from pip._vendor.requests.exceptions import SSLError from pip._vendor.distlib.compat import unescape __all__ = ['FormatControl', 'fmt_ctl_handle_mutual_exclude', 'PackageFinder'] SECURE_ORIGINS = [ # protocol, hostname, port # Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC) ("https", "*", "*"), ("*", "localhost", "*"), ("*", "127.0.0.0/8", "*"), ("*", "::1/128", "*"), ("file", "*", None), # ssh is always secure. 
("ssh", "*", "*"), ] logger = logging.getLogger(__name__) class InstallationCandidate(object): def __init__(self, project, version, location): self.project = project self.version = parse_version(version) self.location = location self._key = (self.project, self.version, self.location) def __repr__(self): return "<InstallationCandidate({0!r}, {1!r}, {2!r})>".format( self.project, self.version, self.location, ) def __hash__(self): return hash(self._key) def __lt__(self, other): return self._compare(other, lambda s, o: s < o) def __le__(self, other): return self._compare(other, lambda s, o: s <= o) def __eq__(self, other): return self._compare(other, lambda s, o: s == o) def __ge__(self, other): return self._compare(other, lambda s, o: s >= o) def __gt__(self, other): return self._compare(other, lambda s, o: s > o) def __ne__(self, other): return self._compare(other, lambda s, o: s != o) def _compare(self, other, method): if not isinstance(other, InstallationCandidate): return NotImplemented return method(self._key, other._key) class PackageFinder(object): """This finds packages. This is meant to match easy_install's technique for looking for packages, by reading pages and looking for appropriate links. """ def __init__(self, find_links, index_urls, allow_all_prereleases=False, trusted_hosts=None, process_dependency_links=False, session=None, format_control=None, platform=None, versions=None, abi=None, implementation=None): """Create a PackageFinder. :param format_control: A FormatControl object or None. Used to control the selection of source packages / binary packages when consulting the index and links. :param platform: A string or None. If None, searches for packages that are supported by the current system. Otherwise, will find packages that can be built on the platform passed in. These packages will only be downloaded for distribution: they will not be built locally. :param versions: A list of strings or None. 
This is passed directly to pep425tags.py in the get_supported() method. :param abi: A string or None. This is passed directly to pep425tags.py in the get_supported() method. :param implementation: A string or None. This is passed directly to pep425tags.py in the get_supported() method. """ if session is None: raise TypeError( "PackageFinder() missing 1 required keyword argument: " "'session'" ) # Build find_links. If an argument starts with ~, it may be # a local file relative to a home directory. So try normalizing # it and if it exists, use the normalized version. # This is deliberately conservative - it might be fine just to # blindly normalize anything starting with a ~... self.find_links = [] for link in find_links: if link.startswith('~'): new_link = normalize_path(link) if os.path.exists(new_link): link = new_link self.find_links.append(link) self.index_urls = index_urls self.dependency_links = [] # These are boring links that have already been logged somehow: self.logged_links = set() self.format_control = format_control or FormatControl(set(), set()) # Domains that we won't emit warnings for when not using HTTPS self.secure_origins = [ ("*", host, "*") for host in (trusted_hosts if trusted_hosts else []) ] # Do we want to allow _all_ pre-releases? self.allow_all_prereleases = allow_all_prereleases # Do we process dependency links? self.process_dependency_links = process_dependency_links # The Session we'll use to make requests self.session = session # The valid tags to check potential found wheel candidates against self.valid_tags = get_supported( versions=versions, platform=platform, abi=abi, impl=implementation, ) # If we don't have TLS enabled, then WARN if anyplace we're looking # relies on TLS. 
if not HAS_TLS: for link in itertools.chain(self.index_urls, self.find_links): parsed = urllib_parse.urlparse(link) if parsed.scheme == "https": logger.warning( "pip is configured with locations that require " "TLS/SSL, however the ssl module in Python is not " "available." ) break def add_dependency_links(self, links): # # FIXME: this shouldn't be global list this, it should only # # apply to requirements of the package that specifies the # # dependency_links value # # FIXME: also, we should track comes_from (i.e., use Link) if self.process_dependency_links: warnings.warn( "Dependency Links processing has been deprecated and will be " "removed in a future release.", RemovedInPip10Warning, ) self.dependency_links.extend(links) @staticmethod def _sort_locations(locations, expand_dir=False): """ Sort locations into "files" (archives) and "urls", and return a pair of lists (files,urls) """ files = [] urls = [] # puts the url for the given file path into the appropriate list def sort_path(path): url = path_to_url(path) if mimetypes.guess_type(url, strict=False)[0] == 'text/html': urls.append(url) else: files.append(url) for url in locations: is_local_path = os.path.exists(url) is_file_url = url.startswith('file:') if is_local_path or is_file_url: if is_local_path: path = url else: path = url_to_path(url) if os.path.isdir(path): if expand_dir: path = os.path.realpath(path) for item in os.listdir(path): sort_path(os.path.join(path, item)) elif is_file_url: urls.append(url) elif os.path.isfile(path): sort_path(path) else: logger.warning( "Url '%s' is ignored: it is neither a file " "nor a directory.", url) elif is_url(url): # Only add url with clear scheme urls.append(url) else: logger.warning( "Url '%s' is ignored. It is either a non-existing " "path or lacks a specific scheme.", url) return files, urls def _candidate_sort_key(self, candidate): """ Function used to generate link sort key for link tuples. The greater the return value, the more preferred it is. 
If not finding wheels, then sorted by version only. If finding wheels, then the sort order is by version, then: 1. existing installs 2. wheels ordered via Wheel.support_index_min(self.valid_tags) 3. source archives Note: it was considered to embed this logic into the Link comparison operators, but then different sdist links with the same version, would have to be considered equal """ support_num = len(self.valid_tags) if candidate.location.is_wheel: # can raise InvalidWheelFilename wheel = Wheel(candidate.location.filename) if not wheel.supported(self.valid_tags): raise UnsupportedWheel( "%s is not a supported wheel for this platform. It " "can't be sorted." % wheel.filename ) pri = -(wheel.support_index_min(self.valid_tags)) else: # sdist pri = -(support_num) return (candidate.version, pri) def _validate_secure_origin(self, logger, location): # Determine if this url used a secure transport mechanism parsed = urllib_parse.urlparse(str(location)) origin = (parsed.scheme, parsed.hostname, parsed.port) # The protocol to use to see if the protocol matches. # Don't count the repository type as part of the protocol: in # cases such as "git+ssh", only use "ssh". (I.e., Only verify against # the last scheme.) protocol = origin[0].rsplit('+', 1)[-1] # Determine if our origin is a secure origin by looking through our # hardcoded list of secure origins, as well as any additional ones # configured on this PackageFinder instance. for secure_origin in (SECURE_ORIGINS + self.secure_origins): if protocol != secure_origin[0] and secure_origin[0] != "*": continue try: # We need to do this decode dance to ensure that we have a # unicode object, even on Python 2.x. 
addr = ipaddress.ip_address( origin[1] if ( isinstance(origin[1], six.text_type) or origin[1] is None ) else origin[1].decode("utf8") ) network = ipaddress.ip_network( secure_origin[1] if isinstance(secure_origin[1], six.text_type) else secure_origin[1].decode("utf8") ) except ValueError: # We don't have both a valid address or a valid network, so # we'll check this origin against hostnames. if (origin[1] and origin[1].lower() != secure_origin[1].lower() and secure_origin[1] != "*"): continue else: # We have a valid address and network, so see if the address # is contained within the network. if addr not in network: continue # Check to see if the port patches if (origin[2] != secure_origin[2] and secure_origin[2] != "*" and secure_origin[2] is not None): continue # If we've gotten here, then this origin matches the current # secure origin and we should return True return True # If we've gotten to this point, then the origin isn't secure and we # will not accept it as a valid location to search. We will however # log a warning that we are ignoring it. logger.warning( "The repository located at %s is not a trusted or secure host and " "is being ignored. If this repository is available via HTTPS it " "is recommended to use HTTPS instead, otherwise you may silence " "this warning and allow it anyways with '--trusted-host %s'.", parsed.hostname, parsed.hostname, ) return False def _get_index_urls_locations(self, project_name): """Returns the locations found via self.index_urls Checks the url_name on the main (first in the list) index and use this url_name to produce all locations """ def mkurl_pypi_url(url): loc = posixpath.join( url, urllib_parse.quote(canonicalize_name(project_name))) # For maximum compatibility with easy_install, ensure the path # ends in a trailing slash. Although this isn't in the spec # (and PyPI can handle it without the slash) some other index # implementations might break if they relied on easy_install's # behavior. 
if not loc.endswith('/'): loc = loc + '/' return loc return [mkurl_pypi_url(url) for url in self.index_urls] def find_all_candidates(self, project_name): """Find all available InstallationCandidate for project_name This checks index_urls, find_links and dependency_links. All versions found are returned as an InstallationCandidate list. See _link_package_versions for details on which files are accepted """ index_locations = self._get_index_urls_locations(project_name) index_file_loc, index_url_loc = self._sort_locations(index_locations) fl_file_loc, fl_url_loc = self._sort_locations( self.find_links, expand_dir=True) dep_file_loc, dep_url_loc = self._sort_locations(self.dependency_links) file_locations = ( Link(url) for url in itertools.chain( index_file_loc, fl_file_loc, dep_file_loc) ) # We trust every url that the user has given us whether it was given # via --index-url or --find-links # We explicitly do not trust links that came from dependency_links # We want to filter out any thing which does not have a secure origin. 
url_locations = [ link for link in itertools.chain( (Link(url) for url in index_url_loc), (Link(url) for url in fl_url_loc), (Link(url) for url in dep_url_loc), ) if self._validate_secure_origin(logger, link) ] logger.debug('%d location(s) to search for versions of %s:', len(url_locations), project_name) for location in url_locations: logger.debug('* %s', location) canonical_name = canonicalize_name(project_name) formats = fmt_ctl_formats(self.format_control, canonical_name) search = Search(project_name, canonical_name, formats) find_links_versions = self._package_versions( # We trust every directly linked archive in find_links (Link(url, '-f') for url in self.find_links), search ) page_versions = [] for page in self._get_pages(url_locations, project_name): logger.debug('Analyzing links from page %s', page.url) with indent_log(): page_versions.extend( self._package_versions(page.links, search) ) dependency_versions = self._package_versions( (Link(url) for url in self.dependency_links), search ) if dependency_versions: logger.debug( 'dependency_links found: %s', ', '.join([ version.location.url for version in dependency_versions ]) ) file_versions = self._package_versions(file_locations, search) if file_versions: file_versions.sort(reverse=True) logger.debug( 'Local files found: %s', ', '.join([ url_to_path(candidate.location.url) for candidate in file_versions ]) ) # This is an intentional priority ordering return ( file_versions + find_links_versions + page_versions + dependency_versions ) def find_requirement(self, req, upgrade): """Try to find a Link matching req Expects req, an InstallRequirement and upgrade, a boolean Returns a Link if found, Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise """ all_candidates = self.find_all_candidates(req.name) # Filter out anything which doesn't match our specifier compatible_versions = set( req.specifier.filter( # We turn the version object into a str here because otherwise # when we're debundled but 
setuptools isn't, Python will see # packaging.version.Version and # pkg_resources._vendor.packaging.version.Version as different # types. This way we'll use a str as a common data interchange # format. If we stop using the pkg_resources provided specifier # and start using our own, we can drop the cast to str(). [str(c.version) for c in all_candidates], prereleases=( self.allow_all_prereleases if self.allow_all_prereleases else None ), ) ) applicable_candidates = [ # Again, converting to str to deal with debundling. c for c in all_candidates if str(c.version) in compatible_versions ] if applicable_candidates: best_candidate = max(applicable_candidates, key=self._candidate_sort_key) else: best_candidate = None if req.satisfied_by is not None: installed_version = parse_version(req.satisfied_by.version) else: installed_version = None if installed_version is None and best_candidate is None: logger.critical( 'Could not find a version that satisfies the requirement %s ' '(from versions: %s)', req, ', '.join( sorted( set(str(c.version) for c in all_candidates), key=parse_version, ) ) ) raise DistributionNotFound( 'No matching distribution found for %s' % req ) best_installed = False if installed_version and ( best_candidate is None or best_candidate.version <= installed_version): best_installed = True if not upgrade and installed_version is not None: if best_installed: logger.debug( 'Existing installed version (%s) is most up-to-date and ' 'satisfies requirement', installed_version, ) else: logger.debug( 'Existing installed version (%s) satisfies requirement ' '(most up-to-date version is %s)', installed_version, best_candidate.version, ) return None if best_installed: # We have an existing version, and its the best version logger.debug( 'Installed version (%s) is most up-to-date (past versions: ' '%s)', installed_version, ', '.join(sorted(compatible_versions, key=parse_version)) or "none", ) raise BestVersionAlreadyInstalled logger.debug( 'Using version %s (newest of 
versions: %s)', best_candidate.version, ', '.join(sorted(compatible_versions, key=parse_version)) ) return best_candidate.location def _get_pages(self, locations, project_name): """ Yields (page, page_url) from the given locations, skipping locations that have errors. """ seen = set() for location in locations: if location in seen: continue seen.add(location) page = self._get_page(location) if page is None: continue yield page _py_version_re = re.compile(r'-py([123]\.?[0-9]?)$') def _sort_links(self, links): """ Returns elements of links in order, non-egg links first, egg links second, while eliminating duplicates """ eggs, no_eggs = [], [] seen = set() for link in links: if link not in seen: seen.add(link) if link.egg_fragment: eggs.append(link) else: no_eggs.append(link) return no_eggs + eggs def _package_versions(self, links, search): result = [] for link in self._sort_links(links): v = self._link_package_versions(link, search) if v is not None: result.append(v) return result def _log_skipped_link(self, link, reason): if link not in self.logged_links: logger.debug('Skipping link %s; %s', link, reason) self.logged_links.add(link) def _link_package_versions(self, link, search): """Return an InstallationCandidate or None""" version = None if link.egg_fragment: egg_info = link.egg_fragment ext = link.ext else: egg_info, ext = link.splitext() if not ext: self._log_skipped_link(link, 'not a file') return if ext not in SUPPORTED_EXTENSIONS: self._log_skipped_link( link, 'unsupported archive format: %s' % ext) return if "binary" not in search.formats and ext == wheel_ext: self._log_skipped_link( link, 'No binaries permitted for %s' % search.supplied) return if "macosx10" in link.path and ext == '.zip': self._log_skipped_link(link, 'macosx10 one') return if ext == wheel_ext: try: wheel = Wheel(link.filename) except InvalidWheelFilename: self._log_skipped_link(link, 'invalid wheel filename') return if canonicalize_name(wheel.name) != search.canonical: 
self._log_skipped_link( link, 'wrong project name (not %s)' % search.supplied) return if not wheel.supported(self.valid_tags): self._log_skipped_link( link, 'it is not compatible with this Python') return version = wheel.version # This should be up by the search.ok_binary check, but see issue 2700. if "source" not in search.formats and ext != wheel_ext: self._log_skipped_link( link, 'No sources permitted for %s' % search.supplied) return if not version: version = egg_info_matches(egg_info, search.supplied, link) if version is None: self._log_skipped_link( link, 'wrong project name (not %s)' % search.supplied) return match = self._py_version_re.search(version) if match: version = version[:match.start()] py_version = match.group(1) if py_version != sys.version[:3]: self._log_skipped_link( link, 'Python version is incorrect') return try: support_this_python = check_requires_python(link.requires_python) except specifiers.InvalidSpecifier: logger.debug("Package %s has an invalid Requires-Python entry: %s", link.filename, link.requires_python) support_this_python = True if not support_this_python: logger.debug("The package %s is incompatible with the python" "version in use. Acceptable python versions are:%s", link, link.requires_python) return logger.debug('Found link %s, version: %s', link, version) return InstallationCandidate(search.supplied, version, link) def _get_page(self, link): return HTMLPage.get_page(link, session=self.session) def egg_info_matches( egg_info, search_name, link, _egg_info_re=re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)): """Pull the version part out of a string. :param egg_info: The string to parse. E.g. foo-2.1 :param search_name: The name of the package this belongs to. None to infer the name. Note that this cannot unambiguously parse strings like foo-2-2 which might be foo, 2-2 or foo-2, 2. :param link: The link the string came from, for logging on failure. 
""" match = _egg_info_re.search(egg_info) if not match: logger.debug('Could not parse version from link: %s', link) return None if search_name is None: full_match = match.group(0) return full_match[full_match.index('-'):] name = match.group(0).lower() # To match the "safe" name that pkg_resources creates: name = name.replace('_', '-') # project name and version must be separated by a dash look_for = search_name.lower() + "-" if name.startswith(look_for): return match.group(0)[len(look_for):] else: return None class HTMLPage(object): """Represents one page, along with its URL""" def __init__(self, content, url, headers=None): # Determine if we have any encoding information in our headers encoding = None if headers and "Content-Type" in headers: content_type, params = cgi.parse_header(headers["Content-Type"]) if "charset" in params: encoding = params['charset'] self.content = content self.parsed = html5lib.parse( self.content, transport_encoding=encoding, namespaceHTMLElements=False, ) self.url = url self.headers = headers def __str__(self): return self.url @classmethod def get_page(cls, link, skip_archives=True, session=None): if session is None: raise TypeError( "get_page() missing 1 required keyword argument: 'session'" ) url = link.url url = url.split('#', 1)[0] # Check for VCS schemes that do not support lookup as web pages. 
from pip.vcs import VcsSupport for scheme in VcsSupport.schemes: if url.lower().startswith(scheme) and url[len(scheme)] in '+:': logger.debug('Cannot look at %s URL %s', scheme, link) return None try: if skip_archives: filename = link.filename for bad_ext in ARCHIVE_EXTENSIONS: if filename.endswith(bad_ext): content_type = cls._get_content_type( url, session=session, ) if content_type.lower().startswith('text/html'): break else: logger.debug( 'Skipping page %s because of Content-Type: %s', link, content_type, ) return logger.debug('Getting page %s', url) # Tack index.html onto file:// URLs that point to directories (scheme, netloc, path, params, query, fragment) = \ urllib_parse.urlparse(url) if (scheme == 'file' and os.path.isdir(urllib_request.url2pathname(path))): # add trailing slash if not present so urljoin doesn't trim # final segment if not url.endswith('/'): url += '/' url = urllib_parse.urljoin(url, 'index.html') logger.debug(' file: URL is directory, getting %s', url) resp = session.get( url, headers={ "Accept": "text/html", "Cache-Control": "max-age=600", }, ) resp.raise_for_status() # The check for archives above only works if the url ends with # something that looks like an archive. However that is not a # requirement of an url. Unless we issue a HEAD request on every # url we cannot know ahead of time for sure if something is HTML # or not. However we can check after we've downloaded it. 
content_type = resp.headers.get('Content-Type', 'unknown') if not content_type.lower().startswith("text/html"): logger.debug( 'Skipping page %s because of Content-Type: %s', link, content_type, ) return inst = cls(resp.content, resp.url, resp.headers) except requests.HTTPError as exc: cls._handle_fail(link, exc, url) except SSLError as exc: reason = ("There was a problem confirming the ssl certificate: " "%s" % exc) cls._handle_fail(link, reason, url, meth=logger.info) except requests.ConnectionError as exc: cls._handle_fail(link, "connection error: %s" % exc, url) except requests.Timeout: cls._handle_fail(link, "timed out", url) else: return inst @staticmethod def _handle_fail(link, reason, url, meth=None): if meth is None: meth = logger.debug meth("Could not fetch URL %s: %s - skipping", link, reason) @staticmethod def _get_content_type(url, session): """Get the Content-Type of the given url, using a HEAD request""" scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url) if scheme not in ('http', 'https'): # FIXME: some warning or something? # assertion error? return '' resp = session.head(url, allow_redirects=True) resp.raise_for_status() return resp.headers.get("Content-Type", "") @cached_property def base_url(self): bases = [ x for x in self.parsed.findall(".//base") if x.get("href") is not None ] if bases and bases[0].get("href"): return bases[0].get("href") else: return self.url @property def links(self): """Yields all links in the page""" for anchor in self.parsed.findall(".//a"): if anchor.get("href"): href = anchor.get("href") url = self.clean_link( urllib_parse.urljoin(self.base_url, href) ) pyrequire = anchor.get('data-requires-python') pyrequire = unescape(pyrequire) if pyrequire else None yield Link(url, self, requires_python=pyrequire) _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I) def clean_link(self, url): """Makes sure a link is fully encoded. 
That is, if a ' ' shows up in the link, it will be rewritten to %20 (while not over-quoting % or other characters).""" return self._clean_re.sub( lambda match: '%%%2x' % ord(match.group(0)), url) class Link(object): def __init__(self, url, comes_from=None, requires_python=None): """ Object representing a parsed link from https://pypi.python.org/simple/* url: url of the resource pointed to (href of the link) comes_from: instance of HTMLPage where the link was found, or string. requires_python: String containing the `Requires-Python` metadata field, specified in PEP 345. This may be specified by a data-requires-python attribute in the HTML link tag, as described in PEP 503. """ # url can be a UNC windows share if url.startswith('\\\\'): url = path_to_url(url) self.url = url self.comes_from = comes_from self.requires_python = requires_python if requires_python else None def __str__(self): if self.requires_python: rp = ' (requires-python:%s)' % self.requires_python else: rp = '' if self.comes_from: return '%s (from %s)%s' % (self.url, self.comes_from, rp) else: return str(self.url) def __repr__(self): return '<Link %s>' % self def __eq__(self, other): if not isinstance(other, Link): return NotImplemented return self.url == other.url def __ne__(self, other): if not isinstance(other, Link): return NotImplemented return self.url != other.url def __lt__(self, other): if not isinstance(other, Link): return NotImplemented return self.url < other.url def __le__(self, other): if not isinstance(other, Link): return NotImplemented return self.url <= other.url def __gt__(self, other): if not isinstance(other, Link): return NotImplemented return self.url > other.url def __ge__(self, other): if not isinstance(other, Link): return NotImplemented return self.url >= other.url def __hash__(self): return hash(self.url) @property def filename(self): _, netloc, path, _, _ = urllib_parse.urlsplit(self.url) name = posixpath.basename(path.rstrip('/')) or netloc name = 
urllib_parse.unquote(name) assert name, ('URL %r produced no filename' % self.url) return name @property def scheme(self): return urllib_parse.urlsplit(self.url)[0] @property def netloc(self): return urllib_parse.urlsplit(self.url)[1] @property def path(self): return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2]) def splitext(self): return splitext(posixpath.basename(self.path.rstrip('/'))) @property def ext(self): return self.splitext()[1] @property def url_without_fragment(self): scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url) return urllib_parse.urlunsplit((scheme, netloc, path, query, None)) _egg_fragment_re = re.compile(r'[#&]egg=([^&]*)') @property def egg_fragment(self): match = self._egg_fragment_re.search(self.url) if not match: return None return match.group(1) _subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)') @property def subdirectory_fragment(self): match = self._subdirectory_fragment_re.search(self.url) if not match: return None return match.group(1) _hash_re = re.compile( r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)' ) @property def hash(self): match = self._hash_re.search(self.url) if match: return match.group(2) return None @property def hash_name(self): match = self._hash_re.search(self.url) if match: return match.group(1) return None @property def show_url(self): return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0]) @property def is_wheel(self): return self.ext == wheel_ext @property def is_artifact(self): """ Determines if this points to an actual artifact (e.g. a tarball) or if it points to an "abstract" thing like a path or a VCS location. """ from pip.vcs import vcs if self.scheme in vcs.all_schemes: return False return True FormatControl = namedtuple('FormatControl', 'no_binary only_binary') """This object has two fields, no_binary and only_binary. If a field is falsy, it isn't set. 
If it is {':all:'}, it should match all packages except those listed in the other field. Only one field can be set to {':all:'} at a time. The rest of the time exact package name matches are listed, with any given package only showing up in one field at a time. """ def fmt_ctl_handle_mutual_exclude(value, target, other): new = value.split(',') while ':all:' in new: other.clear() target.clear() target.add(':all:') del new[:new.index(':all:') + 1] if ':none:' not in new: # Without a none, we want to discard everything as :all: covers it return for name in new: if name == ':none:': target.clear() continue name = canonicalize_name(name) other.discard(name) target.add(name) def fmt_ctl_formats(fmt_ctl, canonical_name): result = set(["binary", "source"]) if canonical_name in fmt_ctl.only_binary: result.discard('source') elif canonical_name in fmt_ctl.no_binary: result.discard('binary') elif ':all:' in fmt_ctl.only_binary: result.discard('source') elif ':all:' in fmt_ctl.no_binary: result.discard('binary') return frozenset(result) def fmt_ctl_no_binary(fmt_ctl): fmt_ctl_handle_mutual_exclude( ':all:', fmt_ctl.no_binary, fmt_ctl.only_binary) def fmt_ctl_no_use_wheel(fmt_ctl): fmt_ctl_no_binary(fmt_ctl) warnings.warn( '--no-use-wheel is deprecated and will be removed in the future. ' ' Please use --no-binary :all: instead.', RemovedInPip10Warning, stacklevel=2) Search = namedtuple('Search', 'supplied canonical formats') """Capture key aspects of a search. :attribute supplied: The user supplied package. :attribute canonical: The canonical package name. :attribute formats: The formats allowed for this package. Should be a set with 'binary' or 'source' or both in it. """
mit
justinvforvendetta/electrum-rby
gui/qt/qrcodewidget.py
1
3721
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
import PyQt4.QtGui as QtGui
import os
import qrcode

import electrum_rby
from electrum_rby import bmp
from electrum_rby.i18n import _


class QRCodeWidget(QWidget):
    """Widget that renders a QR code for an arbitrary data string.

    If ``fixedSize`` is given (an int, in pixels) the widget is locked to a
    square of that size; otherwise its minimum size tracks the QR matrix
    dimensions (5 px per module).
    """

    def __init__(self, data = None, fixedSize=False):
        QWidget.__init__(self)
        self.data = None
        self.qr = None
        self.fixedSize=fixedSize
        if fixedSize:
            # fixedSize doubles as both the flag and the pixel edge length.
            self.setFixedSize(fixedSize, fixedSize)
        self.setData(data)

    def setData(self, data):
        """Set the string to encode and rebuild the QR matrix.

        Passing a falsy value clears the code; the widget then paints a
        plain white rectangle.  Always triggers a repaint.
        """
        if self.data != data:
            self.data = data
        if self.data:
            self.qr = qrcode.QRCode()
            self.qr.add_data(self.data)
            if not self.fixedSize:
                # Reserve 5 px per QR module so the code stays legible.
                k = len(self.qr.get_matrix())
                self.setMinimumSize(k*5,k*5)
        else:
            self.qr = None

        self.update()

    def paintEvent(self, e):
        """Draw the QR matrix centered in the widget's viewport.

        Modules set in the matrix are painted black, unset ones white, and a
        10 px white margin is painted around the code so it scans correctly
        on dark themes.
        """
        if not self.data:
            return

        black = QColor(0, 0, 0, 255)
        white = QColor(255, 255, 255, 255)

        if not self.qr:
            # Data present but no QR matrix yet: blank the widget.
            qp = QtGui.QPainter()
            qp.begin(self)
            qp.setBrush(white)
            qp.setPen(white)
            r = qp.viewport()
            qp.drawRect(0, 0, r.width(), r.height())
            qp.end()
            return

        matrix = self.qr.get_matrix()
        k = len(matrix)
        qp = QtGui.QPainter()
        qp.begin(self)
        r = qp.viewport()

        # Fit k*k square modules inside the viewport, leaving the margin,
        # then center the resulting square.
        margin = 10
        framesize = min(r.width(), r.height())
        boxsize = int( (framesize - 2*margin)/k )
        size = k*boxsize
        left = (r.width() - size)/2
        top = (r.height() - size)/2

        # Make a white margin around the QR in case of dark theme use
        qp.setBrush(white)
        qp.setPen(white)
        qp.drawRect(left-margin, top-margin, size+(margin*2), size+(margin*2))

        # NOTE: ``r`` is rebound here from the viewport rect to the row
        # index; the rect is no longer needed past this point.
        for r in range(k):
            for c in range(k):
                if matrix[r][c]:
                    qp.setBrush(black)
                    qp.setPen(black)
                else:
                    qp.setBrush(white)
                    qp.setPen(white)
                qp.drawRect(left+c*boxsize, top+r*boxsize, boxsize, boxsize)
        qp.end()


class QRDialog(QDialog):
    """Modal dialog showing a QR code, with optional plain-text display and
    Copy/Save buttons (the latter only when a wallet config is available,
    since saving goes through ``config.path``)."""

    def __init__(self, data, parent=None, title = "", show_text=False):
        QDialog.__init__(self, parent)
        d = self
        d.setWindowTitle(title)
        vbox = QVBoxLayout()
        qrw = QRCodeWidget(data)
        vbox.addWidget(qrw, 1)
        if show_text:
            # Show the encoded payload alongside the code, read-only.
            text = QTextEdit()
            text.setText(data)
            text.setReadOnly(True)
            vbox.addWidget(text)
        hbox = QHBoxLayout()
        hbox.addStretch(1)

        config = electrum_rby.get_config()
        if config:
            # Both actions rasterize the QR to a BMP file first; the
            # clipboard copy then loads that file back as a QImage.
            filename = os.path.join(config.path, "qrcode.bmp")

            def print_qr():
                bmp.save_qrcode(qrw.qr, filename)
                QMessageBox.information(None, _('Message'), _("QR code saved to file") + " " + filename, _('OK'))

            def copy_to_clipboard():
                bmp.save_qrcode(qrw.qr, filename)
                QApplication.clipboard().setImage(QImage(filename))
                QMessageBox.information(None, _('Message'), _("QR code saved to clipboard"), _('OK'))

            b = QPushButton(_("Copy"))
            hbox.addWidget(b)
            b.clicked.connect(copy_to_clipboard)

            b = QPushButton(_("Save"))
            hbox.addWidget(b)
            b.clicked.connect(print_qr)

        b = QPushButton(_("Close"))
        hbox.addWidget(b)
        b.clicked.connect(d.accept)
        b.setDefault(True)

        vbox.addLayout(hbox)
        d.setLayout(vbox)
gpl-3.0
remotesyssupport/cobbler-1
scripts/debuginator.py
15
1436
#!/usr/bin/python
"""
Quick test script to read the cobbler configurations and create any stub
files/directories necessary to trivially debug another user's
configuration even if the distros don't exist yet

Intended for basic support questions only.  Not for production use.

Copyright 2008-2009, Red Hat, Inc
Michael DeHaan <mdehaan@redhat.com>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301  USA
"""

import glob
import cobbler.yaml as camel
import os.path
import os


def _touch(path):
    """Create an empty file at *path*, making parent directories as needed.

    Replaces the previous ``os.system("mkdir -p %s" / "touch %s")`` calls,
    which silently misbehaved for paths containing spaces or shell
    metacharacters.  An existing file is left untouched (opened in append
    mode and immediately closed).
    """
    parent = os.path.dirname(path)
    if parent and not os.path.exists(parent):
        os.makedirs(parent)
    handle = open(path, "a")
    handle.close()


# Each file under distros.d is a YAML document describing one distro;
# camel.load() yields documents, and each file holds exactly one.
for config_file in glob.glob("/var/lib/cobbler/config/distros.d/*"):
    fh = open(config_file)
    try:
        data = fh.read()
    finally:
        # Close even if the read fails; the original leaked the handle.
        fh.close()
    distro = camel.load(data).next()
    # Stub out the kernel and initrd paths referenced by the config so the
    # rest of cobbler can be exercised without the real distro trees.  The
    # initrd's directory is now created too (previously only the kernel's
    # directory was, so a differing initrd path made "touch" fail).
    _touch(distro["kernel"])
    _touch(distro["initrd"])
gpl-2.0
panmari/tensorflow
tensorflow/examples/skflow/boston.py
1
1485
# Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Train a small DNN regressor on the Boston housing dataset and report MSE."""

from sklearn import datasets, cross_validation, metrics
from sklearn import preprocessing

from tensorflow.contrib import skflow

# Load dataset
boston = datasets.load_boston()
X, y = boston.data, boston.target

# Split dataset into train / test (fixed seed for reproducibility)
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y,
    test_size=0.2, random_state=42)

# Scale data (training set) to 0 mean and unit std. dev.  The scaler must be
# fit on the training set only.
scaler = preprocessing.StandardScaler()
X_train = scaler.fit_transform(X_train)

# Build 2 layer fully connected DNN with 10, 10 units respectively.
regressor = skflow.TensorFlowDNNRegressor(hidden_units=[10, 10],
    steps=5000, learning_rate=0.1, batch_size=1)

# Fit
regressor.fit(X_train, y_train)

# Predict and score.  BUGFIX: the test set is scaled with transform(), reusing
# the mean/variance learned on the training set.  The previous
# fit_transform(X_test) re-fit the scaler on the test data, which both leaks
# test-set statistics and applies a scaling inconsistent with training.
score = metrics.mean_squared_error(
    regressor.predict(scaler.transform(X_test)), y_test)

print('MSE: {0:f}'.format(score))
apache-2.0
aliaksandrb/anydo_api
docs/conf.py
1
8486
#!/usr/bin/env python # -*- coding: utf-8 -*- # # anydo_api documentation build configuration file, created by # sphinx-quickstart on Tue Jul 9 22:26:36 2013. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another # directory, add these directories to sys.path here. If the directory is # relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # Get the project root dir, which is the parent dir of this cwd = os.getcwd() project_root = os.path.dirname(cwd) # Insert the project root dir as the first element in the PYTHONPATH. # This lets us ensure that the source package is imported, and that its # version is used. sys.path.insert(0, project_root) import anydo_api # -- General configuration --------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. 
project = u'AnyDo API Python' copyright = u'2015, Aliaksandr Buhayeu' # The version info for the project you're documenting, acts as replacement # for |version| and |release|, also used in various other places throughout # the built documents. # # The short X.Y version. version = anydo_api.__version__ # The full version, including alpha/beta/rc tags. release = anydo_api.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to # some non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built # documents. #keep_warnings = False # -- Options for HTML output ------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'classic' # Theme options are theme-specific and customize the look and feel of a # theme further. For a list of options available for each theme, see the # documentation. 
#html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as # html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the # top of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon # of the docs. This file should be a Windows icon file (.ico) being # 16x16 or 32x32 pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) # here, relative to this directory. They are copied after the builtin # static files, so a file named "default.css" will overwrite the builtin # "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page # bottom, using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names # to template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. # Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. # Default is True. 
#html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages # will contain a <link> tag referring to it. The value of this option # must be the base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'anydo_apidoc' # -- Options for LaTeX output ------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'anydo_api.tex', u'AnyDo API Python Documentation', u'Aliaksandr Buhayeu', 'manual'), ] # The name of an image file (relative to this directory) to place at # the top of the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings # are parts, not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output ------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'anydo_api', u'AnyDo API Python Documentation', [u'Aliaksandr Buhayeu'], 1) ] # If true, show URL addresses after external links. 
#man_show_urls = False # -- Options for Texinfo output ---------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'anydo_api', u'AnyDo API Python Documentation', u'Aliaksandr Buhayeu', 'anydo_api', 'Unofficial AnyDo API client in object-oriented style.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
mit
josenavas/mustached-octo-ironman
moi/job.py
2
7636
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
"""Job submission and tracking helpers for moi.

Jobs are dispatched through an execution context (``ctx.bv.apply_async``)
and their state is mirrored into Redis; every state change is also published
on the job's pubsub channel so clients can follow progress live.
"""

import sys
import traceback
import json
from functools import partial
from datetime import datetime
from subprocess import Popen, PIPE

from moi import r_client, ctxs, ctx_default, REDIS_KEY_TIMEOUT
from moi.group import create_info
from moi.context import Context


def system_call(cmd, **kwargs):
    """Call cmd and return (stdout, stderr, return_value).

    Parameters
    ----------
    cmd: str
        Can be either a string containing the command to be run, or a
        sequence of strings that are the tokens of the command.
    kwargs : dict, optional
        Ignored. Available so that this function is compatible with
        _redis_wrap.

    Raises
    ------
    ValueError
        If the command exits with a non-zero return code; the message
        includes the captured stdout and stderr.

    Notes
    -----
    This function is ported from QIIME (http://www.qiime.org), previously
    named qiime_system_call. QIIME is a GPL project, but we obtained permission
    from the authors of this function to port it to pyqi (and keep it under
    pyqi's BSD license).
    """
    # NOTE(review): shell=True means ``cmd`` is interpreted by the shell —
    # callers must not pass untrusted input here.
    proc = Popen(cmd, universal_newlines=True, shell=True, stdout=PIPE,
                 stderr=PIPE)
    # communicate pulls all stdout/stderr from the PIPEs to
    # avoid blocking -- don't remove this line!
    stdout, stderr = proc.communicate()
    return_value = proc.returncode
    if return_value != 0:
        raise ValueError("Failed to execute: %s\nstdout: %s\nstderr: %s" %
                         (cmd, stdout, stderr))
    return stdout, stderr, return_value


def _status_change(id, new_status):
    """Update the status of a job

    The status associated with the id is updated, an update command is
    issued to the job's pubsub, and and the old status is returned.

    Parameters
    ----------
    id : str
        The job ID
    new_status : str
        The status change

    Returns
    -------
    str
        The old status
    """
    # ``id`` (shadowing the builtin) is the Redis key holding the
    # JSON-serialized job info.
    job_info = json.loads(r_client.get(id))
    old_status = job_info['status']
    job_info['status'] = new_status
    _deposit_payload(job_info)

    return old_status


def _deposit_payload(to_deposit):
    """Store job info, and publish an update

    Parameters
    ----------
    to_deposit : dict
        The job info
    """
    pubsub = to_deposit['pubsub']
    id = to_deposit['id']
    # Pipeline the SET and PUBLISH so subscribers that react to the update
    # never read a stale key.  The key expires after REDIS_KEY_TIMEOUT.
    with r_client.pipeline() as pipe:
        pipe.set(id, json.dumps(to_deposit), ex=REDIS_KEY_TIMEOUT)
        pipe.publish(pubsub, json.dumps({"update": [id]}))
        pipe.execute()


def _redis_wrap(job_info, func, *args, **kwargs):
    """Wrap something to compute

    The function that will have available, via kwargs['moi_update_status'],
    a method to modify the job status. This method can be used within the
    executing function by:

        old_status = kwargs['moi_update_status']('my new status')

    Parameters
    ----------
    job_info : dict
        Redis job details
    func : function
        A function to execute. This function must accept ``**kwargs``, and
        will have ``moi_update_status``, ``moi_context`` and ``moi_parent_id``
        available.

    Raises
    ------
    Exception
        If the function called raises, that exception is propagated.

    Returns
    -------
    Anything the function executed returns.
    """
    # Bind the status changer to this job's ID so ``func`` can update status
    # without knowing about Redis.
    status_changer = partial(_status_change, job_info['id'])

    kwargs['moi_update_status'] = status_changer
    kwargs['moi_context'] = job_info['context']
    kwargs['moi_parent_id'] = job_info['parent']

    job_info['status'] = 'Running'
    job_info['date_start'] = str(datetime.now())
    _deposit_payload(job_info)

    caught = None
    try:
        result = func(*args, **kwargs)
        job_info['status'] = 'Success'
    except Exception as e:
        # On failure, the formatted traceback becomes the job's result.
        result = traceback.format_exception(*sys.exc_info())
        job_info['status'] = 'Failed'
        caught = e
    finally:
        # Always record the outcome (result, end time, final status) before
        # re-raising, so observers see the terminal state either way.
        job_info['result'] = result
        job_info['date_end'] = str(datetime.now())
        _deposit_payload(job_info)

    if caught is None:
        return result
    else:
        raise caught


def submit(ctx_name, parent_id, name, url, func, *args, **kwargs):
    """Submit through a context

    Parameters
    ----------
    ctx_name : str
        The name of the context to submit through
    parent_id : str
        The ID of the group that the job is a part of.
    name : str
        The name of the job
    url : str
        The handler that can take the results (e.g., /beta_diversity/)
    func : function
        The function to execute. Any returns from this function will be
        serialized and deposited into Redis using the uuid for a key. This
        function should raise if the method fails.
    args : tuple or None
        Any args for ``func``
    kwargs : dict or None
        Any kwargs for ``func``

    Returns
    -------
    tuple, (str, str, AsyncResult)
        The job ID, parent ID and the IPython's AsyncResult object of the job
    """
    # ``ctx_name`` may already be a Context instance; otherwise resolve the
    # name, falling back to the default context when unknown.
    if isinstance(ctx_name, Context):
        ctx = ctx_name
    else:
        ctx = ctxs.get(ctx_name, ctxs[ctx_default])
    return _submit(ctx, parent_id, name, url, func, *args, **kwargs)


def _submit(ctx, parent_id, name, url, func, *args, **kwargs):
    """Submit a function to a cluster

    Parameters
    ----------
    parent_id : str
        The ID of the group that the job is a part of.
    name : str
        The name of the job
    url : str
        The handler that can take the results (e.g., /beta_diversity/)
    func : function
        The function to execute. Any returns from this function will be
        serialized and deposited into Redis using the uuid for a key.
        This function should raise if the method fails.
    args : tuple or None
        Any args for ``func``
    kwargs : dict or None
        Any kwargs for ``func``

    Returns
    -------
    tuple, (str, str, AsyncResult)
        The job ID, parent ID and the IPython's AsyncResult object of the job
    """
    # Create the parent group lazily if it does not exist yet.
    parent_info = r_client.get(parent_id)
    if parent_info is None:
        parent_info = create_info('unnamed', 'group', id=parent_id)
        parent_id = parent_info['id']
        r_client.set(parent_id, json.dumps(parent_info))

    parent_pubsub_key = parent_id + ':pubsub'

    job_info = create_info(name, 'job', url=url, parent=parent_id,
                           context=ctx.name, store=True)
    job_info['status'] = 'Queued'
    job_id = job_info['id']

    # Store the queued job and announce it to the parent group atomically.
    with r_client.pipeline() as pipe:
        pipe.set(job_id, json.dumps(job_info))
        pipe.publish(parent_pubsub_key, json.dumps({'add': [job_id]}))
        pipe.execute()

    # _redis_wrap handles all subsequent status transitions and result
    # deposition from within the worker.
    ar = ctx.bv.apply_async(_redis_wrap, job_info, func, *args, **kwargs)
    return job_id, parent_id, ar


def submit_nouser(func, *args, **kwargs):
    """Submit a function to a cluster without an associated user

    Parameters
    ----------
    func : function
        The function to execute. Any returns from this function will be
        serialized and deposited into Redis using the uuid for a key.
    args : tuple or None
        Any args for ``f``
    kwargs : dict or None
        Any kwargs for ``f``

    Returns
    -------
    tuple, (str, str)
        The job ID, parent ID and the IPython's AsyncResult object of the job
    """
    # All user-less jobs share the "no-user" parent group.
    return submit(ctx_default, "no-user", "unnamed", None, func, *args,
                  **kwargs)
bsd-3-clause
Jorge-Rodriguez/ansible
lib/ansible/modules/network/avi/avi_wafpolicy.py
31
5690
#!/usr/bin/python # # @author: Gaurav Rastogi (grastogi@avinetworks.com) # Eric Anderson (eanderson@avinetworks.com) # module_check: supported # # Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: avi_wafpolicy author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com> short_description: Module for setup of WafPolicy Avi RESTful Object description: - This module is used to configure WafPolicy object - more examples at U(https://github.com/avinetworks/devops) requirements: [ avisdk ] version_added: "2.5" options: state: description: - The state that should be applied on the entity. default: present choices: ["absent", "present"] avi_api_update_method: description: - Default method for object update is HTTP PUT. - Setting to patch will override that behavior to use HTTP PATCH. version_added: "2.5" default: put choices: ["put", "patch"] avi_api_patch_op: description: - Patch operation to use when using avi_api_update_method as patch. version_added: "2.5" choices: ["add", "replace", "delete"] created_by: description: - Creator name. - Field introduced in 17.2.4. crs_groups: description: - Waf rules are categorized in to groups based on their characterization. - These groups are system created with crs groups. - Field introduced in 17.2.1. description: description: - Field introduced in 17.2.1. mode: description: - Waf policy mode. - This can be detection or enforcement. - Enum options - WAF_MODE_DETECTION_ONLY, WAF_MODE_ENFORCEMENT. - Field introduced in 17.2.1. - Default value when not specified in API or module is interpreted by Avi Controller as WAF_MODE_DETECTION_ONLY. required: true name: description: - Field introduced in 17.2.1. required: true paranoia_level: description: - Waf ruleset paranoia mode. 
- This is used to select rules based on the paranoia-level tag. - Enum options - WAF_PARANOIA_LEVEL_LOW, WAF_PARANOIA_LEVEL_MEDIUM, WAF_PARANOIA_LEVEL_HIGH, WAF_PARANOIA_LEVEL_EXTREME. - Field introduced in 17.2.1. - Default value when not specified in API or module is interpreted by Avi Controller as WAF_PARANOIA_LEVEL_LOW. post_crs_groups: description: - Waf rules are categorized in to groups based on their characterization. - These groups are created by the user and will be enforced after the crs groups. - Field introduced in 17.2.1. pre_crs_groups: description: - Waf rules are categorized in to groups based on their characterization. - These groups are created by the user and will be enforced before the crs groups. - Field introduced in 17.2.1. tenant_ref: description: - It is a reference to an object of type tenant. - Field introduced in 17.2.1. url: description: - Avi controller URL of the object. uuid: description: - Field introduced in 17.2.1. waf_profile_ref: description: - Waf profile for waf policy. - It is a reference to an object of type wafprofile. - Field introduced in 17.2.1. 
required: true extends_documentation_fragment: - avi ''' EXAMPLES = """ - name: Example to create WafPolicy object avi_wafpolicy: controller: 10.10.25.42 username: admin password: something state: present name: sample_wafpolicy """ RETURN = ''' obj: description: WafPolicy (api/wafpolicy) object returned: success, changed type: dict ''' from ansible.module_utils.basic import AnsibleModule try: from ansible.module_utils.network.avi.avi import ( avi_common_argument_spec, HAS_AVI, avi_ansible_api) except ImportError: HAS_AVI = False def main(): argument_specs = dict( state=dict(default='present', choices=['absent', 'present']), avi_api_update_method=dict(default='put', choices=['put', 'patch']), avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), created_by=dict(type='str',), crs_groups=dict(type='list',), description=dict(type='str',), mode=dict(type='str', required=True), name=dict(type='str', required=True), paranoia_level=dict(type='str',), post_crs_groups=dict(type='list',), pre_crs_groups=dict(type='list',), tenant_ref=dict(type='str',), url=dict(type='str',), uuid=dict(type='str',), waf_profile_ref=dict(type='str', required=True), ) argument_specs.update(avi_common_argument_spec()) module = AnsibleModule( argument_spec=argument_specs, supports_check_mode=True) if not HAS_AVI: return module.fail_json(msg=( 'Avi python API SDK (avisdk>=17.1) is not installed. ' 'For more details visit https://github.com/avinetworks/sdk.')) return avi_ansible_api(module, 'wafpolicy', set([])) if __name__ == '__main__': main()
gpl-3.0
embeddedarm/android_external_chromium_org
tools/telemetry/telemetry/page/actions/wait.py
23
3155
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import time

from telemetry.core import util
from telemetry.page.actions import page_action


class WaitAction(page_action.PageAction):
  """Page action that blocks until a wait condition is satisfied.

  The condition is selected from the action's attributes, probed in this
  order inside RunAction:
    * seconds: plain time.sleep for that many seconds.
    * condition == 'navigate': re-run the previous action and wait for the
      resulting navigation to complete.
    * condition == 'href_change': re-run the previous action and wait for
      document.location.href to change.
    * condition == 'element': wait for an element identified by one of
      'text', 'selector' (CSS) or 'xpath' to appear in the DOM.
    * javascript: wait until the given JS expression evaluates truthy.
  The wait is bracketed by console.time/console.timeEnd calls so it shows
  up as a named marker in the recorded timeline.
  """

  def __init__(self, attributes=None):
    # Default wait timeout in seconds. Set before calling the base
    # constructor so that, presumably, a 'timeout' entry in `attributes`
    # can override it — TODO confirm PageAction.__init__ applies
    # attributes onto self.
    self.timeout = 60
    super(WaitAction, self).__init__(attributes)
    self._SetTimelineMarkerBaseName('WaitAction::RunAction')

  def RunsPreviousAction(self):
    # The 'navigate' and 'href_change' conditions re-execute the previous
    # action themselves (see RunAction), so the framework must not run it.
    return (getattr(self, 'condition', None) == 'navigate' or
            getattr(self, 'condition', None) == 'href_change')

  def RunAction(self, page, tab, previous_action):
    """Blocks until the configured condition holds.

    Args:
      page: the page object the action runs against.
      tab: browser tab handle used for JS evaluation and navigation waits.
      previous_action: the preceding action; required (non-None) for the
          'navigate' and 'href_change' conditions, unused otherwise.

    Raises:
      page_action.PageActionFailed: if a required previous_action is
          missing, if condition == 'element' has no text/selector/xpath
          attribute, or if no recognized wait condition is configured.
    """
    # Open a timeline marker so the wait duration is measurable.
    tab.ExecuteJavaScript(
        'console.time("' + self.GetTimelineMarkerName() + '")')
    if hasattr(self, 'seconds'):
      time.sleep(self.seconds)
    elif getattr(self, 'condition', None) == 'navigate':
      if not previous_action:
        raise page_action.PageActionFailed('You need to perform an action '
                                           'before waiting for navigate.')
      previous_action.WillRunAction(page, tab)
      # The previous action is replayed here; the navigation it triggers
      # is awaited atomically by PerformActionAndWaitForNavigate.
      action_to_perform = lambda: previous_action.RunAction(page, tab, None)
      tab.PerformActionAndWaitForNavigate(action_to_perform, self.timeout)
      tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
    elif getattr(self, 'condition', None) == 'href_change':
      if not previous_action:
        raise page_action.PageActionFailed('You need to perform an action '
                                           'before waiting for a href change.')
      previous_action.WillRunAction(page, tab)
      # Snapshot the URL first so we can detect any change caused by the
      # replayed action.
      old_url = tab.EvaluateJavaScript('document.location.href')
      previous_action.RunAction(page, tab, None)
      tab.WaitForJavaScriptExpression(
          'document.location.href != "%s"' % old_url, self.timeout)
    elif getattr(self, 'condition', None) == 'element':
      if hasattr(self, 'text'):
        # Element is located by its visible text; the callback just checks
        # that a matching element exists.
        callback_code = 'function(element) { return element != null; }'
        util.WaitFor(
            lambda: util.FindElementAndPerformAction(
                tab, self.text, callback_code), self.timeout)
      elif hasattr(self, 'selector'):
        # NOTE(review): re.escape is used to escape the selector before
        # embedding it in a JS string literal — it escapes regex
        # metacharacters, not JS string metacharacters; confirm selectors
        # with quotes/backslashes are not expected here.
        tab.WaitForJavaScriptExpression(
             'document.querySelector("%s") != null' % re.escape(self.selector),
             self.timeout)
      elif hasattr(self, 'xpath'):
        # Evaluate the XPath and wait for it to match at least one node.
        code = ('document.evaluate("%s",'
                'document,'
                'null,'
                'XPathResult.FIRST_ORDERED_NODE_TYPE,'
                'null)'
                '.singleNodeValue' % re.escape(self.xpath))
        tab.WaitForJavaScriptExpression('%s != null' % code, self.timeout)
      else:
        raise page_action.PageActionFailed(
            'No element condition given to wait')
    elif hasattr(self, 'javascript'):
      # Arbitrary JS expression supplied by the page set.
      tab.WaitForJavaScriptExpression(self.javascript, self.timeout)
    else:
      raise page_action.PageActionFailed('No wait condition found')
    # Close the timeline marker opened at the top of this method.
    tab.ExecuteJavaScript(
        'console.timeEnd("' + self.GetTimelineMarkerName() + '")')
bsd-3-clause
feroda/odoo
addons/mail/res_partner.py
379
2454
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.tools.translate import _
from openerp.osv import fields, osv


class res_partner_mail(osv.Model):
    """Mail-enabled partner.

    Extends ``res.partner`` with the ``mail.thread`` mixin and adds a
    preference field controlling when inbox notifications are forwarded
    by e-mail.
    """
    _name = "res.partner"
    _inherit = ['res.partner', 'mail.thread']
    # Show the partner's chatter as a flat message list, not a thread tree.
    _mail_flat_thread = False
    _mail_mass_mailing = _('Customers')

    _columns = {
        'notify_email': fields.selection(
            [('none', 'Never'), ('always', 'All Messages')],
            'Receive Inbox Notifications by Email', required=True,
            oldname='notification_email_send',
            help="Policy to receive emails for new messages pushed to your personal Inbox:\n"
                 "- Never: no emails are sent\n"
                 "- All Messages: for every notification you receive in your Inbox"),
    }

    _defaults = {
        # New partners get e-mail forwarding for every inbox notification.
        'notify_email': lambda *a: 'always',
    }

    def message_get_suggested_recipients(self, cr, uid, ids, context=None):
        """Suggest each partner as a recipient of its own message thread."""
        suggestions = super(res_partner_mail, self).message_get_suggested_recipients(
            cr, uid, ids, context=context)
        for record in self.browse(cr, uid, ids, context=context):
            self._message_add_suggested_recipient(
                cr, uid, suggestions, record, partner=record,
                reason=_('Partner Profile'))
        return suggestions

    def message_get_default_recipients(self, cr, uid, ids, context=None):
        """Each partner record is its own default recipient (no cc/to emails)."""
        return {rec_id: {'partner_ids': [rec_id],
                         'email_to': False,
                         'email_cc': False}
                for rec_id in ids}
agpl-3.0
Khan/git-bigfile
vendor/boto/dynamodb/layer1.py
153
24057
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # import time from binascii import crc32 import boto from boto.connection import AWSAuthConnection from boto.exception import DynamoDBResponseError from boto.provider import Provider from boto.dynamodb import exceptions as dynamodb_exceptions from boto.compat import json class Layer1(AWSAuthConnection): """ This is the lowest-level interface to DynamoDB. Methods at this layer map directly to API requests and parameters to the methods are either simple, scalar values or they are the Python equivalent of the JSON input as defined in the DynamoDB Developer's Guide. All responses are direct decoding of the JSON response bodies to Python data structures via the json or simplejson modules. 
:ivar throughput_exceeded_events: An integer variable that keeps a running total of the number of ThroughputExceeded responses this connection has received from Amazon DynamoDB. """ DefaultRegionName = 'us-east-1' """The default region name for DynamoDB API.""" ServiceName = 'DynamoDB' """The name of the Service""" Version = '20111205' """DynamoDB API version.""" ThruputError = "ProvisionedThroughputExceededException" """The error response returned when provisioned throughput is exceeded""" SessionExpiredError = 'com.amazon.coral.service#ExpiredTokenException' """The error response returned when session token has expired""" ConditionalCheckFailedError = 'ConditionalCheckFailedException' """The error response returned when a conditional check fails""" ValidationError = 'ValidationException' """The error response returned when an item is invalid in some way""" ResponseError = DynamoDBResponseError NumberRetries = 10 """The number of times an error is retried.""" def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, debug=0, security_token=None, region=None, validate_certs=True, validate_checksums=True, profile_name=None): if not region: region_name = boto.config.get('DynamoDB', 'region', self.DefaultRegionName) for reg in boto.dynamodb.regions(): if reg.name == region_name: region = reg break self.region = region super(Layer1, self).__init__(self.region.endpoint, aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, debug=debug, security_token=security_token, validate_certs=validate_certs, profile_name=profile_name) self.throughput_exceeded_events = 0 self._validate_checksums = boto.config.getbool( 'DynamoDB', 'validate_checksums', validate_checksums) def _get_session_token(self): self.provider = Provider(self._provider_type) self._auth_handler.update_provider(self.provider) def _required_auth_capability(self): return ['hmac-v4'] def make_request(self, action, body='', 
object_hook=None): """ :raises: ``DynamoDBExpiredTokenError`` if the security token expires. """ headers = {'X-Amz-Target': '%s_%s.%s' % (self.ServiceName, self.Version, action), 'Host': self.region.endpoint, 'Content-Type': 'application/x-amz-json-1.0', 'Content-Length': str(len(body))} http_request = self.build_base_http_request('POST', '/', '/', {}, headers, body, None) start = time.time() response = self._mexe(http_request, sender=None, override_num_retries=self.NumberRetries, retry_handler=self._retry_handler) elapsed = (time.time() - start) * 1000 request_id = response.getheader('x-amzn-RequestId') boto.log.debug('RequestId: %s' % request_id) boto.perflog.debug('%s: id=%s time=%sms', headers['X-Amz-Target'], request_id, int(elapsed)) response_body = response.read().decode('utf-8') boto.log.debug(response_body) return json.loads(response_body, object_hook=object_hook) def _retry_handler(self, response, i, next_sleep): status = None if response.status == 400: response_body = response.read().decode('utf-8') boto.log.debug(response_body) data = json.loads(response_body) if self.ThruputError in data.get('__type'): self.throughput_exceeded_events += 1 msg = "%s, retry attempt %s" % (self.ThruputError, i) next_sleep = self._exponential_time(i) i += 1 status = (msg, i, next_sleep) if i == self.NumberRetries: # If this was our last retry attempt, raise # a specific error saying that the throughput # was exceeded. 
raise dynamodb_exceptions.DynamoDBThroughputExceededError( response.status, response.reason, data) elif self.SessionExpiredError in data.get('__type'): msg = 'Renewing Session Token' self._get_session_token() status = (msg, i + self.num_retries - 1, 0) elif self.ConditionalCheckFailedError in data.get('__type'): raise dynamodb_exceptions.DynamoDBConditionalCheckFailedError( response.status, response.reason, data) elif self.ValidationError in data.get('__type'): raise dynamodb_exceptions.DynamoDBValidationError( response.status, response.reason, data) else: raise self.ResponseError(response.status, response.reason, data) expected_crc32 = response.getheader('x-amz-crc32') if self._validate_checksums and expected_crc32 is not None: boto.log.debug('Validating crc32 checksum for body: %s', response.read().decode('utf-8')) actual_crc32 = crc32(response.read()) & 0xffffffff expected_crc32 = int(expected_crc32) if actual_crc32 != expected_crc32: msg = ("The calculated checksum %s did not match the expected " "checksum %s" % (actual_crc32, expected_crc32)) status = (msg, i + 1, self._exponential_time(i)) return status def _exponential_time(self, i): if i == 0: next_sleep = 0 else: next_sleep = min(0.05 * (2 ** i), boto.config.get('Boto', 'max_retry_delay', 60)) return next_sleep def list_tables(self, limit=None, start_table=None): """ Returns a dictionary of results. The dictionary contains a **TableNames** key whose value is a list of the table names. The dictionary could also contain a **LastEvaluatedTableName** key whose value would be the last table name returned if the complete list of table names was not returned. This value would then be passed as the ``start_table`` parameter on a subsequent call to this method. :type limit: int :param limit: The maximum number of tables to return. :type start_table: str :param start_table: The name of the table that starts the list. 
If you ran a previous list_tables and not all results were returned, the response dict would include a LastEvaluatedTableName attribute. Use that value here to continue the listing. """ data = {} if limit: data['Limit'] = limit if start_table: data['ExclusiveStartTableName'] = start_table json_input = json.dumps(data) return self.make_request('ListTables', json_input) def describe_table(self, table_name): """ Returns information about the table including current state of the table, primary key schema and when the table was created. :type table_name: str :param table_name: The name of the table to describe. """ data = {'TableName': table_name} json_input = json.dumps(data) return self.make_request('DescribeTable', json_input) def create_table(self, table_name, schema, provisioned_throughput): """ Add a new table to your account. The table name must be unique among those associated with the account issuing the request. This request triggers an asynchronous workflow to begin creating the table. When the workflow is complete, the state of the table will be ACTIVE. :type table_name: str :param table_name: The name of the table to create. :type schema: dict :param schema: A Python version of the KeySchema data structure as defined by DynamoDB :type provisioned_throughput: dict :param provisioned_throughput: A Python version of the ProvisionedThroughput data structure defined by DynamoDB. """ data = {'TableName': table_name, 'KeySchema': schema, 'ProvisionedThroughput': provisioned_throughput} json_input = json.dumps(data) response_dict = self.make_request('CreateTable', json_input) return response_dict def update_table(self, table_name, provisioned_throughput): """ Updates the provisioned throughput for a given table. :type table_name: str :param table_name: The name of the table to update. :type provisioned_throughput: dict :param provisioned_throughput: A Python version of the ProvisionedThroughput data structure defined by DynamoDB. 
""" data = {'TableName': table_name, 'ProvisionedThroughput': provisioned_throughput} json_input = json.dumps(data) return self.make_request('UpdateTable', json_input) def delete_table(self, table_name): """ Deletes the table and all of it's data. After this request the table will be in the DELETING state until DynamoDB completes the delete operation. :type table_name: str :param table_name: The name of the table to delete. """ data = {'TableName': table_name} json_input = json.dumps(data) return self.make_request('DeleteTable', json_input) def get_item(self, table_name, key, attributes_to_get=None, consistent_read=False, object_hook=None): """ Return a set of attributes for an item that matches the supplied key. :type table_name: str :param table_name: The name of the table containing the item. :type key: dict :param key: A Python version of the Key data structure defined by DynamoDB. :type attributes_to_get: list :param attributes_to_get: A list of attribute names. If supplied, only the specified attribute names will be returned. Otherwise, all attributes will be returned. :type consistent_read: bool :param consistent_read: If True, a consistent read request is issued. Otherwise, an eventually consistent request is issued. """ data = {'TableName': table_name, 'Key': key} if attributes_to_get: data['AttributesToGet'] = attributes_to_get if consistent_read: data['ConsistentRead'] = True json_input = json.dumps(data) response = self.make_request('GetItem', json_input, object_hook=object_hook) if 'Item' not in response: raise dynamodb_exceptions.DynamoDBKeyNotFoundError( "Key does not exist." ) return response def batch_get_item(self, request_items, object_hook=None): """ Return a set of attributes for a multiple items in multiple tables using their primary keys. :type request_items: dict :param request_items: A Python version of the RequestItems data structure defined by DynamoDB. 
""" # If the list is empty, return empty response if not request_items: return {} data = {'RequestItems': request_items} json_input = json.dumps(data) return self.make_request('BatchGetItem', json_input, object_hook=object_hook) def batch_write_item(self, request_items, object_hook=None): """ This operation enables you to put or delete several items across multiple tables in a single API call. :type request_items: dict :param request_items: A Python version of the RequestItems data structure defined by DynamoDB. """ data = {'RequestItems': request_items} json_input = json.dumps(data) return self.make_request('BatchWriteItem', json_input, object_hook=object_hook) def put_item(self, table_name, item, expected=None, return_values=None, object_hook=None): """ Create a new item or replace an old item with a new item (including all attributes). If an item already exists in the specified table with the same primary key, the new item will completely replace the old item. You can perform a conditional put by specifying an expected rule. :type table_name: str :param table_name: The name of the table in which to put the item. :type item: dict :param item: A Python version of the Item data structure defined by DynamoDB. :type expected: dict :param expected: A Python version of the Expected data structure defined by DynamoDB. :type return_values: str :param return_values: Controls the return of attribute name-value pairs before then were changed. Possible values are: None or 'ALL_OLD'. If 'ALL_OLD' is specified and the item is overwritten, the content of the old item is returned. """ data = {'TableName': table_name, 'Item': item} if expected: data['Expected'] = expected if return_values: data['ReturnValues'] = return_values json_input = json.dumps(data) return self.make_request('PutItem', json_input, object_hook=object_hook) def update_item(self, table_name, key, attribute_updates, expected=None, return_values=None, object_hook=None): """ Edits an existing item's attributes. 
You can perform a conditional update (insert a new attribute name-value pair if it doesn't exist, or replace an existing name-value pair if it has certain expected attribute values). :type table_name: str :param table_name: The name of the table. :type key: dict :param key: A Python version of the Key data structure defined by DynamoDB which identifies the item to be updated. :type attribute_updates: dict :param attribute_updates: A Python version of the AttributeUpdates data structure defined by DynamoDB. :type expected: dict :param expected: A Python version of the Expected data structure defined by DynamoDB. :type return_values: str :param return_values: Controls the return of attribute name-value pairs before then were changed. Possible values are: None or 'ALL_OLD'. If 'ALL_OLD' is specified and the item is overwritten, the content of the old item is returned. """ data = {'TableName': table_name, 'Key': key, 'AttributeUpdates': attribute_updates} if expected: data['Expected'] = expected if return_values: data['ReturnValues'] = return_values json_input = json.dumps(data) return self.make_request('UpdateItem', json_input, object_hook=object_hook) def delete_item(self, table_name, key, expected=None, return_values=None, object_hook=None): """ Delete an item and all of it's attributes by primary key. You can perform a conditional delete by specifying an expected rule. :type table_name: str :param table_name: The name of the table containing the item. :type key: dict :param key: A Python version of the Key data structure defined by DynamoDB. :type expected: dict :param expected: A Python version of the Expected data structure defined by DynamoDB. :type return_values: str :param return_values: Controls the return of attribute name-value pairs before then were changed. Possible values are: None or 'ALL_OLD'. If 'ALL_OLD' is specified and the item is overwritten, the content of the old item is returned. 
""" data = {'TableName': table_name, 'Key': key} if expected: data['Expected'] = expected if return_values: data['ReturnValues'] = return_values json_input = json.dumps(data) return self.make_request('DeleteItem', json_input, object_hook=object_hook) def query(self, table_name, hash_key_value, range_key_conditions=None, attributes_to_get=None, limit=None, consistent_read=False, scan_index_forward=True, exclusive_start_key=None, object_hook=None, count=False): """ Perform a query of DynamoDB. This version is currently punting and expecting you to provide a full and correct JSON body which is passed as is to DynamoDB. :type table_name: str :param table_name: The name of the table to query. :type hash_key_value: dict :param key: A DynamoDB-style HashKeyValue. :type range_key_conditions: dict :param range_key_conditions: A Python version of the RangeKeyConditions data structure. :type attributes_to_get: list :param attributes_to_get: A list of attribute names. If supplied, only the specified attribute names will be returned. Otherwise, all attributes will be returned. :type limit: int :param limit: The maximum number of items to return. :type count: bool :param count: If True, Amazon DynamoDB returns a total number of items for the Query operation, even if the operation has no matching items for the assigned filter. :type consistent_read: bool :param consistent_read: If True, a consistent read request is issued. Otherwise, an eventually consistent request is issued. :type scan_index_forward: bool :param scan_index_forward: Specified forward or backward traversal of the index. Default is forward (True). :type exclusive_start_key: list or tuple :param exclusive_start_key: Primary key of the item from which to continue an earlier query. This would be provided as the LastEvaluatedKey in that query. 
""" data = {'TableName': table_name, 'HashKeyValue': hash_key_value} if range_key_conditions: data['RangeKeyCondition'] = range_key_conditions if attributes_to_get: data['AttributesToGet'] = attributes_to_get if limit: data['Limit'] = limit if count: data['Count'] = True if consistent_read: data['ConsistentRead'] = True if scan_index_forward: data['ScanIndexForward'] = True else: data['ScanIndexForward'] = False if exclusive_start_key: data['ExclusiveStartKey'] = exclusive_start_key json_input = json.dumps(data) return self.make_request('Query', json_input, object_hook=object_hook) def scan(self, table_name, scan_filter=None, attributes_to_get=None, limit=None, exclusive_start_key=None, object_hook=None, count=False): """ Perform a scan of DynamoDB. This version is currently punting and expecting you to provide a full and correct JSON body which is passed as is to DynamoDB. :type table_name: str :param table_name: The name of the table to scan. :type scan_filter: dict :param scan_filter: A Python version of the ScanFilter data structure. :type attributes_to_get: list :param attributes_to_get: A list of attribute names. If supplied, only the specified attribute names will be returned. Otherwise, all attributes will be returned. :type limit: int :param limit: The maximum number of items to evaluate. :type count: bool :param count: If True, Amazon DynamoDB returns a total number of items for the Scan operation, even if the operation has no matching items for the assigned filter. :type exclusive_start_key: list or tuple :param exclusive_start_key: Primary key of the item from which to continue an earlier query. This would be provided as the LastEvaluatedKey in that query. 
""" data = {'TableName': table_name} if scan_filter: data['ScanFilter'] = scan_filter if attributes_to_get: data['AttributesToGet'] = attributes_to_get if limit: data['Limit'] = limit if count: data['Count'] = True if exclusive_start_key: data['ExclusiveStartKey'] = exclusive_start_key json_input = json.dumps(data) return self.make_request('Scan', json_input, object_hook=object_hook)
mit
mxOBS/deb-pkg_trusty_chromium-browser
tools/telemetry/telemetry/unittest_util/run_tests.py
11
6847
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import sys

from telemetry import decorators
from telemetry.core import browser_finder
from telemetry.core import browser_finder_exceptions
from telemetry.core import browser_options
from telemetry.core import command_line
from telemetry.core import util
from telemetry.unittest_util import options_for_unittests
from telemetry.unittest_util import browser_test_case

util.AddDirToPythonPath(util.GetChromiumSrcDir(), 'third_party', 'typ')


import typ


class RunTestsCommand(command_line.OptparseCommand):
  """Run unit tests via the typ runner, against a discovered browser."""

  usage = '[test_name ...] [<options>]'

  def __init__(self):
    super(RunTestsCommand, self).__init__()
    # Output stream for test results; main() may replace this.
    self.stream = sys.stdout

  @classmethod
  def CreateParser(cls):
    """Builds an option parser seeded with browser-finder options."""
    options = browser_options.BrowserFinderOptions()
    options.browser_type = 'any'
    parser = options.CreateParser('%%prog %s' % cls.usage)
    return parser

  @classmethod
  def AddCommandLineArgs(cls, parser):
    """Adds telemetry-specific options plus typ's running/reporting groups."""
    parser.add_option('--repeat-count', type='int', default=1,
                      help='Repeats each a provided number of times.')
    parser.add_option('-d', '--also-run-disabled-tests',
                      dest='run_disabled_tests',
                      action='store_true', default=False,
                      help='Ignore @Disabled and @Enabled restrictions.')
    parser.add_option('--exact-test-filter', action='store_true', default=False,
                      help='Treat test filter as exact matches (default is '
                           'substring matches).')

    # Skip options typ would duplicate with the ones defined above / by
    # the browser-finder parser.
    typ.ArgumentParser.add_option_group(parser,
                                        "Options for running the tests",
                                        running=True,
                                        skip=['-d', '-v', '--verbose'])
    typ.ArgumentParser.add_option_group(parser,
                                        "Options for reporting the results",
                                        reporting=True)

  @classmethod
  def ProcessCommandLineArgs(cls, parser, args):
    """Validates parsed args; errors out if no matching browser exists."""
    # We retry failures by default unless we're running a list of tests
    # explicitly.
    if not args.retry_limit and not args.positional_args:
      args.retry_limit = 3

    try:
      possible_browser = browser_finder.FindBrowser(args)
    except browser_finder_exceptions.BrowserFinderException as ex:
      parser.error(ex)

    if not possible_browser:
      parser.error('No browser found of type %s. Cannot run tests.\n'
                   'Re-run with --browser=list to see '
                   'available browser types.' % args.browser_type)

  @classmethod
  def main(cls, args=None, stream=None):  # pylint: disable=W0221
    # We override the superclass so that we can hook in the 'stream' arg.
    parser = cls.CreateParser()
    cls.AddCommandLineArgs(parser)
    options, positional_args = parser.parse_args(args)
    options.positional_args = positional_args
    cls.ProcessCommandLineArgs(parser, options)

    obj = cls()
    if stream is not None:
      obj.stream = stream
    return obj.Run(options)

  def Run(self, args):
    """Configures a typ.Runner from `args` and executes the tests.

    Returns the runner's exit code (130 on keyboard interrupt).
    """
    possible_browser = browser_finder.FindBrowser(args)

    runner = typ.Runner()
    if self.stream:
      runner.host.stdout = self.stream

    # Telemetry seems to overload the system if we run one test per core,
    # so we scale things back a fair amount. Many of the telemetry tests
    # are long-running, so there's a limit to how much parallelism we
    # can effectively use for now anyway.
    #
    # It should be possible to handle multiple devices if we adjust
    # the browser_finder code properly, but for now we only handle the one
    # on Android and ChromeOS.
    if possible_browser.platform.GetOSName() in ('android', 'chromeos'):
      runner.args.jobs = 1
    elif possible_browser.platform.GetOSVersionName() == 'xp':
      # For an undiagnosed reason, XP falls over with more parallelism.
      # See crbug.com/388256
      runner.args.jobs = max(int(args.jobs) // 4, 1)
    else:
      runner.args.jobs = max(int(args.jobs) // 2, 1)

    # Forward the remaining typ options verbatim.
    runner.args.metadata = args.metadata
    runner.args.passthrough = args.passthrough
    runner.args.path = args.path
    runner.args.retry_limit = args.retry_limit
    runner.args.test_results_server = args.test_results_server
    runner.args.test_type = args.test_type
    runner.args.timing = args.timing
    runner.args.top_level_dir = args.top_level_dir
    runner.args.verbose = args.verbosity
    runner.args.write_full_results_to = args.write_full_results_to
    runner.args.write_trace_to = args.write_trace_to

    runner.args.path.append(util.GetUnittestDataDir())

    runner.classifier = GetClassifier(args, possible_browser)
    runner.context = args
    runner.setup_fn = _SetUpProcess
    runner.teardown_fn = _TearDownProcess
    runner.win_multiprocessing = typ.WinMultiprocessing.importable
    try:
      ret, _, _ = runner.run()
    except KeyboardInterrupt:
      print >> sys.stderr, "interrupted, exiting"
      ret = 130
    return ret


def GetClassifier(args, possible_browser):
  """Returns a typ classifier that routes tests into parallel/isolated/skip
  sets based on the test filter and the @Disabled/@Enabled decorators."""

  def ClassifyTest(test_set, test):
    name = test.id()
    if args.positional_args:
      # An explicit test list was given: only classify matching tests, and
      # do not apply @Disabled/@Enabled skipping to them.
      if _MatchesSelectedTest(name, args.positional_args,
                              args.exact_test_filter):
        assert hasattr(test, '_testMethodName')
        method = getattr(test, test._testMethodName)  # pylint: disable=W0212
        if decorators.ShouldBeIsolated(method, possible_browser):
          test_set.isolated_tests.append(typ.TestInput(name))
        else:
          test_set.parallel_tests.append(typ.TestInput(name))
    else:
      assert hasattr(test, '_testMethodName')
      method = getattr(test, test._testMethodName)  # pylint: disable=W0212
      should_skip, reason = decorators.ShouldSkip(method, possible_browser)
      if should_skip and not args.run_disabled_tests:
        test_set.tests_to_skip.append(typ.TestInput(name, msg=reason))
      elif decorators.ShouldBeIsolated(method, possible_browser):
        test_set.isolated_tests.append(typ.TestInput(name))
      else:
        test_set.parallel_tests.append(typ.TestInput(name))

  return ClassifyTest


def _MatchesSelectedTest(name, selected_tests, selected_tests_are_exact):
  """Returns True if `name` matches the user-selected test filter.

  Exact mode requires `name` to equal one of `selected_tests`; otherwise a
  substring match against any selected test suffices.
  """
  if not selected_tests:
    return False
  if selected_tests_are_exact:
    # Bug fix: this previously read `any(name in selected_tests)`, which
    # calls any() on a bool and raises TypeError whenever
    # --exact-test-filter is used. A plain membership test is intended.
    return name in selected_tests
  else:
    return any(test in name for test in selected_tests)


def _SetUpProcess(child, context):  # pylint: disable=W0613
  # Runs in each typ worker process before any test executes.
  args = context
  options_for_unittests.Push(args)


def _TearDownProcess(child, context):  # pylint: disable=W0613
  # Runs in each typ worker process after all its tests finish.
  browser_test_case.teardown_browser()
  options_for_unittests.Pop()


if __name__ == '__main__':
  ret_code = RunTestsCommand.main()
  sys.exit(ret_code)
bsd-3-clause
viaregio/mezzanine
mezzanine/generic/models.py
38
5089
from __future__ import unicode_literals
from future.builtins import map, str

from django.contrib.contenttypes.fields import GenericForeignKey
from django.db import models
from django.template.defaultfilters import truncatewords_html
from django.utils.translation import ugettext, ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible

from django_comments.models import Comment

from mezzanine.generic.fields import RatingField
from mezzanine.generic.managers import CommentManager, KeywordManager
from mezzanine.core.models import Slugged, Orderable
from mezzanine.conf import settings
from mezzanine.utils.models import get_user_model_name
from mezzanine.utils.sites import current_site_id


class ThreadedComment(Comment):
    """
    Extend the ``Comment`` model from ``django_comments`` to
    add comment threading. ``Comment`` provides its own site
    foreign key, so we can't inherit from ``SiteRelated`` in
    ``mezzanine.core``, and therefore need to set the site on
    ``save``. ``CommentManager`` inherits from Mezzanine's
    ``CurrentSiteManager``, so everything else site related is
    already provided.
    """

    # Marks comments written by the content's author.
    by_author = models.BooleanField(_("By the blog author"), default=False)
    # Parent comment when this comment is a threaded reply; null for
    # top-level comments. Not user-editable in the admin.
    replied_to = models.ForeignKey("self", null=True, editable=False,
                                   related_name="comments")
    rating = RatingField(verbose_name=_("Rating"))

    objects = CommentManager()

    class Meta:
        verbose_name = _("Comment")
        verbose_name_plural = _("Comments")

    def get_absolute_url(self):
        """
        Use the URL for the comment's content object, with a URL hash
        appended that references the individual comment.
        """
        url = self.content_object.get_absolute_url()
        return "%s#comment-%s" % (url, self.id)

    def save(self, *args, **kwargs):
        """
        Set the current site ID, and ``is_public`` based on the setting
        ``COMMENTS_DEFAULT_APPROVED``.
        """
        if not self.id:
            self.is_public = settings.COMMENTS_DEFAULT_APPROVED
        self.site_id = current_site_id()
        super(ThreadedComment, self).save(*args, **kwargs)

    ################################
    # Admin listing column methods #
    ################################

    def intro(self):
        # First 20 words of the comment, for the admin change list.
        return truncatewords_html(self.comment, 20)
    intro.short_description = _("Comment")

    def avatar_link(self):
        from mezzanine.core.templatetags.mezzanine_tags import gravatar_url
        # Renamed from ``vars`` so the builtin is not shadowed.
        link_args = (self.user_email, gravatar_url(self.email),
                     self.user_name)
        return ("<a href='mailto:%s'><img style='vertical-align:middle; "
                "margin-right:3px;' src='%s' />%s</a>" % link_args)
    avatar_link.allow_tags = True
    avatar_link.short_description = _("User")

    def admin_link(self):
        return "<a href='%s'>%s</a>" % (self.get_absolute_url(),
                                        ugettext("View on site"))
    admin_link.allow_tags = True
    admin_link.short_description = ""

    # Exists for backward compatibility with the gravatar_url template
    # tag, which used to take the email address hash instead of the
    # email address itself.
    @property
    def email_hash(self):
        return self.email


class Keyword(Slugged):
    """
    Keywords/tags which are managed via a custom JavaScript based
    widget in the admin.
    """

    objects = KeywordManager()

    class Meta:
        verbose_name = _("Keyword")
        verbose_name_plural = _("Keywords")


@python_2_unicode_compatible
class AssignedKeyword(Orderable):
    """
    A ``Keyword`` assigned to a model instance.
    """

    keyword = models.ForeignKey("Keyword", verbose_name=_("Keyword"),
                                related_name="assignments")
    content_type = models.ForeignKey("contenttypes.ContentType")
    object_pk = models.IntegerField()
    content_object = GenericForeignKey("content_type", "object_pk")

    class Meta:
        order_with_respect_to = "content_object"

    def __str__(self):
        return str(self.keyword)


class Rating(models.Model):
    """
    A rating that can be given to a piece of content.
    """

    value = models.IntegerField(_("Value"))
    rating_date = models.DateTimeField(_("Rating date"), auto_now_add=True,
                                       null=True)
    content_type = models.ForeignKey("contenttypes.ContentType")
    object_pk = models.IntegerField()
    content_object = GenericForeignKey("content_type", "object_pk")
    user = models.ForeignKey(get_user_model_name(), verbose_name=_("Rater"),
                             null=True, related_name="%(class)ss")

    class Meta:
        verbose_name = _("Rating")
        verbose_name_plural = _("Ratings")

    def save(self, *args, **kwargs):
        """
        Validate that the rating falls between the min and max values,
        raising ``ValueError`` otherwise.
        """
        # BUGFIX: the original used ``valid = map(str, ...)``. With
        # ``future.builtins.map`` that is a one-shot iterator, so the
        # ``not in`` membership test below exhausted it and the
        # ``", ".join(valid)`` in the error message was always empty.
        # Materialize the valid values once instead.
        valid = [str(i) for i in settings.RATINGS_RANGE]
        if str(self.value) not in valid:
            raise ValueError("Invalid rating. %s is not in %s" % (
                self.value, ", ".join(valid)))
        super(Rating, self).save(*args, **kwargs)
bsd-2-clause
chand3040/sree_odoo
openerp/addons/stock_account/wizard/__init__.py
351
1105
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import stock_change_standard_price import stock_invoice_onshipping import stock_valuation_history import stock_return_picking
agpl-3.0
Mj258/weiboapi
srapyDemo/envs/Lib/site-packages/pip/commands/install.py
187
14659
from __future__ import absolute_import import logging import operator import os import tempfile import shutil import warnings try: import wheel except ImportError: wheel = None from pip.req import RequirementSet from pip.basecommand import RequirementCommand from pip.locations import virtualenv_no_global, distutils_scheme from pip.index import PackageFinder from pip.exceptions import ( InstallationError, CommandError, PreviousBuildDirError, ) from pip import cmdoptions from pip.utils import ensure_dir from pip.utils.build import BuildDirectory from pip.utils.deprecation import RemovedInPip8Warning from pip.utils.filesystem import check_path_owner from pip.wheel import WheelCache, WheelBuilder logger = logging.getLogger(__name__) class InstallCommand(RequirementCommand): """ Install packages from: - PyPI (and other indexes) using requirement specifiers. - VCS project urls. - Local project directories. - Local or remote source archives. pip also supports installing from "requirements files", which provide an easy way to specify a whole environment to be installed. """ name = 'install' usage = """ %prog [options] <requirement specifier> [package-index-options] ... %prog [options] -r <requirements file> [package-index-options] ... %prog [options] [-e] <vcs project url> ... %prog [options] [-e] <local project path> ... %prog [options] <archive url/path> ...""" summary = 'Install packages.' def __init__(self, *args, **kw): super(InstallCommand, self).__init__(*args, **kw) cmd_opts = self.cmd_opts cmd_opts.add_option(cmdoptions.constraints()) cmd_opts.add_option(cmdoptions.editable()) cmd_opts.add_option(cmdoptions.requirements()) cmd_opts.add_option(cmdoptions.build_dir()) cmd_opts.add_option( '-t', '--target', dest='target_dir', metavar='dir', default=None, help='Install packages into <dir>. ' 'By default this will not replace existing files/folders in ' '<dir>. Use --upgrade to replace existing packages in <dir> ' 'with new versions.' 
) cmd_opts.add_option( '-d', '--download', '--download-dir', '--download-directory', dest='download_dir', metavar='dir', default=None, help=("Download packages into <dir> instead of installing them, " "regardless of what's already installed."), ) cmd_opts.add_option(cmdoptions.download_cache()) cmd_opts.add_option(cmdoptions.src()) cmd_opts.add_option( '-U', '--upgrade', dest='upgrade', action='store_true', help='Upgrade all specified packages to the newest available ' 'version. This process is recursive regardless of whether ' 'a dependency is already satisfied.' ) cmd_opts.add_option( '--force-reinstall', dest='force_reinstall', action='store_true', help='When upgrading, reinstall all packages even if they are ' 'already up-to-date.') cmd_opts.add_option( '-I', '--ignore-installed', dest='ignore_installed', action='store_true', help='Ignore the installed packages (reinstalling instead).') cmd_opts.add_option(cmdoptions.no_deps()) cmd_opts.add_option(cmdoptions.install_options()) cmd_opts.add_option(cmdoptions.global_options()) cmd_opts.add_option( '--user', dest='use_user_site', action='store_true', help="Install to the Python user install directory for your " "platform. Typically ~/.local/, or %APPDATA%\Python on " "Windows. (See the Python documentation for site.USER_BASE " "for full details.)") cmd_opts.add_option( '--egg', dest='as_egg', action='store_true', help="Install packages as eggs, not 'flat', like pip normally " "does. This option is not about installing *from* eggs. 
" "(WARNING: Because this option overrides pip's normal install" " logic, requirements files may not behave as expected.)") cmd_opts.add_option( '--root', dest='root_path', metavar='dir', default=None, help="Install everything relative to this alternate root " "directory.") cmd_opts.add_option( "--compile", action="store_true", dest="compile", default=True, help="Compile py files to pyc", ) cmd_opts.add_option( "--no-compile", action="store_false", dest="compile", help="Do not compile py files to pyc", ) cmd_opts.add_option(cmdoptions.use_wheel()) cmd_opts.add_option(cmdoptions.no_use_wheel()) cmd_opts.add_option(cmdoptions.no_binary()) cmd_opts.add_option(cmdoptions.only_binary()) cmd_opts.add_option( '--pre', action='store_true', default=False, help="Include pre-release and development versions. By default, " "pip only finds stable versions.") cmd_opts.add_option(cmdoptions.no_clean()) index_opts = cmdoptions.make_option_group( cmdoptions.index_group, self.parser, ) self.parser.insert_option_group(0, index_opts) self.parser.insert_option_group(0, cmd_opts) def _build_package_finder(self, options, index_urls, session): """ Create a package finder appropriate to this install command. This method is meant to be overridden by subclasses, not called directly. 
""" return PackageFinder( find_links=options.find_links, format_control=options.format_control, index_urls=index_urls, allow_external=options.allow_external, allow_unverified=options.allow_unverified, allow_all_external=options.allow_all_external, trusted_hosts=options.trusted_hosts, allow_all_prereleases=options.pre, process_dependency_links=options.process_dependency_links, session=session, ) def run(self, options, args): cmdoptions.resolve_wheel_no_use_binary(options) cmdoptions.check_install_build_global(options) if options.download_dir: options.ignore_installed = True if options.build_dir: options.build_dir = os.path.abspath(options.build_dir) options.src_dir = os.path.abspath(options.src_dir) install_options = options.install_options or [] if options.use_user_site: if virtualenv_no_global(): raise InstallationError( "Can not perform a '--user' install. User site-packages " "are not visible in this virtualenv." ) install_options.append('--user') install_options.append('--prefix=') temp_target_dir = None if options.target_dir: options.ignore_installed = True temp_target_dir = tempfile.mkdtemp() options.target_dir = os.path.abspath(options.target_dir) if (os.path.exists(options.target_dir) and not os.path.isdir(options.target_dir)): raise CommandError( "Target path exists but is not a directory, will not " "continue." ) install_options.append('--home=' + temp_target_dir) global_options = options.global_options or [] index_urls = [options.index_url] + options.extra_index_urls if options.no_index: logger.info('Ignoring indexes: %s', ','.join(index_urls)) index_urls = [] if options.download_cache: warnings.warn( "--download-cache has been deprecated and will be removed in " "the future. 
Pip now automatically uses and configures its " "cache.", RemovedInPip8Warning, ) with self._build_session(options) as session: finder = self._build_package_finder(options, index_urls, session) build_delete = (not (options.no_clean or options.build_dir)) wheel_cache = WheelCache(options.cache_dir, options.format_control) if options.cache_dir and not check_path_owner(options.cache_dir): logger.warning( "The directory '%s' or its parent directory is not owned " "by the current user and caching wheels has been " "disabled. check the permissions and owner of that " "directory. If executing pip with sudo, you may want " "sudo's -H flag.", options.cache_dir, ) options.cache_dir = None with BuildDirectory(options.build_dir, delete=build_delete) as build_dir: requirement_set = RequirementSet( build_dir=build_dir, src_dir=options.src_dir, download_dir=options.download_dir, upgrade=options.upgrade, as_egg=options.as_egg, ignore_installed=options.ignore_installed, ignore_dependencies=options.ignore_dependencies, force_reinstall=options.force_reinstall, use_user_site=options.use_user_site, target_dir=temp_target_dir, session=session, pycompile=options.compile, isolated=options.isolated_mode, wheel_cache=wheel_cache, ) self.populate_requirement_set( requirement_set, args, options, finder, session, self.name, wheel_cache ) if not requirement_set.has_requirements: return try: if (options.download_dir or not wheel or not options.cache_dir): # on -d don't do complex things like building # wheels, and don't try to build wheels when wheel is # not installed. requirement_set.prepare_files(finder) else: # build wheels before install. wb = WheelBuilder( requirement_set, finder, build_options=[], global_options=[], ) # Ignore the result: a failed wheel will be # installed from the sdist/vcs whatever. 
wb.build(autobuilding=True) if not options.download_dir: requirement_set.install( install_options, global_options, root=options.root_path, ) reqs = sorted( requirement_set.successfully_installed, key=operator.attrgetter('name')) items = [] for req in reqs: item = req.name try: if hasattr(req, 'installed_version'): if req.installed_version: item += '-' + req.installed_version except Exception: pass items.append(item) installed = ' '.join(items) if installed: logger.info('Successfully installed %s', installed) else: downloaded = ' '.join([ req.name for req in requirement_set.successfully_downloaded ]) if downloaded: logger.info( 'Successfully downloaded %s', downloaded ) except PreviousBuildDirError: options.no_clean = True raise finally: # Clean up if not options.no_clean: requirement_set.cleanup_files() if options.target_dir: ensure_dir(options.target_dir) lib_dir = distutils_scheme('', home=temp_target_dir)['purelib'] for item in os.listdir(lib_dir): target_item_dir = os.path.join(options.target_dir, item) if os.path.exists(target_item_dir): if not options.upgrade: logger.warning( 'Target directory %s already exists. Specify ' '--upgrade to force replacement.', target_item_dir ) continue if os.path.islink(target_item_dir): logger.warning( 'Target directory %s already exists and is ' 'a link. Pip will not automatically replace ' 'links, please remove if replacement is ' 'desired.', target_item_dir ) continue if os.path.isdir(target_item_dir): shutil.rmtree(target_item_dir) else: os.remove(target_item_dir) shutil.move( os.path.join(lib_dir, item), target_item_dir ) shutil.rmtree(temp_target_dir) return requirement_set
mit
jakirkham/ilastik
ilastik/applets/objectClassification/objectClassificationSerializer.py
4
2910
###############################################################################
#   ilastik: interactive learning and segmentation toolkit
#
#       Copyright (C) 2011-2014, the ilastik developers
#                                <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
#          http://ilastik.org/license.html
###############################################################################
import warnings

from ilastik.applets.base.appletSerializer import (
    AppletSerializer,
    SerialSlot,
    SerialDictSlot,
    SerialClassifierSlot,
    SerialListSlot,
)


class SerialDictSlotWithoutDeserialization(SerialDictSlot):
    """A ``SerialDictSlot`` that is write-only: it serializes its slot to
    the project file but deliberately skips deserialization."""

    def __init__(self, slot, mainOperator, **kwargs):
        super(SerialDictSlotWithoutDeserialization, self).__init__(slot,
                                                                   **kwargs)
        # Keep a handle on the owning operator for serialization-time checks.
        self.mainOperator = mainOperator

    def serialize(self, group):
        # A readiness guard was disabled here at some point:
        # if self.slot.ready() and self.mainOperator._predict_enabled:
        return SerialDictSlot.serialize(self, group)

    def deserialize(self, group):
        """Intentionally a no-op — this slot is never read back."""
        pass


class ObjectClassificationSerializer(AppletSerializer):
    """Serializes the object-classification applet's state to the
    project file."""

    # FIXME: predictions can only be saved, not loaded, because it
    # would call setValue() on a connected slot

    def __init__(self, topGroupName, operator):
        # One serializer per persisted slot of the operator.
        slot_serializers = [
            SerialDictSlot(operator.SelectedFeatures, transform=str),
            SerialListSlot(operator.LabelNames, transform=str),
            SerialListSlot(operator.LabelColors,
                           transform=lambda x: tuple(x.flat)),
            SerialListSlot(operator.PmapColors,
                           transform=lambda x: tuple(x.flat)),
            SerialDictSlot(operator.LabelInputs, transform=int),
            SerialClassifierSlot(operator.Classifier,
                                 operator.classifier_cache,
                                 name="ClassifierForests"),
            SerialDictSlot(operator.CachedProbabilities,
                           operator.InputProbabilities,
                           transform=int),
            # Disabled — probabilities are write-only (see FIXME above):
            # SerialDictSlotWithoutDeserialization(
            #     operator.Probabilities, operator, transform=str),
        ]
        super(ObjectClassificationSerializer, self).__init__(
            topGroupName, slots=slot_serializers, operator=operator)
gpl-3.0
jmrozanec/white-bkg-classification
scripts/preprocessing.py
1
1441
#https://github.com/tflearn/tflearn/issues/180 from __future__ import division, print_function, absolute_import import tflearn from tflearn.data_utils import shuffle, to_categorical from tflearn.layers.core import input_data, dropout, fully_connected from tflearn.layers.conv import conv_2d, max_pool_2d from tflearn.layers.normalization import local_response_normalization, batch_normalization from tflearn.layers.estimator import regression from tflearn.data_utils import image_preloader import skimage from skimage import data from skimage import filters import os from skimage import io import numpy as np from scipy import ndimage import matplotlib.pyplot as plt reds="../images/pictures/red/" greens="../images/pictures/green/" redshist="../images/histograms/red/" greenshist="../images/histograms/green/" directory=reds histdirectory=redshist for filename in os.listdir(directory): if filename.endswith(".jpg"): img = io.imread(os.path.join(directory, filename)) hist, bin_edges = np.histogram(img, bins=255) bin_centers = 0.5*(bin_edges[:-1] + bin_edges[1:]) binary_img = img > 0.8 plt.figure(figsize=(1,1)) fig, ax = plt.subplots(nrows=1, ncols=1) #http://stackoverflow.com/questions/9622163/save-plot-to-image-file-instead-of-displaying-it-using-matplotlib-so-it-can-be plt.plot(bin_centers, hist, lw=2) fig.savefig(os.path.join(histdirectory, filename), bbox_inches='tight') plt.close() else: continue
apache-2.0
barnsnake351/neutron
neutron/db/migration/alembic_migrations/versions/juno_initial.py
32
2925
# Copyright 2014 OpenStack Foundation
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
#

"""juno_initial

Revision ID: juno
Revises: None

"""

# revision identifiers, used by Alembic.
revision = 'juno'
down_revision = None

from neutron.db.migration.alembic_migrations import agent_init_ops
from neutron.db.migration.alembic_migrations import brocade_init_ops
from neutron.db.migration.alembic_migrations import cisco_init_ops
from neutron.db.migration.alembic_migrations import core_init_ops
from neutron.db.migration.alembic_migrations import dvr_init_opts
from neutron.db.migration.alembic_migrations import firewall_init_ops
from neutron.db.migration.alembic_migrations import l3_init_ops
from neutron.db.migration.alembic_migrations import lb_init_ops
from neutron.db.migration.alembic_migrations import loadbalancer_init_ops
from neutron.db.migration.alembic_migrations import metering_init_ops
from neutron.db.migration.alembic_migrations import ml2_init_ops
from neutron.db.migration.alembic_migrations import mlnx_init_ops
from neutron.db.migration.alembic_migrations import nec_init_ops
from neutron.db.migration.alembic_migrations import nuage_init_opts
from neutron.db.migration.alembic_migrations import other_extensions_init_ops
from neutron.db.migration.alembic_migrations import other_plugins_init_ops
from neutron.db.migration.alembic_migrations import ovs_init_ops
from neutron.db.migration.alembic_migrations import portsec_init_ops
from neutron.db.migration.alembic_migrations import ryu_init_ops
from neutron.db.migration.alembic_migrations import secgroup_init_ops
from neutron.db.migration.alembic_migrations import vmware_init_ops
from neutron.db.migration.alembic_migrations import vpn_init_ops

# Per-component init modules that make up the initial Juno schema.
# NOTE: the sequence below preserves the exact call order of the
# original migration — do not reorder.
_INIT_OPS_MODULES = (
    agent_init_ops,
    core_init_ops,
    l3_init_ops,
    secgroup_init_ops,
    portsec_init_ops,
    other_extensions_init_ops,
    lb_init_ops,
    ovs_init_ops,
    ml2_init_ops,
    dvr_init_opts,
    firewall_init_ops,
    loadbalancer_init_ops,
    vpn_init_ops,
    metering_init_ops,
    brocade_init_ops,
    cisco_init_ops,
    mlnx_init_ops,
    nec_init_ops,
    other_plugins_init_ops,
    ryu_init_ops,
    vmware_init_ops,
    nuage_init_opts,
)


def upgrade():
    """Create the initial Juno schema by delegating to each component's
    init module, in the fixed order above."""
    for ops_module in _INIT_OPS_MODULES:
        ops_module.upgrade()
apache-2.0
karban/agros2d
data/scripts/test/test_current_planar.py
2
1590
# Agros2D regression test: planar current field ("Feeder" model).
# Builds the geometry, solves, and compares point/surface results
# against known reference values.

# model
newdocument("Feeder", "planar", "current", 3, 5)

# boundaries
addboundary("Neumann", "current_inward_current_flow", 0)
addboundary("Zero", "current_potential", 0)
addboundary("Voltage", "current_potential", 1)

# materials: (name, conductivity)
for mat_name, conductivity in (("mat 1", 1e7),
                               ("mat 2", 1e5),
                               ("mat 3", 1e3)):
    addmaterial(mat_name, conductivity)

# edges: (x1, y1, x2, y2, angle, boundary marker)
for x1, y1, x2, y2, angle, marker in (
        (0, 0, 0.6, 0, 0, "Zero"),
        (0, 0.8, 0, 0.5, 0, "Neumann"),
        (0, 0.5, 0, 0, 0, "Neumann"),
        (0, 0, 0.35, 0.5, 0, "none"),
        (0.35, 0.5, 0.6, 0.5, 0, "none"),
        (0.6, 0.8, 0.6, 0.5, 0, "Neumann"),
        (0.6, 0.5, 0.6, 0, 0, "Neumann"),
        (0, 0.5, 0.35, 0.5, 0, "none"),
        (0, 0.8, 0.6, 0.8, 0, "Voltage")):
    addedge(x1, y1, x2, y2, angle, marker)

# labels: (x, y, arg3, arg4, material) — the two middle arguments are
# always 0 in this test; their meaning is not shown here.
for lx, ly, arg3, arg4, mat_name in (
        (0.3, 0.670924, 0, 0, "mat 1"),
        (0.105779, 0.364111, 0, 0, "mat 2"),
        (0.394296, 0.203668, 0, 0, "mat 3")):
    addlabel(lx, ly, arg3, arg4, mat_name)

# solve
zoombestfit()
solve()

# point value checks against reference results
point = pointresult(0.11879, 0.346203)
testV = test("Scalar potential", point["V"], 0.928377)
testE = test("Electric field", point["E"], 0.486928)
testEx = test("Electric field - x", point["Ex"], -0.123527)
testEy = test("Electric field - y", point["Ey"], -0.470999)
testJ = test("Current density", point["J"], 48692.830437)
testJx = test("Current density - x", point["Jx"], -12352.691339)
testJy = test("Current density - y", point["Jy"], -47099.923064)
testpj = test("Losses", point["pj"], 23709.917359)

# surface integral check
surface = surfaceintegral(0)
testI = test("Current", surface["I"], 3629.425713)

print("Test: Current field - planar: " +
      str(testV and testE and testEx and testEy and
          testJ and testJx and testJy and testpj and testI))
gpl-2.0
omni360/avatar
icloth/bower_components/bootstrap/node_modules/npm-shrinkwrap/node_modules/npm/node_modules/node-gyp/gyp/tools/pretty_vcproj.py
2637
9586
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Make the format of a vcproj really pretty. This script normalize and sort an xml. It also fetches all the properties inside linked vsprops and include them explicitly in the vcproj. It outputs the resulting xml to stdout. """ __author__ = 'nsylvain (Nicolas Sylvain)' import os import sys from xml.dom.minidom import parse from xml.dom.minidom import Node REPLACEMENTS = dict() ARGUMENTS = None class CmpTuple(object): """Compare function between 2 tuple.""" def __call__(self, x, y): return cmp(x[0], y[0]) class CmpNode(object): """Compare function between 2 xml nodes.""" def __call__(self, x, y): def get_string(node): node_string = "node" node_string += node.nodeName if node.nodeValue: node_string += node.nodeValue if node.attributes: # We first sort by name, if present. node_string += node.getAttribute("Name") all_nodes = [] for (name, value) in node.attributes.items(): all_nodes.append((name, value)) all_nodes.sort(CmpTuple()) for (name, value) in all_nodes: node_string += name node_string += value return node_string return cmp(get_string(x), get_string(y)) def PrettyPrintNode(node, indent=0): if node.nodeType == Node.TEXT_NODE: if node.data.strip(): print '%s%s' % (' '*indent, node.data.strip()) return if node.childNodes: node.normalize() # Get the number of attributes attr_count = 0 if node.attributes: attr_count = node.attributes.length # Print the main tag if attr_count == 0: print '%s<%s>' % (' '*indent, node.nodeName) else: print '%s<%s' % (' '*indent, node.nodeName) all_attributes = [] for (name, value) in node.attributes.items(): all_attributes.append((name, value)) all_attributes.sort(CmpTuple()) for (name, value) in all_attributes: print '%s %s="%s"' % (' '*indent, name, value) print '%s>' % (' '*indent) if node.nodeValue: print '%s %s' % (' '*indent, node.nodeValue) for 
sub_node in node.childNodes: PrettyPrintNode(sub_node, indent=indent+2) print '%s</%s>' % (' '*indent, node.nodeName) def FlattenFilter(node): """Returns a list of all the node and sub nodes.""" node_list = [] if (node.attributes and node.getAttribute('Name') == '_excluded_files'): # We don't add the "_excluded_files" filter. return [] for current in node.childNodes: if current.nodeName == 'Filter': node_list.extend(FlattenFilter(current)) else: node_list.append(current) return node_list def FixFilenames(filenames, current_directory): new_list = [] for filename in filenames: if filename: for key in REPLACEMENTS: filename = filename.replace(key, REPLACEMENTS[key]) os.chdir(current_directory) filename = filename.strip('"\' ') if filename.startswith('$'): new_list.append(filename) else: new_list.append(os.path.abspath(filename)) return new_list def AbsoluteNode(node): """Makes all the properties we know about in this node absolute.""" if node.attributes: for (name, value) in node.attributes.items(): if name in ['InheritedPropertySheets', 'RelativePath', 'AdditionalIncludeDirectories', 'IntermediateDirectory', 'OutputDirectory', 'AdditionalLibraryDirectories']: # We want to fix up these paths path_list = value.split(';') new_list = FixFilenames(path_list, os.path.dirname(ARGUMENTS[1])) node.setAttribute(name, ';'.join(new_list)) if not value: node.removeAttribute(name) def CleanupVcproj(node): """For each sub node, we call recursively this function.""" for sub_node in node.childNodes: AbsoluteNode(sub_node) CleanupVcproj(sub_node) # Normalize the node, and remove all extranous whitespaces. for sub_node in node.childNodes: if sub_node.nodeType == Node.TEXT_NODE: sub_node.data = sub_node.data.replace("\r", "") sub_node.data = sub_node.data.replace("\n", "") sub_node.data = sub_node.data.rstrip() # Fix all the semicolon separated attributes to be sorted, and we also # remove the dups. 
if node.attributes: for (name, value) in node.attributes.items(): sorted_list = sorted(value.split(';')) unique_list = [] for i in sorted_list: if not unique_list.count(i): unique_list.append(i) node.setAttribute(name, ';'.join(unique_list)) if not value: node.removeAttribute(name) if node.childNodes: node.normalize() # For each node, take a copy, and remove it from the list. node_array = [] while node.childNodes and node.childNodes[0]: # Take a copy of the node and remove it from the list. current = node.childNodes[0] node.removeChild(current) # If the child is a filter, we want to append all its children # to this same list. if current.nodeName == 'Filter': node_array.extend(FlattenFilter(current)) else: node_array.append(current) # Sort the list. node_array.sort(CmpNode()) # Insert the nodes in the correct order. for new_node in node_array: # But don't append empty tool node. if new_node.nodeName == 'Tool': if new_node.attributes and new_node.attributes.length == 1: # This one was empty. continue if new_node.nodeName == 'UserMacro': continue node.appendChild(new_node) def GetConfiguationNodes(vcproj): #TODO(nsylvain): Find a better way to navigate the xml. nodes = [] for node in vcproj.childNodes: if node.nodeName == "Configurations": for sub_node in node.childNodes: if sub_node.nodeName == "Configuration": nodes.append(sub_node) return nodes def GetChildrenVsprops(filename): dom = parse(filename) if dom.documentElement.attributes: vsprops = dom.documentElement.getAttribute('InheritedPropertySheets') return FixFilenames(vsprops.split(';'), os.path.dirname(filename)) return [] def SeekToNode(node1, child2): # A text node does not have properties. if child2.nodeType == Node.TEXT_NODE: return None # Get the name of the current node. current_name = child2.getAttribute("Name") if not current_name: # There is no name. We don't know how to merge. return None # Look through all the nodes to find a match. 
for sub_node in node1.childNodes: if sub_node.nodeName == child2.nodeName: name = sub_node.getAttribute("Name") if name == current_name: return sub_node # No match. We give up. return None def MergeAttributes(node1, node2): # No attributes to merge? if not node2.attributes: return for (name, value2) in node2.attributes.items(): # Don't merge the 'Name' attribute. if name == 'Name': continue value1 = node1.getAttribute(name) if value1: # The attribute exist in the main node. If it's equal, we leave it # untouched, otherwise we concatenate it. if value1 != value2: node1.setAttribute(name, ';'.join([value1, value2])) else: # The attribute does nto exist in the main node. We append this one. node1.setAttribute(name, value2) # If the attribute was a property sheet attributes, we remove it, since # they are useless. if name == 'InheritedPropertySheets': node1.removeAttribute(name) def MergeProperties(node1, node2): MergeAttributes(node1, node2) for child2 in node2.childNodes: child1 = SeekToNode(node1, child2) if child1: MergeProperties(child1, child2) else: node1.appendChild(child2.cloneNode(True)) def main(argv): """Main function of this vcproj prettifier.""" global ARGUMENTS ARGUMENTS = argv # check if we have exactly 1 parameter. if len(argv) < 2: print ('Usage: %s "c:\\path\\to\\vcproj.vcproj" [key1=value1] ' '[key2=value2]' % argv[0]) return 1 # Parse the keys for i in range(2, len(argv)): (key, value) = argv[i].split('=') REPLACEMENTS[key] = value # Open the vcproj and parse the xml. dom = parse(argv[1]) # First thing we need to do is find the Configuration Node and merge them # with the vsprops they include. for configuration_node in GetConfiguationNodes(dom.documentElement): # Get the property sheets associated with this configuration. vsprops = configuration_node.getAttribute('InheritedPropertySheets') # Fix the filenames to be absolute. 
vsprops_list = FixFilenames(vsprops.strip().split(';'), os.path.dirname(argv[1])) # Extend the list of vsprops with all vsprops contained in the current # vsprops. for current_vsprops in vsprops_list: vsprops_list.extend(GetChildrenVsprops(current_vsprops)) # Now that we have all the vsprops, we need to merge them. for current_vsprops in vsprops_list: MergeProperties(configuration_node, parse(current_vsprops).documentElement) # Now that everything is merged, we need to cleanup the xml. CleanupVcproj(dom.documentElement) # Finally, we use the prett xml function to print the vcproj back to the # user. #print dom.toprettyxml(newl="\n") PrettyPrintNode(dom.documentElement) return 0 if __name__ == '__main__': sys.exit(main(sys.argv))
apache-2.0
kubeflow/pipelines
manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/test_sync.py
2
10679
import os from unittest import mock import threading from sync import get_settings_from_env, server_factory import json import pytest import requests # Data sets passed to server DATA_INCORRECT_CHILDREN = { "parent": { "metadata": { "labels": { "pipelines.kubeflow.org/enabled": "true" }, "name": "myName" } }, "children": { "Secret.v1": [], "ConfigMap.v1": [], "Deployment.apps/v1": [], "Service.v1": [], "DestinationRule.networking.istio.io/v1alpha3": [], "AuthorizationPolicy.security.istio.io/v1beta1": [], } } DATA_CORRECT_CHILDREN = { "parent": { "metadata": { "labels": { "pipelines.kubeflow.org/enabled": "true" }, "name": "myName" } }, "children": { "Secret.v1": [1], "ConfigMap.v1": [1], "Deployment.apps/v1": [1, 1], "Service.v1": [1, 1], "DestinationRule.networking.istio.io/v1alpha3": [1], "AuthorizationPolicy.security.istio.io/v1beta1": [1], } } DATA_MISSING_PIPELINE_ENABLED = {"parent": {}, "children": {}} # Default values when environments are not explicit DEFAULT_FRONTEND_IMAGE = "gcr.io/ml-pipeline/frontend" DEFAULT_VISUALIZATION_IMAGE = "gcr.io/ml-pipeline/visualization-server" # Variables used for environment variable sets VISUALIZATION_SERVER_IMAGE = "vis-image" VISUALIZATION_SERVER_TAG = "somenumber.1.2.3" FRONTEND_IMAGE = "frontend-image" FRONTEND_TAG = "somehash" KFP_VERSION = "x.y.z" MINIO_ACCESS_KEY = "abcdef" MINIO_SECRET_KEY = "uvwxyz" # "Environments" used in tests ENV_VARIABLES_BASE = { "MINIO_ACCESS_KEY": MINIO_ACCESS_KEY, "MINIO_SECRET_KEY": MINIO_SECRET_KEY, "CONTROLLER_PORT": "0", # HTTPServer randomly assigns the port to a free port } ENV_KFP_VERSION_ONLY = dict(ENV_VARIABLES_BASE, **{ "KFP_VERSION": KFP_VERSION, } ) ENV_IMAGES_NO_TAGS = dict(ENV_VARIABLES_BASE, **{ "KFP_VERSION": KFP_VERSION, "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, "FRONTEND_IMAGE": FRONTEND_IMAGE, } ) ENV_IMAGES_WITH_TAGS = dict(ENV_VARIABLES_BASE, **{ "VISUALIZATION_SERVER_IMAGE": VISUALIZATION_SERVER_IMAGE, "FRONTEND_IMAGE": FRONTEND_IMAGE, 
"VISUALIZATION_SERVER_TAG": VISUALIZATION_SERVER_TAG, "FRONTEND_TAG": FRONTEND_TAG, } ) ENV_IMAGES_WITH_TAGS_AND_ISTIO = dict(ENV_IMAGES_WITH_TAGS, **{ "DISABLE_ISTIO_SIDECAR": "false", } ) def generate_image_name(imagename, tag): return f"{str(imagename)}:{str(tag)}" @pytest.fixture( scope="function", ) def sync_server(request): """ Starts the sync HTTP server for a given set of environment variables on a separate thread Yields: * the server (useful to interrogate for the server address) * environment variables (useful to interrogate for correct responses) """ environ = request.param with mock.patch.dict(os.environ, environ): # Create a server at an available port and serve it on a thread as a daemon # This will result in a collection of servers being active - not a great way # if this fixture is run many times during a test, but ok for now settings = get_settings_from_env() server = server_factory(**settings) server_thread = threading.Thread(target=server.serve_forever) # Put on daemon so it doesn't keep pytest from ending server_thread.daemon = True server_thread.start() yield server, environ @pytest.fixture( scope="function", ) def sync_server_from_arguments(request): """ Starts the sync HTTP server for a given set of parameters passed as arguments, with server on a separate thread Yields: * the server (useful to interrogate for the server address) * environment variables (useful to interrogate for correct responses) """ environ = {k.lower(): v for k, v in request.param.items()} settings = environ server = server_factory(**settings) server_thread = threading.Thread(target=server.serve_forever) # Put on daemon so it doesn't keep pytest from ending server_thread.daemon = True server_thread.start() yield server, environ @pytest.mark.parametrize( "sync_server, data, expected_status, expected_visualization_server_image, expected_frontend_server_image", [ ( ENV_KFP_VERSION_ONLY, DATA_INCORRECT_CHILDREN, {"kubeflow-pipelines-ready": "False"}, 
generate_image_name(DEFAULT_VISUALIZATION_IMAGE, KFP_VERSION), generate_image_name(DEFAULT_FRONTEND_IMAGE, KFP_VERSION), ), ( ENV_IMAGES_NO_TAGS, DATA_INCORRECT_CHILDREN, {"kubeflow-pipelines-ready": "False"}, generate_image_name(ENV_IMAGES_NO_TAGS["VISUALIZATION_SERVER_IMAGE"], KFP_VERSION), generate_image_name(ENV_IMAGES_NO_TAGS["FRONTEND_IMAGE"], KFP_VERSION), ), ( ENV_IMAGES_WITH_TAGS, DATA_INCORRECT_CHILDREN, {"kubeflow-pipelines-ready": "False"}, generate_image_name(ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"]), generate_image_name(ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"]), ), ( ENV_IMAGES_WITH_TAGS, DATA_CORRECT_CHILDREN, {"kubeflow-pipelines-ready": "True"}, generate_image_name(ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"]), generate_image_name(ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"]), ), ], indirect=["sync_server"] ) def test_sync_server_with_pipeline_enabled(sync_server, data, expected_status, expected_visualization_server_image, expected_frontend_server_image): """ Nearly end-to-end test of how Controller serves .sync as a POST Tests case where metadata.labels.pipelines.kubeflow.org/enabled exists, and thus we should produce children Only does spot checks on children to see if key properties are correct """ server, environ = sync_server # server.server_address = (url, port_as_integer) url = f"http://{server.server_address[0]}:{str(server.server_address[1])}" print("url: ", url) print("data") print(json.dumps(data, indent=2)) x = requests.post(url, data=json.dumps(data)) results = json.loads(x.text) # Test overall status of whether children are ok assert results['status'] == expected_status # Poke a few children to test things that can vary by environment variable assert results['children'][1]["spec"]["template"]["spec"]["containers"][0][ "image"] == 
expected_visualization_server_image assert results['children'][5]["spec"]["template"]["spec"]["containers"][0][ "image"] == expected_frontend_server_image @pytest.mark.parametrize( "sync_server_from_arguments, data, expected_status, expected_visualization_server_image, " "expected_frontend_server_image", [ ( ENV_IMAGES_WITH_TAGS_AND_ISTIO, DATA_CORRECT_CHILDREN, {"kubeflow-pipelines-ready": "True"}, generate_image_name(ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_IMAGE"], ENV_IMAGES_WITH_TAGS["VISUALIZATION_SERVER_TAG"]), generate_image_name(ENV_IMAGES_WITH_TAGS["FRONTEND_IMAGE"], ENV_IMAGES_WITH_TAGS["FRONTEND_TAG"]), ), ], indirect=["sync_server_from_arguments"] ) def test_sync_server_with_direct_passing_of_settings( sync_server_from_arguments, data, expected_status, expected_visualization_server_image, expected_frontend_server_image): """ Nearly end-to-end test of how Controller serves .sync as a POST, taking variables as arguments Only does spot checks on children to see if key properties are correct """ server, environ = sync_server_from_arguments # server.server_address = (url, port_as_integer) url = f"http://{server.server_address[0]}:{str(server.server_address[1])}" print("url: ", url) print("data") print(json.dumps(data, indent=2)) x = requests.post(url, data=json.dumps(data)) results = json.loads(x.text) # Test overall status of whether children are ok assert results['status'] == expected_status # Poke a few children to test things that can vary by environment variable assert results['children'][1]["spec"]["template"]["spec"]["containers"][0][ "image"] == expected_visualization_server_image assert results['children'][5]["spec"]["template"]["spec"]["containers"][0][ "image"] == expected_frontend_server_image @pytest.mark.parametrize( "sync_server, data, expected_status, expected_children", [ (ENV_IMAGES_WITH_TAGS, DATA_MISSING_PIPELINE_ENABLED, {}, []), ], indirect=["sync_server"] ) def test_sync_server_without_pipeline_enabled(sync_server, data, 
expected_status, expected_children): """ Nearly end-to-end test of how Controller serves .sync as a POST Tests case where metadata.labels.pipelines.kubeflow.org/enabled does not exist and thus server returns an empty reply """ server, environ = sync_server # server.server_address = (url, port_as_integer) url = f"http://{server.server_address[0]}:{str(server.server_address[1])}" x = requests.post(url, data=json.dumps(data)) results = json.loads(x.text) # Test overall status of whether children are ok assert results['status'] == expected_status assert results['children'] == expected_children
apache-2.0
kubeflow/pipelines
backend/api/python_http_client/test/test_api_resource_key.py
2
1464
# coding: utf-8 """ Kubeflow Pipelines API This file contains REST API specification for Kubeflow Pipelines. The file is autogenerated from the swagger definition. Contact: kubeflow-pipelines@google.com Generated by: https://openapi-generator.tech """ from __future__ import absolute_import import unittest import datetime import kfp_server_api from kfp_server_api.models.api_resource_key import ApiResourceKey # noqa: E501 from kfp_server_api.rest import ApiException class TestApiResourceKey(unittest.TestCase): """ApiResourceKey unit test stubs""" def setUp(self): pass def tearDown(self): pass def make_instance(self, include_optional): """Test ApiResourceKey include_option is a boolean, when False only required params are included, when True both required and optional params are included """ # model = kfp_server_api.models.api_resource_key.ApiResourceKey() # noqa: E501 if include_optional : return ApiResourceKey( type = 'UNKNOWN_RESOURCE_TYPE', id = '0' ) else : return ApiResourceKey( ) def testApiResourceKey(self): """Test ApiResourceKey""" inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True) if __name__ == '__main__': unittest.main()
apache-2.0
JingGe/incubator-eagle
eagle-external/hadoop_jmx_collector/lib/kafka-python/kafka/producer/base.py
18
7821
from __future__ import absolute_import import logging import time try: from queue import Empty except ImportError: from Queue import Empty from collections import defaultdict from multiprocessing import Queue, Process import six from kafka.common import ( ProduceRequest, TopicAndPartition, UnsupportedCodecError ) from kafka.protocol import CODEC_NONE, ALL_CODECS, create_message_set log = logging.getLogger("kafka") BATCH_SEND_DEFAULT_INTERVAL = 20 BATCH_SEND_MSG_COUNT = 20 STOP_ASYNC_PRODUCER = -1 def _send_upstream(queue, client, codec, batch_time, batch_size, req_acks, ack_timeout): """ Listen on the queue for a specified number of messages or till a specified timeout and send them upstream to the brokers in one request NOTE: Ideally, this should have been a method inside the Producer class. However, multiprocessing module has issues in windows. The functionality breaks unless this function is kept outside of a class """ stop = False client.reinit() while not stop: timeout = batch_time count = batch_size send_at = time.time() + timeout msgset = defaultdict(list) # Keep fetching till we gather enough messages or a # timeout is reached while count > 0 and timeout >= 0: try: topic_partition, msg, key = queue.get(timeout=timeout) except Empty: break # Check if the controller has requested us to stop if topic_partition == STOP_ASYNC_PRODUCER: stop = True break # Adjust the timeout to match the remaining period count -= 1 timeout = send_at - time.time() msgset[topic_partition].append(msg) # Send collected requests upstream reqs = [] for topic_partition, msg in msgset.items(): messages = create_message_set(msg, codec, key) req = ProduceRequest(topic_partition.topic, topic_partition.partition, messages) reqs.append(req) try: client.send_produce_request(reqs, acks=req_acks, timeout=ack_timeout) except Exception: log.exception("Unable to send message") class Producer(object): """ Base class to be used by producers Arguments: client: The Kafka client instance to use async: 
If set to true, the messages are sent asynchronously via another thread (process). We will not wait for a response to these WARNING!!! current implementation of async producer does not guarantee message delivery. Use at your own risk! Or help us improve with a PR! req_acks: A value indicating the acknowledgements that the server must receive before responding to the request ack_timeout: Value (in milliseconds) indicating a timeout for waiting for an acknowledgement batch_send: If True, messages are send in batches batch_send_every_n: If set, messages are send in batches of this size batch_send_every_t: If set, messages are send after this timeout """ ACK_NOT_REQUIRED = 0 # No ack is required ACK_AFTER_LOCAL_WRITE = 1 # Send response after it is written to log ACK_AFTER_CLUSTER_COMMIT = -1 # Send response after data is committed DEFAULT_ACK_TIMEOUT = 1000 def __init__(self, client, async=False, req_acks=ACK_AFTER_LOCAL_WRITE, ack_timeout=DEFAULT_ACK_TIMEOUT, codec=None, batch_send=False, batch_send_every_n=BATCH_SEND_MSG_COUNT, batch_send_every_t=BATCH_SEND_DEFAULT_INTERVAL): if batch_send: async = True assert batch_send_every_n > 0 assert batch_send_every_t > 0 else: batch_send_every_n = 1 batch_send_every_t = 3600 self.client = client self.async = async self.req_acks = req_acks self.ack_timeout = ack_timeout if codec is None: codec = CODEC_NONE elif codec not in ALL_CODECS: raise UnsupportedCodecError("Codec 0x%02x unsupported" % codec) self.codec = codec if self.async: log.warning("async producer does not guarantee message delivery!") log.warning("Current implementation does not retry Failed messages") log.warning("Use at your own risk! 
(or help improve with a PR!)") self.queue = Queue() # Messages are sent through this queue self.proc = Process(target=_send_upstream, args=(self.queue, self.client.copy(), self.codec, batch_send_every_t, batch_send_every_n, self.req_acks, self.ack_timeout)) # Process will die if main thread exits self.proc.daemon = True self.proc.start() def send_messages(self, topic, partition, *msg): """ Helper method to send produce requests @param: topic, name of topic for produce request -- type str @param: partition, partition number for produce request -- type int @param: *msg, one or more message payloads -- type bytes @returns: ResponseRequest returned by server raises on error Note that msg type *must* be encoded to bytes by user. Passing unicode message will not work, for example you should encode before calling send_messages via something like `unicode_message.encode('utf-8')` All messages produced via this method will set the message 'key' to Null """ return self._send_messages(topic, partition, *msg) def _send_messages(self, topic, partition, *msg, **kwargs): key = kwargs.pop('key', None) # Guarantee that msg is actually a list or tuple (should always be true) if not isinstance(msg, (list, tuple)): raise TypeError("msg is not a list or tuple!") # Raise TypeError if any message is not encoded as bytes if any(not isinstance(m, six.binary_type) for m in msg): raise TypeError("all produce message payloads must be type bytes") # Raise TypeError if the key is not encoded as bytes if key is not None and not isinstance(key, six.binary_type): raise TypeError("the key must be type bytes") if self.async: for m in msg: self.queue.put((TopicAndPartition(topic, partition), m, key)) resp = [] else: messages = create_message_set(msg, self.codec, key) req = ProduceRequest(topic, partition, messages) try: resp = self.client.send_produce_request([req], acks=self.req_acks, timeout=self.ack_timeout) except Exception: log.exception("Unable to send messages") raise return resp def 
stop(self, timeout=1): """ Stop the producer. Optionally wait for the specified timeout before forcefully cleaning up. """ if self.async: self.queue.put((STOP_ASYNC_PRODUCER, None, None)) self.proc.join(timeout) if self.proc.is_alive(): self.proc.terminate()
apache-2.0
shsingh/ansible
test/units/modules/network/fortios/test_fortios_web_proxy_profile.py
20
11217
# Copyright 2019 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <https://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import json import pytest from mock import ANY from ansible.module_utils.network.fortios.fortios import FortiOSHandler try: from ansible.modules.network.fortios import fortios_web_proxy_profile except ImportError: pytest.skip("Could not load required modules for testing", allow_module_level=True) @pytest.fixture(autouse=True) def connection_mock(mocker): connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_web_proxy_profile.Connection') return connection_class_mock fos_instance = FortiOSHandler(connection_mock) def test_web_proxy_profile_creation(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200} set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result) input_data = { 'username': 'admin', 'state': 'present', 'web_proxy_profile': { 'header_client_ip': 'pass', 'header_front_end_https': 'pass', 'header_via_request': 'pass', 'header_via_response': 'pass', 'header_x_authenticated_groups': 'pass', 'header_x_authenticated_user': 'pass', 
'header_x_forwarded_for': 'pass', 'log_header_change': 'enable', 'name': 'default_name_11', 'strip_encoding': 'enable' }, 'vdom': 'root'} is_error, changed, response = fortios_web_proxy_profile.fortios_web_proxy(input_data, fos_instance) expected_data = { 'header-client-ip': 'pass', 'header-front-end-https': 'pass', 'header-via-request': 'pass', 'header-via-response': 'pass', 'header-x-authenticated-groups': 'pass', 'header-x-authenticated-user': 'pass', 'header-x-forwarded-for': 'pass', 'log-header-change': 'enable', 'name': 'default_name_11', 'strip-encoding': 'enable' } set_method_mock.assert_called_with('web-proxy', 'profile', data=expected_data, vdom='root') schema_method_mock.assert_not_called() assert not is_error assert changed assert response['status'] == 'success' assert response['http_status'] == 200 def test_web_proxy_profile_creation_fails(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500} set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result) input_data = { 'username': 'admin', 'state': 'present', 'web_proxy_profile': { 'header_client_ip': 'pass', 'header_front_end_https': 'pass', 'header_via_request': 'pass', 'header_via_response': 'pass', 'header_x_authenticated_groups': 'pass', 'header_x_authenticated_user': 'pass', 'header_x_forwarded_for': 'pass', 'log_header_change': 'enable', 'name': 'default_name_11', 'strip_encoding': 'enable' }, 'vdom': 'root'} is_error, changed, response = fortios_web_proxy_profile.fortios_web_proxy(input_data, fos_instance) expected_data = { 'header-client-ip': 'pass', 'header-front-end-https': 'pass', 'header-via-request': 'pass', 'header-via-response': 'pass', 'header-x-authenticated-groups': 'pass', 'header-x-authenticated-user': 'pass', 'header-x-forwarded-for': 'pass', 'log-header-change': 'enable', 
'name': 'default_name_11', 'strip-encoding': 'enable' } set_method_mock.assert_called_with('web-proxy', 'profile', data=expected_data, vdom='root') schema_method_mock.assert_not_called() assert is_error assert not changed assert response['status'] == 'error' assert response['http_status'] == 500 def test_web_proxy_profile_removal(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200} delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result) input_data = { 'username': 'admin', 'state': 'absent', 'web_proxy_profile': { 'header_client_ip': 'pass', 'header_front_end_https': 'pass', 'header_via_request': 'pass', 'header_via_response': 'pass', 'header_x_authenticated_groups': 'pass', 'header_x_authenticated_user': 'pass', 'header_x_forwarded_for': 'pass', 'log_header_change': 'enable', 'name': 'default_name_11', 'strip_encoding': 'enable' }, 'vdom': 'root'} is_error, changed, response = fortios_web_proxy_profile.fortios_web_proxy(input_data, fos_instance) delete_method_mock.assert_called_with('web-proxy', 'profile', mkey=ANY, vdom='root') schema_method_mock.assert_not_called() assert not is_error assert changed assert response['status'] == 'success' assert response['http_status'] == 200 def test_web_proxy_profile_deletion_fails(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500} delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result) input_data = { 'username': 'admin', 'state': 'absent', 'web_proxy_profile': { 'header_client_ip': 'pass', 'header_front_end_https': 'pass', 'header_via_request': 'pass', 'header_via_response': 
'pass', 'header_x_authenticated_groups': 'pass', 'header_x_authenticated_user': 'pass', 'header_x_forwarded_for': 'pass', 'log_header_change': 'enable', 'name': 'default_name_11', 'strip_encoding': 'enable' }, 'vdom': 'root'} is_error, changed, response = fortios_web_proxy_profile.fortios_web_proxy(input_data, fos_instance) delete_method_mock.assert_called_with('web-proxy', 'profile', mkey=ANY, vdom='root') schema_method_mock.assert_not_called() assert is_error assert not changed assert response['status'] == 'error' assert response['http_status'] == 500 def test_web_proxy_profile_idempotent(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404} set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result) input_data = { 'username': 'admin', 'state': 'present', 'web_proxy_profile': { 'header_client_ip': 'pass', 'header_front_end_https': 'pass', 'header_via_request': 'pass', 'header_via_response': 'pass', 'header_x_authenticated_groups': 'pass', 'header_x_authenticated_user': 'pass', 'header_x_forwarded_for': 'pass', 'log_header_change': 'enable', 'name': 'default_name_11', 'strip_encoding': 'enable' }, 'vdom': 'root'} is_error, changed, response = fortios_web_proxy_profile.fortios_web_proxy(input_data, fos_instance) expected_data = { 'header-client-ip': 'pass', 'header-front-end-https': 'pass', 'header-via-request': 'pass', 'header-via-response': 'pass', 'header-x-authenticated-groups': 'pass', 'header-x-authenticated-user': 'pass', 'header-x-forwarded-for': 'pass', 'log-header-change': 'enable', 'name': 'default_name_11', 'strip-encoding': 'enable' } set_method_mock.assert_called_with('web-proxy', 'profile', data=expected_data, vdom='root') schema_method_mock.assert_not_called() assert not is_error assert not changed assert response['status'] == 'error' 
assert response['http_status'] == 404 def test_web_proxy_profile_filter_foreign_attributes(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200} set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result) input_data = { 'username': 'admin', 'state': 'present', 'web_proxy_profile': { 'random_attribute_not_valid': 'tag', 'header_client_ip': 'pass', 'header_front_end_https': 'pass', 'header_via_request': 'pass', 'header_via_response': 'pass', 'header_x_authenticated_groups': 'pass', 'header_x_authenticated_user': 'pass', 'header_x_forwarded_for': 'pass', 'log_header_change': 'enable', 'name': 'default_name_11', 'strip_encoding': 'enable' }, 'vdom': 'root'} is_error, changed, response = fortios_web_proxy_profile.fortios_web_proxy(input_data, fos_instance) expected_data = { 'header-client-ip': 'pass', 'header-front-end-https': 'pass', 'header-via-request': 'pass', 'header-via-response': 'pass', 'header-x-authenticated-groups': 'pass', 'header-x-authenticated-user': 'pass', 'header-x-forwarded-for': 'pass', 'log-header-change': 'enable', 'name': 'default_name_11', 'strip-encoding': 'enable' } set_method_mock.assert_called_with('web-proxy', 'profile', data=expected_data, vdom='root') schema_method_mock.assert_not_called() assert not is_error assert changed assert response['status'] == 'success' assert response['http_status'] == 200
gpl-3.0
HelsinkiHacklab/asylum
project/access/migrations/0002_auto_20151206_1842.py
4
2456
# -*- coding: utf-8 -*- from __future__ import unicode_literals import django_markdown.models from django.db import migrations, models import asylum.mixins class Migration(migrations.Migration): dependencies = [ ('access', '0001_initial'), ] operations = [ migrations.CreateModel( name='NonMemberToken', fields=[ ('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)), ('label', models.CharField(max_length=200, verbose_name='Label', blank=True)), ('value', models.CharField(max_length=200, verbose_name='Token value')), ('revoked', models.BooleanField(verbose_name='Revoked', default=False)), ('contact', models.CharField(max_length=200, verbose_name='Contact')), ('notes', django_markdown.models.MarkdownField(verbose_name='Notes', blank=True)), ], options={ 'verbose_name_plural': 'Non-member tokens', 'verbose_name': 'Non-member token', }, bases=(asylum.mixins.AtomicVersionMixin, asylum.mixins.CleanSaveMixin, models.Model), ), migrations.AlterModelOptions( name='accesstype', options={'verbose_name_plural': 'Access Types', 'verbose_name': 'Access Type'}, ), migrations.AlterModelOptions( name='grant', options={'verbose_name_plural': 'Grants', 'verbose_name': 'Grant'}, ), migrations.AlterModelOptions( name='token', options={'verbose_name_plural': 'Tokens', 'verbose_name': 'Token'}, ), migrations.AlterModelOptions( name='tokentype', options={'verbose_name_plural': 'Token Types', 'verbose_name': 'Token Type'}, ), migrations.AddField( model_name='grant', name='notes', field=django_markdown.models.MarkdownField(verbose_name='Notes', blank=True), ), migrations.AddField( model_name='nonmembertoken', name='grants', field=models.ManyToManyField(to='access.Grant', blank=True), ), migrations.AddField( model_name='nonmembertoken', name='ttype', field=models.ForeignKey(to='access.TokenType', related_name='+', verbose_name='Token type'), ), ]
mit
dimonaks/siman
siman/functions.py
1
29689
from __future__ import division, unicode_literals, absolute_import import os, tempfile, copy, math, itertools, sys import numpy as np from operator import itemgetter from itertools import product try: import scipy except: print('functions.py: no scipy, smoother() will not work()') from siman import header from siman.header import print_and_log, printlog, runBash, eV_A_to_J_m from siman.small_functions import is_list_like, is_string_like, gunzip_file, makedir, grep_file, setting_sshpass def unique_elements(seq, idfun=None): # return only unique_elements order preserving if idfun is None: def idfun(x): return x seen = {} result = [] for item in seq: marker = idfun(item) # in old Python versions: # if seen.has_key(marker) # but in new ones: if marker in seen: continue seen[marker] = 1 result.append(item) return result def smoother(x, n, mul = 1, align = 1): """ mul - additionally multiplies values #align - find first non-zero point and return it to zero #n - smooth value, if algo = 'gaus' than it is sigma use something like 0.8 if algo = 'my' n of 10-15 is good """ algo = 'gaus' # algo = 'my' if algo == 'my': x_smooth = [] L = len(x) store = np.zeros((n,1),float) for u in range(L-n): for v in range(n): store[v] = x[u+v] av = float(sum(store)) / n x_smooth.append(av*mul) for u in range(L-n,L): for v in range(L-u-1): store[v] = x[u+v] av = float(sum(store)) / n x_smooth.append(av*mul) elif algo == 'gaus': x_smooth =x # x_smooth = scipy.ndimage.filters.median_filter(x,size =4) # print('sigma is ', n) x_smooth = scipy.ndimage.filters.gaussian_filter1d(x_smooth, n, order =0) # x_smooth = scipy.ndimage.interpolation.spline_filter1d(x, 4) else: x_smooth = x if align: # print(x_smooth[0]) x_smooth[0] = 0 # sys.exit() return np.asarray(x_smooth) def run_on_server(command, addr = None): printlog('Running', command, 'on server ...') command = command.replace('\\', '/') # make sure is POSIX # sys.exit() # print(header.sshpass) # sys.exit() if addr is None: addr = 
header.cluster_address if header.ssh_object: # printlog('Using paramiko ...', imp = 'y') # if 'ne' in header.warnings: # sys.exit() out = header.ssh_object.run(command, noerror = True, printout = 'ne' in header.warnings) elif header.sshpass and header.sshpass == 'proxy': com = 'ssh -tt sdv sshpass -f '+ header.path2pass +' ssh '+addr+' "'+command+'"' # print(com) # sys.exit() out = runBash(com) # print(out) out = out.split('Connection to')[0] # remove last message Connection to ipaddress closed # sys.exit() elif header.sshpass: com = 'sshpass -f '+header.path2pass+' ssh '+addr+' "'+command+'"' # print(com) # sys.exit() out = runBash(com) # sys.exit() else: bash_comm = 'ssh '+addr+' "'+command+'"' # print(bash_comm) # sys.exit() out = runBash(bash_comm) out = out.split('#')[-1].strip() printlog(out) # print(out) # sys.exit() return out def push_to_server(files = None, to = None, addr = None): """ if header.ssh_object then use paramiko to (str) - path to remote folder ! """ if not is_list_like(files): files = [files] to = to.replace('\\', '/') # make sure is POSIX files_str = ' '.join(np.array(files )) command = ' mkdir -p {:}'.format( to ) # print('asfsadfdsf', to) printlog('push_to_server():', command, run_on_server(command, addr)) # sys.exit() printlog('push_to_server(): uploading files ', files, 'to', addr, to) if header.ssh_object: for file in files: # print(file, to) header.ssh_object.put(file, to+'/'+os.path.basename(file) ) out = '' elif header.sshpass and header.sshpass == 'proxy': com = 'tar cf - '+ files_str + ' | ssh sdv "sshpass -f ~/.ssh/p ssh '+addr+' \\"cd '+header.cluster_home+' && tar xvf -\\"" ' # print(com) # sys.exit() out = runBash(com) # print(out) # sys.exit() elif header.sshpass: # if '@' not in addr: # printlog('Error! 
Please provide address in the form user@address') # l = addr.split('@') # print(l) # user = l[0] # ad = l[1] # com = 'rsync --rsh='+"'sshpass -f /home/aksenov/.ssh/p ssh' " +' -uaz '+files_str+ ' '+addr+':'+to com = 'rsync --rsh='+"'sshpass -f "+header.path2pass+" ssh' " +' -uaz '+files_str+ ' '+addr+':'+to # print(com) # sys.exit() out = runBash(com) else: out = runBash('rsync -uaz '+files_str+ ' '+addr+':'+to) printlog(out) return out def file_exists_on_server(file, addr): file = file.replace('\\', '/') # make sure is POSIX printlog('Checking existence of file', file, 'on server', addr ) exist = run_on_server(' ls '+file, addr) # if header.ssh_object: # exist = header.ssh_object.fexists(file) # else: # exist = runBash('ssh '+addr+' ls '+file) if 'No such file' in exist: exist = '' else: exist = 'file exists' if exist: res = True else: res = False printlog('File exist? ', res) return res def get_from_server(files = None, to = None, to_file = None, addr = None, trygz = True): """ Download files using either paramiko (higher priority) or rsync; For paramiko header.ssh_object should be defined files (list of str) - files on cluster to download to (str) - path to local folder ! to_file (str) - path to local file (if name should be changed); in this case len(files) should be 1 The gz file is also checked RETURN result of download TODO: now for each file new connection is opened, copy them in one connection """ # print(addr) # sys.exit() def download(file, to_file): # print(header.sshpass) if header.ssh_object: exist = file_exists_on_server(file, addr) # try: if exist: printlog('Using paramiko: ssh_object.get(): from to ', file, to_file) header.ssh_object.get(file, to_file ) out = '' # except FileNotFoundError: else: out = 'error, file not found' elif header.sshpass and header.sshpass == 'proxy': # com = 'ssh sdv "sshpass -f ~/.ssh/p ssh ' + addr + ' \\"tar zcf - '+ file +'\\"" | tar zxf - '+to_file # does not work? 
com = 'ssh sdv "sshpass -f ~/.ssh/p ssh ' + addr + ' \\"tar cf - '+ file +'\\"" > '+to_file # print('sshpass',com) # sys.exit() out = runBash(com) elif header.sshpass: #com = 'rsync --rsh='+"'sshpass -f /home/aksenov/.ssh/p ssh' " +' -uaz '+addr+':'+file+ ' '+to_file com = 'rsync --rsh='+"'sshpass -f "+header.path2pass+" ssh' " +' -uaz '+addr+':'+file+ ' '+to_file out = runBash(com) # print(addr) # sys.exit() else: # print(addr,file,to_file) out = runBash('rsync -uaz '+addr+':'+file+ ' '+to_file) if 'error' in out: res = out else: res = 'OK' out = '' printlog('Download result is ', res) return out if '*' in files: printlog('get_from_server(): get by template') files = run_on_server('ls '+files, addr).splitlines() # print(files) # sys.exit() printlog('get_from_server(): I download', files) elif not is_list_like(files): files = [files] files = [file.replace('\\', '/') for file in files] #make sure the path is POSIX files_str = ', '.join(np.array(files )) printlog('Trying to download', files_str, 'from server', imp = 'n') for file in files: if not to and not to_file: #use temporary file with tempfile.NamedTemporaryFile() as f: to_file_l = f.name #system independent filename elif not to_file: #obtain filename to_file_l = os.path.join(to, os.path.basename(file) ) else: to_file_l = to_file makedir(to_file_l) out = download(file, to_file_l) if out and trygz: printlog('File', file, 'does not exist, trying gz', imp = 'n') # run_on_server files = run_on_server(' ls '+file+'*', addr) file = files.split()[-1] # print(file) nz = file.count('gz') ext = '.gz'*nz # file+='.gz' to_file_l+=ext if file: out = download(file, to_file_l) printlog(' gz found with multiplicity', ext, imp = 'n') for i in range(nz): printlog('unzipping', to_file_l) gunzip_file(to_file_l) to_file_l = to_file_l[:-3] else: printlog(' No gz either!', imp = 'n') # if '5247' in file: # sys.exit() return out def salary_inflation(): """Calculate salary growth in Russia taking into account inflation""" 
inflation2000_2014 = [ 5.34, 6.45, 6.58, 6.10, 8.78, 8.80, 13.28, 11.87, 9.00 , 10.91, 11.74, 11.99, 15.06, 18.8, 20.1] init_salary = 1500 # in jan 2000; other sources 2000 - very important for i, l in enumerate( reversed(inflation2000_2014) ): init_salary = (1+l/100)*init_salary print( init_salary, i+2000) salary2014 = 30000 increase = salary2014/init_salary print( increase) # salary_inflation() def element_name_inv(el): el_dict = header.el_dict nu_dict = header.nu_dict # print type(el), el, type(str('sdf') ) if is_string_like(el): try: elinv = el_dict[el] except: print_and_log("Error! Unknown element: " +str(el)) raise RuntimeError else: el = int(el) try: elinv = nu_dict[el] except: print_and_log("Error! Unknown element: "+str(el)) raise RuntimeError return elinv # inversed notion of element invert = element_name_inv def return_atoms_to_cell(st): st = st.return_atoms_to_cell() return st def calc_ac(a1, c1, a2, c2, a_b = 0.1, c_b = 0.1, type = "two_atoms"): """ Calculate values of hexagonal lattice parameters for cell with two different atoms. The used assumption is: 1. Provided lattice constants are for large enougth cells, in which excess volume (dV) of impurity does not depend on the size of cell. 2. 
Two atoms do not interact with each other, which allows to use dV(CO) = dV(C) + dV(O) Two regimes: two_atoms - calculate cell sizes if additional atom was added double_cell - if cell was doubled; only first cell and second_cell are needed Input: a1, c1 - lattice constants of cell with first impurity atom (first cell) a2, c2 - lattice constants of cell with second impurity atom (second cell) a_b, c_b - lattice constants of cell with pure hexagonal metall Output: a, c - lattice constants of cell with two atoms """ hstring = ("%s #on %s"% (traceback.extract_stack(None, 2)[0][3], datetime.date.today() ) ) if hstring != header.history[-1]: header.history.append( hstring ) A = (a1**2 * c1) + (a2**2 * c2) - (a_b**2 * c_b) B = 0.5 * (c1/a1 + c2/a2) C = ( (a1**2 * c1) + (a2**2 * c2) ) * 0.5 #sum of cell volumes divided by 2 since during the construction of new cell we will use multiplication by 2 # print "A,B=",A,B a = (A/B)**(1./3) c = a * B a = round(a,5) c = round(c,5) print_and_log( "a, c, c/a for cell with pure hcp ", a_b, c_b, round(c_b/a_b,4), imp ='y' ) print_and_log( "a, c, c/a for cell with first atom ", a1, c1, round(c1/a1,4), imp ='y' ) print_and_log( "a, c, c/a for cell with second atom ", a2, c2, round(c2/a2,4), imp ='y' ) #for double cell a3 = (C/B)**(1./3) c3 = a3 * B a3 = round(a3,5) c3 = round(c3,5) if type == "two_atoms": print_and_log( "a, c, c/a for cell with two atoms ", a, c, round(c/a,4), "# the same cell but with two atoms\n", imp ='y') elif type == "double_cell": print_and_log( "a, c, c/a for new cell ", a3, c3, round(c3/a3,4), "# for cell with V = V(first_cell) + V(second cell), but only for the case if V(second cell) == V(first_cell)", imp ='y') return a, c def read_charge_den_vasp(): """ Read CHG vasp file and return ChargeDen object """ class ChargeDen(): """docstring for ChargeDen""" def __init__(self, ): # self.arg = arg pass def rotation_matrix(axis,theta): axis = axis/math.sqrt(np.dot(axis,axis)) a = math.cos(theta/2) b,c,d = 
-axis*math.sin(theta/2) return np.array([[a*a+b*b-c*c-d*d, 2*(b*c-a*d), 2*(b*d+a*c)], [2*(b*c+a*d), a*a+c*c-b*b-d*d, 2*(c*d-a*b)], [2*(b*d-a*c), 2*(c*d+a*b), a*a+d*d-b*b-c*c]]) def rotate(): v = np.array([3,5,0]) axis = np.array([4,4,1]) theta = 1.2 print(np.dot(rotation_matrix(axis,theta),v)) # [ 2.74911638 4.77180932 1.91629719] def rotation_matrix_from_vectors(vec1, vec2): """ Find the rotation matrix that aligns vec1 to vec2 :param vec1: A 3d "source" vector :param vec2: A 3d "destination" vector :return mat: A transform matrix (3x3) which when applied to vec1, aligns it with vec2. """ a, b = (vec1 / np.linalg.norm(vec1)).reshape(3), (vec2 / np.linalg.norm(vec2)).reshape(3) v = np.cross(a, b) c = np.dot(a, b) s = np.linalg.norm(v) kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]]) rotation_matrix = np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2)) return rotation_matrix def plot_charge_den(): """Test function; Was not used""" from mpl_toolkits.mplot3d import axes3d import matplotlib.pyplot as plt from matplotlib import cm fig = plt.figure() ax = fig.gca(projection='3d') X, Y, Z = axes3d.get_test_data(0.05) # print X # print Y # print Z ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3) # cset = ax.contourf(X, Y, Z, zdir='z', offset=-100, cmap=cm.coolwarm) # cset = ax.contourf(X, Y, Z, zdir='x', offset=-40, cmap=cm.coolwarm) # cset = ax.contourf(X, Y, Z, zdir='y', offset=40, cmap=cm.coolwarm) ax.set_xlabel('X') ax.set_xlim(-40, 40) ax.set_ylabel('Y') ax.set_ylim(-40, 40) ax.set_zlabel('Z') ax.set_zlim(-100, 100) plt.show() return def plot_interaction(calclist, calc): """ For calculation of interaction parameter alpha; Take in mind that this parameter is obtained under aproximation of redular solution """ e_seg = [] dX = [] for id in calclist: Xgb = calc[id].Xgb X = calc[id].X dX.append(Xgb/1 - X) e_seg.append(calc[id].e_seg) # print calc[id].e_seg # print calc[id].X #print dX coeffs1 = np.polyfit(dX, e_seg, 1) fit_func1 = 
np.poly1d(coeffs1) print( "list of seg energies: ", e_seg ) print( "list of dX : ", dX ) print( "Fitting using linear function:" ) print( fit_func1 ) print( "E_seg0 = {0:0.0f} meV, standart enthalpy of segregation".format(fit_func1[0]) ) print( "alpha = {0:0.0f} meV, interaction coefficient".format(-fit_func1[1]/2) ) return def calculate_voronoi(self, state = 'end'): # By default two quantities per atom are calculated by this compute. # The first is the volume of the Voronoi cell around each atom. # Any point in an atom's Voronoi cell is closer to that atom than any other. # The second is the number of faces of the Voronoi cell, which # is also the number of nearest neighbors of the atom in the middle of the cell. # state - init or end; if init then saved in self.init.vorovol; if end than saved in self.vorovol write_lammps(self, state, filepath = 'voronoi_analysis/structure.lammps') #write structure for lammps runBash("rm voronoi_analysis/dump.voro; /home/aksenov/installed/lammps-1Feb14/src/lmp_serial < voronoi_analysis/voronoi.in > voronoi_analysis/log") if state == 'end': self.vorovol = [] self.vorofaces = [] vorovol = self.vorovol vorofaces = self.vorofaces elif state == 'init': self.init.vorovol = [] self.init.vorofaces = [] vorovol = self.init.vorovol vorofaces = self.init.vorofaces vsum=0 wlist = [] with open('voronoi_analysis/dump.voro','r') as volfile: #analyze dump.voro for line in volfile: if 'ITEM: ATOMS ' in line: break for line in volfile: ll = line.split() if int(ll[1]) > 1: wlist.append( [ll[0], ll[5], ll[6], ll[2]] ) # print 'Volume of atom ',ll[0],'is', ll[5] vsum= vsum+float(ll[5]) print_and_log( 'Check total volume ', vsum, self.end.vol) wlist.sort(key = itemgetter(0)) #sort according to the position of atoms print_and_log( "atom #, voronoi vol, voronoi faces, x coordinate: ", ) print_and_log( wlist) for w in wlist: vorovol.append(float(w[1])) vorofaces.append(int(w[2])) # print 'Voro vol ',self.end.vorovol # print 'Voro faces',self.end.vorofaces 
# print len(wlist) if hasattr(self, 'vorovol'): voro = '' if len(vorovol) == 2: #C and O voro = " {0:5.2f} & {1:2d} & {2:5.2f} & {3:2d} ".format(vorovol[0], vorofaces[0], vorovol[1], vorofaces[1] ).center(25) else: voro = " {0:5.2f} & {1:2d} ".format(vorovol[0], vorofaces[0] ).center(25) voro+='&' else: voro = "" print_and_log( "Voronoi volume = ", voro, imp = 'y') return voro def log_history(hstring): try: if hstring != header.history[-1]: header.history.append( hstring ) except: header.history.append( hstring ) return def gb_energy_volume(gb,bulk): if (gb.end.rprimd[1] != bulk.end.rprimd[1]).any() or (gb.end.rprimd[2] != bulk.end.rprimd[2]).any(): print_and_log("Warning! You are trying to calculate gb_energy from cells with different lateral sizes:"+str(gb.end.rprimd)+" "+str(bulk.end.rprimd)+"\n") #print bulk.vol V_1at = bulk.vol / bulk.natom #* to_ang**3 E_1at = bulk.energy_sigma0 / bulk.natom A = np.linalg.norm( np.cross(gb.end.rprimd[1], gb.end.rprimd[2]) ) #surface area of gb #print A gb.v_gb = ( gb.vol - V_1at * gb.natom) / A / 2. * 1000 gb.e_gb = ( gb.energy_sigma0 - E_1at * gb.natom) / A / 2. * eV_A_to_J_m * 1000 gb.e_gb_init = ( gb.list_e_sigma0[0] - E_1at * gb.natom) / A / 2. 
* eV_A_to_J_m * 1000 gb.bulk_extpress = bulk.extpress #print "Calc %s; e_gb_init = %.3f J/m^2; e_gb = %.3f J/m; v_gb = %.3f angstrom "%(gb.name, gb.e_gb_init, gb.e_gb, gb.v_gb ) outst = "%15s&%7.0f&%7.0f"%(gb.name, gb.e_gb, gb.v_gb) return outst def headers(): j = (7,12,14,7,8,9,9,5,5,20,5,20,8,12,20,8,5,8,8) d="&" header_for_bands= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\ +d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\ +"Warn!"+d+"nband"+d+"Added, \%"+"\\\\" header_for_ecut= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\ +d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\ +"Warn!"+d+"Ecut,eV"+"\\\\" header_for_npar= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\ +d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\ +"Warn!"+d+"NPAR".center(j[16])+d+"LPLANE".center(j[17])+"\\\\" header_for_kpoints= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\ +d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\ +"Warn!"+d+"k-mesh".center(j[8])+d+"k-spacings".center(j[9])+d+"nkpt".center(j[10])+"\\\\" header_for_tsmear= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\ +d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\ +"Warn!"+d+"k-mesh".center(j[8])+d+"tsmear, meV".center(j[13])+d+"Smearing error, meV/atom".center(j[14])+"\\\\" header_for_stress= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\ +d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\ +"Warn!"+d+"Stress, intr u.*1000".center(j[11])+d+"Pressure, MPa".center(j[12]) #print "\\hline" return header_for_kpoints def read_vectors(token, number_of_vectors, list_of_words, type_func = None, lists = False): """Returns the list of numpy vectors 
for the last match""" # lists - return list of lists instead list of vectors if type_func is None: type_func = lambda a : float(a) number_of_matches = list_of_words.count( token ) if number_of_matches == 0: #print_and_log("Warning token '"+token+"' was not found! return empty\n") return [None] if number_of_matches > 1: print_and_log("Warning token '"+token+"' was found more than one times\n") raise RuntimeError index = list_of_words.index(token, number_of_matches - 1 ) #Return the index of the last match #print list_of_words[index] list_of_vectors = [] list_of_lists = [] vector = np.zeros((3)) for i in range(number_of_vectors): vector[0] = type_func(list_of_words[index + 1]) vector[1] = type_func(list_of_words[index + 2]) vector[2] = type_func(list_of_words[index + 3]) list3 = [] for j in 1,2,3: list3.append(type_func(list_of_words[index + j]) ) index+=3 list_of_vectors.append(vector.copy()) list_of_lists.append(list3) if lists: out = list_of_lists else: out = list_of_vectors return out def read_string(token, length, string): sh = len(token)+1 i = string.find(token)+sh # print('length', i, i+length) # sys.exit() if i is -1: return '' else: return string[i:i+length] def read_list(token, number_of_elements, ttype, list_of_words): """Input is token to find, number of elements to read, type of elements and list of words, where to search Returns the list of elements for the last match""" number_of_matches = list_of_words.count( token ) #if number_of_elements == 0: raise RuntimeError if number_of_matches > 1: print_and_log("Warning token '"+token+"' was found more than one times\n") raise RuntimeError if number_of_matches == 0 or number_of_elements == 0: #print_and_log("Warning token '"+token+"' was not found or asked number of elements is zero! 
set to [None]\n") #if ttype == str: # return ['']*number_of_elements #else: # return [0]*number_of_elements return [None] try: index = list_of_words.index(token, number_of_matches - 1 ) #Return the index of the last match except ValueError: print_and_log("Warning!, token "+token+" was not found. I return [None]!\n") return [None] index+=1 #the position of token value list_of_elements = [] #define function dependig on type: if ttype == int : def convert(a): return int(a) elif ttype == float: def convert(a): # print a return float(a) elif ttype == str : def convert(a): return str(a) #print list_of_words[index], type(list_of_words[index]) if list_of_words[index] == "None" : def convert(a): return [None] #Make convertion for i in range(number_of_elements): if 'None' in list_of_words[index]: list_of_elements.append(None) else: list_of_elements.append( convert( list_of_words[index] ) ) index+=1 return list_of_elements def words(fileobj): """Generator of words. However does not allow to use methods of list for returned""" for line in fileobj: for word in line.split(): yield word def server_cp(copy_file, to, gz = True, scratch = False, new_filename = None): if scratch: if not header.PATH2ARCHIVE: printlog('Warning! PATH2ARCHIVE is empty! 
Please put path archive in ~/simanrc.py or ./project_conf.py ') copy_file = header.PATH2ARCHIVE + '/' + copy_file else: copy_file = header.project_path_cluster + '/' + copy_file filename = os.path.basename(copy_file) if new_filename is None: new_filename = filename if gz: command = 'cp '+copy_file + ' ' + to +'/'+new_filename + '.gz ; gunzip -f '+ to+ '/'+new_filename+'.gz' else: command = 'cp '+copy_file + ' ' + to +'/'+new_filename printlog('Running on server', command, imp = '') if file_exists_on_server(copy_file, header.cluster_address): out = run_on_server(command, addr = header.cluster_address) printlog('Output of run_on_server', out, imp = '') else: out = 'error, file does not exist on server: '+copy_file return out def wrapper_cp_on_server(file, to, new_filename = None): """ tries iterativly scratch and gz """ copy_to = to copy_file = file filename = os.path.basename(file) if new_filename: app = 'with new name '+new_filename else: app = '' for s, gz in product([0,1], ['', '.gz']): printlog('scratch, gz:', s, gz) out = server_cp(copy_file+gz, to = to, gz = gz, scratch = s, new_filename = new_filename) if out == '': printlog('File', filename, 'was succesfully copied to',to, app, imp = 'y') break # else: else: printlog('Warning! File was not copied, probably it does not exist. Try using header.warnings = "neyY" for more details', imp = 'y') return def update_incar(parameter = None, value = None, u_ramp_step = None, write = True, f = None, run = False, st = None): """Modifications of INCAR. Take attention that *parameter* will be changed to new *value* if it only already exist in INCAR. *u_ramp_step*-current step to determine u, *write*-sometimes just the return value is needed. Returns U value corresponding to *u_ramp_step*. 
""" self = st u_step = None if parameter == 'LDAUU': #Update only non-zero elements of LDAUU with value set_LDAUU_list = self.set.vasp_params['LDAUU'] new_LDAUU_list = copy.deepcopy(set_LDAUU_list) # print set_LDAUU_list u_step = 0.0 for i, u in enumerate(set_LDAUU_list): if u == 0: continue u_step = np.linspace(0, u, self.set.u_ramping_nstep)[u_ramp_step] u_step = np.round(u_step, 1) # new_LDAUU_list[i] = value new_LDAUU_list[i] = u_step new_LDAUU = 'LDAUU = '+' '.join(['{:}']*len(new_LDAUU_list)).format(*new_LDAUU_list) command = "sed -i.bak '/LDAUU/c\\" + new_LDAUU + "' INCAR\n" #print('u_step',u_step) #sys.exit() elif parameter == 'MAGMOM': new_incar_string = parameter + ' = ' + ' '.join(['{:}']*len(value)).format(*value) command = "sed -i.bak '/"+parameter+"/c\\" + new_incar_string + "' INCAR\n" # elif parameter in ['IMAGES', 'ISPIN']: else: new_incar_string = parameter + ' = ' + str(value) command = "sed -i.bak '/"+parameter+"/c\\" + new_incar_string + "' INCAR\n" if write and f: f.write(command) if run: runBash(command) return u_step #for last element def check_output(filename, check_string, load): """ Check if file exist and it is finished by search for check_string """ if filename and os.path.exists(filename): out = grep_file(check_string, filename, reverse = True) printlog('The grep result of',filename, 'is:', out) # sys.exit() if check_string in out or 'un' in load: state = '4. Finished' else: state = '5. Broken outcar' else: state = '5. no OUTCAR' return state
gpl-2.0
Rav3nPL/p2pool-myriadcoin
p2pool/util/jsonrpc.py
261
6082
from __future__ import division import json import weakref from twisted.internet import defer from twisted.protocols import basic from twisted.python import failure, log from twisted.web import client, error from p2pool.util import deferral, deferred_resource, memoize class Error(Exception): def __init__(self, code, message, data=None): if type(self) is Error: raise TypeError("can't directly instantiate Error class; use Error_for_code") if not isinstance(code, int): raise TypeError('code must be an int') #if not isinstance(message, unicode): # raise TypeError('message must be a unicode') self.code, self.message, self.data = code, message, data def __str__(self): return '%i %s' % (self.code, self.message) + (' %r' % (self.data, ) if self.data is not None else '') def _to_obj(self): return { 'code': self.code, 'message': self.message, 'data': self.data, } @memoize.memoize_with_backing(weakref.WeakValueDictionary()) def Error_for_code(code): class NarrowError(Error): def __init__(self, *args, **kwargs): Error.__init__(self, code, *args, **kwargs) return NarrowError class Proxy(object): def __init__(self, func, services=[]): self._func = func self._services = services def __getattr__(self, attr): if attr.startswith('rpc_'): return lambda *params: self._func('.'.join(self._services + [attr[len('rpc_'):]]), params) elif attr.startswith('svc_'): return Proxy(self._func, self._services + [attr[len('svc_'):]]) else: raise AttributeError('%r object has no attribute %r' % (self.__class__.__name__, attr)) @defer.inlineCallbacks def _handle(data, provider, preargs=(), response_handler=None): id_ = None try: try: try: req = json.loads(data) except Exception: raise Error_for_code(-32700)(u'Parse error') if 'result' in req or 'error' in req: response_handler(req['id'], req['result'] if 'error' not in req or req['error'] is None else failure.Failure(Error_for_code(req['error']['code'])(req['error']['message'], req['error'].get('data', None)))) defer.returnValue(None) id_ = 
req.get('id', None) method = req.get('method', None) if not isinstance(method, basestring): raise Error_for_code(-32600)(u'Invalid Request') params = req.get('params', []) if not isinstance(params, list): raise Error_for_code(-32600)(u'Invalid Request') for service_name in method.split('.')[:-1]: provider = getattr(provider, 'svc_' + service_name, None) if provider is None: raise Error_for_code(-32601)(u'Service not found') method_meth = getattr(provider, 'rpc_' + method.split('.')[-1], None) if method_meth is None: raise Error_for_code(-32601)(u'Method not found') result = yield method_meth(*list(preargs) + list(params)) error = None except Error: raise except Exception: log.err(None, 'Squelched JSON error:') raise Error_for_code(-32099)(u'Unknown error') except Error, e: result = None error = e._to_obj() defer.returnValue(json.dumps(dict( jsonrpc='2.0', id=id_, result=result, error=error, ))) # HTTP @defer.inlineCallbacks def _http_do(url, headers, timeout, method, params): id_ = 0 try: data = yield client.getPage( url=url, method='POST', headers=dict(headers, **{'Content-Type': 'application/json'}), postdata=json.dumps({ 'jsonrpc': '2.0', 'method': method, 'params': params, 'id': id_, }), timeout=timeout, ) except error.Error, e: try: resp = json.loads(e.response) except: raise e else: resp = json.loads(data) if resp['id'] != id_: raise ValueError('invalid id') if 'error' in resp and resp['error'] is not None: raise Error_for_code(resp['error']['code'])(resp['error']['message'], resp['error'].get('data', None)) defer.returnValue(resp['result']) HTTPProxy = lambda url, headers={}, timeout=5: Proxy(lambda method, params: _http_do(url, headers, timeout, method, params)) class HTTPServer(deferred_resource.DeferredResource): def __init__(self, provider): deferred_resource.DeferredResource.__init__(self) self._provider = provider @defer.inlineCallbacks def render_POST(self, request): data = yield _handle(request.content.read(), self._provider, preargs=[request]) 
assert data is not None request.setHeader('Content-Type', 'application/json') request.setHeader('Content-Length', len(data)) request.write(data) class LineBasedPeer(basic.LineOnlyReceiver): delimiter = '\n' def __init__(self): #basic.LineOnlyReceiver.__init__(self) self._matcher = deferral.GenericDeferrer(max_id=2**30, func=lambda id, method, params: self.sendLine(json.dumps({ 'jsonrpc': '2.0', 'method': method, 'params': params, 'id': id, }))) self.other = Proxy(self._matcher) def lineReceived(self, line): _handle(line, self, response_handler=self._matcher.got_response).addCallback(lambda line2: self.sendLine(line2) if line2 is not None else None)
gpl-3.0
TanguyPatte/phantomjs-packaging
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/net/failuremap_unittest.py
124
3843
# Copyright (c) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import unittest2 as unittest from webkitpy.common.net.buildbot import Build from webkitpy.common.net.failuremap import * from webkitpy.common.net.regressionwindow import RegressionWindow from webkitpy.common.net.buildbot.buildbot_mock import MockBuilder class FailureMapTest(unittest.TestCase): builder1 = MockBuilder("Builder1") builder2 = MockBuilder("Builder2") build1a = Build(builder1, build_number=22, revision=1233, is_green=True) build1b = Build(builder1, build_number=23, revision=1234, is_green=False) build2a = Build(builder2, build_number=89, revision=1233, is_green=True) build2b = Build(builder2, build_number=90, revision=1235, is_green=False) regression_window1 = RegressionWindow(build1a, build1b, failing_tests=[u'test1', u'test1']) regression_window2 = RegressionWindow(build2a, build2b, failing_tests=[u'test1']) def _make_failure_map(self): failure_map = FailureMap() failure_map.add_regression_window(self.builder1, self.regression_window1) failure_map.add_regression_window(self.builder2, self.regression_window2) return failure_map def test_failing_revisions(self): failure_map = self._make_failure_map() self.assertEqual(failure_map.failing_revisions(), [1234, 1235]) def test_new_failures(self): failure_map = self._make_failure_map() failure_map.filter_out_old_failures(lambda revision: False) self.assertEqual(failure_map.failing_revisions(), [1234, 1235]) def test_new_failures_with_old_revisions(self): failure_map = self._make_failure_map() failure_map.filter_out_old_failures(lambda revision: revision == 1234) self.assertEqual(failure_map.failing_revisions(), []) def test_new_failures_with_more_old_revisions(self): failure_map = self._make_failure_map() failure_map.filter_out_old_failures(lambda revision: revision == 1235) self.assertEqual(failure_map.failing_revisions(), [1234]) def test_tests_failing_for(self): failure_map = self._make_failure_map() self.assertEqual(failure_map.tests_failing_for(1234), [u'test1']) def test_failing_tests(self): failure_map 
= self._make_failure_map() self.assertEqual(failure_map.failing_tests(), set([u'test1']))
bsd-3-clause
Cinntax/home-assistant
homeassistant/components/mopar/sensor.py
2
2710
"""Support for the Mopar vehicle sensor platform.""" from homeassistant.components.mopar import ( DOMAIN as MOPAR_DOMAIN, DATA_UPDATED, ATTR_VEHICLE_INDEX, ) from homeassistant.const import ATTR_ATTRIBUTION, LENGTH_KILOMETERS from homeassistant.core import callback from homeassistant.helpers.dispatcher import async_dispatcher_connect from homeassistant.helpers.entity import Entity ICON = "mdi:car" async def async_setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Mopar platform.""" data = hass.data[MOPAR_DOMAIN] add_entities( [MoparSensor(data, index) for index, _ in enumerate(data.vehicles)], True ) class MoparSensor(Entity): """Mopar vehicle sensor.""" def __init__(self, data, index): """Initialize the sensor.""" self._index = index self._vehicle = {} self._vhr = {} self._tow_guide = {} self._odometer = None self._data = data self._name = self._data.get_vehicle_name(self._index) @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the sensor.""" return self._odometer @property def device_state_attributes(self): """Return the state attributes.""" attributes = { ATTR_VEHICLE_INDEX: self._index, ATTR_ATTRIBUTION: self._data.attribution, } attributes.update(self._vehicle) attributes.update(self._vhr) attributes.update(self._tow_guide) return attributes @property def unit_of_measurement(self): """Return the unit of measurement.""" return self.hass.config.units.length_unit @property def icon(self): """Return the icon.""" return ICON @property def should_poll(self): """Return the polling requirement for this sensor.""" return False async def async_added_to_hass(self): """Handle entity which will be added.""" async_dispatcher_connect( self.hass, DATA_UPDATED, self._schedule_immediate_update ) def update(self): """Update device state.""" self._vehicle = self._data.vehicles[self._index] self._vhr = self._data.vhrs.get(self._index, {}) self._tow_guide = 
self._data.tow_guides.get(self._index, {}) if "odometer" in self._vhr: odo = float(self._vhr["odometer"]) self._odometer = int(self.hass.config.units.length(odo, LENGTH_KILOMETERS)) @callback def _schedule_immediate_update(self): self.async_schedule_update_ha_state(True)
apache-2.0
gstiebler/odemis
src/odemis/driver/test/andorcam3_test.py
2
2993
#!/usr/bin/env python # -*- coding: utf-8 -*- ''' Created on 12 Mar 2012 @author: Éric Piel Testing class for driver.andorcam3 . Copyright © 2012 Éric Piel, Delmic This file is part of Odemis. Odemis is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Odemis. If not, see http://www.gnu.org/licenses/. ''' from __future__ import division import logging from odemis.driver import andorcam3 import unittest from unittest.case import skip from cam_test_abs import VirtualTestCam, VirtualStaticTestCam, \ VirtualTestSynchronized logging.getLogger().setLevel(logging.DEBUG) CLASS = andorcam3.AndorCam3 KWARGS = dict(name="camera", role="ccd", device=0, transpose=[2, -1], bitflow_install_dirs="/usr/share/bitflow/") class StaticTestAndorCam3(VirtualStaticTestCam, unittest.TestCase): camera_type = CLASS camera_kwargs = KWARGS # Inheritance order is important for setUp, tearDown #@skip("simple") class TestAndorCam3(VirtualTestCam, unittest.TestCase): """ Test directly the AndorCam3 class. 
""" camera_type = CLASS camera_kwargs = KWARGS #@skip("simple") class TestSynchronized(VirtualTestSynchronized, unittest.TestCase): """ Test the synchronizedOn(Event) interface, using the fake SEM """ camera_type = CLASS camera_kwargs = KWARGS # Notes on testing the reconnection (which is pretty impossible to do non-manually): # * Test both cable disconnect/reconnect and turning off/on # * Test the different scenarios: # - No acquisition; camera goes away -> the .state is updated # - No acquisition; camera goes away; camera comes back; acquisition -> acquisition starts # - No acquisition; camera goes away; acquisition; camera comes back -> acquisition starts # - Acquisition; camera goes away; camera comes back -> acquisition restarts # - Acquisition; camera goes away; acquisition stops; camera comes back; acquisition -> acquisition restarts # - Acquisition; camera goes away; acquisition stops; acquisition; camera comes back -> acquisition restarts # - Acquisition; camera goes away; terminate -> component ends if __name__ == '__main__': unittest.main() #from odemis.driver import andorcam3 #import logging #logging.getLogger().setLevel(logging.DEBUG) # #a = andorcam3.AndorCam3("test", "cam", 0, bitflow_install_dirs="/usr/share/bitflow/") #a.targetTemperature.value = -15 #a.fanSpeed.value = 0 #rr = a.readoutRate.value #a.data.get() #rt = a.GetFloat(u"ReadoutTime") #res = a.resolution.value #res[0] * res[1] / rr #a.data.get() #a.resolution.value = (128, 128)
gpl-2.0
bhargav2408/python-for-android
python3-alpha/python3-src/Lib/distutils/tests/__init__.py
165
1060
"""Test suite for distutils. This test suite consists of a collection of test modules in the distutils.tests package. Each test module has a name starting with 'test' and contains a function test_suite(). The function is expected to return an initialized unittest.TestSuite instance. Tests for the command classes in the distutils.command package are included in distutils.tests as well, instead of using a separate distutils.command.tests package, since command identification is done by import rather than matching pre-defined names. """ import os import sys import unittest from test.support import run_unittest here = os.path.dirname(__file__) or os.curdir def test_suite(): suite = unittest.TestSuite() for fn in os.listdir(here): if fn.startswith("test") and fn.endswith(".py"): modname = "distutils.tests." + fn[:-3] __import__(modname) module = sys.modules[modname] suite.addTest(module.test_suite()) return suite if __name__ == "__main__": run_unittest(test_suite())
apache-2.0
lukecampbell/compliance-checker
compliance_checker/tests/test_ioos_sos.py
1
2367
import unittest from compliance_checker.suite import CheckSuite from compliance_checker.runner import ComplianceChecker import os import httpretty # TODO: Use inheritance to eliminate redundant code in test setup, etc class TestIOOSSOSGetCapabilities(unittest.TestCase): def setUp(self): with open(os.path.join(os.path.dirname(__file__), 'data/http_mocks/ncsos_getcapabilities.xml')) as f: self.resp = f.read() # need to monkey patch checkers prior to running tests, or no checker # classes will show up CheckSuite().load_all_available_checkers() @httpretty.activate def test_retrieve_getcaps(self): """Method that simulates retrieving SOS GetCapabilities""" url = "http://data.oceansmap.com/thredds/sos/caricoos_ag/VIA/VIA.ncml" httpretty.register_uri(httpretty.GET, url, content_type="text/xml", body=self.resp) # need to mock out the HEAD response so that compliance checker # recognizes this as some sort of XML doc instead of an OPeNDAP # source ComplianceChecker.run_checker(url, ['ioos_sos'], 1, 'normal') class TestIOOSSOSDescribeSensor(unittest.TestCase): def setUp(self): with open(os.path.join(os.path.dirname(__file__), 'data/http_mocks/ncsos_describesensor.xml')) as f: self.resp = f.read() # need to monkey patch checkers prior to running tests, or no checker # classes will show up CheckSuite().load_all_available_checkers() @httpretty.activate def test_retrieve_describesensor(self): """Method that simulates retrieving SOS DescribeSensor""" url = ("http://data.oceansmap.com/thredds/sos/caricoos_ag/VIA/VIA.ncml?" "request=describesensor" "&service=sos" "&procedure=urn:ioos:station:ncsos:VIA" "&outputFormat=text/xml%3Bsubtype%3D%22sensorML/1.0.1/profiles/ioos_sos/1.0%22" "&version=1.0.0") httpretty.register_uri(httpretty.GET, url, content_type="text/xml", body=self.resp) # need to mock out the HEAD response so that compliance checker # recognizes this as some sort of XML doc instead of an OPeNDAP # source ComplianceChecker.run_checker(url, ['ioos_sos'], 1, 'normal')
apache-2.0
pshen/ansible
lib/ansible/modules/files/archive.py
33
14904
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2016, Ben Doherty <bendohmv@gmail.com> # Sponsored by Oomph, Inc. http://www.oomphinc.com # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: archive version_added: 2.3 short_description: Creates a compressed archive of one or more files or trees. extends_documentation_fragment: files description: - Packs an archive. It is the opposite of M(unarchive). By default, it assumes the compression source exists on the target. It will not copy the source file from the local system to the target before archiving. Source files can be deleted after archival by specifying I(remove=True). options: path: description: - Remote absolute path, glob, or list of paths or globs for the file or files to compress or archive. required: true format: description: - The type of compression to use. choices: [ 'gz', 'bz2', 'zip' ] default: 'gz' dest: description: - The file name of the destination archive. This is required when C(path) refers to multiple files by either specifying a glob, a directory or multiple paths in a list. required: false default: null remove: description: - Remove any added source files and trees after adding to archive. 
required: false default: false author: "Ben Doherty (@bendoh)" notes: - requires tarfile, zipfile, gzip, and bzip2 packages on target host - can produce I(gzip), I(bzip2) and I(zip) compressed files or archives ''' EXAMPLES = ''' # Compress directory /path/to/foo/ into /path/to/foo.tgz - archive: path: /path/to/foo dest: /path/to/foo.tgz # Compress regular file /path/to/foo into /path/to/foo.gz and remove it - archive: path: /path/to/foo remove: True # Create a zip archive of /path/to/foo - archive: path: /path/to/foo format: zip # Create a bz2 archive of multiple files, rooted at /path - archive: path: - /path/to/foo - /path/wong/foo dest: /path/file.tar.bz2 format: bz2 ''' RETURN = ''' state: description: The current state of the archived file. If 'absent', then no source files were found and the archive does not exist. If 'compress', then the file source file is in the compressed state. If 'archive', then the source file or paths are currently archived. If 'incomplete', then an archive was created, but not all source paths were found. type: string returned: always missing: description: Any files that were missing from the source. type: list returned: success archived: description: Any files that were compressed or added to the archive. type: list returned: success arcroot: description: The archive root. type: string returned: always expanded_paths: description: The list of matching paths from paths argument. 
type: list returned: always ''' import os import re import glob import shutil import gzip import bz2 import filecmp import zipfile import tarfile from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.pycompat24 import get_exception def main(): module = AnsibleModule( argument_spec = dict( path = dict(type='list', required=True), format = dict(choices=['gz', 'bz2', 'zip', 'tar'], default='gz', required=False), dest = dict(required=False, type='path'), remove = dict(required=False, default=False, type='bool'), ), add_file_common_args=True, supports_check_mode=True, ) params = module.params check_mode = module.check_mode paths = params['path'] dest = params['dest'] remove = params['remove'] expanded_paths = [] format = params['format'] globby = False changed = False state = 'absent' # Simple or archive file compression (inapplicable with 'zip' since it's always an archive) archive = False successes = [] for i, path in enumerate(paths): path = os.path.expanduser(os.path.expandvars(path)) # Expand any glob characters. If found, add the expanded glob to the # list of expanded_paths, which might be empty. if ('*' in path or '?' 
in path): expanded_paths = expanded_paths + glob.glob(path) globby = True # If there are no glob characters the path is added to the expanded paths # whether the path exists or not else: expanded_paths.append(path) if len(expanded_paths) == 0: return module.fail_json(path=', '.join(paths), expanded_paths=', '.join(expanded_paths), msg='Error, no source paths were found') # If we actually matched multiple files or TRIED to, then # treat this as a multi-file archive archive = globby or os.path.isdir(expanded_paths[0]) or len(expanded_paths) > 1 # Default created file name (for single-file archives) to # <file>.<format> if not dest and not archive: dest = '%s.%s' % (expanded_paths[0], format) # Force archives to specify 'dest' if archive and not dest: module.fail_json(dest=dest, path=', '.join(paths), msg='Error, must specify "dest" when archiving multiple files or trees') archive_paths = [] missing = [] arcroot = '' for path in expanded_paths: # Use the longest common directory name among all the files # as the archive root path if arcroot == '': arcroot = os.path.dirname(path) + os.sep else: for i in range(len(arcroot)): if path[i] != arcroot[i]: break if i < len(arcroot): arcroot = os.path.dirname(arcroot[0:i+1]) arcroot += os.sep # Don't allow archives to be created anywhere within paths to be removed if remove and os.path.isdir(path) and dest.startswith(path): module.fail_json(path=', '.join(paths), msg='Error, created archive can not be contained in source paths when remove=True') if os.path.lexists(path): archive_paths.append(path) else: missing.append(path) # No source files were found but the named archive exists: are we 'compress' or 'archive' now? 
if len(missing) == len(expanded_paths) and dest and os.path.exists(dest): # Just check the filename to know if it's an archive or simple compressed file if re.search(r'(\.tar|\.tar\.gz|\.tgz|.tbz2|\.tar\.bz2|\.zip)$', os.path.basename(dest), re.IGNORECASE): state = 'archive' else: state = 'compress' # Multiple files, or globbiness elif archive: if len(archive_paths) == 0: # No source files were found, but the archive is there. if os.path.lexists(dest): state = 'archive' elif len(missing) > 0: # SOME source files were found, but not all of them state = 'incomplete' archive = None size = 0 errors = [] if os.path.lexists(dest): size = os.path.getsize(dest) if state != 'archive': if check_mode: changed = True else: try: # Slightly more difficult (and less efficient!) compression using zipfile module if format == 'zip': arcfile = zipfile.ZipFile(dest, 'w', zipfile.ZIP_DEFLATED) # Easier compression using tarfile module elif format == 'gz' or format == 'bz2': arcfile = tarfile.open(dest, 'w|' + format) # Or plain tar archiving elif format == 'tar': arcfile = tarfile.open(dest, 'w') match_root = re.compile('^%s' % re.escape(arcroot)) for path in archive_paths: if os.path.isdir(path): # Recurse into directories for dirpath, dirnames, filenames in os.walk(path, topdown=True): if not dirpath.endswith(os.sep): dirpath += os.sep for dirname in dirnames: fullpath = dirpath + dirname arcname = match_root.sub('', fullpath) try: if format == 'zip': arcfile.write(fullpath, arcname) else: arcfile.add(fullpath, arcname, recursive=False) except Exception: e = get_exception() errors.append('%s: %s' % (fullpath, str(e))) for filename in filenames: fullpath = dirpath + filename arcname = match_root.sub('', fullpath) if not filecmp.cmp(fullpath, dest): try: if format == 'zip': arcfile.write(fullpath, arcname) else: arcfile.add(fullpath, arcname, recursive=False) successes.append(fullpath) except Exception: e = get_exception() errors.append('Adding %s: %s' % (path, str(e))) else: if format 
== 'zip': arcfile.write(path, match_root.sub('', path)) else: arcfile.add(path, match_root.sub('', path), recursive=False) successes.append(path) except Exception: e = get_exception() return module.fail_json(msg='Error when writing %s archive at %s: %s' % (format == 'zip' and 'zip' or ('tar.' + format), dest, str(e))) if arcfile: arcfile.close() state = 'archive' if len(errors) > 0: module.fail_json(msg='Errors when writing archive at %s: %s' % (dest, '; '.join(errors))) if state in ['archive', 'incomplete'] and remove: for path in successes: try: if os.path.isdir(path): shutil.rmtree(path) elif not check_mode: os.remove(path) except OSError: e = get_exception() errors.append(path) if len(errors) > 0: module.fail_json(dest=dest, msg='Error deleting some source files: ' + str(e), files=errors) # Rudimentary check: If size changed then file changed. Not perfect, but easy. if os.path.getsize(dest) != size: changed = True if len(successes) and state != 'incomplete': state = 'archive' # Simple, single-file compression else: path = expanded_paths[0] # No source or compressed file if not (os.path.exists(path) or os.path.lexists(dest)): state = 'absent' # if it already exists and the source file isn't there, consider this done elif not os.path.lexists(path) and os.path.lexists(dest): state = 'compress' else: if module.check_mode: if not os.path.exists(dest): changed = True else: size = 0 f_in = f_out = arcfile = None if os.path.lexists(dest): size = os.path.getsize(dest) try: if format == 'zip': arcfile = zipfile.ZipFile(dest, 'w', zipfile.ZIP_DEFLATED) arcfile.write(path, path[len(arcroot):]) arcfile.close() state = 'archive' # because all zip files are archives else: f_in = open(path, 'rb') if format == 'gz': f_out = gzip.open(dest, 'wb') elif format == 'bz2': f_out = bz2.BZ2File(dest, 'wb') else: raise OSError("Invalid format") shutil.copyfileobj(f_in, f_out) successes.append(path) except OSError: e = get_exception() module.fail_json(path=path, dest=dest, msg='Unable to 
write to compressed file: %s' % str(e)) if arcfile: arcfile.close() if f_in: f_in.close() if f_out: f_out.close() # Rudimentary check: If size changed then file changed. Not perfect, but easy. if os.path.getsize(dest) != size: changed = True state = 'compress' if remove and not check_mode: try: os.remove(path) except OSError: e = get_exception() module.fail_json(path=path, msg='Unable to remove source file: %s' % str(e)) params['path'] = dest file_args = module.load_file_common_arguments(params) changed = module.set_fs_attributes_if_different(file_args, changed) module.exit_json(archived=successes, dest=dest, changed=changed, state=state, arcroot=arcroot, missing=missing, expanded_paths=expanded_paths) if __name__ == '__main__': main()
gpl-3.0
tusuzu/tmtp
standalone/tmtpl/curves.py
2
7623
#!/usr/bin/python # # This file is part of the tmtp (Tau Meta Tau Physica) project. # For more information, see http://www.sew-brilliant.org/ # # Copyright (C) 2010, 2011, 2012 Susan Spencer and Steve Conklin # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. Attribution must be given in # all derived works. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. #import sys #import math #import string #import re import math from pattern import Point, lineLengthP, pntOnLine, pntFromDistanceAndAngle, moveP from tmtpl.constants import * # Code derived from this C code: http://www.codeproject.com/KB/graphics/BezierSpline.aspx # knots - list of Point objects - spline points (must contain at least two points) # firstControlPoints - Output first control points (same length as knots - 1) # secondControlPoints - Output second control points (same length as knots - 1) def __getFirstControlPoints(rhs): """ Solves a tridiagonal system for one of coordinates (x or y) of first Bezier control points. """ # rhs: Right hand side vector # returns: Solution vector tlen = len(rhs) # init these to obvious values for debugging solution = [-0.123 for i in range(tlen)] tmp = [-0.987 for i in range(tlen)] # first point db = 2.0 solution[0] = (rhs[0] / db) for i in range(1, tlen-1): # Decomposition and forward substitution. 
tmp[i] = 1/db db = 4.0 - tmp[i] solution[i] = (rhs[i] - solution[i-1]) / db; # now do the last point i = tlen-1 tmp[i] = 1/db db = 3.5 - tmp[i] solution[i] = (rhs[i] - solution[i-1]) / db; for i in range(1, tlen): # Backsubstitution solution[tlen - i - 1] = solution[tlen - i - 1] - (tmp[tlen - i] * solution[tlen - i]) return solution def GetCurveControlPoints(name, knots): if len(knots) < 2: raise ValueError("At least two points required for input") fcpnum = 0 # used for naming control points sequentially scpnum = 0 np = len(knots) - 1 # init these to easily spotted values for debugging firstControlPoints = [0.2468 for i in range(np)] secondControlPoints = [0.1357 for i in range(np)] if len(knots) == 2: # Special case: Bezier curve should be a straight line. pnt = Point('reference', '%s-fcp%d' % (name, fcpnum), styledef = 'controlpoint_style') pnt.x = (2 * knots[0].x + knots[1].x) / 3 pnt.y = (2 * knots[0].y + knots[1].y) / 3 firstControlPoints[0] = pnt fcpnum = fcpnum + 1 pnt = Point('reference', '%s-scp%d' % (name, scpnum), styledef = 'controlpoint_style') pnt.x = 2 * firstControlPoints[0].x - knots[0].x pnt.y = 2 * firstControlPoints[0].y - knots[0].y secondControlPoints[0] = pnt scpnum = scpnum + 1 return (firstControlPoints, secondControlPoints) # Calculate first Bezier control points # Right hand side vector - init to known funky value for debugging rhs = [-0.57689 for i in range(np)] # Set right hand side X values for i in range(1, np-1): rhs[i] = 4 * knots[i].x + 2 * knots[i + 1].x rhs[0] = knots[0].x + 2 * knots[1].x rhs[np-1] = (8 * knots[np - 1].x + knots[np].x) / 2.0 # Get first control points X-values xx = __getFirstControlPoints(rhs); # Set right hand side Y values for i in range(1, np-1): rhs[i] = 4 * knots[i].y + 2 * knots[i + 1].y rhs[0] = knots[0].y + 2 * knots[1].y rhs[np-1] = (8 * knots[np - 1].y + knots[np].y) / 2.0 # Get first control points Y-values yy = __getFirstControlPoints(rhs); for i in range(0, np-1): # First control point pnt = 
Point('reference', '%s-fcp%d' % (name, fcpnum), xx[i], yy[i], styledef = 'controlpoint_style') firstControlPoints[i] = pnt fcpnum = fcpnum + 1 pnt = Point('reference', '%s-scp%d' % (name, scpnum), styledef = 'controlpoint_style') pnt.x = 2 * knots[i + 1].x - xx[i + 1] pnt.y = 2 * knots[i + 1].y - yy[i + 1] secondControlPoints[i] = pnt scpnum = scpnum + 1 # now do the last point i = np-1 pnt = Point('reference', '%s-fcp%d' % (name, fcpnum), xx[i], yy[i], styledef = 'controlpoint_style') firstControlPoints[i] = pnt fcpnum = fcpnum + 1 pnt = Point('reference', '%s-scp%d' % (name, scpnum), styledef = 'controlpoint_style') pnt.x = (knots[np].x + xx[np - 1]) / 2 pnt.y = (knots[np].y + yy[np - 1]) / 2 secondControlPoints[i] = pnt scpnum = scpnum + 1 #(fcp, scp) = FudgeControlPoints(knots, firstControlPoints, secondControlPoints, .3333) return (firstControlPoints, secondControlPoints) def FudgeControlPoints(knots, fcp, scp, percentage): """ Adjust the control point locations so that they lie along the same lines as before (tangent to the curve at the knot) but are moved to a distance from the knot which is a percentage of the length of the line between the knots """ if len(knots) < 2: raise ValueError("At least two points required for input") if len(knots) != len(fcp)+1: print 'knotlen = ', len(knots), 'fcplen = ', len(fcp) raise ValueError("knot list must be one longer than fcp list") if len(knots) != len(scp)+1: raise ValueError("knot list must be one longer than scp list") if len(knots) == 2: # Adjustment doesn't make a difference in this case return ll = [] for i in range(len(knots)-1): # get the length between each knot ll.append(lineLengthP(knots[i], knots[i+1])) minll = min(ll) maxll = max(ll) for i in range(len(knots)-1): # get the length between each knot, and save the max and min lll = ll[i] fcpl = lineLengthP(knots[i], fcp[i]) scpl = lineLengthP(scp[i], knots[i+1]) # Now calculate the desired length and change the control point locations dll = lll * percentage 
x, y = pntOnLine(knots[i].x, knots[i].y, fcp[i].x, fcp[i].y, dll, rotation = 0) fcp[i].x = x fcp[i].y = y dll = lll * percentage x, y = pntOnLine(knots[i+1].x, knots[i+1].y, scp[i].x, scp[i].y, dll, rotation = 0) scp[i].x = x scp[i].y = y fcpl = lineLengthP(knots[i], fcp[i]) scpl = lineLengthP(scp[i], knots[i+1]) return (fcp, scp) def curveThroughPoints(name, path_svg, pointlist): fcp, scp = GetCurveControlPoints(name, pointlist) if False: for i in range(0, len(fcp)): print ' point: %f %f' % (pointlist[i].x / IN_TO_PT, pointlist[i].y / IN_TO_PT) print ' fcp: %f %f' % (fcp[i].x / IN_TO_PT, fcp[i].y / IN_TO_PT) print ' scp: %f %f' % (scp[i].x / IN_TO_PT, scp[i].y / IN_TO_PT) print ' point: %f %f' % (pointlist[-1].x / IN_TO_PT, pointlist[-1].y / IN_TO_PT) moveP(path_svg, pointlist[0]) for i in range (1, len(pointlist)): path_svg.appendCubicCurveToPath(fcp[i-1].x, fcp[i-1].y, scp[i-1].x, scp[i-1].y, pointlist[i].x, pointlist[i].y, relative = False)
gpl-3.0
andris210296/andris-projeto
backend/venv/lib/python2.7/site-packages/unidecode/x01f.py
252
3899
data = ( 'a', # 0x00 'a', # 0x01 'a', # 0x02 'a', # 0x03 'a', # 0x04 'a', # 0x05 'a', # 0x06 'a', # 0x07 'A', # 0x08 'A', # 0x09 'A', # 0x0a 'A', # 0x0b 'A', # 0x0c 'A', # 0x0d 'A', # 0x0e 'A', # 0x0f 'e', # 0x10 'e', # 0x11 'e', # 0x12 'e', # 0x13 'e', # 0x14 'e', # 0x15 '[?]', # 0x16 '[?]', # 0x17 'E', # 0x18 'E', # 0x19 'E', # 0x1a 'E', # 0x1b 'E', # 0x1c 'E', # 0x1d '[?]', # 0x1e '[?]', # 0x1f 'e', # 0x20 'e', # 0x21 'e', # 0x22 'e', # 0x23 'e', # 0x24 'e', # 0x25 'e', # 0x26 'e', # 0x27 'E', # 0x28 'E', # 0x29 'E', # 0x2a 'E', # 0x2b 'E', # 0x2c 'E', # 0x2d 'E', # 0x2e 'E', # 0x2f 'i', # 0x30 'i', # 0x31 'i', # 0x32 'i', # 0x33 'i', # 0x34 'i', # 0x35 'i', # 0x36 'i', # 0x37 'I', # 0x38 'I', # 0x39 'I', # 0x3a 'I', # 0x3b 'I', # 0x3c 'I', # 0x3d 'I', # 0x3e 'I', # 0x3f 'o', # 0x40 'o', # 0x41 'o', # 0x42 'o', # 0x43 'o', # 0x44 'o', # 0x45 '[?]', # 0x46 '[?]', # 0x47 'O', # 0x48 'O', # 0x49 'O', # 0x4a 'O', # 0x4b 'O', # 0x4c 'O', # 0x4d '[?]', # 0x4e '[?]', # 0x4f 'u', # 0x50 'u', # 0x51 'u', # 0x52 'u', # 0x53 'u', # 0x54 'u', # 0x55 'u', # 0x56 'u', # 0x57 '[?]', # 0x58 'U', # 0x59 '[?]', # 0x5a 'U', # 0x5b '[?]', # 0x5c 'U', # 0x5d '[?]', # 0x5e 'U', # 0x5f 'o', # 0x60 'o', # 0x61 'o', # 0x62 'o', # 0x63 'o', # 0x64 'o', # 0x65 'o', # 0x66 'o', # 0x67 'O', # 0x68 'O', # 0x69 'O', # 0x6a 'O', # 0x6b 'O', # 0x6c 'O', # 0x6d 'O', # 0x6e 'O', # 0x6f 'a', # 0x70 'a', # 0x71 'e', # 0x72 'e', # 0x73 'e', # 0x74 'e', # 0x75 'i', # 0x76 'i', # 0x77 'o', # 0x78 'o', # 0x79 'u', # 0x7a 'u', # 0x7b 'o', # 0x7c 'o', # 0x7d '[?]', # 0x7e '[?]', # 0x7f 'a', # 0x80 'a', # 0x81 'a', # 0x82 'a', # 0x83 'a', # 0x84 'a', # 0x85 'a', # 0x86 'a', # 0x87 'A', # 0x88 'A', # 0x89 'A', # 0x8a 'A', # 0x8b 'A', # 0x8c 'A', # 0x8d 'A', # 0x8e 'A', # 0x8f 'e', # 0x90 'e', # 0x91 'e', # 0x92 'e', # 0x93 'e', # 0x94 'e', # 0x95 'e', # 0x96 'e', # 0x97 'E', # 0x98 'E', # 0x99 'E', # 0x9a 'E', # 0x9b 'E', # 0x9c 'E', # 0x9d 'E', # 0x9e 'E', # 0x9f 'o', # 0xa0 'o', # 0xa1 'o', # 0xa2 'o', # 
0xa3 'o', # 0xa4 'o', # 0xa5 'o', # 0xa6 'o', # 0xa7 'O', # 0xa8 'O', # 0xa9 'O', # 0xaa 'O', # 0xab 'O', # 0xac 'O', # 0xad 'O', # 0xae 'O', # 0xaf 'a', # 0xb0 'a', # 0xb1 'a', # 0xb2 'a', # 0xb3 'a', # 0xb4 '[?]', # 0xb5 'a', # 0xb6 'a', # 0xb7 'A', # 0xb8 'A', # 0xb9 'A', # 0xba 'A', # 0xbb 'A', # 0xbc '\'', # 0xbd 'i', # 0xbe '\'', # 0xbf '~', # 0xc0 '"~', # 0xc1 'e', # 0xc2 'e', # 0xc3 'e', # 0xc4 '[?]', # 0xc5 'e', # 0xc6 'e', # 0xc7 'E', # 0xc8 'E', # 0xc9 'E', # 0xca 'E', # 0xcb 'E', # 0xcc '\'`', # 0xcd '\'\'', # 0xce '\'~', # 0xcf 'i', # 0xd0 'i', # 0xd1 'i', # 0xd2 'i', # 0xd3 '[?]', # 0xd4 '[?]', # 0xd5 'i', # 0xd6 'i', # 0xd7 'I', # 0xd8 'I', # 0xd9 'I', # 0xda 'I', # 0xdb '[?]', # 0xdc '`\'', # 0xdd '`\'', # 0xde '`~', # 0xdf 'u', # 0xe0 'u', # 0xe1 'u', # 0xe2 'u', # 0xe3 'R', # 0xe4 'R', # 0xe5 'u', # 0xe6 'u', # 0xe7 'U', # 0xe8 'U', # 0xe9 'U', # 0xea 'U', # 0xeb 'R', # 0xec '"`', # 0xed '"\'', # 0xee '`', # 0xef '[?]', # 0xf0 '[?]', # 0xf1 'o', # 0xf2 'o', # 0xf3 'o', # 0xf4 '[?]', # 0xf5 'o', # 0xf6 'o', # 0xf7 'O', # 0xf8 'O', # 0xf9 'O', # 0xfa 'O', # 0xfb 'O', # 0xfc '\'', # 0xfd '`', # 0xfe )
mit
sanjeevtripurari/hue
desktop/core/ext-py/tablib-0.10.0/tablib/packages/odf/load.py
67
3959
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (C) 2007-2008 Søren Roug, European Environment Agency # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # Contributor(s): # # This script is to be embedded in opendocument.py later # The purpose is to read an ODT/ODP/ODS file and create the datastructure # in memory. The user should then be able to make operations and then save # the structure again. 
from xml.sax import make_parser,handler from xml.sax.xmlreader import InputSource import xml.sax.saxutils from element import Element from namespaces import OFFICENS from cStringIO import StringIO # # Parse the XML files # class LoadParser(handler.ContentHandler): """ Extract headings from content.xml of an ODT file """ triggers = ( (OFFICENS, 'automatic-styles'), (OFFICENS, 'body'), (OFFICENS, 'font-face-decls'), (OFFICENS, 'master-styles'), (OFFICENS, 'meta'), (OFFICENS, 'scripts'), (OFFICENS, 'settings'), (OFFICENS, 'styles') ) def __init__(self, document): self.doc = document self.data = [] self.level = 0 self.parse = False def characters(self, data): if self.parse == False: return self.data.append(data) def startElementNS(self, tag, qname, attrs): if tag in self.triggers: self.parse = True if self.doc._parsing != "styles.xml" and tag == (OFFICENS, 'font-face-decls'): self.parse = False if self.parse == False: return self.level = self.level + 1 # Add any accumulated text content content = ''.join(self.data) if len(content.strip()) > 0: self.parent.addText(content, check_grammar=False) self.data = [] # Create the element attrdict = {} for (att,value) in attrs.items(): attrdict[att] = value try: e = Element(qname = tag, qattributes=attrdict, check_grammar=False) self.curr = e except AttributeError, v: print "Error: %s" % v if tag == (OFFICENS, 'automatic-styles'): e = self.doc.automaticstyles elif tag == (OFFICENS, 'body'): e = self.doc.body elif tag == (OFFICENS, 'master-styles'): e = self.doc.masterstyles elif tag == (OFFICENS, 'meta'): e = self.doc.meta elif tag == (OFFICENS,'scripts'): e = self.doc.scripts elif tag == (OFFICENS,'settings'): e = self.doc.settings elif tag == (OFFICENS,'styles'): e = self.doc.styles elif self.doc._parsing == "styles.xml" and tag == (OFFICENS, 'font-face-decls'): e = self.doc.fontfacedecls elif hasattr(self,'parent'): self.parent.addElement(e, check_grammar=False) self.parent = e def endElementNS(self, tag, qname): if self.parse 
== False: return self.level = self.level - 1 str = ''.join(self.data) if len(str.strip()) > 0: self.curr.addText(str, check_grammar=False) self.data = [] self.curr = self.curr.parentNode self.parent = self.curr if tag in self.triggers: self.parse = False
apache-2.0
hainm/statsmodels
statsmodels/examples/ex_shrink_pickle.py
34
2364
# -*- coding: utf-8 -*- """ Created on Fri Mar 09 16:00:27 2012 Author: Josef Perktold """ from statsmodels.compat.python import StringIO import numpy as np import statsmodels.api as sm nobs = 10000 np.random.seed(987689) x = np.random.randn(nobs, 3) x = sm.add_constant(x) y = x.sum(1) + np.random.randn(nobs) xf = 0.25 * np.ones((2,4)) model = sm.OLS(y, x) #y_count = np.random.poisson(np.exp(x.sum(1)-x.mean())) #model = sm.Poisson(y_count, x)#, exposure=np.ones(nobs), offset=np.zeros(nobs)) #bug with default results = model.fit() #print results.predict(xf) print(results.model.predict(results.params, xf)) results.summary() shrinkit = 1 if shrinkit: results.remove_data() from statsmodels.compat.python import cPickle fname = 'try_shrink%d_ols.pickle' % shrinkit fh = open(fname, 'w') cPickle.dump(results._results, fh) #pickling wrapper doesn't work fh.close() fh = open(fname, 'r') results2 = cPickle.load(fh) fh.close() print(results2.predict(xf)) print(results2.model.predict(results.params, xf)) y_count = np.random.poisson(np.exp(x.sum(1)-x.mean())) model = sm.Poisson(y_count, x)#, exposure=np.ones(nobs), offset=np.zeros(nobs)) #bug with default results = model.fit(method='bfgs') results.summary() print(results.model.predict(results.params, xf, exposure=1, offset=0)) if shrinkit: results.remove_data() else: #work around pickling bug results.mle_settings['callback'] = None import pickle fname = 'try_shrink%d_poisson.pickle' % shrinkit fh = open(fname, 'w') cPickle.dump(results._results, fh) #pickling wrapper doesn't work fh.close() fh = open(fname, 'r') results3 = cPickle.load(fh) fh.close() print(results3.predict(xf, exposure=1, offset=0)) print(results3.model.predict(results.params, xf, exposure=1, offset=0)) def check_pickle(obj): fh =StringIO() cPickle.dump(obj, fh) plen = fh.pos fh.seek(0,0) res = cPickle.load(fh) fh.close() return res, plen def test_remove_data_pickle(results, xf): res, l = check_pickle(results) #Note: 10000 is just a guess for the limit on the 
length of the pickle np.testing.assert_(l < 10000, msg='pickle length not %d < %d' % (l, 10000)) pred1 = results.predict(xf, exposure=1, offset=0) pred2 = res.predict(xf, exposure=1, offset=0) np.testing.assert_equal(pred2, pred1) test_remove_data_pickle(results._results, xf)
bsd-3-clause
steabert/molpy
molpy/molden.py
1
5280
# molden.py -- Molden format # Implements the Molden file format, specification can be found here: # http://www.cmbi.ru.nl/molden/molden_format.html # # Copyright (c) 2016 Steven Vancoillie # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Written by Steven Vancoillie. # import copy import numpy as np from . import export @export class MolcasMOLDEN: mx_angmom = 4 def __init__(self, filename, mode, strict=True): self.f = open(filename, mode) self.strict = strict def close(self): self.f.close() def write(self, wfn): """ write wavefunction data to file """ if wfn.basis_set is None: raise DataNotAvailable('The fchk format needs basis set info.') n_atoms, nuclear_charge = wfn.nuclear_info() n_electrons, n_a, n_b, spinmult, electronic_charge = wfn.electronic_info() if np.isnan(spinmult): spinmult = 1 if np.isnan(electronic_charge): charge = 0 n_electrons = int(nuclear_charge) n_b = (n_electrons - (spinmult - 1)) // 2 n_a = n_electrons - n_b else: charge = nuclear_charge + electronic_charge self.f.write('[Molden Format]\n') if not self.strict: self.f.write('[N_ATOMS]\n') self.write_natoms(n_atoms) self.f.write('[Atoms] (AU)\n') basis = wfn.basis_set labels = basis.center_labels charges = basis.center_charges coords = basis.center_coordinates self.write_atoms(labels, charges, coords) self.f.write('[5D]\n') 
self.f.write('[7F]\n') self.f.write('[9G]\n') if not self.strict: self.f.write('[CHARGE] (MULLIKEN)\n') mulliken_charges = wfn.mulliken_charges() if np.logical_or.reduce(np.isnan(mulliken_charges)): mulliken_charges.fill(0) self.write_mulliken(mulliken_charges) self.f.write('[GTO] (AU)\n') self.write_gto(wfn.basis_set.primitive_tree) self.f.write('[MO]\n') for kind, orbitals in wfn.mo.items(): orbitals = orbitals.sort_basis(order='molden') orbitals = orbitals.limit_basis(limit=self.mx_angmom) orbitals.sanitize() self.write_mo(orbitals, kind=kind) def write_natoms(self, natoms): self.f.write('{:12d}\n'.format(natoms)) def write_atoms(self, labels, charges, coords): center_properties = zip(labels, charges, coords) template = '{:s} {:7d} {:7d} {:14.7f} {:14.7f} {:14.7f}\n' for i, (label, charge, coord,) in enumerate(center_properties): label_nospaces = label.replace(' ','') self.f.write(template.format(label_nospaces, i+1, int(charge), *coord)) def write_mulliken(self, charges): for charge in charges: self.f.write('{:f}\n'.format(charge)) def write_gto(self, basisset): l = ['s', 'p', 'd', 'f', 'g', 'h', 'i', 'k', 'l', 'm', 'n'] for center in basisset: self.f.write('{:4d}\n'.format(center['id'])) for angmom in center['angmoms']: if angmom['value'] > self.mx_angmom: continue for shell in angmom['shells']: pgto_selection = np.where(shell['coefficients'] > 1.0e-15) exponents = shell['exponents'][pgto_selection] coefficients = shell['coefficients'][pgto_selection] self.f.write(' {:1s}{:4d}\n'.format(l[angmom['value']], len(exponents))) for exp, coef, in zip(exponents, coefficients): self.f.write('{:17.9e} {:17.9e}\n'.format(exp, coef)) self.f.write('\n') def write_mo(self, orbitals, kind='restricted'): if kind == 'restricted': spin = 'alpha' else: spin = kind for irrep, ene, occ, mo in zip( orbitals.irreps, orbitals.energies, orbitals.occupations, orbitals.coefficients.T): self.f.write('Sym = {:d}\n'.format(irrep)) self.f.write('Ene = {:10.4f}\n'.format(ene)) 
self.f.write('Spin = {:s}\n'.format(spin)) self.f.write('Occup = {:10.5f}\n'.format(occ)) for idx, coef, in enumerate(mo): self.f.write('{:4d} {:16.8f}\n'.format(idx+1, coef)) @export class MolcasMOLDENGV(MolcasMOLDEN): def __init__(self, filename, mode): super().__init__(filename, mode, strict=False)
gpl-2.0
trondhindenes/ansible
lib/ansible/modules/cloud/misc/cloud_init_data_facts.py
33
3393
#!/usr/bin/python # -*- coding: utf-8 -*- # # (c) 2018, René Moser <mail@renemoser.net> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: cloud_init_data_facts short_description: Retrieve facts of cloud-init. description: - Gathers facts by reading the status.json and result.json of cloud-init. version_added: 2.6 author: René Moser (@resmo) options: filter: description: - Filter facts choices: [ status, result ] notes: - See http://cloudinit.readthedocs.io/ for more information abount cloud-init. ''' EXAMPLES = ''' - name: Gather all facts of cloud init cloud_init_data_facts: register: result - debug: var: result - name: Wait for cloud init to finish cloud_init_data_facts: filter: status register: res until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage" retries: 50 delay: 5 ''' RETURN = ''' --- cloud_init_data_facts: description: Facts of result and status. 
returned: success type: dict sample: '{ "status": { "v1": { "datasource": "DataSourceCloudStack", "errors": [] }, "result": { "v1": { "datasource": "DataSourceCloudStack", "init": { "errors": [], "finished": 1522066377.0185432, "start": 1522066375.2648022 }, "init-local": { "errors": [], "finished": 1522066373.70919, "start": 1522066373.4726632 }, "modules-config": { "errors": [], "finished": 1522066380.9097016, "start": 1522066379.0011985 }, "modules-final": { "errors": [], "finished": 1522066383.56594, "start": 1522066382.3449218 }, "stage": null } }' ''' import os from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_text CLOUD_INIT_PATH = "/var/lib/cloud/data/" def gather_cloud_init_data_facts(module): res = { 'cloud_init_data_facts': dict() } for i in ['result', 'status']: filter = module.params.get('filter') if filter is None or filter == i: res['cloud_init_data_facts'][i] = dict() json_file = CLOUD_INIT_PATH + i + '.json' if os.path.exists(json_file): f = open(json_file, 'rb') contents = to_text(f.read(), errors='surrogate_or_strict') f.close() if contents: res['cloud_init_data_facts'][i] = module.from_json(contents) return res def main(): module = AnsibleModule( argument_spec=dict( filter=dict(choices=['result', 'status']), ), supports_check_mode=True, ) facts = gather_cloud_init_data_facts(module) result = dict(changed=False, ansible_facts=facts, **facts) module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
ZobairAlijan/osf.io
scripts/migrate_cloudfiles_container.py
54
4081
#!/usr/bin/env python # encoding: utf-8 """Migrate files to correct container. Note: Must use Rackspace credentials with access to both test and production containers. Note: Must have pyrax installed to run. Run dry run: python -m scripts.migrate_cloudfiles_container dry Run migration: python -m scripts.migrate_cloudfiles_container Log: Run by sloria, jmcarp, and icereval on 2015-02-10 at 1:15 PM. 822 file version records were copied and migrated. A migration log was saved to the migration-logs directory. """ import sys import logging import pyrax from modularodm import Q from website.app import init_app from website.addons.osfstorage import model from scripts import utils as script_utils from scripts.osfstorage import settings as storage_settings TEST_CONTAINER_NAME = 'osf_uploads_test' PROD_CONTAINER_NAME = 'osf_storage_prod' test_container = None prod_container = None logger = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) def migrate_version(version): if version.location['container'] != TEST_CONTAINER_NAME: raise ValueError('Version is already in correct container') key = test_container.get_object(version.location['object']) key.copy(prod_container) logger.info('Setting container of OsfStorageFileVersion {0} to {1}'.format( version._id, PROD_CONTAINER_NAME) ) version.location['container'] = PROD_CONTAINER_NAME version.save() def get_targets(): query = Q('location.container', 'eq', TEST_CONTAINER_NAME) return model.OsfStorageFileVersion.find(query) def main(dry_run): versions = get_targets() for version in versions: logger.info('Migrating OsfStorageFileVersion {0}'.format(version._id)) if not dry_run: migrate_version(version) if __name__ == '__main__': init_app(set_backends=True, routes=False) dry_run = 'dry' in sys.argv # Log to file if not dry_run: script_utils.add_file_logger(logger, __file__) # Authenticate to Rackspace pyrax.settings.set('identity_type', 'rackspace') pyrax.set_credentials( storage_settings.USERNAME, 
storage_settings.API_KEY, region=storage_settings.REGION ) # Look up containers test_container = pyrax.cloudfiles.get_container(TEST_CONTAINER_NAME) prod_container = pyrax.cloudfiles.get_container(PROD_CONTAINER_NAME) main(dry_run=dry_run) import mock from nose.tools import * # noqa from tests.base import OsfTestCase from website.addons.osfstorage.tests.factories import FileVersionFactory class TestMigrateContainer(OsfTestCase): def tearDown(self): super(TestMigrateContainer, self).tearDown() model.OsfStorageFileVersion.remove() def test_get_targets(self): versions = [FileVersionFactory() for _ in range(5)] versions[0].location['container'] = TEST_CONTAINER_NAME versions[0].save() targets = get_targets() assert_equal(len(targets), 1) assert_equal(targets[0], versions[0]) @mock.patch('scripts.migrate_cloudfiles_container.prod_container') @mock.patch('scripts.migrate_cloudfiles_container.test_container') def test_migrate_version(self, mock_test_container, mock_prod_container): mock_test_object = mock.Mock() mock_test_container.get_object.return_value = mock_test_object version = FileVersionFactory() version.location['container'] = TEST_CONTAINER_NAME version.save() migrate_version(version) mock_test_container.get_object.assert_called_with(version.location['object']) mock_test_object.copy.assert_called_with(mock_prod_container) version.reload() assert_equal(version.location['container'], PROD_CONTAINER_NAME) assert_equal(len(get_targets()), 0) def test_dry_run(self): versions = [FileVersionFactory() for _ in range(5)] versions[0].location['container'] = TEST_CONTAINER_NAME versions[0].save() main(dry_run=True) assert_equal(len(get_targets()), 1)
apache-2.0
SMALLplayer/smallplayer-image-creator
storage/.xbmc/addons/script.cu.lrclyrics/resources/lib/culrcscrapers/alsong/lyricsScraper.py
1
3106
# -*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*- """ Scraper for http://lyrics.alsong.co.kr/ edge """ import sys import socket import hashlib import urllib2 import xml.dom.minidom as xml from utilities import * from audiofile import AudioFile __title__ = "Alsong" __priority__ = '115' __lrc__ = True socket.setdefaulttimeout(10) ALSONG_URL = "http://lyrics.alsong.net/alsongwebservice/service1.asmx" ALSONG_TMPL = '''\ <?xml version="1.0" encoding="UTF-8"?> <SOAP-ENV:Envelope xmlns:SOAP-ENV="http://www.w3.org/2003/05/soap-envelope" xmlns:SOAP-ENC="http://www.w3.org/2003/05/soap-encoding" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:ns2="ALSongWebServer/Service1Soap" xmlns:ns1="ALSongWebServer" xmlns:ns3="ALSongWebServer/Service1Soap12"> <SOAP-ENV:Body> <ns1:GetLyric5> <ns1:stQuery> <ns1:strChecksum>%s</ns1:strChecksum> <ns1:strVersion>2.2</ns1:strVersion> <ns1:strMACAddress /> <ns1:strIPAddress /> </ns1:stQuery> </ns1:GetLyric5> </SOAP-ENV:Body> </SOAP-ENV:Envelope> ''' class alsongClient(object): ''' privide alsong specific function, such as key from mp3 ''' @staticmethod def GetKeyFromFile(file): musf = AudioFile() musf.Open(file) ext = file[file.rfind('.'):].lower() if ext == '.ogg': buf = musf.ReadAudioStream(160*1024,11) # 160KB excluding header elif ext == '.wma': buf = musf.ReadAudioStream(160*1024,24) # 160KB excluding header else: buf = musf.ReadAudioStream(160*1024) # 160KB from audio data musf.Close() # calculate hashkey m = hashlib.md5(); m.update(buf); return m.hexdigest() class LyricsFetcher: def __init__( self ): self.base_url = "http://lyrics.alsong.co.kr/" def get_lyrics(self, song): log( "%s: searching lyrics for %s - %s" % (__title__, song.artist, song.title)) lyrics = Lyrics() lyrics.song = song lyrics.source = __title__ lyrics.lrc = __lrc__ try: key = alsongClient.GetKeyFromFile( song.filepath ) if not key: return None headers = { 'Content-Type' : 'text/xml; 
charset=utf-8' } request = urllib2.Request(ALSONG_URL, ALSONG_TMPL % key, headers) response = urllib2.urlopen(request) Page = response.read() except: log( "%s: %s::%s (%d) [%s]" % ( __title__, self.__class__.__name__, sys.exc_info()[ 2 ].tb_frame.f_code.co_name, sys.exc_info()[ 2 ].tb_lineno, sys.exc_info()[ 1 ] )) return None tree = xml.parseString( Page ) if tree.getElementsByTagName("strInfoID")[0].childNodes[0].data == '-1': return None lyr = tree.getElementsByTagName("strLyric")[0].childNodes[0].data.replace('<br>','\n') lyrics.lyrics = lyr.encode('utf-8') return lyrics
gpl-2.0
manisandro/QGIS
python/plugins/processing/gui/menus.py
4
13593
# -*- coding: utf-8 -*- """ *************************************************************************** menus.py --------------------- Date : February 2016 Copyright : (C) 2016 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Victor Olaya' __date__ = 'February 2016' __copyright__ = '(C) 2016, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import os from qgis.PyQt.QtCore import QCoreApplication from qgis.PyQt.QtWidgets import QAction, QMenu from qgis.PyQt.QtGui import QIcon from qgis.PyQt.QtWidgets import QApplication from processing.core.ProcessingConfig import ProcessingConfig, Setting from processing.gui.MessageDialog import MessageDialog from processing.gui.AlgorithmDialog import AlgorithmDialog from qgis.utils import iface from qgis.core import QgsApplication, QgsMessageLog, QgsStringUtils, QgsProcessingAlgorithm from qgis.gui import QgsGui from processing.gui.MessageBarProgress import MessageBarProgress from processing.gui.AlgorithmExecutor import execute from processing.gui.Postprocessing import handleAlgorithmResults from processing.core.Processing import Processing from processing.tools import dataobjects algorithmsToolbar = None menusSettingsGroup = 'Menus' defaultMenuEntries = {} vectorMenu = QApplication.translate('MainWindow', 'Vect&or') analysisToolsMenu = vectorMenu + "/" + Processing.tr('&Analysis Tools') defaultMenuEntries.update({'qgis:distancematrix': analysisToolsMenu, 'qgis:sumlinelengths': analysisToolsMenu, 'qgis:countpointsinpolygon': analysisToolsMenu, 
'qgis:listuniquevalues': analysisToolsMenu, 'qgis:basicstatisticsforfields': analysisToolsMenu, 'qgis:nearestneighbouranalysis': analysisToolsMenu, 'native:meancoordinates': analysisToolsMenu, 'native:lineintersections': analysisToolsMenu}) researchToolsMenu = vectorMenu + "/" + Processing.tr('&Research Tools') defaultMenuEntries.update({'qgis:creategrid': researchToolsMenu, 'qgis:randomselection': researchToolsMenu, 'qgis:randomselectionwithinsubsets': researchToolsMenu, 'qgis:randompointsinextent': researchToolsMenu, 'qgis:randompointsinlayerbounds': researchToolsMenu, 'qgis:randompointsinsidepolygons': researchToolsMenu, 'qgis:regularpoints': researchToolsMenu, 'native:selectbylocation': researchToolsMenu, 'qgis:polygonfromlayerextent': researchToolsMenu}) geoprocessingToolsMenu = vectorMenu + "/" + Processing.tr('&Geoprocessing Tools') defaultMenuEntries.update({'native:buffer': geoprocessingToolsMenu, 'native:convexhull': geoprocessingToolsMenu, 'native:intersection': geoprocessingToolsMenu, 'native:union': geoprocessingToolsMenu, 'native:symmetricaldifference': geoprocessingToolsMenu, 'native:clip': geoprocessingToolsMenu, 'native:difference': geoprocessingToolsMenu, 'native:dissolve': geoprocessingToolsMenu, 'qgis:eliminateselectedpolygons': geoprocessingToolsMenu}) geometryToolsMenu = vectorMenu + "/" + Processing.tr('G&eometry Tools') defaultMenuEntries.update({'qgis:checkvalidity': geometryToolsMenu, 'qgis:exportaddgeometrycolumns': geometryToolsMenu, 'native:centroids': geometryToolsMenu, 'qgis:delaunaytriangulation': geometryToolsMenu, 'qgis:voronoipolygons': geometryToolsMenu, 'native:simplifygeometries': geometryToolsMenu, 'qgis:densifygeometries': geometryToolsMenu, 'native:multiparttosingleparts': geometryToolsMenu, 'native:collect': geometryToolsMenu, 'native:polygonstolines': geometryToolsMenu, 'qgis:linestopolygons': geometryToolsMenu, 'native:extractvertices': geometryToolsMenu}) managementToolsMenu = vectorMenu + "/" + Processing.tr('&Data 
Management Tools') defaultMenuEntries.update({'native:reprojectlayer': managementToolsMenu, 'qgis:joinattributesbylocation': managementToolsMenu, 'qgis:splitvectorlayer': managementToolsMenu, 'native:mergevectorlayers': managementToolsMenu, 'qgis:createspatialindex': managementToolsMenu}) rasterMenu = QApplication.translate('MainWindow', '&Raster') projectionsMenu = rasterMenu + "/" + Processing.tr('Projections') defaultMenuEntries.update({'gdal:warpreproject': projectionsMenu, 'gdal:extractprojection': projectionsMenu, 'gdal:assignprojection': projectionsMenu}) conversionMenu = rasterMenu + "/" + Processing.tr('Conversion') defaultMenuEntries.update({'gdal:rasterize': conversionMenu, 'gdal:polygonize': conversionMenu, 'gdal:translate': conversionMenu, 'gdal:rgbtopct': conversionMenu, 'gdal:pcttorgb': conversionMenu}) extractionMenu = rasterMenu + "/" + Processing.tr('Extraction') defaultMenuEntries.update({'gdal:contour': extractionMenu, 'gdal:cliprasterbyextent': extractionMenu, 'gdal:cliprasterbymasklayer': extractionMenu}) analysisMenu = rasterMenu + "/" + Processing.tr('Analysis') defaultMenuEntries.update({'gdal:sieve': analysisMenu, 'gdal:nearblack': analysisMenu, 'gdal:fillnodata': analysisMenu, 'gdal:proximity': analysisMenu, 'gdal:griddatametrics': analysisMenu, 'gdal:gridaverage': analysisMenu, 'gdal:gridinversedistance': analysisMenu, 'gdal:gridnearestneighbor': analysisMenu, 'gdal:aspect': analysisMenu, 'gdal:hillshade': analysisMenu, 'gdal:roughness': analysisMenu, 'gdal:slope': analysisMenu, 'gdal:tpitopographicpositionindex': analysisMenu, 'gdal:triterrainruggednessindex': analysisMenu}) miscMenu = rasterMenu + "/" + Processing.tr('Miscellaneous') defaultMenuEntries.update({'gdal:buildvirtualraster': miscMenu, 'gdal:merge': miscMenu, 'gdal:gdalinfo': miscMenu, 'gdal:overviews': miscMenu, 'gdal:tileindex': miscMenu}) def initializeMenus(): for m in defaultMenuEntries.keys(): alg = QgsApplication.processingRegistry().algorithmById(m) if alg is None or 
alg.id() != m: QgsMessageLog.logMessage(Processing.tr('Invalid algorithm ID for menu: {}').format(m), Processing.tr('Processing')) for provider in QgsApplication.processingRegistry().providers(): for alg in provider.algorithms(): d = defaultMenuEntries.get(alg.id(), "") setting = Setting(menusSettingsGroup, "MENU_" + alg.id(), "Menu path", d) ProcessingConfig.addSetting(setting) setting = Setting(menusSettingsGroup, "BUTTON_" + alg.id(), "Add button", False) ProcessingConfig.addSetting(setting) setting = Setting(menusSettingsGroup, "ICON_" + alg.id(), "Icon", "", valuetype=Setting.FILE) ProcessingConfig.addSetting(setting) ProcessingConfig.readSettings() def updateMenus(): removeMenus() QCoreApplication.processEvents() createMenus() def createMenus(): for alg in QgsApplication.processingRegistry().algorithms(): menuPath = ProcessingConfig.getSetting("MENU_" + alg.id()) addButton = ProcessingConfig.getSetting("BUTTON_" + alg.id()) icon = ProcessingConfig.getSetting("ICON_" + alg.id()) if icon and os.path.exists(icon): icon = QIcon(icon) else: icon = None if menuPath: paths = menuPath.split("/") addAlgorithmEntry(alg, paths[0], paths[-1], addButton=addButton, icon=icon) def removeMenus(): for alg in QgsApplication.processingRegistry().algorithms(): menuPath = ProcessingConfig.getSetting("MENU_" + alg.id()) if menuPath: paths = menuPath.split("/") removeAlgorithmEntry(alg, paths[0], paths[-1]) def addAlgorithmEntry(alg, menuName, submenuName, actionText=None, icon=None, addButton=False): if actionText is None: if (QgsGui.higFlags() & QgsGui.HigMenuTextIsTitleCase) and not (alg.flags() & QgsProcessingAlgorithm.FlagDisplayNameIsLiteral): alg_title = QgsStringUtils.capitalize(alg.displayName(), QgsStringUtils.TitleCase) else: alg_title = alg.displayName() actionText = alg_title + QCoreApplication.translate('Processing', '…') action = QAction(icon or alg.icon(), actionText, iface.mainWindow()) alg_id = alg.id() action.setData(alg_id) action.triggered.connect(lambda: 
_executeAlgorithm(alg_id)) action.setObjectName("mProcessingUserMenu_%s" % alg_id) if menuName: menu = getMenu(menuName, iface.mainWindow().menuBar()) submenu = getMenu(submenuName, menu) submenu.addAction(action) if addButton: global algorithmsToolbar if algorithmsToolbar is None: algorithmsToolbar = iface.addToolBar(QCoreApplication.translate('MainWindow', 'Processing Algorithms')) algorithmsToolbar.setObjectName("ProcessingAlgorithms") algorithmsToolbar.setToolTip(QCoreApplication.translate('MainWindow', 'Processing Algorithms Toolbar')) algorithmsToolbar.addAction(action) def removeAlgorithmEntry(alg, menuName, submenuName, delButton=True): if menuName: menu = getMenu(menuName, iface.mainWindow().menuBar()) subMenu = getMenu(submenuName, menu) action = findAction(subMenu.actions(), alg) if action is not None: subMenu.removeAction(action) if len(subMenu.actions()) == 0: subMenu.deleteLater() if delButton: global algorithmsToolbar if algorithmsToolbar is not None: action = findAction(algorithmsToolbar.actions(), alg) if action is not None: algorithmsToolbar.removeAction(action) def _executeAlgorithm(alg_id): alg = QgsApplication.processingRegistry().createAlgorithmById(alg_id) if alg is None: dlg = MessageDialog() dlg.setTitle(Processing.tr('Missing Algorithm')) dlg.setMessage( Processing.tr('The algorithm "{}" is no longer available. (Perhaps a plugin was uninstalled?)').format(alg_id)) dlg.exec_() return ok, message = alg.canExecute() if not ok: dlg = MessageDialog() dlg.setTitle(Processing.tr('Missing Dependency')) dlg.setMessage( Processing.tr('<h3>Missing dependency. 
This algorithm cannot ' 'be run :-( </h3>\n{0}').format(message)) dlg.exec_() return if (alg.countVisibleParameters()) > 0: dlg = alg.createCustomParametersWidget(parent=iface.mainWindow()) if not dlg: dlg = AlgorithmDialog(alg, parent=iface.mainWindow()) canvas = iface.mapCanvas() prevMapTool = canvas.mapTool() dlg.show() dlg.exec_() if canvas.mapTool() != prevMapTool: try: canvas.mapTool().reset() except: pass canvas.setMapTool(prevMapTool) else: feedback = MessageBarProgress() context = dataobjects.createContext(feedback) parameters = {} ret, results = execute(alg, parameters, context, feedback) handleAlgorithmResults(alg, context, feedback) feedback.close() def getMenu(name, parent): menus = [c for c in parent.children() if isinstance(c, QMenu) and c.title() == name] if menus: return menus[0] else: return parent.addMenu(name) def findAction(actions, alg): for action in actions: if action.data() == alg.id(): return action return None
gpl-2.0
svenstaro/ansible
lib/ansible/modules/cloud/openstack/os_server_group.py
27
5339
#!/usr/bin/python
# Copyright (c) 2016 Catalyst IT Limited
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: os_server_group
short_description: Manage OpenStack server groups
extends_documentation_fragment: openstack
version_added: "2.2"
author: "Lingxian Kong (@kong)"
description:
   - Add or remove server groups from OpenStack.
options:
   state:
     description:
        - Indicate desired state of the resource. When I(state) is 'present',
          then I(policies) is required.
     choices: ['present', 'absent']
     required: false
     default: present
   name:
     description:
        - Server group name.
     required: true
   policies:
     description:
        - A list of one or more policy names to associate with the server
          group. The list must contain at least one policy name. The current
          valid policy names are anti-affinity, affinity, soft-anti-affinity
          and soft-affinity.
     required: false
   availability_zone:
     description:
       - Ignored. Present for backwards compatability
     required: false
requirements:
    - "python >= 2.6"
    - "shade"
'''

EXAMPLES = '''
# Create a server group with 'affinity' policy.
- os_server_group:
    state: present
    auth:
      auth_url: https://api.cloud.catalyst.net.nz:5000/v2.0
      username: admin
      password: admin
      project_name: admin
    name: my_server_group
    policies:
      - affinity

# Delete 'my_server_group' server group.
- os_server_group:
    state: absent
    auth:
      auth_url: https://api.cloud.catalyst.net.nz:5000/v2.0
      username: admin
      password: admin
      project_name: admin
    name: my_server_group
'''

RETURN = '''
id:
    description: Unique UUID.
    returned: success
    type: string
name:
    description: The name of the server group.
    returned: success
    type: string
policies:
    description: A list of one or more policy names of the server group.
    returned: success
    type: list of strings
members:
    description: A list of members in the server group.
    returned: success
    type: list of strings
metadata:
    description: Metadata key and value pairs.
    returned: success
    type: dict
project_id:
    description: The project ID who owns the server group.
    returned: success
    type: string
user_id:
    description: The user ID who owns the server group.
    returned: success
    type: string
'''

# shade is optional at import time; its absence is reported via fail_json
# once the module has a channel to talk to Ansible.
try:
    import shade
    HAS_SHADE = True
except ImportError:
    HAS_SHADE = False


def _system_state_change(state, server_group):
    """Return True when the cloud must be changed to reach `state`.

    A change is needed to create a missing group ('present') or to delete an
    existing one ('absent'); any other combination is already satisfied.
    """
    if state == 'present':
        return not server_group
    if state == 'absent':
        return bool(server_group)
    return False


def main():
    """Module entry point: create or delete one OpenStack server group."""
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        policies=dict(required=False, type='list'),
        state=dict(default='present', choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(
        argument_spec,
        supports_check_mode=True,
        **module_kwargs
    )

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    name = module.params['name']
    policies = module.params['policies']
    state = module.params['state']

    try:
        cloud = shade.openstack_cloud(**module.params)
        server_group = cloud.get_server_group(name)

        # In check mode only report whether anything would change.
        if module.check_mode:
            module.exit_json(
                changed=_system_state_change(state, server_group)
            )

        changed = False
        if state == 'present':
            if not server_group:
                # 'policies' is mandatory only on creation.
                if not policies:
                    module.fail_json(
                        msg="Parameter 'policies' is required in Server Group "
                            "Create"
                    )
                server_group = cloud.create_server_group(name, policies)
                changed = True

            module.exit_json(
                changed=changed,
                id=server_group['id'],
                server_group=server_group
            )
        if state == 'absent':
            if server_group:
                cloud.delete_server_group(server_group['id'])
                changed = True
            module.exit_json(changed=changed)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e), extra_data=e.extra_data)


# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *

if __name__ == '__main__':
    main()
gpl-3.0
ASCrookes/django
django/contrib/gis/utils/wkt.py
589
1923
""" Utilities for manipulating Geometry WKT. """ from django.utils import six def precision_wkt(geom, prec): """ Returns WKT text of the geometry according to the given precision (an integer or a string). If the precision is an integer, then the decimal places of coordinates WKT will be truncated to that number: >>> from django.contrib.gis.geos import Point >>> pnt = Point(5, 23) >>> pnt.wkt 'POINT (5.0000000000000000 23.0000000000000000)' >>> precision_wkt(pnt, 1) 'POINT (5.0 23.0)' If the precision is a string, it must be valid Python format string (e.g., '%20.7f') -- thus, you should know what you're doing. """ if isinstance(prec, int): num_fmt = '%%.%df' % prec elif isinstance(prec, six.string_types): num_fmt = prec else: raise TypeError # TODO: Support 3D geometries. coord_fmt = ' '.join([num_fmt, num_fmt]) def formatted_coords(coords): return ','.join(coord_fmt % c[:2] for c in coords) def formatted_poly(poly): return ','.join('(%s)' % formatted_coords(r) for r in poly) def formatted_geom(g): gtype = str(g.geom_type).upper() yield '%s(' % gtype if gtype == 'POINT': yield formatted_coords((g.coords,)) elif gtype in ('LINESTRING', 'LINEARRING'): yield formatted_coords(g.coords) elif gtype in ('POLYGON', 'MULTILINESTRING'): yield formatted_poly(g) elif gtype == 'MULTIPOINT': yield formatted_coords(g.coords) elif gtype == 'MULTIPOLYGON': yield ','.join('(%s)' % formatted_poly(p) for p in g) elif gtype == 'GEOMETRYCOLLECTION': yield ','.join(''.join(wkt for wkt in formatted_geom(child)) for child in g) else: raise TypeError yield ')' return ''.join(wkt for wkt in formatted_geom(geom))
bsd-3-clause
cecep-edu/edx-platform
cms/djangoapps/contentstore/tests/test_import.py
14
11987
# -*- coding: utf-8 -*-
# pylint: disable=protected-access
"""
Tests for import_course_from_xml using the mongo modulestore.
"""
from django.test.client import Client
from django.test.utils import override_settings
from django.conf import settings
import ddt
import copy
from openedx.core.djangoapps.content.course_structures.tests import SignalDisconnectTestMixin
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.contentstore.django import contentstore
from xmodule.modulestore.tests.factories import check_exact_number_of_calls, check_number_of_calls
from xmodule.modulestore.xml_importer import import_course_from_xml
from xmodule.exceptions import NotFoundError
from uuid import uuid4

# Use a uniquely-named content database per test run so parallel/leftover
# runs cannot collide.
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT


@ddt.ddt
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE, SEARCH_ENGINE=None)
class ContentStoreImportTest(SignalDisconnectTestMixin, ModuleStoreTestCase):
    """
    Tests that rely on the toy and test_import_course courses.
    NOTE: refactor using CourseFactory so they do not.
    """
    def setUp(self):
        super(ContentStoreImportTest, self).setUp()

        self.client = Client()
        self.client.login(username=self.user.username, password=self.user_password)

    def load_test_import_course(self, target_id=None, create_if_not_present=True, module_store=None):
        '''
        Load the standard course used to test imports
        (for do_import_static=False behavior).

        Returns a (module_store, content_store, course) triple and asserts
        the imported course exists.
        '''
        content_store = contentstore()
        if module_store is None:
            module_store = modulestore()
        import_course_from_xml(
            module_store,
            self.user.id,
            TEST_DATA_DIR,
            ['test_import_course'],
            static_content_store=content_store,
            do_import_static=False,
            verbose=True,
            target_id=target_id,
            create_if_not_present=create_if_not_present,
        )
        course_id = module_store.make_course_key('edX', 'test_import_course', '2012_Fall')
        course = module_store.get_course(course_id)
        self.assertIsNotNone(course)

        return module_store, content_store, course

    def test_import_course_into_similar_namespace(self):
        # Checks to make sure that a course with an org/course like
        # edx/course can be imported into a namespace with an org/course
        # like edx/course_name
        module_store, __, course = self.load_test_import_course()
        course_items = import_course_from_xml(
            module_store,
            self.user.id,
            TEST_DATA_DIR,
            ['test_import_course_2'],
            target_id=course.id,
            verbose=True,
        )
        self.assertEqual(len(course_items), 1)

    def test_unicode_chars_in_course_name_import(self):
        """
        # Test that importing course with unicode 'id' and 'display name' doesn't give UnicodeEncodeError
        """
        # Test with the split modulestore because store.has_course fails in old mongo with unicode characters.
        with modulestore().default_store(ModuleStoreEnum.Type.split):
            module_store = modulestore()
            course_id = module_store.make_course_key(u'Юникода', u'unicode_course', u'échantillon')
            import_course_from_xml(
                module_store,
                self.user.id,
                TEST_DATA_DIR,
                ['2014_Uni'],
                target_id=course_id,
                create_if_not_present=True
            )
            course = module_store.get_course(course_id)
            self.assertIsNotNone(course)

            # test that course 'display_name' same as imported course 'display_name'
            self.assertEqual(course.display_name, u"Φυσικά το όνομα Unicode")

    def test_static_import(self):
        '''
        Stuff in static_import should always be imported into contentstore
        '''
        _, content_store, course = self.load_test_import_course()

        # make sure we have ONE asset in our contentstore ("should_be_imported.html")
        all_assets, count = content_store.get_all_content_for_course(course.id)
        print "len(all_assets)=%d" % len(all_assets)
        self.assertEqual(len(all_assets), 1)
        self.assertEqual(count, 1)

        content = None
        try:
            location = course.id.make_asset_key('asset', 'should_be_imported.html')
            content = content_store.find(location)
        except NotFoundError:
            pass

        self.assertIsNotNone(content)

        # make sure course.static_asset_path is correct
        print "static_asset_path = {0}".format(course.static_asset_path)
        self.assertEqual(course.static_asset_path, 'test_import_course')

    def test_asset_import_nostatic(self):
        '''
        This test validates that an image asset is NOT imported when do_import_static=False
        '''
        content_store = contentstore()

        module_store = modulestore()
        import_course_from_xml(
            module_store, self.user.id, TEST_DATA_DIR, ['toy'],
            static_content_store=content_store, do_import_static=False,
            create_if_not_present=True, verbose=True
        )

        course = module_store.get_course(module_store.make_course_key('edX', 'toy', '2012_Fall'))

        # make sure we have NO assets in our contentstore
        all_assets, count = content_store.get_all_content_for_course(course.id)
        self.assertEqual(len(all_assets), 0)
        self.assertEqual(count, 0)

    def test_no_static_link_rewrites_on_import(self):
        # With do_import_static=False the '/static/' references inside the
        # imported content must remain un-rewritten.
        module_store = modulestore()
        courses = import_course_from_xml(
            module_store, self.user.id, TEST_DATA_DIR, ['toy'], do_import_static=False, verbose=True,
            create_if_not_present=True
        )
        course_key = courses[0].id

        handouts = module_store.get_item(course_key.make_usage_key('course_info', 'handouts'))
        self.assertIn('/static/', handouts.data)

        handouts = module_store.get_item(course_key.make_usage_key('html', 'toyhtml'))
        self.assertIn('/static/', handouts.data)

    def test_tab_name_imports_correctly(self):
        # Third tab of the standard import course is expected to be 'Syllabus'.
        _module_store, _content_store, course = self.load_test_import_course()
        print "course tabs = {0}".format(course.tabs)
        self.assertEqual(course.tabs[2]['name'], 'Syllabus')

    def test_import_performance_mongo(self):
        # Pin the number of inheritance-cache recomputations the mongo store
        # performs during a standard import (a performance regression guard).
        store = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.mongo)

        # we try to refresh the inheritance tree for each update_item in the import
        with check_exact_number_of_calls(store, 'refresh_cached_metadata_inheritance_tree', 28):

            # _get_cached_metadata_inheritance_tree should be called twice (once for import, once on publish)
            with check_exact_number_of_calls(store, '_get_cached_metadata_inheritance_tree', 2):

                # with bulk-edit in progress, the inheritance tree should be recomputed only at the end of the import
                # NOTE: On Jenkins, with memcache enabled, the number of calls here is only 1.
                #       Locally, without memcache, the number of calls is actually 2 (once more during the publish step)
                with check_number_of_calls(store, '_compute_metadata_inheritance_tree', 2):
                    self.load_test_import_course(create_if_not_present=False, module_store=store)

    @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
    def test_reimport(self, default_ms_type):
        # Importing the same course twice (second time into the existing
        # course id) must succeed in both store types.
        with modulestore().default_store(default_ms_type):
            __, __, course = self.load_test_import_course(create_if_not_present=True)
            self.load_test_import_course(target_id=course.id)

    def test_rewrite_reference_list(self):
        # This test fails with split modulestore (the HTML component is not in "different_course_id" namespace).
        # More investigation needs to be done.
        module_store = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.mongo)
        target_id = module_store.make_course_key('testX', 'conditional_copy', 'copy_run')
        import_course_from_xml(
            module_store,
            self.user.id,
            TEST_DATA_DIR,
            ['conditional'],
            target_id=target_id
        )
        conditional_module = module_store.get_item(
            target_id.make_usage_key('conditional', 'condone')
        )
        self.assertIsNotNone(conditional_module)
        different_course_id = module_store.make_course_key('edX', 'different_course', None)
        # In-course references must be remapped to the target course id while
        # cross-course references keep their original namespace.
        self.assertListEqual(
            [
                target_id.make_usage_key('problem', 'choiceprob'),
                different_course_id.make_usage_key('html', 'for_testing_import_rewrites')
            ],
            conditional_module.sources_list
        )
        self.assertListEqual(
            [
                target_id.make_usage_key('html', 'congrats'),
                target_id.make_usage_key('html', 'secret_page')
            ],
            conditional_module.show_tag_list
        )

    def test_rewrite_reference_value_dict_published(self):
        """
        Test rewriting references in ReferenceValueDict, specifically with published content.
        """
        self._verify_split_test_import(
            'split_test_copy',
            'split_test_module',
            'split1',
            {"0": 'sample_0', "2": 'sample_2'},
        )

    def test_rewrite_reference_value_dict_draft(self):
        """
        Test rewriting references in ReferenceValueDict, specifically with draft content.
        """
        self._verify_split_test_import(
            'split_test_copy_with_draft',
            'split_test_module_draft',
            'fb34c21fe64941999eaead421a8711b8',
            {"0": '9f0941d021414798836ef140fb5f6841', "1": '0faf29473cf1497baa33fcc828b179cd'},
        )

    def _verify_split_test_import(self, target_course_name, source_course_name, split_test_name, groups_to_verticals):
        # Import `source_course_name` into a fresh target course and verify the
        # split_test block's group->vertical mapping was remapped to target ids.
        module_store = modulestore()
        target_id = module_store.make_course_key('testX', target_course_name, 'copy_run')
        import_course_from_xml(
            module_store,
            self.user.id,
            TEST_DATA_DIR,
            [source_course_name],
            target_id=target_id,
            create_if_not_present=True
        )
        split_test_module = module_store.get_item(
            target_id.make_usage_key('split_test', split_test_name)
        )
        self.assertIsNotNone(split_test_module)

        remapped_verticals = {
            key: target_id.make_usage_key('vertical', value) for key, value in groups_to_verticals.iteritems()
        }

        self.assertEqual(remapped_verticals, split_test_module.group_id_to_child)

    @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
    def test_video_components_present_while_import(self, store):
        """
        Test that video components with same edx_video_id are present while re-importing
        """
        with modulestore().default_store(store):
            module_store = modulestore()
            course_id = module_store.make_course_key('edX', 'test_import_course', '2012_Fall')

            # Import first time
            __, __, course = self.load_test_import_course(target_id=course_id, module_store=module_store)

            # Re-import
            __, __, re_course = self.load_test_import_course(target_id=course.id, module_store=module_store)

            vertical = module_store.get_item(re_course.id.make_usage_key('vertical', 'vertical_test'))

            # NOTE(review): assumes children[1] of this vertical is the video
            # block in the fixture course — confirm against the test data.
            video = module_store.get_item(vertical.children[1])
            self.assertEqual(video.display_name, 'default')
agpl-3.0
gsehub/edx-platform
common/lib/xmodule/xmodule/annotatable_module.py
13
7026
import logging
import textwrap

from lxml import etree
from pkg_resources import resource_string

from xblock.fields import Scope, String
from xmodule.raw_module import RawDescriptor
from xmodule.x_module import XModule

log = logging.getLogger(__name__)

# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text


class AnnotatableFields(object):
    """Field definitions shared by the Annotatable module and its descriptor."""
    # Raw XML source: an <annotatable> document containing optional
    # <instructions> plus HTML with inline <annotation> tags.
    data = String(
        help=_("XML data for the annotation"),
        scope=Scope.content,
        default=textwrap.dedent("""
        <annotatable>
            <instructions>
                <p>Enter your (optional) instructions for the exercise in HTML format.</p>
                <p>Annotations are specified by an <code>&lt;annotation&gt;</code> tag which may may have the following attributes:</p>
                <ul class="instructions-template">
                    <li><code>title</code> (optional). Title of the annotation. Defaults to <i>Commentary</i> if omitted.</li>
                    <li><code>body</code> (<b>required</b>). Text of the annotation.</li>
                    <li><code>problem</code> (optional). Numeric index of the problem associated with this annotation. This is a zero-based index, so the first problem on the page would have <code>problem="0"</code>.</li>
                    <li><code>highlight</code> (optional). Possible values: yellow, red, orange, green, blue, or purple. Defaults to yellow if this attribute is omitted.</li>
                </ul>
            </instructions>
            <p>Add your HTML with annotation spans here.</p>
            <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. <annotation title="My title" body="My comment" highlight="yellow" problem="0">Ut sodales laoreet est, egestas gravida felis egestas nec.</annotation> Aenean at volutpat erat. Cras commodo viverra nibh in aliquam.</p>
            <p>Nulla facilisi. <annotation body="Basic annotation example." problem="1">Pellentesque id vestibulum libero.</annotation> Suspendisse potenti. Morbi scelerisque nisi vitae felis dictum mattis. Nam sit amet magna elit. Nullam volutpat cursus est, sit amet sagittis odio vulputate et. Curabitur euismod, orci in vulputate imperdiet, augue lorem tempor purus, id aliquet augue turpis a est. Aenean a sagittis libero. Praesent fringilla pretium magna, non condimentum risus elementum nec. Pellentesque faucibus elementum pharetra. Pellentesque vitae metus eros.</p>
        </annotatable>
        """)
    )
    display_name = String(
        display_name=_("Display Name"),
        help=_("The display name for this component."),
        scope=Scope.settings,
        default=_('Annotation'),
    )


class AnnotatableModule(AnnotatableFields, XModule):
    """Student-facing module: renders annotatable XML content as HTML spans."""
    # Static assets bundled with this module for the LMS front end.
    js = {
        'js': [
            resource_string(__name__, 'js/src/html/display.js'),
            resource_string(__name__, 'js/src/annotatable/display.js'),
            resource_string(__name__, 'js/src/javascript_loader.js'),
            resource_string(__name__, 'js/src/collapsible.js'),
        ]
    }
    js_module_name = "Annotatable"
    css = {'scss': [resource_string(__name__, 'css/annotatable/display.scss')]}
    icon_class = 'annotatable'

    def __init__(self, *args, **kwargs):
        super(AnnotatableModule, self).__init__(*args, **kwargs)

        # Parse once at construction; instructions are split out and the
        # remainder kept as the annotatable content.
        xmltree = etree.fromstring(self.data)

        self.instructions = self._extract_instructions(xmltree)
        self.content = etree.tostring(xmltree, encoding='unicode')
        self.element_id = self.location.html_id()
        # Colors accepted for the `highlight` attribute (others are ignored,
        # though the attribute itself is still consumed below).
        self.highlight_colors = ['yellow', 'orange', 'purple', 'blue', 'green']

    def _get_annotation_class_attr(self, index, el):
        """ Returns a dict with the CSS class attribute to set on the annotation
            and an XML key to delete from the element.
        """
        attr = {}
        cls = ['annotatable-span', 'highlight']
        highlight_key = 'highlight'
        color = el.get(highlight_key)

        if color is not None:
            if color in self.highlight_colors:
                cls.append('highlight-' + color)
            # Remove the source attribute whether or not the color was valid.
            attr['_delete'] = highlight_key
        attr['value'] = ' '.join(cls)

        return {'class': attr}

    def _get_annotation_data_attr(self, index, el):
        """ Returns a dict in which the keys are the HTML data attributes
            to set on the annotation element. Each data attribute has a
            corresponding 'value' and (optional) '_delete' key to specify
            an XML attribute to delete.
        """
        data_attrs = {}
        attrs_map = {
            'body': 'data-comment-body',
            'title': 'data-comment-title',
            'problem': 'data-problem-id'
        }

        for xml_key in attrs_map.keys():
            if xml_key in el.attrib:
                value = el.get(xml_key, '')
                html_key = attrs_map[xml_key]
                data_attrs[html_key] = {'value': value, '_delete': xml_key}

        return data_attrs

    def _render_annotation(self, index, el):
        """ Renders an annotation element for HTML output.

        Mutates `el` in place: retags it as <span>, sets the computed
        class/data attributes, and deletes the consumed XML attributes.
        """
        attr = {}
        attr.update(self._get_annotation_class_attr(index, el))
        attr.update(self._get_annotation_data_attr(index, el))

        el.tag = 'span'

        for key in attr.keys():
            el.set(key, attr[key]['value'])
            if '_delete' in attr[key] and attr[key]['_delete'] is not None:
                delete_key = attr[key]['_delete']
                del el.attrib[delete_key]

    def _render_content(self):
        """ Renders annotatable content with annotation spans and returns HTML. """
        xmltree = etree.fromstring(self.content)
        xmltree.tag = 'div'
        if 'display_name' in xmltree.attrib:
            del xmltree.attrib['display_name']

        index = 0
        for el in xmltree.findall('.//annotation'):
            self._render_annotation(index, el)
            index += 1

        return etree.tostring(xmltree, encoding='unicode')

    def _extract_instructions(self, xmltree):
        """ Removes <instructions> from the xmltree and returns them as a string, otherwise None. """
        instructions = xmltree.find('instructions')
        if instructions is not None:
            instructions.tag = 'div'
            xmltree.remove(instructions)
            return etree.tostring(instructions, encoding='unicode')
        return None

    def get_html(self):
        """ Renders parameters to template. """
        context = {
            'display_name': self.display_name_with_default_escaped,
            'element_id': self.element_id,
            'instructions_html': self.instructions,
            'content_html': self._render_content()
        }

        return self.system.render_template('annotatable.html', context)


class AnnotatableDescriptor(AnnotatableFields, RawDescriptor):
    """Studio-facing descriptor: edited as raw XML via the raw-edit widget."""
    module_class = AnnotatableModule
    mako_template = "widgets/raw-edit.html"
    resources_dir = None
agpl-3.0
dimdung/boto
boto/dynamodb/layer2.py
135
33814
# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ # Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # from boto.dynamodb.layer1 import Layer1 from boto.dynamodb.table import Table from boto.dynamodb.schema import Schema from boto.dynamodb.item import Item from boto.dynamodb.batch import BatchList, BatchWriteList from boto.dynamodb.types import get_dynamodb_type, Dynamizer, \ LossyFloatDynamizer, NonBooleanDynamizer class TableGenerator(object): """ This is an object that wraps up the table_generator function. The only real reason to have this is that we want to be able to accumulate and return the ConsumedCapacityUnits element that is part of each response. :ivar last_evaluated_key: A sequence representing the key(s) of the item last evaluated, or None if no additional results are available. :ivar remaining: The remaining quantity of results requested. :ivar table: The table to which the call was made. 
""" def __init__(self, table, callable, remaining, item_class, kwargs): self.table = table self.callable = callable self.remaining = -1 if remaining is None else remaining self.item_class = item_class self.kwargs = kwargs self._consumed_units = 0.0 self.last_evaluated_key = None self._count = 0 self._scanned_count = 0 self._response = None @property def count(self): """ The total number of items retrieved thus far. This value changes with iteration and even when issuing a call with count=True, it is necessary to complete the iteration to assert an accurate count value. """ self.response return self._count @property def scanned_count(self): """ As above, but representing the total number of items scanned by DynamoDB, without regard to any filters. """ self.response return self._scanned_count @property def consumed_units(self): """ Returns a float representing the ConsumedCapacityUnits accumulated. """ self.response return self._consumed_units @property def response(self): """ The current response to the call from DynamoDB. """ return self.next_response() if self._response is None else self._response def next_response(self): """ Issue a call and return the result. You can invoke this method while iterating over the TableGenerator in order to skip to the next "page" of results. 
""" # preserve any existing limit in case the user alters self.remaining limit = self.kwargs.get('limit') if (self.remaining > 0 and (limit is None or limit > self.remaining)): self.kwargs['limit'] = self.remaining self._response = self.callable(**self.kwargs) self.kwargs['limit'] = limit self._consumed_units += self._response.get('ConsumedCapacityUnits', 0.0) self._count += self._response.get('Count', 0) self._scanned_count += self._response.get('ScannedCount', 0) # at the expense of a possibly gratuitous dynamize, ensure that # early generator termination won't result in bad LEK values if 'LastEvaluatedKey' in self._response: lek = self._response['LastEvaluatedKey'] esk = self.table.layer2.dynamize_last_evaluated_key(lek) self.kwargs['exclusive_start_key'] = esk lektuple = (lek['HashKeyElement'],) if 'RangeKeyElement' in lek: lektuple += (lek['RangeKeyElement'],) self.last_evaluated_key = lektuple else: self.last_evaluated_key = None return self._response def __iter__(self): while self.remaining != 0: response = self.response for item in response.get('Items', []): self.remaining -= 1 yield self.item_class(self.table, attrs=item) if self.remaining == 0: break if response is not self._response: break else: if self.last_evaluated_key is not None: self.next_response() continue break if response is not self._response: continue break class Layer2(object): def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, debug=0, security_token=None, region=None, validate_certs=True, dynamizer=LossyFloatDynamizer, profile_name=None): self.layer1 = Layer1(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, debug, security_token, region, validate_certs=validate_certs, profile_name=profile_name) self.dynamizer = dynamizer() def use_decimals(self, use_boolean=False): """ Use the ``decimal.Decimal`` type for encoding/decoding numeric types. 
By default, ints/floats are used to represent numeric types ('N', 'NS') received from DynamoDB. Using the ``Decimal`` type is recommended to prevent loss of precision. """ # Eventually this should be made the default dynamizer. self.dynamizer = Dynamizer() if use_boolean else NonBooleanDynamizer() def dynamize_attribute_updates(self, pending_updates): """ Convert a set of pending item updates into the structure required by Layer1. """ d = {} for attr_name in pending_updates: action, value = pending_updates[attr_name] if value is None: # DELETE without an attribute value d[attr_name] = {"Action": action} else: d[attr_name] = {"Action": action, "Value": self.dynamizer.encode(value)} return d def dynamize_item(self, item): d = {} for attr_name in item: d[attr_name] = self.dynamizer.encode(item[attr_name]) return d def dynamize_range_key_condition(self, range_key_condition): """ Convert a layer2 range_key_condition parameter into the structure required by Layer1. """ return range_key_condition.to_dict() def dynamize_scan_filter(self, scan_filter): """ Convert a layer2 scan_filter parameter into the structure required by Layer1. """ d = None if scan_filter: d = {} for attr_name in scan_filter: condition = scan_filter[attr_name] d[attr_name] = condition.to_dict() return d def dynamize_expected_value(self, expected_value): """ Convert an expected_value parameter into the data structure required for Layer1. """ d = None if expected_value: d = {} for attr_name in expected_value: attr_value = expected_value[attr_name] if attr_value is True: attr_value = {'Exists': True} elif attr_value is False: attr_value = {'Exists': False} else: val = self.dynamizer.encode(expected_value[attr_name]) attr_value = {'Value': val} d[attr_name] = attr_value return d def dynamize_last_evaluated_key(self, last_evaluated_key): """ Convert a last_evaluated_key parameter into the data structure required for Layer1. 
""" d = None if last_evaluated_key: hash_key = last_evaluated_key['HashKeyElement'] d = {'HashKeyElement': self.dynamizer.encode(hash_key)} if 'RangeKeyElement' in last_evaluated_key: range_key = last_evaluated_key['RangeKeyElement'] d['RangeKeyElement'] = self.dynamizer.encode(range_key) return d def build_key_from_values(self, schema, hash_key, range_key=None): """ Build a Key structure to be used for accessing items in Amazon DynamoDB. This method takes the supplied hash_key and optional range_key and validates them against the schema. If there is a mismatch, a TypeError is raised. Otherwise, a Python dict version of a Amazon DynamoDB Key data structure is returned. :type hash_key: int|float|str|unicode|Binary :param hash_key: The hash key of the item you are looking for. The type of the hash key should match the type defined in the schema. :type range_key: int|float|str|unicode|Binary :param range_key: The range key of the item your are looking for. This should be supplied only if the schema requires a range key. The type of the range key should match the type defined in the schema. """ dynamodb_key = {} dynamodb_value = self.dynamizer.encode(hash_key) if list(dynamodb_value.keys())[0] != schema.hash_key_type: msg = 'Hashkey must be of type: %s' % schema.hash_key_type raise TypeError(msg) dynamodb_key['HashKeyElement'] = dynamodb_value if range_key is not None: dynamodb_value = self.dynamizer.encode(range_key) if list(dynamodb_value.keys())[0] != schema.range_key_type: msg = 'RangeKey must be of type: %s' % schema.range_key_type raise TypeError(msg) dynamodb_key['RangeKeyElement'] = dynamodb_value return dynamodb_key def new_batch_list(self): """ Return a new, empty :class:`boto.dynamodb.batch.BatchList` object. """ return BatchList(self) def new_batch_write_list(self): """ Return a new, empty :class:`boto.dynamodb.batch.BatchWriteList` object. 
""" return BatchWriteList(self) def list_tables(self, limit=None): """ Return a list of the names of all tables associated with the current account and region. :type limit: int :param limit: The maximum number of tables to return. """ tables = [] start_table = None while not limit or len(tables) < limit: this_round_limit = None if limit: this_round_limit = limit - len(tables) this_round_limit = min(this_round_limit, 100) result = self.layer1.list_tables(limit=this_round_limit, start_table=start_table) tables.extend(result.get('TableNames', [])) start_table = result.get('LastEvaluatedTableName', None) if not start_table: break return tables def describe_table(self, name): """ Retrieve information about an existing table. :type name: str :param name: The name of the desired table. """ return self.layer1.describe_table(name) def table_from_schema(self, name, schema): """ Create a Table object from a schema. This method will create a Table object without making any API calls. If you know the name and schema of the table, you can use this method instead of ``get_table``. Example usage:: table = layer2.table_from_schema( 'tablename', Schema.create(hash_key=('foo', 'N'))) :type name: str :param name: The name of the table. :type schema: :class:`boto.dynamodb.schema.Schema` :param schema: The schema associated with the table. :rtype: :class:`boto.dynamodb.table.Table` :return: A Table object representing the table. """ return Table.create_from_schema(self, name, schema) def get_table(self, name): """ Retrieve the Table object for an existing table. :type name: str :param name: The name of the desired table. :rtype: :class:`boto.dynamodb.table.Table` :return: A Table object representing the table. """ response = self.layer1.describe_table(name) return Table(self, response) lookup = get_table def create_table(self, name, schema, read_units, write_units): """ Create a new Amazon DynamoDB table. :type name: str :param name: The name of the desired table. 
:type schema: :class:`boto.dynamodb.schema.Schema` :param schema: The Schema object that defines the schema used by this table. :type read_units: int :param read_units: The value for ReadCapacityUnits. :type write_units: int :param write_units: The value for WriteCapacityUnits. :rtype: :class:`boto.dynamodb.table.Table` :return: A Table object representing the new Amazon DynamoDB table. """ response = self.layer1.create_table(name, schema.dict, {'ReadCapacityUnits': read_units, 'WriteCapacityUnits': write_units}) return Table(self, response) def update_throughput(self, table, read_units, write_units): """ Update the ProvisionedThroughput for the Amazon DynamoDB Table. :type table: :class:`boto.dynamodb.table.Table` :param table: The Table object whose throughput is being updated. :type read_units: int :param read_units: The new value for ReadCapacityUnits. :type write_units: int :param write_units: The new value for WriteCapacityUnits. """ response = self.layer1.update_table(table.name, {'ReadCapacityUnits': read_units, 'WriteCapacityUnits': write_units}) table.update_from_response(response) def delete_table(self, table): """ Delete this table and all items in it. After calling this the Table objects status attribute will be set to 'DELETING'. :type table: :class:`boto.dynamodb.table.Table` :param table: The Table object that is being deleted. """ response = self.layer1.delete_table(table.name) table.update_from_response(response) def create_schema(self, hash_key_name, hash_key_proto_value, range_key_name=None, range_key_proto_value=None): """ Create a Schema object used when creating a Table. :type hash_key_name: str :param hash_key_name: The name of the HashKey for the schema. :type hash_key_proto_value: int|long|float|str|unicode|Binary :param hash_key_proto_value: A sample or prototype of the type of value you want to use for the HashKey. Alternatively, you can also just pass in the Python type (e.g. int, float, etc.). 
:type range_key_name: str :param range_key_name: The name of the RangeKey for the schema. This parameter is optional. :type range_key_proto_value: int|long|float|str|unicode|Binary :param range_key_proto_value: A sample or prototype of the type of value you want to use for the RangeKey. Alternatively, you can also pass in the Python type (e.g. int, float, etc.) This parameter is optional. """ hash_key = (hash_key_name, get_dynamodb_type(hash_key_proto_value)) if range_key_name and range_key_proto_value is not None: range_key = (range_key_name, get_dynamodb_type(range_key_proto_value)) else: range_key = None return Schema.create(hash_key, range_key) def get_item(self, table, hash_key, range_key=None, attributes_to_get=None, consistent_read=False, item_class=Item): """ Retrieve an existing item from the table. :type table: :class:`boto.dynamodb.table.Table` :param table: The Table object from which the item is retrieved. :type hash_key: int|long|float|str|unicode|Binary :param hash_key: The HashKey of the requested item. The type of the value must match the type defined in the schema for the table. :type range_key: int|long|float|str|unicode|Binary :param range_key: The optional RangeKey of the requested item. The type of the value must match the type defined in the schema for the table. :type attributes_to_get: list :param attributes_to_get: A list of attribute names. If supplied, only the specified attribute names will be returned. Otherwise, all attributes will be returned. :type consistent_read: bool :param consistent_read: If True, a consistent read request is issued. Otherwise, an eventually consistent request is issued. :type item_class: Class :param item_class: Allows you to override the class used to generate the items. 
This should be a subclass of :class:`boto.dynamodb.item.Item` """ key = self.build_key_from_values(table.schema, hash_key, range_key) response = self.layer1.get_item(table.name, key, attributes_to_get, consistent_read, object_hook=self.dynamizer.decode) item = item_class(table, hash_key, range_key, response['Item']) if 'ConsumedCapacityUnits' in response: item.consumed_units = response['ConsumedCapacityUnits'] return item def batch_get_item(self, batch_list): """ Return a set of attributes for a multiple items in multiple tables using their primary keys. :type batch_list: :class:`boto.dynamodb.batch.BatchList` :param batch_list: A BatchList object which consists of a list of :class:`boto.dynamoddb.batch.Batch` objects. Each Batch object contains the information about one batch of objects that you wish to retrieve in this request. """ request_items = batch_list.to_dict() return self.layer1.batch_get_item(request_items, object_hook=self.dynamizer.decode) def batch_write_item(self, batch_list): """ Performs multiple Puts and Deletes in one batch. :type batch_list: :class:`boto.dynamodb.batch.BatchWriteList` :param batch_list: A BatchWriteList object which consists of a list of :class:`boto.dynamoddb.batch.BatchWrite` objects. Each Batch object contains the information about one batch of objects that you wish to put or delete. """ request_items = batch_list.to_dict() return self.layer1.batch_write_item(request_items, object_hook=self.dynamizer.decode) def put_item(self, item, expected_value=None, return_values=None): """ Store a new item or completely replace an existing item in Amazon DynamoDB. :type item: :class:`boto.dynamodb.item.Item` :param item: The Item to write to Amazon DynamoDB. :type expected_value: dict :param expected_value: A dictionary of name/value pairs that you expect. 
This dictionary should have name/value pairs where the name is the name of the attribute and the value is either the value you are expecting or False if you expect the attribute not to exist. :type return_values: str :param return_values: Controls the return of attribute name-value pairs before then were changed. Possible values are: None or 'ALL_OLD'. If 'ALL_OLD' is specified and the item is overwritten, the content of the old item is returned. """ expected_value = self.dynamize_expected_value(expected_value) response = self.layer1.put_item(item.table.name, self.dynamize_item(item), expected_value, return_values, object_hook=self.dynamizer.decode) if 'ConsumedCapacityUnits' in response: item.consumed_units = response['ConsumedCapacityUnits'] return response def update_item(self, item, expected_value=None, return_values=None): """ Commit pending item updates to Amazon DynamoDB. :type item: :class:`boto.dynamodb.item.Item` :param item: The Item to update in Amazon DynamoDB. It is expected that you would have called the add_attribute, put_attribute and/or delete_attribute methods on this Item prior to calling this method. Those queued changes are what will be updated. :type expected_value: dict :param expected_value: A dictionary of name/value pairs that you expect. This dictionary should have name/value pairs where the name is the name of the attribute and the value is either the value you are expecting or False if you expect the attribute not to exist. :type return_values: str :param return_values: Controls the return of attribute name/value pairs before they were updated. Possible values are: None, 'ALL_OLD', 'UPDATED_OLD', 'ALL_NEW' or 'UPDATED_NEW'. If 'ALL_OLD' is specified and the item is overwritten, the content of the old item is returned. If 'ALL_NEW' is specified, then all the attributes of the new version of the item are returned. If 'UPDATED_NEW' is specified, the new versions of only the updated attributes are returned. 
""" expected_value = self.dynamize_expected_value(expected_value) key = self.build_key_from_values(item.table.schema, item.hash_key, item.range_key) attr_updates = self.dynamize_attribute_updates(item._updates) response = self.layer1.update_item(item.table.name, key, attr_updates, expected_value, return_values, object_hook=self.dynamizer.decode) item._updates.clear() if 'ConsumedCapacityUnits' in response: item.consumed_units = response['ConsumedCapacityUnits'] return response def delete_item(self, item, expected_value=None, return_values=None): """ Delete the item from Amazon DynamoDB. :type item: :class:`boto.dynamodb.item.Item` :param item: The Item to delete from Amazon DynamoDB. :type expected_value: dict :param expected_value: A dictionary of name/value pairs that you expect. This dictionary should have name/value pairs where the name is the name of the attribute and the value is either the value you are expecting or False if you expect the attribute not to exist. :type return_values: str :param return_values: Controls the return of attribute name-value pairs before then were changed. Possible values are: None or 'ALL_OLD'. If 'ALL_OLD' is specified and the item is overwritten, the content of the old item is returned. """ expected_value = self.dynamize_expected_value(expected_value) key = self.build_key_from_values(item.table.schema, item.hash_key, item.range_key) return self.layer1.delete_item(item.table.name, key, expected=expected_value, return_values=return_values, object_hook=self.dynamizer.decode) def query(self, table, hash_key, range_key_condition=None, attributes_to_get=None, request_limit=None, max_results=None, consistent_read=False, scan_index_forward=True, exclusive_start_key=None, item_class=Item, count=False): """ Perform a query on the table. :type table: :class:`boto.dynamodb.table.Table` :param table: The Table object that is being queried. :type hash_key: int|long|float|str|unicode|Binary :param hash_key: The HashKey of the requested item. 
The type of the value must match the type defined in the schema for the table. :type range_key_condition: :class:`boto.dynamodb.condition.Condition` :param range_key_condition: A Condition object. Condition object can be one of the following types: EQ|LE|LT|GE|GT|BEGINS_WITH|BETWEEN The only condition which expects or will accept two values is 'BETWEEN', otherwise a single value should be passed to the Condition constructor. :type attributes_to_get: list :param attributes_to_get: A list of attribute names. If supplied, only the specified attribute names will be returned. Otherwise, all attributes will be returned. :type request_limit: int :param request_limit: The maximum number of items to retrieve from Amazon DynamoDB on each request. You may want to set a specific request_limit based on the provisioned throughput of your table. The default behavior is to retrieve as many results as possible per request. :type max_results: int :param max_results: The maximum number of results that will be retrieved from Amazon DynamoDB in total. For example, if you only wanted to see the first 100 results from the query, regardless of how many were actually available, you could set max_results to 100 and the generator returned from the query method will only yeild 100 results max. :type consistent_read: bool :param consistent_read: If True, a consistent read request is issued. Otherwise, an eventually consistent request is issued. :type scan_index_forward: bool :param scan_index_forward: Specified forward or backward traversal of the index. Default is forward (True). :type count: bool :param count: If True, Amazon DynamoDB returns a total number of items for the Query operation, even if the operation has no matching items for the assigned filter. If count is True, the actual items are not returned and the count is accessible as the ``count`` attribute of the returned object. 
:type exclusive_start_key: list or tuple :param exclusive_start_key: Primary key of the item from which to continue an earlier query. This would be provided as the LastEvaluatedKey in that query. :type item_class: Class :param item_class: Allows you to override the class used to generate the items. This should be a subclass of :class:`boto.dynamodb.item.Item` :rtype: :class:`boto.dynamodb.layer2.TableGenerator` """ if range_key_condition: rkc = self.dynamize_range_key_condition(range_key_condition) else: rkc = None if exclusive_start_key: esk = self.build_key_from_values(table.schema, *exclusive_start_key) else: esk = None kwargs = {'table_name': table.name, 'hash_key_value': self.dynamizer.encode(hash_key), 'range_key_conditions': rkc, 'attributes_to_get': attributes_to_get, 'limit': request_limit, 'count': count, 'consistent_read': consistent_read, 'scan_index_forward': scan_index_forward, 'exclusive_start_key': esk, 'object_hook': self.dynamizer.decode} return TableGenerator(table, self.layer1.query, max_results, item_class, kwargs) def scan(self, table, scan_filter=None, attributes_to_get=None, request_limit=None, max_results=None, exclusive_start_key=None, item_class=Item, count=False): """ Perform a scan of DynamoDB. :type table: :class:`boto.dynamodb.table.Table` :param table: The Table object that is being scanned. :type scan_filter: A dict :param scan_filter: A dictionary where the key is the attribute name and the value is a :class:`boto.dynamodb.condition.Condition` object. 
Valid Condition objects include: * EQ - equal (1) * NE - not equal (1) * LE - less than or equal (1) * LT - less than (1) * GE - greater than or equal (1) * GT - greater than (1) * NOT_NULL - attribute exists (0, use None) * NULL - attribute does not exist (0, use None) * CONTAINS - substring or value in list (1) * NOT_CONTAINS - absence of substring or value in list (1) * BEGINS_WITH - substring prefix (1) * IN - exact match in list (N) * BETWEEN - >= first value, <= second value (2) :type attributes_to_get: list :param attributes_to_get: A list of attribute names. If supplied, only the specified attribute names will be returned. Otherwise, all attributes will be returned. :type request_limit: int :param request_limit: The maximum number of items to retrieve from Amazon DynamoDB on each request. You may want to set a specific request_limit based on the provisioned throughput of your table. The default behavior is to retrieve as many results as possible per request. :type max_results: int :param max_results: The maximum number of results that will be retrieved from Amazon DynamoDB in total. For example, if you only wanted to see the first 100 results from the query, regardless of how many were actually available, you could set max_results to 100 and the generator returned from the query method will only yeild 100 results max. :type count: bool :param count: If True, Amazon DynamoDB returns a total number of items for the Scan operation, even if the operation has no matching items for the assigned filter. If count is True, the actual items are not returned and the count is accessible as the ``count`` attribute of the returned object. :type exclusive_start_key: list or tuple :param exclusive_start_key: Primary key of the item from which to continue an earlier query. This would be provided as the LastEvaluatedKey in that query. :type item_class: Class :param item_class: Allows you to override the class used to generate the items. 
This should be a subclass of :class:`boto.dynamodb.item.Item` :rtype: :class:`boto.dynamodb.layer2.TableGenerator` """ if exclusive_start_key: esk = self.build_key_from_values(table.schema, *exclusive_start_key) else: esk = None kwargs = {'table_name': table.name, 'scan_filter': self.dynamize_scan_filter(scan_filter), 'attributes_to_get': attributes_to_get, 'limit': request_limit, 'count': count, 'exclusive_start_key': esk, 'object_hook': self.dynamizer.decode} return TableGenerator(table, self.layer1.scan, max_results, item_class, kwargs)
mit
dderevjanik/agescx
agescx/scenario.py
1
3376
from .models import *
from .controller import *
from .utilities import *  # utilities
from .decompress import *
from .compress import *


class Scenario:
    """In-memory representation of an AoE scenario (.scx) file.

    Holds players, map tiles, units, triggers, messages, cinematics
    and related data, and delegates (de)serialization to the
    ``Decompress``/``Compress`` helpers.
    """

    def __init__(self, filename=None, ver=1.21):
        """Create a scenario with default values; check default.txt
        for more information.

        Args:
            filename (str, optional): load scenario from this file
            ver (float, optional): specific scenario version
        """
        self.version = ver
        if filename:
            self.load(filename)
        else:
            self._clear()

    def __repr__(self):
        name = "SCENARIO:{}\n".format(self.filename)
        info1 = "\tWIDTH:{} HEIGHT:{}\n".format(self.tiles.width,
                                                self.tiles.height)
        info2 = "\tUNITS:{}\n".format(len(self.units))
        info3 = "\tTRIGGERS:{}".format(len(self.triggers))
        return name + info1 + info2 + info3

    def load(self, filename, ver=1.21):
        """Load a scenario from a file.

        The current scenario data is discarded (not saved) first.

        Args:
            filename (str): scenario filename
            ver (float, optional): version of scenario

        Raises:
            IOError: if the file doesn't exist or cannot be read
        """
        self._clear()
        # "with" guarantees the handle is closed even when read() fails
        # (the previous implementation leaked the open file object), and
        # catching OSError instead of a bare "except" no longer hides
        # unrelated failures such as KeyboardInterrupt or NameError.
        try:
            with open(filename, 'rb') as f:
                b = f.read()  # get bytes from file
        except OSError as err:
            raise IOError("File is broken or doesn't exists") from err
        Decompress(self, b, ver, False)  # load data

    def save(self, filename=None, ver=1.21):
        """Save the scenario in scx format.

        Args:
            filename (str, optional): if set, it will create a new
                scenario file, otherwise rewrite the current one
            ver (float, optional): save with this specific version

        Todo:
            finish this section
        """
        if filename is None:
            filename = self.filename  # save to this scenario file
        Compress(self, filename, ver)

    def new(self, filename):
        """Reset this object to a whole new blank scenario.

        Args:
            filename (str): file name the scenario will be saved under

        Todo:
            support starting terrain type, elevation and a version
            argument, as originally planned
        """
        self._clear()
        # NOTE(review): version is a str here but a float in __init__ and
        # None in _clear() -- presumably Compress accepts both; confirm.
        self.version = "1.21"
        self.version2 = 1.22
        self.filename = filename

    def _clear(self):
        """Reset every scenario attribute to its default value."""
        self.filename = None            # scenario filename
        self.version = None             # scenario version
        self.instructions = ""
        self.plrnumb = 8
        self.players = Players()        # initialize players
        self.messages = Messages()
        self.cinematics = Cinematics()  # movies
        self.background = Background()  # pre-game image
        self.map = Map()
        self.tiles = self.map.tiles
        self.goals = Goals()
        self.units = Units()
        self.triggers = Triggers()
        self.debug = Debug()
        # each player gets a reference to his own slice of the
        # scenario-wide unit collection
        for i in range(len(self.players)):
            self.players[i].units = self.units[i]
        self.timestamp = 0              # last save
mit
lupyuen/RaspberryPiImage
home/pi/GrovePi/Software/Python/others/temboo/Library/Google/Contacts/__init__.py
5
1118
from temboo.Library.Google.Contacts.CreateContact import CreateContact, CreateContactInputSet, CreateContactResultSet, CreateContactChoreographyExecution from temboo.Library.Google.Contacts.DeleteContact import DeleteContact, DeleteContactInputSet, DeleteContactResultSet, DeleteContactChoreographyExecution from temboo.Library.Google.Contacts.GetAllContacts import GetAllContacts, GetAllContactsInputSet, GetAllContactsResultSet, GetAllContactsChoreographyExecution from temboo.Library.Google.Contacts.GetAllGroups import GetAllGroups, GetAllGroupsInputSet, GetAllGroupsResultSet, GetAllGroupsChoreographyExecution from temboo.Library.Google.Contacts.GetContactById import GetContactById, GetContactByIdInputSet, GetContactByIdResultSet, GetContactByIdChoreographyExecution from temboo.Library.Google.Contacts.GetContactsWithQuery import GetContactsWithQuery, GetContactsWithQueryInputSet, GetContactsWithQueryResultSet, GetContactsWithQueryChoreographyExecution from temboo.Library.Google.Contacts.UpdateContact import UpdateContact, UpdateContactInputSet, UpdateContactResultSet, UpdateContactChoreographyExecution
apache-2.0
vietpn/ghost_nodejs
node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/lexers/_scilab_builtins.py
364
31261
# -*- coding: utf-8 -*- """ pygments.lexers._scilab_builtins ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Builtin list for the ScilabLexer. :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ # These lists are generated automatically. # Run the following in a Scilab script: # # varType=["functions", "commands", "macros", "variables" ]; # fd = mopen('list.txt','wt'); # # for j=1:size(varType,"*") # myStr=""; # a=completion("",varType(j)); # myStr=varType(j)+"_kw = ["; # for i=1:size(a,"*") # myStr = myStr + """" + a(i) + """"; # if size(a,"*") <> i then # myStr = myStr + ","; end # end # myStr = myStr + "]"; # mputl(myStr,fd); # end # mclose(fd); # # Then replace "$" by "\\$" manually. functions_kw = ["%XMLAttr_6","%XMLAttr_e","%XMLAttr_i_XMLElem","%XMLAttr_length","%XMLAttr_p","%XMLAttr_size","%XMLDoc_6","%XMLDoc_e","%XMLDoc_i_XMLList","%XMLDoc_p","%XMLElem_6","%XMLElem_e","%XMLElem_i_XMLDoc","%XMLElem_i_XMLElem","%XMLElem_i_XMLList","%XMLElem_p","%XMLList_6","%XMLList_e","%XMLList_i_XMLElem","%XMLList_i_XMLList","%XMLList_length","%XMLList_p","%XMLList_size","%XMLNs_6","%XMLNs_e","%XMLNs_i_XMLElem","%XMLNs_p","%XMLSet_6","%XMLSet_e","%XMLSet_length","%XMLSet_p","%XMLSet_size","%XMLValid_p","%b_i_XMLList","%c_i_XMLAttr","%c_i_XMLDoc","%c_i_XMLElem","%c_i_XMLList","%ce_i_XMLList","%fptr_i_XMLList","%h_i_XMLList","%hm_i_XMLList","%i_abs","%i_cumprod","%i_cumsum","%i_diag","%i_i_XMLList","%i_matrix","%i_max","%i_maxi","%i_min","%i_mini","%i_mput","%i_p","%i_prod","%i_sum","%i_tril","%i_triu","%ip_i_XMLList","%l_i_XMLList","%lss_i_XMLList","%mc_i_XMLList","%msp_full","%msp_i_XMLList","%msp_spget","%p_i_XMLList","%ptr_i_XMLList","%r_i_XMLList","%s_i_XMLList","%sp_i_XMLList","%spb_i_XMLList","%st_i_XMLList","Calendar","ClipBoard","Matplot","Matplot1","PlaySound","TCL_DeleteInterp","TCL_DoOneEvent","TCL_EvalFile","TCL_EvalStr","TCL_ExistArray","TCL_ExistInterp","TCL_ExistVar","TCL_GetVar","TCL_GetVersion","TCL_SetVar","TCL_Uns
etVar","TCL_UpVar","_","_code2str","_str2code","about","abs","acos","addcb","addf","addhistory","addinter","amell","and","argn","arl2_ius","ascii","asin","atan","backslash","balanc","banner","base2dec","basename","bdiag","beep","besselh","besseli","besselj","besselk","bessely","beta","bezout","bfinit","blkfc1i","blkslvi","bool2s","browsehistory","browsevar","bsplin3val","buildDocv2","buildouttb","bvode","c_link","calerf","call","callblk","captions","cd","cdfbet","cdfbin","cdfchi","cdfchn","cdff","cdffnc","cdfgam","cdfnbn","cdfnor","cdfpoi","cdft","ceil","champ","champ1","chdir","chol","clc","clean","clear","clear_pixmap","clearfun","clearglobal","closeEditor","closeXcos","code2str","coeff","comp","completion","conj","contour2di","contr","conv2","convstr","copy","copyfile","corr","cos","coserror","createdir","cshep2d","ctree2","ctree3","ctree4","cumprod","cumsum","curblock","curblockc","dasrt","dassl","data2sig","debug","dec2base","deff","definedfields","degree","delbpt","delete","deletefile","delip","delmenu","det","dgettext","dhinf","diag","diary","diffobjs","disp","dispbpt","displayhistory","disposefftwlibrary","dlgamma","dnaupd","dneupd","double","draw","drawaxis","drawlater","drawnow","dsaupd","dsearch","dseupd","duplicate","editor","editvar","emptystr","end_scicosim","ereduc","errcatch","errclear","error","eval_cshep2d","exec","execstr","exists","exit","exp","expm","exportUI","export_to_hdf5","eye","fadj2sp","fec","feval","fft","fftw","fftw_flags","fftw_forget_wisdom","fftwlibraryisloaded","file","filebrowser","fileext","fileinfo","fileparts","filesep","find","findBD","findfiles","floor","format","fort","fprintfMat","freq","frexp","fromc","fromjava","fscanfMat","fsolve","fstair","full","fullpath","funcprot","funptr","gamma","gammaln","geom3d","get","get_absolute_file_path","get_fftw_wisdom","getblocklabel","getcallbackobject","getdate","getdebuginfo","getdefaultlanguage","getdrives","getdynlibext","getenv","getfield","gethistory","gethistoryfile","getinstalledl
ookandfeels","getio","getlanguage","getlongpathname","getlookandfeel","getmd5","getmemory","getmodules","getos","getpid","getrelativefilename","getscicosvars","getscilabmode","getshortpathname","gettext","getvariablesonstack","getversion","glist","global","glue","grand","grayplot","grep","gsort","gstacksize","havewindow","helpbrowser","hess","hinf","historymanager","historysize","host","iconvert","iconvert","ieee","ilib_verbose","imag","impl","import_from_hdf5","imult","inpnvi","int","int16","int2d","int32","int3d","int8","interp","interp2d","interp3d","intg","intppty","inttype","inv","is_handle_valid","isalphanum","isascii","isdef","isdigit","isdir","isequal","isequalbitwise","iserror","isfile","isglobal","isletter","isreal","iswaitingforinput","javaclasspath","javalibrarypath","kron","lasterror","ldiv","ldivf","legendre","length","lib","librarieslist","libraryinfo","linear_interpn","lines","link","linmeq","list","load","loadScicos","loadfftwlibrary","loadhistory","log","log1p","lsq","lsq_splin","lsqrsolve","lsslist","lstcat","lstsize","ltitr","lu","ludel","lufact","luget","lusolve","macr2lst","macr2tree","matfile_close","matfile_listvar","matfile_open","matfile_varreadnext","matfile_varwrite","matrix","max","maxfiles","mclearerr","mclose","meof","merror","messagebox","mfprintf","mfscanf","mget","mgeti","mgetl","mgetstr","min","mlist","mode","model2blk","mopen","move","movefile","mprintf","mput","mputl","mputstr","mscanf","mseek","msprintf","msscanf","mtell","mtlb_mode","mtlb_sparse","mucomp","mulf","nearfloat","newaxes","newest","newfun","nnz","notify","number_properties","ode","odedc","ones","opentk","optim","or","ordmmd","parallel_concurrency","parallel_run","param3d","param3d1","part","pathconvert","pathsep","phase_simulation","plot2d","plot2d1","plot2d2","plot2d3","plot2d4","plot3d","plot3d1","pointer_xproperty","poly","ppol","pppdiv","predef","print","printf","printfigure","printsetupbox","prod","progressionbar","prompt","pwd","qld","qp_solve","qr","raise_win
dow","rand","rankqr","rat","rcond","rdivf","read","read4b","readb","readgateway","readmps","real","realtime","realtimeinit","regexp","relocate_handle","remez","removedir","removelinehistory","res_with_prec","resethistory","residu","resume","return","ricc","ricc_old","rlist","roots","rotate_axes","round","rpem","rtitr","rubberbox","save","saveafterncommands","saveconsecutivecommands","savehistory","schur","sci_haltscicos","sci_tree2","sci_tree3","sci_tree4","sciargs","scicos_debug","scicos_debug_count","scicos_time","scicosim","scinotes","sctree","semidef","set","set_blockerror","set_fftw_wisdom","set_xproperty","setbpt","setdefaultlanguage","setenv","setfield","sethistoryfile","setlanguage","setlookandfeel","setmenu","sfact","sfinit","show_pixmap","show_window","showalluimenushandles","sident","sig2data","sign","simp","simp_mode","sin","size","slash","sleep","sorder","sparse","spchol","spcompack","spec","spget","splin","splin2d","splin3d","spones","sprintf","sqrt","stacksize","str2code","strcat","strchr","strcmp","strcspn","strindex","string","stringbox","stripblanks","strncpy","strrchr","strrev","strsplit","strspn","strstr","strsubst","strtod","strtok","subf","sum","svd","swap_handles","symfcti","syredi","system_getproperty","system_setproperty","ta2lpd","tan","taucs_chdel","taucs_chfact","taucs_chget","taucs_chinfo","taucs_chsolve","tempname","testmatrix","timer","tlist","tohome","tokens","toolbar","toprint","tr_zer","tril","triu","type","typename","uiDisplayTree","uicontextmenu","uicontrol","uigetcolor","uigetdir","uigetfile","uigetfont","uimenu","uint16","uint32","uint8","uipopup","uiputfile","uiwait","ulink","umf_ludel","umf_lufact","umf_luget","umf_luinfo","umf_lusolve","umfpack","unglue","unix","unsetmenu","unzoom","updatebrowsevar","usecanvas","user","var2vec","varn","vec2var","waitbar","warnBlockByUID","warning","what","where","whereis","who","winsid","with_embedded_jre","with_module","writb","write","write4b","x_choose","x_choose_modeless","x_dialog","x_md
ialog","xarc","xarcs","xarrows","xchange","xchoicesi","xclick","xcos","xcosAddToolsMenu","xcosConfigureXmlFile","xcosDiagramToScilab","xcosPalCategoryAdd","xcosPalDelete","xcosPalDisable","xcosPalEnable","xcosPalGenerateIcon","xcosPalLoad","xcosPalMove","xcosUpdateBlock","xdel","xfarc","xfarcs","xfpoly","xfpolys","xfrect","xget","xgetech","xgetmouse","xgraduate","xgrid","xlfont","xls_open","xls_read","xmlAddNs","xmlAsNumber","xmlAsText","xmlDTD","xmlDelete","xmlDocument","xmlDump","xmlElement","xmlFormat","xmlGetNsByHref","xmlGetNsByPrefix","xmlGetOpenDocs","xmlIsValidObject","xmlNs","xmlRead","xmlReadStr","xmlRelaxNG","xmlRemove","xmlSchema","xmlSetAttributes","xmlValidate","xmlWrite","xmlXPath","xname","xpause","xpoly","xpolys","xrect","xrects","xs2bmp","xs2eps","xs2gif","xs2jpg","xs2pdf","xs2png","xs2ppm","xs2ps","xs2svg","xsegs","xset","xsetech","xstring","xstringb","xtitle","zeros","znaupd","zneupd","zoom_rect"] commands_kw = ["abort","apropos","break","case","catch","clc","clear","continue","do","else","elseif","end","endfunction","exit","for","function","help","if","pause","pwd","quit","resume","return","select","then","try","what","while","who"] macros_kw = 
["%0_i_st","%3d_i_h","%Block_xcosUpdateBlock","%TNELDER_p","%TNELDER_string","%TNMPLOT_p","%TNMPLOT_string","%TOPTIM_p","%TOPTIM_string","%TSIMPLEX_p","%TSIMPLEX_string","%_gsort","%_strsplit","%ar_p","%asn","%b_a_b","%b_a_s","%b_c_s","%b_c_spb","%b_cumprod","%b_cumsum","%b_d_s","%b_diag","%b_e","%b_f_s","%b_f_spb","%b_g_s","%b_g_spb","%b_h_s","%b_h_spb","%b_i_b","%b_i_ce","%b_i_h","%b_i_hm","%b_i_s","%b_i_sp","%b_i_spb","%b_i_st","%b_iconvert","%b_l_b","%b_l_s","%b_m_b","%b_m_s","%b_matrix","%b_n_hm","%b_o_hm","%b_p_s","%b_prod","%b_r_b","%b_r_s","%b_s_b","%b_s_s","%b_string","%b_sum","%b_tril","%b_triu","%b_x_b","%b_x_s","%c_a_c","%c_b_c","%c_b_s","%c_diag","%c_e","%c_eye","%c_f_s","%c_i_c","%c_i_ce","%c_i_h","%c_i_hm","%c_i_lss","%c_i_r","%c_i_s","%c_i_st","%c_matrix","%c_n_l","%c_n_st","%c_o_l","%c_o_st","%c_ones","%c_rand","%c_tril","%c_triu","%cblock_c_cblock","%cblock_c_s","%cblock_e","%cblock_f_cblock","%cblock_p","%cblock_size","%ce_6","%ce_c_ce","%ce_e","%ce_f_ce","%ce_i_ce","%ce_i_s","%ce_i_st","%ce_matrix","%ce_p","%ce_size","%ce_string","%ce_t","%champdat_i_h","%choose","%diagram_xcos","%dir_p","%fptr_i_st","%grayplot_i_h","%h_i_st","%hm_1_hm","%hm_1_s","%hm_2_hm","%hm_2_s","%hm_3_hm","%hm_3_s","%hm_4_hm","%hm_4_s","%hm_5","%hm_a_hm","%hm_a_r","%hm_a_s","%hm_abs","%hm_and","%hm_bool2s","%hm_c_hm","%hm_ceil","%hm_conj","%hm_cos","%hm_cumprod","%hm_cumsum","%hm_d_hm","%hm_d_s","%hm_degree","%hm_e","%hm_exp","%hm_f_hm","%hm_fft","%hm_find","%hm_floor","%hm_g_hm","%hm_h_hm","%hm_i_b","%hm_i_ce","%hm_i_hm","%hm_i_i","%hm_i_p","%hm_i_r","%hm_i_s","%hm_i_st","%hm_iconvert","%hm_imag","%hm_int","%hm_isnan","%hm_isreal","%hm_j_hm","%hm_j_s","%hm_k_hm","%hm_k_s","%hm_log","%hm_m_p","%hm_m_r","%hm_m_s","%hm_matrix","%hm_maxi","%hm_mean","%hm_median","%hm_mini","%hm_n_b","%hm_n_c","%hm_n_hm","%hm_n_i","%hm_n_p","%hm_n_s","%hm_o_b","%hm_o_c","%hm_o_hm","%hm_o_i","%hm_o_p","%hm_o_s","%hm_ones","%hm_or","%hm_p","%hm_prod","%hm_q_hm","%hm_r_s","%hm_rand","%hm_real","%h
m_round","%hm_s","%hm_s_hm","%hm_s_r","%hm_s_s","%hm_sign","%hm_sin","%hm_size","%hm_sqrt","%hm_st_deviation","%hm_string","%hm_sum","%hm_x_hm","%hm_x_p","%hm_x_s","%hm_zeros","%i_1_s","%i_2_s","%i_3_s","%i_4_s","%i_Matplot","%i_a_i","%i_a_s","%i_and","%i_ascii","%i_b_s","%i_bezout","%i_champ","%i_champ1","%i_contour","%i_contour2d","%i_d_i","%i_d_s","%i_e","%i_fft","%i_g_i","%i_gcd","%i_h_i","%i_i_ce","%i_i_h","%i_i_hm","%i_i_i","%i_i_s","%i_i_st","%i_j_i","%i_j_s","%i_l_s","%i_lcm","%i_length","%i_m_i","%i_m_s","%i_mfprintf","%i_mprintf","%i_msprintf","%i_n_s","%i_o_s","%i_or","%i_p_i","%i_p_s","%i_plot2d","%i_plot2d1","%i_plot2d2","%i_q_s","%i_r_i","%i_r_s","%i_round","%i_s_i","%i_s_s","%i_sign","%i_string","%i_x_i","%i_x_s","%ip_a_s","%ip_i_st","%ip_m_s","%ip_n_ip","%ip_o_ip","%ip_p","%ip_s_s","%ip_string","%k","%l_i_h","%l_i_s","%l_i_st","%l_isequal","%l_n_c","%l_n_l","%l_n_m","%l_n_p","%l_n_s","%l_n_st","%l_o_c","%l_o_l","%l_o_m","%l_o_p","%l_o_s","%l_o_st","%lss_a_lss","%lss_a_p","%lss_a_r","%lss_a_s","%lss_c_lss","%lss_c_p","%lss_c_r","%lss_c_s","%lss_e","%lss_eye","%lss_f_lss","%lss_f_p","%lss_f_r","%lss_f_s","%lss_i_ce","%lss_i_lss","%lss_i_p","%lss_i_r","%lss_i_s","%lss_i_st","%lss_inv","%lss_l_lss","%lss_l_p","%lss_l_r","%lss_l_s","%lss_m_lss","%lss_m_p","%lss_m_r","%lss_m_s","%lss_n_lss","%lss_n_p","%lss_n_r","%lss_n_s","%lss_norm","%lss_o_lss","%lss_o_p","%lss_o_r","%lss_o_s","%lss_ones","%lss_r_lss","%lss_r_p","%lss_r_r","%lss_r_s","%lss_rand","%lss_s","%lss_s_lss","%lss_s_p","%lss_s_r","%lss_s_s","%lss_size","%lss_t","%lss_v_lss","%lss_v_p","%lss_v_r","%lss_v_s","%lt_i_s","%m_n_l","%m_o_l","%mc_i_h","%mc_i_s","%mc_i_st","%mc_n_st","%mc_o_st","%mc_string","%mps_p","%mps_string","%msp_a_s","%msp_abs","%msp_e","%msp_find","%msp_i_s","%msp_i_st","%msp_length","%msp_m_s","%msp_maxi","%msp_n_msp","%msp_nnz","%msp_o_msp","%msp_p","%msp_sparse","%msp_spones","%msp_t","%p_a_lss","%p_a_r","%p_c_lss","%p_c_r","%p_cumprod","%p_cumsum","%p_d_p","%p_d_r","%p_d_s",
"%p_det","%p_e","%p_f_lss","%p_f_r","%p_i_ce","%p_i_h","%p_i_hm","%p_i_lss","%p_i_p","%p_i_r","%p_i_s","%p_i_st","%p_inv","%p_j_s","%p_k_p","%p_k_r","%p_k_s","%p_l_lss","%p_l_p","%p_l_r","%p_l_s","%p_m_hm","%p_m_lss","%p_m_r","%p_matrix","%p_n_l","%p_n_lss","%p_n_r","%p_o_l","%p_o_lss","%p_o_r","%p_o_sp","%p_p_s","%p_prod","%p_q_p","%p_q_r","%p_q_s","%p_r_lss","%p_r_p","%p_r_r","%p_r_s","%p_s_lss","%p_s_r","%p_simp","%p_string","%p_sum","%p_v_lss","%p_v_p","%p_v_r","%p_v_s","%p_x_hm","%p_x_r","%p_y_p","%p_y_r","%p_y_s","%p_z_p","%p_z_r","%p_z_s","%r_a_hm","%r_a_lss","%r_a_p","%r_a_r","%r_a_s","%r_c_lss","%r_c_p","%r_c_r","%r_c_s","%r_clean","%r_cumprod","%r_d_p","%r_d_r","%r_d_s","%r_det","%r_diag","%r_e","%r_eye","%r_f_lss","%r_f_p","%r_f_r","%r_f_s","%r_i_ce","%r_i_hm","%r_i_lss","%r_i_p","%r_i_r","%r_i_s","%r_i_st","%r_inv","%r_j_s","%r_k_p","%r_k_r","%r_k_s","%r_l_lss","%r_l_p","%r_l_r","%r_l_s","%r_m_hm","%r_m_lss","%r_m_p","%r_m_r","%r_m_s","%r_matrix","%r_n_lss","%r_n_p","%r_n_r","%r_n_s","%r_norm","%r_o_lss","%r_o_p","%r_o_r","%r_o_s","%r_ones","%r_p","%r_p_s","%r_prod","%r_q_p","%r_q_r","%r_q_s","%r_r_lss","%r_r_p","%r_r_r","%r_r_s","%r_rand","%r_s","%r_s_hm","%r_s_lss","%r_s_p","%r_s_r","%r_s_s","%r_simp","%r_size","%r_string","%r_sum","%r_t","%r_tril","%r_triu","%r_v_lss","%r_v_p","%r_v_r","%r_v_s","%r_x_p","%r_x_r","%r_x_s","%r_y_p","%r_y_r","%r_y_s","%r_z_p","%r_z_r","%r_z_s","%s_1_hm","%s_1_i","%s_2_hm","%s_2_i","%s_3_hm","%s_3_i","%s_4_hm","%s_4_i","%s_5","%s_a_b","%s_a_hm","%s_a_i","%s_a_ip","%s_a_lss","%s_a_msp","%s_a_r","%s_a_sp","%s_and","%s_b_i","%s_b_s","%s_c_b","%s_c_cblock","%s_c_lss","%s_c_r","%s_c_sp","%s_d_b","%s_d_i","%s_d_p","%s_d_r","%s_d_sp","%s_e","%s_f_b","%s_f_cblock","%s_f_lss","%s_f_r","%s_f_sp","%s_g_b","%s_g_s","%s_h_b","%s_h_s","%s_i_b","%s_i_c","%s_i_ce","%s_i_h","%s_i_hm","%s_i_i","%s_i_lss","%s_i_p","%s_i_r","%s_i_s","%s_i_sp","%s_i_spb","%s_i_st","%s_j_i","%s_k_hm","%s_k_p","%s_k_r","%s_k_sp","%s_l_b","%s_l_hm","%s_l_i","%s_
l_lss","%s_l_p","%s_l_r","%s_l_s","%s_l_sp","%s_m_b","%s_m_hm","%s_m_i","%s_m_ip","%s_m_lss","%s_m_msp","%s_m_r","%s_matrix","%s_n_hm","%s_n_i","%s_n_l","%s_n_lss","%s_n_r","%s_n_st","%s_o_hm","%s_o_i","%s_o_l","%s_o_lss","%s_o_r","%s_o_st","%s_or","%s_p_b","%s_p_i","%s_pow","%s_q_hm","%s_q_i","%s_q_p","%s_q_r","%s_q_sp","%s_r_b","%s_r_i","%s_r_lss","%s_r_p","%s_r_r","%s_r_s","%s_r_sp","%s_s_b","%s_s_hm","%s_s_i","%s_s_ip","%s_s_lss","%s_s_r","%s_s_sp","%s_simp","%s_v_lss","%s_v_p","%s_v_r","%s_v_s","%s_x_b","%s_x_hm","%s_x_i","%s_x_r","%s_y_p","%s_y_r","%s_y_sp","%s_z_p","%s_z_r","%s_z_sp","%sn","%sp_a_s","%sp_a_sp","%sp_and","%sp_c_s","%sp_ceil","%sp_cos","%sp_cumprod","%sp_cumsum","%sp_d_s","%sp_d_sp","%sp_diag","%sp_e","%sp_exp","%sp_f_s","%sp_floor","%sp_gsort","%sp_i_ce","%sp_i_h","%sp_i_s","%sp_i_sp","%sp_i_st","%sp_int","%sp_inv","%sp_k_s","%sp_k_sp","%sp_l_s","%sp_l_sp","%sp_length","%sp_norm","%sp_or","%sp_p_s","%sp_prod","%sp_q_s","%sp_q_sp","%sp_r_s","%sp_r_sp","%sp_round","%sp_s_s","%sp_s_sp","%sp_sin","%sp_sqrt","%sp_string","%sp_sum","%sp_tril","%sp_triu","%sp_y_s","%sp_y_sp","%sp_z_s","%sp_z_sp","%spb_and","%spb_c_b","%spb_cumprod","%spb_cumsum","%spb_diag","%spb_e","%spb_f_b","%spb_g_b","%spb_g_spb","%spb_h_b","%spb_h_spb","%spb_i_b","%spb_i_ce","%spb_i_h","%spb_i_st","%spb_or","%spb_prod","%spb_sum","%spb_tril","%spb_triu","%st_6","%st_c_st","%st_e","%st_f_st","%st_i_b","%st_i_c","%st_i_fptr","%st_i_h","%st_i_i","%st_i_ip","%st_i_lss","%st_i_msp","%st_i_p","%st_i_r","%st_i_s","%st_i_sp","%st_i_spb","%st_i_st","%st_matrix","%st_n_c","%st_n_l","%st_n_mc","%st_n_p","%st_n_s","%st_o_c","%st_o_l","%st_o_mc","%st_o_p","%st_o_s","%st_o_tl","%st_p","%st_size","%st_string","%st_t","%ticks_i_h","%xls_e","%xls_p","%xlssheet_e","%xlssheet_p","%xlssheet_size","%xlssheet_string","DominationRank","G_make","IsAScalar","NDcost","OS_Version","PlotSparse","ReadHBSparse","ReadmiMatrix","TCL_CreateSlave","WritemiMatrix","abcd","abinv","accept_func_default","accept_func
_vfsa","acf","acosd","acosh","acoshm","acosm","acot","acotd","acoth","acsc","acscd","acsch","add_demo","add_help_chapter","add_module_help_chapter","add_param","add_profiling","adj2sp","aff2ab","ana_style","analpf","analyze","aplat","apropos","arhnk","arl2","arma2p","armac","armax","armax1","arobasestring2strings","arsimul","ascii2string","asciimat","asec","asecd","asech","asind","asinh","asinhm","asinm","assert_checkalmostequal","assert_checkequal","assert_checkerror","assert_checkfalse","assert_checkfilesequal","assert_checktrue","assert_comparecomplex","assert_computedigits","assert_cond2reltol","assert_cond2reqdigits","assert_generror","atand","atanh","atanhm","atanm","atomsAutoload","atomsAutoloadAdd","atomsAutoloadDel","atomsAutoloadList","atomsCategoryList","atomsCheckModule","atomsDepTreeShow","atomsGetConfig","atomsGetInstalled","atomsGetLoaded","atomsGetLoadedPath","atomsInstall","atomsIsInstalled","atomsIsLoaded","atomsList","atomsLoad","atomsRemove","atomsRepositoryAdd","atomsRepositoryDel","atomsRepositoryList","atomsRestoreConfig","atomsSaveConfig","atomsSearch","atomsSetConfig","atomsShow","atomsSystemInit","atomsSystemUpdate","atomsTest","atomsUpdate","atomsVersion","augment","auread","auwrite","balreal","bench_run","bilin","bilt","bin2dec","binomial","bitand","bitcmp","bitget","bitor","bitset","bitxor","black","blanks","bloc2exp","bloc2ss","block_parameter_error","bode","bstap","buttmag","bvodeS","bytecode","bytecodewalk","cainv","calendar","calfrq","canon","casc","cat","cat_code","cb_m2sci_gui","ccontrg","cell","cell2mat","cellstr","center","cepstrum","cfspec","char","chart","cheb1mag","cheb2mag","check_gateways","check_help","check_modules_xml","check_versions","chepol","chfact","chsolve","classmarkov","clean_help","clock","cls2dls","cmb_lin","cmndred","cmoment","coding_ga_binary","coding_ga_identity","coff","coffg","colcomp","colcompr","colinout","colregul","companion","complex","compute_initial_temp","cond","cond2sp","condestsp","config","config
ure_msifort","configure_msvc","cont_frm","cont_mat","contrss","conv","convert_to_float","convertindex","convol","convol2d","copfac","correl","cosd","cosh","coshm","cosm","cotd","cotg","coth","cothm","covar","createfun","createstruct","crossover_ga_binary","crossover_ga_default","csc","cscd","csch","csgn","csim","cspect","ctr_gram","czt","dae","daeoptions","damp","datafit","date","datenum","datevec","dbphi","dcf","ddp","dec2bin","dec2hex","dec2oct","del_help_chapter","del_module_help_chapter","demo_begin","demo_choose","demo_compiler","demo_end","demo_file_choice","demo_folder_choice","demo_function_choice","demo_gui","demo_mdialog","demo_message","demo_run","demo_viewCode","denom","derivat","derivative","des2ss","des2tf","detectmsifort64tools","detectmsvc64tools","determ","detr","detrend","devtools_run_builder","dft","dhnorm","diff","diophant","dir","dirname","dispfiles","dllinfo","dscr","dsimul","dt_ility","dtsi","edit","edit_error","eigenmarkov","ell1mag","enlarge_shape","entropy","eomday","epred","eqfir","eqiir","equil","equil1","erf","erfc","erfcx","erfinv","etime","eval","evans","evstr","expression2code","extract_help_examples","factor","factorial","factors","faurre","ffilt","fft2","fftshift","fieldnames","filt_sinc","filter","findABCD","findAC","findBDK","findR","find_freq","find_links","find_scicos_version","findm","findmsifortcompiler","findmsvccompiler","findx0BD","firstnonsingleton","fit_dat","fix","fixedpointgcd","flipdim","flts","fminsearch","format_txt","fourplan","fprintf","frep2tf","freson","frfit","frmag","fscanf","fseek_origin","fsfirlin","fspec","fspecg","fstabst","ftest","ftuneq","fullfile","fullrf","fullrfk","fun2string","g_margin","gainplot","gamitg","gcare","gcd","gencompilationflags_unix","generateBlockImage","generateBlockImages","generic_i_ce","generic_i_h","generic_i_hm","generic_i_s","generic_i_st","genlib","genlib_old","genmarkov","geomean","getDiagramVersion","getModelicaPath","get_file_path","get_function_path","get_param","get_profile"
,"get_scicos_version","getd","getscilabkeywords","getshell","gettklib","gfare","gfrancis","givens","glever","gmres","group","gschur","gspec","gtild","h2norm","h_cl","h_inf","h_inf_st","h_norm","hallchart","halt","hank","hankelsv","harmean","haveacompiler","head_comments","help","help_from_sci","help_skeleton","hermit","hex2dec","hilb","hilbert","horner","householder","hrmt","htrianr","hypermat","ifft","iir","iirgroup","iirlp","iirmod","ilib_build","ilib_compile","ilib_for_link","ilib_gen_Make","ilib_gen_Make_unix","ilib_gen_cleaner","ilib_gen_gateway","ilib_gen_loader","ilib_include_flag","ilib_mex_build","im_inv","importScicosDiagram","importScicosPal","importXcosDiagram","imrep2ss","ind2sub","inistate","init_ga_default","init_param","initial_scicos_tables","input","instruction2code","intc","intdec","integrate","interp1","interpln","intersect","intl","intsplin","inttrap","inv_coeff","invr","invrs","invsyslin","iqr","isLeapYear","is_absolute_path","is_param","iscell","iscellstr","isempty","isfield","isinf","isnan","isnum","issparse","isstruct","isvector","jmat","justify","kalm","karmarkar","kernel","kpure","krac2","kroneck","lattn","launchtest","lcf","lcm","lcmdiag","leastsq","leqe","leqr","lev","levin","lex_sort","lft","lin","lin2mu","lincos","lindquist","linf","linfn","linsolve","linspace","list2vec","list_param","listfiles","listfunctions","listvarinfile","lmisolver","lmitool","loadXcosLibs","loadmatfile","loadwave","log10","log2","logm","logspace","lqe","lqg","lqg2stan","lqg_ltr","lqr","ls","lyap","m2sci_gui","m_circle","macglov","macrovar","mad","makecell","manedit","mapsound","markp2ss","matfile2sci","mdelete","mean","meanf","median","mese","meshgrid","mfft","mfile2sci","minreal","minss","mkdir","modulo","moment","mrfit","msd","mstr2sci","mtlb","mtlb_0","mtlb_a","mtlb_all","mtlb_any","mtlb_axes","mtlb_axis","mtlb_beta","mtlb_box","mtlb_choices","mtlb_close","mtlb_colordef","mtlb_cond","mtlb_conv","mtlb_cov","mtlb_cumprod","mtlb_cumsum","mtlb_dec2hex","mtlb_del
ete","mtlb_diag","mtlb_diff","mtlb_dir","mtlb_double","mtlb_e","mtlb_echo","mtlb_error","mtlb_eval","mtlb_exist","mtlb_eye","mtlb_false","mtlb_fft","mtlb_fftshift","mtlb_filter","mtlb_find","mtlb_findstr","mtlb_fliplr","mtlb_fopen","mtlb_format","mtlb_fprintf","mtlb_fread","mtlb_fscanf","mtlb_full","mtlb_fwrite","mtlb_get","mtlb_grid","mtlb_hold","mtlb_i","mtlb_ifft","mtlb_image","mtlb_imp","mtlb_int16","mtlb_int32","mtlb_int8","mtlb_is","mtlb_isa","mtlb_isfield","mtlb_isletter","mtlb_isspace","mtlb_l","mtlb_legendre","mtlb_linspace","mtlb_logic","mtlb_logical","mtlb_loglog","mtlb_lower","mtlb_max","mtlb_mean","mtlb_median","mtlb_mesh","mtlb_meshdom","mtlb_min","mtlb_more","mtlb_num2str","mtlb_ones","mtlb_pcolor","mtlb_plot","mtlb_prod","mtlb_qr","mtlb_qz","mtlb_rand","mtlb_randn","mtlb_rcond","mtlb_realmax","mtlb_realmin","mtlb_repmat","mtlb_s","mtlb_semilogx","mtlb_semilogy","mtlb_setstr","mtlb_size","mtlb_sort","mtlb_sortrows","mtlb_sprintf","mtlb_sscanf","mtlb_std","mtlb_strcmp","mtlb_strcmpi","mtlb_strfind","mtlb_strrep","mtlb_subplot","mtlb_sum","mtlb_t","mtlb_toeplitz","mtlb_tril","mtlb_triu","mtlb_true","mtlb_type","mtlb_uint16","mtlb_uint32","mtlb_uint8","mtlb_upper","mtlb_var","mtlb_zeros","mu2lin","mutation_ga_binary","mutation_ga_default","mvcorrel","mvvacov","nancumsum","nand2mean","nanmax","nanmean","nanmeanf","nanmedian","nanmin","nanstdev","nansum","narsimul","ndgrid","ndims","nehari","neigh_func_csa","neigh_func_default","neigh_func_fsa","neigh_func_vfsa","neldermead_cget","neldermead_configure","neldermead_costf","neldermead_defaultoutput","neldermead_destroy","neldermead_display","neldermead_function","neldermead_get","neldermead_log","neldermead_new","neldermead_restart","neldermead_search","neldermead_updatesimp","nextpow2","nfreq","nicholschart","nlev","nmplot_cget","nmplot_configure","nmplot_contour","nmplot_destroy","nmplot_display","nmplot_function","nmplot_get","nmplot_historyplot","nmplot_log","nmplot_new","nmplot_outputcmd","nmplot_restar
t","nmplot_search","nmplot_simplexhistory","noisegen","nonreg_test_run","norm","now","null","num2cell","numdiff","numer","nyquist","nyquistfrequencybounds","obs_gram","obscont","observer","obsv_mat","obsvss","oct2dec","odeoptions","optim_ga","optim_moga","optim_nsga","optim_nsga2","optim_sa","optimbase_cget","optimbase_checkbounds","optimbase_checkcostfun","optimbase_checkx0","optimbase_configure","optimbase_destroy","optimbase_display","optimbase_function","optimbase_get","optimbase_hasbounds","optimbase_hasconstraints","optimbase_hasnlcons","optimbase_histget","optimbase_histset","optimbase_incriter","optimbase_isfeasible","optimbase_isinbounds","optimbase_isinnonlincons","optimbase_log","optimbase_logshutdown","optimbase_logstartup","optimbase_new","optimbase_outputcmd","optimbase_outstruct","optimbase_proj2bnds","optimbase_set","optimbase_stoplog","optimbase_terminate","optimget","optimplotfunccount","optimplotfval","optimplotx","optimset","optimsimplex_center","optimsimplex_check","optimsimplex_compsomefv","optimsimplex_computefv","optimsimplex_deltafv","optimsimplex_deltafvmax","optimsimplex_destroy","optimsimplex_dirmat","optimsimplex_fvmean","optimsimplex_fvstdev","optimsimplex_fvvariance","optimsimplex_getall","optimsimplex_getallfv","optimsimplex_getallx","optimsimplex_getfv","optimsimplex_getn","optimsimplex_getnbve","optimsimplex_getve","optimsimplex_getx","optimsimplex_gradientfv","optimsimplex_log","optimsimplex_new","optimsimplex_print","optimsimplex_reflect","optimsimplex_setall","optimsimplex_setallfv","optimsimplex_setallx","optimsimplex_setfv","optimsimplex_setn","optimsimplex_setnbve","optimsimplex_setve","optimsimplex_setx","optimsimplex_shrink","optimsimplex_size","optimsimplex_sort","optimsimplex_tostring","optimsimplex_xbar","orth","p_margin","pack","pareto_filter","parrot","pbig","pca","pcg","pdiv","pen2ea","pencan","pencost","penlaur","perctl","perl","perms","permute","pertrans","pfactors","pfss","phasemag","phaseplot","phc","pinv","playsnd
","plotprofile","plzr","pmodulo","pol2des","pol2str","polar","polfact","prbs_a","prettyprint","primes","princomp","profile","proj","projsl","projspec","psmall","pspect","qmr","qpsolve","quart","quaskro","rafiter","randpencil","range","rank","read_csv","readxls","recompilefunction","recons","reglin","regress","remezb","remove_param","remove_profiling","repfreq","replace_Ix_by_Fx","repmat","reset_profiling","resize_matrix","returntoscilab","rhs2code","ric_desc","riccati","rmdir","routh_t","rowcomp","rowcompr","rowinout","rowregul","rowshuff","rref","sample","samplef","samwr","savematfile","savewave","scanf","sci2exp","sciGUI_init","sci_sparse","scicos_getvalue","scicos_simulate","scicos_workspace_init","scisptdemo","scitest","sdiff","sec","secd","sech","selection_ga_elitist","selection_ga_random","sensi","set_param","setdiff","sgrid","show_margins","show_pca","showprofile","signm","sinc","sincd","sind","sinh","sinhm","sinm","sm2des","sm2ss","smga","smooth","solve","sound","soundsec","sp2adj","spaninter","spanplus","spantwo","specfact","speye","sprand","spzeros","sqroot","sqrtm","squarewave","squeeze","srfaur","srkf","ss2des","ss2ss","ss2tf","sscanf","sskf","ssprint","ssrand","st_deviation","st_i_generic","st_ility","stabil","statgain","stdev","stdevf","steadycos","strange","strcmpi","struct","sub2ind","sva","svplot","sylm","sylv","sysconv","sysdiag","sysfact","syslin","syssize","system","systmat","tabul","tand","tanh","tanhm","tanm","tbx_build_blocks","tbx_build_cleaner","tbx_build_gateway","tbx_build_gateway_clean","tbx_build_gateway_loader","tbx_build_help","tbx_build_help_loader","tbx_build_loader","tbx_build_macros","tbx_build_src","tbx_builder","tbx_builder_gateway","tbx_builder_gateway_lang","tbx_builder_help","tbx_builder_help_lang","tbx_builder_macros","tbx_builder_src","tbx_builder_src_lang","temp_law_csa","temp_law_default","temp_law_fsa","temp_law_huang","temp_law_vfsa","test_clean","test_on_columns","test_run","test_run_level","testexamples","tf2des","tf2s
s","thrownan","tic","time_id","toc","toeplitz","tokenpos","toolboxes","trace","trans","translatepaths","tree2code","trfmod","trianfml","trimmean","trisolve","trzeros","typeof","ui_observer","union","unique","unit_test_run","unix_g","unix_s","unix_w","unix_x","unobs","unpack","variance","variancef","vec2list","vectorfind","ver","warnobsolete","wavread","wavwrite","wcenter","weekday","wfir","wfir_gui","whereami","who_user","whos","wiener","wigner","winclose","window","winlist","with_javasci","with_macros_source","with_modelica_compiler","with_pvm","with_texmacs","with_tk","write_csv","xcosBlockEval","xcosBlockInterface","xcosCodeGeneration","xcosConfigureModelica","xcosPal","xcosPalAdd","xcosPalAddBlock","xcosPalExport","xcosShowBlockWarning","xcosValidateBlockSet","xcosValidateCompareBlock","xcos_compile","xcos_run","xcos_simulate","xcos_workspace_init","xmltochm","xmltoformat","xmltohtml","xmltojar","xmltopdf","xmltops","xmltoweb","yulewalk","zeropen","zgrid","zpbutt","zpch1","zpch2","zpell"] builtin_consts = ["\\$","%F","%T","%e","%eps","%f","%fftw","%gui","%i","%inf","%io","%modalWarning","%nan","%pi","%s","%t","%tk","%toolboxes","%toolboxes_dir","%z","PWD","SCI","SCIHOME","TMPDIR","a","ans","assertlib","atomslib","cacsdlib","compatibility_functilib","corelib","data_structureslib","demo_toolslib","development_toolslib","differential_equationlib","dynamic_linklib","elementary_functionslib","fd","fileiolib","functionslib","genetic_algorithmslib","helptoolslib","home","i","integerlib","interpolationlib","iolib","j","linear_algebralib","m2scilib","matiolib","modules_managerlib","myStr","neldermeadlib","optimbaselib","optimizationlib","optimsimplexlib","output_streamlib","overloadinglib","parameterslib","polynomialslib","scicos_autolib","scicos_utilslib","scinoteslib","signal_processinglib","simulated_annealinglib","soundlib","sparselib","special_functionslib","spreadsheetlib","statisticslib","stringlib","tclscilib","timelib","umfpacklib","varType","xcoslib"]
mit
postlund/home-assistant
homeassistant/components/notion/config_flow.py
3
2040
"""Config flow to configure the Notion integration."""
from aionotion import async_get_client
from aionotion.errors import NotionError
import voluptuous as vol

from homeassistant import config_entries
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client

from .const import DOMAIN


@callback
def configured_instances(hass):
    """Return a set of configured Notion instances."""
    return {
        entry.data[CONF_USERNAME]
        for entry in hass.config_entries.async_entries(DOMAIN)
    }


@config_entries.HANDLERS.register(DOMAIN)
class NotionFlowHandler(config_entries.ConfigFlow):
    """Handle a Notion config flow."""

    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL

    async def _show_form(self, errors=None):
        """Show the credentials form, optionally annotated with errors."""
        schema = vol.Schema(
            {
                vol.Required(CONF_USERNAME): str,
                vol.Required(CONF_PASSWORD): str,
            }
        )
        return self.async_show_form(
            step_id="user", data_schema=schema, errors=errors or {}
        )

    async def async_step_import(self, import_config):
        """Import a config entry from configuration.yaml."""
        # YAML import is just the user step fed with the YAML data.
        return await self.async_step_user(import_config)

    async def async_step_user(self, user_input=None):
        """Handle the start of the config flow."""
        # No (or empty) input yet: render the form.
        if not user_input:
            return await self._show_form()

        username = user_input[CONF_USERNAME]

        # Refuse to configure the same account twice.
        if username in configured_instances(self.hass):
            return await self._show_form({CONF_USERNAME: "identifier_exists"})

        websession = aiohttp_client.async_get_clientsession(self.hass)

        # Validate the credentials by attempting to create a client.
        try:
            await async_get_client(
                username, user_input[CONF_PASSWORD], websession
            )
        except NotionError:
            return await self._show_form({"base": "invalid_credentials"})

        return self.async_create_entry(title=username, data=user_input)
apache-2.0
RPI-OPENEDX/edx-platform
common/lib/symmath/symmath/test_symmath_check.py
166
2648
from unittest import TestCase

from .symmath_check import symmath_check


class SymmathCheckTest(TestCase):
    """Exercise the symbolic-math grader ``symmath_check``."""

    def test_symmath_check_integers(self):
        # Every integer must equal itself and differ from a perturbed copy.
        self._symmath_check_numbers(list(range(-100, 100)))

    def test_symmath_check_floats(self):
        self._symmath_check_numbers([k + 0.01 for k in range(-100, 100)])

    def test_symmath_check_same_symbols(self):
        expected = "x+2*y"
        mathml = '''
<math xmlns="http://www.w3.org/1998/Math/MathML">
  <mstyle displaystyle="true">
    <mrow>
      <mi>x</mi>
      <mo>+</mo>
      <mn>2</mn>
      <mo>*</mo>
      <mi>y</mi>
    </mrow>
  </mstyle>
</math>'''.strip()

        # The exact same symbolic string is marked correct.
        result = symmath_check(expected, expected, dynamath=[mathml])
        self.assertTrue('ok' in result and result['ok'])

    def test_symmath_check_equivalent_symbols(self):
        expected = "x+2*y"
        given = "x+y+y"
        mathml = '''
<math xmlns="http://www.w3.org/1998/Math/MathML">
  <mstyle displaystyle="true">
    <mrow>
      <mi>x</mi>
      <mo>+</mo>
      <mi>y</mi>
      <mo>+</mo>
      <mi>y</mi>
    </mrow>
  </mstyle>
</math>'''.strip()

        # Algebraically equivalent strings are marked correct.
        result = symmath_check(expected, given, dynamath=[mathml])
        self.assertTrue('ok' in result and result['ok'])

    def test_symmath_check_different_symbols(self):
        expected = "0"
        given = "x+y"
        mathml = '''
<math xmlns="http://www.w3.org/1998/Math/MathML">
  <mstyle displaystyle="true">
    <mrow>
      <mi>x</mi>
      <mo>+</mo>
      <mi>y</mi>
    </mrow>
  </mstyle>
</math>'''.strip()

        # A non-equivalent response is marked incorrect, without a failure
        # message leaking into ``msg``.
        result = symmath_check(expected, given, dynamath=[mathml])
        self.assertTrue('ok' in result and not result['ok'])
        self.assertFalse('fail' in result['msg'])

    def _symmath_check_numbers(self, number_list):
        for value in number_list:
            # expect == ans, so the grader should accept the answer.
            result = symmath_check(str(value), str(value))
            self.assertTrue('ok' in result and result['ok'],
                            "%f should == %f" % (value, value))

            # Perturb the expected value so it no longer matches.
            shifted = value + 0.1
            result = symmath_check(str(shifted), str(value))
            self.assertTrue('ok' in result and not result['ok'],
                            "%f should != %f" % (shifted, value))
agpl-3.0
bodi000/odoo
addons/l10n_be/wizard/__init__.py
438
1145
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import l10n_be_partner_vat_listing import l10n_be_vat_intra import l10n_be_account_vat_declaration # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
aperigault/ansible
lib/ansible/module_utils/facts/system/selinux.py
162
3207
# Collect facts related to selinux
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.module_utils.facts.collector import BaseFactCollector

try:
    import selinux
    HAVE_SELINUX = True
except ImportError:
    HAVE_SELINUX = False

# Map libselinux enforce-mode integers to human-readable names.
SELINUX_MODE_DICT = {
    1: 'enforcing',
    0: 'permissive',
    -1: 'disabled'
}


class SelinuxFactCollector(BaseFactCollector):
    """Collect SELinux status, mode and policy facts via libselinux."""

    name = 'selinux'
    _fact_ids = set()

    def collect(self, module=None, collected_facts=None):
        selinux_facts = {}

        # Without the selinux Python bindings there is no way to tell
        # whether SELinux is enabled, so report only the missing library.
        if not HAVE_SELINUX:
            selinux_facts['status'] = 'Missing selinux Python library'
            return {
                'selinux': selinux_facts,
                'selinux_python_present': False,
            }

        facts_dict = {'selinux_python_present': True}

        if not selinux.is_selinux_enabled():
            selinux_facts['status'] = 'disabled'
        else:
            selinux_facts['status'] = 'enabled'

            # Each probe below degrades to 'unknown' when the binding lacks
            # the call (AttributeError) or the kernel interface errors out.
            try:
                selinux_facts['policyvers'] = selinux.security_policyvers()
            except (AttributeError, OSError):
                selinux_facts['policyvers'] = 'unknown'

            try:
                rc, configmode = selinux.selinux_getenforcemode()
                if rc == 0:
                    selinux_facts['config_mode'] = SELINUX_MODE_DICT.get(
                        configmode, 'unknown')
                else:
                    selinux_facts['config_mode'] = 'unknown'
            except (AttributeError, OSError):
                selinux_facts['config_mode'] = 'unknown'

            try:
                running_mode = selinux.security_getenforce()
                selinux_facts['mode'] = SELINUX_MODE_DICT.get(
                    running_mode, 'unknown')
            except (AttributeError, OSError):
                selinux_facts['mode'] = 'unknown'

            try:
                rc, policytype = selinux.selinux_getpolicytype()
                selinux_facts['type'] = policytype if rc == 0 else 'unknown'
            except (AttributeError, OSError):
                selinux_facts['type'] = 'unknown'

        facts_dict['selinux'] = selinux_facts
        return facts_dict
gpl-3.0
deanishe/alfred-packal-search
src/workflow/workflow.py
1
98456
# encoding: utf-8 # # Copyright (c) 2014 Dean Jackson <deanishe@deanishe.net> # # MIT Licence. See http://opensource.org/licenses/MIT # # Created on 2014-02-15 # """The :class:`Workflow` object is the main interface to this library. :class:`Workflow` is targeted at Alfred 2. Use :class:`~workflow.Workflow3` if you want to use Alfred 3's new features, such as :ref:`workflow variables <workflow-variables>` or more powerful modifiers. See :ref:`setup` in the :ref:`user-manual` for an example of how to set up your Python script to best utilise the :class:`Workflow` object. """ from __future__ import print_function, unicode_literals import atexit import binascii from contextlib import contextmanager import cPickle from copy import deepcopy import errno import json import logging import logging.handlers import os import pickle import plistlib import re import shutil import signal import string import subprocess import sys import time import unicodedata try: import xml.etree.cElementTree as ET except ImportError: # pragma: no cover import xml.etree.ElementTree as ET #: Sentinel for properties that haven't been set yet (that might #: correctly have the value ``None``) UNSET = object() #################################################################### # Standard system icons #################################################################### # These icons are default macOS icons. They are super-high quality, and # will be familiar to users. # This library uses `ICON_ERROR` when a workflow dies in flames, so # in my own workflows, I use `ICON_WARNING` for less fatal errors # (e.g. bad user input, no results etc.) # The system icons are all in this directory. 
There are many more than # are listed here ICON_ROOT = '/System/Library/CoreServices/CoreTypes.bundle/Contents/Resources' ICON_ACCOUNT = os.path.join(ICON_ROOT, 'Accounts.icns') ICON_BURN = os.path.join(ICON_ROOT, 'BurningIcon.icns') ICON_CLOCK = os.path.join(ICON_ROOT, 'Clock.icns') ICON_COLOR = os.path.join(ICON_ROOT, 'ProfileBackgroundColor.icns') ICON_COLOUR = ICON_COLOR # Queen's English, if you please ICON_EJECT = os.path.join(ICON_ROOT, 'EjectMediaIcon.icns') # Shown when a workflow throws an error ICON_ERROR = os.path.join(ICON_ROOT, 'AlertStopIcon.icns') ICON_FAVORITE = os.path.join(ICON_ROOT, 'ToolbarFavoritesIcon.icns') ICON_FAVOURITE = ICON_FAVORITE ICON_GROUP = os.path.join(ICON_ROOT, 'GroupIcon.icns') ICON_HELP = os.path.join(ICON_ROOT, 'HelpIcon.icns') ICON_HOME = os.path.join(ICON_ROOT, 'HomeFolderIcon.icns') ICON_INFO = os.path.join(ICON_ROOT, 'ToolbarInfo.icns') ICON_NETWORK = os.path.join(ICON_ROOT, 'GenericNetworkIcon.icns') ICON_NOTE = os.path.join(ICON_ROOT, 'AlertNoteIcon.icns') ICON_SETTINGS = os.path.join(ICON_ROOT, 'ToolbarAdvanced.icns') ICON_SWIRL = os.path.join(ICON_ROOT, 'ErasingIcon.icns') ICON_SWITCH = os.path.join(ICON_ROOT, 'General.icns') ICON_SYNC = os.path.join(ICON_ROOT, 'Sync.icns') ICON_TRASH = os.path.join(ICON_ROOT, 'TrashIcon.icns') ICON_USER = os.path.join(ICON_ROOT, 'UserIcon.icns') ICON_WARNING = os.path.join(ICON_ROOT, 'AlertCautionIcon.icns') ICON_WEB = os.path.join(ICON_ROOT, 'BookmarkIcon.icns') #################################################################### # non-ASCII to ASCII diacritic folding. 
# Used by `fold_to_ascii` method #################################################################### ASCII_REPLACEMENTS = { 'À': 'A', 'Á': 'A', 'Â': 'A', 'Ã': 'A', 'Ä': 'A', 'Å': 'A', 'Æ': 'AE', 'Ç': 'C', 'È': 'E', 'É': 'E', 'Ê': 'E', 'Ë': 'E', 'Ì': 'I', 'Í': 'I', 'Î': 'I', 'Ï': 'I', 'Ð': 'D', 'Ñ': 'N', 'Ò': 'O', 'Ó': 'O', 'Ô': 'O', 'Õ': 'O', 'Ö': 'O', 'Ø': 'O', 'Ù': 'U', 'Ú': 'U', 'Û': 'U', 'Ü': 'U', 'Ý': 'Y', 'Þ': 'Th', 'ß': 'ss', 'à': 'a', 'á': 'a', 'â': 'a', 'ã': 'a', 'ä': 'a', 'å': 'a', 'æ': 'ae', 'ç': 'c', 'è': 'e', 'é': 'e', 'ê': 'e', 'ë': 'e', 'ì': 'i', 'í': 'i', 'î': 'i', 'ï': 'i', 'ð': 'd', 'ñ': 'n', 'ò': 'o', 'ó': 'o', 'ô': 'o', 'õ': 'o', 'ö': 'o', 'ø': 'o', 'ù': 'u', 'ú': 'u', 'û': 'u', 'ü': 'u', 'ý': 'y', 'þ': 'th', 'ÿ': 'y', 'Ł': 'L', 'ł': 'l', 'Ń': 'N', 'ń': 'n', 'Ņ': 'N', 'ņ': 'n', 'Ň': 'N', 'ň': 'n', 'Ŋ': 'ng', 'ŋ': 'NG', 'Ō': 'O', 'ō': 'o', 'Ŏ': 'O', 'ŏ': 'o', 'Ő': 'O', 'ő': 'o', 'Œ': 'OE', 'œ': 'oe', 'Ŕ': 'R', 'ŕ': 'r', 'Ŗ': 'R', 'ŗ': 'r', 'Ř': 'R', 'ř': 'r', 'Ś': 'S', 'ś': 's', 'Ŝ': 'S', 'ŝ': 's', 'Ş': 'S', 'ş': 's', 'Š': 'S', 'š': 's', 'Ţ': 'T', 'ţ': 't', 'Ť': 'T', 'ť': 't', 'Ŧ': 'T', 'ŧ': 't', 'Ũ': 'U', 'ũ': 'u', 'Ū': 'U', 'ū': 'u', 'Ŭ': 'U', 'ŭ': 'u', 'Ů': 'U', 'ů': 'u', 'Ű': 'U', 'ű': 'u', 'Ŵ': 'W', 'ŵ': 'w', 'Ŷ': 'Y', 'ŷ': 'y', 'Ÿ': 'Y', 'Ź': 'Z', 'ź': 'z', 'Ż': 'Z', 'ż': 'z', 'Ž': 'Z', 'ž': 'z', 'ſ': 's', 'Α': 'A', 'Β': 'B', 'Γ': 'G', 'Δ': 'D', 'Ε': 'E', 'Ζ': 'Z', 'Η': 'E', 'Θ': 'Th', 'Ι': 'I', 'Κ': 'K', 'Λ': 'L', 'Μ': 'M', 'Ν': 'N', 'Ξ': 'Ks', 'Ο': 'O', 'Π': 'P', 'Ρ': 'R', 'Σ': 'S', 'Τ': 'T', 'Υ': 'U', 'Φ': 'Ph', 'Χ': 'Kh', 'Ψ': 'Ps', 'Ω': 'O', 'α': 'a', 'β': 'b', 'γ': 'g', 'δ': 'd', 'ε': 'e', 'ζ': 'z', 'η': 'e', 'θ': 'th', 'ι': 'i', 'κ': 'k', 'λ': 'l', 'μ': 'm', 'ν': 'n', 'ξ': 'x', 'ο': 'o', 'π': 'p', 'ρ': 'r', 'ς': 's', 'σ': 's', 'τ': 't', 'υ': 'u', 'φ': 'ph', 'χ': 'kh', 'ψ': 'ps', 'ω': 'o', 'А': 'A', 'Б': 'B', 'В': 'V', 'Г': 'G', 'Д': 'D', 'Е': 'E', 'Ж': 'Zh', 'З': 'Z', 'И': 'I', 'Й': 'I', 'К': 'K', 'Л': 'L', 'М': 'M', 'Н': 'N', 'О': 
'O', 'П': 'P', 'Р': 'R', 'С': 'S', 'Т': 'T', 'У': 'U', 'Ф': 'F', 'Х': 'Kh', 'Ц': 'Ts', 'Ч': 'Ch', 'Ш': 'Sh', 'Щ': 'Shch', 'Ъ': "'", 'Ы': 'Y', 'Ь': "'", 'Э': 'E', 'Ю': 'Iu', 'Я': 'Ia', 'а': 'a', 'б': 'b', 'в': 'v', 'г': 'g', 'д': 'd', 'е': 'e', 'ж': 'zh', 'з': 'z', 'и': 'i', 'й': 'i', 'к': 'k', 'л': 'l', 'м': 'm', 'н': 'n', 'о': 'o', 'п': 'p', 'р': 'r', 'с': 's', 'т': 't', 'у': 'u', 'ф': 'f', 'х': 'kh', 'ц': 'ts', 'ч': 'ch', 'ш': 'sh', 'щ': 'shch', 'ъ': "'", 'ы': 'y', 'ь': "'", 'э': 'e', 'ю': 'iu', 'я': 'ia', # 'ᴀ': '', # 'ᴁ': '', # 'ᴂ': '', # 'ᴃ': '', # 'ᴄ': '', # 'ᴅ': '', # 'ᴆ': '', # 'ᴇ': '', # 'ᴈ': '', # 'ᴉ': '', # 'ᴊ': '', # 'ᴋ': '', # 'ᴌ': '', # 'ᴍ': '', # 'ᴎ': '', # 'ᴏ': '', # 'ᴐ': '', # 'ᴑ': '', # 'ᴒ': '', # 'ᴓ': '', # 'ᴔ': '', # 'ᴕ': '', # 'ᴖ': '', # 'ᴗ': '', # 'ᴘ': '', # 'ᴙ': '', # 'ᴚ': '', # 'ᴛ': '', # 'ᴜ': '', # 'ᴝ': '', # 'ᴞ': '', # 'ᴟ': '', # 'ᴠ': '', # 'ᴡ': '', # 'ᴢ': '', # 'ᴣ': '', # 'ᴤ': '', # 'ᴥ': '', 'ᴦ': 'G', 'ᴧ': 'L', 'ᴨ': 'P', 'ᴩ': 'R', 'ᴪ': 'PS', 'ẞ': 'Ss', 'Ỳ': 'Y', 'ỳ': 'y', 'Ỵ': 'Y', 'ỵ': 'y', 'Ỹ': 'Y', 'ỹ': 'y', } #################################################################### # Smart-to-dumb punctuation mapping #################################################################### DUMB_PUNCTUATION = { '‘': "'", '’': "'", '‚': "'", '“': '"', '”': '"', '„': '"', '–': '-', '—': '-' } #################################################################### # Used by `Workflow.filter` #################################################################### # Anchor characters in a name #: Characters that indicate the beginning of a "word" in CamelCase INITIALS = string.ascii_uppercase + string.digits #: Split on non-letters, numbers split_on_delimiters = re.compile('[^a-zA-Z0-9]').split # Match filter flags #: Match items that start with ``query`` MATCH_STARTSWITH = 1 #: Match items whose capital letters start with ``query`` MATCH_CAPITALS = 2 #: Match items with a component "word" that matches ``query`` MATCH_ATOM = 4 #: Match items whose initials 
#: Match items whose initials (based on atoms) start with ``query``
MATCH_INITIALS_STARTSWITH = 8
#: Match items whose initials (based on atoms) contain ``query``
MATCH_INITIALS_CONTAIN = 16
#: Combination of :const:`MATCH_INITIALS_STARTSWITH` and
#: :const:`MATCH_INITIALS_CONTAIN`
MATCH_INITIALS = 24
#: Match items if ``query`` is a substring
MATCH_SUBSTRING = 32
#: Match items if all characters in ``query`` appear in the item in order
MATCH_ALLCHARS = 64
#: Combination of all other ``MATCH_*`` constants
MATCH_ALL = 127


####################################################################
# Used by `Workflow.check_update`
####################################################################

# Number of days to wait between checking for updates to the workflow
DEFAULT_UPDATE_FREQUENCY = 1


####################################################################
# Lockfile and Keychain access errors
####################################################################

class AcquisitionError(Exception):
    """Raised if a lock cannot be acquired."""


class KeychainError(Exception):
    """Raised for unknown Keychain errors.

    Raised by methods :meth:`Workflow.save_password`,
    :meth:`Workflow.get_password` and :meth:`Workflow.delete_password`
    when ``security`` CLI app returns an unknown error code.

    """


class PasswordNotFound(KeychainError):
    """Password not in Keychain.

    Raised by method :meth:`Workflow.get_password` when ``account``
    is unknown to the Keychain.

    """


class PasswordExists(KeychainError):
    """Raised when trying to overwrite an existing account password.

    You should never receive this error: it is used internally
    by the :meth:`Workflow.save_password` method to know if it needs
    to delete the old password first (a Keychain implementation detail).

    """


####################################################################
# Helper functions
####################################################################

def isascii(text):
    """Test if ``text`` contains only ASCII characters.

    :param text: text to test for ASCII-ness
    :type text: ``unicode``
    :returns: ``True`` if ``text`` contains only ASCII characters
    :rtype: ``Boolean``

    """
    try:
        # Encoding fails iff any code point is outside the ASCII range
        text.encode('ascii')
    except UnicodeEncodeError:
        return False

    return True


####################################################################
# Implementation classes
####################################################################

class SerializerManager(object):
    """Contains registered serializers.

    .. versionadded:: 1.8

    A configured instance of this class is available at
    :attr:`workflow.manager`.

    Use :meth:`register()` to register new (or replace
    existing) serializers, which you can specify by name when calling
    :class:`~workflow.Workflow` data storage methods.

    See :ref:`guide-serialization` and :ref:`guide-persistent-data`
    for further information.

    """

    def __init__(self):
        """Create new SerializerManager object."""
        # Maps serializer name -> serializer object
        self._serializers = {}

    def register(self, name, serializer):
        """Register ``serializer`` object under ``name``.

        Raises :class:`AttributeError` if ``serializer`` is invalid.

        .. note::

            ``name`` will be used as the file extension of the
            saved files.

        :param name: Name to register ``serializer`` under
        :type name: ``unicode`` or ``str``
        :param serializer: object with ``load()`` and ``dump()``
            methods

        """
        # Basic validation: raises AttributeError if either method is missing
        getattr(serializer, 'load')
        getattr(serializer, 'dump')

        self._serializers[name] = serializer

    def serializer(self, name):
        """Return serializer object for ``name``.

        :param name: Name of serializer to return
        :type name: ``unicode`` or ``str``
        :returns: serializer object or ``None`` if no such serializer
            is registered.

        """
        return self._serializers.get(name)

    def unregister(self, name):
        """Remove registered serializer with ``name``.

        Raises a :class:`ValueError` if there is no such registered
        serializer.

        :param name: Name of serializer to remove
        :type name: ``unicode`` or ``str``
        :returns: serializer object

        """
        if name not in self._serializers:
            raise ValueError('No such serializer registered : {0}'.format(
                name))

        serializer = self._serializers[name]
        del self._serializers[name]

        return serializer

    @property
    def serializers(self):
        """Return names of registered serializers."""
        return sorted(self._serializers.keys())


class JSONSerializer(object):
    """Wrapper around :mod:`json`. Sets ``indent`` and ``encoding``.

    .. versionadded:: 1.8

    Use this serializer if you need readable data files. JSON doesn't
    support Python objects as well as ``cPickle``/``pickle``, so be
    careful which data you try to serialize as JSON.

    """

    @classmethod
    def load(cls, file_obj):
        """Load serialized object from open JSON file.

        .. versionadded:: 1.8

        :param file_obj: file handle
        :type file_obj: ``file`` object
        :returns: object loaded from JSON file
        :rtype: object

        """
        return json.load(file_obj)

    @classmethod
    def dump(cls, obj, file_obj):
        """Serialize object ``obj`` to open JSON file.

        .. versionadded:: 1.8

        :param obj: Python object to serialize
        :type obj: JSON-serializable data structure
        :param file_obj: file handle
        :type file_obj: ``file`` object

        """
        # NOTE(review): the `encoding` keyword is Python 2-only;
        # json.dump() rejects it on Python 3.
        return json.dump(obj, file_obj, indent=2, encoding='utf-8')


class CPickleSerializer(object):
    """Wrapper around :mod:`cPickle`. Sets ``protocol``.

    .. versionadded:: 1.8

    This is the default serializer and the best combination of speed and
    flexibility.

    """

    @classmethod
    def load(cls, file_obj):
        """Load serialized object from open pickle file.

        .. versionadded:: 1.8

        :param file_obj: file handle
        :type file_obj: ``file`` object
        :returns: object loaded from pickle file
        :rtype: object

        """
        return cPickle.load(file_obj)

    @classmethod
    def dump(cls, obj, file_obj):
        """Serialize object ``obj`` to open pickle file.

        .. versionadded:: 1.8

        :param obj: Python object to serialize
        :type obj: Python object
        :param file_obj: file handle
        :type file_obj: ``file`` object

        """
        # protocol=-1 selects the highest protocol the interpreter supports
        return cPickle.dump(obj, file_obj, protocol=-1)


class PickleSerializer(object):
    """Wrapper around :mod:`pickle`. Sets ``protocol``.

    .. versionadded:: 1.8

    Use this serializer if you need to add custom pickling.

    """

    @classmethod
    def load(cls, file_obj):
        """Load serialized object from open pickle file.

        .. versionadded:: 1.8

        :param file_obj: file handle
        :type file_obj: ``file`` object
        :returns: object loaded from pickle file
        :rtype: object

        """
        return pickle.load(file_obj)

    @classmethod
    def dump(cls, obj, file_obj):
        """Serialize object ``obj`` to open pickle file.

        .. versionadded:: 1.8

        :param obj: Python object to serialize
        :type obj: Python object
        :param file_obj: file handle
        :type file_obj: ``file`` object

        """
        return pickle.dump(obj, file_obj, protocol=-1)


# Set up default manager and register built-in serializers
manager = SerializerManager()
manager.register('cpickle', CPickleSerializer)
manager.register('pickle', PickleSerializer)
manager.register('json', JSONSerializer)


class Item(object):
    """Represents a feedback item for Alfred.

    Generates Alfred-compliant XML for a single item.

    You probably shouldn't use this class directly, but via
    :meth:`Workflow.add_item`. See :meth:`~Workflow.add_item`
    for details of arguments.
""" def __init__(self, title, subtitle='', modifier_subtitles=None, arg=None, autocomplete=None, valid=False, uid=None, icon=None, icontype=None, type=None, largetext=None, copytext=None, quicklookurl=None): """Same arguments as :meth:`Workflow.add_item`.""" self.title = title self.subtitle = subtitle self.modifier_subtitles = modifier_subtitles or {} self.arg = arg self.autocomplete = autocomplete self.valid = valid self.uid = uid self.icon = icon self.icontype = icontype self.type = type self.largetext = largetext self.copytext = copytext self.quicklookurl = quicklookurl @property def elem(self): """Create and return feedback item for Alfred. :returns: :class:`ElementTree.Element <xml.etree.ElementTree.Element>` instance for this :class:`Item` instance. """ # Attributes on <item> element attr = {} if self.valid: attr['valid'] = 'yes' else: attr['valid'] = 'no' # Allow empty string for autocomplete. This is a useful value, # as TABing the result will revert the query back to just the # keyword if self.autocomplete is not None: attr['autocomplete'] = self.autocomplete # Optional attributes for name in ('uid', 'type'): value = getattr(self, name, None) if value: attr[name] = value root = ET.Element('item', attr) ET.SubElement(root, 'title').text = self.title ET.SubElement(root, 'subtitle').text = self.subtitle # Add modifier subtitles for mod in ('cmd', 'ctrl', 'alt', 'shift', 'fn'): if mod in self.modifier_subtitles: ET.SubElement(root, 'subtitle', {'mod': mod}).text = self.modifier_subtitles[mod] # Add arg as element instead of attribute on <item>, as it's more # flexible (newlines aren't allowed in attributes) if self.arg: ET.SubElement(root, 'arg').text = self.arg # Add icon if there is one if self.icon: if self.icontype: attr = dict(type=self.icontype) else: attr = {} ET.SubElement(root, 'icon', attr).text = self.icon if self.largetext: ET.SubElement(root, 'text', {'type': 'largetype'}).text = self.largetext if self.copytext: ET.SubElement(root, 'text', 
{'type': 'copy'}).text = self.copytext if self.quicklookurl: ET.SubElement(root, 'quicklookurl').text = self.quicklookurl return root class LockFile(object): """Context manager to protect filepaths with lockfiles. .. versionadded:: 1.13 Creates a lockfile alongside ``protected_path``. Other ``LockFile`` instances will refuse to lock the same path. >>> path = '/path/to/file' >>> with LockFile(path): >>> with open(path, 'wb') as fp: >>> fp.write(data) Args: protected_path (unicode): File to protect with a lockfile timeout (int, optional): Raises an :class:`AcquisitionError` if lock cannot be acquired within this number of seconds. If ``timeout`` is 0 (the default), wait forever. delay (float, optional): How often to check (in seconds) if lock has been released. """ def __init__(self, protected_path, timeout=0, delay=0.05): """Create new :class:`LockFile` object.""" self.lockfile = protected_path + '.lock' self.timeout = timeout self.delay = delay self._locked = False atexit.register(self.release) @property def locked(self): """`True` if file is locked by this instance.""" return self._locked def acquire(self, blocking=True): """Acquire the lock if possible. If the lock is in use and ``blocking`` is ``False``, return ``False``. Otherwise, check every `self.delay` seconds until it acquires lock or exceeds `self.timeout` and raises an `~AcquisitionError`. """ start = time.time() while True: self._validate_lockfile() try: fd = os.open(self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR) with os.fdopen(fd, 'w') as fd: fd.write(str(os.getpid())) break except OSError as err: if err.errno != errno.EEXIST: # pragma: no cover raise if self.timeout and (time.time() - start) >= self.timeout: raise AcquisitionError('lock acquisition timed out') if not blocking: return False time.sleep(self.delay) self._locked = True return True def _validate_lockfile(self): """Check existence and validity of lockfile. 
        If the lockfile exists, but contains an invalid PID
        or the PID of a non-existant process, it is removed.

        """
        try:
            with open(self.lockfile) as fp:
                s = fp.read()
        except Exception:
            # No lockfile (or unreadable): nothing to validate
            return

        try:
            pid = int(s)
        except ValueError:
            # Garbage contents: treat the lockfile as stale and remove it
            return self.release()

        from background import _process_exists
        if not _process_exists(pid):
            # Owning process is gone; remove the stale lockfile
            self.release()

    def release(self):
        """Release the lock by deleting `self.lockfile`."""
        self._locked = False
        try:
            os.unlink(self.lockfile)
        except (OSError, IOError) as err:  # pragma: no cover
            # errno 2 == ENOENT: lockfile already gone, which is fine
            if err.errno != 2:
                raise err

    def __enter__(self):
        """Acquire lock."""
        self.acquire()
        return self

    def __exit__(self, typ, value, traceback):
        """Release lock."""
        self.release()

    def __del__(self):
        """Clear up `self.lockfile`."""
        if self._locked:  # pragma: no cover
            self.release()


@contextmanager
def atomic_writer(file_path, mode):
    """Atomic file writer.

    .. versionadded:: 1.12

    Context manager that ensures the file is only written if the write
    succeeds. The data is first written to a temporary file.

    :param file_path: path of file to write to.
    :type file_path: ``unicode``
    :param mode: same as for :func:`open`
    :type mode: string

    """
    temp_suffix = '.aw.temp'
    temp_file_path = file_path + temp_suffix
    with open(temp_file_path, mode) as file_obj:
        try:
            yield file_obj
            # Rename only on success; os.rename replaces the target
            # atomically on POSIX
            os.rename(temp_file_path, file_path)
        finally:
            try:
                # After a successful rename the temp file no longer
                # exists, so this remove is a no-op; it only cleans up
                # after a failed write
                os.remove(temp_file_path)
            except (OSError, IOError):
                pass


class uninterruptible(object):
    """Decorator that postpones SIGTERM until wrapped function returns.

    .. versionadded:: 1.12

    .. important:: This decorator is NOT thread-safe.

    As of version 2.7, Alfred allows Script Filters to be killed. If
    your workflow is killed in the middle of critical code (e.g.
    writing data to disk), this may corrupt your workflow's data.

    Use this decorator to wrap critical functions that *must* complete.
    If the script is killed while a wrapped function is executing,
    the SIGTERM will be caught and handled after your function has
    finished executing.

    Alfred-Workflow uses this internally to ensure its settings, data
    and cache writes complete.

    """

    def __init__(self, func, class_name=''):
        """Decorate `func`."""
        # NOTE(review): `class_name` is accepted but never stored;
        # `__get__` below passes it positionally
        self.func = func
        self._caught_signal = None

    def signal_handler(self, signum, frame):
        """Called when process receives SIGTERM."""
        # Remember the signal; it is re-delivered after func returns
        self._caught_signal = (signum, frame)

    def __call__(self, *args, **kwargs):
        """Trap ``SIGTERM`` and call wrapped function."""
        self._caught_signal = None
        # Register handler for SIGTERM, then call `self.func`
        self.old_signal_handler = signal.getsignal(signal.SIGTERM)
        signal.signal(signal.SIGTERM, self.signal_handler)

        self.func(*args, **kwargs)

        # Restore old signal handler
        signal.signal(signal.SIGTERM, self.old_signal_handler)

        # Handle any signal caught during execution
        if self._caught_signal is not None:
            signum, frame = self._caught_signal
            if callable(self.old_signal_handler):
                self.old_signal_handler(signum, frame)
            elif self.old_signal_handler == signal.SIG_DFL:
                sys.exit(0)

    def __get__(self, obj=None, klass=None):
        """Decorator API."""
        # Descriptor protocol: bind the wrapped function so the
        # decorator also works on methods
        return self.__class__(self.func.__get__(obj, klass),
                              klass.__name__)


class Settings(dict):
    """A dictionary that saves itself when changed.

    Dictionary keys & values will be saved as a JSON file at
    ``filepath``. If the file does not exist, the dictionary (and
    settings file) will be initialised with ``defaults``.

    :param filepath: where to save the settings
    :type filepath: :class:`unicode`
    :param defaults: dict of default settings
    :type defaults: :class:`dict`


    An appropriate instance is provided by :class:`Workflow` instances at
    :attr:`Workflow.settings`.
""" def __init__(self, filepath, defaults=None): """Create new :class:`Settings` object.""" super(Settings, self).__init__() self._filepath = filepath self._nosave = False self._original = {} if os.path.exists(self._filepath): self._load() elif defaults: for key, val in defaults.items(): self[key] = val self.save() # save default settings def _load(self): """Load cached settings from JSON file `self._filepath`.""" self._nosave = True d = {} with open(self._filepath, 'rb') as file_obj: for key, value in json.load(file_obj, encoding='utf-8').items(): d[key] = value self.update(d) self._original = deepcopy(d) self._nosave = False @uninterruptible def save(self): """Save settings to JSON file specified in ``self._filepath``. If you're using this class via :attr:`Workflow.settings`, which you probably are, ``self._filepath`` will be ``settings.json`` in your workflow's data directory (see :attr:`~Workflow.datadir`). """ if self._nosave: return data = {} data.update(self) # for key, value in self.items(): # data[key] = value with LockFile(self._filepath): with atomic_writer(self._filepath, 'wb') as file_obj: json.dump(data, file_obj, sort_keys=True, indent=2, encoding='utf-8') # dict methods def __setitem__(self, key, value): """Implement :class:`dict` interface.""" if self._original.get(key) != value: super(Settings, self).__setitem__(key, value) self.save() def __delitem__(self, key): """Implement :class:`dict` interface.""" super(Settings, self).__delitem__(key) self.save() def update(self, *args, **kwargs): """Override :class:`dict` method to save on update.""" super(Settings, self).update(*args, **kwargs) self.save() def setdefault(self, key, value=None): """Override :class:`dict` method to save on update.""" ret = super(Settings, self).setdefault(key, value) self.save() return ret class Workflow(object): """The ``Workflow`` object is the main interface to Alfred-Workflow. 
It provides APIs for accessing the Alfred/workflow environment, storing & caching data, using Keychain, and generating Script Filter feedback. ``Workflow`` is compatible with both Alfred 2 and 3. The :class:`~workflow.Workflow3` subclass provides additional, Alfred 3-only features, such as workflow variables. :param default_settings: default workflow settings. If no settings file exists, :class:`Workflow.settings` will be pre-populated with ``default_settings``. :type default_settings: :class:`dict` :param update_settings: settings for updating your workflow from GitHub releases. The only required key is ``github_slug``, whose value must take the form of ``username/repo``. If specified, ``Workflow`` will check the repo's releases for updates. Your workflow must also have a semantic version number. Please see the :ref:`User Manual <user-manual>` and `update API docs <api-updates>` for more information. :type update_settings: :class:`dict` :param input_encoding: encoding of command line arguments. You should probably leave this as the default (``utf-8``), which is the encoding Alfred uses. :type input_encoding: :class:`unicode` :param normalization: normalisation to apply to CLI args. See :meth:`Workflow.decode` for more details. :type normalization: :class:`unicode` :param capture_args: Capture and act on ``workflow:*`` arguments. See :ref:`Magic arguments <magic-arguments>` for details. :type capture_args: :class:`Boolean` :param libraries: sequence of paths to directories containing libraries. These paths will be prepended to ``sys.path``. :type libraries: :class:`tuple` or :class:`list` :param help_url: URL to webpage where a user can ask for help with the workflow, report bugs, etc. This could be the GitHub repo or a page on AlfredForum.com. If your workflow throws an error, this URL will be displayed in the log and Alfred's debugger. It can also be opened directly in a web browser with the ``workflow:help`` :ref:`magic argument <magic-arguments>`. 
    :type help_url: :class:`unicode` or :class:`str`

    """

    # Which class to use to generate feedback items. You probably
    # won't want to change this
    item_class = Item

    def __init__(self, default_settings=None, update_settings=None,
                 input_encoding='utf-8', normalization='NFC',
                 capture_args=True, libraries=None,
                 help_url=None):
        """Create new :class:`Workflow` object."""
        self._default_settings = default_settings or {}
        self._update_settings = update_settings or {}
        self._input_encoding = input_encoding
        # NOTE(review): `_normalizsation` is misspelt, but the
        # misspelling is load-bearing — other methods read this exact
        # attribute name. Do not "fix" it in isolation.
        self._normalizsation = normalization
        self._capture_args = capture_args
        self.help_url = help_url
        self._workflowdir = None
        self._settings_path = None
        self._settings = None
        self._bundleid = None
        self._debugging = None
        self._name = None
        # Default serializer names for cache and data stores
        self._cache_serializer = 'cpickle'
        self._data_serializer = 'cpickle'
        self._info = None
        self._info_loaded = False
        self._logger = None
        self._items = []
        self._alfred_env = None
        # Version number of the workflow
        self._version = UNSET
        # Version from last workflow run
        self._last_version_run = UNSET
        # Cache for regex patterns created for filter keys
        self._search_pattern_cache = {}
        # Magic arguments
        #: The prefix for all magic arguments. Default is ``workflow:``
        self.magic_prefix = 'workflow:'
        #: Mapping of available magic arguments. The built-in magic
        #: arguments are registered by default. To add your own magic arguments
        #: (or override built-ins), add a key:value pair where the key is
        #: what the user should enter (prefixed with :attr:`magic_prefix`)
        #: and the value is a callable that will be called when the argument
        #: is entered. If you would like to display a message in Alfred, the
        #: function should return a ``unicode`` string.
        #:
        #: By default, the magic arguments documented
        #: :ref:`here <magic-arguments>` are registered.
        self.magic_arguments = {}

        self._register_default_magic()

        if libraries:
            sys.path = libraries + sys.path

    ####################################################################
    # API methods
    ####################################################################

    # info.plist contents and alfred_* environment variables  ----------

    @property
    def alfred_version(self):
        """Alfred version as :class:`~workflow.update.Version` object."""
        from update import Version
        return Version(self.alfred_env.get('version'))

    @property
    def alfred_env(self):
        """Dict of Alfred's environmental variables minus ``alfred_`` prefix.

        .. versionadded:: 1.7

        The variables Alfred 2.4+ exports are:

        ============================ =========================================
        Variable                     Description
        ============================ =========================================
        debug                        Set to ``1`` if Alfred's debugger is
                                     open, otherwise unset.
        preferences                  Path to Alfred.alfredpreferences
                                     (where your workflows and settings are
                                     stored).
        preferences_localhash        Machine-specific preferences are stored
                                     in ``Alfred.alfredpreferences/preferences/local/<hash>``
                                     (see ``preferences`` above for
                                     the path to ``Alfred.alfredpreferences``)
        theme                        ID of selected theme
        theme_background             Background colour of selected theme in
                                     format ``rgba(r,g,b,a)``
        theme_subtext                Show result subtext.
                                     ``0`` = Always,
                                     ``1`` = Alternative actions only,
                                     ``2`` = Selected result only,
                                     ``3`` = Never
        version                      Alfred version number, e.g. ``'2.4'``
        version_build                Alfred build number, e.g. ``277``
        workflow_bundleid            Bundle ID, e.g.
                                     ``net.deanishe.alfred-mailto``
        workflow_cache               Path to workflow's cache directory
        workflow_data                Path to workflow's data directory
        workflow_name                Name of current workflow
        workflow_uid                 UID of workflow
        workflow_version             The version number specified in the
                                     workflow configuration sheet/info.plist
        ============================ =========================================

        **Note:** all values are Unicode strings except ``version_build`` and
        ``theme_subtext``, which are integers.

        :returns: ``dict`` of Alfred's environmental variables without the
            ``alfred_`` prefix, e.g. ``preferences``, ``workflow_data``.

        """
        # Cached after first computation
        if self._alfred_env is not None:
            return self._alfred_env

        data = {}

        for key in (
                'alfred_debug',
                'alfred_preferences',
                'alfred_preferences_localhash',
                'alfred_theme',
                'alfred_theme_background',
                'alfred_theme_subtext',
                'alfred_version',
                'alfred_version_build',
                'alfred_workflow_bundleid',
                'alfred_workflow_cache',
                'alfred_workflow_data',
                'alfred_workflow_name',
                'alfred_workflow_uid',
                'alfred_workflow_version'):

            value = os.getenv(key)

            if isinstance(value, str):
                # Integer-valued variables; everything else is decoded
                # to unicode
                if key in ('alfred_debug', 'alfred_version_build',
                           'alfred_theme_subtext'):
                    value = int(value)
                else:
                    value = self.decode(value)

            # Strip the 7-character 'alfred_' prefix from the key
            data[key[7:]] = value

        self._alfred_env = data

        return self._alfred_env

    @property
    def info(self):
        """:class:`dict` of ``info.plist`` contents."""
        if not self._info_loaded:
            self._load_info_plist()
        return self._info

    @property
    def bundleid(self):
        """Workflow bundle ID from environmental vars or ``info.plist``.

        :returns: bundle ID
        :rtype: ``unicode``

        """
        if not self._bundleid:
            if self.alfred_env.get('workflow_bundleid'):
                self._bundleid = self.alfred_env.get('workflow_bundleid')
            else:
                self._bundleid = unicode(self.info['bundleid'], 'utf-8')

        return self._bundleid

    @property
    def debugging(self):
        """Whether Alfred's debugger is open.

        :returns: ``True`` if Alfred's debugger is open.
        :rtype: ``bool``

        """
        if self._debugging is None:
            # alfred_env converts 'debug' to int, so compare against 1
            if self.alfred_env.get('debug') == 1:
                self._debugging = True
            else:
                self._debugging = False
        return self._debugging

    @property
    def name(self):
        """Workflow name from Alfred's environmental vars or ``info.plist``.

        :returns: workflow name
        :rtype: ``unicode``

        """
        if not self._name:
            if self.alfred_env.get('workflow_name'):
                self._name = self.decode(self.alfred_env.get('workflow_name'))
            else:
                self._name = self.decode(self.info['name'])

        return self._name

    @property
    def version(self):
        """Return the version of the workflow.

        .. versionadded:: 1.9.10

        Get the workflow version from environment variable,
        the ``update_settings`` dict passed on
        instantiation, the ``version`` file located in the workflow's
        root directory or ``info.plist``. Return ``None`` if none
        exists or :class:`ValueError` if the version number is invalid
        (i.e. not semantic).

        :returns: Version of the workflow (not Alfred-Workflow)
        :rtype: :class:`~workflow.update.Version` object

        """
        if self._version is UNSET:

            version = None
            # environment variable has priority
            if self.alfred_env.get('workflow_version'):
                version = self.alfred_env['workflow_version']

            # Try `update_settings`
            elif self._update_settings:
                version = self._update_settings.get('version')

            # `version` file
            if not version:
                filepath = self.workflowfile('version')

                if os.path.exists(filepath):
                    with open(filepath, 'rb') as fileobj:
                        version = fileobj.read()

            # info.plist
            if not version:
                version = self.info.get('version')

            if version:
                from update import Version
                version = Version(version)

            self._version = version

        return self._version

    # Workflow utility methods -----------------------------------------

    @property
    def args(self):
        """Return command line args as normalised unicode.

        Args are decoded and normalised via :meth:`~Workflow.decode`.

        The encoding and normalisation are the ``input_encoding`` and
        ``normalization`` arguments passed to :class:`Workflow` (``UTF-8``
        and ``NFC`` are the defaults).

        If :class:`Workflow` is called with ``capture_args=True``
        (the default), :class:`Workflow` will look for certain
        ``workflow:*`` args and, if found, perform the corresponding
        actions and exit the workflow.

        See :ref:`Magic arguments <magic-arguments>` for details.

        """
        msg = None
        args = [self.decode(arg) for arg in sys.argv[1:]]

        # Handle magic args
        if len(args) and self._capture_args:
            for name in self.magic_arguments:
                key = '{0}{1}'.format(self.magic_prefix, name)
                if key in args:
                    msg = self.magic_arguments[name]()

            if msg:
                self.logger.debug(msg)
                if not sys.stdout.isatty():  # Show message in Alfred
                    self.add_item(msg, valid=False, icon=ICON_INFO)
                    self.send_feedback()
                # Magic argument handled: exit instead of running the
                # workflow proper
                sys.exit(0)
        return args

    @property
    def cachedir(self):
        """Path to workflow's cache directory.

        The cache directory is a subdirectory of Alfred's own cache directory
        in ``~/Library/Caches``. The full path is:

        ``~/Library/Caches/com.runningwithcrayons.Alfred-X/Workflow Data/<bundle id>``

        ``Alfred-X`` may be ``Alfred-2`` or ``Alfred-3``.

        :returns: full path to workflow's cache directory
        :rtype: ``unicode``

        """
        if self.alfred_env.get('workflow_cache'):
            dirpath = self.alfred_env.get('workflow_cache')

        else:
            dirpath = self._default_cachedir

        # _create() ensures the directory exists
        return self._create(dirpath)

    @property
    def _default_cachedir(self):
        """Alfred 2's default cache directory."""
        return os.path.join(
            os.path.expanduser(
                '~/Library/Caches/com.runningwithcrayons.Alfred-2/'
                'Workflow Data/'),
            self.bundleid)

    @property
    def datadir(self):
        """Path to workflow's data directory.

        The data directory is a subdirectory of Alfred's own data directory
        in ``~/Library/Application Support``. The full path is:

        ``~/Library/Application Support/Alfred 2/Workflow Data/<bundle id>``

        :returns: full path to workflow data directory
        :rtype: ``unicode``

        """
        if self.alfred_env.get('workflow_data'):
            dirpath = self.alfred_env.get('workflow_data')

        else:
            dirpath = self._default_datadir

        return self._create(dirpath)

    @property
    def _default_datadir(self):
        """Alfred 2's default data directory."""
        return os.path.join(os.path.expanduser(
            '~/Library/Application Support/Alfred 2/Workflow Data/'),
            self.bundleid)

    @property
    def workflowdir(self):
        """Path to workflow's root directory (where ``info.plist`` is).

        :returns: full path to workflow root directory
        :rtype: ``unicode``

        """
        if not self._workflowdir:
            # Try the working directory first, then the directory
            # the library is in. CWD will be the workflow root if
            # a workflow is being run in Alfred
            # NOTE(review): os.getcwdu() is Python 2-only
            candidates = [
                os.path.abspath(os.getcwdu()),
                os.path.dirname(os.path.abspath(os.path.dirname(__file__)))]

            # climb the directory tree until we find `info.plist`
            for dirpath in candidates:

                # Ensure directory path is Unicode
                dirpath = self.decode(dirpath)

                while True:
                    if os.path.exists(os.path.join(dirpath, 'info.plist')):
                        self._workflowdir = dirpath
                        break

                    elif dirpath == '/':
                        # no `info.plist` found
                        break

                    # Check the parent directory
                    dirpath = os.path.dirname(dirpath)

                # No need to check other candidates
                if self._workflowdir:
                    break

            if not self._workflowdir:
                raise IOError("'info.plist' not found in directory tree")

        return self._workflowdir

    def cachefile(self, filename):
        """Path to ``filename`` in workflow's cache directory.

        Return absolute path to ``filename`` within your workflow's
        :attr:`cache directory <Workflow.cachedir>`.

        :param filename: basename of file
        :type filename: ``unicode``
        :returns: full path to file within cache directory
        :rtype: ``unicode``

        """
        return os.path.join(self.cachedir, filename)

    def datafile(self, filename):
        """Path to ``filename`` in workflow's data directory.

        Return absolute path to ``filename`` within your workflow's
        :attr:`data directory <Workflow.datadir>`.

        :param filename: basename of file
        :type filename: ``unicode``
        :returns: full path to file within data directory
        :rtype: ``unicode``

        """
        return os.path.join(self.datadir, filename)

    def workflowfile(self, filename):
        """Return full path to ``filename`` in workflow's root directory.

        :param filename: basename of file
        :type filename: ``unicode``
        :returns: full path to file within data directory
        :rtype: ``unicode``

        """
        return os.path.join(self.workflowdir, filename)

    @property
    def logfile(self):
        """Path to logfile.

        :returns: path to logfile within workflow's cache directory
        :rtype: ``unicode``

        """
        return self.cachefile('%s.log' % self.bundleid)

    @property
    def logger(self):
        """Logger that logs to both console and a log file.

        If Alfred's debugger is open, log level will be ``DEBUG``,
        else it will be ``INFO``.

        Use :meth:`open_log` to open the log file in Console.

        :returns: an initialised :class:`~logging.Logger`

        """
        if self._logger:
            return self._logger

        # Initialise new logger and optionally handlers
        logger = logging.getLogger('workflow')

        if not len(logger.handlers):  # Only add one set of handlers

            fmt = logging.Formatter(
                '%(asctime)s %(filename)s:%(lineno)s'
                ' %(levelname)-8s %(message)s',
                datefmt='%H:%M:%S')

            # Rotate at 1 MB, keep one backup
            logfile = logging.handlers.RotatingFileHandler(
                self.logfile,
                maxBytes=1024 * 1024,
                backupCount=1)
            logfile.setFormatter(fmt)
            logger.addHandler(logfile)

            console = logging.StreamHandler()
            console.setFormatter(fmt)
            logger.addHandler(console)

        if self.debugging:
            logger.setLevel(logging.DEBUG)
        else:
            logger.setLevel(logging.INFO)

        self._logger = logger

        return self._logger

    @logger.setter
    def logger(self, logger):
        """Set a custom logger.

        :param logger: The logger to use
        :type logger: `~logging.Logger` instance

        """
        self._logger = logger

    @property
    def settings_path(self):
        """Path to settings file within workflow's data directory.
        :returns: path to ``settings.json`` file
        :rtype: ``unicode``

        """
        if not self._settings_path:
            self._settings_path = self.datafile('settings.json')
        return self._settings_path

    @property
    def settings(self):
        """Return a dictionary subclass that saves itself when changed.

        See :ref:`guide-settings` in the :ref:`user-manual` for more
        information on how to use :attr:`settings` and **important
        limitations** on what it can do.

        :returns: :class:`~workflow.workflow.Settings` instance
            initialised from the data in JSON file at
            :attr:`settings_path` or if that doesn't exist, with the
            ``default_settings`` :class:`dict` passed to
            :class:`Workflow` on instantiation.
        :rtype: :class:`~workflow.workflow.Settings` instance

        """
        if not self._settings:
            self.logger.debug('reading settings from %s',
                              self.settings_path)
            self._settings = Settings(self.settings_path,
                                      self._default_settings)
        return self._settings

    @property
    def cache_serializer(self):
        """Name of default cache serializer.

        .. versionadded:: 1.8

        This serializer is used by :meth:`cache_data()` and
        :meth:`cached_data()`

        See :class:`SerializerManager` for details.

        :returns: serializer name
        :rtype: ``unicode``

        """
        return self._cache_serializer

    @cache_serializer.setter
    def cache_serializer(self, serializer_name):
        """Set the default cache serialization format.

        .. versionadded:: 1.8

        This serializer is used by :meth:`cache_data()` and
        :meth:`cached_data()`

        The specified serializer must already be registered with the
        :class:`SerializerManager` at `~workflow.workflow.manager`,
        otherwise a :class:`ValueError` will be raised.

        :param serializer_name: Name of default serializer to use.
        :type serializer_name: ``unicode`` or ``str``

        """
        if manager.serializer(serializer_name) is None:
            raise ValueError(
                'Unknown serializer : `{0}`. Register your serializer '
                'with `manager` first.'.format(serializer_name))

        self.logger.debug('default cache serializer: %s', serializer_name)

        self._cache_serializer = serializer_name

    @property
    def data_serializer(self):
        """Name of default data serializer.

        .. versionadded:: 1.8

        This serializer is used by :meth:`store_data()` and
        :meth:`stored_data()`

        See :class:`SerializerManager` for details.

        :returns: serializer name
        :rtype: ``unicode``

        """
        return self._data_serializer

    @data_serializer.setter
    def data_serializer(self, serializer_name):
        """Set the default cache serialization format.

        .. versionadded:: 1.8

        This serializer is used by :meth:`store_data()` and
        :meth:`stored_data()`

        The specified serializer must already be registered with the
        :class:`SerializerManager` at `~workflow.workflow.manager`,
        otherwise a :class:`ValueError` will be raised.

        :param serializer_name: Name of serializer to use by default.

        """
        if manager.serializer(serializer_name) is None:
            raise ValueError(
                'Unknown serializer : `{0}`. Register your serializer '
                'with `manager` first.'.format(serializer_name))

        self.logger.debug('default data serializer: %s', serializer_name)

        self._data_serializer = serializer_name

    def stored_data(self, name):
        """Retrieve data from data directory.

        Returns ``None`` if there are no data stored under ``name``.

        .. versionadded:: 1.8

        :param name: name of datastore

        """
        # The hidden metadata file records which serializer wrote the data
        metadata_path = self.datafile('.{0}.alfred-workflow'.format(name))

        if not os.path.exists(metadata_path):
            self.logger.debug('no data stored for `%s`', name)
            return None

        with open(metadata_path, 'rb') as file_obj:
            serializer_name = file_obj.read().strip()

        serializer = manager.serializer(serializer_name)

        if serializer is None:
            raise ValueError(
                'Unknown serializer `{0}`. Register a corresponding '
                'serializer with `manager.register()` '
                'to load this data.'.format(serializer_name))

        self.logger.debug('data `%s` stored as `%s`', name, serializer_name)

        filename = '{0}.{1}'.format(name, serializer_name)
        data_path = self.datafile(filename)

        if not os.path.exists(data_path):
            self.logger.debug('no data stored: %s', name)
            # Remove orphaned metadata file so the store is consistent
            if os.path.exists(metadata_path):
                os.unlink(metadata_path)

            return None

        with open(data_path, 'rb') as file_obj:
            data = serializer.load(file_obj)

        self.logger.debug('stored data loaded: %s', data_path)

        return data

    def store_data(self, name, data, serializer=None):
        """Save data to data directory.

        .. versionadded:: 1.8

        If ``data`` is ``None``, the datastore will be deleted.

        Note that the datastore does NOT support multiple threads.

        :param name: name of datastore
        :param data: object(s) to store. **Note:** some serializers
            can only handle certain types of data.
        :param serializer: name of serializer to use. If no serializer
            is specified, the default will be used. See
            :class:`SerializerManager` for more information.
        :returns: data in datastore or ``None``

        """
        # Ensure deletion is not interrupted by SIGTERM
        @uninterruptible
        def delete_paths(paths):
            """Clear one or more data stores"""
            for path in paths:
                if os.path.exists(path):
                    os.unlink(path)
                    self.logger.debug('deleted data file: %s', path)

        serializer_name = serializer or self.data_serializer

        # In order for `stored_data()` to be able to load data stored with
        # an arbitrary serializer, yet still have meaningful file extensions,
        # the format (i.e. extension) is saved to an accompanying file
        metadata_path = self.datafile('.{0}.alfred-workflow'.format(name))
        filename = '{0}.{1}'.format(name, serializer_name)
        data_path = self.datafile(filename)

        if data_path == self.settings_path:
            raise ValueError(
                'Cannot save data to' +
                '`{0}` with format `{1}`. '.format(name, serializer_name) +
                "This would overwrite Alfred-Workflow's settings file.")

        serializer = manager.serializer(serializer_name)

        if serializer is None:
            raise ValueError(
                'Invalid serializer `{0}`. Register your serializer with '
                '`manager.register()` first.'.format(serializer_name))

        if data is None:  # Delete cached data
            delete_paths((metadata_path, data_path))
            return

        # Ensure write is not interrupted by SIGTERM
        @uninterruptible
        def _store():
            # Save file extension
            with atomic_writer(metadata_path, 'wb') as file_obj:
                file_obj.write(serializer_name)

            with atomic_writer(data_path, 'wb') as file_obj:
                serializer.dump(data, file_obj)

        _store()

        self.logger.debug('saved data: %s', data_path)

    def cached_data(self, name, data_func=None, max_age=60):
        """Return cached data if younger than ``max_age`` seconds.

        Retrieve data from cache or re-generate and re-cache data if
        stale/non-existant. If ``max_age`` is 0, return cached data no
        matter how old.

        :param name: name of datastore
        :param data_func: function to (re-)generate data.
        :type data_func: ``callable``
        :param max_age: maximum age of cached data in seconds
        :type max_age: ``int``
        :returns: cached data, return value of ``data_func`` or ``None``
            if ``data_func`` is not set

        """
        serializer = manager.serializer(self.cache_serializer)

        cache_path = self.cachefile('%s.%s' % (name, self.cache_serializer))
        age = self.cached_data_age(name)

        if (age < max_age or max_age == 0) and os.path.exists(cache_path):

            with open(cache_path, 'rb') as file_obj:
                self.logger.debug('loading cached data: %s', cache_path)
                return serializer.load(file_obj)

        if not data_func:
            return None

        # Cache miss/stale: regenerate and re-cache
        data = data_func()
        self.cache_data(name, data)

        return data

    def cache_data(self, name, data):
        """Save ``data`` to cache under ``name``.

        If ``data`` is ``None``, the corresponding cache file will be
        deleted.

        :param name: name of datastore
        :param data: data to store.
This may be any object supported by the cache serializer """ serializer = manager.serializer(self.cache_serializer) cache_path = self.cachefile('%s.%s' % (name, self.cache_serializer)) if data is None: if os.path.exists(cache_path): os.unlink(cache_path) self.logger.debug('deleted cache file: %s', cache_path) return with atomic_writer(cache_path, 'wb') as file_obj: serializer.dump(data, file_obj) self.logger.debug('cached data: %s', cache_path) def cached_data_fresh(self, name, max_age): """Whether cache `name` is less than `max_age` seconds old. :param name: name of datastore :param max_age: maximum age of data in seconds :type max_age: ``int`` :returns: ``True`` if data is less than ``max_age`` old, else ``False`` """ age = self.cached_data_age(name) if not age: return False return age < max_age def cached_data_age(self, name): """Return age in seconds of cache `name` or 0 if cache doesn't exist. :param name: name of datastore :type name: ``unicode`` :returns: age of datastore in seconds :rtype: ``int`` """ cache_path = self.cachefile('%s.%s' % (name, self.cache_serializer)) if not os.path.exists(cache_path): return 0 return time.time() - os.stat(cache_path).st_mtime def filter(self, query, items, key=lambda x: x, ascending=False, include_score=False, min_score=0, max_results=0, match_on=MATCH_ALL, fold_diacritics=True): """Fuzzy search filter. Returns list of ``items`` that match ``query``. ``query`` is case-insensitive. Any item that does not contain the entirety of ``query`` is rejected. If ``query`` is an empty string or contains only whitespace, all items will match. :param query: query to test items against :type query: ``unicode`` :param items: iterable of items to test :type items: ``list`` or ``tuple`` :param key: function to get comparison key from ``items``. Must return a ``unicode`` string. The default simply returns the item. 
:type key: ``callable`` :param ascending: set to ``True`` to get worst matches first :type ascending: ``Boolean`` :param include_score: Useful for debugging the scoring algorithm. If ``True``, results will be a list of tuples ``(item, score, rule)``. :type include_score: ``Boolean`` :param min_score: If non-zero, ignore results with a score lower than this. :type min_score: ``int`` :param max_results: If non-zero, prune results list to this length. :type max_results: ``int`` :param match_on: Filter option flags. Bitwise-combined list of ``MATCH_*`` constants (see below). :type match_on: ``int`` :param fold_diacritics: Convert search keys to ASCII-only characters if ``query`` only contains ASCII characters. :type fold_diacritics: ``Boolean`` :returns: list of ``items`` matching ``query`` or list of ``(item, score, rule)`` `tuples` if ``include_score`` is ``True``. ``rule`` is the ``MATCH_*`` rule that matched the item. :rtype: ``list`` **Matching rules** By default, :meth:`filter` uses all of the following flags (i.e. :const:`MATCH_ALL`). The tests are always run in the given order: 1. :const:`MATCH_STARTSWITH` Item search key starts with ``query`` (case-insensitive). 2. :const:`MATCH_CAPITALS` The list of capital letters in item search key starts with ``query`` (``query`` may be lower-case). E.g., ``of`` would match ``OmniFocus``, ``gc`` would match ``Google Chrome``. 3. :const:`MATCH_ATOM` Search key is split into "atoms" on non-word characters (.,-,' etc.). Matches if ``query`` is one of these atoms (case-insensitive). 4. :const:`MATCH_INITIALS_STARTSWITH` Initials are the first characters of the above-described "atoms" (case-insensitive). 5. :const:`MATCH_INITIALS_CONTAIN` ``query`` is a substring of the above-described initials. 6. :const:`MATCH_INITIALS` Combination of (4) and (5). 7. :const:`MATCH_SUBSTRING` ``query`` is a substring of item search key (case-insensitive). 8. 
:const:`MATCH_ALLCHARS` All characters in ``query`` appear in item search key in the same order (case-insensitive). 9. :const:`MATCH_ALL` Combination of all the above. :const:`MATCH_ALLCHARS` is considerably slower than the other tests and provides much less accurate results. **Examples:** To ignore :const:`MATCH_ALLCHARS` (tends to provide the worst matches and is expensive to run), use ``match_on=MATCH_ALL ^ MATCH_ALLCHARS``. To match only on capitals, use ``match_on=MATCH_CAPITALS``. To match only on startswith and substring, use ``match_on=MATCH_STARTSWITH | MATCH_SUBSTRING``. **Diacritic folding** .. versionadded:: 1.3 If ``fold_diacritics`` is ``True`` (the default), and ``query`` contains only ASCII characters, non-ASCII characters in search keys will be converted to ASCII equivalents (e.g. **ü** -> **u**, **ß** -> **ss**, **é** -> **e**). See :const:`ASCII_REPLACEMENTS` for all replacements. If ``query`` contains non-ASCII characters, search keys will not be altered. """ if not query: return items # Remove preceding/trailing spaces query = query.strip() if not query: return items # Use user override if there is one fold_diacritics = self.settings.get('__workflow_diacritic_folding', fold_diacritics) results = [] for item in items: skip = False score = 0 words = [s.strip() for s in query.split(' ')] value = key(item).strip() if value == '': continue for word in words: if word == '': continue s, rule = self._filter_item(value, word, match_on, fold_diacritics) if not s: # Skip items that don't match part of the query skip = True score += s if skip: continue if score: # use "reversed" `score` (i.e. highest becomes lowest) and # `value` as sort key. 
This means items with the same score # will be sorted in alphabetical not reverse alphabetical order results.append(((100.0 / score, value.lower(), score), (item, score, rule))) # sort on keys, then discard the keys results.sort(reverse=ascending) results = [t[1] for t in results] if min_score: results = [r for r in results if r[1] > min_score] if max_results and len(results) > max_results: results = results[:max_results] # return list of ``(item, score, rule)`` if include_score: return results # just return list of items return [t[0] for t in results] def _filter_item(self, value, query, match_on, fold_diacritics): """Filter ``value`` against ``query`` using rules ``match_on``. :returns: ``(score, rule)`` """ query = query.lower() if not isascii(query): fold_diacritics = False if fold_diacritics: value = self.fold_to_ascii(value) # pre-filter any items that do not contain all characters # of ``query`` to save on running several more expensive tests if not set(query) <= set(value.lower()): return (0, None) # item starts with query if match_on & MATCH_STARTSWITH and value.lower().startswith(query): score = 100.0 - (len(value) / len(query)) return (score, MATCH_STARTSWITH) # query matches capitalised letters in item, # e.g. of = OmniFocus if match_on & MATCH_CAPITALS: initials = ''.join([c for c in value if c in INITIALS]) if initials.lower().startswith(query): score = 100.0 - (len(initials) / len(query)) return (score, MATCH_CAPITALS) # split the item into "atoms", i.e. words separated by # spaces or other non-word characters if (match_on & MATCH_ATOM or match_on & MATCH_INITIALS_CONTAIN or match_on & MATCH_INITIALS_STARTSWITH): atoms = [s.lower() for s in split_on_delimiters(value)] # print('atoms : %s --> %s' % (value, atoms)) # initials of the atoms initials = ''.join([s[0] for s in atoms if s]) if match_on & MATCH_ATOM: # is `query` one of the atoms in item? 
# similar to substring, but scores more highly, as it's # a word within the item if query in atoms: score = 100.0 - (len(value) / len(query)) return (score, MATCH_ATOM) # `query` matches start (or all) of the initials of the # atoms, e.g. ``himym`` matches "How I Met Your Mother" # *and* "how i met your mother" (the ``capitals`` rule only # matches the former) if (match_on & MATCH_INITIALS_STARTSWITH and initials.startswith(query)): score = 100.0 - (len(initials) / len(query)) return (score, MATCH_INITIALS_STARTSWITH) # `query` is a substring of initials, e.g. ``doh`` matches # "The Dukes of Hazzard" elif (match_on & MATCH_INITIALS_CONTAIN and query in initials): score = 95.0 - (len(initials) / len(query)) return (score, MATCH_INITIALS_CONTAIN) # `query` is a substring of item if match_on & MATCH_SUBSTRING and query in value.lower(): score = 90.0 - (len(value) / len(query)) return (score, MATCH_SUBSTRING) # finally, assign a score based on how close together the # characters in `query` are in item. if match_on & MATCH_ALLCHARS: search = self._search_for_query(query) match = search(value) if match: score = 100.0 / ((1 + match.start()) * (match.end() - match.start() + 1)) return (score, MATCH_ALLCHARS) # Nothing matched return (0, None) def _search_for_query(self, query): if query in self._search_pattern_cache: return self._search_pattern_cache[query] # Build pattern: include all characters pattern = [] for c in query: # pattern.append('[^{0}]*{0}'.format(re.escape(c))) pattern.append('.*?{0}'.format(re.escape(c))) pattern = ''.join(pattern) search = re.compile(pattern, re.IGNORECASE).search self._search_pattern_cache[query] = search return search def run(self, func, text_errors=False): """Call ``func`` to run your workflow. :param func: Callable to call with ``self`` (i.e. the :class:`Workflow` instance) as first argument. :param text_errors: Emit error messages in plain text, not in Alfred's XML/JSON feedback format. 
Use this when you're not running Alfred-Workflow in a Script Filter and would like to pass the error message to, say, a notification. :type text_errors: ``Boolean`` ``func`` will be called with :class:`Workflow` instance as first argument. ``func`` should be the main entry point to your workflow. Any exceptions raised will be logged and an error message will be output to Alfred. """ start = time.time() # Call workflow's entry function/method within a try-except block # to catch any errors and display an error message in Alfred try: if self.version: self.logger.debug('workflow version: %s', self.version) # Run update check if configured for self-updates. # This call has to go in the `run` try-except block, as it will # initialise `self.settings`, which will raise an exception # if `settings.json` isn't valid. if self._update_settings: self.check_update() # Run workflow's entry function/method func(self) # Set last version run to current version after a successful # run self.set_last_version() except Exception as err: self.logger.exception(err) if self.help_url: self.logger.info('for assistance, see: %s', self.help_url) if not sys.stdout.isatty(): # Show error in Alfred if text_errors: print(unicode(err).encode('utf-8'), end='') else: self._items = [] if self._name: name = self._name elif self._bundleid: name = self._bundleid else: # pragma: no cover name = os.path.dirname(__file__) self.add_item("Error in workflow '%s'" % name, unicode(err), icon=ICON_ERROR) self.send_feedback() return 1 finally: self.logger.debug('workflow finished in %0.3f seconds', time.time() - start) return 0 # Alfred feedback methods ------------------------------------------ def add_item(self, title, subtitle='', modifier_subtitles=None, arg=None, autocomplete=None, valid=False, uid=None, icon=None, icontype=None, type=None, largetext=None, copytext=None, quicklookurl=None): """Add an item to be output to Alfred. 
:param title: Title shown in Alfred :type title: ``unicode`` :param subtitle: Subtitle shown in Alfred :type subtitle: ``unicode`` :param modifier_subtitles: Subtitles shown when modifier (CMD, OPT etc.) is pressed. Use a ``dict`` with the lowercase keys ``cmd``, ``ctrl``, ``shift``, ``alt`` and ``fn`` :type modifier_subtitles: ``dict`` :param arg: Argument passed by Alfred as ``{query}`` when item is actioned :type arg: ``unicode`` :param autocomplete: Text expanded in Alfred when item is TABbed :type autocomplete: ``unicode`` :param valid: Whether or not item can be actioned :type valid: ``Boolean`` :param uid: Used by Alfred to remember/sort items :type uid: ``unicode`` :param icon: Filename of icon to use :type icon: ``unicode`` :param icontype: Type of icon. Must be one of ``None`` , ``'filetype'`` or ``'fileicon'``. Use ``'filetype'`` when ``icon`` is a filetype such as ``'public.folder'``. Use ``'fileicon'`` when you wish to use the icon of the file specified as ``icon``, e.g. ``icon='/Applications/Safari.app', icontype='fileicon'``. Leave as `None` if ``icon`` points to an actual icon file. :type icontype: ``unicode`` :param type: Result type. Currently only ``'file'`` is supported (by Alfred). This will tell Alfred to enable file actions for this item. :type type: ``unicode`` :param largetext: Text to be displayed in Alfred's large text box if user presses CMD+L on item. :type largetext: ``unicode`` :param copytext: Text to be copied to pasteboard if user presses CMD+C on item. :type copytext: ``unicode`` :param quicklookurl: URL to be displayed using Alfred's Quick Look feature (tapping ``SHIFT`` or ``⌘+Y`` on a result). :type quicklookurl: ``unicode`` :returns: :class:`Item` instance See :ref:`icons` for a list of the supported system icons. .. note:: Although this method returns an :class:`Item` instance, you don't need to hold onto it or worry about it. 
All generated :class:`Item` instances are also collected internally and sent to Alfred when :meth:`send_feedback` is called. The generated :class:`Item` is only returned in case you want to edit it or do something with it other than send it to Alfred. """ item = self.item_class(title, subtitle, modifier_subtitles, arg, autocomplete, valid, uid, icon, icontype, type, largetext, copytext, quicklookurl) self._items.append(item) return item def send_feedback(self): """Print stored items to console/Alfred as XML.""" root = ET.Element('items') for item in self._items: root.append(item.elem) sys.stdout.write('<?xml version="1.0" encoding="utf-8"?>\n') sys.stdout.write(ET.tostring(root).encode('utf-8')) sys.stdout.flush() #################################################################### # Updating methods #################################################################### @property def first_run(self): """Return ``True`` if it's the first time this version has run. .. versionadded:: 1.9.10 Raises a :class:`ValueError` if :attr:`version` isn't set. """ if not self.version: raise ValueError('No workflow version set') if not self.last_version_run: return True return self.version != self.last_version_run @property def last_version_run(self): """Return version of last version to run (or ``None``). .. versionadded:: 1.9.10 :returns: :class:`~workflow.update.Version` instance or ``None`` """ if self._last_version_run is UNSET: version = self.settings.get('__workflow_last_version') if version: from update import Version version = Version(version) self._last_version_run = version self.logger.debug('last run version: %s', self._last_version_run) return self._last_version_run def set_last_version(self, version=None): """Set :attr:`last_version_run` to current version. .. 
versionadded:: 1.9.10 :param version: version to store (default is current version) :type version: :class:`~workflow.update.Version` instance or ``unicode`` :returns: ``True`` if version is saved, else ``False`` """ if not version: if not self.version: self.logger.warning( "Can't save last version: workflow has no version") return False version = self.version if isinstance(version, basestring): from update import Version version = Version(version) self.settings['__workflow_last_version'] = str(version) self.logger.debug('set last run version: %s', version) return True @property def update_available(self): """Whether an update is available. .. versionadded:: 1.9 See :ref:`guide-updates` in the :ref:`user-manual` for detailed information on how to enable your workflow to update itself. :returns: ``True`` if an update is available, else ``False`` """ # Create a new workflow object to ensure standard serialiser # is used (update.py is called without the user's settings) update_data = Workflow().cached_data('__workflow_update_status', max_age=0) self.logger.debug('update_data: %r', update_data) if not update_data or not update_data.get('available'): return False return update_data['available'] @property def prereleases(self): """Whether workflow should update to pre-release versions. .. versionadded:: 1.16 :returns: ``True`` if pre-releases are enabled with the :ref:`magic argument <magic-arguments>` or the ``update_settings`` dict, else ``False``. """ if self._update_settings.get('prereleases'): return True return self.settings.get('__workflow_prereleases') or False def check_update(self, force=False): """Call update script if it's time to check for a new release. .. versionadded:: 1.9 The update script will be run in the background, so it won't interfere in the execution of your workflow. See :ref:`guide-updates` in the :ref:`user-manual` for detailed information on how to enable your workflow to update itself. 
:param force: Force update check :type force: ``Boolean`` """ frequency = self._update_settings.get('frequency', DEFAULT_UPDATE_FREQUENCY) if not force and not self.settings.get('__workflow_autoupdate', True): self.logger.debug('Auto update turned off by user') return # Check for new version if it's time if (force or not self.cached_data_fresh( '__workflow_update_status', frequency * 86400)): github_slug = self._update_settings['github_slug'] # version = self._update_settings['version'] version = str(self.version) from background import run_in_background # update.py is adjacent to this file update_script = os.path.join(os.path.dirname(__file__), b'update.py') cmd = ['/usr/bin/python', update_script, 'check', github_slug, version] if self.prereleases: cmd.append('--prereleases') self.logger.info('Checking for update ...') run_in_background('__workflow_update_check', cmd) else: self.logger.debug('Update check not due') def start_update(self): """Check for update and download and install new workflow file. .. versionadded:: 1.9 See :ref:`guide-updates` in the :ref:`user-manual` for detailed information on how to enable your workflow to update itself. 
:returns: ``True`` if an update is available and will be installed, else ``False`` """ import update github_slug = self._update_settings['github_slug'] # version = self._update_settings['version'] version = str(self.version) if not update.check_update(github_slug, version, self.prereleases): return False from background import run_in_background # update.py is adjacent to this file update_script = os.path.join(os.path.dirname(__file__), b'update.py') cmd = ['/usr/bin/python', update_script, 'install', github_slug, version] if self.prereleases: cmd.append('--prereleases') self.logger.debug('Downloading update ...') run_in_background('__workflow_update_install', cmd) return True #################################################################### # Keychain password storage methods #################################################################### def save_password(self, account, password, service=None): """Save account credentials. If the account exists, the old password will first be deleted (Keychain throws an error otherwise). If something goes wrong, a :class:`KeychainError` exception will be raised. :param account: name of the account the password is for, e.g. "Pinboard" :type account: ``unicode`` :param password: the password to secure :type password: ``unicode`` :param service: Name of the service. 
By default, this is the workflow's bundle ID :type service: ``unicode`` """ if not service: service = self.bundleid try: self._call_security('add-generic-password', service, account, '-w', password) self.logger.debug('Saved password : %s:%s', service, account) except PasswordExists: self.logger.debug('Password exists : %s:%s', service, account) current_password = self.get_password(account, service) if current_password == password: self.logger.debug('Password unchanged') else: self.delete_password(account, service) self._call_security('add-generic-password', service, account, '-w', password) self.logger.debug('save_password : %s:%s', service, account) def get_password(self, account, service=None): """Retrieve the password saved at ``service/account``. Raise :class:`PasswordNotFound` exception if password doesn't exist. :param account: name of the account the password is for, e.g. "Pinboard" :type account: ``unicode`` :param service: Name of the service. By default, this is the workflow's bundle ID :type service: ``unicode`` :returns: account password :rtype: ``unicode`` """ if not service: service = self.bundleid output = self._call_security('find-generic-password', service, account, '-g') # Parsing of `security` output is adapted from python-keyring # by Jason R. Coombs # https://pypi.python.org/pypi/keyring m = re.search( r'password:\s*(?:0x(?P<hex>[0-9A-F]+)\s*)?(?:"(?P<pw>.*)")?', output) if m: groups = m.groupdict() h = groups.get('hex') password = groups.get('pw') if h: password = unicode(binascii.unhexlify(h), 'utf-8') self.logger.debug('Got password : %s:%s', service, account) return password def delete_password(self, account, service=None): """Delete the password stored at ``service/account``. Raise :class:`PasswordNotFound` if account is unknown. :param account: name of the account the password is for, e.g. "Pinboard" :type account: ``unicode`` :param service: Name of the service. 
By default, this is the workflow's bundle ID :type service: ``unicode`` """ if not service: service = self.bundleid self._call_security('delete-generic-password', service, account) self.logger.debug('Deleted password : %s:%s', service, account) #################################################################### # Methods for workflow:* magic args #################################################################### def _register_default_magic(self): """Register the built-in magic arguments.""" # TODO: refactor & simplify # Wrap callback and message with callable def callback(func, msg): def wrapper(): func() return msg return wrapper self.magic_arguments['delcache'] = callback(self.clear_cache, 'Deleted workflow cache') self.magic_arguments['deldata'] = callback(self.clear_data, 'Deleted workflow data') self.magic_arguments['delsettings'] = callback( self.clear_settings, 'Deleted workflow settings') self.magic_arguments['reset'] = callback(self.reset, 'Reset workflow') self.magic_arguments['openlog'] = callback(self.open_log, 'Opening workflow log file') self.magic_arguments['opencache'] = callback( self.open_cachedir, 'Opening workflow cache directory') self.magic_arguments['opendata'] = callback( self.open_datadir, 'Opening workflow data directory') self.magic_arguments['openworkflow'] = callback( self.open_workflowdir, 'Opening workflow directory') self.magic_arguments['openterm'] = callback( self.open_terminal, 'Opening workflow root directory in Terminal') # Diacritic folding def fold_on(): self.settings['__workflow_diacritic_folding'] = True return 'Diacritics will always be folded' def fold_off(): self.settings['__workflow_diacritic_folding'] = False return 'Diacritics will never be folded' def fold_default(): if '__workflow_diacritic_folding' in self.settings: del self.settings['__workflow_diacritic_folding'] return 'Diacritics folding reset' self.magic_arguments['foldingon'] = fold_on self.magic_arguments['foldingoff'] = fold_off 
self.magic_arguments['foldingdefault'] = fold_default # Updates def update_on(): self.settings['__workflow_autoupdate'] = True return 'Auto update turned on' def update_off(): self.settings['__workflow_autoupdate'] = False return 'Auto update turned off' def prereleases_on(): self.settings['__workflow_prereleases'] = True return 'Prerelease updates turned on' def prereleases_off(): self.settings['__workflow_prereleases'] = False return 'Prerelease updates turned off' def do_update(): if self.start_update(): return 'Downloading and installing update ...' else: return 'No update available' self.magic_arguments['autoupdate'] = update_on self.magic_arguments['noautoupdate'] = update_off self.magic_arguments['prereleases'] = prereleases_on self.magic_arguments['noprereleases'] = prereleases_off self.magic_arguments['update'] = do_update # Help def do_help(): if self.help_url: self.open_help() return 'Opening workflow help URL in browser' else: return 'Workflow has no help URL' def show_version(): if self.version: return 'Version: {0}'.format(self.version) else: return 'This workflow has no version number' def list_magic(): """Display all available magic args in Alfred.""" isatty = sys.stderr.isatty() for name in sorted(self.magic_arguments.keys()): if name == 'magic': continue arg = self.magic_prefix + name self.logger.debug(arg) if not isatty: self.add_item(arg, icon=ICON_INFO) if not isatty: self.send_feedback() self.magic_arguments['help'] = do_help self.magic_arguments['magic'] = list_magic self.magic_arguments['version'] = show_version def clear_cache(self, filter_func=lambda f: True): """Delete all files in workflow's :attr:`cachedir`. :param filter_func: Callable to determine whether a file should be deleted or not. ``filter_func`` is called with the filename of each file in the data directory. If it returns ``True``, the file will be deleted. By default, *all* files will be deleted. 
:type filter_func: ``callable`` """ self._delete_directory_contents(self.cachedir, filter_func) def clear_data(self, filter_func=lambda f: True): """Delete all files in workflow's :attr:`datadir`. :param filter_func: Callable to determine whether a file should be deleted or not. ``filter_func`` is called with the filename of each file in the data directory. If it returns ``True``, the file will be deleted. By default, *all* files will be deleted. :type filter_func: ``callable`` """ self._delete_directory_contents(self.datadir, filter_func) def clear_settings(self): """Delete workflow's :attr:`settings_path`.""" if os.path.exists(self.settings_path): os.unlink(self.settings_path) self.logger.debug('Deleted : %r', self.settings_path) def reset(self): """Delete workflow settings, cache and data. File :attr:`settings <settings_path>` and directories :attr:`cache <cachedir>` and :attr:`data <datadir>` are deleted. """ self.clear_cache() self.clear_data() self.clear_settings() def open_log(self): """Open :attr:`logfile` in default app (usually Console.app).""" subprocess.call(['open', self.logfile]) def open_cachedir(self): """Open the workflow's :attr:`cachedir` in Finder.""" subprocess.call(['open', self.cachedir]) def open_datadir(self): """Open the workflow's :attr:`datadir` in Finder.""" subprocess.call(['open', self.datadir]) def open_workflowdir(self): """Open the workflow's :attr:`workflowdir` in Finder.""" subprocess.call(['open', self.workflowdir]) def open_terminal(self): """Open a Terminal window at workflow's :attr:`workflowdir`.""" subprocess.call(['open', '-a', 'Terminal', self.workflowdir]) def open_help(self): """Open :attr:`help_url` in default browser.""" subprocess.call(['open', self.help_url]) return 'Opening workflow help URL in browser' #################################################################### # Helper methods #################################################################### def decode(self, text, encoding=None, normalization=None): 
"""Return ``text`` as normalised unicode. If ``encoding`` and/or ``normalization`` is ``None``, the ``input_encoding``and ``normalization`` parameters passed to :class:`Workflow` are used. :param text: string :type text: encoded or Unicode string. If ``text`` is already a Unicode string, it will only be normalised. :param encoding: The text encoding to use to decode ``text`` to Unicode. :type encoding: ``unicode`` or ``None`` :param normalization: The nomalisation form to apply to ``text``. :type normalization: ``unicode`` or ``None`` :returns: decoded and normalised ``unicode`` :class:`Workflow` uses "NFC" normalisation by default. This is the standard for Python and will work well with data from the web (via :mod:`~workflow.web` or :mod:`json`). macOS, on the other hand, uses "NFD" normalisation (nearly), so data coming from the system (e.g. via :mod:`subprocess` or :func:`os.listdir`/:mod:`os.path`) may not match. You should either normalise this data, too, or change the default normalisation used by :class:`Workflow`. """ encoding = encoding or self._input_encoding normalization = normalization or self._normalizsation if not isinstance(text, unicode): text = unicode(text, encoding) return unicodedata.normalize(normalization, text) def fold_to_ascii(self, text): """Convert non-ASCII characters to closest ASCII equivalent. .. versionadded:: 1.3 .. note:: This only works for a subset of European languages. :param text: text to convert :type text: ``unicode`` :returns: text containing only ASCII characters :rtype: ``unicode`` """ if isascii(text): return text text = ''.join([ASCII_REPLACEMENTS.get(c, c) for c in text]) return unicode(unicodedata.normalize('NFKD', text).encode('ascii', 'ignore')) def dumbify_punctuation(self, text): """Convert non-ASCII punctuation to closest ASCII equivalent. This method replaces "smart" quotes and n- or m-dashes with their workaday ASCII equivalents. 
This method is currently not used internally, but exists as a helper method for workflow authors. .. versionadded: 1.9.7 :param text: text to convert :type text: ``unicode`` :returns: text with only ASCII punctuation :rtype: ``unicode`` """ if isascii(text): return text text = ''.join([DUMB_PUNCTUATION.get(c, c) for c in text]) return text def _delete_directory_contents(self, dirpath, filter_func): """Delete all files in a directory. :param dirpath: path to directory to clear :type dirpath: ``unicode`` or ``str`` :param filter_func function to determine whether a file shall be deleted or not. :type filter_func ``callable`` """ if os.path.exists(dirpath): for filename in os.listdir(dirpath): if not filter_func(filename): continue path = os.path.join(dirpath, filename) if os.path.isdir(path): shutil.rmtree(path) else: os.unlink(path) self.logger.debug('Deleted : %r', path) def _load_info_plist(self): """Load workflow info from ``info.plist``.""" # info.plist should be in the directory above this one self._info = plistlib.readPlist(self.workflowfile('info.plist')) self._info_loaded = True def _create(self, dirpath): """Create directory `dirpath` if it doesn't exist. :param dirpath: path to directory :type dirpath: ``unicode`` :returns: ``dirpath`` argument :rtype: ``unicode`` """ if not os.path.exists(dirpath): os.makedirs(dirpath) return dirpath def _call_security(self, action, service, account, *args): """Call ``security`` CLI program that provides access to keychains. May raise `PasswordNotFound`, `PasswordExists` or `KeychainError` exceptions (the first two are subclasses of `KeychainError`). :param action: The ``security`` action to call, e.g. ``add-generic-password`` :type action: ``unicode`` :param service: Name of the service. :type service: ``unicode`` :param account: name of the account the password is for, e.g. 
"Pinboard" :type account: ``unicode`` :param password: the password to secure :type password: ``unicode`` :param *args: list of command line arguments to be passed to ``security`` :type *args: `list` or `tuple` :returns: ``(retcode, output)``. ``retcode`` is an `int`, ``output`` a ``unicode`` string. :rtype: `tuple` (`int`, ``unicode``) """ cmd = ['security', action, '-s', service, '-a', account] + list(args) p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) stdout, _ = p.communicate() if p.returncode == 44: # password does not exist raise PasswordNotFound() elif p.returncode == 45: # password already exists raise PasswordExists() elif p.returncode > 0: err = KeychainError('Unknown Keychain error : %s' % stdout) err.retcode = p.returncode raise err return stdout.strip().decode('utf-8')
mit
GistdaDev/geonode
geonode/proxy/views.py
28
3660
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################

from django.http import HttpResponse
from httplib import HTTPConnection, HTTPSConnection
from urlparse import urlsplit

from django.conf import settings
from django.utils.http import is_safe_url
from django.http.request import validate_host


def proxy(request):
    """Relay the incoming request to the URL given in the ``url`` GET parameter.

    Used by the GeoNode client to reach OGC services (e.g. GeoServer) on
    another host/port without hitting same-origin restrictions.

    :param request: Django request carrying a URL-encoded target in
        ``request.GET['url']``.
    :returns: ``HttpResponse`` mirroring the upstream status, body and
        content type; 400 when ``url`` is missing, 403 when the target host
        is not whitelisted while ``DEBUG`` is off.
    """
    PROXY_ALLOWED_HOSTS = getattr(settings, 'PROXY_ALLOWED_HOSTS', ())

    host = None

    if 'geonode.geoserver' in settings.INSTALLED_APPS:
        # Imported lazily so this module also works without the geoserver app.
        from geonode.geoserver.helpers import ogc_server_settings
        hostname = (ogc_server_settings.hostname,) if ogc_server_settings else ()
        PROXY_ALLOWED_HOSTS += hostname
        host = ogc_server_settings.netloc

    if 'url' not in request.GET:
        return HttpResponse("The proxy service requires a URL-encoded URL as a parameter.",
                            status=400,
                            content_type="text/plain"
                            )

    raw_url = request.GET['url']
    url = urlsplit(raw_url)
    # Rebuild the path + query + fragment that is sent upstream; scheme and
    # host are handled separately when the connection is opened below.
    locator = str(url.path)
    if url.query != "":
        locator += '?' + url.query
    if url.fragment != "":
        locator += '#' + url.fragment

    # Outside DEBUG mode only whitelisted hosts may be proxied, limiting the
    # SSRF surface of this open-proxy style endpoint.
    if not settings.DEBUG:
        if not validate_host(url.hostname, PROXY_ALLOWED_HOSTS):
            return HttpResponse("DEBUG is set to False but the host of the path provided to the proxy service"
                                " is not in the PROXY_ALLOWED_HOSTS setting.",
                                status=403,
                                content_type="text/plain"
                                )
    headers = {}

    # Forward the session cookie only to targets considered safe for this
    # site, so credentials are not leaked to arbitrary hosts.
    if settings.SESSION_COOKIE_NAME in request.COOKIES and is_safe_url(url=raw_url, host=host):
        headers["Cookie"] = request.META["HTTP_COOKIE"]

    if request.method in ("POST", "PUT") and "CONTENT_TYPE" in request.META:
        headers["Content-Type"] = request.META["CONTENT_TYPE"]

    if url.scheme == 'https':
        conn = HTTPSConnection(url.hostname, url.port)
    else:
        conn = HTTPConnection(url.hostname, url.port)
    # NOTE(review): no timeout is set on the connection, so a hung upstream
    # can block this worker indefinitely -- consider adding one.
    conn.request(request.method, locator, request.body, headers)
    result = conn.getresponse()

    # If we get a redirect, let's add a useful message.
    if result.status in (301, 302, 303, 307):
        response = HttpResponse(('This proxy does not support redirects. The server in "%s" '
                                 'asked for a redirect to "%s"' % (url, result.getheader('Location'))),
                                status=result.status,
                                content_type=result.getheader("Content-Type", "text/plain")
                                )

        response['Location'] = result.getheader('Location')
    else:
        response = HttpResponse(
            result.read(),
            status=result.status,
            content_type=result.getheader("Content-Type", "text/plain"))

    return response
gpl-3.0
mariusbaumann/pyload
module/plugins/hoster/YourfilesTo.py
2
2049
# -*- coding: utf-8 -*- import re from urllib import unquote from module.plugins.Hoster import Hoster class YourfilesTo(Hoster): __name__ = "YourfilesTo" __type__ = "hoster" __version__ = "0.21" __pattern__ = r'(http://)?(?:www\.)?yourfiles\.(to|biz)/\?d=\w+' __description__ = """Youfiles.to hoster plugin""" __license__ = "GPLv3" __authors__ = [("jeix", "jeix@hasnomail.de"), ("skydancer", "skydancer@hasnomail.de")] def process(self, pyfile): self.pyfile = pyfile self.prepare() self.download(self.get_file_url()) def prepare(self): if not self.file_exists(): self.offline() self.pyfile.name = self.get_file_name() wait_time = self.get_waiting_time() self.setWait(wait_time) self.wait() def get_waiting_time(self): if not self.html: self.download_html() #var zzipitime = 15; m = re.search(r'var zzipitime = (\d+);', self.html) if m: sec = int(m.group(1)) else: sec = 0 return sec def download_html(self): url = self.pyfile.url self.html = self.load(url) def get_file_url(self): """ returns the absolute downloadable filepath """ url = re.search(r"var bla = '(.*?)';", self.html) if url: url = url.group(1) url = unquote(url.replace("http://http:/http://", "http://").replace("dumdidum", "")) return url else: self.error(_("Absolute filepath not found")) def get_file_name(self): if not self.html: self.download_html() return re.search("<title>(.*)</title>", self.html).group(1) def file_exists(self): """ returns True or False """ if not self.html: self.download_html() if re.search(r"HTTP Status 404", self.html) is not None: return False else: return True
gpl-3.0
InUrSys/PescArt2.0
src/srcPlus/GenericPesquisas.py
1
10030
''' Created on 13/12/2017 @author: chernomirdinmacuvele ''' from PyQt5.Qt import QDialog, QModelIndex, QStandardItemModel, QStandardItem,\ QGroupBox import mixedModel import QT_tblViewUtility import rscForm import frmPesquisa_Sort class GenericPesquisas(QDialog): def configCombox(self): ''' Metodo para configurar o Modelo e Text Hint no Combox. ''' lstWdgt = self.dictCB['widget'] lstQuer = self.dictCB['quer'] for idx, val in enumerate (lstWdgt): model = mixedModel.setQueryModel(lstQuer[idx]) val.setModel(model) val.setModelColumn(1) self.CBTextHint(Combox=val) def CBTextHint(self, Combox=None): mdel = QStandardItemModel(Combox.model()) firstIndex = mdel.index(0, Combox.modelColumn(), Combox.rootModelIndex()) firstItem = QStandardItem(mdel.itemFromIndex(firstIndex)) firstItem.setSelectable(False) def configComboxLocal(self): ''' Configurar o combox e o evento ''' lstWdgt = [self.CBProvincia, self.CBDistrito, self.CBPosto, self.CBCentroPesca] for wdg in lstWdgt: wdgName = wdg.objectName() query= self.dictLocal[wdgName]['query'] model= mixedModel.setQueryModel(query= query) wdg.setModel(model) wdg.setModelColumn(1) self.CBTextHint(Combox=wdg) wdg.currentTextChanged.connect(self.updateNextCombo) def updateNextCombo(self): ''' Atualiza o modelo do Next widget ''' wdgt= self.sender().objectName() setNext= self.dictLocal[wdgt]['nextLVL'] if setNext is not None: query= self.dictLocal[setNext]['query'].format(val = mixedModel.getDataCombox(widg= self.sender())) nextWdg = self.dictLocal[setNext]['wdgt'] nextWdgModel= nextWdg.model() nextWdgModel.setQuery(query) nextWdg.setCurrentIndex(0) self.CBTextHint(Combox=self.dictLocal[setNext]['wdgt']) def configRegistador(self): if self.CBProvincia.currentIndex() == 0: quer= "select null as id, '-Registador-' as nome union all select id, nome from ref_registador" else: id_prov = mixedModel.getDataCombox(widg= self.CBProvincia) quer = "select null as id, '-Registador-' as nome union all select id, nome from ref_registador where 
id_centro = '{prov}'".format(prov = id_prov) model = mixedModel.setQueryModel(query =quer) self.CBRegistador.setModel(model) self.CBRegistador.setModelColumn(1) def buldingTheQuery(self): startQuery = """SELECT tbl1.id, date(data_amostragem) as "Data da Amostra", tbl2.nome as "Centro", tbl3.nome as "Registador", hora_inicioamo, hor_fimamo, tbl4.nome as "Dia da Semana", tbl5.nome as "Forca do Vento", tbl6.nome as "Nivel da Mare", tbl7.nome as "Direcao do Vento", hora_vento, tbl8.nome as "Tipo de Mare", altura_preamar, hora_preamar, altura_baimar, hora_baixamar, tbl9.nome as "Fase da Lua", tbl10.nome as "Nebulosidade", hora_nebulosidade, actividade_pesq, total_artes_amos, total_artes_act, total_artes_n_activas, total_artes_prov_outo_cent, observacoes FROM public.t_saidas as tbl1 left join ref_geometric as tbl2 on tbl1.id_centro = tbl2.id and tbl2.id_tiplocal = 'CTP' left join ref_registador as tbl3 on tbl1.id_registrador = tbl3.id left join ref_diasemana as tbl4 on tbl1.id_diasemana = tbl4.id left join ref_table as tbl5 on tbl1.id_forcavento = tbl5.id and tbl5.id_grupo = 'FCV' left join ref_table as tbl6 on tbl1.id_estadomare = tbl6.id and tbl6.id_grupo = 'NVM' left join ref_table as tbl7 on tbl1.id_direccao = tbl7.id and tbl7.id_grupo = 'DDV' left join ref_table as tbl8 on tbl1.id_tipomare = tbl8.id and tbl8.id_grupo = 'TPM' left join ref_table as tbl9 on tbl1.id_faselua = tbl9.id and tbl9.id_grupo = 'FLD' left join ref_table as tbl10 on tbl1.id_nebulosidade = tbl10.id and tbl10.id_grupo = 'NBL' """#BigQuery and start Where # # if self.CBProvincia.currentIndex() != 0: startQuery += " where " if self.CBDistrito.currentIndex() != 0: if self.CBPosto.currentIndex() != 0: if self.CBCentroPesca.currentIndex() != 0: ctp = mixedModel.getDataCombox(widg= self.CBCentroPesca) startQuery += "tbl1.id_centro in (select tbl1.id from ref_geometric as tbl1 where tbl1.id = '{ctp}')".format(ctp = ctp) else: psd = mixedModel.getDataCombox(widg= self.CBPosto) startQuery += """ 
tbl1.id_centro in (select tbl1.id from ref_geometric as tbl1 inner join ref_geometric as tbl2 on tbl1.id_parent = tbl2.id where tbl2.id like '{psd}') """.format(psd = psd) else: dst = mixedModel.getDataCombox(widg= self.CBDistrito) startQuery += """ tbl1.id_centro in (select tbl1.id from ref_geometric as tbl1 inner join ref_geometric as tbl2 on tbl1.id_parent = tbl2.id inner join ref_geometric as tbl3 on tbl2.id_parent = tbl3.id where tbl3.id like '{dst}') """.format(dst = dst) else: prv = mixedModel.getDataCombox(widg= self.CBProvincia) startQuery += """ tbl1.id_centro in (select tbl1.id from ref_geometric as tbl1 inner join ref_geometric as tbl2 on tbl1.id_parent = tbl2.id inner join ref_geometric as tbl3 on tbl2.id_parent = tbl3.id inner join ref_geometric as tbl4 on tbl3.id_parent = tbl4.id where tbl4.id like '{prv}') """.format(prv = prv) # # if self.GBData.isChecked(): if self.CBProvincia.currentIndex(): #!= 0 or self.CBRegistador.currentIndex() != 0 or self.CBDiaSemana.currentIndex() != 0 or self.CBActividadePesqueria.currentIndex() != 0: startQuery += ' and ' else: startQuery += " where " inicio = rscForm.getText(widg = self.DEInicio) fim = rscForm.getText(widg = self.DEFim) startQuery += "data_amostragem between '{inicio}' and '{fim}' ".format(inicio=inicio, fim=fim) # # if self.CBRegistador.currentIndex() != 0: if self.CBProvincia.currentIndex() != 0 or self.GBData.isChecked():#or self.CBDiaSemana.currentIndex() != 0 or self.CBActividadePesqueria.currentIndex() != 0: startQuery += ' and ' else: startQuery += " where " rgt = mixedModel.getDataCombox(widg= self.CBRegistador) startQuery += "tbl3.id = '{rgt}' ".format(rgt = rgt) # # if self.CBDiaSemana.currentIndex() != 0: if self.CBProvincia.currentIndex() != 0 or self.GBData.isChecked() or self.CBRegistador.currentIndex() != 0:# or self.CBActividadePesqueria.currentIndex() != 0: startQuery += ' and ' else: startQuery += " where " dsm = mixedModel.getDataCombox(widg= self.CBDiaSemana) startQuery += "tbl4.id 
= '{dsm}' ".format(dsm = dsm) # # if self.CBActividadePesqueria.currentIndex() != 0: if self.CBProvincia.currentIndex() != 0 or self.GBData.isChecked() or self.CBRegistador.currentIndex() != 0 or self.CBDiaSemana.currentIndex() != 0: startQuery += ' and ' else: startQuery += " where " quer = mixedModel.getDataCombox(widg = self.CBActividadePesqueria) startQuery += quer # # try: if self.endQuery != " ": startQuery += "order by " startQuery += self.endQuery except AttributeError: startQuery += " order by data_amostragem " lstName = self.dictSaidas['newNames'] model = mixedModel.setQueryModel(query= startQuery, lstNewNames= lstName) toHide = self.dictSaidas['toHide'] lstSizeCol = self.dictSaidas['sizeCol'] QT_tblViewUtility.setModelInView(tblView= self.TVSaidas, ViewModel= model, toHide = toHide) QT_tblViewUtility.setViewCustom(tblView=self.TVSaidas, lstSizeCol=lstSizeCol) def selectedRow(self, mIdx): lstOut=[] lenDict = len(self.dictSaidas['fldName']) model = mIdx.model() clickedRow = mIdx.row() for idx in range(lenDict): val = model.record(clickedRow).value(idx) lstOut.append(val) self.lstVal= lstOut self.bOK = True def toOpenSort(self): dlg = frmPesquisa_Sort.frmSortting() dlg.exec_() self.endQuery = dlg.fQuery
gpl-3.0
willemneal/Docky
lib/flask/logging.py
838
1398
# -*- coding: utf-8 -*- """ flask.logging ~~~~~~~~~~~~~ Implements the logging support for Flask. :copyright: (c) 2011 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import from logging import getLogger, StreamHandler, Formatter, getLoggerClass, DEBUG def create_logger(app): """Creates a logger for the given application. This logger works similar to a regular Python logger but changes the effective logging level based on the application's debug flag. Furthermore this function also removes all attached handlers in case there was a logger with the log name before. """ Logger = getLoggerClass() class DebugLogger(Logger): def getEffectiveLevel(x): if x.level == 0 and app.debug: return DEBUG return Logger.getEffectiveLevel(x) class DebugHandler(StreamHandler): def emit(x, record): StreamHandler.emit(x, record) if app.debug else None handler = DebugHandler() handler.setLevel(DEBUG) handler.setFormatter(Formatter(app.debug_log_format)) logger = getLogger(app.logger_name) # just in case that was not a new logger, get rid of all the handlers # already attached to it. del logger.handlers[:] logger.__class__ = DebugLogger logger.addHandler(handler) return logger
mit
yuvrajsingh86/DeepLearning_Udacity
weight-initialization/helper.py
153
3649
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf


def hist_dist(title, distribution_tensor, hist_range=(-4, 4)):
    """
    Display histogram of a TF distribution

    :param title: plot title
    :param distribution_tensor: TF tensor whose sampled values are plotted
    :param hist_range: (min, max) range covered by the histogram bins
    """
    with tf.Session() as sess:
        values = sess.run(distribution_tensor)

    plt.title(title)
    # BUG FIX: np.linspace's `num` must be an integer; under Python 3
    # len(values) / 2 is a float and raises TypeError, so floor-divide.
    plt.hist(values, np.linspace(*hist_range, num=len(values) // 2))
    plt.show()


def _get_loss_acc(dataset, weights):
    """
    Get losses and validation accuracy of example neural network

    :param dataset: MNIST-style dataset object with ``train``/``validation``
    :param weights: list of three TF weight variables for the 3-layer net
    :returns: (loss_batch, valid_acc) -- per-batch training losses and the
        final validation accuracy
    """
    batch_size = 128
    epochs = 2
    learning_rate = 0.001

    features = tf.placeholder(tf.float32)
    labels = tf.placeholder(tf.float32)
    learn_rate = tf.placeholder(tf.float32)

    # Biases start at zero; the experiment varies only the weight init.
    biases = [
        tf.Variable(tf.zeros([256])),
        tf.Variable(tf.zeros([128])),
        tf.Variable(tf.zeros([dataset.train.labels.shape[1]]))
    ]

    # Layers
    layer_1 = tf.nn.relu(tf.matmul(features, weights[0]) + biases[0])
    layer_2 = tf.nn.relu(tf.matmul(layer_1, weights[1]) + biases[1])
    logits = tf.matmul(layer_2, weights[2]) + biases[2]

    # Training loss
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))

    # Optimizer
    optimizer = tf.train.AdamOptimizer(learn_rate).minimize(loss)

    # Accuracy
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Measurements use for graphing loss
    loss_batch = []

    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        batch_count = int((dataset.train.num_examples / batch_size))

        # The training cycle
        for epoch_i in range(epochs):
            for batch_i in range(batch_count):
                batch_features, batch_labels = dataset.train.next_batch(batch_size)

                # Run optimizer and get loss
                session.run(
                    optimizer,
                    feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
                l = session.run(
                    loss,
                    feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
                loss_batch.append(l)

        valid_acc = session.run(
            accuracy,
            feed_dict={features: dataset.validation.images, labels: dataset.validation.labels, learn_rate: 1.0})

    # Hack to Reset batches
    dataset.train._index_in_epoch = 0
    dataset.train._epochs_completed = 0

    return loss_batch, valid_acc


def compare_init_weights(
        dataset,
        title,
        weight_init_list,
        plot_n_batches=100):
    """
    Plot loss and print stats of weights using an example neural network

    :param dataset: dataset passed through to :func:`_get_loss_acc`
    :param title: plot title
    :param weight_init_list: list of (weights, label) pairs to compare
    :param plot_n_batches: number of leading batches to plot per curve
    """
    colors = ['r', 'b', 'g', 'c', 'y', 'k']
    label_accs = []
    label_loss = []

    assert len(weight_init_list) <= len(colors), 'Too many inital weights to plot'

    for i, (weights, label) in enumerate(weight_init_list):
        loss, val_acc = _get_loss_acc(dataset, weights)

        plt.plot(loss[:plot_n_batches], colors[i], label=label)
        label_accs.append((label, val_acc))
        label_loss.append((label, loss[-1]))

    plt.title(title)
    plt.xlabel('Batches')
    plt.ylabel('Loss')
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    plt.show()

    print('After 858 Batches (2 Epochs):')
    print('Validation Accuracy')
    for label, val_acc in label_accs:
        print('  {:7.3f}% -- {}'.format(val_acc*100, label))
    print('Loss')
    for label, loss in label_loss:
        print('  {:7.3f}  -- {}'.format(loss, label))
mit
clemenshage/grslra
smmprod/__init__.py
1
1426
# # -*- coding: utf-8 -*- import numpy as np from . import _smmprod def smmprod_c(A, B, Omega): # out wird hier preallocated, kann in Schleifen dann wiederverwendet werden out = np.zeros(Omega[0].shape[0]) _smmprod.smmprod(A, B, Omega, out) return out # def smmprod(A, B, Omega): # A_rows = A[Omega[0]] # B_cols = B.T[Omega[1]] # return np.sum(A_rows * B_cols, axis=1) # # # def smmprod2(A, B, Omega): # A_rows = A[Omega[0]] # B_cols = B.T[Omega[1]] # # Inplace Multiplikation nach A_rows, damit fällt Speicher Allokation weg # np.multiply(A_rows, B_cols, A_rows) # return np.sum(A_rows, axis=1) # # # def smmprod3(A, B, Omega): # # out wird hier preallocated, kann in Schleifen dann wiederverwendet werden # out = np.zeros(Omega.shape[1]) # _smmprod.smmprod(A, B, Omega, out) # return out # # # def smmprod_loop(A, B, Omega): # card_Omega = np.size(Omega[0]) # result = np.zeros(card_Omega) # for k in range(card_Omega): # result[k] = np.dot(A[Omega[0][k]], B.T[Omega[1][k]]) # return result # # # def smmprod_loop2(A, B, Omega): # card_Omega = np.size(Omega[0]) # result = np.zeros(card_Omega) # # B nur einmal transponieren # B = B.T # # über Omega.T iterieren, günstigere Index-Extraction # for index, idx in enumerate(Omega.T): # result[index] = np.dot(A[idx[0]], B[idx[1]]) # return result
mit
jmighion/ansible
lib/ansible/modules/web_infrastructure/ansible_tower/tower_job_cancel.py
34
2725
#!/usr/bin/python
# coding: utf-8 -*-

# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}


DOCUMENTATION = '''
---
module: tower_job_cancel
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: Cancel an Ansible Tower Job.
description:
    - Cancel Ansible Tower jobs. See
      U(https://www.ansible.com/tower) for an overview.
options:
    job_id:
      description:
        - ID of the job to cancel
      required: True
    fail_if_not_running:
      description:
        - Fail loudly if the job_id does not reference a running job.
      default: False
extends_documentation_fragment: tower
'''


EXAMPLES = '''
- name: Cancel job
  tower_job_cancel:
    job_id: job.id
'''


RETURN = '''
id:
    description: job id requesting to cancel
    returned: success
    type: int
    sample: 94
status:
    description: status of the cancel request
    returned: success
    type: string
    sample: canceled
'''


from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ansible_tower import tower_auth_config, tower_check_mode, tower_argument_spec, HAS_TOWER_CLI

# tower-cli is an optional dependency; its absence is reported cleanly via
# HAS_TOWER_CLI inside main() instead of crashing at import time.
try:
    import tower_cli
    import tower_cli.utils.exceptions as exc

    from tower_cli.conf import settings
except ImportError:
    pass


def main():
    """Module entry point: authenticate against Tower and cancel the job."""
    # Base argument spec shared by all tower_* modules (host, username, ...).
    argument_spec = tower_argument_spec()
    argument_spec.update(dict(
        job_id=dict(type='int', required=True),
        fail_if_not_running=dict(type='bool', default=False),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    if not HAS_TOWER_CLI:
        module.fail_json(msg='ansible-tower-cli required for this module')

    job_id = module.params.get('job_id')
    json_output = {}

    # Push the Tower connection settings into tower-cli for this scope only.
    tower_auth = tower_auth_config(module)
    with settings.runtime_values(**tower_auth):
        tower_check_mode(module)
        job = tower_cli.get_resource('job')
        # NOTE(review): all module params (including auth options) are
        # forwarded to job.cancel() as keyword args; tower-cli appears to
        # accept and ignore the extras -- verify against the tower-cli API.
        params = module.params.copy()

        try:
            result = job.cancel(job_id, **params)
            json_output['id'] = job_id
        except (exc.ConnectionError, exc.BadRequest, exc.TowerCLIError) as excinfo:
            module.fail_json(msg='Unable to cancel job_id/{0}: {1}'.format(job_id, excinfo), changed=False)

    json_output['changed'] = result['changed']
    json_output['status'] = result['status']
    module.exit_json(**json_output)


if __name__ == '__main__':
    main()
gpl-3.0
thejens/luigi
luigi/worker.py
1
33476
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ The worker communicates with the scheduler and does two things: 1. Sends all tasks that has to be run 2. Gets tasks from the scheduler that should be run When running in local mode, the worker talks directly to a :py:class:`~luigi.scheduler.CentralPlannerScheduler` instance. When you run a central server, the worker will talk to the scheduler using a :py:class:`~luigi.rpc.RemoteScheduler` instance. Everything in this module is private to luigi and may change in incompatible ways between versions. The exception is the exception types and the :py:class:`worker` config class. 
""" import collections import getpass import logging import multiprocessing # Note: this seems to have some stability issues: https://github.com/spotify/luigi/pull/438 import os import signal try: import Queue except ImportError: import queue as Queue import random import socket import threading import time import traceback import types from luigi import six from luigi import notifications from luigi.event import Event from luigi.task_register import load_task from luigi.scheduler import DISABLED, DONE, FAILED, PENDING, CentralPlannerScheduler from luigi.target import Target from luigi.task import Task, flatten, getpaths, Config from luigi.task_register import TaskClassException from luigi.task_status import RUNNING from luigi.parameter import FloatParameter, IntParameter, BoolParameter try: import simplejson as json except ImportError: import json logger = logging.getLogger('luigi-interface') # Prevent fork() from being called during a C-level getaddrinfo() which uses a process-global mutex, # that may not be unlocked in child process, resulting in the process being locked indefinitely. fork_lock = threading.Lock() # Why we assert on _WAIT_INTERVAL_EPS: # multiprocessing.Queue.get() is undefined for timeout=0 it seems: # https://docs.python.org/3.4/library/multiprocessing.html#multiprocessing.Queue.get. # I also tried with really low epsilon, but then ran into the same issue where # the test case "test_external_dependency_worker_is_patient" got stuck. So I # unscientifically just set the final value to a floating point number that # "worked for me". _WAIT_INTERVAL_EPS = 0.00001 class TaskException(Exception): pass class TaskProcess(multiprocessing.Process): """ Wrap all task execution in this class. Mainly for convenience since this is run in a separate process. 
""" def __init__(self, task, worker_id, result_queue, random_seed=False, worker_timeout=0, tracking_url_callback=None): super(TaskProcess, self).__init__() self.task = task self.worker_id = worker_id self.result_queue = result_queue self.random_seed = random_seed self.tracking_url_callback = tracking_url_callback if task.worker_timeout is not None: worker_timeout = task.worker_timeout self.timeout_time = time.time() + worker_timeout if worker_timeout else None def _run_get_new_deps(self): run_again = False try: task_gen = self.task.run(tracking_url_callback=self.tracking_url_callback) except TypeError as ex: if 'unexpected keyword argument' not in getattr(ex, 'message', ex.args[0]): raise run_again = True if run_again: task_gen = self.task.run() if not isinstance(task_gen, types.GeneratorType): return None next_send = None while True: try: if next_send is None: requires = six.next(task_gen) else: requires = task_gen.send(next_send) except StopIteration: return None new_req = flatten(requires) new_deps = [(t.task_module, t.task_family, t.to_str_params()) for t in new_req] if all(t.complete() for t in new_req): next_send = getpaths(requires) else: return new_deps def run(self): logger.info('[pid %s] Worker %s running %s', os.getpid(), self.worker_id, self.task) if self.random_seed: # Need to have different random seeds if running in separate processes random.seed((os.getpid(), time.time())) status = FAILED expl = '' missing = [] new_deps = [] try: # Verify that all the tasks are fulfilled! missing = [dep.task_id for dep in self.task.deps() if not dep.complete()] if missing: deps = 'dependency' if len(missing) == 1 else 'dependencies' raise RuntimeError('Unfulfilled %s at run time: %s' % (deps, ', '.join(missing))) self.task.trigger_event(Event.START, self.task) t0 = time.time() status = None if self.task.run == NotImplemented: # External task # TODO(erikbern): We should check for task completeness after non-external tasks too! 
# This will resolve #814 and make things a lot more consistent status = DONE if self.task.complete() else FAILED else: new_deps = self._run_get_new_deps() status = DONE if not new_deps else PENDING if new_deps: logger.info( '[pid %s] Worker %s new requirements %s', os.getpid(), self.worker_id, self.task) elif status == DONE: self.task.trigger_event( Event.PROCESSING_TIME, self.task, time.time() - t0) expl = self.task.on_success() logger.info('[pid %s] Worker %s done %s', os.getpid(), self.worker_id, self.task) self.task.trigger_event(Event.SUCCESS, self.task) except KeyboardInterrupt: raise except BaseException as ex: status = FAILED logger.exception("[pid %s] Worker %s failed %s", os.getpid(), self.worker_id, self.task) self.task.trigger_event(Event.FAILURE, self.task, ex) raw_error_message = self.task.on_failure(ex) expl = raw_error_message finally: self.result_queue.put( (self.task.task_id, status, expl, missing, new_deps)) def _recursive_terminate(self): import psutil try: parent = psutil.Process(self.pid) children = parent.children(recursive=True) # terminate parent. Give it a chance to clean up super(TaskProcess, self).terminate() parent.wait() # terminate children for child in children: try: child.terminate() except psutil.NoSuchProcess: continue except psutil.NoSuchProcess: return def terminate(self): """Terminate this process and its subprocesses.""" # default terminate() doesn't cleanup child processes, it orphans them. try: return self._recursive_terminate() except ImportError: return super(TaskProcess, self).terminate() class SingleProcessPool(object): """ Dummy process pool for using a single processor. Imitates the api of multiprocessing.Pool using single-processor equivalents. """ def apply_async(self, function, args): return function(*args) def close(self): pass def join(self): pass class DequeQueue(collections.deque): """ deque wrapper implementing the Queue interface. 
""" def put(self, obj, block=None, timeout=None): return self.append(obj) def get(self, block=None, timeout=None): return self.pop() class AsyncCompletionException(Exception): """ Exception indicating that something went wrong with checking complete. """ def __init__(self, trace): self.trace = trace class TracebackWrapper(object): """ Class to wrap tracebacks so we can know they're not just strings. """ def __init__(self, trace): self.trace = trace def check_complete(task, out_queue): """ Checks if task is complete, puts the result to out_queue. """ logger.debug("Checking if %s is complete", task) try: is_complete = task.complete() except Exception: is_complete = TracebackWrapper(traceback.format_exc()) out_queue.put((task, is_complete)) class worker(Config): ping_interval = FloatParameter(default=1.0, config_path=dict(section='core', name='worker-ping-interval')) keep_alive = BoolParameter(default=False, config_path=dict(section='core', name='worker-keep-alive')) count_uniques = BoolParameter(default=False, config_path=dict(section='core', name='worker-count-uniques'), description='worker-count-uniques means that we will keep a ' 'worker alive only if it has a unique pending task, as ' 'well as having keep-alive true') wait_interval = FloatParameter(default=1.0, config_path=dict(section='core', name='worker-wait-interval')) wait_jitter = FloatParameter(default=5.0) max_reschedules = IntParameter(default=1, config_path=dict(section='core', name='worker-max-reschedules')) timeout = IntParameter(default=0, config_path=dict(section='core', name='worker-timeout')) task_limit = IntParameter(default=None, config_path=dict(section='core', name='worker-task-limit')) retry_external_tasks = BoolParameter(default=False, config_path=dict(section='core', name='retry-external-tasks'), description='If true, incomplete external tasks will be ' 'retested for completion while Luigi is running.') class KeepAliveThread(threading.Thread): """ Periodically tell the scheduler that the 
worker still lives. """ def __init__(self, scheduler, worker_id, ping_interval): super(KeepAliveThread, self).__init__() self._should_stop = threading.Event() self._scheduler = scheduler self._worker_id = worker_id self._ping_interval = ping_interval def stop(self): self._should_stop.set() def run(self): while True: self._should_stop.wait(self._ping_interval) if self._should_stop.is_set(): logger.info("Worker %s was stopped. Shutting down Keep-Alive thread" % self._worker_id) break with fork_lock: try: self._scheduler.ping(worker=self._worker_id) except: # httplib.BadStatusLine: logger.warning('Failed pinging scheduler') class Worker(object): """ Worker object communicates with a scheduler. Simple class that talks to a scheduler and: * tells the scheduler what it has to do + its dependencies * asks for stuff to do (pulls it in a loop and runs it) """ def __init__(self, scheduler=None, worker_id=None, worker_processes=1, assistant=False, **kwargs): if scheduler is None: scheduler = CentralPlannerScheduler() self.worker_processes = int(worker_processes) self._worker_info = self._generate_worker_info() if not worker_id: worker_id = 'Worker(%s)' % ', '.join(['%s=%s' % (k, v) for k, v in self._worker_info]) self._config = worker(**kwargs) assert self._config.wait_interval >= _WAIT_INTERVAL_EPS, "[worker] wait_interval must be positive" assert self._config.wait_jitter >= 0.0, "[worker] wait_jitter must be equal or greater than zero" self._id = worker_id self._scheduler = scheduler self._assistant = assistant self._stop_requesting_work = False self.host = socket.gethostname() self._scheduled_tasks = {} self._suspended_tasks = {} self._first_task = None self.add_succeeded = True self.run_succeeded = True self.unfulfilled_counts = collections.defaultdict(int) try: signal.signal(signal.SIGUSR1, self.handle_interrupt) except AttributeError: pass # Keep info about what tasks are running (could be in other processes) if worker_processes == 1: self._task_result_queue = 
DequeQueue() else: self._task_result_queue = multiprocessing.Queue() self._running_tasks = {} # Stuff for execution_summary self._add_task_history = [] self._get_work_response_history = [] def _add_task(self, *args, **kwargs): """ Call ``self._scheduler.add_task``, but store the values too so we can implement :py:func:`luigi.execution_summary.summary`. """ task_id = kwargs['task_id'] status = kwargs['status'] runnable = kwargs['runnable'] task = self._scheduled_tasks.get(task_id) if task: msg = (task, status, runnable) self._add_task_history.append(msg) self._scheduler.add_task(*args, **kwargs) logger.info('Informed scheduler that task %s has status %s', task_id, status) def __enter__(self): """ Start the KeepAliveThread. """ self._keep_alive_thread = KeepAliveThread(self._scheduler, self._id, self._config.ping_interval) self._keep_alive_thread.daemon = True self._keep_alive_thread.start() return self def __exit__(self, type, value, traceback): """ Stop the KeepAliveThread and kill still running tasks. """ self._keep_alive_thread.stop() self._keep_alive_thread.join() for task in self._running_tasks.values(): if task.is_alive(): task.terminate() return False # Don't suppress exception def _generate_worker_info(self): # Generate as much info as possible about the worker # Some of these calls might not be available on all OS's args = [('salt', '%09d' % random.randrange(0, 999999999)), ('workers', self.worker_processes)] try: args += [('host', socket.gethostname())] except BaseException: pass try: args += [('username', getpass.getuser())] except BaseException: pass try: args += [('pid', os.getpid())] except BaseException: pass try: sudo_user = os.getenv("SUDO_USER") if sudo_user: args.append(('sudo_user', sudo_user)) except BaseException: pass return args def _validate_task(self, task): if not isinstance(task, Task): raise TaskException('Can not schedule non-task %s' % task) if not task.initialized(): # we can't get the repr of it since it's not initialized... 
raise TaskException('Task of class %s not initialized. Did you override __init__ and forget to call super(...).__init__?' % task.__class__.__name__) def _log_complete_error(self, task, tb): log_msg = "Will not schedule {task} or any dependencies due to error in complete() method:\n{tb}".format(task=task, tb=tb) logger.warning(log_msg) def _log_dependency_error(self, task, tb): log_msg = "Will not schedule {task} or any dependencies due to error in deps() method:\n{tb}".format(task=task, tb=tb) logger.warning(log_msg) def _log_unexpected_error(self, task): logger.exception("Luigi unexpected framework error while scheduling %s", task) # needs to be called from within except clause def _email_complete_error(self, task, formatted_traceback): self._email_error(task, formatted_traceback, subject="Luigi: {task} failed scheduling. Host: {host}", headline="Will not schedule task or any dependencies due to error in complete() method", ) def _email_dependency_error(self, task, formatted_traceback): self._email_error(task, formatted_traceback, subject="Luigi: {task} failed scheduling. Host: {host}", headline="Will not schedule task or any dependencies due to error in deps() method", ) def _email_unexpected_error(self, task, formatted_traceback): self._email_error(task, formatted_traceback, subject="Luigi: Framework error while scheduling {task}. Host: {host}", headline="Luigi framework error", ) def _email_task_failure(self, task, formatted_traceback): self._email_error(task, formatted_traceback, subject="Luigi: {task} FAILED. Host: {host}", headline="A task failed when running. 
Most likely run() raised an exception.", ) def _email_error(self, task, formatted_traceback, subject, headline): formatted_subject = subject.format(task=task, host=self.host) message = notifications.format_task_error(headline, task, formatted_traceback) notifications.send_error_email(formatted_subject, message, task.owner_email) def add(self, task, multiprocess=False): """ Add a Task for the worker to check and possibly schedule and run. Returns True if task and its dependencies were successfully scheduled or completed before. """ if self._first_task is None and hasattr(task, 'task_id'): self._first_task = task.task_id self.add_succeeded = True if multiprocess: queue = multiprocessing.Manager().Queue() pool = multiprocessing.Pool() else: queue = DequeQueue() pool = SingleProcessPool() self._validate_task(task) pool.apply_async(check_complete, [task, queue]) # we track queue size ourselves because len(queue) won't work for multiprocessing queue_size = 1 try: seen = set([task.task_id]) while queue_size: current = queue.get() queue_size -= 1 item, is_complete = current for next in self._add(item, is_complete): if next.task_id not in seen: self._validate_task(next) seen.add(next.task_id) pool.apply_async(check_complete, [next, queue]) queue_size += 1 except (KeyboardInterrupt, TaskException): raise except Exception as ex: self.add_succeeded = False formatted_traceback = traceback.format_exc() self._log_unexpected_error(task) task.trigger_event(Event.BROKEN_TASK, task, ex) self._email_unexpected_error(task, formatted_traceback) finally: pool.close() pool.join() return self.add_succeeded def _add(self, task, is_complete): if self._config.task_limit is not None and len(self._scheduled_tasks) >= self._config.task_limit: logger.warning('Will not schedule %s or any dependencies due to exceeded task-limit of %d', task, self._config.task_limit) return formatted_traceback = None try: self._check_complete_value(is_complete) except KeyboardInterrupt: raise except 
AsyncCompletionException as ex: formatted_traceback = ex.trace except BaseException: formatted_traceback = traceback.format_exc() if formatted_traceback is not None: self.add_succeeded = False self._log_complete_error(task, formatted_traceback) task.trigger_event(Event.DEPENDENCY_MISSING, task) self._email_complete_error(task, formatted_traceback) # abort, i.e. don't schedule any subtasks of a task with # failing complete()-method since we don't know if the task # is complete and subtasks might not be desirable to run if # they have already ran before return if is_complete: deps = None status = DONE runnable = False task.trigger_event(Event.DEPENDENCY_PRESENT, task) elif task.run == NotImplemented: deps = None status = PENDING runnable = worker().retry_external_tasks task.trigger_event(Event.DEPENDENCY_MISSING, task) logger.warning('Data for %s does not exist (yet?). The task is an ' 'external data depedency, so it can not be run from' ' this luigi process.', task) else: try: deps = task.deps() except Exception as ex: formatted_traceback = traceback.format_exc() self.add_succeeded = False self._log_dependency_error(task, formatted_traceback) task.trigger_event(Event.BROKEN_TASK, task, ex) self._email_dependency_error(task, formatted_traceback) return status = PENDING runnable = True if task.disabled: status = DISABLED if deps: for d in deps: self._validate_dependency(d) task.trigger_event(Event.DEPENDENCY_DISCOVERED, task, d) yield d # return additional tasks to add deps = [d.task_id for d in deps] self._scheduled_tasks[task.task_id] = task self._add_task(worker=self._id, task_id=task.task_id, status=status, deps=deps, runnable=runnable, priority=task.priority, resources=task.process_resources(), params=task.to_str_params(), family=task.task_family, module=task.task_module) def _validate_dependency(self, dependency): if isinstance(dependency, Target): raise Exception('requires() can not return Target objects. 
Wrap it in an ExternalTask class') elif not isinstance(dependency, Task): raise Exception('requires() must return Task objects') def _check_complete_value(self, is_complete): if is_complete not in (True, False): if isinstance(is_complete, TracebackWrapper): raise AsyncCompletionException(is_complete.trace) raise Exception("Return value of Task.complete() must be boolean (was %r)" % is_complete) def _add_worker(self): self._worker_info.append(('first_task', self._first_task)) self._scheduler.add_worker(self._id, self._worker_info) def _log_remote_tasks(self, running_tasks, n_pending_tasks, n_unique_pending): logger.debug("Done") logger.debug("There are no more tasks to run at this time") if running_tasks: for r in running_tasks: logger.debug('%s is currently run by worker %s', r['task_id'], r['worker']) elif n_pending_tasks: logger.debug("There are %s pending tasks possibly being run by other workers", n_pending_tasks) if n_unique_pending: logger.debug("There are %i pending tasks unique to this worker", n_unique_pending) def _get_work(self): if self._stop_requesting_work: return None, 0, 0, 0 logger.debug("Asking scheduler for work...") r = self._scheduler.get_work( worker=self._id, host=self.host, assistant=self._assistant, current_tasks=list(self._running_tasks.keys()), ) n_pending_tasks = r['n_pending_tasks'] task_id = r['task_id'] running_tasks = r['running_tasks'] n_unique_pending = r['n_unique_pending'] self._get_work_response_history.append(dict( task_id=task_id, running_tasks=running_tasks, )) if task_id is not None and task_id not in self._scheduled_tasks: logger.info('Did not schedule %s, will load it dynamically', task_id) try: # TODO: we should obtain the module name from the server! 
self._scheduled_tasks[task_id] = \ load_task(module=r.get('task_module'), task_name=r['task_family'], params_str=r['task_params']) except TaskClassException as ex: msg = 'Cannot find task for %s' % task_id logger.exception(msg) subject = 'Luigi: %s' % msg error_message = notifications.wrap_traceback(ex) notifications.send_error_email(subject, error_message) self._add_task(worker=self._id, task_id=task_id, status=FAILED, runnable=False, assistant=self._assistant) task_id = None self.run_succeeded = False return task_id, running_tasks, n_pending_tasks, n_unique_pending def _run_task(self, task_id): task = self._scheduled_tasks[task_id] p = self._create_task_process(task) self._running_tasks[task_id] = p if self.worker_processes > 1: with fork_lock: p.start() else: # Run in the same process p.run() def _create_task_process(self, task): def update_tracking_url(tracking_url): self._scheduler.add_task( task_id=task.task_id, worker=self._id, status=RUNNING, tracking_url=tracking_url, ) return TaskProcess( task, self._id, self._task_result_queue, random_seed=bool(self.worker_processes > 1), worker_timeout=self._config.timeout, tracking_url_callback=update_tracking_url, ) def _purge_children(self): """ Find dead children and put a response on the result queue. :return: """ for task_id, p in six.iteritems(self._running_tasks): if not p.is_alive() and p.exitcode: error_msg = 'Task %s died unexpectedly with exit code %s' % (task_id, p.exitcode) elif p.timeout_time is not None and time.time() > float(p.timeout_time) and p.is_alive(): p.terminate() error_msg = 'Task %s timed out and was terminated.' % task_id else: continue logger.info(error_msg) self._task_result_queue.put((task_id, FAILED, error_msg, [], [])) def _handle_next_task(self): """ We have to catch three ways a task can be "done": 1. normal execution: the task runs/fails and puts a result back on the queue, 2. 
new dependencies: the task yielded new deps that were not complete and will be rescheduled and dependencies added, 3. child process dies: we need to catch this separately. """ while True: self._purge_children() # Deal with subprocess failures try: task_id, status, expl, missing, new_requirements = ( self._task_result_queue.get( timeout=self._config.wait_interval)) except Queue.Empty: return task = self._scheduled_tasks[task_id] if not task or task_id not in self._running_tasks: continue # Not a running task. Probably already removed. # Maybe it yielded something? if status == FAILED and expl: # If no expl, it is because of a retry-external-task failure. self._email_task_failure(task, expl) new_deps = [] if new_requirements: new_req = [load_task(module, name, params) for module, name, params in new_requirements] for t in new_req: self.add(t) new_deps = [t.task_id for t in new_req] self._add_task(worker=self._id, task_id=task_id, status=status, expl=json.dumps(expl), resources=task.process_resources(), runnable=None, params=task.to_str_params(), family=task.task_family, module=task.task_module, new_deps=new_deps, assistant=self._assistant) self._running_tasks.pop(task_id) # re-add task to reschedule missing dependencies if missing: reschedule = True # keep out of infinite loops by not rescheduling too many times for task_id in missing: self.unfulfilled_counts[task_id] += 1 if (self.unfulfilled_counts[task_id] > self._config.max_reschedules): reschedule = False if reschedule: self.add(task) self.run_succeeded &= (status == DONE) or (len(new_deps) > 0) return def _sleeper(self): # TODO is exponential backoff necessary? while True: jitter = self._config.wait_jitter wait_interval = self._config.wait_interval + random.uniform(0, jitter) logger.debug('Sleeping for %f seconds', wait_interval) time.sleep(wait_interval) yield def _keep_alive(self, n_pending_tasks, n_unique_pending): """ Returns true if a worker should stay alive given. 
If worker-keep-alive is not set, this will always return false. For an assistant, it will always return the value of worker-keep-alive. Otherwise, it will return true for nonzero n_pending_tasks. If worker-count-uniques is true, it will also require that one of the tasks is unique to this worker. """ if not self._config.keep_alive: return False elif self._assistant: return True else: return n_pending_tasks and (n_unique_pending or not self._config.count_uniques) def handle_interrupt(self, signum, _): """ Stops the assistant from asking for more work on SIGUSR1 """ if signum == signal.SIGUSR1: self._config.keep_alive = False self._stop_requesting_work = True def run(self): """ Returns True if all scheduled tasks were executed successfully. """ logger.info('Running Worker with %d processes', self.worker_processes) sleeper = self._sleeper() self.run_succeeded = True self._add_worker() while True: while len(self._running_tasks) >= self.worker_processes: logger.debug('%d running tasks, waiting for next task to finish', len(self._running_tasks)) self._handle_next_task() task_id, running_tasks, n_pending_tasks, n_unique_pending = self._get_work() if task_id is None: if not self._stop_requesting_work: self._log_remote_tasks(running_tasks, n_pending_tasks, n_unique_pending) if len(self._running_tasks) == 0: if self._keep_alive(n_pending_tasks, n_unique_pending): six.next(sleeper) continue else: break else: self._handle_next_task() continue # task_id is not None: logger.debug("Pending tasks: %s", n_pending_tasks) self._run_task(task_id) while len(self._running_tasks): logger.debug('Shut down Worker, %d more tasks to go', len(self._running_tasks)) self._handle_next_task() return self.run_succeeded
apache-2.0
krount/sslyze
sslyze/plugins/heartbleed_plugin.py
1
6967
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import unicode_literals import socket import types from xml.etree.ElementTree import Element from nassl._nassl import WantReadError from sslyze.plugins import plugin_base from sslyze.plugins.plugin_base import PluginScanResult, PluginScanCommand from sslyze.server_connectivity import ServerConnectivityInfo from tls_parser.alert_protocol import TlsAlertRecord from tls_parser.exceptions import NotEnoughData from tls_parser.handshake_protocol import TlsServerHelloDoneRecord, TlsHandshakeRecord from tls_parser.heartbeat_protocol import TlsHeartbeatRequestRecord from tls_parser.parser import TlsRecordParser from tls_parser.record_protocol import TlsVersionEnum class HeartbleedScanCommand(PluginScanCommand): """Test the server(s) for the OpenSSL Heartbleed vulnerability. """ @classmethod def get_cli_argument(cls): return 'heartbleed' class HeartbleedPlugin(plugin_base.Plugin): """Test the server(s) for the OpenSSL Heartbleed vulnerability (CVE-2014-0160). """ @classmethod def get_available_commands(cls): return [HeartbleedScanCommand] def process_task(self, server_info, scan_command): # type: (ServerConnectivityInfo, HeartbleedScanCommand) -> HeartbleedScanResult ssl_connection = server_info.get_preconfigured_ssl_connection() # Replace nassl.sslClient.do_handshake() with a heartbleed checking SSL handshake so that all the SSLyze options # (startTLS, proxy, etc.) 
still work ssl_connection.do_handshake = types.MethodType(do_handshake_with_heartbleed, ssl_connection) is_vulnerable_to_heartbleed = False try: # Start the SSL handshake ssl_connection.connect() except VulnerableToHeartbleed: # The test was completed and the server is vulnerable is_vulnerable_to_heartbleed = True except NotVulnerableToHeartbleed: # The test was completed and the server is NOT vulnerable pass finally: ssl_connection.close() return HeartbleedScanResult(server_info, scan_command, is_vulnerable_to_heartbleed) class HeartbleedScanResult(PluginScanResult): """The result of running a HeartbleedScanCommand on a specific server. Attributes: is_vulnerable_to_heartbleed (bool): True if the server is vulnerable to the Heartbleed attack. """ COMMAND_TITLE = 'OpenSSL Heartbleed' def __init__(self, server_info, scan_command, is_vulnerable_to_heartbleed): # type: (ServerConnectivityInfo, HeartbleedScanCommand, bool) -> None super(HeartbleedScanResult, self).__init__(server_info, scan_command) self.is_vulnerable_to_heartbleed = is_vulnerable_to_heartbleed def as_text(self): heartbleed_txt = 'VULNERABLE - Server is vulnerable to Heartbleed' \ if self.is_vulnerable_to_heartbleed \ else 'OK - Not vulnerable to Heartbleed' return [self._format_title(self.COMMAND_TITLE), self._format_field('', heartbleed_txt)] def as_xml(self): xml_output = Element(self.scan_command.get_cli_argument(), title=self.COMMAND_TITLE) xml_output.append(Element('openSslHeartbleed', isVulnerable=str(self.is_vulnerable_to_heartbleed))) return xml_output class VulnerableToHeartbleed(Exception): """Exception to raise during the handshake to hijack the flow and test for Heartbleed. """ class NotVulnerableToHeartbleed(Exception): """Exception to raise during the handshake to hijack the flow and test for Heartbleed. """ def do_handshake_with_heartbleed(self): """Modified do_handshake() to send a heartbleed payload and return the result. 
""" try: # Start the handshake using nassl - will throw WantReadError right away self._ssl.do_handshake() except WantReadError: # Send the Client Hello len_to_read = self._network_bio.pending() while len_to_read: # Get the data from the SSL engine handshake_data_out = self._network_bio.read(len_to_read) # Send it to the peer self._sock.send(handshake_data_out) len_to_read = self._network_bio.pending() # Build the heartbleed payload - based on # https://blog.mozilla.org/security/2014/04/12/testing-for-heartbleed-vulnerability-without-exploiting-the-server/ payload = TlsHeartbeatRequestRecord.from_parameters( tls_version=TlsVersionEnum[self._ssl_version.name], heartbeat_data=b'\x01' * 16381 ).to_bytes() payload += TlsHeartbeatRequestRecord.from_parameters( TlsVersionEnum[self._ssl_version.name], heartbeat_data=b'\x01\x00\x00' ).to_bytes() # Send the payload self._sock.send(payload) # Retrieve the server's response - directly read the underlying network socket # Retrieve data until we get to the ServerHelloDone # The server may send back a ServerHello, an Alert or a CertificateRequest first did_receive_hello_done = False remaining_bytes = b'' while not did_receive_hello_done: try: tls_record, len_consumed = TlsRecordParser.parse_bytes(remaining_bytes) remaining_bytes = remaining_bytes[len_consumed::] except NotEnoughData: # Try to get more data raw_ssl_bytes = self._sock.recv(16381) if not raw_ssl_bytes: # No data? break remaining_bytes = remaining_bytes + raw_ssl_bytes continue if isinstance(tls_record, TlsServerHelloDoneRecord): did_receive_hello_done = True elif isinstance(tls_record, TlsHandshakeRecord): # Could be a ServerHello, a Certificate or a CertificateRequest if the server requires client auth pass elif isinstance(tls_record, TlsAlertRecord): # Server returned a TLS alert break else: raise ValueError('Unknown record? 
Type {}'.format(tls_record.header.type)) is_vulnerable_to_heartbleed = False if did_receive_hello_done: expected_heartbleed_payload = b'\x01' * 10 if expected_heartbleed_payload in remaining_bytes: # Server replied with our hearbeat payload is_vulnerable_to_heartbleed = True else: try: raw_ssl_bytes = self._sock.recv(16381) except socket.error: # Server closed the connection after receiving the heartbleed payload raise NotVulnerableToHeartbleed() if expected_heartbleed_payload in raw_ssl_bytes: # Server replied with our hearbeat payload is_vulnerable_to_heartbleed = True if is_vulnerable_to_heartbleed: raise VulnerableToHeartbleed() else: raise NotVulnerableToHeartbleed()
gpl-2.0
kingmotley/SickRage
lib/sqlalchemy/dialects/mssql/pymssql.py
76
2980
# mssql/pymssql.py # Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: mssql+pymssql :name: pymssql :dbapi: pymssql :connectstring: mssql+pymssql://<username>:<password>@<freetds_name>?charset=utf8 :url: http://pymssql.org/ pymssql is a Python module that provides a Python DBAPI interface around `FreeTDS <http://www.freetds.org/>`_. Compatible builds are available for Linux, MacOSX and Windows platforms. """ from .base import MSDialect from ... import types as sqltypes, util, processors import re class _MSNumeric_pymssql(sqltypes.Numeric): def result_processor(self, dialect, type_): if not self.asdecimal: return processors.to_float else: return sqltypes.Numeric.result_processor(self, dialect, type_) class MSDialect_pymssql(MSDialect): supports_sane_rowcount = False driver = 'pymssql' colspecs = util.update_copy( MSDialect.colspecs, { sqltypes.Numeric: _MSNumeric_pymssql, sqltypes.Float: sqltypes.Float, } ) @classmethod def dbapi(cls): module = __import__('pymssql') # pymmsql doesn't have a Binary method. we use string # TODO: monkeypatching here is less than ideal module.Binary = lambda x: x if hasattr(x, 'decode') else str(x) client_ver = tuple(int(x) for x in module.__version__.split(".")) if client_ver < (1, ): util.warn("The pymssql dialect expects at least " "the 1.0 series of the pymssql DBAPI.") return module def __init__(self, **params): super(MSDialect_pymssql, self).__init__(**params) self.use_scope_identity = True def _get_server_version_info(self, connection): vers = connection.scalar("select @@version") m = re.match( r"Microsoft SQL Server.*? 
- (\d+).(\d+).(\d+).(\d+)", vers) if m: return tuple(int(x) for x in m.group(1, 2, 3, 4)) else: return None def create_connect_args(self, url): opts = url.translate_connect_args(username='user') opts.update(url.query) port = opts.pop('port', None) if port and 'host' in opts: opts['host'] = "%s:%s" % (opts['host'], port) return [[], opts] def is_disconnect(self, e, connection, cursor): for msg in ( "Adaptive Server connection timed out", "Net-Lib error during Connection reset by peer", "message 20003", # connection timeout "Error 10054", "Not connected to any MS SQL server", "Connection is closed" ): if msg in str(e): return True else: return False dialect = MSDialect_pymssql
gpl-3.0
ltilve/ChromiumGStreamerBackend
tools/telemetry/third_party/gsutilz/gslib/addlhelp/naming.py
34
8930
# -*- coding: utf-8 -*- # Copyright 2012 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Additional help about gsutil object and bucket naming.""" from __future__ import absolute_import from gslib.help_provider import HelpProvider _DETAILED_HELP_TEXT = (""" <B>BUCKET NAME REQUIREMENTS</B> Google Cloud Storage has a single namespace, so you will not be allowed to create a bucket with a name already in use by another user. You can, however, carve out parts of the bucket name space corresponding to your company's domain name (see "DOMAIN NAMED BUCKETS"). Bucket names must conform to standard DNS naming conventions. This is because a bucket name can appear in a DNS record as part of a CNAME redirect. In addition to meeting DNS naming requirements, Google Cloud Storage imposes other requirements on bucket naming. At a minimum, your bucket names must meet the following requirements: - Bucket names must contain only lowercase letters, numbers, dashes (-), and dots (.). - Bucket names must start and end with a number or letter. - Bucket names must contain 3 to 63 characters. Names containing dots can contain up to 222 characters, but each dot-separated component can be no longer than 63 characters. - Bucket names cannot be represented as an IPv4 address in dotted-decimal notation (for example, 192.168.5.4). - Bucket names cannot begin with the "goog" prefix. - For DNS compliance, you should not have a period adjacent to another period or dash. 
For example, ".." or "-." or ".-" are not acceptable. <B>OBJECT NAME REQUIREMENTS</B> Object names can contain any sequence of Unicode characters, of length 1-1024 bytes when UTF-8 encoded. Object names must not contain CarriageReturn, CarriageReturnLineFeed, or the XML-disallowed surrogate blocks (xFFFE or xFFFF). We strongly recommend that you abide by the following object naming conventions: - Avoid using control characters that are illegal in XML 1.0 in your object names (#x7F-#x84 and #x86-#x9F). These characters will cause XML listing issues when you try to list your objects. - Avoid using "#" in your object names. gsutil interprets object names ending with #<numeric string> as version identifiers, so including "#" in object names can make it difficult or impossible to perform various operations on such objects using gsutil (see 'gsutil help versions'). - Avoid using "[", "]", "*", or "?" in your object names. gsutil interprets these characters as wildcards, so including any of these characters in object names can make it difficult or impossible to perform various wildcard operations using gsutil (see 'gsutil help wildcards'). See also 'gsutil help encoding' about file/object name encoding requirements and potential interoperability concerns. <B>DOMAIN NAMED BUCKETS</B> You can carve out parts of the Google Cloud Storage bucket name space by creating buckets with domain names (like "example.com"). Before you can create a bucket name containing one or more '.' characters, the following rules apply: - If the name is a syntactically valid DNS name ending with a currently-recognized top-level domain (such as .com), you will be required to verify domain ownership. - Otherwise you will be disallowed from creating the bucket. If your project needs to use a domain-named bucket, you need to have a team member both verify the domain and create the bucket. 
This is because Google Cloud Storage checks for domain ownership against the user who creates the bucket, so the user who creates the bucket must also be verified as an owner or manager of the domain. To verify as the owner or manager of a domain, use the Google Webmaster Tools verification process. The Webmaster Tools verification process provides three methods for verifying an owner or manager of a domain: 1. Adding a special Meta tag to a site's homepage. 2. Uploading a special HTML file to a site. 3. Adding a DNS TXT record to a domain's DNS configuration. Meta tag verification and HTML file verification are easier to perform and are probably adequate for most situations. DNS TXT record verification is a domain-based verification method that is useful in situations where a site wants to tightly control who can create domain-named buckets. Once a site creates a DNS TXT record to verify ownership of a domain, it takes precedence over meta tag and HTML file verification. For example, you might have two IT staff members who are responsible for managing your site, called "example.com." If they complete the DNS TXT record verification, only they would be able to create buckets called "example.com", "reports.example.com", "downloads.example.com", and other domain-named buckets. Site-Based Verification ----------------------- If you have administrative control over the HTML files that make up a site, you can use one of the site-based verification methods to verify that you control or own a site. When you do this, Google Cloud Storage lets you create buckets representing the verified site and any sub-sites - provided nobody has used the DNS TXT record method to verify domain ownership of a parent of the site. As an example, assume that nobody has used the DNS TXT record method to verify ownership of the following domains: abc.def.example.com, def.example.com, and example.com. 
In this case, Google Cloud Storage lets you create a bucket named abc.def.example.com if you verify that you own or control any of the following sites: http://abc.def.example.com http://def.example.com http://example.com Domain-Based Verification ------------------------- If you have administrative control over a domain's DNS configuration, you can use the DNS TXT record verification method to verify that you own or control a domain. When you use the domain-based verification method to verify that you own or control a domain, Google Cloud Storage lets you create buckets that represent any subdomain under the verified domain. Furthermore, Google Cloud Storage prevents anybody else from creating buckets under that domain unless you add their name to the list of verified domain owners or they have verified their domain ownership by using the DNS TXT record verification method. For example, if you use the DNS TXT record verification method to verify your ownership of the domain example.com, Google Cloud Storage will let you create bucket names that represent any subdomain under the example.com domain, such as abc.def.example.com, example.com/music/jazz, or abc.example.com/music/jazz. Using the DNS TXT record method to verify domain ownership supersedes verification by site-based verification methods. For example, if you use the Meta tag method or HTML file method to verify domain ownership of http://example.com, but someone else uses the DNS TXT record method to verify ownership of the example.com domain, Google Cloud Storage will not allow you to create a bucket named example.com. To create the bucket example.com, the domain owner who used the DNS TXT method to verify domain ownership must add you to the list of verified domain owners for example.com. 
The DNS TXT record verification method is particularly useful if you manage a domain for a large organization that has numerous subdomains because it lets you control who can create buckets representing those domain names. Note: If you use the DNS TXT record verification method to verify ownership of a domain, you cannot create a CNAME record for that domain. RFC 1034 disallows inclusion of any other resource records if there is a CNAME resource record present. If you want to create a CNAME resource record for a domain, you must use the Meta tag verification method or the HTML file verification method. """) class CommandOptions(HelpProvider): """Additional help about gsutil object and bucket naming.""" # Help specification. See help_provider.py for documentation. help_spec = HelpProvider.HelpSpec( help_name='naming', help_name_aliases=['domain', 'limits', 'name', 'names'], help_type='additional_help', help_one_line_summary='Object and Bucket Naming', help_text=_DETAILED_HELP_TEXT, subcommand_help_text={}, )
bsd-3-clause
keseldude/brobot
brobot/plugins/users.py
1
1217
#=============================================================================== # brobot # Copyright (C) 2010 Michael Keselman # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/> #=============================================================================== from core import bot class UsersPlugin(bot.CommandPlugin): name = 'users' def process(self, connection, source, target, args): channel = self.ircbot.find_channel(connection.server, target) if channel is not None: num_users = len(channel.users) return self.privmsg(target, u'%d Users in the channel.' % num_users)
gpl-3.0
KaranToor/MA450
google-cloud-sdk/.install/.backup/lib/googlecloudsdk/third_party/appengine/tools/value_mixin.py
7
1607
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Provides ValueMixin. ValueMixin provides comparison (including equality) methods and hashing based on the values of fields. """ class ValueMixin(object): def __cmp__(self, other): # Compare this object to the other one based on the names and values of # the fields of both classes. If other is not a class instance then it # won't have a __dict__ attribute. We can't establish a proper order then, # since we might not have x < y => y > x, but we arbitrarily declare # our instances greater than non-instance objects so at least they are not # equal. if hasattr(other, '__dict__'): return self.__dict__.__cmp__(other.__dict__) else: return 1 def __hash__(self): return hash(frozenset(self.__dict__.items())) def __repr__(self): # Return a string representation like "MyClass(foo=23, bar='skidoo')". d = self.__dict__ attrs = ['%s=%r' % (key, d[key]) for key in sorted(d)] return '%s(%s)' % (self.__class__.__name__, ', '.join(attrs))
apache-2.0
amstart/demo
vote/polls/forms.py
1
1604
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from crispy_forms.layout import Layout, Div, Submit, HTML, Button, Row, Field
from crispy_forms.bootstrap import AppendedText, PrependedText, FormActions


class PremiseForm(forms.Form):
    """Demo form showcasing crispy-forms bootstrap widgets (radios,
    appended/prepended text inputs, multi-select) with a horizontal layout."""

    # Radio group; 'option_two' is pre-selected.
    radio_buttons = forms.ChoiceField(
        choices = (
            ('option_one', "Option one is this and that be sure to include why it's great"),
            ('option_two', "Option two can is something else and selecting it will deselect option one")
        ),
        widget = forms.RadioSelect,
        initial = 'option_two',
    )

    appended_text = forms.CharField(
        help_text = "Here's more help text"
    )

    prepended_text = forms.CharField()

    prepended_text_two = forms.CharField()

    multicolon_select = forms.MultipleChoiceField(
        choices = (('1', '1'), ('2', '2'), ('3', '3'), ('4', '4'), ('5', '5')),
    )

    # Uni-form
    # Crispy-forms helper configured at class level; renders the form with
    # Bootstrap's horizontal layout.
    helper = FormHelper()
    helper.form_class = 'form-horizontal'
    # NOTE(review): the layout references 'text_input' and 'textarea', which
    # are not declared as fields on this form — presumably leftovers from the
    # upstream crispy-forms demo this was copied from; verify before reuse.
    helper.layout = Layout(
        Field('text_input', css_class='input-xlarge'),
        Field('textarea', rows="3", css_class='input-xlarge'),
        'radio_buttons',
        # '.00' suffix rendered after the input box.
        AppendedText('appended_text', '.00'),
        # Raw checkbox markup rendered as an active prepend addon.
        PrependedText('prepended_text', '<input type="checkbox" checked="checked" value="" id="" name="">', active=True),
        PrependedText('prepended_text_two', '@'),
        'multicolon_select',
        FormActions(
            Submit('save_changes', 'Save changes', css_class="btn-primary"),
            Submit('cancel', 'Cancel'),
        )
    )
mit
crepererum/invenio
invenio/ext/sqlalchemy/expressions.py
17
1194
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2011, 2012, 2013 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """ invenio.ext.sqlalchemy.expressions ---------------------------------- Implements various custom ORM expressions. """ from sqlalchemy.ext.compiler import compiles from sqlalchemy.sql.expression import FunctionElement class AsBINARY(FunctionElement): name = 'AsBINARY' @compiles(AsBINARY) def compile(element, compiler, **kw): return "BINARY %s" % compiler.process(element.clauses)
gpl-2.0
simplyguru-dot/ansible-modules-core
cloud/openstack/os_object.py
130
4074
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software.  If not, see <http://www.gnu.org/licenses/>.

try:
    import shade
    HAS_SHADE = True
except ImportError:
    HAS_SHADE = False

# NOTE: the option is named 'filename' to match argument_spec below; the
# original documentation advertised 'file', which the module never accepted.
DOCUMENTATION = '''
---
module: os_object
short_description: Create or Delete objects and containers from OpenStack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
extends_documentation_fragment: openstack
description:
   - Create or Delete objects and containers from OpenStack
options:
   container:
     description:
        - The name of the container in which to create the object
     required: true
   name:
     description:
        - Name to be give to the object. If omitted, operations will be on
          the entire container
     required: false
   filename:
     description:
        - Path to local file to be uploaded.
     required: false
   container_access:
     description:
        - desired container access level.
     required: false
     choices: ['private', 'public']
     default: private
   state:
     description:
       - Should the resource be present or absent.
     choices: [present, absent]
     default: present
'''

EXAMPLES = '''
# Creates a object named 'fstab' in the 'config' container
- os_object: cloud=mordred state=present name=fstab container=config filename=/etc/fstab

# Deletes a container called config and all of its contents
- os_object: cloud=rax-iad state=absent container=config
'''


def process_object(
        cloud_obj, container, name, filename, container_access, **kwargs):
    """Converge the container/object toward the requested state.

    cloud_obj        -- a shade OpenStack cloud connection
    container        -- container name (always required)
    name             -- object name, or None to operate on the container only
    filename         -- local path to upload when state is 'present'
    container_access -- 'private' or 'public'
    kwargs           -- remaining module params; only 'state' is consulted

    Returns True when any change was made to the cloud.
    """
    changed = False
    container_obj = cloud_obj.get_container(container)
    if kwargs['state'] == 'present':
        # Ensure the container exists with the requested access level.
        if not container_obj:
            container_obj = cloud_obj.create_container(container)
            changed = True
        if cloud_obj.get_container_access(container) != container_access:
            cloud_obj.set_container_access(container, container_access)
            changed = True
        if name:
            # Upload only when the stored object differs from the local file.
            if cloud_obj.is_object_stale(container, name, filename):
                cloud_obj.create_object(container, name, filename)
                changed = True
    else:
        # state == 'absent': delete the object, or the whole container when
        # no object name was given.
        if container_obj:
            if name:
                if cloud_obj.get_object_metadata(container, name):
                    cloud_obj.delete_object(container, name)
                    changed = True
            else:
                cloud_obj.delete_container(container)
                changed = True
    return changed


def main():
    """Ansible entry point: parse params, connect via shade, and report."""
    argument_spec = openstack_full_argument_spec(
        name=dict(required=False, default=None),
        container=dict(required=True),
        filename=dict(required=False, default=None),
        container_access=dict(default='private', choices=['private', 'public']),
        state=dict(default='present', choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec, **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    try:
        cloud = shade.openstack_cloud(**module.params)
        changed = process_object(cloud, **module.params)

        module.exit_json(changed=changed)

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=e.message)

# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
gpl-3.0
joyxu/autotest
frontend/afe/views.py
4
2452
import httplib2
import sys
import traceback
import cgi

from django.http import HttpResponse, HttpResponsePermanentRedirect
from django.http import HttpResponseServerError
from django.template import Context, loader

from autotest.client.shared import utils
from autotest.frontend import views_common
from autotest.frontend.afe import models, rpc_handler, rpc_interface
from autotest.frontend.afe import rpc_utils

# Optional site-specific RPC interface; falls back to a dummy object when the
# site module is absent.
site_rpc_interface = utils.import_site_module(
    __file__, 'autotest.frontend.afe.site_rpc_interface', dummy=object())

# since site_rpc_interface is later in the list, its methods will override those
# of rpc_interface
rpc_handler_obj = rpc_handler.RpcHandler((rpc_interface, site_rpc_interface),
                                         document_module=rpc_interface)


def handle_rpc(request):
    """Dispatch an incoming JSON-RPC request to the shared handler."""
    return rpc_handler_obj.handle_rpc_request(request)


def rpc_documentation(request):
    """Serve auto-generated documentation for the RPC interface."""
    return rpc_handler_obj.get_rpc_documentation()


def model_documentation(request):
    """Serve generated documentation for the listed AFE models."""
    model_names = ('Label', 'Host', 'Test', 'User', 'AclGroup', 'Job',
                   'AtomicGroup')
    return views_common.model_documentation(models, model_names)


def redirect_with_extra_data(request, url, **kwargs):
    """Permanent-redirect to *url*, exposing the query string and server
    name as interpolation keys ('getdata', 'server_name')."""
    kwargs['getdata'] = request.GET.urlencode()
    kwargs['server_name'] = request.META['SERVER_NAME']
    return HttpResponsePermanentRedirect(url % kwargs)


GWT_SERVER = 'http://localhost:8888/'


def gwt_forward(request, forward_addr):
    """Development proxy: forward the request to the local GWT server and
    relay its response, stripping hop-by-hop headers."""
    url = GWT_SERVER + forward_addr
    if len(request.POST) == 0:
        headers, content = httplib2.Http().request(url, 'GET')
    else:
        headers, content = httplib2.Http().request(url, 'POST',
                                                   body=request.raw_post_data)
    http_response = HttpResponse(content)
    for header, value in headers.iteritems():
        # remove components that could cause hop-by-hop errors
        if header not in ('connection', 'keep-alive', 'proxy-authenticate',
                          'proxy-authorization', 'te', 'trailers',
                          'transfer-encoding', 'upgrade',):
            http_response[header] = value
    return http_response


def handler500(request):
    """Render the 500 error page with exception details.

    Uses sys.exc_info() instead of the deprecated, non-thread-safe
    sys.exc_type / sys.exc_value globals.
    """
    t = loader.get_template('500.html')
    trace = traceback.format_exc()
    exc_type, exc_value = sys.exc_info()[:2]
    context = Context({
        'type': exc_type,
        'value': exc_value,
        'traceback': cgi.escape(trace)
    })
    return HttpResponseServerError(t.render(context))
gpl-2.0
wujuguang/sentry
src/sentry/migrations/0078_auto__add_field_affecteduserbygroup_tuser.py
36
21866
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'AffectedUserByGroup.tuser' db.add_column('sentry_affecteduserbygroup', 'tuser', self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.TrackedUser'], null=True), keep_default=False) def backwards(self, orm): # Deleting field 'AffectedUserByGroup.tuser' db.delete_column('sentry_affecteduserbygroup', 'tuser_id') models = { 'sentry.user': { 'Meta': {'object_name': 'User', 'db_table': "'auth_user'"}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], 
{'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'sentry.affecteduserbygroup': { 'Meta': {'unique_together': "(('project', 'ident', 'group'),)", 'object_name': 'AffectedUserByGroup'}, 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'tuser': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.TrackedUser']", 'null': 'True'}) }, 'sentry.event': { 'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"}, 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}), 'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'level': 
('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}), 'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}), 'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}), 'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'}) }, 'sentry.filterkey': { 'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'FilterKey'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}) }, 'sentry.filtervalue': { 'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'sentry.group': { 'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"}, 'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 
'True'}), 'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}), 'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}), 'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}), 'users_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}) }, 'sentry.groupbookmark': { 'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'}, 'group': 
('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}), 'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"}) }, 'sentry.groupmeta': { 'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'}, 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'value': ('django.db.models.fields.TextField', [], {}) }, 'sentry.lostpasswordhash': { 'Meta': {'object_name': 'LostPasswordHash'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'}) }, 'sentry.messagecountbyminute': { 'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'}, 'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'time_spent_total': ('django.db.models.fields.FloatField', [], 
{'default': '0'}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}) }, 'sentry.messagefiltervalue': { 'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'}, 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'sentry.messageindex': { 'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'}, 'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'sentry.option': { 'Meta': {'object_name': 'Option'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}), 'value': ('picklefield.fields.PickledObjectField', [], {}) }, 'sentry.pendingteammember': { 'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 
'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}), 'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'sentry.project': { 'Meta': {'object_name': 'Project'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}), 'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), 'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'}), 'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'}) }, 'sentry.projectcountbyminute': { 'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'}, 'date': ('django.db.models.fields.DateTimeField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}) 
}, 'sentry.projectkey': { 'Meta': {'object_name': 'ProjectKey'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}), 'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}), 'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}), 'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}), 'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"}) }, 'sentry.projectoption': { 'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'value': ('picklefield.fields.PickledObjectField', [], {}) }, 'sentry.searchdocument': { 'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'status': 
('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}) }, 'sentry.searchtoken': { 'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'}, 'document': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}), 'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), 'token': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'sentry.team': { 'Meta': {'object_name': 'Team'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}) }, 'sentry.teammember': { 'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}), 'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"}) }, 'sentry.trackeduser': { 'Meta': {'unique_together': "(('project', 'ident'),)", 'object_name': 'TrackedUser'}, 'data': 
('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}), 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}) }, 'sentry.useroption': { 'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}), 'value': ('picklefield.fields.PickledObjectField', [], {}) } } complete_apps = ['sentry']
bsd-3-clause
40023154/0628
static/Brython3.1.1-20150328-091302/Lib/VFS_import.py
738
3059
import os
from browser import doc

# Alternative loader kept for reference: inject py_VFS.js via a <script> tag.
#_scripts=doc.createElement('script')
#_scripts.src="/src/py_VFS.js"
#_scripts.type="text/javascript"
#doc.get(tag='head')[0].appendChild(_scripts)

# Snapshot of Brython's virtual filesystem: maps file paths to their source.
# NOTE(review): JSObject, __BRYTHON__, readFromVFS and the import_*_module
# helpers are Brython-runtime globals, not defined in this module.
VFS = dict(JSObject(__BRYTHON__.py_VFS))


class VFSModuleFinder:
    """PEP 302 finder locating stdlib modules inside Brython's virtual FS."""

    def __init__(self, path_entry):
        print("in VFSModuleFinder")
        # Only handle the VFS library roots; other path entries are declined.
        if path_entry.startswith('/libs') or path_entry.startswith('/Lib'):
            self.path_entry = path_entry
        else:
            raise ImportError()

    def __str__(self):
        return '<%s for "%s">' % (self.__class__.__name__, self.path_entry)

    def find_module(self, fullname, path=None):
        """Return a loader for *fullname*, trying .js, .pyj, .py in order."""
        path = path or self.path_entry
        #print('looking for "%s" in %s ...' % (fullname, path))
        for _ext in ['js', 'pyj', 'py']:
            _filepath = os.path.join(self.path_entry, '%s.%s' % (fullname, _ext))
            if _filepath in VFS:
                print("module found at %s:%s" % (_filepath, fullname))
                return VFSModuleLoader(_filepath, fullname)

        print('module %s not found' % fullname)
        # Signal "not found"; the unreachable `return None` that followed
        # this raise has been removed.
        raise ImportError()


class VFSModuleLoader:
    """Load source for modules"""

    def __init__(self, filepath, name):
        self._filepath = filepath
        self._name = name

    def get_source(self):
        """Return the module's source text from the VFS."""
        if self._filepath in VFS:
            return JSObject(readFromVFS(self._filepath))
        # BUG FIX: previously referenced the undefined name 'fullname',
        # which raised NameError instead of the intended ImportError.
        raise ImportError('could not find source for %s' % self._name)

    def is_package(self):
        # NOTE(review): this actually detects dotted (sub)module names, not
        # packages proper — preserved as-is to keep behavior unchanged.
        return '.' in self._name

    def load_module(self):
        """Import and register the module, honoring PEP 302 attributes."""
        if self._name in sys.modules:
            #print('reusing existing module from previous import of "%s"' % fullname)
            mod = sys.modules[self._name]
            return mod

        _src = self.get_source()
        # Dispatch on extension to the matching Brython import helper.
        if self._filepath.endswith('.js'):
            mod = JSObject(import_js_module(_src, self._filepath, self._name))
        elif self._filepath.endswith('.py'):
            mod = JSObject(import_py_module(_src, self._filepath, self._name))
        elif self._filepath.endswith('.pyj'):
            mod = JSObject(import_pyj_module(_src, self._filepath, self._name))
        else:
            raise ImportError('Invalid Module: %s' % self._filepath)

        # Set a few properties required by PEP 302
        mod.__file__ = self._filepath
        mod.__name__ = self._name
        mod.__path__ = os.path.abspath(self._filepath)
        mod.__loader__ = self
        mod.__package__ = '.'.join(self._name.split('.')[:-1])

        if self.is_package():
            print('adding path for package')
            # Set __path__ for packages so we can find the sub-modules.
            # BUG FIX: previously read self.path_entry, an attribute this
            # loader never defines (AttributeError); use the module file's
            # directory instead.
            mod.__path__ = [os.path.dirname(self._filepath)]
        else:
            print('imported as regular module')

        print('creating a new module object for "%s"' % self._name)
        sys.modules.setdefault(self._name, mod)
        JSObject(__BRYTHON__.imported)[self._name] = mod
        return mod


JSObject(__BRYTHON__.path_hooks.insert(0, VFSModuleFinder))
gpl-3.0
nicfitzgerald/pyrogue-game
map_utils.py
1
4764
from tdl.map import Map
from random import randint
from components.ai import BasicMonster
from components.fighter import Fighter
from entity import Entity


class GameMap(Map):
    """A tdl Map that additionally tracks which tiles the player has explored."""

    def __init__(self, width, height):
        super().__init__(width, height)
        # explored[x][y] is True once the tile has been seen by the player.
        self.explored = [[False for y in range(height)] for x in range(width)]


class Rect:
    """An axis-aligned rectangle used to carve rooms out of the map."""

    def __init__(self, x, y, w, h):
        self.x1 = x
        self.y1 = y
        self.x2 = x + w
        self.y2 = y + h

    def center(self):
        """Return the (x, y) coordinates of the rectangle's center."""
        center_x = int((self.x1 + self.x2) / 2)
        center_y = int((self.y1 + self.y2) / 2)
        return (center_x, center_y)

    def intersect(self, other):
        """Return True if this rectangle overlaps *other* (touching counts)."""
        return (self.x1 <= other.x2 and self.x2 >= other.x1 and
                self.y1 <= other.y2 and self.y2 >= other.y1)


def create_room(game_map, room):
    """Carve *room* out of *game_map* by making its interior tiles passable."""
    for x in range(room.x1 + 1, room.x2):
        for y in range(room.y1 + 1, room.y2):
            game_map.walkable[x, y] = True
            game_map.transparent[x, y] = True


def create_h_tunnel(game_map, x1, x2, y):
    """Carve a horizontal corridor between x1 and x2 at row *y*."""
    for x in range(min(x1, x2), max(x1, x2) + 1):
        game_map.walkable[x, y] = True
        game_map.transparent[x, y] = True


def create_v_tunnel(game_map, y1, y2, x):
    """Carve a vertical corridor between y1 and y2 at column *x*."""
    for y in range(min(y1, y2), max(y1, y2) + 1):
        game_map.walkable[x, y] = True
        game_map.transparent[x, y] = True


def place_entities(room, entities, max_monsters_per_room, colors):
    """Scatter a random number of monsters inside *room* on unoccupied tiles.

    New monsters are appended to *entities* in place.
    """
    # Get a random number of monsters for this room.
    number_of_monsters = randint(0, max_monsters_per_room)

    for _ in range(number_of_monsters):
        # Choose a random location strictly inside the room walls.
        x = randint(room.x1 + 1, room.x2 - 1)
        y = randint(room.y1 + 1, room.y2 - 1)

        # PERF: use a generator with any() instead of materializing a list.
        if not any(entity.x == x and entity.y == y for entity in entities):
            if randint(0, 100) < 80:
                # 80% chance: a weak orc.
                fighter_component = Fighter(hp=10, defense=0, power=3)
                monster = Entity(x, y, 'o', colors.get('desaturated_green'),
                                 'Orc', blocks=True,
                                 fighter=fighter_component, ai=BasicMonster())
            else:
                # 20% chance: a tougher troll.
                fighter_component = Fighter(hp=16, defense=1, power=4)
                monster = Entity(x, y, 'T', colors.get('darker_green'),
                                 'Troll', blocks=True,
                                 fighter=fighter_component, ai=BasicMonster())

            entities.append(monster)


def make_map(game_map, max_rooms, room_min_size, room_max_size, map_width,
             map_height, player, entities, max_monsters_per_room, colors):
    """Generate a dungeon of up to *max_rooms* connected rooms on *game_map*.

    The player is placed in the first room; monsters are placed in every room.
    """
    rooms = []
    num_rooms = 0

    for r in range(max_rooms):
        # Random width and height.
        w = randint(room_min_size, room_max_size)
        h = randint(room_min_size, room_max_size)
        # Random position without going out of the boundaries of the map.
        x = randint(0, map_width - w - 1)
        y = randint(0, map_height - h - 1)

        # "Rect" class makes rectangles easier to work with.
        new_room = Rect(x, y, w, h)

        # Run through the other rooms and see if they intersect with this one.
        for other_room in rooms:
            if new_room.intersect(other_room):
                break
        else:
            # No intersections, so this room is valid: "paint" it to the map.
            create_room(game_map, new_room)

            # Center coordinates of new room, used for tunnels and placement.
            (new_x, new_y) = new_room.center()

            if num_rooms == 0:
                # This is the first room, where the player starts at.
                player.x = new_x
                player.y = new_y
            else:
                # All rooms after the first: connect to the previous room
                # with an L-shaped tunnel.
                (prev_x, prev_y) = rooms[num_rooms - 1].center()

                # Flip a coin for the tunnel orientation.
                if randint(0, 1) == 1:
                    # First move horizontally, then vertically.
                    create_h_tunnel(game_map, prev_x, new_x, prev_y)
                    create_v_tunnel(game_map, prev_y, new_y, new_x)
                else:
                    # First move vertically, then horizontally.
                    create_v_tunnel(game_map, prev_y, new_y, prev_x)
                    create_h_tunnel(game_map, prev_x, new_x, new_y)

            place_entities(new_room, entities, max_monsters_per_room, colors)

            # Finally, append the new room to the list.
            rooms.append(new_room)
            num_rooms += 1
gpl-3.0
proudlygeek/proudlygeek-blog
pygments/plugin.py
74
1862
# -*- coding: utf-8 -*-
"""
    pygments.plugin
    ~~~~~~~~~~~~~~~

    Pygments setuptools plugin interface.  The discovery helpers defined
    here also work when setuptools isn't installed; they simply yield
    nothing in that case.

    lexer plugins::

        [pygments.lexers]
        yourlexer = yourmodule:YourLexer

    formatter plugins::

        [pygments.formatters]
        yourformatter = yourformatter:YourFormatter
        /.ext = yourformatter:YourFormatter

    As you can see, you can define extensions for the formatter
    with a leading slash.

    syntax plugins::

        [pygments.styles]
        yourstyle = yourstyle:YourStyle

    filter plugin::

        [pygments.filter]
        yourfilter = yourfilter:YourFilter

    :copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
try:
    import pkg_resources
except ImportError:
    pkg_resources = None

LEXER_ENTRY_POINT = 'pygments.lexers'
FORMATTER_ENTRY_POINT = 'pygments.formatters'
STYLE_ENTRY_POINT = 'pygments.styles'
FILTER_ENTRY_POINT = 'pygments.filters'


def _iter_entrypoints(group):
    """Return an iterable of entry points for *group*.

    Returns an empty iterable when setuptools is not available.
    """
    if pkg_resources is None:
        return ()
    return pkg_resources.iter_entry_points(group)


def find_plugin_lexers():
    """Yield lexer classes registered via the pygments.lexers entry point."""
    for entrypoint in _iter_entrypoints(LEXER_ENTRY_POINT):
        yield entrypoint.load()


def find_plugin_formatters():
    """Yield (name, formatter class) pairs registered by plugins."""
    for entrypoint in _iter_entrypoints(FORMATTER_ENTRY_POINT):
        yield entrypoint.name, entrypoint.load()


def find_plugin_styles():
    """Yield (name, style class) pairs registered by plugins."""
    for entrypoint in _iter_entrypoints(STYLE_ENTRY_POINT):
        yield entrypoint.name, entrypoint.load()


def find_plugin_filters():
    """Yield (name, filter class) pairs registered by plugins."""
    for entrypoint in _iter_entrypoints(FILTER_ENTRY_POINT):
        yield entrypoint.name, entrypoint.load()
mit
ATIX-AG/ansible
lib/ansible/modules/network/junos/junos_scp.py
76
5091
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2018, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'network'}


DOCUMENTATION = """
---
module: junos_scp
version_added: "2.5"
author: "Christian Giese (@GIC-de)"
short_description: Transfer files from or to remote devices running Junos
description:
  - This module transfers files via SCP from or to remote devices
    running Junos.
extends_documentation_fragment: junos
options:
  src:
    description:
      - The C(src) argument takes a single path, or a list of paths to be
        transfered. The argument C(recursive) must be C(true) to transfer
        directories.
    required: true
  dest:
    description:
      - The C(dest) argument specifies the path in which to receive the files.
    default: '.'
  recursive:
    description:
      - The C(recursive) argument enables recursive transfer of files and
        directories.
    type: bool
    default: 'no'
  remote_src:
    description:
      - The C(remote_src) argument enables the download of files
        (I(scp get)) from the remote device. The default behavior is to
        upload files (I(scp put)) to the remote device.
    type: bool
    default: 'no'
requirements:
  - junos-eznc
  - ncclient (>=v0.5.2)
notes:
  - This module requires the netconf system service be enabled on
    the remote device being managed.
  - Tested against vMX JUNOS version 17.3R1.10.
  - Works with C(local) connections only.
"""

EXAMPLES = """
# the required set of connection arguments have been purposely left off
# the examples for brevity

- name: upload local file to home directory on remote device
  junos_scp:
    src: test.tgz

- name: upload local file to tmp directory on remote device
  junos_scp:
    src: test.tgz
    dest: /tmp/

- name: download file from remote device
  junos_scp:
    src: test.tgz
    remote_src: true
"""

RETURN = """
changed:
  description: always true
  returned: always
  type: bool
"""

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.junos.junos import junos_argument_spec, get_param
from ansible.module_utils._text import to_native

try:
    from jnpr.junos import Device
    from jnpr.junos.utils.scp import SCP
    from jnpr.junos.exception import ConnectError
    HAS_PYEZ = True
except ImportError:
    HAS_PYEZ = False


def connect(module):
    """Open and return a PyEZ Device connection built from module params.

    Fails the module via fail_json() if the connection cannot be opened.
    """
    host = get_param(module, 'host')

    kwargs = {
        'port': get_param(module, 'port') or 830,
        'user': get_param(module, 'username')
    }

    if get_param(module, 'password'):
        kwargs['passwd'] = get_param(module, 'password')

    if get_param(module, 'ssh_keyfile'):
        kwargs['ssh_private_key_file'] = get_param(module, 'ssh_keyfile')

    kwargs['gather_facts'] = False

    try:
        device = Device(host, **kwargs)
        device.open()
        device.timeout = get_param(module, 'timeout') or 10
    except ConnectError as exc:
        # BUG FIX: AnsibleModule.fail_json() accepts keyword arguments only;
        # the original passed the message positionally, which raised a
        # TypeError instead of reporting the connection failure.
        module.fail_json(msg='unable to connect to %s: %s' % (host, to_native(exc)))

    return device


def transfer_files(module, device):
    """Upload (scp put) or download (scp get) each path in src via SCP."""
    dest = module.params['dest']
    recursive = module.params['recursive']

    with SCP(device) as scp:
        for src in module.params['src']:
            if module.params['remote_src']:
                scp.get(src.strip(), local_path=dest, recursive=recursive)
            else:
                scp.put(src.strip(), remote_path=dest, recursive=recursive)


def main():
    """ Main entry point for Ansible module execution
    """
    argument_spec = dict(
        src=dict(type='list', required=True),
        dest=dict(type='path', required=False, default="."),
        recursive=dict(type='bool', default=False),
        remote_src=dict(type='bool', default=False),
        transport=dict(default='netconf', choices=['netconf'])
    )

    argument_spec.update(junos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    if module.params['provider'] is None:
        module.params['provider'] = {}

    if not HAS_PYEZ:
        module.fail_json(
            msg='junos-eznc is required but does not appear to be installed. '
                'It can be installed using `pip install junos-eznc`'
        )

    result = dict(changed=True)

    if not module.check_mode:
        # BUG FIX: bind `device` before the try so the finally block cannot
        # hit a NameError when connect() itself fails.
        device = None
        # open pyez connection and transfer files via SCP
        try:
            device = connect(module)
            transfer_files(module, device)
        except Exception as ex:
            module.fail_json(
                msg=to_native(ex)
            )
        finally:
            if device is not None:
                try:
                    # close pyez connection and ignore exceptions
                    device.close()
                except Exception:
                    pass

    module.exit_json(**result)


if __name__ == '__main__':
    main()
gpl-3.0
sgiavasis/nipype
nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py
12
1450
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from ....testing import assert_equal from ..developer import MedicAlgorithmImageCalculator def test_MedicAlgorithmImageCalculator_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inOperation=dict(argstr='--inOperation %s', ), inVolume=dict(argstr='--inVolume %s', ), inVolume2=dict(argstr='--inVolume2 %s', ), null=dict(argstr='--null %s', ), outResult=dict(argstr='--outResult %s', hash_files=False, ), terminal_output=dict(nohash=True, ), xDefaultMem=dict(argstr='-xDefaultMem %d', ), xMaxProcess=dict(argstr='-xMaxProcess %d', usedefault=True, ), xPrefExt=dict(argstr='--xPrefExt %s', ), ) inputs = MedicAlgorithmImageCalculator.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_MedicAlgorithmImageCalculator_outputs(): output_map = dict(outResult=dict(), ) outputs = MedicAlgorithmImageCalculator.output_spec() for key, metadata in list(output_map.items()): for metakey, value in list(metadata.items()): yield assert_equal, getattr(outputs.traits()[key], metakey), value
bsd-3-clause
Pirionfr/pyPhoenix
pyphoenix/responses_pb2.py
1
35351
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: pyphoenix/responses.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from pyphoenix import common_pb2 as pyphoenix_dot_common__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='pyphoenix/responses.proto', package='pyphoenix', syntax='proto3', serialized_pb=_b('\n\x19pyphoenix/responses.proto\x12\tpyphoenix\x1a\x16pyphoenix/common.proto\"\xe7\x01\n\x11ResultSetResponse\x12\x15\n\rconnection_id\x18\x01 \x01(\t\x12\x14\n\x0cstatement_id\x18\x02 \x01(\r\x12\x15\n\rown_statement\x18\x03 \x01(\x08\x12\'\n\tsignature\x18\x04 \x01(\x0b\x32\x14.pyphoenix.Signature\x12%\n\x0b\x66irst_frame\x18\x05 \x01(\x0b\x32\x10.pyphoenix.Frame\x12\x14\n\x0cupdate_count\x18\x06 \x01(\x04\x12(\n\x08metadata\x18\x07 \x01(\x0b\x32\x16.pyphoenix.RpcMetadata\"\x85\x01\n\x0f\x45xecuteResponse\x12-\n\x07results\x18\x01 \x03(\x0b\x32\x1c.pyphoenix.ResultSetResponse\x12\x19\n\x11missing_statement\x18\x02 \x01(\x08\x12(\n\x08metadata\x18\x03 \x01(\x0b\x32\x16.pyphoenix.RpcMetadata\"j\n\x0fPrepareResponse\x12-\n\tstatement\x18\x01 \x01(\x0b\x32\x1a.pyphoenix.StatementHandle\x12(\n\x08metadata\x18\x02 \x01(\x0b\x32\x16.pyphoenix.RpcMetadata\"\x8e\x01\n\rFetchResponse\x12\x1f\n\x05\x66rame\x18\x01 \x01(\x0b\x32\x10.pyphoenix.Frame\x12\x19\n\x11missing_statement\x18\x02 \x01(\x08\x12\x17\n\x0fmissing_results\x18\x03 \x01(\x08\x12(\n\x08metadata\x18\x04 \x01(\x0b\x32\x16.pyphoenix.RpcMetadata\"p\n\x17\x43reateStatementResponse\x12\x15\n\rconnection_id\x18\x01 \x01(\t\x12\x14\n\x0cstatement_id\x18\x02 \x01(\r\x12(\n\x08metadata\x18\x03 
\x01(\x0b\x32\x16.pyphoenix.RpcMetadata\"B\n\x16\x43loseStatementResponse\x12(\n\x08metadata\x18\x01 \x01(\x0b\x32\x16.pyphoenix.RpcMetadata\"B\n\x16OpenConnectionResponse\x12(\n\x08metadata\x18\x01 \x01(\x0b\x32\x16.pyphoenix.RpcMetadata\"C\n\x17\x43loseConnectionResponse\x12(\n\x08metadata\x18\x01 \x01(\x0b\x32\x16.pyphoenix.RpcMetadata\"w\n\x16\x43onnectionSyncResponse\x12\x33\n\nconn_props\x18\x01 \x01(\x0b\x32\x1f.pyphoenix.ConnectionProperties\x12(\n\x08metadata\x18\x02 \x01(\x0b\x32\x16.pyphoenix.RpcMetadata\"\x93\x01\n\x17\x44\x61tabasePropertyElement\x12(\n\x03key\x18\x01 \x01(\x0b\x32\x1b.pyphoenix.DatabaseProperty\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.pyphoenix.TypedValue\x12(\n\x08metadata\x18\x03 \x01(\x0b\x32\x16.pyphoenix.RpcMetadata\"w\n\x18\x44\x61tabasePropertyResponse\x12\x31\n\x05props\x18\x01 \x03(\x0b\x32\".pyphoenix.DatabasePropertyElement\x12(\n\x08metadata\x18\x02 \x01(\x0b\x32\x16.pyphoenix.RpcMetadata\"\xca\x01\n\rErrorResponse\x12\x12\n\nexceptions\x18\x01 \x03(\t\x12\x16\n\x0ehas_exceptions\x18\x07 \x01(\x08\x12\x15\n\rerror_message\x18\x02 \x01(\t\x12%\n\x08severity\x18\x03 \x01(\x0e\x32\x13.pyphoenix.Severity\x12\x12\n\nerror_code\x18\x04 \x01(\r\x12\x11\n\tsql_state\x18\x05 \x01(\t\x12(\n\x08metadata\x18\x06 \x01(\x0b\x32\x16.pyphoenix.RpcMetadata\"p\n\x13SyncResultsResponse\x12\x19\n\x11missing_statement\x18\x01 \x01(\x08\x12\x14\n\x0cmore_results\x18\x02 \x01(\x08\x12(\n\x08metadata\x18\x03 \x01(\x0b\x32\x16.pyphoenix.RpcMetadata\"%\n\x0bRpcMetadata\x12\x16\n\x0eserver_address\x18\x01 \x01(\t\"\x10\n\x0e\x43ommitResponse\"\x12\n\x10RollbackResponse\"\x9f\x01\n\x14\x45xecuteBatchResponse\x12\x15\n\rconnection_id\x18\x01 \x01(\t\x12\x14\n\x0cstatement_id\x18\x02 \x01(\r\x12\x15\n\rupdate_counts\x18\x03 \x03(\x04\x12\x19\n\x11missing_statement\x18\x04 \x01(\x08\x12(\n\x08metadata\x18\x05 \x01(\x0b\x32\x16.pyphoenix.RpcMetadatab\x06proto3') , dependencies=[pyphoenix_dot_common__pb2.DESCRIPTOR,]) 
_sym_db.RegisterFileDescriptor(DESCRIPTOR) _RESULTSETRESPONSE = _descriptor.Descriptor( name='ResultSetResponse', full_name='pyphoenix.ResultSetResponse', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='connection_id', full_name='pyphoenix.ResultSetResponse.connection_id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='statement_id', full_name='pyphoenix.ResultSetResponse.statement_id', index=1, number=2, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='own_statement', full_name='pyphoenix.ResultSetResponse.own_statement', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='signature', full_name='pyphoenix.ResultSetResponse.signature', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='first_frame', full_name='pyphoenix.ResultSetResponse.first_frame', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='update_count', full_name='pyphoenix.ResultSetResponse.update_count', index=5, number=6, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='metadata', full_name='pyphoenix.ResultSetResponse.metadata', index=6, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=65, serialized_end=296, ) _EXECUTERESPONSE = _descriptor.Descriptor( name='ExecuteResponse', full_name='pyphoenix.ExecuteResponse', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='results', full_name='pyphoenix.ExecuteResponse.results', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='missing_statement', full_name='pyphoenix.ExecuteResponse.missing_statement', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='metadata', full_name='pyphoenix.ExecuteResponse.metadata', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=299, serialized_end=432, ) _PREPARERESPONSE = _descriptor.Descriptor( name='PrepareResponse', full_name='pyphoenix.PrepareResponse', filename=None, 
file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='statement', full_name='pyphoenix.PrepareResponse.statement', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='metadata', full_name='pyphoenix.PrepareResponse.metadata', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=434, serialized_end=540, ) _FETCHRESPONSE = _descriptor.Descriptor( name='FetchResponse', full_name='pyphoenix.FetchResponse', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='frame', full_name='pyphoenix.FetchResponse.frame', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='missing_statement', full_name='pyphoenix.FetchResponse.missing_statement', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='missing_results', full_name='pyphoenix.FetchResponse.missing_results', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='metadata', 
full_name='pyphoenix.FetchResponse.metadata', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=543, serialized_end=685, ) _CREATESTATEMENTRESPONSE = _descriptor.Descriptor( name='CreateStatementResponse', full_name='pyphoenix.CreateStatementResponse', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='connection_id', full_name='pyphoenix.CreateStatementResponse.connection_id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='statement_id', full_name='pyphoenix.CreateStatementResponse.statement_id', index=1, number=2, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='metadata', full_name='pyphoenix.CreateStatementResponse.metadata', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=687, serialized_end=799, ) _CLOSESTATEMENTRESPONSE = _descriptor.Descriptor( name='CloseStatementResponse', full_name='pyphoenix.CloseStatementResponse', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( 
name='metadata', full_name='pyphoenix.CloseStatementResponse.metadata', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=801, serialized_end=867, ) _OPENCONNECTIONRESPONSE = _descriptor.Descriptor( name='OpenConnectionResponse', full_name='pyphoenix.OpenConnectionResponse', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='metadata', full_name='pyphoenix.OpenConnectionResponse.metadata', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=869, serialized_end=935, ) _CLOSECONNECTIONRESPONSE = _descriptor.Descriptor( name='CloseConnectionResponse', full_name='pyphoenix.CloseConnectionResponse', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='metadata', full_name='pyphoenix.CloseConnectionResponse.metadata', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=937, serialized_end=1004, ) _CONNECTIONSYNCRESPONSE = _descriptor.Descriptor( name='ConnectionSyncResponse', full_name='pyphoenix.ConnectionSyncResponse', filename=None, file=DESCRIPTOR, 
containing_type=None, fields=[ _descriptor.FieldDescriptor( name='conn_props', full_name='pyphoenix.ConnectionSyncResponse.conn_props', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='metadata', full_name='pyphoenix.ConnectionSyncResponse.metadata', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1006, serialized_end=1125, ) _DATABASEPROPERTYELEMENT = _descriptor.Descriptor( name='DatabasePropertyElement', full_name='pyphoenix.DatabasePropertyElement', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pyphoenix.DatabasePropertyElement.key', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='value', full_name='pyphoenix.DatabasePropertyElement.value', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='metadata', full_name='pyphoenix.DatabasePropertyElement.metadata', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], 
options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1128, serialized_end=1275, ) _DATABASEPROPERTYRESPONSE = _descriptor.Descriptor( name='DatabasePropertyResponse', full_name='pyphoenix.DatabasePropertyResponse', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='props', full_name='pyphoenix.DatabasePropertyResponse.props', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='metadata', full_name='pyphoenix.DatabasePropertyResponse.metadata', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1277, serialized_end=1396, ) _ERRORRESPONSE = _descriptor.Descriptor( name='ErrorResponse', full_name='pyphoenix.ErrorResponse', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='exceptions', full_name='pyphoenix.ErrorResponse.exceptions', index=0, number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='has_exceptions', full_name='pyphoenix.ErrorResponse.has_exceptions', index=1, number=7, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='error_message', 
full_name='pyphoenix.ErrorResponse.error_message', index=2, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='severity', full_name='pyphoenix.ErrorResponse.severity', index=3, number=3, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='error_code', full_name='pyphoenix.ErrorResponse.error_code', index=4, number=4, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='sql_state', full_name='pyphoenix.ErrorResponse.sql_state', index=5, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='metadata', full_name='pyphoenix.ErrorResponse.metadata', index=6, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1399, serialized_end=1601, ) _SYNCRESULTSRESPONSE = _descriptor.Descriptor( name='SyncResultsResponse', full_name='pyphoenix.SyncResultsResponse', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='missing_statement', full_name='pyphoenix.SyncResultsResponse.missing_statement', index=0, number=1, 
type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='more_results', full_name='pyphoenix.SyncResultsResponse.more_results', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='metadata', full_name='pyphoenix.SyncResultsResponse.metadata', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1603, serialized_end=1715, ) _RPCMETADATA = _descriptor.Descriptor( name='RpcMetadata', full_name='pyphoenix.RpcMetadata', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='server_address', full_name='pyphoenix.RpcMetadata.server_address', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1717, serialized_end=1754, ) _COMMITRESPONSE = _descriptor.Descriptor( name='CommitResponse', full_name='pyphoenix.CommitResponse', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1756, 
serialized_end=1772, ) _ROLLBACKRESPONSE = _descriptor.Descriptor( name='RollbackResponse', full_name='pyphoenix.RollbackResponse', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1774, serialized_end=1792, ) _EXECUTEBATCHRESPONSE = _descriptor.Descriptor( name='ExecuteBatchResponse', full_name='pyphoenix.ExecuteBatchResponse', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='connection_id', full_name='pyphoenix.ExecuteBatchResponse.connection_id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='statement_id', full_name='pyphoenix.ExecuteBatchResponse.statement_id', index=1, number=2, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='update_counts', full_name='pyphoenix.ExecuteBatchResponse.update_counts', index=2, number=3, type=4, cpp_type=4, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='missing_statement', full_name='pyphoenix.ExecuteBatchResponse.missing_statement', index=3, number=4, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='metadata', full_name='pyphoenix.ExecuteBatchResponse.metadata', index=4, number=5, type=11, cpp_type=10, label=1, 
has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1795, serialized_end=1954, ) _RESULTSETRESPONSE.fields_by_name['signature'].message_type = pyphoenix_dot_common__pb2._SIGNATURE _RESULTSETRESPONSE.fields_by_name['first_frame'].message_type = pyphoenix_dot_common__pb2._FRAME _RESULTSETRESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA _EXECUTERESPONSE.fields_by_name['results'].message_type = _RESULTSETRESPONSE _EXECUTERESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA _PREPARERESPONSE.fields_by_name['statement'].message_type = pyphoenix_dot_common__pb2._STATEMENTHANDLE _PREPARERESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA _FETCHRESPONSE.fields_by_name['frame'].message_type = pyphoenix_dot_common__pb2._FRAME _FETCHRESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA _CREATESTATEMENTRESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA _CLOSESTATEMENTRESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA _OPENCONNECTIONRESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA _CLOSECONNECTIONRESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA _CONNECTIONSYNCRESPONSE.fields_by_name['conn_props'].message_type = pyphoenix_dot_common__pb2._CONNECTIONPROPERTIES _CONNECTIONSYNCRESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA _DATABASEPROPERTYELEMENT.fields_by_name['key'].message_type = pyphoenix_dot_common__pb2._DATABASEPROPERTY _DATABASEPROPERTYELEMENT.fields_by_name['value'].message_type = pyphoenix_dot_common__pb2._TYPEDVALUE _DATABASEPROPERTYELEMENT.fields_by_name['metadata'].message_type = _RPCMETADATA _DATABASEPROPERTYRESPONSE.fields_by_name['props'].message_type = _DATABASEPROPERTYELEMENT 
_DATABASEPROPERTYRESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA _ERRORRESPONSE.fields_by_name['severity'].enum_type = pyphoenix_dot_common__pb2._SEVERITY _ERRORRESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA _SYNCRESULTSRESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA _EXECUTEBATCHRESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA DESCRIPTOR.message_types_by_name['ResultSetResponse'] = _RESULTSETRESPONSE DESCRIPTOR.message_types_by_name['ExecuteResponse'] = _EXECUTERESPONSE DESCRIPTOR.message_types_by_name['PrepareResponse'] = _PREPARERESPONSE DESCRIPTOR.message_types_by_name['FetchResponse'] = _FETCHRESPONSE DESCRIPTOR.message_types_by_name['CreateStatementResponse'] = _CREATESTATEMENTRESPONSE DESCRIPTOR.message_types_by_name['CloseStatementResponse'] = _CLOSESTATEMENTRESPONSE DESCRIPTOR.message_types_by_name['OpenConnectionResponse'] = _OPENCONNECTIONRESPONSE DESCRIPTOR.message_types_by_name['CloseConnectionResponse'] = _CLOSECONNECTIONRESPONSE DESCRIPTOR.message_types_by_name['ConnectionSyncResponse'] = _CONNECTIONSYNCRESPONSE DESCRIPTOR.message_types_by_name['DatabasePropertyElement'] = _DATABASEPROPERTYELEMENT DESCRIPTOR.message_types_by_name['DatabasePropertyResponse'] = _DATABASEPROPERTYRESPONSE DESCRIPTOR.message_types_by_name['ErrorResponse'] = _ERRORRESPONSE DESCRIPTOR.message_types_by_name['SyncResultsResponse'] = _SYNCRESULTSRESPONSE DESCRIPTOR.message_types_by_name['RpcMetadata'] = _RPCMETADATA DESCRIPTOR.message_types_by_name['CommitResponse'] = _COMMITRESPONSE DESCRIPTOR.message_types_by_name['RollbackResponse'] = _ROLLBACKRESPONSE DESCRIPTOR.message_types_by_name['ExecuteBatchResponse'] = _EXECUTEBATCHRESPONSE ResultSetResponse = _reflection.GeneratedProtocolMessageType('ResultSetResponse', (_message.Message,), dict( DESCRIPTOR = _RESULTSETRESPONSE, __module__ = 'pyphoenix.responses_pb2' # @@protoc_insertion_point(class_scope:pyphoenix.ResultSetResponse) )) 
_sym_db.RegisterMessage(ResultSetResponse) ExecuteResponse = _reflection.GeneratedProtocolMessageType('ExecuteResponse', (_message.Message,), dict( DESCRIPTOR = _EXECUTERESPONSE, __module__ = 'pyphoenix.responses_pb2' # @@protoc_insertion_point(class_scope:pyphoenix.ExecuteResponse) )) _sym_db.RegisterMessage(ExecuteResponse) PrepareResponse = _reflection.GeneratedProtocolMessageType('PrepareResponse', (_message.Message,), dict( DESCRIPTOR = _PREPARERESPONSE, __module__ = 'pyphoenix.responses_pb2' # @@protoc_insertion_point(class_scope:pyphoenix.PrepareResponse) )) _sym_db.RegisterMessage(PrepareResponse) FetchResponse = _reflection.GeneratedProtocolMessageType('FetchResponse', (_message.Message,), dict( DESCRIPTOR = _FETCHRESPONSE, __module__ = 'pyphoenix.responses_pb2' # @@protoc_insertion_point(class_scope:pyphoenix.FetchResponse) )) _sym_db.RegisterMessage(FetchResponse) CreateStatementResponse = _reflection.GeneratedProtocolMessageType('CreateStatementResponse', (_message.Message,), dict( DESCRIPTOR = _CREATESTATEMENTRESPONSE, __module__ = 'pyphoenix.responses_pb2' # @@protoc_insertion_point(class_scope:pyphoenix.CreateStatementResponse) )) _sym_db.RegisterMessage(CreateStatementResponse) CloseStatementResponse = _reflection.GeneratedProtocolMessageType('CloseStatementResponse', (_message.Message,), dict( DESCRIPTOR = _CLOSESTATEMENTRESPONSE, __module__ = 'pyphoenix.responses_pb2' # @@protoc_insertion_point(class_scope:pyphoenix.CloseStatementResponse) )) _sym_db.RegisterMessage(CloseStatementResponse) OpenConnectionResponse = _reflection.GeneratedProtocolMessageType('OpenConnectionResponse', (_message.Message,), dict( DESCRIPTOR = _OPENCONNECTIONRESPONSE, __module__ = 'pyphoenix.responses_pb2' # @@protoc_insertion_point(class_scope:pyphoenix.OpenConnectionResponse) )) _sym_db.RegisterMessage(OpenConnectionResponse) CloseConnectionResponse = _reflection.GeneratedProtocolMessageType('CloseConnectionResponse', (_message.Message,), dict( DESCRIPTOR = 
_CLOSECONNECTIONRESPONSE, __module__ = 'pyphoenix.responses_pb2' # @@protoc_insertion_point(class_scope:pyphoenix.CloseConnectionResponse) )) _sym_db.RegisterMessage(CloseConnectionResponse) ConnectionSyncResponse = _reflection.GeneratedProtocolMessageType('ConnectionSyncResponse', (_message.Message,), dict( DESCRIPTOR = _CONNECTIONSYNCRESPONSE, __module__ = 'pyphoenix.responses_pb2' # @@protoc_insertion_point(class_scope:pyphoenix.ConnectionSyncResponse) )) _sym_db.RegisterMessage(ConnectionSyncResponse) DatabasePropertyElement = _reflection.GeneratedProtocolMessageType('DatabasePropertyElement', (_message.Message,), dict( DESCRIPTOR = _DATABASEPROPERTYELEMENT, __module__ = 'pyphoenix.responses_pb2' # @@protoc_insertion_point(class_scope:pyphoenix.DatabasePropertyElement) )) _sym_db.RegisterMessage(DatabasePropertyElement) DatabasePropertyResponse = _reflection.GeneratedProtocolMessageType('DatabasePropertyResponse', (_message.Message,), dict( DESCRIPTOR = _DATABASEPROPERTYRESPONSE, __module__ = 'pyphoenix.responses_pb2' # @@protoc_insertion_point(class_scope:pyphoenix.DatabasePropertyResponse) )) _sym_db.RegisterMessage(DatabasePropertyResponse) ErrorResponse = _reflection.GeneratedProtocolMessageType('ErrorResponse', (_message.Message,), dict( DESCRIPTOR = _ERRORRESPONSE, __module__ = 'pyphoenix.responses_pb2' # @@protoc_insertion_point(class_scope:pyphoenix.ErrorResponse) )) _sym_db.RegisterMessage(ErrorResponse) SyncResultsResponse = _reflection.GeneratedProtocolMessageType('SyncResultsResponse', (_message.Message,), dict( DESCRIPTOR = _SYNCRESULTSRESPONSE, __module__ = 'pyphoenix.responses_pb2' # @@protoc_insertion_point(class_scope:pyphoenix.SyncResultsResponse) )) _sym_db.RegisterMessage(SyncResultsResponse) RpcMetadata = _reflection.GeneratedProtocolMessageType('RpcMetadata', (_message.Message,), dict( DESCRIPTOR = _RPCMETADATA, __module__ = 'pyphoenix.responses_pb2' # @@protoc_insertion_point(class_scope:pyphoenix.RpcMetadata) )) 
_sym_db.RegisterMessage(RpcMetadata) CommitResponse = _reflection.GeneratedProtocolMessageType('CommitResponse', (_message.Message,), dict( DESCRIPTOR = _COMMITRESPONSE, __module__ = 'pyphoenix.responses_pb2' # @@protoc_insertion_point(class_scope:pyphoenix.CommitResponse) )) _sym_db.RegisterMessage(CommitResponse) RollbackResponse = _reflection.GeneratedProtocolMessageType('RollbackResponse', (_message.Message,), dict( DESCRIPTOR = _ROLLBACKRESPONSE, __module__ = 'pyphoenix.responses_pb2' # @@protoc_insertion_point(class_scope:pyphoenix.RollbackResponse) )) _sym_db.RegisterMessage(RollbackResponse) ExecuteBatchResponse = _reflection.GeneratedProtocolMessageType('ExecuteBatchResponse', (_message.Message,), dict( DESCRIPTOR = _EXECUTEBATCHRESPONSE, __module__ = 'pyphoenix.responses_pb2' # @@protoc_insertion_point(class_scope:pyphoenix.ExecuteBatchResponse) )) _sym_db.RegisterMessage(ExecuteBatchResponse) # @@protoc_insertion_point(module_scope)
apache-2.0
open-synergy/opnsynid-l10n-indonesia
l10n_id_taxform_faktur_pajak_common/models/faktur_pajak_common.py
1
24141
# -*- coding: utf-8 -*-
# Copyright 2017 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import models, fields, api
import openerp.addons.decimal_precision as dp
from datetime import datetime
import re


class FakturPajakCommon(models.AbstractModel):
    """Shared base for Indonesian tax invoices (Faktur Pajak).

    Concrete models inherit this abstract model to share:

    * the document workflow (draft -> confirmed -> done / cancelled),
    * seller/buyer + amount data, and
    * the ``enofa_*`` computed fields that mirror the columns of the
      e-NOFA / e-Faktur CSV export (hence their UPPER_CASE labels).
    """
    _name = "l10n_id.faktur_pajak_common"
    _description = "Faktur Pajak"
    _inherit = ["mail.thread"]

    @api.depends(
        "transaction_type_id",
    )
    @api.multi
    def _compute_jenis_transaksi(self):
        """KD_JENIS_TRANSAKSI column: the transaction type code."""
        for fp in self:
            fp.enofa_jenis_transaksi = fp.transaction_type_id.code

    @api.depends(
        "fp_state",  # was "transaction_type_id": the value mirrors fp_state
    )
    @api.multi
    def _compute_fg_pengganti(self):
        """FG_PENGGANTI column: "0" = normal, "1" = replacement."""
        for fp in self:
            fp.enofa_fg_pengganti = fp.fp_state

    @api.depends(
        "name",
    )
    @api.multi
    def _compute_nomor_dokumen(self):
        """NOMOR_DOKUMEN column: the Faktur Pajak number."""
        for fp in self:
            fp.enofa_nomor_dokumen = fp.name

    @api.depends(
        "creditable",
    )
    @api.multi
    def _compute_is_creditable(self):
        """IS_CREDITABLE column: "0"/"1" creditable flag."""
        for fp in self:
            fp.enofa_is_creditable = fp.creditable

    @api.depends(
        "seller_branch_id",
        "buyer_branch_id",
        "fp_direction",
    )
    @api.multi
    def _compute_nama(self):
        """NAMA column: name of the counterpart branch.

        keluaran (output) -> buyer, masukan (input) -> seller.
        """
        for fp in self:
            fp.enofa_nama = "-"
            if fp.fp_direction == "keluaran":
                fp.enofa_nama = fp.buyer_branch_id.name
            elif fp.fp_direction == "masukan":
                fp.enofa_nama = fp.seller_branch_id.name

    @api.depends(
        "seller_branch_id",
        "buyer_branch_id",
        "fp_direction",
    )
    @api.multi
    def _compute_alamat_lengkap(self):
        """ALAMAT_LENGKAP column: address of the counterpart branch."""
        for fp in self:
            fp.enofa_alamat_lengkap = "-"
            if fp.fp_direction == "keluaran":
                fp.enofa_alamat_lengkap = fp.buyer_branch_id.enofa_address
            elif fp.fp_direction == "masukan":
                fp.enofa_alamat_lengkap = fp.seller_branch_id.enofa_address

    @api.depends(
        "seller_branch_id",
        "buyer_branch_id",
        "fp_direction",
    )
    @api.multi
    def _compute_npwp(self):
        """NPWP column: tax number, digits only, 15 zeros when unset.

        NOTE(review): "keluaran" reads the *seller* VAT here while
        ``_compute_nama`` reads the *buyer* for the same direction --
        confirm which party e-NOFA expects in this column.
        """
        for fp in self:
            # Bug fix: the original assigned to ``self.enofa_npwp``
            # (the whole recordset) inside the loop instead of ``fp``.
            fp.enofa_npwp = "000000000000000"
            if fp.fp_direction == "keluaran":
                if fp.seller_branch_id.vat:
                    # Strip every non-digit from the formatted VAT number.
                    fp.enofa_npwp = "".join(
                        re.findall(r"\d+", fp.seller_branch_id.vat))
            elif fp.fp_direction == "masukan":
                if fp.buyer_branch_id.vat:
                    fp.enofa_npwp = "".join(
                        re.findall(r"\d+", fp.buyer_branch_id.vat))

    @api.depends(
        "date",
    )
    @api.multi
    def _compute_tanggal_dokumen(self):
        """TANGGAL_DOKUMEN column: document date as DD/MM/YYYY."""
        for fp in self:
            fp.enofa_tanggal_dokumen = "-"
            if fp.date:
                fp.enofa_tanggal_dokumen = datetime.strptime(
                    fp.date, "%Y-%m-%d").strftime(
                        "%d/%m/%Y")

    @api.depends(
        "taxform_period_id",
    )
    @api.multi
    def _compute_masa_pajak(self):
        """MASA_PAJAK column: tax period code."""
        for fp in self:
            fp.enofa_masa_pajak = fp.taxform_period_id.code

    @api.depends(
        "taxform_year_id",
    )
    @api.multi
    def _compute_tahun_pajak(self):
        """TAHUN_PAJAK column: tax year code."""
        for fp in self:
            fp.enofa_tahun_pajak = fp.taxform_year_id.code

    @api.depends(
        "base_company_currency",  # was "base": the value read below
    )
    @api.multi
    def _compute_jumlah_dpp(self):
        """JUMLAH_DPP column: tax base in company currency, truncated."""
        for fp in self:
            fp.enofa_jumlah_dpp = int(fp.base_company_currency)

    @api.depends(
        "ppn_amount",
    )
    @api.multi
    def _compute_jumlah_ppn(self):
        """JUMLAH_PPN column: VAT amount, truncated to integer."""
        for fp in self:
            fp.enofa_jumlah_ppn = int(fp.ppn_amount)

    @api.depends(
        "ppnbm_amount",
    )
    @api.multi
    def _compute_jumlah_ppnbm(self):
        """JUMLAH_PPNBM column: luxury-goods tax amount, truncated."""
        for fp in self:
            fp.enofa_jumlah_ppnbm = int(fp.ppnbm_amount)

    @api.depends(
        "date",
    )
    @api.multi
    def _compute_taxform_period(self):
        """Resolve the tax period record covering the document date."""
        for fp in self:
            fp.taxform_period_id = False
            if fp.date:
                fp.taxform_period_id = self.env["l10n_id.tax_period"].\
                    _find_period(fp.date).id

    @api.depends(
        "taxform_period_id",
    )
    @api.multi
    def _compute_taxform_year(self):
        """Tax year is derived from the resolved tax period."""
        for fp in self:
            fp.taxform_year_id = False
            if fp.taxform_period_id:
                fp.taxform_year_id = fp.taxform_period_id.year_id.id

    @api.depends(
        "type_id",
    )
    def _compute_transaction_type(self):
        """Transaction types allowed by the document type."""
        for fp in self:
            fp.allowed_transaction_type_ids = fp.type_id.\
                allowed_transaction_type_ids.ids

    @api.depends(
        "type_id",
        "transaction_type_id",
    )
    def _compute_tax_code(self):
        """Collect the DPP/PPn/PPnBm tax codes allowed for this
        (type, transaction type) pair from the three mapping models."""
        obj_dpp_code = self.env["l10n_id.faktur_pajak_allowed_dpp_tax_code"]
        obj_ppn_code = self.env["l10n_id.faktur_pajak_allowed_ppn_tax_code"]
        obj_ppnbm_code = self.env[
            "l10n_id.faktur_pajak_allowed_ppnbm_tax_code"]
        for fp in self:
            criteria = [
                ("type_id", "=", fp.type_id.id),
                ("transaction_type_id", "=", fp.transaction_type_id.id),
            ]
            for dpp_code in obj_dpp_code.search(criteria):
                fp.allowed_dpp_tax_code_ids += dpp_code.tax_code_ids
            for ppn_code in obj_ppn_code.search(criteria):
                fp.allowed_ppn_tax_code_ids += ppn_code.tax_code_ids
            for ppnbm_code in obj_ppnbm_code.search(criteria):
                fp.allowed_ppnbm_tax_code_ids += ppnbm_code.tax_code_ids

    @api.depends(
        "type_id",
    )
    def _compute_additional_flag(self):
        """Additional flags allowed by the document type."""
        for fp in self:
            fp.allowed_additional_flag_ids = fp.type_id.\
                allowed_additional_flag_ids.ids

    @api.depends(
        "type_id",
    )
    @api.multi
    def _compute_allow_reverse(self):
        for fp in self:
            fp.allow_reverse = fp.type_id.allow_reverse

    @api.depends(
        "type_id",
    )
    @api.multi
    def _compute_allow_multiple_reference(self):
        for fp in self:
            fp.allow_multiple_reference = fp.type_id.allow_multiple_reference

    @api.depends(
        "reverse_id",
    )
    @api.multi
    def _compute_nomor_dokumen_balik(self):
        """Number of the reversed document, "-" when not a reversal."""
        for fp in self:
            fp.enofa_nomor_dokumen_balik = "-"
            if fp.reverse_id:
                fp.enofa_nomor_dokumen_balik = fp.reverse_id.name

    @api.depends(
        "reverse_id",
    )
    @api.multi
    def _compute_tanggal_dokumen_balik(self):
        """Date of the reversed document as DD/MM/YYYY.

        NOTE(review): assumes ``reverse_id.date`` is always set when a
        reversal is linked (``date`` is required) -- strptime would fail
        on a False value.
        """
        for fp in self:
            fp.enofa_tanggal_dokumen_balik = "-"
            if fp.reverse_id:
                fp.enofa_tanggal_dokumen_balik = datetime.strptime(
                    fp.reverse_id.date, "%Y-%m-%d").strftime(
                        "%d/%m/%Y")

    @api.depends(
        "reference_id",
        "reference_ids",
        "allow_multiple_reference",
    )
    @api.multi
    def _compute_all_reference(self):
        """Unify single and multiple document references into one m2m."""
        for fp in self:
            if fp.type_id.allow_multiple_reference:
                fp.all_reference_ids = fp.reference_ids.ids
            else:
                fp.all_reference_ids = fp.reference_id and \
                    [fp.reference_id.id] or False

    @api.depends(
        "type_id",
    )
    @api.multi
    def _compute_allow_creditable(self):
        for fp in self:
            fp.allow_creditable = fp.type_id.allow_creditable

    @api.depends(
        "type_id",
    )
    @api.multi
    def _compute_allow_substitute(self):
        for fp in self:
            fp.allow_substitute = fp.type_id.allow_substitute

    name = fields.Char(
        string="# Faktur Pajak",
        required=True,
        readonly=True,
        default="/",
        states={
            "draft": [("readonly", False)],
        },
    )

    @api.model
    def _default_company_id(self):
        return self.env.user.company_id.id

    company_id = fields.Many2one(
        string="Company",
        comodel_name="res.company",
        required=True,
        readonly=True,
        default=lambda self: self._default_company_id(),
        states={
            "draft": [("readonly", False)],
        },
    )
    currency_id = fields.Many2one(
        string="Currency",
        comodel_name="res.currency",
        required=True,
        readonly=True,
        states={
            "draft": [("readonly", False)]},
    )

    @api.model
    def _default_company_currency(self):
        return self.env.user.company_id.currency_id.id

    company_currency_id = fields.Many2one(
        string="Company Currency",
        comodel_name="res.currency",
        required=True,
        readonly=True,
        default=lambda self: self._default_company_currency(),
        states={
            "draft": [("readonly", False)]},
    )

    @api.model
    def _default_fp_direction(self):
        # Direction comes from the concrete model's document type when
        # available; "keluaran" (output) otherwise.
        fp_type = self._get_faktur_pajak_type()
        if fp_type:
            return fp_type.fp_direction
        else:
            return "keluaran"

    fp_direction = fields.Selection(
        string="Jenis Faktur Pajak",
        selection=[
            ("masukan", "Masukan"),
            ("keluaran", "Keluaran"),
        ],
        required=True,
        readonly=True,
        states={
            "draft": [("readonly", False)],
        },
        default=lambda self: self._default_fp_direction(),
    )
    transaction_type_id = fields.Many2one(
        string="Transaction Type",
        comodel_name="l10n_id.faktur_pajak_transaction_type",
        required=True,
        readonly=True,
        states={
            "draft": [("readonly", False)]},
    )

    @api.model
    def _get_faktur_pajak_type(self):
        # Hook: concrete models return their faktur pajak type record.
        return False

    @api.model
    def _default_faktur_pajak_type(self):
        fp_type = self._get_faktur_pajak_type()
        if fp_type:
            return fp_type.id
        else:
            return False

    type_id = fields.Many2one(
        string="Type",
        comodel_name="l10n_id.faktur_pajak_type",
        required=True,
        default=lambda self: self._default_faktur_pajak_type(),
    )

    @api.model
    def _default_fp_state(self):
        return "0"

    fp_state = fields.Selection(
        string="Normal/Penggantian?",
        selection=[
            ("0", "Normal"),
            ("1", "Penggantian"),
        ],
        required=True,
        readonly=True,
        states={
            "draft": [("readonly", False)],
        },
        default=lambda self: self._default_fp_state(),
    )

    @api.model
    def _default_seller_partner(self):
        # For output documents the company itself is the seller.
        if self._default_fp_direction() == "keluaran":
            return self.env.user.company_id.partner_id.id

    seller_partner_id = fields.Many2one(
        comodel_name="res.partner",
        string="Seller",
        required=True,
        default=lambda self: self._default_seller_partner(),
        readonly=True,
        states={
            "draft": [("readonly", False)],
        },
    )
    seller_branch_id = fields.Many2one(
        comodel_name="res.partner",
        string="Seller Branch",
        required=True,
        readonly=True,
        states={
            "draft": [("readonly", False)],
        },
    )

    @api.model
    def _default_buyer_partner(self):
        # For input documents the company itself is the buyer.
        if self._default_fp_direction() == "masukan":
            return self.env.user.company_id.partner_id.id

    buyer_partner_id = fields.Many2one(
        comodel_name="res.partner",
        string="Buyer",
        required=True,
        default=lambda self: self._default_buyer_partner(),
        readonly=True,
        states={
            "draft": [("readonly", False)],
        },
    )
    buyer_branch_id = fields.Many2one(
        comodel_name="res.partner",
        string="Buyer Branch",
        required=True,
        readonly=True,
        states={
            "draft": [("readonly", False)],
        },
    )
    base = fields.Float(
        string="Base",
        digits_compute=dp.get_precision("Account"),
        required=True,
        readonly=True,
        states={
            "draft": [("readonly", False)]},
    )
    base_company_currency = fields.Float(
        string="Base in Company Currency",
        digits_compute=dp.get_precision("Account"),
        required=True,
        readonly=True,
        states={
            "draft": [("readonly", False)]},
    )
    ppn_amount = fields.Float(
        string="PPn Amount",
        digits_compute=dp.get_precision("Account"),
        required=True,
        readonly=True,
        states={
            "draft": [("readonly", False)]},
    )
    ppnbm_amount = fields.Float(
        string="PPnBm Amount",
        digits_compute=dp.get_precision("Account"),
        required=True,
        readonly=True,
        states={
            "draft": [("readonly", False)]},
    )
    date = fields.Date(
        string="Document Date",
        required=True,
        readonly=True,
        states={
            "draft": [("readonly", False)],
        },
    )
    taxform_period_id = fields.Many2one(
        string="Masa Pajak",
        comodel_name="l10n_id.tax_period",
        compute="_compute_taxform_period",
        store=True,
    )
    taxform_year_id = fields.Many2one(
        string="Tahun Pajak",
        comodel_name="l10n_id.tax_year",
        compute="_compute_taxform_year",
        store=True,
    )
    note = fields.Text(
        string="Note",
    )
    allow_multiple_reference = fields.Boolean(
        string="Allow Multiple Doc. References",
        compute="_compute_allow_multiple_reference",
        store=False,
    )
    reference_id = fields.Many2one(
        string="Doc. Reference",
        comodel_name="account.move.line",
        readonly=True,
        states={
            "draft": [("readonly", False)]},
    )
    reference_ids = fields.Many2many(
        string="Doc. References",
        comodel_name="account.move.line",
        relation="rel_fp_dummy",
        readonly=True,
        states={
            "draft": [("readonly", False)]},
    )
    # NOTE(review): this m2m targets account.move while reference_id(s)
    # target account.move.line; _compute_all_reference copies line ids
    # into it -- confirm the intended comodel.
    all_reference_ids = fields.Many2many(
        string="Doc. References",
        comodel_name="account.move",
        relation="rel_fp_all_dummy",
        compute="_compute_all_reference",
        store=True,
    )
    allowed_transaction_type_ids = fields.Many2many(
        string="Allowed Transaction Type",
        comodel_name="l10n_id.faktur_pajak_transaction_type",
        compute="_compute_transaction_type",
        store=False,
    )
    allowed_dpp_tax_code_ids = fields.Many2many(
        string="Allowed DPP Tax Codes",
        comodel_name="account.tax.code",
        compute="_compute_tax_code",
        store=False,
    )
    allowed_ppn_tax_code_ids = fields.Many2many(
        string="Allowed PPn Tax Codes",
        comodel_name="account.tax.code",
        compute="_compute_tax_code",
        store=False,
    )
    allowed_ppnbm_tax_code_ids = fields.Many2many(
        string="Allowed PPnBm Tax Codes",
        comodel_name="account.tax.code",
        compute="_compute_tax_code",
        store=False,
    )
    allowed_additional_flag_ids = fields.Many2many(
        string="Allowed Additional Flags",
        comodel_name="l10n_id.faktur_pajak_additional_flag",
        compute="_compute_additional_flag",
        store=False,
    )
    additional_flag_id = fields.Many2one(
        string="Additional Flag",
        comodel_name="l10n_id.faktur_pajak_additional_flag",
        readonly=True,
        states={
            "draft": [("readonly", False)]},
    )
    reverse_id = fields.Many2one(
        string="Reverse From",
        comodel_name="l10n_id.faktur_pajak_common",
        readonly=True,
        states={
            "draft": [("readonly", False)]},
    )
    allow_reverse = fields.Boolean(
        string="Allow to Reverse Document",
        compute="_compute_allow_reverse",
        store=False,
    )
    substitute_id = fields.Many2one(
        string="Substitute For",
        comodel_name="l10n_id.faktur_pajak_common",
        readonly=True,
        states={
            "draft": [("readonly", False)]},
    )
    allow_substitute = fields.Boolean(
        string="Allow to Substitute Document",
        compute="_compute_allow_substitute",
        store=False,
    )
    allow_creditable = fields.Boolean(
        string="Allow to Creditable",
        compute="_compute_allow_creditable",
        store=False,
    )

    @api.model
    def _default_creditable(self):
        return "0"

    creditable = fields.Selection(
        string="Bisa Dikreditkan?",
        selection=[
            ("0", "Tidak Dikreditkan"),
            ("1", "Dikreditkan"),
        ],
        required=True,
        readonly=True,
        states={
            "draft": [("readonly", False)],
        },
        default=lambda self: self._default_creditable(),
    )
    state = fields.Selection(
        string="State",
        required=True,
        readonly=True,
        default="draft",
        track_visibility="onchange",
        selection=[
            ("draft", "Draft"),
            ("confirmed", "Waiting for Approval"),
            ("done", "Done"),
            ("cancelled", "Cancelled"),
        ],
    )
    # E-NOFA FIELDS: one computed field per CSV export column.
    enofa_jenis_transaksi = fields.Char(
        string="KD_JENIS_TRANSAKSI",
        compute="_compute_jenis_transaksi",
        store=False,
    )
    enofa_fg_pengganti = fields.Char(
        string="FG_PENGGANTI",
        compute="_compute_fg_pengganti",
        store=False,
    )
    enofa_nomor_dokumen = fields.Char(
        string="NOMOR_DOKUMEN",
        compute="_compute_nomor_dokumen",
        store=False,
    )
    enofa_masa_pajak = fields.Char(
        string="MASA_PAJAK",
        compute="_compute_masa_pajak",
        store=False,
    )
    enofa_tahun_pajak = fields.Char(
        string="TAHUN_PAJAK",
        compute="_compute_tahun_pajak",
        store=False,
    )
    enofa_tanggal_dokumen = fields.Char(
        string="TANGGAL_DOKUMEN",
        compute="_compute_tanggal_dokumen",
        store=False,
    )
    enofa_npwp = fields.Char(
        string="NPWP",
        compute="_compute_npwp",
        store=False,
    )
    enofa_nama = fields.Char(
        string="NAMA",
        compute="_compute_nama",
        store=False,
    )
    enofa_alamat_lengkap = fields.Char(
        string="ALAMAT_LENGKAP",
        compute="_compute_alamat_lengkap",
        store=False,
    )
    enofa_jumlah_dpp = fields.Integer(
        string="JUMLAH_DPP",
        compute="_compute_jumlah_dpp",
        store=False,
    )
    enofa_jumlah_ppn = fields.Integer(
        string="JUMLAH_PPN",
        compute="_compute_jumlah_ppn",
        store=False,
    )
    enofa_jumlah_ppnbm = fields.Integer(
        string="JUMLAH_PPNBM",  # was "JUMLAH_DPP": copy-paste duplicate
        compute="_compute_jumlah_ppnbm",
        store=False,
    )
    enofa_is_creditable = fields.Char(
        string="IS_CREDITABLE",
        compute="_compute_is_creditable",
        store=False,
    )
    enofa_nomor_dokumen_balik = fields.Char(
        string="-",
        compute="_compute_nomor_dokumen_balik",
        store=False,
    )
    enofa_tanggal_dokumen_balik = fields.Char(
        string="-",
        compute="_compute_tanggal_dokumen_balik",
        store=False,
    )

    @api.multi
    def workflow_action_confirm(self):
        """Move documents to the 'confirmed' state."""
        for fp in self:
            fp.write(
                fp._prepare_confirm_data())

    @api.multi
    def _prepare_confirm_data(self):
        self.ensure_one()
        return {
            "state": "confirmed",
        }

    @api.multi
    def workflow_action_done(self):
        """Move documents to the 'done' state."""
        for fp in self:
            fp.write(
                fp._prepare_done_data())

    @api.multi
    def _prepare_done_data(self):
        self.ensure_one()
        return {
            "state": "done",
        }

    @api.multi
    def workflow_action_cancel(self):
        """Move documents to the 'cancelled' state."""
        for fp in self:
            fp.write(
                fp._prepare_cancel_data())

    @api.multi
    def _prepare_cancel_data(self):
        self.ensure_one()
        return {
            "state": "cancelled",
        }

    @api.multi
    def workflow_action_reset(self):
        """Reset documents back to 'draft'."""
        for fp in self:
            fp.write(
                fp._prepare_reset_data())

    @api.multi
    def _prepare_reset_data(self):
        self.ensure_one()
        return {
            "state": "draft",
        }

    @api.onchange("seller_partner_id")
    def onchange_seller(self):
        """Clear the seller branch when it no longer belongs to the
        selected seller; default the branch to the seller itself."""
        if self.seller_partner_id:
            partner = self.seller_partner_id.commercial_partner_id
            if self.seller_branch_id:
                branch = self.seller_branch_id.commercial_partner_id
                if partner != branch:
                    self.seller_branch_id = False
            else:
                self.seller_branch_id = self.seller_partner_id
        else:
            self.seller_branch_id = False

    @api.onchange(
        "reference_ids",
        "reference_id",
    )
    def onchange_all_reference(self):
        """Accumulate base/PPn/PPnBm amounts from the referenced move
        lines, grouped by the allowed tax codes.

        NOTE(review): amounts are added with ``+=`` without being reset
        first, so re-triggering the onchange accumulates on top of the
        previous values -- confirm whether a reset to zero is intended.
        """
        obj_line = self.env["account.move.line"]
        # The counterpart partner filters the candidate move lines.
        if self.fp_direction == "masukan":
            partner_id = self.seller_partner_id and \
                self.seller_partner_id.id or 0
        else:
            partner_id = self.buyer_partner_id and \
                self.buyer_partner_id.id or 0
        criteria = [
            ("move_id", "in", self.all_reference_ids.ids),
            ("tax_code_id", "in", self.allowed_dpp_tax_code_ids.ids),
            ("partner_id", "=", partner_id),
        ]
        for line in obj_line.search(criteria):
            if line.currency_id:
                self.base += abs(line.amount_currency)
            else:
                self.base += abs(line.tax_amount)
            self.base_company_currency += abs(line.tax_amount)
        criteria = [
            ("move_id", "in", self.all_reference_ids.ids),
            ("tax_code_id", "in", self.allowed_ppn_tax_code_ids.ids),
            ("partner_id", "=", partner_id),
        ]
        for line in obj_line.search(criteria):
            self.ppn_amount += abs(line.tax_amount)
        criteria = [
            ("move_id", "in", self.all_reference_ids.ids),
            ("tax_code_id", "in", self.allowed_ppnbm_tax_code_ids.ids),
            ("partner_id", "=", partner_id),
        ]
        for line in obj_line.search(criteria):
            self.ppnbm_amount += abs(line.tax_amount)

    @api.onchange("buyer_partner_id")
    def onchange_buyer(self):
        """Mirror of onchange_seller for the buyer side."""
        if self.buyer_partner_id:
            partner = self.buyer_partner_id.commercial_partner_id
            if self.buyer_branch_id:
                branch = self.buyer_branch_id.commercial_partner_id
                if partner != branch:
                    self.buyer_branch_id = False
            else:
                self.buyer_branch_id = self.buyer_partner_id
        else:
            self.buyer_branch_id = False

    @api.onchange("company_id")
    def onchange_company_id(self):
        """Default the document currency to the company currency."""
        self.currency_id = False
        if self.company_id:
            self.currency_id = self.company_id.currency_id.id
agpl-3.0
SantosDevelopers/sborganicos
venv/lib/python3.5/site-packages/django/contrib/postgres/lookups.py
106
1648
from django.db.models import Lookup, Transform
from django.utils.encoding import force_text

from .search import SearchVector, SearchVectorExact, SearchVectorField


class PostgresSimpleLookup(Lookup):
    """Base for lookups that compile to ``<lhs> <operator> <rhs>``.

    Subclasses only need to set ``lookup_name`` and the SQL
    ``operator`` string.
    """

    def as_sql(self, qn, connection):
        # Compile both sides, then concatenate their parameter lists in
        # lhs-then-rhs order to match the rendered SQL.
        lhs, lhs_params = self.process_lhs(qn, connection)
        rhs, rhs_params = self.process_rhs(qn, connection)
        params = lhs_params + rhs_params
        return '%s %s %s' % (lhs, self.operator, rhs), params


class DataContains(PostgresSimpleLookup):
    # PostgreSQL containment operator ("lhs contains rhs").
    lookup_name = 'contains'
    operator = '@>'


class ContainedBy(PostgresSimpleLookup):
    # Inverse of DataContains ("lhs is contained by rhs").
    lookup_name = 'contained_by'
    operator = '<@'


class Overlap(PostgresSimpleLookup):
    # True when both sides share at least one element.
    lookup_name = 'overlap'
    operator = '&&'


class HasKey(PostgresSimpleLookup):
    # Key-existence test; the rhs is a raw key string, so it must skip
    # the field's normal value preparation (prepare_rhs = False).
    lookup_name = 'has_key'
    operator = '?'
    prepare_rhs = False


class HasKeys(PostgresSimpleLookup):
    # True when *all* of the given keys are present.
    lookup_name = 'has_keys'
    operator = '?&'

    def get_prep_lookup(self):
        # Keys are compared as text on the database side.
        return [force_text(item) for item in self.rhs]


class HasAnyKeys(HasKeys):
    # True when *any* of the given keys is present.
    lookup_name = 'has_any_keys'
    operator = '?|'


class Unaccent(Transform):
    # bilateral=True applies UNACCENT to both sides of a comparison.
    bilateral = True
    lookup_name = 'unaccent'
    function = 'UNACCENT'


class SearchLookup(SearchVectorExact):
    """The ``__search`` lookup for full-text search."""
    lookup_name = 'search'

    def process_lhs(self, qn, connection):
        # Wrap plain columns in SearchVector on the fly so
        # ``field__search`` also works on non-vector fields.
        if not isinstance(self.lhs.output_field, SearchVectorField):
            self.lhs = SearchVector(self.lhs)
        lhs, lhs_params = super(SearchLookup, self).process_lhs(qn, connection)
        return lhs, lhs_params


class TrigramSimilar(PostgresSimpleLookup):
    # pg_trgm similarity operator; '%%' is the literal '%' operator
    # escaped because the SQL later passes through %-style parameter
    # interpolation.
    lookup_name = 'trigram_similar'
    operator = '%%'
mit
kije/PySpyX
pyspy/migrations/0004_auto_20150515_1104.py
1
1390
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('pyspy', '0003_surveillancevideos'), ] operations = [ migrations.CreateModel( name='SurveillanceVideo', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, help_text='', verbose_name='ID')), ('date', models.DateTimeField(help_text='', verbose_name='Capture Date', auto_now_add=True)), ('path', models.CharField(help_text='', max_length=512, verbose_name='Path')), ('last_modified', models.DateTimeField(auto_now=True, help_text='', null=True, verbose_name='Last modified')), ], ), migrations.DeleteModel( name='SurveillanceVideos', ), migrations.AddField( model_name='camera', name='last_modified', field=models.DateTimeField(auto_now=True, help_text='', null=True, verbose_name='Last modified'), ), migrations.AlterField( model_name='camera', name='name', field=models.CharField(help_text='', max_length=150, null=True, verbose_name='Name', blank=True), ), ]
gpl-3.0
ShaolongHu/Nitrate
tcms/xmlrpc/utils.py
1
7751
# -*- coding: utf-8 -*-

import re

from django.db.models import Count, FieldDoesNotExist

from tcms.management.models import Product

# Operation selectors for distinct_m2m_rows().
COUNT_DISTINCT = 0
QUERY_DISTINCT = 1

ACCEPTABLE_BOOL_VALUES = ('0', '1', 0, 1, True, False)


def parse_bool_value(value):
    """Normalize an XML-RPC boolean-ish value.

    '0' -> False and '1' -> True; 0, 1, True and False pass through
    unchanged. Raises ValueError for anything else.
    """
    if value in ACCEPTABLE_BOOL_VALUES:
        # Compare by equality: the previous ``value is '0'`` identity
        # test only matched interned literals and was implementation
        # dependent for strings constructed at runtime.
        if value == '0':
            return False
        elif value == '1':
            return True
        else:
            return value
    else:
        raise ValueError('Unacceptable bool value.')


def pre_check_product(values):
    """Resolve a Product from an id or a name.

    ``values`` may be the product itself (int id or name string) or a
    dict carrying a 'product' key. Returns None when the dict has no
    usable 'product' entry; raises ValueError on unsupported types.
    """
    if isinstance(values, dict):
        if not values.get('product'):
            return
        product_str = values['product']
    else:
        product_str = values

    if not isinstance(product_str, (str, int)):
        raise ValueError('The type of product is not recognizable.')

    try:
        product_id = int(product_str)
        return Product.objects.get(id=product_id)
    except ValueError:
        # Not numeric: treat the value as the product name.
        return Product.objects.get(name=product_str)


def pre_process_ids(value):
    """Coerce an id, a comma string ('1, 2') or a list of either to [int]."""
    if isinstance(value, list):
        return [c if isinstance(c, int) else int(c.strip())
                for c in value if c]

    if isinstance(value, str):
        return [int(c.strip()) for c in value.split(',') if c]

    if isinstance(value, int):
        return [value]

    raise TypeError('Unrecognizable type of ids')


def compare_list(src_list, dest_list):
    """Return items present in src_list but not in dest_list (unordered)."""
    return list(set(src_list) - set(dest_list))


def _lookup_fields_in_model(cls, fields):
    """Walk a ``__``-joined lookup path and yield True at m2m hops.

    @param cls: table model class
    @type cls: subclass of django.db.models.Model
    @param fields: segments of the lookup path (where condition).
    @type fields: list

    Example: for cls TestRun and path 'plan__case__is_automated',
    'plan' is a ForeignKey to TestPlan, 'case' is a ManyToMany to
    TestCase (the INNER JOIN that can duplicate rows) and
    'is_automated' is a plain field.
    """
    for field in fields:
        try:
            field_info = cls._meta.get_field_by_name(field)
            if field_info[-1]:
                # The last tuple member is the m2m flag: joining through
                # this hop can yield duplicate rows.
                yield True
            else:
                # Follow the relation so the next segment is resolved
                # against the related model.
                if getattr(field_info[0], 'related', None):
                    cls = field_info[0].related.parent_model
        except FieldDoesNotExist:
            # Unknown segment: let the ORM raise later if it matters.
            pass


def _need_distinct_m2m_rows(cls, fields):
    """Return True when the lookup path crosses a ManyToMany field.

    Returns False for an empty path.
    """
    return next(_lookup_fields_in_model(cls, fields), False) \
        if fields else False


def distinct_m2m_rows(cls, values, op_type):
    """Filter ``cls`` by ``values``, deduplicating m2m joins if needed.

    @param cls: table model class
    @type cls: subclass of django.db.models.Model
    @param values: field lookups for the where condition.
    @type values: dict
    @param op_type: COUNT_DISTINCT for a count, QUERY_DISTINCT for a
        queryset.
    @return: int or django.db.models.query.QuerySet
    """
    flag = False
    # Plain iteration works on both Python 2 and 3 (was .iterkeys()).
    for field in values:
        if '__' in field:
            if _need_distinct_m2m_rows(cls, field.split('__')):
                flag = True
                break

    qs = cls.objects.filter(**values)
    if op_type == COUNT_DISTINCT:
        return qs.aggregate(Count('pk', distinct=True))['pk__count'] if flag \
            else qs.count()
    elif op_type == QUERY_DISTINCT:
        return qs.distinct() if flag else qs
    else:
        raise TypeError('Not implement op type %s' % op_type)


def distinct_count(cls, values):
    """Count rows matching ``values`` without m2m-join duplicates."""
    return distinct_m2m_rows(cls, values, op_type=COUNT_DISTINCT)


def distinct_filter(cls, values):
    """Queryset matching ``values`` without m2m-join duplicates."""
    return distinct_m2m_rows(cls, values, op_type=QUERY_DISTINCT)


class Comment(object):
    """Helper that posts the same comment on a batch of objects."""

    def __init__(self, request, content_type, object_pks, comment=None):
        self.request = request
        self.content_type = content_type  # 'app_label.model' string
        self.object_pks = object_pks
        self.comment = comment

    def add(self):
        """Post ``self.comment`` on every target object.

        Returns the form errors dict when validation fails, otherwise
        None. Fires the standard django.contrib.comments signals.
        """
        import time

        from django.contrib import comments
        from django.contrib.comments import signals
        from django.db import models

        comment_form = comments.get_form()
        model = models.get_model(*self.content_type.split('.', 1))
        targets = model._default_manager.filter(pk__in=self.object_pks)

        for target in targets.iterator():
            d_form = comment_form(target)
            timestamp = str(time.time()).split('.')[0]
            object_pk = str(target.pk)
            data = {
                'content_type': self.content_type,
                'object_pk': object_pk,
                'timestamp': timestamp,
                'comment': self.comment
            }
            # The security hash proves the prefilled fields were not
            # tampered with between form render and submit.
            security_hash_dict = {
                'content_type': self.content_type,
                'object_pk': object_pk,
                'timestamp': timestamp
            }
            data['security_hash'] = d_form.generate_security_hash(
                **security_hash_dict)
            form = comment_form(target, data=data)

            # Response the errors if got
            if not form.is_valid():
                return form.errors

            # Otherwise create the comment
            comment = form.get_comment_object()
            comment.ip_address = self.request.META.get("REMOTE_ADDR", None)
            if self.request.user.is_authenticated():
                comment.user = self.request.user

            # Signal that the comment is about to be saved
            signals.comment_will_be_posted.send(
                sender=comment.__class__,
                comment=comment,
                request=self.request
            )

            # Save the comment and signal that it was saved
            comment.save()
            signals.comment_was_posted.send(
                sender=comment.__class__,
                comment=comment,
                request=self.request
            )
        return


estimated_time_re = re.compile(r'^(\d+[d])?(\d+[h])?(\d+[m])?(\d+[s])?$')


def pre_process_estimated_time(value):
    """Normalize estimated_time to the compact ``XdXhXmXs`` form.

    Accepts either the compact form (e.g. '1d2h3m4s', returned as-is)
    or 'HH:MM:SS'. Raises ValueError for anything else.
    """
    if isinstance(value, str):
        match = estimated_time_re.match(value.replace(' ', ''))
        if match:
            return value
        else:
            raw_estimated_time = value.split(':')
            if len(raw_estimated_time) == 3:
                hours, minutes, seconds = raw_estimated_time
                # Reject non-numeric components such as 'aa:bb:cc',
                # which the old code silently formatted into garbage
                # (this resolves the previous FIXME).
                if not (hours.isdigit() and minutes.isdigit()
                        and seconds.isdigit()):
                    raise ValueError('Invaild estimated_time format.')
                return '{0}h{1}m{2}s'.format(hours, minutes, seconds)
            else:
                raise ValueError('Invaild estimated_time format.')
    else:
        raise ValueError('Invaild estimated_time format.')
gpl-2.0
misabelcarde/kivy-mobile-computing
src/BoardPlay.py
1
3832
from kivy.uix.gridlayout import GridLayout
from kivy.uix.button import Button
from kivy.uix.label import Label
from Singleton import *
from Instructions import Instructions
from OwnBoard import BaseOwnBoard
from BoardFunctions import *
from SendPackage import *
from kivy.lang import Builder

Builder.load_string('''
<BoardPlay>:
    id: boardPlayID
    cols: 11
    rows: 11
''')


class BoardPlay(GridLayout):
    '''Generation of the board and functions for complete it.

    Cell values in the shared matrices: 1 = boat, 0 = water,
    2 = hit boat ("BOM"), -1 = shot water.
    '''

    def __init__(self, **kwargs):
        super(BoardPlay, self).__init__(**kwargs)
        self.diccp = {}  # cell id 'row_col' -> Button
        self.generateBoard()
        self.base = Singleton()

    def _add_cell(self, row, col, matrix, lock_on_turn):
        """Create one board button, register it and mirror matrix state.

        Extracted from the two duplicated branches of generateBoard; the
        only differences were which matrix is read and whether the turn
        lock applies (two-player mode only).
        """
        button = Button(id=(str(row) + '_' + str(col)),
                        background_color=(0, 2, 255, 1))
        button.bind(on_press=self.putBoat)
        self.add_widget(button)
        self.diccp[str(row) + '_' + str(col)] = button
        cell = matrix[row][col]
        if cell == 2:
            # Already-hit boat cell.
            button.background_color = [255, 0, 0, 1]
            button.text = "BOM"
            button.disabled = True
        elif cell == -1:
            # Already-shot water cell.
            button.background_color = [0, 2, 255, 1]
            button.text = '·'
            button.font_size = 50
            button.disabled = True
        if lock_on_turn and Singleton().turno == 1:
            # Two-player mode: lock the board while it is not our turn.
            button.disabled = True

    def generateBoard(self):
        '''Generate the header labels and the 10x10 grid of buttons.

        Buttons are added to a dictionary where key:id and value:button.
        Fixed: the two mode branches were near-identical copies; they now
        share _add_cell, differing only in the matrix used and the turn
        lock.
        '''
        letters = ('A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J')
        self.add_widget(Label(text=''))

        two_players = Singleton().mode == 'TwoPlayers'
        # Two-player mode plays on matrix; network mode on matrix2.
        matrix = Singleton().matrix if two_players else Singleton().matrix2

        for i in range(1, 11):
            self.add_widget(Label(text=str(i)))
        for j in range(len(letters)):
            self.add_widget(Label(text=letters[j]))
            for k in range(10):
                self._add_cell(j, k, matrix, two_players)

    def putBoat(self, button):
        '''Handle a shot on a board cell (button press).'''
        # NOTE(review): the two results below were computed but never used
        # in the original; the calls are kept in case BoardFunctions relies
        # on side effects -- confirm and drop if not.
        limits = getLimitingButtons(button)
        boatsIds = getBoatsIds()
        pos = getButtonPosition(button)

        two_players = self.base.mode == 'TwoPlayers'
        matrix = self.base.matrix if two_players else self.base.matrix2

        cell = matrix[pos[0]][pos[1]]
        if cell == 1:
            # Hit: mark the boat cell and count toward the win condition.
            matrix[pos[0]][pos[1]] = 2
            button.background_color = [255, 0, 0, 1]
            button.text = "BOM"
            button.disabled = True
            self.base.aux += 1
            if self.base.aux == 20:
                # All 20 boat cells sunk.
                self.base.winner = 1
        elif cell == 0:
            # Miss: mark the water cell.
            matrix[pos[0]][pos[1]] = -1
            button.background_color = [0, 2, 255, 1]
            button.text = '·'
            button.font_size = 50
            button.disabled = True
        # Pass the turn after the shot (original comment: "solo para 2
        # jugadores" / "only for 2 players").
        self.base.turno = 1

        Singleton().gameboard.clear_widgets(children=None)
        Singleton().gameboard2.clear_widgets(children=None)
        if self.base.mode == 'TwoPlayers':
            Singleton().gameboard2.on_pre_enter()
        else:
            Singleton().gameboard.on_pre_enter()
            # Network mode: push our board state to the opponent.
            send(Singleton().opponentIP, str(self.base.matrix2))
mit
sashakames/COG
filebrowser/tests/settings.py
7
2733
# coding: utf-8

# PYTHON IMPORTS
import os

# DJANGO IMPORTS
from django.test import TestCase
from django.contrib.auth.models import User

# FILEBROWSER IMPORTS
from filebrowser.settings import *


class SettingsTests(TestCase):
    """Sanity checks for the filebrowser settings module.

    Fixed throughout: path-existence checks used
    ``assertEqual(os.path.exists(...), 1)``, comparing a bool against the
    literal 1; ``assertTrue`` states the intent directly.
    """

    def setUp(self):
        pass

    def test_media_root(self):
        """Test that ``MEDIA_ROOT`` exists."""
        self.assertTrue(os.path.exists(MEDIA_ROOT))

    def test_directory(self):
        """Test that ``MEDIA_ROOT`` plus ``DIRECTORY`` exists."""
        self.assertTrue(os.path.exists(os.path.join(MEDIA_ROOT, DIRECTORY)))
        # Check for trailing slash: basename of a slash-terminated path is ''.
        self.assertEqual(os.path.basename(DIRECTORY), '')

    def test_path_filebrowser_media(self):
        """Test that ``PATH_FILEBROWSER_MEDIA`` exists."""
        self.assertTrue(os.path.exists(PATH_FILEBROWSER_MEDIA))

    def test_versions_basedir(self):
        """Test that ``MEDIA_ROOT`` plus ``VERSIONS_BASEDIR`` exists."""
        self.assertTrue(os.path.exists(os.path.join(MEDIA_ROOT, VERSIONS_BASEDIR)))

    def test_admin_thumbnail(self):
        """Test if ``ADMIN_THUMBNAIL`` is set and is part of ``VERSIONS``."""
        self.assertNotEqual(ADMIN_THUMBNAIL, '')
        self.assertIn(ADMIN_THUMBNAIL, VERSIONS)

    def test_admin_versions(self):
        """Test if ``ADMIN_VERSIONS`` are part of ``VERSIONS``."""
        for item in ADMIN_VERSIONS:
            self.assertIn(item, VERSIONS)

    def test_strict_pil(self):
        """Test if ``STRICT_PIL`` is in ``True, False``."""
        self.assertIn(STRICT_PIL, [True, False])

    def test_normalize_filename(self):
        """Test if ``NORMALIZE_FILENAME`` is in ``True, False``."""
        self.assertIn(NORMALIZE_FILENAME, [True, False])

    def test_convert_filename(self):
        """Test if ``CONVERT_FILENAME`` is in ``True, False``."""
        self.assertIn(CONVERT_FILENAME, [True, False])

    def test_default_sorting_by(self):
        """Test if ``DEFAULT_SORTING_BY`` is an allowed sort key."""
        self.assertIn(DEFAULT_SORTING_BY,
                      ['date', 'filesize', 'filename_lower', 'filetype_checked'])

    def test_default_sorting_order(self):
        """Test if ``DEFAULT_SORTING_ORDER`` is in ``asc, desc``."""
        self.assertIn(DEFAULT_SORTING_ORDER, ['asc', 'desc'])

    def test_search_traverse(self):
        """Test if ``SEARCH_TRAVERSE`` is in ``True, False``."""
        self.assertIn(SEARCH_TRAVERSE, [True, False])
bsd-3-clause
shft117/SteckerApp
erpnext/patches/v5_0/fix_taxes_and_totals_in_party_currency.py
102
2496
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt

import frappe
from frappe.model.meta import get_field_precision


def execute():
    """Backfill party-currency amounts from their base-currency columns.

    Runs only if the v5_0 'taxes_and_totals_in_party_currency' patch has
    already been applied; otherwise the party-currency columns do not
    exist/need fixing and there is nothing to do.
    """
    if not frappe.db.sql("""select name from `tabPatch Log`
        where patch = 'erpnext.patches.v5_0.taxes_and_totals_in_party_currency'"""):
        return

    selling_doctypes = ["Quotation", "Sales Order", "Delivery Note", "Sales Invoice"]
    buying_doctypes = ["Supplier Quotation", "Purchase Order", "Purchase Receipt",
        "Purchase Invoice"]

    for dt in selling_doctypes:
        update_values(dt, "Sales Taxes and Charges")

    for dt in buying_doctypes:
        update_values(dt, "Purchase Taxes and Charges")


def update_values(dt, tax_table):
    """Recompute party-currency values for one doctype and its child tables.

    Only rows whose party-currency column is still 0 while the matching
    base-currency column is non-zero are touched, so the patch is safe to
    re-run.  Rounding uses the precision configured on the target field.

    @param dt: parent doctype name, e.g. "Sales Invoice"
    @param tax_table: matching taxes child doctype name
    """
    rate_field_precision = get_field_precision(
        frappe.get_meta(dt + " Item").get_field("rate"))
    tax_amount_precision = get_field_precision(
        frappe.get_meta(tax_table).get_field("tax_amount"))

    # Update total_taxes_and_charges in party currency.
    # (Fixed comment: the original said "update net_total, discount_on",
    # which does not match this statement.)
    frappe.db.sql("""
        UPDATE `tab{0}`
        SET total_taxes_and_charges = round(base_total_taxes_and_charges / conversion_rate, {1})
        WHERE docstatus < 2
            and ifnull(base_total_taxes_and_charges, 0) != 0
            and ifnull(total_taxes_and_charges, 0) = 0
        """.format(dt, tax_amount_precision))

    # Update item-level net_rate / net_amount in party currency.
    frappe.db.sql("""
        UPDATE `tab{0}` par, `tab{1}` item
        SET item.net_amount = round(item.base_net_amount / par.conversion_rate, {2}),
            item.net_rate = round(item.base_net_rate / par.conversion_rate, {2})
        WHERE par.name = item.parent
            and par.docstatus < 2
            and ((ifnull(item.base_net_amount, 0) != 0 and ifnull(item.net_amount, 0) = 0)
                or (ifnull(item.base_net_rate, 0) != 0 and ifnull(item.net_rate, 0) = 0))
        """.format(dt, dt + " Item", rate_field_precision))

    # Update tax rows in party currency.
    # NOTE(review): two round() calls below use an unqualified
    # ``conversion_rate``; it resolves to the parent table since the tax
    # child table has no such column, but qualifying as par.conversion_rate
    # would be clearer -- left as-is to avoid changing shipped SQL.
    frappe.db.sql("""
        UPDATE `tab{0}` par, `tab{1}` tax
        SET tax.tax_amount = round(tax.base_tax_amount / par.conversion_rate, {2}),
            tax.total = round(tax.base_total / conversion_rate, {2}),
            tax.tax_amount_after_discount_amount = round(tax.base_tax_amount_after_discount_amount / conversion_rate, {2})
        WHERE par.name = tax.parent
            and par.docstatus < 2
            and ((ifnull(tax.base_tax_amount, 0) != 0 and ifnull(tax.tax_amount, 0) = 0)
                or (ifnull(tax.base_total, 0) != 0 and ifnull(tax.total, 0) = 0)
                or (ifnull(tax.base_tax_amount_after_discount_amount, 0) != 0
                    and ifnull(tax.tax_amount_after_discount_amount, 0) = 0))
        """.format(dt, tax_table, tax_amount_precision))
agpl-3.0
CyanideDevices/android_kernel_samsung_smdk4412
tools/perf/scripts/python/futex-contention.py
11261
1486
# futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
#  http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention

import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] +
                '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *

thread_thislock = {}   # tid -> futex uaddr the thread is blocked on
thread_blocktime = {}  # tid -> ns timestamp when it entered FUTEX_WAIT
lock_waits = {}        # long-lived stats on (tid, lock) blockage elapsed time
process_names = {}     # long-lived tid-to-execname mapping
# Fixed: process_names was defined twice in the original; one kept.


def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
                              nr, uaddr, op, val, utime, uaddr2, val3):
    """Record when a thread starts waiting on a futex."""
    cmd = op & FUTEX_CMD_MASK
    if cmd != FUTEX_WAIT:
        return  # we don't care about originators of WAKE events
    process_names[tid] = comm
    thread_thislock[tid] = uaddr
    thread_blocktime[tid] = nsecs(s, ns)


def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, nr, ret):
    """Accumulate the elapsed wait time when the futex call returns."""
    # Fixed: dict.has_key() is Python-2-only; use the ``in`` operator.
    if tid in thread_blocktime:
        elapsed = nsecs(s, ns) - thread_blocktime[tid]
        add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
        del thread_blocktime[tid]
        del thread_thislock[tid]


def trace_begin():
    # print() as a function is valid on both Python 2 and 3.
    print("Press control+C to stop and show the summary")


def trace_end():
    """Dump per-(thread, lock) contention statistics."""
    for (tid, lock) in lock_waits:
        # Renamed from min/max, which shadowed the builtins.
        min_ns, max_ns, avg, count = lock_waits[tid, lock]
        print("%s[%d] lock %x contended %d times, %d avg ns" %
              (process_names[tid], tid, lock, count, avg))
gpl-2.0
vladikoff/loop-server
loadtests/loadtest.py
1
5543
from gevent import monkey
monkey.patch_all()

import json

import gevent
from requests_hawk import HawkAuth
from loads.case import TestCase


class TestLoop(TestCase):
    """Load-test scenario driving a full Loop call: registration, call-url
    generation, call initiation and the websocket signalling handshake.

    Fixed: Python-2-only ``print`` statements replaced with the function
    form, and the deprecated ``assertEquals`` alias with ``assertEqual``.
    """

    def setUp(self):
        self.wss = []  # all websockets opened during the test

    def tearDown(self):
        for ws in self.wss:
            ws.close()
            # XXX this is missing in ws4py
            ws._th.join()
            if ws.sock:
                ws.sock.close()

    def _send_ws_message(self, ws, **msg):
        """Serialize keyword args to JSON and send over the websocket."""
        return ws.send(json.dumps(msg))

    def create_ws(self, *args, **kw):
        # Track every websocket so tearDown can close it.
        ws = TestCase.create_ws(self, *args, **kw)
        self.wss.append(ws)
        return ws

    def test_all(self):
        self.register()
        token = self.generate_token()
        call_data = self.initiate_call(token)
        calls = self.list_pending_calls()
        self._test_websockets(token, call_data, calls)

    def _test_websockets(self, token, call_data, calls):
        """Run the caller/callee signalling state machine to 'connected'."""
        progress_url = call_data['progressURL']
        websocket_token = call_data['websocketToken']
        call_id = call_data['callId']
        caller_alerts = []
        callee_alerts = []
        self.connected = False

        def _handle_callee(message_data):
            message = json.loads(message_data.data)
            callee_alerts.append(message)
            state = message.get('state')
            messageType = message.get('messageType')
            if messageType == "progress" and state == "connecting":
                self._send_ws_message(
                    callee_ws,
                    messageType="action",
                    event="media-up")
                caller_ws.receive()
            elif messageType == "progress" and state == "connected":
                self.connected = True

        def _handle_caller(message_data):
            message = json.loads(message_data.data)
            caller_alerts.append(message)
            state = message.get('state')
            messageType = message.get('messageType')
            if messageType == "hello" and state == "init":
                # This is the first message. Ask the second party to connect.
                self._send_ws_message(
                    callee_ws,
                    messageType='hello',
                    auth=calls[0]['websocketToken'],
                    callId=call_id)
                callee_ws.receive()
            elif messageType == "progress" and state == "alerting":
                self._send_ws_message(
                    caller_ws,
                    messageType="action",
                    event="accept")
                callee_ws.receive()
            elif messageType == "progress" and state == "connecting":
                self._send_ws_message(
                    caller_ws,
                    messageType="action",
                    event="media-up")
                callee_ws.receive()
            elif messageType == "progress" and state == "half-connected":
                caller_ws.receive()
            elif messageType == "progress" and state == "connected":
                self.connected = True

        # Let's connect to the web socket until it gets closed.
        callee_ws = self.create_ws(progress_url, callback=_handle_callee)
        caller_ws = self.create_ws(progress_url, callback=_handle_caller)

        self._send_ws_message(
            caller_ws,
            messageType='hello',
            auth=websocket_token,
            callId=call_id)

        while not self.connected:
            gevent.sleep(.5)

    def _get_json(self, resp):
        """Parse a JSON response, dumping the raw body on failure."""
        try:
            return resp.json()
        except Exception:
            print(resp.text)
            raise

    def register(self):
        resp = self.session.post(
            self.server_url + '/registration',
            data={'simple_push_url': 'http://httpbin.org/deny'})
        self.assertEqual(200, resp.status_code,
                         "Registration failed: %s" % resp.content)
        try:
            self.hawk_auth = HawkAuth(
                hawk_session=resp.headers['hawk-session-token'],
                server_url=self.server_url)
        except KeyError:
            print(resp)
            raise

    def generate_token(self):
        """Create a call-url and return its trailing token."""
        resp = self.session.post(
            self.server_url + '/call-url',
            data=json.dumps({'callerId': 'alexis@mozilla.com'}),
            headers={'Content-Type': 'application/json'},
            auth=self.hawk_auth
        )
        self.assertEqual(resp.status_code, 200,
                         "Call-Url creation failed: %s" % resp.content)
        data = self._get_json(resp)
        call_url = data.get('callUrl', data.get('call_url'))
        return call_url.split('/').pop()

    def initiate_call(self, token):
        # This happens when not authenticated.
        resp = self.session.post(
            self.server_url + '/calls/%s' % token,
            data=json.dumps({"callType": "audio-video"}),
            headers={'Content-Type': 'application/json'}
        )
        self.assertEqual(resp.status_code, 200,
                         "Call Initialization failed: %s" % resp.content)
        return self._get_json(resp)

    def list_pending_calls(self):
        resp = self.session.get(
            self.server_url + '/calls?version=200',
            auth=self.hawk_auth)
        data = self._get_json(resp)
        return data['calls']

    def revoke_token(self, token):
        # You don't need to be authenticated to revoke a token.
        self.session.delete(self.server_url + '/call-url/%s' % token)
mpl-2.0
benchisell/photostream-bc
flask/lib/python2.7/site-packages/openid/message.py
146
21562
"""Extension argument processing code """ __all__ = ['Message', 'NamespaceMap', 'no_default', 'registerNamespaceAlias', 'OPENID_NS', 'BARE_NS', 'OPENID1_NS', 'OPENID2_NS', 'SREG_URI', 'IDENTIFIER_SELECT'] import copy import warnings import urllib from openid import oidutil from openid import kvform try: ElementTree = oidutil.importElementTree() except ImportError: # No elementtree found, so give up, but don't fail to import, # since we have fallbacks. ElementTree = None # This doesn't REALLY belong here, but where is better? IDENTIFIER_SELECT = 'http://specs.openid.net/auth/2.0/identifier_select' # URI for Simple Registration extension, the only commonly deployed # OpenID 1.x extension, and so a special case SREG_URI = 'http://openid.net/sreg/1.0' # The OpenID 1.X namespace URI OPENID1_NS = 'http://openid.net/signon/1.0' THE_OTHER_OPENID1_NS = 'http://openid.net/signon/1.1' OPENID1_NAMESPACES = OPENID1_NS, THE_OTHER_OPENID1_NS # The OpenID 2.0 namespace URI OPENID2_NS = 'http://specs.openid.net/auth/2.0' # The namespace consisting of pairs with keys that are prefixed with # "openid." but not in another namespace. NULL_NAMESPACE = oidutil.Symbol('Null namespace') # The null namespace, when it is an allowed OpenID namespace OPENID_NS = oidutil.Symbol('OpenID namespace') # The top-level namespace, excluding all pairs with keys that start # with "openid." BARE_NS = oidutil.Symbol('Bare namespace') # Limit, in bytes, of identity provider and return_to URLs, including # response payload. See OpenID 1.1 specification, Appendix D. OPENID1_URL_LIMIT = 2047 # All OpenID protocol fields. Used to check namespace aliases. 
OPENID_PROTOCOL_FIELDS = [ 'ns', 'mode', 'error', 'return_to', 'contact', 'reference', 'signed', 'assoc_type', 'session_type', 'dh_modulus', 'dh_gen', 'dh_consumer_public', 'claimed_id', 'identity', 'realm', 'invalidate_handle', 'op_endpoint', 'response_nonce', 'sig', 'assoc_handle', 'trust_root', 'openid', ] class UndefinedOpenIDNamespace(ValueError): """Raised if the generic OpenID namespace is accessed when there is no OpenID namespace set for this message.""" class InvalidOpenIDNamespace(ValueError): """Raised if openid.ns is not a recognized value. For recognized values, see L{Message.allowed_openid_namespaces} """ def __str__(self): s = "Invalid OpenID Namespace" if self.args: s += " %r" % (self.args[0],) return s # Sentinel used for Message implementation to indicate that getArg # should raise an exception instead of returning a default. no_default = object() # Global namespace / alias registration map. See # registerNamespaceAlias. registered_aliases = {} class NamespaceAliasRegistrationError(Exception): """ Raised when an alias or namespace URI has already been registered. """ pass def registerNamespaceAlias(namespace_uri, alias): """ Registers a (namespace URI, alias) mapping in a global namespace alias map. Raises NamespaceAliasRegistrationError if either the namespace URI or alias has already been registered with a different value. This function is required if you want to use a namespace with an OpenID 1 message. """ global registered_aliases if registered_aliases.get(alias) == namespace_uri: return if namespace_uri in registered_aliases.values(): raise NamespaceAliasRegistrationError, \ 'Namespace uri %r already registered' % (namespace_uri,) if alias in registered_aliases: raise NamespaceAliasRegistrationError, \ 'Alias %r already registered' % (alias,) registered_aliases[alias] = namespace_uri class Message(object): """ In the implementation of this object, None represents the global namespace as well as a namespace with no key. 
@cvar namespaces: A dictionary specifying specific namespace-URI to alias mappings that should be used when generating namespace aliases. @ivar ns_args: two-level dictionary of the values in this message, grouped by namespace URI. The first level is the namespace URI. """ allowed_openid_namespaces = [OPENID1_NS, THE_OTHER_OPENID1_NS, OPENID2_NS] def __init__(self, openid_namespace=None): """Create an empty Message. @raises InvalidOpenIDNamespace: if openid_namespace is not in L{Message.allowed_openid_namespaces} """ self.args = {} self.namespaces = NamespaceMap() if openid_namespace is None: self._openid_ns_uri = None else: implicit = openid_namespace in OPENID1_NAMESPACES self.setOpenIDNamespace(openid_namespace, implicit) def fromPostArgs(cls, args): """Construct a Message containing a set of POST arguments. """ self = cls() # Partition into "openid." args and bare args openid_args = {} for key, value in args.items(): if isinstance(value, list): raise TypeError("query dict must have one value for each key, " "not lists of values. Query is %r" % (args,)) try: prefix, rest = key.split('.', 1) except ValueError: prefix = None if prefix != 'openid': self.args[(BARE_NS, key)] = value else: openid_args[rest] = value self._fromOpenIDArgs(openid_args) return self fromPostArgs = classmethod(fromPostArgs) def fromOpenIDArgs(cls, openid_args): """Construct a Message from a parsed KVForm message. 
@raises InvalidOpenIDNamespace: if openid.ns is not in L{Message.allowed_openid_namespaces} """ self = cls() self._fromOpenIDArgs(openid_args) return self fromOpenIDArgs = classmethod(fromOpenIDArgs) def _fromOpenIDArgs(self, openid_args): ns_args = [] # Resolve namespaces for rest, value in openid_args.iteritems(): try: ns_alias, ns_key = rest.split('.', 1) except ValueError: ns_alias = NULL_NAMESPACE ns_key = rest if ns_alias == 'ns': self.namespaces.addAlias(value, ns_key) elif ns_alias == NULL_NAMESPACE and ns_key == 'ns': # null namespace self.setOpenIDNamespace(value, False) else: ns_args.append((ns_alias, ns_key, value)) # Implicitly set an OpenID namespace definition (OpenID 1) if not self.getOpenIDNamespace(): self.setOpenIDNamespace(OPENID1_NS, True) # Actually put the pairs into the appropriate namespaces for (ns_alias, ns_key, value) in ns_args: ns_uri = self.namespaces.getNamespaceURI(ns_alias) if ns_uri is None: # we found a namespaced arg without a namespace URI defined ns_uri = self._getDefaultNamespace(ns_alias) if ns_uri is None: ns_uri = self.getOpenIDNamespace() ns_key = '%s.%s' % (ns_alias, ns_key) else: self.namespaces.addAlias(ns_uri, ns_alias, implicit=True) self.setArg(ns_uri, ns_key, value) def _getDefaultNamespace(self, mystery_alias): """OpenID 1 compatibility: look for a default namespace URI to use for this alias.""" global registered_aliases # Only try to map an alias to a default if it's an # OpenID 1.x message. if self.isOpenID1(): return registered_aliases.get(mystery_alias) else: return None def setOpenIDNamespace(self, openid_ns_uri, implicit): """Set the OpenID namespace URI used in this message. 
@raises InvalidOpenIDNamespace: if the namespace is not in L{Message.allowed_openid_namespaces} """ if openid_ns_uri not in self.allowed_openid_namespaces: raise InvalidOpenIDNamespace(openid_ns_uri) self.namespaces.addAlias(openid_ns_uri, NULL_NAMESPACE, implicit) self._openid_ns_uri = openid_ns_uri def getOpenIDNamespace(self): return self._openid_ns_uri def isOpenID1(self): return self.getOpenIDNamespace() in OPENID1_NAMESPACES def isOpenID2(self): return self.getOpenIDNamespace() == OPENID2_NS def fromKVForm(cls, kvform_string): """Create a Message from a KVForm string""" return cls.fromOpenIDArgs(kvform.kvToDict(kvform_string)) fromKVForm = classmethod(fromKVForm) def copy(self): return copy.deepcopy(self) def toPostArgs(self): """Return all arguments with openid. in front of namespaced arguments. """ args = {} # Add namespace definitions to the output for ns_uri, alias in self.namespaces.iteritems(): if self.namespaces.isImplicit(ns_uri): continue if alias == NULL_NAMESPACE: ns_key = 'openid.ns' else: ns_key = 'openid.ns.' + alias args[ns_key] = ns_uri for (ns_uri, ns_key), value in self.args.iteritems(): key = self.getKey(ns_uri, ns_key) args[key] = value.encode('UTF-8') return args def toArgs(self): """Return all namespaced arguments, failing if any non-namespaced arguments exist.""" # FIXME - undocumented exception post_args = self.toPostArgs() kvargs = {} for k, v in post_args.iteritems(): if not k.startswith('openid.'): raise ValueError( 'This message can only be encoded as a POST, because it ' 'contains arguments that are not prefixed with "openid."') else: kvargs[k[7:]] = v return kvargs def toFormMarkup(self, action_url, form_tag_attrs=None, submit_text="Continue"): """Generate HTML form markup that contains the values in this message, to be HTTP POSTed as x-www-form-urlencoded UTF-8. @param action_url: The URL to which the form will be POSTed @type action_url: str @param form_tag_attrs: Dictionary of attributes to be added to the form tag. 
'accept-charset' and 'enctype' have defaults that can be overridden. If a value is supplied for 'action' or 'method', it will be replaced. @type form_tag_attrs: {unicode: unicode} @param submit_text: The text that will appear on the submit button for this form. @type submit_text: unicode @returns: A string containing (X)HTML markup for a form that encodes the values in this Message object. @rtype: str or unicode """ if ElementTree is None: raise RuntimeError('This function requires ElementTree.') assert action_url is not None form = ElementTree.Element('form') if form_tag_attrs: for name, attr in form_tag_attrs.iteritems(): form.attrib[name] = attr form.attrib['action'] = action_url form.attrib['method'] = 'post' form.attrib['accept-charset'] = 'UTF-8' form.attrib['enctype'] = 'application/x-www-form-urlencoded' for name, value in self.toPostArgs().iteritems(): attrs = {'type': 'hidden', 'name': name, 'value': value} form.append(ElementTree.Element('input', attrs)) submit = ElementTree.Element( 'input', {'type':'submit', 'value':submit_text}) form.append(submit) return ElementTree.tostring(form) def toURL(self, base_url): """Generate a GET URL with the parameters in this message attached as query parameters.""" return oidutil.appendArgs(base_url, self.toPostArgs()) def toKVForm(self): """Generate a KVForm string that contains the parameters in this message. This will fail if the message contains arguments outside of the 'openid.' prefix. 
""" return kvform.dictToKV(self.toArgs()) def toURLEncoded(self): """Generate an x-www-urlencoded string""" args = self.toPostArgs().items() args.sort() return urllib.urlencode(args) def _fixNS(self, namespace): """Convert an input value into the internally used values of this object @param namespace: The string or constant to convert @type namespace: str or unicode or BARE_NS or OPENID_NS """ if namespace == OPENID_NS: if self._openid_ns_uri is None: raise UndefinedOpenIDNamespace('OpenID namespace not set') else: namespace = self._openid_ns_uri if namespace != BARE_NS and type(namespace) not in [str, unicode]: raise TypeError( "Namespace must be BARE_NS, OPENID_NS or a string. got %r" % (namespace,)) if namespace != BARE_NS and ':' not in namespace: fmt = 'OpenID 2.0 namespace identifiers SHOULD be URIs. Got %r' warnings.warn(fmt % (namespace,), DeprecationWarning) if namespace == 'sreg': fmt = 'Using %r instead of "sreg" as namespace' warnings.warn(fmt % (SREG_URI,), DeprecationWarning,) return SREG_URI return namespace def hasKey(self, namespace, ns_key): namespace = self._fixNS(namespace) return (namespace, ns_key) in self.args def getKey(self, namespace, ns_key): """Get the key for a particular namespaced argument""" namespace = self._fixNS(namespace) if namespace == BARE_NS: return ns_key ns_alias = self.namespaces.getAlias(namespace) # No alias is defined, so no key can exist if ns_alias is None: return None if ns_alias == NULL_NAMESPACE: tail = ns_key else: tail = '%s.%s' % (ns_alias, ns_key) return 'openid.' + tail def getArg(self, namespace, key, default=None): """Get a value for a namespaced key. @param namespace: The namespace in the message for this key @type namespace: str @param key: The key to get within this namespace @type key: str @param default: The value to use if this key is absent from this message. Using the special value openid.message.no_default will result in this method raising a KeyError instead of returning the default. 
@rtype: str or the type of default @raises KeyError: if default is no_default @raises UndefinedOpenIDNamespace: if the message has not yet had an OpenID namespace set """ namespace = self._fixNS(namespace) args_key = (namespace, key) try: return self.args[args_key] except KeyError: if default is no_default: raise KeyError((namespace, key)) else: return default def getArgs(self, namespace): """Get the arguments that are defined for this namespace URI @returns: mapping from namespaced keys to values @returntype: dict """ namespace = self._fixNS(namespace) return dict([ (ns_key, value) for ((pair_ns, ns_key), value) in self.args.iteritems() if pair_ns == namespace ]) def updateArgs(self, namespace, updates): """Set multiple key/value pairs in one call @param updates: The values to set @type updates: {unicode:unicode} """ namespace = self._fixNS(namespace) for k, v in updates.iteritems(): self.setArg(namespace, k, v) def setArg(self, namespace, key, value): """Set a single argument in this namespace""" assert key is not None assert value is not None namespace = self._fixNS(namespace) self.args[(namespace, key)] = value if not (namespace is BARE_NS): self.namespaces.add(namespace) def delArg(self, namespace, key): namespace = self._fixNS(namespace) del self.args[(namespace, key)] def __repr__(self): return "<%s.%s %r>" % (self.__class__.__module__, self.__class__.__name__, self.args) def __eq__(self, other): return self.args == other.args def __ne__(self, other): return not (self == other) def getAliasedArg(self, aliased_key, default=None): if aliased_key == 'ns': return self.getOpenIDNamespace() if aliased_key.startswith('ns.'): uri = self.namespaces.getNamespaceURI(aliased_key[3:]) if uri is None: if default == no_default: raise KeyError else: return default else: return uri try: alias, key = aliased_key.split('.', 1) except ValueError: # need more than x values to unpack ns = None else: ns = self.namespaces.getNamespaceURI(alias) if ns is None: key = aliased_key ns = 
self.getOpenIDNamespace() return self.getArg(ns, key, default) class NamespaceMap(object): """Maintains a bijective map between namespace uris and aliases. """ def __init__(self): self.alias_to_namespace = {} self.namespace_to_alias = {} self.implicit_namespaces = [] def getAlias(self, namespace_uri): return self.namespace_to_alias.get(namespace_uri) def getNamespaceURI(self, alias): return self.alias_to_namespace.get(alias) def iterNamespaceURIs(self): """Return an iterator over the namespace URIs""" return iter(self.namespace_to_alias) def iterAliases(self): """Return an iterator over the aliases""" return iter(self.alias_to_namespace) def iteritems(self): """Iterate over the mapping @returns: iterator of (namespace_uri, alias) """ return self.namespace_to_alias.iteritems() def addAlias(self, namespace_uri, desired_alias, implicit=False): """Add an alias from this namespace URI to the desired alias """ # Check that desired_alias is not an openid protocol field as # per the spec. assert desired_alias not in OPENID_PROTOCOL_FIELDS, \ "%r is not an allowed namespace alias" % (desired_alias,) # Check that desired_alias does not contain a period as per # the spec. if type(desired_alias) in [str, unicode]: assert '.' not in desired_alias, \ "%r must not contain a dot" % (desired_alias,) # Check that there is not a namespace already defined for # the desired alias current_namespace_uri = self.alias_to_namespace.get(desired_alias) if (current_namespace_uri is not None and current_namespace_uri != namespace_uri): fmt = ('Cannot map %r to alias %r. ' '%r is already mapped to alias %r') msg = fmt % ( namespace_uri, desired_alias, current_namespace_uri, desired_alias) raise KeyError(msg) # Check that there is not already a (different) alias for # this namespace URI alias = self.namespace_to_alias.get(namespace_uri) if alias is not None and alias != desired_alias: fmt = ('Cannot map %r to alias %r. 
' 'It is already mapped to alias %r') raise KeyError(fmt % (namespace_uri, desired_alias, alias)) assert (desired_alias == NULL_NAMESPACE or type(desired_alias) in [str, unicode]), repr(desired_alias) assert namespace_uri not in self.implicit_namespaces self.alias_to_namespace[desired_alias] = namespace_uri self.namespace_to_alias[namespace_uri] = desired_alias if implicit: self.implicit_namespaces.append(namespace_uri) return desired_alias def add(self, namespace_uri): """Add this namespace URI to the mapping, without caring what alias it ends up with""" # See if this namespace is already mapped to an alias alias = self.namespace_to_alias.get(namespace_uri) if alias is not None: return alias # Fall back to generating a numerical alias i = 0 while True: alias = 'ext' + str(i) try: self.addAlias(namespace_uri, alias) except KeyError: i += 1 else: return alias assert False, "Not reached" def isDefined(self, namespace_uri): return namespace_uri in self.namespace_to_alias def __contains__(self, namespace_uri): return self.isDefined(namespace_uri) def isImplicit(self, namespace_uri): return namespace_uri in self.implicit_namespaces
bsd-3-clause
schen119/caffe-windows-cudnn
python/caffe/test/test_python_layer.py
12
1969
# Unit tests for Caffe's Python-layer support: a net of three chained
# Python-implemented layers is built from a throwaway prototxt and its
# forward/backward/reshape behaviour is checked end to end.
# NOTE(review): Python 2 code (uses dict.itervalues below) — do not run under py3 as-is.
import unittest
import tempfile
import os

import caffe


class SimpleLayer(caffe.Layer):
    """A layer that just multiplies by ten"""

    def setup(self, bottom, top):
        # No parameters and no one-time initialisation required.
        pass

    def reshape(self, bottom, top):
        # The output blob mirrors the input blob's shape exactly.
        top[0].reshape(*bottom[0].data.shape)

    def forward(self, bottom, top):
        # y = 10 * x
        top[0].data[...] = 10 * bottom[0].data

    def backward(self, top, propagate_down, bottom):
        # The layer is linear, so dL/dx = 10 * dL/dy.
        bottom[0].diff[...] = 10 * top[0].diff


def python_net_file():
    """Write a temporary prototxt describing three chained SimpleLayers
    ('one' -> 'two' -> 'three') and return its filename.

    delete=False: the caller is responsible for removing the file
    (setUp below deletes it once caffe.Net has parsed it).
    """
    # The prototxt payload is kept verbatim; protobuf text format does not
    # depend on line breaks.
    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write("""name: 'pythonnet' force_backward: true input: 'data' input_shape { dim: 10 dim: 9 dim: 8 } layer { type: 'Python' name: 'one' bottom: 'data' top: 'one' python_param { module: 'test_python_layer' layer: 'SimpleLayer' } } layer { type: 'Python' name: 'two' bottom: 'one' top: 'two' python_param { module: 'test_python_layer' layer: 'SimpleLayer' } } layer { type: 'Python' name: 'three' bottom: 'two' top: 'three' python_param { module: 'test_python_layer' layer: 'SimpleLayer' } }""")
        return f.name


class TestPythonLayer(unittest.TestCase):
    def setUp(self):
        # The prototxt file is only needed while caffe.Net parses it.
        net_file = python_net_file()
        self.net = caffe.Net(net_file, caffe.TRAIN)
        os.remove(net_file)

    def test_forward(self):
        """Three chained x10 layers => output is 10**3 times the input."""
        x = 8
        self.net.blobs['data'].data[...] = x
        self.net.forward()
        for y in self.net.blobs['three'].data.flat:
            self.assertEqual(y, 10 ** 3 * x)

    def test_backward(self):
        """Gradients propagate back through all three layers (x10 each)."""
        x = 7
        self.net.blobs['three'].diff[...] = x
        self.net.backward()
        for y in self.net.blobs['data'].diff.flat:
            self.assertEqual(y, 10 ** 3 * x)

    def test_reshape(self):
        """Reshaping the input propagates the new shape to every blob."""
        s = 4
        self.net.blobs['data'].reshape(s, s, s, s)
        self.net.forward()
        # Python 2 idiom; every blob in the net must have the new shape.
        for blob in self.net.blobs.itervalues():
            for d in blob.data.shape:
                self.assertEqual(s, d)
bsd-2-clause
0x46616c6b/ansible
lib/ansible/modules/cloud/lxd/lxd_profile.py
19
12218
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- module: lxd_profile short_description: Manage LXD profiles version_added: "2.2" description: - Management of LXD profiles author: "Hiroaki Nakamura (@hnakamur)" options: name: description: - Name of a profile. required: true config: description: - 'The config for the container (e.g. {"limits.memory": "4GB"}). See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)' - If the profile already exists and its "config" value in metadata obtained from GET /1.0/profiles/<name> U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#get-19) are different, they this module tries to apply the configurations. - Not all config values are supported to apply the existing profile. Maybe you need to delete and recreate a profile. required: false devices: description: - 'The devices for the profile (e.g. {"rootfs": {"path": "/dev/kvm", "type": "unix-char"}). See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)' required: false new_name: description: - A new name of a profile. - If this parameter is specified a profile will be renamed to this name. 
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-11) required: false state: choices: - present - absent description: - Define the state of a profile. required: false default: present url: description: - The unix domain socket path or the https URL for the LXD server. required: false default: unix:/var/lib/lxd/unix.socket key_file: description: - The client certificate key file path. required: false default: '"{}/.config/lxc/client.key" .format(os.environ["HOME"])' cert_file: description: - The client certificate file path. required: false default: '"{}/.config/lxc/client.crt" .format(os.environ["HOME"])' trust_password: description: - The client trusted password. - You need to set this password on the LXD server before running this module using the following command. lxc config set core.trust_password <some random password> See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/) - If trust_password is set, this module send a request for authentication before sending any requests. required: false notes: - Profiles must have a unique name. If you attempt to create a profile with a name that already existed in the users namespace the module will simply return as "unchanged". ''' EXAMPLES = ''' # An example for creating a profile - hosts: localhost connection: local tasks: - name: Create a profile lxd_profile: name: macvlan state: present config: {} description: my macvlan profile devices: eth0: nictype: macvlan parent: br0 type: nic # An example for creating a profile via http connection - hosts: localhost connection: local tasks: - name: create macvlan profile lxd_profile: url: https://127.0.0.1:8443 # These cert_file and key_file values are equal to the default values. 
#cert_file: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt" #key_file: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key" trust_password: mypassword name: macvlan state: present config: {} description: my macvlan profile devices: eth0: nictype: macvlan parent: br0 type: nic # An example for deleting a profile - hosts: localhost connection: local tasks: - name: Delete a profile lxd_profile: name: macvlan state: absent # An example for renaming a profile - hosts: localhost connection: local tasks: - name: Rename a profile lxd_profile: name: macvlan new_name: macvlan2 state: present ''' RETURN=''' old_state: description: The old state of the profile returned: success type: string sample: "absent" logs: description: The logs of requests and responses. returned: when ansible-playbook is invoked with -vvvv. type: list sample: "(too long to be placed here)" actions: description: List of actions performed for the profile. returned: success type: list sample: '["create"]' ''' import os from ansible.module_utils.lxd import LXDClient, LXDClientException # PROFILE_STATES is a list for states supported PROFILES_STATES = [ 'present', 'absent' ] # CONFIG_PARAMS is a list of config attribute names. CONFIG_PARAMS = [ 'config', 'description', 'devices' ] class LXDProfileManagement(object): def __init__(self, module): """Management of LXC containers via Ansible. :param module: Processed Ansible Module. 
:type module: ``object`` """ self.module = module self.name = self.module.params['name'] self._build_config() self.state = self.module.params['state'] self.new_name = self.module.params.get('new_name', None) self.url = self.module.params['url'] self.key_file = self.module.params.get('key_file', None) self.cert_file = self.module.params.get('cert_file', None) self.debug = self.module._verbosity >= 4 try: self.client = LXDClient( self.url, key_file=self.key_file, cert_file=self.cert_file, debug=self.debug ) except LXDClientException as e: self.module.fail_json(msg=e.msg) self.trust_password = self.module.params.get('trust_password', None) self.actions = [] def _build_config(self): self.config = {} for attr in CONFIG_PARAMS: param_val = self.module.params.get(attr, None) if param_val is not None: self.config[attr] = param_val def _get_profile_json(self): return self.client.do( 'GET', '/1.0/profiles/{0}'.format(self.name), ok_error_codes=[404] ) @staticmethod def _profile_json_to_module_state(resp_json): if resp_json['type'] == 'error': return 'absent' return 'present' def _update_profile(self): if self.state == 'present': if self.old_state == 'absent': if self.new_name is None: self._create_profile() else: self.module.fail_json( msg='new_name must not be set when the profile does not exist and the specified state is present', changed=False) else: if self.new_name is not None and self.new_name != self.name: self._rename_profile() if self._needs_to_apply_profile_configs(): self._apply_profile_configs() elif self.state == 'absent': if self.old_state == 'present': if self.new_name is None: self._delete_profile() else: self.module.fail_json( msg='new_name must not be set when the profile exists and the specified state is absent', changed=False) def _create_profile(self): config = self.config.copy() config['name'] = self.name self.client.do('POST', '/1.0/profiles', config) self.actions.append('create') def _rename_profile(self): config = {'name': self.new_name} 
self.client.do('POST', '/1.0/profiles/{}'.format(self.name), config) self.actions.append('rename') self.name = self.new_name def _needs_to_change_profile_config(self, key): if key not in self.config: return False old_configs = self.old_profile_json['metadata'].get(key, None) return self.config[key] != old_configs def _needs_to_apply_profile_configs(self): return ( self._needs_to_change_profile_config('config') or self._needs_to_change_profile_config('description') or self._needs_to_change_profile_config('devices') ) def _apply_profile_configs(self): config = self.old_profile_json.copy() for k, v in self.config.items(): config[k] = v self.client.do('PUT', '/1.0/profiles/{}'.format(self.name), config) self.actions.append('apply_profile_configs') def _delete_profile(self): self.client.do('DELETE', '/1.0/profiles/{}'.format(self.name)) self.actions.append('delete') def run(self): """Run the main method.""" try: if self.trust_password is not None: self.client.authenticate(self.trust_password) self.old_profile_json = self._get_profile_json() self.old_state = self._profile_json_to_module_state(self.old_profile_json) self._update_profile() state_changed = len(self.actions) > 0 result_json = { 'changed': state_changed, 'old_state': self.old_state, 'actions': self.actions } if self.client.debug: result_json['logs'] = self.client.logs self.module.exit_json(**result_json) except LXDClientException as e: state_changed = len(self.actions) > 0 fail_params = { 'msg': e.msg, 'changed': state_changed, 'actions': self.actions } if self.client.debug: fail_params['logs'] = e.kwargs['logs'] self.module.fail_json(**fail_params) def main(): """Ansible Main module.""" module = AnsibleModule( argument_spec=dict( name=dict( type='str', required=True ), new_name=dict( type='str', ), config=dict( type='dict', ), description=dict( type='str', ), devices=dict( type='dict', ), state=dict( choices=PROFILES_STATES, default='present' ), url=dict( type='str', default='unix:/var/lib/lxd/unix.socket' 
), key_file=dict( type='str', default='{}/.config/lxc/client.key'.format(os.environ['HOME']) ), cert_file=dict( type='str', default='{}/.config/lxc/client.crt'.format(os.environ['HOME']) ), trust_password=dict( type='str', no_log=True) ), supports_check_mode=False, ) lxd_manage = LXDProfileManagement(module=module) lxd_manage.run() # import module bits from ansible.module_utils.basic import * if __name__ == '__main__': main()
gpl-3.0
CubicERP/odoo
addons/website_forum/__openerp__.py
321
1905
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Forum', 'category': 'Website', 'summary': 'Forum, FAQ, Q&A', 'version': '1.0', 'description': """ Ask questions, get answers, no distractions """, 'author': 'OpenERP SA', 'website': 'https://www.odoo.com/page/community-builder', 'depends': [ 'auth_signup', 'gamification', 'website_mail', 'website_partner' ], 'data': [ 'data/forum_data.xml', 'views/forum.xml', 'views/res_users.xml', 'views/website_forum.xml', 'views/ir_qweb.xml', 'security/ir.model.access.csv', 'data/badges_question.xml', 'data/badges_answer.xml', 'data/badges_participation.xml', 'data/badges_moderation.xml', ], 'qweb': [ 'static/src/xml/*.xml' ], 'demo': [ 'data/forum_demo.xml', ], 'installable': True, 'application': True, }
agpl-3.0
ravenshooter/BA_Analysis
Preprocess.py
1
5604
"""Preprocessing utilities for recorded sensor data (fused/gyro/acc + targets).

Reads raw CSV recordings, separates the sensor channels, normalizes them,
and cuts per-gesture training segments.  Python 2 module (uses the `thread`
module and raw `input()` prompts); only single-argument, py2/py3-compatible
print calls are used.
"""
import numpy
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import scipy
import mdp
import csv
from thread import start_new_thread

import DataSet
from DataAnalysis import plot
from Main import getProjectPath


def readFileToNumpy(fileName):
    """Read a comma-separated recording, skip the header row, and return
    the remaining rows as a float ndarray."""
    # 'rb' matches the Python 2 csv module's expectations; the handle is
    # closed deterministically instead of being leaked as before.
    with open(fileName, "rb") as csvFile:
        rows = list(csv.reader(csvFile, delimiter=','))
    return numpy.array(rows[1:]).astype('float')


def separateInputData(fileData, removeErrors=True):
    """Split a raw recording into (fused, gyro, acc, targets) channel blocks.

    Column layout (as sliced below): 1:4 fused, 4:7 gyro, 7:10 acc,
    10: target signals.  Column 0 is skipped entirely (presumably a
    timestamp/index — TODO confirm against the recorder).
    """
    if removeErrors:
        # Rows whose last column is non-zero are flagged as errors; keep
        # only the rows where that flag is falsy.
        error_inds = fileData[:, -1] == False
        fileData = fileData[error_inds]
    fused = numpy.atleast_2d(fileData[:, 1:4])
    gyro = numpy.atleast_2d(fileData[:, 4:7])
    acc = numpy.atleast_2d(fileData[:, 7:10])
    targets = numpy.atleast_2d(fileData[:, 10:])
    return fused, gyro, acc, targets


def transformToDelta(vals):
    """Return per-sample differences vals[i] - vals[i-1].

    The output keeps the input's length: row k holds the delta between
    samples k+1 and k, and the final row stays all-zero.
    """
    newVals = numpy.zeros((len(vals), len(vals[0])))
    for i in range(1, len(vals)):
        newVals[i - 1] = vals[i] - vals[i - 1]
    return newVals


def removeLOverflow(fused):
    """Undo +/-pi wrap-around jumps in the fused angle channels, in place.

    Whenever consecutive samples in a column jump by more than pi, every
    subsequent sample in that column is negated (flips accumulate).
    """
    for j in range(0, 3):
        for i in range(1, len(fused)):
            if numpy.abs(fused[i - 1, j] - fused[i, j]) > numpy.pi:
                fused[i:, j] = fused[i:, j] * -1
    return fused


def applyActivationFilter(inputData, width):
    """Return a (n, 1) activity trace: the mean absolute channel sum over a
    symmetric window of +/- `width` samples.

    Entries closer than `width` to either edge are left at zero.
    """
    actLevel = numpy.sum(numpy.abs(inputData), 1)
    target = numpy.zeros((len(inputData), 1))
    # BUGFIX: the original used range(width, len(inputData - width)), where
    # `inputData - width` is *elementwise* ndarray arithmetic, so len() was
    # unchanged and the trailing window silently clipped at the array end.
    for i in range(width, len(inputData) - width):
        target[i] = numpy.mean(actLevel[i - width:i + width])
    return target


def centerAndNormalize(inputData):
    """Column-wise standardize: returns (normalized, means, stds)."""
    means = numpy.mean(inputData, 0)
    centered = inputData - means
    stds = numpy.std(centered, 0)  # renamed from `vars` (shadowed a builtin)
    normalized = centered / stds
    return normalized, means, stds


def getTrainingBeginAndEndIndex(targetSig):
    """Return (begin, end) indices bracketing the active (==1) region of a
    target signal; begin is one sample before the first hit."""
    beginInd = 0
    endInd = len(targetSig)
    for i in range(0, len(targetSig)):
        if targetSig[i] == 1:
            beginInd = i - 1
            break
    for i in range(0, len(targetSig)):
        if targetSig[len(targetSig) - 1 - i] == 1:
            endInd = len(targetSig) - i
            break
    return beginInd, endInd


def formatDataSet(data):
    """Interactively crop a recording to [Start:End) rows."""
    print(data.shape)
    newStart = input("Start:")
    newEnd = input("End:")
    newData = data[newStart:newEnd, :]
    return newData


def formatTargetFilter(data):
    """Interactively threshold the activity column and plot the result."""
    treshold = input('Treshold:')
    targetFunction = applyFormatTargetFilter(data, treshold)
    plt.figure()
    plt.plot(data[:, 9])
    plt.plot(data[:, 10])
    plt.plot(targetFunction)
    return targetFunction


def applyFormatTargetFilter(data, treshold):
    """Binarize column 10 against `treshold`; returns an (n, 1) float array."""
    targetFunction = (data[:, 10] > treshold).astype(float)
    return numpy.atleast_2d(targetFunction).T


def removeArea(data):
    """Interactively cut rows [Start:End) out of a recording."""
    cutOutStart = input("Start:")
    cutOutEnd = input("End:")
    newDataStart = data[:cutOutStart, :]
    newDataEnd = data[cutOutEnd:, :]
    return numpy.concatenate((newDataStart, newDataEnd))


def plotData(data):
    """Plot fused/gyro/acc channels plus the two target columns (9 and 10)."""
    plt.figure()
    plt.clf()
    plt.subplot(411)
    plt.title('Fused')
    plt.plot(data[:, 0:3])
    plt.plot(data[:, 9])
    plt.plot(data[:, 10])
    plt.subplot(412)
    plt.title('Gyro')
    plt.plot(data[:, 3:6])
    plt.plot(data[:, 9])
    plt.plot(data[:, 10])
    plt.subplot(413)
    plt.title('Acc')
    plt.plot(data[:, 6:9])
    plt.plot(data[:, 9])
    plt.plot(data[:, 10])
    plt.subplot(414)
    plt.title('Targets')
    plt.plot(data[:, 9])
    plt.plot(data[:, 10])
    plt.show()


def writeToCSV(data, fileName):
    """Write `data` to <project>\\dataSets\\<fileName>.csv (semicolon-separated)."""
    numpy.savetxt(getProjectPath() + "\\dataSets\\" + fileName + ".csv", data, delimiter=";")


def safeToDataSet(fileName, data, means, stds, gestures, targetTreshold):
    """Persist a cropped recording as a DataSet, appending the thresholded
    target column to the existing target block."""
    ds = DataSet.DataSet(data[:, 0:3], data[:, 3:6], data[:, 6:9],
                         numpy.append(data[:, 9:], applyFormatTargetFilter(data, targetTreshold), 1),
                         means, stds, gestures)
    ds.writeToFile(fileName)


def load(nr):
    """Load recording nadja_<nr>.csv into the module-global `i` and plot it."""
    global i
    plt.close('all')
    i = readFile("nadja\\nadja_" + str(nr) + ".csv")
    plotData(i)


def safe(inputData, aaa, nr):
    """Append column `aaa` to `inputData` and write it as nadja_fitted_<nr>."""
    writeToCSV(numpy.concatenate((inputData, numpy.atleast_2d(aaa).T), 1),
               "nadja_fitted_" + str(nr))


def readFile(fileName):
    """Read a recording from the project's dataSets folder."""
    return readFileToNumpy(getProjectPath() + 'dataSets\\' + fileName)


if __name__ == '__main__':
    # def main():
    inputFileName = ["2016-03-14-10-30-47-nike_fullSet_0.csv"]
    fileData = numpy.zeros((1, 31))
    for fileName in inputFileName:
        newData = readFileToNumpy(getProjectPath() + 'dataSets\\' + fileName)
        print(newData.shape)
        fileData = numpy.append(fileData, newData, 0)
    fused, gyro, acc, targets = separateInputData(fileData)
    # Optional preprocessing steps, kept disabled as in the original:
    #fused = removeLOverflow(fused)
    #fused = transformToDelta(fused)
    _, f_means, f_stds = centerAndNormalize(fused)
    _, g_means, g_stds = centerAndNormalize(gyro)
    _, a_means, a_stds = centerAndNormalize(acc)
    means = numpy.concatenate((f_means, g_means, a_means), 0)
    stds = numpy.concatenate((f_stds, g_stds, a_stds), 0)
    gestures = numpy.max(targets, 0)
    dataSets = []
    gestureSets = []
    # Cut one training segment per target column and stack its channels,
    # target signal, and smoothed activity level side by side.
    for i in range(0, len(targets[0])):
        start, end = getTrainingBeginAndEndIndex(targets[:, i])
        t_fused = fused[start:end, :]
        t_gyro = gyro[start:end, :]
        t_acc = acc[start:end, :]
        t_target = numpy.atleast_2d(targets[start:end, i]).T
        t_accFilter = applyActivationFilter(numpy.concatenate((t_fused, t_gyro, t_acc), 1), 6)
        a = numpy.concatenate((t_fused, t_gyro, t_acc, t_target, t_accFilter), 1)
        dataSets.append(a)
        gestureSets.append(numpy.max(targets[start:end, :], 0))
mit
cneill/barbican
barbican/tests/api/middleware/test_context.py
2
1970
# Copyright (c) 2015 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock import oslotest.base as oslotest from barbican.api.middleware import context class TestUnauthenticatedContextMiddleware(oslotest.BaseTestCase): def setUp(self): super(TestUnauthenticatedContextMiddleware, self).setUp() self.app = mock.MagicMock() self.middleware = context.UnauthenticatedContextMiddleware(self.app) def test_role_defaults_to_admin(self): request = mock.MagicMock() request.headers = {'X-Project-Id': 'trace'} request.environ = {} with mock.patch('barbican.context.RequestContext') as rc: self.middleware.process_request(request) rc.assert_called_with( project='trace', is_admin=True, user=None, roles=['admin'], request_id=request.request_id ) def test_role_used_from_header(self): request = mock.MagicMock() request.headers = {'X-Project-Id': 'trace', 'X-Roles': 'something'} request.environ = {} with mock.patch('barbican.context.RequestContext') as rc: self.middleware.process_request(request) rc.assert_called_with( project='trace', is_admin=False, user=None, roles=['something'], request_id=request.request_id )
apache-2.0
OSSESAC/odoopubarquiluz
addons/membership/report/report_membership.py
52
6137
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import fields, osv
from openerp import tools
import openerp.addons.decimal_precision as dp

# Possible membership states of a partner (value, label).
STATE = [
    ('none', 'Non Member'),
    ('canceled', 'Cancelled Member'),
    ('old', 'Old Member'),
    ('waiting', 'Waiting Member'),
    ('invoiced', 'Invoiced Member'),
    ('free', 'Free Member'),
    ('paid', 'Paid Member'),
]


class report_membership(osv.osv):
    # NOTE: the docstring doubles as _description below — keep it unchanged.
    '''Membership Analysis'''
    _name = 'report.membership'
    _description = __doc__
    _auto = False          # backed by the SQL view created in init(), not a table
    _rec_name = 'year'
    _columns = {
        'year': fields.char('Year', size=4, readonly=True, select=1),
        'month': fields.selection([('01', 'January'), ('02', 'February'),
                                   ('03', 'March'), ('04', 'April'),
                                   ('05', 'May'), ('06', 'June'),
                                   ('07', 'July'), ('08', 'August'),
                                   ('09', 'September'), ('10', 'October'),
                                   ('11', 'November'), ('12', 'December')], 'Month', readonly=True),
        'date_from': fields.datetime('Start Date', readonly=True, help="Start membership date"),
        'date_to': fields.datetime('End Date', readonly=True, help="End membership date"),
        'num_waiting': fields.integer('# Waiting', readonly=True),
        'num_invoiced': fields.integer('# Invoiced', readonly=True),
        'num_paid': fields.integer('# Paid', readonly=True),
        'tot_pending': fields.float('Pending Amount', digits_compute=dp.get_precision('Account'), readonly=True),
        'tot_earned': fields.float('Earned Amount', digits_compute=dp.get_precision('Account'), readonly=True),
        'partner_id': fields.many2one('res.partner', 'Member', readonly=True),
        'associate_member_id': fields.many2one('res.partner', 'Associate Member', readonly=True),
        'membership_id': fields.many2one('product.product', 'Membership Product', readonly=True),
        'membership_state': fields.selection(STATE, 'Current Membership State', readonly=True),
        'user_id': fields.many2one('res.users', 'Salesperson', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', readonly=True)
    }

    def init(self, cr):
        '''Create the view'''
        # Aggregates membership lines and their invoice lines per partner,
        # month, product and company.  The inner query classifies each
        # membership line by state; the outer one counts/sums per group.
        tools.drop_view_if_exists(cr, 'report_membership')
        cr.execute("""
        CREATE OR REPLACE VIEW report_membership AS (
        SELECT
        MIN(id) AS id,
        partner_id,
        user_id,
        membership_state,
        associate_member_id,
        membership_amount,
        date_from,
        date_to,
        year,
        month,
        COUNT(num_waiting) AS num_waiting,
        COUNT(num_invoiced) AS num_invoiced,
        COUNT(num_paid) AS num_paid,
        SUM(tot_pending) AS tot_pending,
        SUM(tot_earned) AS tot_earned,
        membership_id,
        company_id
        FROM
        (SELECT
        MIN(p.id) AS id,
        p.id AS partner_id,
        p.user_id AS user_id,
        p.membership_state AS membership_state,
        p.associate_member AS associate_member_id,
        p.membership_amount AS membership_amount,
        TO_CHAR(p.membership_start, 'YYYY-MM-DD') AS date_from,
        TO_CHAR(p.membership_stop, 'YYYY-MM-DD') AS date_to,
        TO_CHAR(p.membership_start, 'YYYY') AS year,
        TO_CHAR(p.membership_start,'MM') AS month,
        CASE WHEN ml.state = 'waiting'  THEN ml.id END AS num_waiting,
        CASE WHEN ml.state = 'invoiced' THEN ml.id END AS num_invoiced,
        CASE WHEN ml.state = 'paid'     THEN ml.id END AS num_paid,
        CASE WHEN ml.state IN ('waiting', 'invoiced') THEN SUM(il.price_subtotal) ELSE 0 END AS tot_pending,
        CASE WHEN ml.state = 'paid' OR p.membership_state = 'old' THEN SUM(il.price_subtotal) ELSE 0 END AS tot_earned,
        ml.membership_id AS membership_id,
        p.company_id AS company_id
        FROM res_partner p
        LEFT JOIN membership_membership_line ml ON (ml.partner = p.id)
        LEFT JOIN account_invoice_line il ON (ml.account_invoice_line = il.id)
        LEFT JOIN account_invoice ai ON (il.invoice_id = ai.id)
        WHERE p.membership_state != 'none' and p.active = 'true'
        GROUP BY
          p.id,
          p.user_id,
          p.membership_state,
          p.associate_member,
          p.membership_amount,
          TO_CHAR(p.membership_start, 'YYYY-MM-DD'),
          TO_CHAR(p.membership_stop, 'YYYY-MM-DD'),
          TO_CHAR(p.membership_start, 'YYYY'),
          TO_CHAR(p.membership_start,'MM'),
          ml.membership_id,
          p.company_id,
          ml.state,
          ml.id
        ) AS foo
        GROUP BY
          year,
          month,
          date_from,
          date_to,
          partner_id,
          user_id,
          membership_id,
          company_id,
          membership_state,
          associate_member_id,
          membership_amount
        )""")

report_membership()

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
0k/OpenUpgrade
addons/product_margin/__init__.py
444
1092
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import wizard import product_margin # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
JosmanPS/scikit-learn
sklearn/linear_model/least_angle.py
37
53448
"""
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function

# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
#         Alexandre Gramfort <alexandre.gramfort@inria.fr>
#         Gael Varoquaux
#
# License: BSD 3 clause

from math import log
import sys
import warnings
from distutils.version import LooseVersion

import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs

from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..cross_validation import check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange

import scipy
# Newer scipy lets us skip NaN/inf checks inside solve_triangular for speed;
# the kwarg does not exist before 0.12, hence the version gate.
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
    solve_triangular_args = {'check_finite': False}


def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
              alpha_min=0, method='lar', copy_X=True,
              eps=np.finfo(np.float).eps,
              copy_Gram=True, verbose=0, return_path=True,
              return_n_iter=False, positive=False):
    """Compute Least Angle Regression or Lasso path using LARS algorithm [1]

    The optimization objective for the case method='lasso' is::

    (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    in the case of method='lars', the objective function is only known in
    the form of an implicit equation (see discussion in [1])

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    -----------
    X : array, shape: (n_samples, n_features)
        Input data.

    y : array, shape: (n_samples)
        Input targets.

    positive : boolean (default=False)
        Restrict coefficients to be >= 0.
        When using this option together with method 'lasso' the model
        coefficients will not converge to the ordinary-least-squares
        solution for small values of alpha (neither will they when using
        method 'lar' ..). Only coefficients up to the smallest alpha value
        (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by
        the stepwise Lars-Lasso algorithm are typically in congruence
        with the solution of the coordinate descent lasso_path function.

    max_iter : integer, optional (default=500)
        Maximum number of iterations to perform, set to infinity for no
        limit.

    Gram : None, 'auto', array, shape: (n_features, n_features), optional
        Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
        matrix is precomputed from the given X, if there are more samples
        than features.

    alpha_min : float, optional (default=0)
        Minimum correlation along the path. It corresponds to the
        regularization parameter alpha parameter in the Lasso.

    method : {'lar', 'lasso'}, optional (default='lar')
        Specifies the returned model. Select ``'lar'`` for Least Angle
        Regression, ``'lasso'`` for the Lasso.

    eps : float, optional (default=``np.finfo(np.float).eps``)
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.

    copy_X : bool, optional (default=True)
        If ``False``, ``X`` is overwritten.

    copy_Gram : bool, optional (default=True)
        If ``False``, ``Gram`` is overwritten.

    verbose : int (default=0)
        Controls output verbosity.

    return_path : bool, optional (default=True)
        If ``return_path==True`` returns the entire path, else returns only
        the last point of the path.

    return_n_iter : bool, optional (default=False)
        Whether to return the number of iterations.

    Returns
    --------
    alphas : array, shape: [n_alphas + 1]
        Maximum of covariances (in absolute value) at each iteration.
        ``n_alphas`` is either ``max_iter``, ``n_features`` or the
        number of nodes in the path with ``alpha >= alpha_min``, whichever
        is smaller.

    active : array, shape [n_alphas]
        Indices of active variables at the end of the path.

    coefs : array, shape (n_features, n_alphas + 1)
        Coefficients along the path

    n_iter : int
        Number of iterations run. Returned only if return_n_iter is set
        to True.

    See also
    --------
    lasso_path
    LassoLars
    Lars
    LassoLarsCV
    LarsCV
    sklearn.decomposition.sparse_encode

    References
    ----------
    .. [1] "Least Angle Regression", Efron et al.
           http://www-stat.stanford.edu/~tibs/ftp/lars.pdf

    .. [2] `Wikipedia entry on the Least-angle regression
           <http://en.wikipedia.org/wiki/Least-angle_regression>`_

    .. [3] `Wikipedia entry on the Lasso
           <http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_

    """
    n_features = X.shape[1]
    n_samples = y.size
    max_features = min(max_iter, n_features)

    if return_path:
        coefs = np.zeros((max_features + 1, n_features))
        alphas = np.zeros(max_features + 1)
    else:
        coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
        alpha, prev_alpha = np.array([0.]), np.array([0.])  # better ideas?

    n_iter, n_active = 0, 0
    active, indices = list(), np.arange(n_features)
    # holds the sign of covariance
    sign_active = np.empty(max_features, dtype=np.int8)
    drop = False

    # will hold the cholesky factorization. Only lower part is
    # referenced.
    # We are initializing this to "zeros" and not empty, because
    # it is passed to scipy linalg functions and thus if it has NaNs,
    # even if they are in the upper part that is not used, we
    # get errors raised.
    # Once we support only scipy > 0.12 we can use check_finite=False and
    # go back to "empty"
    L = np.zeros((max_features, max_features), dtype=X.dtype)
    swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
    solve_cholesky, = get_lapack_funcs(('potrs',), (X,))

    if Gram is None:
        if copy_X:
            # force copy. setting the array to be fortran-ordered
            # speeds up the calculation of the (partial) Gram matrix
            # and allows to easily swap columns
            X = X.copy('F')
    elif Gram == 'auto':
        Gram = None
        if X.shape[0] > X.shape[1]:
            Gram = np.dot(X.T, X)
    elif copy_Gram:
        Gram = Gram.copy()

    if Xy is None:
        Cov = np.dot(X.T, y)
    else:
        Cov = Xy.copy()

    if verbose:
        if verbose > 1:
            print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
        else:
            sys.stdout.write('.')
            sys.stdout.flush()

    tiny = np.finfo(np.float).tiny  # to avoid division by 0 warning
    tiny32 = np.finfo(np.float32).tiny  # to avoid division by 0 warning
    equality_tolerance = np.finfo(np.float32).eps

    while True:
        if Cov.size:
            # C is the maximal absolute (or signed, if positive=True)
            # correlation of the remaining inactive regressors with the
            # current residual; it drives the next variable selection.
            if positive:
                C_idx = np.argmax(Cov)
            else:
                C_idx = np.argmax(np.abs(Cov))

            C_ = Cov[C_idx]

            if positive:
                C = C_
            else:
                C = np.fabs(C_)
        else:
            C = 0.

        if return_path:
            alpha = alphas[n_iter, np.newaxis]
            coef = coefs[n_iter]
            prev_alpha = alphas[n_iter - 1, np.newaxis]
            prev_coef = coefs[n_iter - 1]

        alpha[0] = C / n_samples
        if alpha[0] <= alpha_min + equality_tolerance:  # early stopping
            if abs(alpha[0] - alpha_min) > equality_tolerance:
                # interpolation factor 0 <= ss < 1
                if n_iter > 0:
                    # In the first iteration, all alphas are zero, the formula
                    # below would make ss a NaN
                    ss = ((prev_alpha[0] - alpha_min) /
                          (prev_alpha[0] - alpha[0]))
                    coef[:] = prev_coef + ss * (coef - prev_coef)
                alpha[0] = alpha_min
            if return_path:
                coefs[n_iter] = coef
            break

        if n_iter >= max_iter or n_active >= n_features:
            break

        if not drop:

            ##########################################################
            # Append x_j to the Cholesky factorization of (Xa * Xa') #
            #                                                        #
            #            ( L   0 )                                   #
            #     L  ->  (       )  , where L * w = Xa' x_j          #
            #            ( w   z )    and z = ||x_j||                #
            #                                                        #
            ##########################################################

            if positive:
                sign_active[n_active] = np.ones_like(C_)
            else:
                sign_active[n_active] = np.sign(C_)
            m, n = n_active, C_idx + n_active

            Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
            indices[n], indices[m] = indices[m], indices[n]
            Cov_not_shortened = Cov
            Cov = Cov[1:]  # remove Cov[0]

            if Gram is None:
                X.T[n], X.T[m] = swap(X.T[n], X.T[m])
                c = nrm2(X.T[n_active]) ** 2
                L[n_active, :n_active] = \
                    np.dot(X.T[n_active], X.T[:n_active].T)
            else:
                # swap does only work inplace if matrix is fortran
                # contiguous ...
                Gram[m], Gram[n] = swap(Gram[m], Gram[n])
                Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
                c = Gram[n_active, n_active]
                L[n_active, :n_active] = Gram[n_active, :n_active]

            # Update the cholesky decomposition for the Gram matrix
            if n_active:
                linalg.solve_triangular(L[:n_active, :n_active],
                                        L[n_active, :n_active],
                                        trans=0, lower=1,
                                        overwrite_b=True,
                                        **solve_triangular_args)

            v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
            diag = max(np.sqrt(np.abs(c - v)), eps)
            L[n_active, n_active] = diag

            if diag < 1e-7:
                # The system is becoming too ill-conditioned.
                # We have degenerate vectors in our active set.
                # We'll 'drop for good' the last regressor added.

                # Note: this case is very rare. It is no longer triggered by
                # the test suite. The `equality_tolerance` margin added in
                # 0.16.0 to get early stopping to work consistently on all
                # versions of Python including 32 bit Python under Windows
                # seems to make it very difficult to trigger the 'drop for
                # good' strategy.
                warnings.warn('Regressors in active set degenerate. '
                              'Dropping a regressor, after %i iterations, '
                              'i.e. alpha=%.3e, '
                              'with an active set of %i regressors, and '
                              'the smallest cholesky pivot element being %.3e'
                              % (n_iter, alpha, n_active, diag),
                              ConvergenceWarning)
                # XXX: need to figure a 'drop for good' way
                Cov = Cov_not_shortened
                Cov[0] = 0
                Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
                continue

            active.append(indices[n_active])
            n_active += 1

            if verbose > 1:
                print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
                                                      n_active, C))

        if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
            # alpha is increasing. This is because the updates of Cov are
            # bringing in too much numerical error that is greater than
            # than the remaining correlation with the
            # regressors. Time to bail out
            warnings.warn('Early stopping the lars path, as the residues '
                          'are small and the current value of alpha is no '
                          'longer well controlled. %i iterations, alpha=%.3e, '
                          'previous alpha=%.3e, with an active set of %i '
                          'regressors.'
                          % (n_iter, alpha, prev_alpha, n_active),
                          ConvergenceWarning)
            break

        # least squares solution
        least_squares, info = solve_cholesky(L[:n_active, :n_active],
                                             sign_active[:n_active],
                                             lower=True)

        if least_squares.size == 1 and least_squares == 0:
            # This happens because sign_active[:n_active] = 0
            least_squares[...] = 1
            AA = 1.
        else:
            # is this really needed ?
            AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))

            if not np.isfinite(AA):
                # L is too ill-conditioned
                # Regularize the diagonal with geometrically growing jitter
                # until the normalization constant becomes finite.
                i = 0
                L_ = L[:n_active, :n_active].copy()
                while not np.isfinite(AA):
                    L_.flat[::n_active + 1] += (2 ** i) * eps
                    least_squares, info = solve_cholesky(
                        L_, sign_active[:n_active], lower=True)
                    tmp = max(np.sum(least_squares * sign_active[:n_active]),
                              eps)
                    AA = 1. / np.sqrt(tmp)
                    i += 1
            least_squares *= AA

        if Gram is None:
            # equiangular direction of variables in the active set
            eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and
            # the equiangular vector
            corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
        else:
            # if huge number of features, this takes 50% of time, I
            # think could be avoided if we just update it using an
            # orthogonal (QR) decomposition of X
            corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
                                 least_squares)

        g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
        if positive:
            gamma_ = min(g1, C / AA)
        else:
            g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
            gamma_ = min(g1, g2, C / AA)

        # TODO: better names for these variables: z
        drop = False
        z = -coef[active] / (least_squares + tiny32)
        z_pos = arrayfuncs.min_pos(z)
        if z_pos < gamma_:
            # some coefficients have changed sign
            idx = np.where(z == z_pos)[0][::-1]

            # update the sign, important for LAR
            sign_active[idx] = -sign_active[idx]

            if method == 'lasso':
                gamma_ = z_pos
            drop = True

        n_iter += 1

        if return_path:
            if n_iter >= coefs.shape[0]:
                del coef, alpha, prev_alpha, prev_coef
                # resize the coefs and alphas array
                add_features = 2 * max(1, (max_features - n_active))
                coefs = np.resize(coefs, (n_iter + add_features, n_features))
                alphas = np.resize(alphas, n_iter + add_features)
            coef = coefs[n_iter]
            prev_coef = coefs[n_iter - 1]
            alpha = alphas[n_iter, np.newaxis]
            prev_alpha = alphas[n_iter - 1, np.newaxis]
        else:
            # mimic the effect of incrementing n_iter on the array references
            prev_coef = coef
            prev_alpha[0] = alpha[0]
            coef = np.zeros_like(coef)

        coef[active] = prev_coef[active] + gamma_ * least_squares

        # update correlations
        Cov -= gamma_ * corr_eq_dir

        # See if any coefficient has changed sign
        if drop and method == 'lasso':

            # handle the case when idx is not length of 1
            [arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
                idx]

            n_active -= 1
            m, n = idx, n_active
            # handle the case when idx is not length of 1
            drop_idx = [active.pop(ii) for ii in idx]

            if Gram is None:
                # propagate dropped variable
                for ii in idx:
                    for i in range(ii, n_active):
                        X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
                        # yeah this is stupid
                        indices[i], indices[i + 1] = indices[i + 1], indices[i]

                # TODO: this could be updated
                residual = y - np.dot(X[:, :n_active], coef[active])
                temp = np.dot(X.T[n_active], residual)

                Cov = np.r_[temp, Cov]
            else:
                for ii in idx:
                    for i in range(ii, n_active):
                        indices[i], indices[i + 1] = indices[i + 1], indices[i]
                        Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
                        Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
                                                          Gram[:, i + 1])

                # Cov_n = Cov_j + x_j * X + increment(betas) TODO:
                # will this still work with multiple drops ?

                # recompute covariance. Probably could be done better
                # wrong as Xy is not swapped with the rest of variables

                # TODO: this could be updated
                residual = y - np.dot(X, coef)
                temp = np.dot(X.T[drop_idx], residual)
                Cov = np.r_[temp, Cov]

            sign_active = np.delete(sign_active, idx)
            sign_active = np.append(sign_active, 0.)  # just to maintain size
            if verbose > 1:
                print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
                                                      n_active, abs(temp)))

    if return_path:
        # resize coefs in case of early stop
        alphas = alphas[:n_iter + 1]
        coefs = coefs[:n_iter + 1]

        if return_n_iter:
            return alphas, active, coefs.T, n_iter
        else:
            return alphas, active, coefs.T
    else:
        if return_n_iter:
            return alpha, active, coef, n_iter
        else:
            return alpha, active, coef


###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
    """Least Angle Regression model a.k.a. LAR

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    n_nonzero_coefs : int, optional
        Target number of non-zero coefficients. Use ``np.inf`` for no limit.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    positive : boolean (default=False)
        Restrict coefficients to be >= 0. Be aware that you might want to
        remove fit_intercept which is set True by default.

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.

    fit_path : boolean
        If True the full path is stored in the ``coef_path_`` attribute.
        If you compute the solution for a large problem or many targets,
        setting ``fit_path`` to ``False`` will lead to a speedup, especially
        with a small alpha.

    Attributes
    ----------
    alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
        Maximum of covariances (in absolute value) at each iteration. \
        ``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
        whichever is smaller.

    active_ : list, length = n_alphas | list of n_targets such lists
        Indices of active variables at the end of the path.

    coef_path_ : array, shape (n_features, n_alphas + 1) \
        | list of n_targets such arrays
        The varying values of the coefficients along the path. It is not
        present if the ``fit_path`` parameter is ``False``.

    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the formulation formula).

    intercept_ : float | array, shape (n_targets,)
        Independent term in decision function.

    n_iter_ : array-like or int
        The number of iterations taken by lars_path to find the
        grid of alphas for each target.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lars(n_nonzero_coefs=1)
    >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
       n_nonzero_coefs=1, normalize=True, positive=False, precompute='auto',
       verbose=False)
    >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    [ 0. -1.11...]

    See also
    --------
    lars_path, LarsCV
    sklearn.decomposition.sparse_encode

    """
    def __init__(self, fit_intercept=True, verbose=False, normalize=True,
                 precompute='auto', n_nonzero_coefs=500,
                 eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
                 positive=False):
        self.fit_intercept = fit_intercept
        self.verbose = verbose
        self.normalize = normalize
        self.method = 'lar'
        self.precompute = precompute
        self.n_nonzero_coefs = n_nonzero_coefs
        self.positive = positive
        self.eps = eps
        self.copy_X = copy_X
        self.fit_path = fit_path

    def _get_gram(self):
        # Resolve the user-facing ``precompute`` option into the ``Gram``
        # argument expected by lars_path: a concrete array, 'auto', or None.
        # precompute if n_samples > n_features
        precompute = self.precompute
        if hasattr(precompute, '__array__'):
            Gram = precompute
        elif precompute == 'auto':
            Gram = 'auto'
        else:
            Gram = None
        return Gram

    def fit(self, X, y, Xy=None):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.

        Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
                optional
            Xy = np.dot(X.T, y) that can be precomputed. It is useful
            only when the Gram matrix is precomputed.

        Returns
        -------
        self : object
            returns an instance of self.
        """
        X, y = check_X_y(X, y, y_numeric=True, multi_output=True)

        n_features = X.shape[1]

        X, y, X_mean, y_mean, X_std = self._center_data(X, y,
                                                        self.fit_intercept,
                                                        self.normalize,
                                                        self.copy_X)

        if y.ndim == 1:
            y = y[:, np.newaxis]

        n_targets = y.shape[1]

        alpha = getattr(self, 'alpha', 0.)
        if hasattr(self, 'n_nonzero_coefs'):
            alpha = 0.  # n_nonzero_coefs parametrization takes priority
            max_iter = self.n_nonzero_coefs
        else:
            max_iter = self.max_iter

        precompute = self.precompute
        # Share one Gram matrix across all targets when precomputation is
        # requested or clearly worthwhile (more samples than features, or
        # multiple targets reusing the same X'X).
        if not hasattr(precompute, '__array__') and (
                precompute is True or
                (precompute == 'auto' and X.shape[0] > X.shape[1]) or
                (precompute == 'auto' and y.shape[1] > 1)):
            Gram = np.dot(X.T, X)
        else:
            Gram = self._get_gram()

        self.alphas_ = []
        self.n_iter_ = []

        if self.fit_path:
            self.coef_ = []
            self.active_ = []
            self.coef_path_ = []
            for k in xrange(n_targets):
                this_Xy = None if Xy is None else Xy[:, k]
                alphas, active, coef_path, n_iter_ = lars_path(
                    X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
                    copy_Gram=True, alpha_min=alpha, method=self.method,
                    verbose=max(0, self.verbose - 1), max_iter=max_iter,
                    eps=self.eps, return_path=True,
                    return_n_iter=True, positive=self.positive)
                self.alphas_.append(alphas)
                self.active_.append(active)
                self.n_iter_.append(n_iter_)
                self.coef_path_.append(coef_path)
                self.coef_.append(coef_path[:, -1])

            if n_targets == 1:
                # Unwrap the single-target case so attributes are plain
                # arrays/lists instead of length-1 lists of them.
                self.alphas_, self.active_, self.coef_path_, self.coef_ = [
                    a[0] for a in (self.alphas_, self.active_,
                                   self.coef_path_, self.coef_)]
                self.n_iter_ = self.n_iter_[0]
        else:
            self.coef_ = np.empty((n_targets, n_features))
            for k in xrange(n_targets):
                this_Xy = None if Xy is None else Xy[:, k]
                alphas, _, self.coef_[k], n_iter_ = lars_path(
                    X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
                    copy_Gram=True, alpha_min=alpha, method=self.method,
                    verbose=max(0, self.verbose - 1), max_iter=max_iter,
                    eps=self.eps, return_path=False, return_n_iter=True,
                    positive=self.positive)
                self.alphas_.append(alphas)
                self.n_iter_.append(n_iter_)
            if n_targets == 1:
                self.alphas_ = self.alphas_[0]
                self.n_iter_ = self.n_iter_[0]

        self._set_intercept(X_mean, y_mean, X_std)
        return self
class LassoLars(Lars):
    """Lasso model fit with Least Angle Regression a.k.a. Lars

    It is a Linear Model trained with an L1 prior as regularizer.

    The optimization objective for Lasso is::

    (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    alpha : float
        Constant that multiplies the penalty term. Defaults to 1.0.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by :class:`LinearRegression`. For numerical reasons, using
        ``alpha = 0`` with the LassoLars object is not advised and you
        should prefer the LinearRegression object.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    positive : boolean (default=False)
        Restrict coefficients to be >= 0. Be aware that you might want to
        remove fit_intercept which is set True by default.
        Under the positive restriction the model coefficients will not
        converge to the ordinary-least-squares solution for small values
        of alpha. Only coefficients up to the smallest alpha value
        (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by
        the stepwise Lars-Lasso algorithm are typically in congruence
        with the solution of the coordinate descent Lasso estimator.

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : integer, optional
        Maximum number of iterations to perform.

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.

    fit_path : boolean
        If ``True`` the full path is stored in the ``coef_path_`` attribute.
        If you compute the solution for a large problem or many targets,
        setting ``fit_path`` to ``False`` will lead to a speedup, especially
        with a small alpha.

    Attributes
    ----------
    alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
        Maximum of covariances (in absolute value) at each iteration. \
        ``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
        nodes in the path with correlation greater than ``alpha``, whichever \
        is smaller.

    active_ : list, length = n_alphas | list of n_targets such lists
        Indices of active variables at the end of the path.

    coef_path_ : array, shape (n_features, n_alphas + 1) or list
        If a list is passed it's expected to be one of n_targets such arrays.
        The varying values of the coefficients along the path. It is not
        present if the ``fit_path`` parameter is ``False``.

    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the formulation formula).

    intercept_ : float | array, shape (n_targets,)
        Independent term in decision function.

    n_iter_ : array-like or int.
        The number of iterations taken by lars_path to find the
        grid of alphas for each target.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.LassoLars(alpha=0.01)
    >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
         fit_path=True, max_iter=500, normalize=True, positive=False,
         precompute='auto', verbose=False)
    >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    [ 0. -0.963257...]

    See also
    --------
    lars_path
    lasso_path
    Lasso
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode

    """
    def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
                 normalize=True, precompute='auto', max_iter=500,
                 eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
                 positive=False):
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.max_iter = max_iter
        self.verbose = verbose
        self.normalize = normalize
        # method selects the lasso variant of the shared Lars.fit machinery
        self.method = 'lasso'
        self.positive = positive
        self.precompute = precompute
        self.copy_X = copy_X
        self.eps = eps
        self.fit_path = fit_path


###############################################################################
# Cross-validated estimator classes

def _check_copy_and_writeable(array, copy=False):
    # Return a copy when one was requested or when the array is read-only;
    # callers below mutate their inputs in place (centering/normalization).
    if copy or not array.flags.writeable:
        return array.copy()
    return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
                        copy=True, method='lars', verbose=False,
                        fit_intercept=True, normalize=True, max_iter=500,
                        eps=np.finfo(np.float).eps, positive=False):
    """Compute the residues on left-out data for a full LARS path

    Parameters
    -----------
    X_train : array, shape (n_samples, n_features)
        The data to fit the LARS on

    y_train : array, shape (n_samples)
        The target variable to fit LARS on

    X_test : array, shape (n_samples, n_features)
        The data to compute the residues on

    y_test : array, shape (n_samples)
        The target variable to compute the residues on

    Gram : None, 'auto', array, shape: (n_features, n_features), optional
        Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
        matrix is precomputed from the given X, if there are more samples
        than features

    copy : boolean, optional
        Whether X_train, X_test, y_train and y_test should be copied;
        if False, they may be overwritten.

    method : 'lar' | 'lasso'
        Specifies the returned model. Select ``'lar'`` for Least Angle
        Regression, ``'lasso'`` for the Lasso.

    verbose : integer, optional
        Sets the amount of verbosity

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    positive : boolean (default=False)
        Restrict coefficients to be >= 0. Be aware that you might want to
        remove fit_intercept which is set True by default.
        See reservations for using this option in combination with method
        'lasso' for expected small values of alpha in the doc of LassoLarsCV
        and LassoLarsIC.

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    max_iter : integer, optional
        Maximum number of iterations to perform.

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.

    Returns
    --------
    alphas : array, shape (n_alphas,)
        Maximum of covariances (in absolute value) at each iteration.
        ``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
        is smaller.

    active : list
        Indices of active variables at the end of the path.

    coefs : array, shape (n_features, n_alphas)
        Coefficients along the path

    residues : array, shape (n_alphas, n_samples)
        Residues of the prediction on the test data
    """
    # NOTE(review): the default ``method='lars'`` is not one of the two
    # documented values; lars_path only tests for ``method == 'lasso'``, so
    # any other string behaves like 'lar'. Callers here always pass an
    # explicit method — confirm before changing the default.
    X_train = _check_copy_and_writeable(X_train, copy)
    y_train = _check_copy_and_writeable(y_train, copy)
    X_test = _check_copy_and_writeable(X_test, copy)
    y_test = _check_copy_and_writeable(y_test, copy)

    if fit_intercept:
        # Center train and test with the *training* statistics so the
        # held-out residues measure genuine generalization error.
        X_mean = X_train.mean(axis=0)
        X_train -= X_mean
        X_test -= X_mean
        y_mean = y_train.mean(axis=0)
        y_train = as_float_array(y_train, copy=False)
        y_train -= y_mean
        y_test = as_float_array(y_test, copy=False)
        y_test -= y_mean

    if normalize:
        norms = np.sqrt(np.sum(X_train ** 2, axis=0))
        nonzeros = np.flatnonzero(norms)
        X_train[:, nonzeros] /= norms[nonzeros]

    alphas, active, coefs = lars_path(
        X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
        method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps,
        positive=positive)
    if normalize:
        # Undo the column scaling so coefficients apply to the raw X_test.
        coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
    residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
    return alphas, active, coefs, residues.T
class LarsCV(Lars):
    """Cross-validated Least Angle Regression model

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    positive : boolean (default=False)
        Restrict coefficients to be >= 0. Be aware that you might want to
        remove fit_intercept which is set True by default.

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : integer, optional
        Maximum number of iterations to perform.

    cv : cross-validation generator, optional
        see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
        a 5-fold strategy

    max_n_alphas : integer, optional
        The maximum number of points on the path used to compute the
        residuals in the cross-validation

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.

    Attributes
    ----------
    coef_ : array, shape (n_features,)
        parameter vector (w in the formulation formula)

    intercept_ : float
        independent term in decision function

    coef_path_ : array, shape (n_features, n_alphas)
        the varying values of the coefficients along the path

    alpha_ : float
        the estimated regularization parameter alpha

    alphas_ : array, shape (n_alphas,)
        the different values of alpha along the path

    cv_alphas_ : array, shape (n_cv_alphas,)
        all the values of alpha along the path for the different folds

    cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
        the mean square error on left-out for each fold along the path
        (alpha values given by ``cv_alphas``)

    n_iter_ : array-like or int
        the number of iterations run by Lars with the optimal alpha.

    See also
    --------
    lars_path, LassoLars, LassoLarsCV
    """

    # Subclasses (LassoLarsCV) override this to select the lasso variant.
    method = 'lar'

    def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
                 normalize=True, precompute='auto', cv=None,
                 max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
                 copy_X=True, positive=False):
        self.fit_intercept = fit_intercept
        self.positive = positive
        self.max_iter = max_iter
        self.verbose = verbose
        self.normalize = normalize
        self.precompute = precompute
        self.copy_X = copy_X
        self.cv = cv
        self.max_n_alphas = max_n_alphas
        self.n_jobs = n_jobs
        self.eps = eps

    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array-like, shape (n_samples,)
            Target values.

        Returns
        -------
        self : object
            returns an instance of self.
        """
        self.fit_path = True
        X, y = check_X_y(X, y, y_numeric=True)

        # init cross-validation generator
        cv = check_cv(self.cv, X, y, classifier=False)

        Gram = 'auto' if self.precompute else None

        cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
            delayed(_lars_path_residues)(
                X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
                method=self.method, verbose=max(0, self.verbose - 1),
                normalize=self.normalize, fit_intercept=self.fit_intercept,
                max_iter=self.max_iter, eps=self.eps,
                positive=self.positive)
            for train, test in cv)
        all_alphas = np.concatenate(list(zip(*cv_paths))[0])
        # Unique also sorts
        all_alphas = np.unique(all_alphas)
        # Take at most max_n_alphas values
        stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
        all_alphas = all_alphas[::stride]

        # Interpolate every fold's residues onto the common alpha grid so
        # the per-alpha MSE can be averaged across folds.
        mse_path = np.empty((len(all_alphas), len(cv_paths)))
        for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
            alphas = alphas[::-1]
            residues = residues[::-1]
            if alphas[0] != 0:
                alphas = np.r_[0, alphas]
                residues = np.r_[residues[0, np.newaxis], residues]
            if alphas[-1] != all_alphas[-1]:
                alphas = np.r_[alphas, all_alphas[-1]]
                residues = np.r_[residues, residues[-1, np.newaxis]]
            this_residues = interpolate.interp1d(alphas,
                                                 residues,
                                                 axis=0)(all_alphas)
            this_residues **= 2
            mse_path[:, index] = np.mean(this_residues, axis=-1)

        mask = np.all(np.isfinite(mse_path), axis=-1)
        all_alphas = all_alphas[mask]
        mse_path = mse_path[mask]

        # Select the alpha that minimizes left-out error
        i_best_alpha = np.argmin(mse_path.mean(axis=-1))
        best_alpha = all_alphas[i_best_alpha]

        # Store our parameters
        self.alpha_ = best_alpha
        self.cv_alphas_ = all_alphas
        self.cv_mse_path_ = mse_path

        # Now compute the full model
        # it will call a lasso internally when self is LassoLarsCV
        # as self.method == 'lasso'
        Lars.fit(self, X, y)
        return self

    @property
    def alpha(self):
        # impedance matching for the above Lars.fit (should not be documented)
        return self.alpha_
If ``-1``, use all the CPUs eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. copy_X : boolean, optional, default True If True, X will be copied; else, it may be overwritten. Attributes ---------- coef_ : array, shape (n_features,) parameter vector (w in the formulation formula) intercept_ : float independent term in decision function. coef_path_ : array, shape (n_features, n_alphas) the varying values of the coefficients along the path alpha_ : float the estimated regularization parameter alpha alphas_ : array, shape (n_alphas,) the different values of alpha along the path cv_alphas_ : array, shape (n_cv_alphas,) all the values of alpha along the path for the different folds cv_mse_path_ : array, shape (n_folds, n_cv_alphas) the mean square error on left-out for each fold along the path (alpha values given by ``cv_alphas``) n_iter_ : array-like or int the number of iterations run by Lars with the optimal alpha. Notes ----- The object solves the same problem as the LassoCV object. However, unlike the LassoCV, it find the relevant alphas values by itself. In general, because of this property, it will be more stable. However, it is more fragile to heavily multicollinear datasets. It is more efficient than the LassoCV if only a small number of features are selected compared to the total number, for instance if there are very few samples compared to the number of features. See also -------- lars_path, LassoLars, LarsCV, LassoCV """ method = 'lasso' class LassoLarsIC(LassoLars): """Lasso model fit with Lars using BIC or AIC for model selection The optimization objective for Lasso is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 AIC is the Akaike information criterion and BIC is the Bayes Information criterion. 
Such criteria are useful to select the value of the regularization parameter by making a trade-off between the goodness of fit and the complexity of the model. A good model should explain well the data while being simple. Read more in the :ref:`User Guide <least_angle_regression>`. Parameters ---------- criterion : 'bic' | 'aic' The type of criterion to use. fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). positive : boolean (default=False) Restrict coefficients to be >= 0. Be aware that you might want to remove fit_intercept which is set True by default. Under the positive restriction the model coefficients do not converge to the ordinary-least-squares solution for small values of alpha. Only coeffiencts up to the smallest alpha value (alphas_[alphas_ > 0.].min() when fit_path=True) reached by the stepwise Lars-Lasso algorithm are typically in congruence with the solution of the coordinate descent Lasso estimator. As a consequence using LassoLarsIC only makes sense for problems where a sparse solution is expected and/or reached. verbose : boolean or integer, optional Sets the verbosity amount normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If True, X will be copied; else, it may be overwritten. precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. max_iter : integer, optional Maximum number of iterations to perform. Can be used for early stopping. eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. 
Unlike the ``tol`` parameter in some iterative optimization-based algorithms, this parameter does not control the tolerance of the optimization. Attributes ---------- coef_ : array, shape (n_features,) parameter vector (w in the formulation formula) intercept_ : float independent term in decision function. alpha_ : float the alpha parameter chosen by the information criterion n_iter_ : int number of iterations run by lars_path to find the grid of alphas. criterion_ : array, shape (n_alphas,) The value of the information criteria ('aic', 'bic') across all alphas. The alpha which has the smallest information criteria is chosen. Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.LassoLarsIC(criterion='bic') >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111]) ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True, max_iter=500, normalize=True, positive=False, precompute='auto', verbose=False) >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE [ 0. -1.11...] Notes ----- The estimation of the number of degrees of freedom is given by: "On the degrees of freedom of the lasso" Hui Zou, Trevor Hastie, and Robert Tibshirani Ann. Statist. Volume 35, Number 5 (2007), 2173-2192. http://en.wikipedia.org/wiki/Akaike_information_criterion http://en.wikipedia.org/wiki/Bayesian_information_criterion See also -------- lars_path, LassoLars, LassoLarsCV """ def __init__(self, criterion='aic', fit_intercept=True, verbose=False, normalize=True, precompute='auto', max_iter=500, eps=np.finfo(np.float).eps, copy_X=True, positive=False): self.criterion = criterion self.fit_intercept = fit_intercept self.positive = positive self.max_iter = max_iter self.verbose = verbose self.normalize = normalize self.copy_X = copy_X self.precompute = precompute self.eps = eps def fit(self, X, y, copy_X=True): """Fit the model using X, y as training data. 
Parameters ---------- X : array-like, shape (n_samples, n_features) training data. y : array-like, shape (n_samples,) target values. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. Returns ------- self : object returns an instance of self. """ self.fit_path = True X, y = check_X_y(X, y, y_numeric=True) X, y, Xmean, ymean, Xstd = LinearModel._center_data( X, y, self.fit_intercept, self.normalize, self.copy_X) max_iter = self.max_iter Gram = self._get_gram() alphas_, active_, coef_path_, self.n_iter_ = lars_path( X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0, method='lasso', verbose=self.verbose, max_iter=max_iter, eps=self.eps, return_n_iter=True, positive=self.positive) n_samples = X.shape[0] if self.criterion == 'aic': K = 2 # AIC elif self.criterion == 'bic': K = log(n_samples) # BIC else: raise ValueError('criterion should be either bic or aic') R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals mean_squared_error = np.mean(R ** 2, axis=0) df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom for k, coef in enumerate(coef_path_.T): mask = np.abs(coef) > np.finfo(coef.dtype).eps if not np.any(mask): continue # get the number of degrees of freedom equal to: # Xc = X[:, mask] # Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs df[k] = np.sum(mask) self.alphas_ = alphas_ with np.errstate(divide='ignore'): self.criterion_ = n_samples * np.log(mean_squared_error) + K * df n_best = np.argmin(self.criterion_) self.alpha_ = alphas_[n_best] self.coef_ = coef_path_[:, n_best] self._set_intercept(Xmean, ymean, Xstd) return self
bsd-3-clause
juliengdt/juliengdt-resources
node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/token.py
365
5662
# -*- coding: utf-8 -*- """ pygments.token ~~~~~~~~~~~~~~ Basic token types and the standard tokens. :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ class _TokenType(tuple): parent = None def split(self): buf = [] node = self while node is not None: buf.append(node) node = node.parent buf.reverse() return buf def __init__(self, *args): # no need to call super.__init__ self.subtypes = set() def __contains__(self, val): return self is val or ( type(val) is self.__class__ and val[:len(self)] == self ) def __getattr__(self, val): if not val or not val[0].isupper(): return tuple.__getattribute__(self, val) new = _TokenType(self + (val,)) setattr(self, val, new) self.subtypes.add(new) new.parent = self return new def __repr__(self): return 'Token' + (self and '.' or '') + '.'.join(self) Token = _TokenType() # Special token types Text = Token.Text Whitespace = Text.Whitespace Error = Token.Error # Text that doesn't belong to this lexer (e.g. HTML in PHP) Other = Token.Other # Common token types for source code Keyword = Token.Keyword Name = Token.Name Literal = Token.Literal String = Literal.String Number = Literal.Number Punctuation = Token.Punctuation Operator = Token.Operator Comment = Token.Comment # Generic types for non-source code Generic = Token.Generic # String and some others are not direct childs of Token. # alias them: Token.Token = Token Token.String = String Token.Number = Number def is_token_subtype(ttype, other): """ Return True if ``ttype`` is a subtype of ``other``. exists for backwards compatibility. use ``ttype in other`` now. 
""" return ttype in other def string_to_tokentype(s): """ Convert a string into a token type:: >>> string_to_token('String.Double') Token.Literal.String.Double >>> string_to_token('Token.Literal.Number') Token.Literal.Number >>> string_to_token('') Token Tokens that are already tokens are returned unchanged: >>> string_to_token(String) Token.Literal.String """ if isinstance(s, _TokenType): return s if not s: return Token node = Token for item in s.split('.'): node = getattr(node, item) return node # Map standard token types to short names, used in CSS class naming. # If you add a new item, please be sure to run this file to perform # a consistency check for duplicate values. STANDARD_TYPES = { Token: '', Text: '', Whitespace: 'w', Error: 'err', Other: 'x', Keyword: 'k', Keyword.Constant: 'kc', Keyword.Declaration: 'kd', Keyword.Namespace: 'kn', Keyword.Pseudo: 'kp', Keyword.Reserved: 'kr', Keyword.Type: 'kt', Name: 'n', Name.Attribute: 'na', Name.Builtin: 'nb', Name.Builtin.Pseudo: 'bp', Name.Class: 'nc', Name.Constant: 'no', Name.Decorator: 'nd', Name.Entity: 'ni', Name.Exception: 'ne', Name.Function: 'nf', Name.Property: 'py', Name.Label: 'nl', Name.Namespace: 'nn', Name.Other: 'nx', Name.Tag: 'nt', Name.Variable: 'nv', Name.Variable.Class: 'vc', Name.Variable.Global: 'vg', Name.Variable.Instance: 'vi', Literal: 'l', Literal.Date: 'ld', String: 's', String.Backtick: 'sb', String.Char: 'sc', String.Doc: 'sd', String.Double: 's2', String.Escape: 'se', String.Heredoc: 'sh', String.Interpol: 'si', String.Other: 'sx', String.Regex: 'sr', String.Single: 's1', String.Symbol: 'ss', Number: 'm', Number.Float: 'mf', Number.Hex: 'mh', Number.Integer: 'mi', Number.Integer.Long: 'il', Number.Oct: 'mo', Operator: 'o', Operator.Word: 'ow', Punctuation: 'p', Comment: 'c', Comment.Multiline: 'cm', Comment.Preproc: 'cp', Comment.Single: 'c1', Comment.Special: 'cs', Generic: 'g', Generic.Deleted: 'gd', Generic.Emph: 'ge', Generic.Error: 'gr', Generic.Heading: 'gh', 
Generic.Inserted: 'gi', Generic.Output: 'go', Generic.Prompt: 'gp', Generic.Strong: 'gs', Generic.Subheading: 'gu', Generic.Traceback: 'gt', }
mit
igel-kun/pyload
module/plugins/hoster/JWPlayerBased.py
1
3736
# -*- coding: utf-8 -*- import re from module.network.CookieJar import CookieJar from module.network.PhantomRequest import PhantomRequest from module.plugins.internal.misc import eval_js_script, get_domain, make_oneline from module.plugins.internal.XFSHoster import XFSHoster class JWPlayerBased(XFSHoster): __name__ = "JWPlayerBased" __type__ = "hoster" __version__ = "0.01" __pattern__ = r"undefined" __config__ = [("activated" , "bool" , "Activated" , True )] __description__ = """JWPlayerBased plugin""" __author_name__ = ("igel") __author_mail__ = ("") INFO_PATTERN = None NAME_PATTERN = r'<[tT]itle>(?:[wW]atch )?(?P<N>.*?)</[Tt]itle>' SIZE_PATTERN = None LINK_PATTERN = None # how to find the jwplayer code in the HTML JW_PATTERN = r"<script .*?javascript[\"'][^>]*>\s*(eval.*?)(?:</script>|$)" # how to extract the link from the decoded javascript call to jwplayer JW_LINK_PATTERN = r"play.*?{file:[\"']([^\"']*)[\"']" def setup(self): self.multiDL = True self.chunkLimit = 1 self.resumeDownload = True # use phantomJS to download websites; this will circumvent JS obfuscation but makes everything a bit slower try: self.req.http.close() finally: self.req.http = PhantomRequest( cookies = CookieJar(None), options = self.pyload.requestFactory.getOptions()) def init(self): self.__pattern__ = self.pyload.pluginManager.hosterPlugins[self.classname]['pattern'] if not self.PLUGIN_DOMAIN: m = re.match(self.__pattern__, self.pyfile.url) try: self.PLUGIN_DOMAIN = m.group("DOMAIN").lower() except: self.PLUGIN_DOMAIN = get_domain(m.group(0)) self.PLUGIN_NAME = "".join(part.capitalize() for part in re.split(r'\.|\d+|-', self.PLUGIN_DOMAIN) if part != '.') if not self.LINK_PATTERN: link_patterns = filter(None, [self.JW_PATTERN, self.JW_LINK_PATTERN]) if link_patterns: self.LINK_PATTERN = "(?:%s)" % ('|'.join(link_patterns)) self.log_debug('our link pattern is: %s' % self.LINK_PATTERN) if not self.ERROR_PATTERN: error_patterns = filter(None, [self.OFFLINE_PATTERN, 
self.TEMP_OFFLINE_PATTERN]) if error_patterns: self.ERROR_PATTERN = "(?:%s)" % ('|'.join(error_patterns)) self.log_debug('our error pattern is: %s' % self.ERROR_PATTERN) def handle_free(self, pyfile): self.log_debug('calling XFSs handle_free to click buttons...') super(JWPlayerBased, self).handle_free(pyfile) self.log_debug('XFSs handle_free found: %s' % make_oneline(self.link)) # step 2: extract file URL m = re.search(self.JW_LINK_PATTERN, self.link, re.MULTILINE | re.DOTALL) if m is not None: for link_match in m.groups(): if link_match: self.link = link_match if 'eval' in self.link: self.log_debug(_("evaluating script to get call to jwplayer")) js_code = re.sub('eval', '', self.link) data = eval_js_script(js_code) # now, data should be a call to jwplayer in plaintext # step 2: extract file URL m = re.search(self.JW_LINK_PATTERN, data, re.MULTILINE | re.DOTALL) if m is not None: for link_match in m.groups(): if link_match: self.link = link_match else: self.error("could not parse call to JWplayer")
gpl-3.0
tempbottle/grpc
src/python/grpcio_test/grpc_test/_cython/adapter_low_test.py
7
8187
# Copyright 2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Fork of grpc._adapter._low_test; the grpc._cython.types adapter in # grpc._cython.low should transparently support the semantics expected of # grpc._adapter._low. 
import time import unittest from grpc._adapter import _types from grpc._cython import adapter_low as _low class InsecureServerInsecureClient(unittest.TestCase): def setUp(self): self.server_completion_queue = _low.CompletionQueue() self.server = _low.Server(self.server_completion_queue, []) self.port = self.server.add_http2_port('[::]:0') self.client_completion_queue = _low.CompletionQueue() self.client_channel = _low.Channel('localhost:%d'%self.port, []) self.server.start() def tearDown(self): self.server.shutdown() del self.client_channel self.client_completion_queue.shutdown() while (self.client_completion_queue.next().type != _types.EventType.QUEUE_SHUTDOWN): pass self.server_completion_queue.shutdown() while (self.server_completion_queue.next().type != _types.EventType.QUEUE_SHUTDOWN): pass del self.client_completion_queue del self.server_completion_queue del self.server @unittest.skip('TODO(atash): implement grpc._cython.adapter_low') def testEcho(self): DEADLINE = time.time()+5 DEADLINE_TOLERANCE = 0.25 CLIENT_METADATA_ASCII_KEY = 'key' CLIENT_METADATA_ASCII_VALUE = 'val' CLIENT_METADATA_BIN_KEY = 'key-bin' CLIENT_METADATA_BIN_VALUE = b'\0'*1000 SERVER_INITIAL_METADATA_KEY = 'init_me_me_me' SERVER_INITIAL_METADATA_VALUE = 'whodawha?' 
SERVER_TRAILING_METADATA_KEY = 'California_is_in_a_drought' SERVER_TRAILING_METADATA_VALUE = 'zomg it is' SERVER_STATUS_CODE = _types.StatusCode.OK SERVER_STATUS_DETAILS = 'our work is never over' REQUEST = 'in death a member of project mayhem has a name' RESPONSE = 'his name is robert paulson' METHOD = 'twinkies' HOST = 'hostess' server_request_tag = object() request_call_result = self.server.request_call(self.server_completion_queue, server_request_tag) self.assertEqual(_types.CallError.OK, request_call_result) client_call_tag = object() client_call = self.client_channel.create_call(self.client_completion_queue, METHOD, HOST, DEADLINE) client_initial_metadata = [ (CLIENT_METADATA_ASCII_KEY, CLIENT_METADATA_ASCII_VALUE), (CLIENT_METADATA_BIN_KEY, CLIENT_METADATA_BIN_VALUE)] client_start_batch_result = client_call.start_batch([ _types.OpArgs.send_initial_metadata(client_initial_metadata), _types.OpArgs.send_message(REQUEST), _types.OpArgs.send_close_from_client(), _types.OpArgs.recv_initial_metadata(), _types.OpArgs.recv_message(), _types.OpArgs.recv_status_on_client() ], client_call_tag) self.assertEqual(_types.CallError.OK, client_start_batch_result) request_event = self.server_completion_queue.next(DEADLINE) self.assertEqual(_types.EventType.OP_COMPLETE, request_event.type) self.assertIsInstance(request_event.call, _low.Call) self.assertIs(server_request_tag, request_event.tag) self.assertEqual(1, len(request_event.results)) self.assertEqual(dict(client_initial_metadata), dict(request_event.results[0].initial_metadata)) self.assertEqual(METHOD, request_event.call_details.method) self.assertEqual(HOST, request_event.call_details.host) self.assertLess(abs(DEADLINE - request_event.call_details.deadline), DEADLINE_TOLERANCE) server_call_tag = object() server_call = request_event.call server_initial_metadata = [ (SERVER_INITIAL_METADATA_KEY, SERVER_INITIAL_METADATA_VALUE)] server_trailing_metadata = [ (SERVER_TRAILING_METADATA_KEY, SERVER_TRAILING_METADATA_VALUE)] 
server_start_batch_result = server_call.start_batch([ _types.OpArgs.send_initial_metadata(server_initial_metadata), _types.OpArgs.recv_message(), _types.OpArgs.send_message(RESPONSE), _types.OpArgs.recv_close_on_server(), _types.OpArgs.send_status_from_server( server_trailing_metadata, SERVER_STATUS_CODE, SERVER_STATUS_DETAILS) ], server_call_tag) self.assertEqual(_types.CallError.OK, server_start_batch_result) client_event = self.client_completion_queue.next(DEADLINE) server_event = self.server_completion_queue.next(DEADLINE) self.assertEqual(6, len(client_event.results)) found_client_op_types = set() for client_result in client_event.results: # we expect each op type to be unique self.assertNotIn(client_result.type, found_client_op_types) found_client_op_types.add(client_result.type) if client_result.type == _types.OpType.RECV_INITIAL_METADATA: self.assertEqual(dict(server_initial_metadata), dict(client_result.initial_metadata)) elif client_result.type == _types.OpType.RECV_MESSAGE: self.assertEqual(RESPONSE, client_result.message) elif client_result.type == _types.OpType.RECV_STATUS_ON_CLIENT: self.assertEqual(dict(server_trailing_metadata), dict(client_result.trailing_metadata)) self.assertEqual(SERVER_STATUS_DETAILS, client_result.status.details) self.assertEqual(SERVER_STATUS_CODE, client_result.status.code) self.assertEqual(set([ _types.OpType.SEND_INITIAL_METADATA, _types.OpType.SEND_MESSAGE, _types.OpType.SEND_CLOSE_FROM_CLIENT, _types.OpType.RECV_INITIAL_METADATA, _types.OpType.RECV_MESSAGE, _types.OpType.RECV_STATUS_ON_CLIENT ]), found_client_op_types) self.assertEqual(5, len(server_event.results)) found_server_op_types = set() for server_result in server_event.results: self.assertNotIn(client_result.type, found_server_op_types) found_server_op_types.add(server_result.type) if server_result.type == _types.OpType.RECV_MESSAGE: self.assertEqual(REQUEST, server_result.message) elif server_result.type == _types.OpType.RECV_CLOSE_ON_SERVER: 
self.assertFalse(server_result.cancelled) self.assertEqual(set([ _types.OpType.SEND_INITIAL_METADATA, _types.OpType.RECV_MESSAGE, _types.OpType.SEND_MESSAGE, _types.OpType.RECV_CLOSE_ON_SERVER, _types.OpType.SEND_STATUS_FROM_SERVER ]), found_server_op_types) del client_call del server_call if __name__ == '__main__': unittest.main(verbosity=2)
bsd-3-clause