repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
nginxxx/ansible
lib/ansible/plugins/shell/powershell.py
35
7595
# (c) 2014, Chris Church <chris@ninemoreminutes.com> # # This file is part of Ansible. # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import base64 import os import re import random import shlex import time from ansible.utils.unicode import to_bytes, to_unicode _common_args = ['PowerShell', '-NoProfile', '-NonInteractive', '-ExecutionPolicy', 'Unrestricted'] # Primarily for testing, allow explicitly specifying PowerShell version via # an environment variable. _powershell_version = os.environ.get('POWERSHELL_VERSION', None) if _powershell_version: _common_args = ['PowerShell', '-Version', _powershell_version] + _common_args[1:] class ShellModule(object): def env_prefix(self, **kwargs): return '' def join_path(self, *args): parts = [] for arg in args: arg = self._unquote(arg).replace('/', '\\') parts.extend([a for a in arg.split('\\') if a]) path = '\\'.join(parts) if path.startswith('~'): return path return '"%s"' % path def path_has_trailing_slash(self, path): # Allow Windows paths to be specified using either slash. 
path = self._unquote(path) return path.endswith('/') or path.endswith('\\') def chmod(self, mode, path): return '' def remove(self, path, recurse=False): path = self._escape(self._unquote(path)) if recurse: return self._encode_script('''Remove-Item "%s" -Force -Recurse;''' % path) else: return self._encode_script('''Remove-Item "%s" -Force;''' % path) def mkdtemp(self, basefile, system=False, mode=None): basefile = self._escape(self._unquote(basefile)) # FIXME: Support system temp path! return self._encode_script('''(New-Item -Type Directory -Path $env:temp -Name "%s").FullName | Write-Host -Separator '';''' % basefile) def expand_user(self, user_home_path): # PowerShell only supports "~" (not "~username"). Resolve-Path ~ does # not seem to work remotely, though by default we are always starting # in the user's home directory. user_home_path = self._unquote(user_home_path) if user_home_path == '~': script = 'Write-Host (Get-Location).Path' elif user_home_path.startswith('~\\'): script = 'Write-Host ((Get-Location).Path + "%s")' % self._escape(user_home_path[1:]) else: script = 'Write-Host "%s"' % self._escape(user_home_path) return self._encode_script(script) def checksum(self, path, *args, **kwargs): path = self._escape(self._unquote(path)) script = ''' If (Test-Path -PathType Leaf "%(path)s") { $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider; $fp = [System.IO.File]::Open("%(path)s", [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read); [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower(); $fp.Dispose(); } ElseIf (Test-Path -PathType Container "%(path)s") { Write-Host "3"; } Else { Write-Host "1"; } ''' % dict(path=path) return self._encode_script(script) def build_module_command(self, env_string, shebang, cmd, rm_tmp=None): cmd_parts = shlex.split(to_bytes(cmd), posix=False) cmd_parts = map(to_unicode, cmd_parts) if shebang and shebang.lower() == '#!powershell': if not 
self._unquote(cmd_parts[0]).lower().endswith('.ps1'): cmd_parts[0] = '"%s.ps1"' % self._unquote(cmd_parts[0]) cmd_parts.insert(0, '&') elif shebang and shebang.startswith('#!'): cmd_parts.insert(0, shebang[2:]) script = ''' Try { %s } Catch { $_obj = @{ failed = $true } If ($_.Exception.GetType) { $_obj.Add('msg', $_.Exception.Message) } Else { $_obj.Add('msg', $_.ToString()) } If ($_.InvocationInfo.PositionMessage) { $_obj.Add('exception', $_.InvocationInfo.PositionMessage) } ElseIf ($_.ScriptStackTrace) { $_obj.Add('exception', $_.ScriptStackTrace) } Try { $_obj.Add('error_record', ($_ | ConvertTo-Json | ConvertFrom-Json)) } Catch { } Echo $_obj | ConvertTo-Json -Compress -Depth 99 Exit 1 } ''' % (' '.join(cmd_parts)) if rm_tmp: rm_tmp = self._escape(self._unquote(rm_tmp)) rm_cmd = 'Remove-Item "%s" -Force -Recurse -ErrorAction SilentlyContinue' % rm_tmp script = '%s\nFinally { %s }' % (script, rm_cmd) return self._encode_script(script) def _unquote(self, value): '''Remove any matching quotes that wrap the given value.''' value = to_unicode(value or '') m = re.match(r'^\s*?\'(.*?)\'\s*?$', value) if m: return m.group(1) m = re.match(r'^\s*?"(.*?)"\s*?$', value) if m: return m.group(1) return value def _escape(self, value, include_vars=False): '''Return value escaped for use in PowerShell command.''' # http://www.techotopia.com/index.php/Windows_PowerShell_1.0_String_Quoting_and_Escape_Sequences # http://stackoverflow.com/questions/764360/a-list-of-string-replacements-in-python subs = [('\n', '`n'), ('\r', '`r'), ('\t', '`t'), ('\a', '`a'), ('\b', '`b'), ('\f', '`f'), ('\v', '`v'), ('"', '`"'), ('\'', '`\''), ('`', '``'), ('\x00', '`0')] if include_vars: subs.append(('$', '`$')) pattern = '|'.join('(%s)' % re.escape(p) for p, s in subs) substs = [s for p, s in subs] replace = lambda m: substs[m.lastindex - 1] return re.sub(pattern, replace, value) def _encode_script(self, script, as_list=False, strict_mode=True): '''Convert a PowerShell script to a single 
base64-encoded command.''' script = to_unicode(script) if strict_mode: script = u'Set-StrictMode -Version Latest\r\n%s' % script script = '\n'.join([x.strip() for x in script.splitlines() if x.strip()]) encoded_script = base64.b64encode(script.encode('utf-16-le')) cmd_parts = _common_args + ['-EncodedCommand', encoded_script] if as_list: return cmd_parts return ' '.join(cmd_parts)
gpl-3.0
nanuxbe/django
docs/search.py
1
7579
# Elasticsearch-backed search index for the docs site: a count-free paginator,
# a reusable DocType base with bulk-indexing helpers, and the Document mapping.
import elasticsearch
from django.core.paginator import EmptyPage, Page, PageNotAnInteger, Paginator
from django.utils.html import strip_tags
from django.utils.text import unescape_entities
from elasticsearch.helpers import streaming_bulk
from elasticsearch_dsl import DocType, Long, Nested, Object, String, analysis
from elasticsearch_dsl.connections import connections

from .models import Document, document_url


class SearchPaginator(Paginator):
    """
    A better paginator for search results

    The normal Paginator does a .count() query and then a slice. Since ES
    results contain the total number of results, we can take an optimistic
    slice and then adjust the count.
    """

    def validate_number(self, number):
        """
        Validates the given 1-based page number.

        This class overrides the default behavior and ignores the upper bound.
        """
        try:
            number = int(number)
        except (TypeError, ValueError):
            raise PageNotAnInteger('That page number is not an integer')
        if number < 1:
            raise EmptyPage('That page number is less than 1')
        return number

    def page(self, number):
        """
        Returns a page object.

        This class overrides the default behavior and ignores "orphans" and
        assigns the count from the ES result to the Paginator.
        """
        number = self.validate_number(number)
        bottom = (number - 1) * self.per_page
        top = bottom + self.per_page
        # Force the search to evaluate and then attach the count. We want to
        # avoid an extra useless query even if there are no results, so we
        # directly fetch the count from hits.
        result = self.object_list[bottom:top].execute()
        page = Page(result.hits, number, self)
        # Update the `_count`.
        # NOTE(review): writing Paginator._count directly relies on Django's
        # cached-count internals — confirm against the Django version in use.
        self._count = page.object_list.total
        # Also store the aggregations, if any.
        if hasattr(result, 'aggregations'):
            page.aggregations = result.aggregations
        # Now that we have the count validate that the page number isn't higher
        # than the possible number of pages and adjust accordingly.
        if number > self.num_pages:
            if number == 1 and self.allow_empty_first_page:
                pass
            else:
                raise EmptyPage('That page contains no results')
        return page


class ImprovedDocType(DocType):
    """DocType base class adding bulk (re)indexing helpers driven by a Django
    queryset. Subclasses set ``model`` and implement ``from_django``."""

    @classmethod
    def index_all(cls, index_name, using=None, **kwargs):
        """Bulk-index every object of ``index_queryset()`` into ``index_name``,
        yielding one (ok, item) pair per indexed document."""
        def actions_generator():
            # Stream ORM rows and convert each into an ES bulk action dict,
            # forcing the target index to ``index_name``.
            for obj in cls.index_queryset().iterator():
                elastic_data = cls.from_django(obj).to_dict(include_meta=True)
                elastic_data['_index'] = index_name
                yield elastic_data
        client = connections.get_connection(using or cls._doc_type.using)
        # Make sure the index and its mapping exist before streaming.
        cls.init(index_name)
        for ok, item in streaming_bulk(client, actions_generator(),
                                       chunk_size=100, **kwargs):
            yield ok, item

    @classmethod
    def index_queryset(cls):
        # Default: index every object; subclasses may narrow this.
        return cls.model._default_manager.all()

    @classmethod
    def index_object(cls, obj):
        # Index (or reindex) a single ORM object.
        return cls.from_django(obj).save()

    @classmethod
    def unindex_object(cls, obj):
        # Remove the ES document whose id matches the ORM primary key.
        return cls.get(id=obj.pk).delete()

    @classmethod
    def from_django(cls, obj):
        raise NotImplementedError('You must define a from_django classmethod '
                                  'to map ORM object fields to ES fields')


# NOTE(review): this monkeypatches elasticsearch_dsl's tokenizer registry so
# the two builtin-name tokenizer subclasses below validate — presumably a
# workaround for the installed elasticsearch_dsl version; confirm on upgrade.
analysis.Tokenizer._builtins = analysis.TOKENIZERS = frozenset((
    'keyword', 'standard', 'path_hierarchy', 'whitespace'
))


class PathHierarchyTokenizer(analysis.Tokenizer):
    # Built-in ES tokenizer that splits paths on their separators.
    name = 'path_hierarchy'


class WhitespaceTokenizer(analysis.Tokenizer):
    # Built-in ES tokenizer splitting on whitespace only.
    name = 'whitespace'


# Lowercased path-hierarchy analyzer for the document path field.
path_analyzer = analysis.CustomAnalyzer('path',
                                        tokenizer='path_hierarchy',
                                        filter=['lowercase'])
# Whitespace analyzer with lowercasing, stopwords and HTML stripping,
# used for the searchable title/content fields.
lower_whitespace_analyzer = analysis.analyzer('lower_whitespace',
                                              tokenizer='whitespace',
                                              filter=['lowercase', 'stop'],
                                              char_filter=['html_strip'])


class DocumentDocType(ImprovedDocType):
    """
    The main documentation doc type to be used for searching.

    It stores a bit of meta data so we don't have to hit the db when
    rendering search results.

    The search view will be using the 'lang' and 'version' fields of the
    document's release to filter the search results, depending which was
    found in the URL. The breadcrumbs are shown under the search result
    title.
    """
    model = Document

    id = Long()
    # Title gets a relevance boost over body content.
    title = String(analyzer=lower_whitespace_analyzer, boost=1.2)
    path = String(index='no', analyzer=path_analyzer)
    content = String(analyzer=lower_whitespace_analyzer)
    # Raw (tag-stripped) body, indexed with offsets for highlighting.
    content_raw = String(index_options='offsets')
    release = Object(properties={
        'id': Long(),
        'version': String(index='not_analyzed'),
        'lang': String(index='not_analyzed'),
    })
    breadcrumbs = Nested(properties={
        'title': String(index='not_analyzed'),
        'path': String(index='not_analyzed'),
    })

    class Meta:
        index = 'docs'
        doc_type = 'document'

    @classmethod
    def alias_to_main_index(cls, index_name, using=None):
        """
        Alias `index_name` to 'docs' (`cls._doc_type.index`).
        """
        body = {'actions': [{'add': {'index': index_name,
                                     'alias': cls._doc_type.index}}]}
        client = connections.get_connection(using or cls._doc_type.using)
        client.indices.refresh(index=index_name)
        try:
            # Find whichever index currently carries the 'docs' alias.
            old_index_name = list(client.indices.get_alias('docs').keys())[0]
        except elasticsearch.exceptions.NotFoundError:
            old_index_name = None
        else:
            # Swap the alias atomically: add the new index, remove the old.
            body['actions'].append({'remove': {'index': old_index_name,
                                               'alias': cls._doc_type.index}})
        client.indices.update_aliases(body=body)
        # Delete the old index that was aliased to 'docs'.
        if old_index_name:
            client.indices.delete(old_index_name)

    @classmethod
    def index_queryset(cls):
        qs = super(DocumentDocType, cls).index_queryset()
        return (
            # don't index the module pages since source code is hard to
            # combine with full text search
            qs.exclude(path__startswith='_modules')
            # not the crazy big flattened index of the CBVs
            .exclude(path__startswith='ref/class-based-views/flattened-index')
            .select_related('release'))

    @classmethod
    def from_django(cls, obj):
        """Build a DocumentDocType instance from a Document ORM object."""
        # turns HTML entities into unicode characters again and removes
        # all HTML tags, aka "plain text" version of the document
        raw_body = strip_tags(unescape_entities(obj.body).replace(u'¶', ''))
        doc = cls(path=obj.path, title=obj.title, content=obj.body,
                  content_raw=raw_body, meta={'id': obj.id})
        doc.release = {
            'id': obj.release.id,
            'lang': obj.release.lang,
            'version': obj.release.version,
        }
        breadcrumbs = []
        for breadcrumb in cls.model.objects.breadcrumbs(obj):
            breadcrumbs.append({
                'title': breadcrumb.title,
                'path': breadcrumb.path,
            })
        doc.breadcrumbs = breadcrumbs
        return doc

    def get_absolute_url(self):
        # URL construction lives with the model layer.
        return document_url(self)
bsd-3-clause
nomadcube/scikit-learn
examples/applications/topics_extraction_with_nmf.py
106
2313
""" ======================================================== Topics extraction with Non-Negative Matrix Factorization ======================================================== This is a proof of concept application of Non Negative Matrix Factorization of the term frequency matrix of a corpus of documents so as to extract an additive model of the topic structure of the corpus. The output is a list of topics, each represented as a list of terms (weights are not shown). The default parameters (n_samples / n_features / n_topics) should make the example runnable in a couple of tens of seconds. You can try to increase the dimensions of the problem, but be aware than the time complexity is polynomial. """ # Author: Olivier Grisel <olivier.grisel@ensta.org> # Lars Buitinck <L.J.Buitinck@uva.nl> # License: BSD 3 clause from __future__ import print_function from time import time from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.decomposition import NMF from sklearn.datasets import fetch_20newsgroups n_samples = 2000 n_features = 1000 n_topics = 10 n_top_words = 20 # Load the 20 newsgroups dataset and vectorize it. We use a few heuristics # to filter out useless terms early on: the posts are stripped of headers, # footers and quoted replies, and common English words, words occurring in # only one document or in at least 95% of the documents are removed. t0 = time() print("Loading dataset and extracting TF-IDF features...") dataset = fetch_20newsgroups(shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes')) vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features, stop_words='english') tfidf = vectorizer.fit_transform(dataset.data[:n_samples]) print("done in %0.3fs." % (time() - t0)) # Fit the NMF model print("Fitting the NMF model with n_samples=%d and n_features=%d..." % (n_samples, n_features)) nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf) print("done in %0.3fs." 
% (time() - t0)) feature_names = vectorizer.get_feature_names() for topic_idx, topic in enumerate(nmf.components_): print("Topic #%d:" % topic_idx) print(" ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]])) print()
bsd-3-clause
michaelni/audacity
lib-src/lv2/sratom/waflib/Tools/perl.py
330
3016
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file

# waf tool: build Perl extensions (.xs -> .c via xsubpp) and detect the
# Perl interpreter / extension-development configuration.

import os
from waflib import Task,Options,Utils
from waflib.Configure import conf
from waflib.TaskGen import extension,feature,before_method

# Task-generator method for 'perlext' targets: pull in the PERLEXT uselib
# flags and make C/C++ shared libs use the perl extension filename pattern.
@before_method('apply_incpaths','apply_link','propagate_uselib_vars')
@feature('perlext')
def init_perlext(self):
	self.uselib=self.to_list(getattr(self,'uselib',[]))
	if not'PERLEXT'in self.uselib:self.uselib.append('PERLEXT')
	self.env['cshlib_PATTERN']=self.env['cxxshlib_PATTERN']=self.env['perlext_PATTERN']

# .xs sources: generate a .c file with xsubpp and feed it back into the build.
@extension('.xs')
def xsubpp_file(self,node):
	outnode=node.change_ext('.c')
	self.create_task('xsubpp',node,outnode)
	self.source.append(outnode)

# Task running the xsubpp preprocessor over an .xs file.
class xsubpp(Task.Task):
	run_str='${PERL} ${XSUBPP} -noprototypes -typemap ${EXTUTILS_TYPEMAP} ${SRC} > ${TGT}'
	color='BLUE'
	ext_out=['.h']

# Configuration check: locate perl (or use --with-perl-binary) and verify it
# meets the optional minimum version tuple, e.g. (5,8,0). Returns True/False.
@conf
def check_perl_version(self,minver=None):
	res=True
	if minver:
		cver='.'.join(map(str,minver))
	else:
		cver=''
	self.start_msg('Checking for minimum perl version %s'%cver)
	perl=getattr(Options.options,'perlbinary',None)
	if not perl:
		perl=self.find_program('perl',var='PERL')
	if not perl:
		self.end_msg("Perl not found",color="YELLOW")
		return False
	self.env['PERL']=perl
	# '%vd' formats $^V as a dotted decimal version string, e.g. "5.18.2".
	version=self.cmd_and_log([perl,"-e",'printf \"%vd\", $^V'])
	if not version:
		res=False
		version="Unknown"
	elif not minver is None:
		ver=tuple(map(int,version.split(".")))
		if ver<minver:
			res=False
	self.end_msg(version,color=res and"GREEN"or"YELLOW")
	return res

# Configuration check: probe 'use <module>' in the detected perl; returns the
# command output, or None if the module cannot be loaded.
@conf
def check_perl_module(self,module):
	cmd=[self.env['PERL'],'-e','use %s'%module]
	self.start_msg('perl module %s'%module)
	try:
		r=self.cmd_and_log(cmd)
	except Exception:
		self.end_msg(False)
		return None
	self.end_msg(r or True)
	return r

# Configuration check: query perl's %Config for the flags, include paths and
# helper paths needed to build XS extensions, and store them in the env.
@conf
def check_perl_ext_devel(self):
	env=self.env
	perl=env.PERL
	if not perl:
		self.fatal('find perl first')
	def read_out(cmd):
		# Run `perl <cmd>` and split the output into a list.
		return Utils.to_list(self.cmd_and_log(perl+cmd))
	env['LINKFLAGS_PERLEXT']=read_out(" -MConfig -e'print $Config{lddlflags}'")
	env['INCLUDES_PERLEXT']=read_out(" -MConfig -e'print \"$Config{archlib}/CORE\"'")
	env['CFLAGS_PERLEXT']=read_out(" -MConfig -e'print \"$Config{ccflags} $Config{cccdlflags}\"'")
	env['XSUBPP']=read_out(" -MConfig -e'print \"$Config{privlib}/ExtUtils/xsubpp$Config{exe_ext}\"'")
	env['EXTUTILS_TYPEMAP']=read_out(" -MConfig -e'print \"$Config{privlib}/ExtUtils/typemap\"'")
	# Install location: perl's sitearch unless overridden on the command line.
	if not getattr(Options.options,'perlarchdir',None):
		env['ARCHDIR_PERL']=self.cmd_and_log(perl+" -MConfig -e'print $Config{sitearch}'")
	else:
		env['ARCHDIR_PERL']=getattr(Options.options,'perlarchdir')
	# Shared-object name pattern, e.g. '%s.so' on Linux, '%s.bundle' on OS X.
	env['perlext_PATTERN']='%s.'+self.cmd_and_log(perl+" -MConfig -e'print $Config{dlext}'")

# Command-line options exposed by this tool.
def options(opt):
	opt.add_option('--with-perl-binary',type='string',dest='perlbinary',help='Specify alternate perl binary',default=None)
	opt.add_option('--with-perl-archdir',type='string',dest='perlarchdir',help='Specify directory where to install arch specific files',default=None)
gpl-2.0
sergiocorato/ocb-server-61
openerp/addons/base/res/ir_property.py
13
7906
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from osv import osv,fields
from tools.misc import attrgetter
import time

# -------------------------------------------------------------------------
# Properties
# -------------------------------------------------------------------------
class ir_property(osv.osv):
    # ir.property stores per-company (and optionally per-record) values for
    # "property" fields; the typed value_* columns hold the actual data and
    # 'type' selects which column is in use.
    _name = 'ir.property'

    def _models_field_get(self, cr, uid, field_key, field_value, context=None):
        """Collect distinct (field_key, field_value) attribute pairs over all
        ir.model.fields records with view_load set; used to build the
        selection lists of the reference columns below."""
        get = attrgetter(field_key, field_value)
        obj = self.pool.get('ir.model.fields')
        ids = obj.search(cr, uid, [('view_load','=',1)], context=context)
        res = set()
        for o in obj.browse(cr, uid, ids, context=context):
            res.add(get(o))
        return list(res)

    def _models_get(self, cr, uid, context=None):
        # Selection for 'res_id': models that carry property fields.
        return self._models_field_get(cr, uid, 'model', 'model_id.name', context)

    def _models_get2(self, cr, uid, context=None):
        # Selection for 'value_reference': the relation targets of those fields.
        return self._models_field_get(cr, uid, 'relation', 'relation', context)

    _columns = {
        'name': fields.char('Name', size=128, select=1),
        # res_id holds a "model,id" reference to the record this property is
        # attached to; left empty, the property acts as the default value.
        'res_id': fields.reference('Resource', selection=_models_get, size=128,
                                   help="If not set, acts as a default value for new resources", select=1),
        'company_id': fields.many2one('res.company', 'Company', select=1),
        'fields_id': fields.many2one('ir.model.fields', 'Field',
                                     ondelete='cascade', required=True, select=1),
        # One storage column per family of types; 'type' picks the active one.
        'value_float' : fields.float('Value'),
        'value_integer' : fields.integer_big('Value'), # will contain (int, bigint)
        'value_text' : fields.text('Value'), # will contain (char, text)
        'value_binary' : fields.binary('Value'),
        'value_reference': fields.reference('Value', selection=_models_get2, size=128),
        'value_datetime' : fields.datetime('Value'),
        'type' : fields.selection([('char', 'Char'),
                                   ('float', 'Float'),
                                   ('boolean', 'Boolean'),
                                   ('integer', 'Integer'),
                                   ('integer_big', 'Integer Big'),
                                   ('text', 'Text'),
                                   ('binary', 'Binary'),
                                   ('many2one', 'Many2One'),
                                   ('date', 'Date'),
                                   ('datetime', 'DateTime'),
                                  ],
                                  'Type',
                                  required=True,
                                  select=1),
    }

    _defaults = {
        'type': 'many2one',
    }

    def _update_values(self, cr, uid, ids, values):
        """Translate a generic 'value' key in ``values`` into the proper
        typed value_* column, resolving many2one values to "model,id"
        reference strings. Returns the (mutated) values dict."""
        value = values.pop('value', None)
        # NOTE(review): falsy values (0, '', False) are dropped here and never
        # written to a value_* column — looks intentional (reset to empty),
        # but verify against callers.
        if not value:
            return values

        prop = None
        type_ = values.get('type')
        if not type_:
            # No explicit type: reuse the type of the record being written,
            # or fall back to the model default on creation.
            if ids:
                prop = self.browse(cr, uid, ids[0])
                type_ = prop.type
            else:
                type_ = self._defaults['type']

        # Map each property type to its storage column.
        type2field = {
            'char': 'value_text',
            'float': 'value_float',
            'boolean' : 'value_integer',
            'integer': 'value_integer',
            'integer_big': 'value_integer',
            'text': 'value_text',
            'binary': 'value_binary',
            'many2one': 'value_reference',
            'date' : 'value_datetime',
            'datetime' : 'value_datetime',
        }
        field = type2field.get(type_)
        if not field:
            raise osv.except_osv('Error', 'Invalid type')

        if field == 'value_reference':
            if isinstance(value, osv.orm.browse_record):
                value = '%s,%d' % (value._name, value.id)
            elif isinstance(value, (int, long)):
                # A bare id: derive the target model from the property field.
                field_id = values.get('fields_id')
                if not field_id:
                    if not prop:
                        # Cannot resolve the relation without a field.
                        raise ValueError()
                    field_id = prop.fields_id
                else:
                    field_id = self.pool.get('ir.model.fields').browse(cr, uid, field_id)

                value = '%s,%d' % (field_id.relation, value)

        values[field] = value
        return values

    def write(self, cr, uid, ids, values, context=None):
        # Normalize 'value' into the typed column before the real write.
        return super(ir_property, self).write(cr, uid, ids, self._update_values(cr, uid, ids, values), context=context)

    def create(self, cr, uid, values, context=None):
        # Normalize 'value' into the typed column before the real create.
        return super(ir_property, self).create(cr, uid, self._update_values(cr, uid, None, values), context=context)

    def get_by_record(self, cr, uid, record, context=None):
        """Return the Python value of a property record, dispatching on its
        'type' to the matching value_* column. Returns False when unset."""
        if record.type in ('char', 'text'):
            return record.value_text
        elif record.type == 'float':
            return record.value_float
        elif record.type == 'boolean':
            return bool(record.value_integer)
        elif record.type in ('integer', 'integer_big'):
            return record.value_integer
        elif record.type == 'binary':
            return record.value_binary
        elif record.type == 'many2one':
            return record.value_reference
        elif record.type == 'datetime':
            return record.value_datetime
        elif record.type == 'date':
            if not record.value_datetime:
                return False
            # Dates are stored in the datetime column; strip the time part.
            return time.strftime('%Y-%m-%d', time.strptime(record.value_datetime, '%Y-%m-%d %H:%M:%S'))
        return False

    def get(self, cr, uid, name, model, res_id=False, context=None):
        """Return the value of property ``name`` of ``model`` for ``res_id``
        (or the default property when res_id is False); False if absent."""
        domain = self._get_domain(cr, uid, name, model, context=context)
        if domain is not None:
            domain = [('res_id', '=', res_id)] + domain
            # NOTE(review): several matches are possible (per-company rows);
            # the first search result wins — ordering is the model default.
            nid = self.search(cr, uid, domain, context=context)
            if not nid:
                return False
            record = self.browse(cr, uid, nid[0], context=context)
            return self.get_by_record(cr, uid, record, context=context)
        return False

    def _get_domain_default(self, cr, uid, prop_name, model, context=None):
        # Same as _get_domain but restricted to default (record-less) properties.
        domain = self._get_domain(cr, uid, prop_name, model, context=context)
        if domain is None:
            return None
        return ['&', ('res_id', '=', False)] + domain

    def _get_domain(self, cr, uid, prop_name, model, context=None):
        """Build the search domain matching property ``prop_name`` on
        ``model`` for the current (or forced) company; None if the field
        does not exist."""
        context = context or {}
        cr.execute('select id from ir_model_fields where name=%s and model=%s', (prop_name, model))
        res = cr.fetchone()
        if not res:
            return None

        # Company scoping: an explicitly forced company wins over the
        # user/model default company.
        if 'force_company' in context and context['force_company']:
            cid = context['force_company']
        else:
            company = self.pool.get('res.company')
            cid = company._company_default_get(cr, uid, model, res[0], context=context)

        # Match the field and either the resolved company or no company at all.
        domain = ['&', ('fields_id', '=', res[0]),
                  '|', ('company_id', '=', cid), ('company_id', '=', False)]
        return domain

ir_property()

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
lshain-android-source/external-chromium_org
tools/telemetry/telemetry/core/chrome/misc_web_contents_backend.py
23
1489
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import json

from telemetry.core import web_contents
from telemetry.core.chrome import inspector_backend


class MiscWebContentsBackend(object):
  """Provides access to the chrome://oobe/login page, which is neither an
  extension nor a tab."""

  def __init__(self, browser_backend):
    self._browser_backend = browser_backend

  def GetOobe(self):
    """Returns a WebContents wrapping the OOBE page, or None if not found."""
    info = self._FindWebContentsInfo()
    if not info:
      return None
    debugger_url = info.get('webSocketDebuggerUrl')
    if not debugger_url:
      return None
    return web_contents.WebContents(self._CreateInspectorBackend(debugger_url))

  def _CreateInspectorBackend(self, debugger_url):
    """Opens an inspector backend over the given debugger websocket URL."""
    return inspector_backend.InspectorBackend(self._browser_backend.browser,
                                              self._browser_backend,
                                              debugger_url)

  def _ListWebContents(self, timeout=None):
    """Fetches and decodes the browser's JSON list of debuggable targets."""
    return json.loads(self._browser_backend.Request('', timeout=timeout))

  def _FindWebContentsInfo(self):
    """Returns the target info dict for the OOBE page, or None."""
    for info in self._ListWebContents():
      # Prior to crrev.com/203152, url was chrome://oobe/login.
      if info.get('url').startswith('chrome://oobe'):
        return info
    return None
bsd-3-clause
ghostlander/p2pool-neoscrypt
nattraverso/pynupnp/upnpxml.py
288
3026
""" This module parse an UPnP device's XML definition in an Object. @author: Raphael Slinckx @copyright: Copyright 2005 @license: LGPL @contact: U{raphael@slinckx.net<mailto:raphael@slinckx.net>} @version: 0.1.0 """ __revision__ = "$id" from xml.dom import minidom import logging # Allowed UPnP services to use when mapping ports/external addresses WANSERVICES = ['urn:schemas-upnp-org:service:WANIPConnection:1', 'urn:schemas-upnp-org:service:WANPPPConnection:1'] class UPnPXml: """ This objects parses the XML definition, and stores the useful results in attributes. The device infos dictionnary may contain the following keys: - friendlyname: A friendly name to call the device. - manufacturer: A manufacturer name for the device. Here are the different attributes: - deviceinfos: A dictionnary of device infos as defined above. - controlurl: The control url, this is the url to use when sending SOAP requests to the device, relative to the base url. - wanservice: The WAN service to be used, one of the L{WANSERVICES} - urlbase: The base url to use when talking in SOAP to the device. The full url to use is obtained by urljoin(urlbase, controlurl) """ def __init__(self, xml): """ Parse the given XML string for UPnP infos. This creates the attributes when they are found, or None if no value was found. 
@param xml: a xml string to parse """ logging.debug("Got UPNP Xml description:\n%s", xml) doc = minidom.parseString(xml) # Fetch various device info self.deviceinfos = {} try: attributes = { 'friendlyname':'friendlyName', 'manufacturer' : 'manufacturer' } device = doc.getElementsByTagName('device')[0] for name, tag in attributes.iteritems(): try: self.deviceinfos[name] = device.getElementsByTagName( tag)[0].firstChild.datas.encode('utf-8') except: pass except: pass # Fetch device control url self.controlurl = None self.wanservice = None for service in doc.getElementsByTagName('service'): try: stype = service.getElementsByTagName( 'serviceType')[0].firstChild.data.encode('utf-8') if stype in WANSERVICES: self.controlurl = service.getElementsByTagName( 'controlURL')[0].firstChild.data.encode('utf-8') self.wanservice = stype break except: pass # Find base url self.urlbase = None try: self.urlbase = doc.getElementsByTagName( 'URLBase')[0].firstChild.data.encode('utf-8') except: pass
gpl-3.0
andela-bojengwa/talk
venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/chardetect.py
1786
2504
#!/usr/bin/env python """ Script which takes one or more file paths and reports on their detected encodings Example:: % chardetect somefile someotherfile somefile: windows-1252 with confidence 0.5 someotherfile: ascii with confidence 1.0 If no paths are provided, it takes its input from stdin. """ from __future__ import absolute_import, print_function, unicode_literals import argparse import sys from io import open from chardet import __version__ from chardet.universaldetector import UniversalDetector def description_of(lines, name='stdin'): """ Return a string describing the probable encoding of a file or list of strings. :param lines: The lines to get the encoding of. :type lines: Iterable of bytes :param name: Name of file or collection of lines :type name: str """ u = UniversalDetector() for line in lines: u.feed(line) u.close() result = u.result if result['encoding']: return '{0}: {1} with confidence {2}'.format(name, result['encoding'], result['confidence']) else: return '{0}: no result'.format(name) def main(argv=None): ''' Handles command line arguments and gets things started. :param argv: List of arguments, as if specified on the command-line. If None, ``sys.argv[1:]`` is used instead. :type argv: list of str ''' # Get command line arguments parser = argparse.ArgumentParser( description="Takes one or more file paths and reports their detected \ encodings", formatter_class=argparse.ArgumentDefaultsHelpFormatter, conflict_handler='resolve') parser.add_argument('input', help='File whose encoding we would like to determine.', type=argparse.FileType('rb'), nargs='*', default=[sys.stdin]) parser.add_argument('--version', action='version', version='%(prog)s {0}'.format(__version__)) args = parser.parse_args(argv) for f in args.input: if f.isatty(): print("You are running chardetect interactively. Press " + "CTRL-D twice at the start of a blank line to signal the " + "end of your input. 
If you want help, run chardetect " + "--help\n", file=sys.stderr) print(description_of(f, f.name)) if __name__ == '__main__': main()
mit
BrittAndrews/martin-june
node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/xml_fix.py
2767
2174
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Applies a fix to CR LF TAB handling in xml.dom.

Fixes this: http://code.google.com/p/chromium/issues/detail?id=76293
Working around this: http://bugs.python.org/issue5752
TODO(bradnelson): Consider dropping this when we drop XP support.
"""

import xml.dom.minidom


def _Replacement_write_data(writer, data, is_attrib=False):
  """Writes datachars to writer, escaping markup characters.

  When is_attrib is True, CR/LF/TAB are additionally written as numeric
  character references so they survive an XML round-trip inside attributes.
  """
  data = data.replace("&", "&amp;").replace("<", "&lt;")
  data = data.replace("\"", "&quot;").replace(">", "&gt;")
  if is_attrib:
    data = data.replace(
        "\r", "&#xD;").replace(
        "\n", "&#xA;").replace(
        "\t", "&#x9;")
  writer.write(data)


def _Replacement_writexml(self, writer, indent="", addindent="", newl=""):
  """Replacement for Element.writexml that escapes attribute whitespace."""
  # indent = current indentation
  # addindent = indentation to add to higher levels
  # newl = newline string
  writer.write(indent+"<" + self.tagName)

  attrs = self._get_attributes()
  # BUG FIX: was `a_names = attrs.keys(); a_names.sort()`, which fails on
  # Python 3 where keys() returns a view without .sort(); sorted() produces
  # the same ordered list on both Python 2 and 3.
  a_names = sorted(attrs.keys())

  for a_name in a_names:
    writer.write(" %s=\"" % a_name)
    _Replacement_write_data(writer, attrs[a_name].value, is_attrib=True)
    writer.write("\"")
  if self.childNodes:
    writer.write(">%s" % newl)
    for node in self.childNodes:
      node.writexml(writer, indent + addindent, addindent, newl)
    writer.write("%s</%s>%s" % (indent, self.tagName, newl))
  else:
    writer.write("/>%s" % newl)


class XmlFix(object):
  """Object to manage temporary patching of xml.dom.minidom."""

  def __init__(self):
    # Preserve current xml.dom.minidom functions.
    self.write_data = xml.dom.minidom._write_data
    self.writexml = xml.dom.minidom.Element.writexml
    # Inject replacement versions of a function and a method.
    xml.dom.minidom._write_data = _Replacement_write_data
    xml.dom.minidom.Element.writexml = _Replacement_writexml

  def Cleanup(self):
    """Restores the original minidom functions; safe to call twice."""
    if self.write_data:
      xml.dom.minidom._write_data = self.write_data
      xml.dom.minidom.Element.writexml = self.writexml
      self.write_data = None

  def __del__(self):
    self.Cleanup()
apache-2.0
pratapvardhan/scikit-learn
examples/linear_model/plot_sparse_recovery.py
70
7486
""" ============================================================ Sparse recovery: feature selection for sparse linear models ============================================================ Given a small number of observations, we want to recover which features of X are relevant to explain y. For this :ref:`sparse linear models <l1_feature_selection>` can outperform standard statistical tests if the true model is sparse, i.e. if a small fraction of the features are relevant. As detailed in :ref:`the compressive sensing notes <compressive_sensing>`, the ability of L1-based approach to identify the relevant variables depends on the sparsity of the ground truth, the number of samples, the number of features, the conditioning of the design matrix on the signal subspace, the amount of noise, and the absolute value of the smallest non-zero coefficient [Wainwright2006] (http://statistics.berkeley.edu/sites/default/files/tech-reports/709.pdf). Here we keep all parameters constant and vary the conditioning of the design matrix. For a well-conditioned design matrix (small mutual incoherence) we are exactly in compressive sensing conditions (i.i.d Gaussian sensing matrix), and L1-recovery with the Lasso performs very well. For an ill-conditioned matrix (high mutual incoherence), regressors are very correlated, and the Lasso randomly selects one. However, randomized-Lasso can recover the ground truth well. In each situation, we first vary the alpha parameter setting the sparsity of the estimated model and look at the stability scores of the randomized Lasso. This analysis, knowing the ground truth, shows an optimal regime in which relevant features stand out from the irrelevant ones. If alpha is chosen too small, non-relevant variables enter the model. On the opposite, if alpha is selected too large, the Lasso is equivalent to stepwise regression, and thus brings no advantage over a univariate F-test. 
In a second time, we set alpha and compare the performance of different feature selection methods, using the area under curve (AUC) of the precision-recall. """ print(__doc__) # Author: Alexandre Gramfort and Gael Varoquaux # License: BSD 3 clause import warnings import matplotlib.pyplot as plt import numpy as np from scipy import linalg from sklearn.linear_model import (RandomizedLasso, lasso_stability_path, LassoLarsCV) from sklearn.feature_selection import f_regression from sklearn.preprocessing import StandardScaler from sklearn.metrics import auc, precision_recall_curve from sklearn.ensemble import ExtraTreesRegressor from sklearn.utils.extmath import pinvh from sklearn.exceptions import ConvergenceWarning def mutual_incoherence(X_relevant, X_irelevant): """Mutual incoherence, as defined by formula (26a) of [Wainwright2006]. """ projector = np.dot(np.dot(X_irelevant.T, X_relevant), pinvh(np.dot(X_relevant.T, X_relevant))) return np.max(np.abs(projector).sum(axis=1)) for conditioning in (1, 1e-4): ########################################################################### # Simulate regression data with a correlated design n_features = 501 n_relevant_features = 3 noise_level = .2 coef_min = .2 # The Donoho-Tanner phase transition is around n_samples=25: below we # will completely fail to recover in the well-conditioned case n_samples = 25 block_size = n_relevant_features rng = np.random.RandomState(42) # The coefficients of our model coef = np.zeros(n_features) coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features) # The correlation of our design: variables correlated by blocs of 3 corr = np.zeros((n_features, n_features)) for i in range(0, n_features, block_size): corr[i:i + block_size, i:i + block_size] = 1 - conditioning corr.flat[::n_features + 1] = 1 corr = linalg.cholesky(corr) # Our design X = rng.normal(size=(n_samples, n_features)) X = np.dot(X, corr) # Keep [Wainwright2006] (26c) constant X[:n_relevant_features] /= np.abs( 
linalg.svdvals(X[:n_relevant_features])).max() X = StandardScaler().fit_transform(X.copy()) # The output variable y = np.dot(X, coef) y /= np.std(y) # We scale the added noise as a function of the average correlation # between the design and the output variable y += noise_level * rng.normal(size=n_samples) mi = mutual_incoherence(X[:, :n_relevant_features], X[:, n_relevant_features:]) ########################################################################### # Plot stability selection path, using a high eps for early stopping # of the path, to save computation time alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42, eps=0.05) plt.figure() # We plot the path as a function of alpha/alpha_max to the power 1/3: the # power 1/3 scales the path less brutally than the log, and enables to # see the progression along the path hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r') hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k') ymin, ymax = plt.ylim() plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$') plt.ylabel('Stability score: proportion of times selected') plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi) plt.axis('tight') plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'), loc='best') ########################################################################### # Plot the estimated stability scores for a given alpha # Use 6-fold cross-validation rather than the default 3-fold: it leads to # a better choice of alpha: # Stop the user warnings outputs- they are not necessary for the example # as it is specifically set up to be challenging. 
with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) warnings.simplefilter('ignore', ConvergenceWarning) lars_cv = LassoLarsCV(cv=6).fit(X, y) # Run the RandomizedLasso: we use a paths going down to .1*alpha_max # to avoid exploring the regime in which very noisy variables enter # the model alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6) clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y) trees = ExtraTreesRegressor(100).fit(X, y) # Compare with F-score F, _ = f_regression(X, y) plt.figure() for name, score in [('F-test', F), ('Stability selection', clf.scores_), ('Lasso coefs', np.abs(lars_cv.coef_)), ('Trees', trees.feature_importances_), ]: precision, recall, thresholds = precision_recall_curve(coef != 0, score) plt.semilogy(np.maximum(score / np.max(score), 1e-4), label="%s. AUC: %.3f" % (name, auc(recall, precision))) plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo', label="Ground truth") plt.xlabel("Features") plt.ylabel("Score") # Plot only the 100 first coefficients plt.xlim(0, 100) plt.legend(loc='best') plt.title('Feature selection scores - Mutual incoherence: %.1f' % mi) plt.show()
bsd-3-clause
pbvarga1/pdsview
pdsview/channels_dialog.py
3
6911
import numpy as np
from qtpy import QtWidgets, QtCore

from .band_widget import BandWidget, BandWidgetModel


class ChannelsDialogModel(object):
    """Model behind the channels dialog.

    Holds the three per-channel :class:`BandWidgetModel` instances (red,
    green, blue), tracks which image band is current in the main window,
    and notifies registered views when the composite must be redrawn.
    """

    def __init__(self, main_window):
        # Views registered via register(); each must provide
        # display_composite_image().
        self._views = set()
        self.main_window = main_window
        # Index of the main window's current band within image_names.
        self.current_index = self.image_names.index(
            main_window.current_image.image_name
        )
        self.red_model = BandWidgetModel(self, 0, 'Red')
        self.green_model = BandWidgetModel(self, 1, 'Green')
        self.blue_model = BandWidgetModel(self, 2, 'Blue')
        self.rgb_models = self.red_model, self.green_model, self.blue_model
        # Cached band index chosen by each channel's menu (R, G, B order).
        self.menu_indices = [model.index for model in self.rgb_models]

    @property
    def images(self):
        """All bands of all images in the main window, as a flat list."""
        images = self.main_window.image_set.images
        flatten_images = [band for image in images for band in image]
        return flatten_images

    @property
    def rgb(self):
        """The main window's current (red, green, blue) band selection."""
        return self.main_window.image_set.rgb

    @property
    def image_names(self):
        """Names of every band, parallel to :attr:`images`."""
        return [band.image_name for band in self.images]

    @property
    def rgb_names(self):
        """Names of the currently selected RGB bands."""
        return [band.image_name for band in self.rgb]

    @property
    def alphas(self):
        """Per-channel opacity as 0.0-1.0 fractions (sliders hold 0-100)."""
        return [model.alpha_value / 100.
                for model in self.rgb_models]

    def update_image(self):
        """Ask every registered view to redraw the composite image."""
        for view in self._views:
            view.display_composite_image()

    def register(self, view):
        """Register a view with the model"""
        self._views.add(view)

    def unregister(self, view):
        """Unregister a view with the model"""
        self._views.remove(view)


class ChannelsDialogController(object):
    """Controller mediating between ChannelsDialogModel and ChannelsDialog."""

    def __init__(self, model, view):
        self.model = model
        self.view = view

    def update_current_index(self):
        """Re-sync current_index with the main window's current image."""
        self.model.current_index = self.model.image_names.index(
            self.model.main_window.current_image.image_name
        )

    def update_menu_indices(self):
        """Refresh the cached per-channel menu indices from the models."""
        self.model.menu_indices = [
            model.index for model in self.model.rgb_models
        ]


class ChannelsDialog(QtWidgets.QDialog):
    """Dialog that lets the user pick R/G/B bands and build a composite."""

    def __init__(self, model):
        self.model = model
        self.model.register(self)
        self.controller = ChannelsDialogController(model, self)
        super(ChannelsDialog, self).__init__()
        # Create display of image names and highlight the current image/channel
        self.image_tree = QtWidgets.QTreeWidget()
        self.image_tree.setColumnCount(1)
        self.image_tree.setHeaderLabel('Channels')
        self.items = []
        for image_name in self.model.image_names:
            item = QtWidgets.QTreeWidgetItem(self.image_tree)
            item.setText(0, image_name)
            self.items.append(item)
        self.image_tree.insertTopLevelItems(1, self.items)
        # Selection is driven programmatically (setSelected below), not by
        # the user, hence NoSelection mode.
        self.image_tree.setSelectionMode(
            QtWidgets.QAbstractItemView.NoSelection)
        # highlight the current image
        self.current_item.setSelected(True)
        self.image_tree.setIndentation(0)
        self.image_tree.setFixedWidth(400)
        self.rgb_check_box = QtWidgets.QCheckBox("RGB")
        self.rgb_check_box.stateChanged.connect(self.check_rgb)
        self.red_widget = BandWidget(self.model.red_model)
        self.green_widget = BandWidget(self.model.green_model)
        self.blue_widget = BandWidget(self.model.blue_model)
        self.close_button = QtWidgets.QPushButton('Close')
        self.close_button.clicked.connect(self.close_dialog)
        self.layout = QtWidgets.QGridLayout()
        widgets = [
            self.image_tree, self.rgb_check_box, self.red_widget,
            self.green_widget, self.blue_widget, self.close_button
        ]
        for row, widget in enumerate(widgets):
            self.layout.addWidget(widget, row, 0)
            self.layout.setAlignment(widget, QtCore.Qt.AlignHCenter)
        self.controller.update_menu_indices()
        self.setLayout(self.layout)
        # Match the rgb check box state from the main window
        # NOTE: setCheckState may fire check_rgb immediately via the
        # stateChanged connection above.
        state = self.model.main_window.rgb_check_box.checkState()
        self.rgb_check_box.setCheckState(state)

    @property
    def current_item(self):
        """Tree item corresponding to the model's current band."""
        return self.items[self.model.current_index]

    def check_rgb(self, state):
        """Displays the rgb image when checked, single band otherwise"""
        if state == QtCore.Qt.Checked:
            self.model.main_window.rgb_check_box.setCheckState(
                QtCore.Qt.Checked
            )
            self.model.main_window.switch_rgb(state)
            self.display_composite_image()
        elif state == QtCore.Qt.Unchecked:
            # Unchecking the main window's box presumably triggers its own
            # handler to restore the single-band view — confirm in caller.
            self.model.main_window.rgb_check_box.setCheckState(
                QtCore.Qt.Unchecked)

    def create_composite_image(self):
        """Stack the three alpha-scaled bands into one array.

        Raises ValueError (from np.dstack) when band shapes differ;
        display_composite_image() handles that case.
        """
        composite_layers = []
        for band, alpha in zip(self.model.rgb, self.model.alphas):
            layer_data = band.data * alpha
            composite_layers.append(layer_data)
        composite_image = np.dstack(composite_layers)
        return composite_image

    def set_rgb_image(self):
        """Push the composite into the main window and lock channel paging."""
        composite_image = self.create_composite_image()
        self.model.main_window.current_image.set_data(composite_image)
        self.controller.update_menu_indices()
        # Channel stepping makes no sense while a 3-band composite is shown.
        self.model.main_window.next_channel_btn.setEnabled(False)
        self.model.main_window.previous_channel_btn.setEnabled(False)

    def set_menus_index(self):
        """Point each band menu at its cached index (without side updates)."""
        widgets = self.red_widget, self.green_widget, self.blue_widget
        for widget, index in zip(widgets, self.model.menu_indices):
            widget.menu.setCurrentIndex(index)
            widget.controller.update_index(index, False)

    def reset_gray_image(self):
        """Fall back to single-band display after a failed composite."""
        self.rgb_check_box.setCheckState(QtCore.Qt.Unchecked)
        self.set_menus_index()

    def display_composite_image(self):
        """Display the rgb composite image"""
        if self.rgb_check_box.checkState() == QtCore.Qt.Unchecked:
            return
        try:
            self.set_rgb_image()
        except ValueError:
            # Bands of mismatched shape cannot be stacked; revert to gray.
            print("Images must be the same size")
            self.reset_gray_image()

    def update_menus_current_item(self):
        """Reset every channel menu to follow the newly current image."""
        for widget in self.red_widget, self.green_widget, self.blue_widget:
            widget.controller.reset_index()
        self.controller.update_menu_indices()
        self.set_menus_index()

    def change_image(self):
        """Change the menu and image list when the image is changed"""
        self.current_item.setSelected(False)
        self.controller.update_current_index()
        self.current_item.setSelected(True)
        self.update_menus_current_item()
        self.display_composite_image()

    def closeEvent(self, event):
        # Qt close (window X button) funnels into the same cleanup path.
        self.close_dialog()

    def close_dialog(self):
        """Close the dialog and save the position"""
        self.model.main_window.channels_window_is_open = False
        self.model.main_window.channels_window_pos = self.pos()
        self.hide()
bsd-3-clause
DolphinDream/sverchok
nodes/modifier_change/rigid_origami.py
2
4958
# This file is part of project Sverchok. It's copyrighted by the contributors
# recorded in the version control history of the file, available from
# its original location https://github.com/nortikin/sverchok/commit/master
#
# SPDX-License-Identifier: GPL3
# License-Filename: LICENSE

import bpy
from bpy.props import IntProperty, FloatProperty

from sverchok.data_structure import updateNode, match_long_repeat
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.utils.rigid_origami_utils import ObjectParams, \
    CreaseLines, InsideVertex, FoldAngleCalculator, FaceRotation


class SvRigidOrigamiNode(bpy.types.Node, SverchCustomTreeNode):
    """
    Triggers: Rigid Origami
    Tooltip: Fold a paper like a rigid origami
    """
    bl_idname = 'SvRigidOrigamiNode'
    bl_label = 'Rigid Origami'
    bl_icon = 'OUTLINER_OB_EMPTY'
    sv_icon = 'SV_ORIGAMI'

    # Interpolation parameter for the fold: 0.0 = flat sheet, 1.0 = fully
    # folded to the target crease angles.
    folding_ratio : FloatProperty(
        name="Folding ratio",
        description="folding ratio from 0.0 to 1.0",
        default=0.0, min=0.0, max=1.0,
        update=updateNode)

    # Number of sub-steps used when integrating fold angles; more steps is
    # more precise but slower.
    division_count : IntProperty(
        name="Division count",
        description="Count of dividing angles to calculate precisely",
        default=20, min=1, max=100,
        update=updateNode)

    # Index of the face that stays fixed in space while the rest folds.
    fixed_face_index : IntProperty(
        name="Fixed face index",
        description="index of fixed face when folding",
        default=0,
        update=updateNode)

    def sv_init(self, context):
        """Declare the node's input and output sockets."""
        self.inputs.new('SvVerticesSocket', 'Vertices')
        self.inputs.new('SvStringsSocket', 'Edges')
        self.inputs.new('SvStringsSocket', 'Faces')
        self.inputs.new('SvStringsSocket', 'Fold edge indices')
        self.inputs.new('SvStringsSocket', 'Fold edge angles')
        self.inputs.new('SvStringsSocket', 'Folding ratio').prop_name = 'folding_ratio'
        self.inputs.new('SvStringsSocket', 'Division count').prop_name = 'division_count'
        self.inputs.new('SvStringsSocket', 'Fixed face index').prop_name = 'fixed_face_index'
        self.outputs.new('SvVerticesSocket', 'Vertices')

    def process(self):
        """Fold each incoming mesh and emit the rotated vertices."""
        # Nothing to compute if no output socket is connected.
        if not any(socket.is_linked for socket in self.outputs):
            return
        # The crease description (which edges fold, and by how much) is
        # mandatory input.
        if not self.inputs['Fold edge indices'].is_linked \
           or not self.inputs['Fold edge angles'].is_linked:
            return

        verts_in = self.inputs['Vertices'].sv_get()
        edges_in = self.inputs['Edges'].sv_get()
        faces_in = self.inputs['Faces'].sv_get()
        fold_edge_indices = self.inputs['Fold edge indices'].sv_get()
        fold_edge_angles = self.inputs['Fold edge angles'].sv_get()
        folding_ratio = self.inputs['Folding ratio'].sv_get()
        division_count = self.inputs['Division count'].sv_get()
        fixed_face_index = self.inputs['Fixed face index'].sv_get()

        # Repeat shorter lists so every mesh gets a full parameter set.
        meshes = match_long_repeat([verts_in, edges_in, faces_in, \
            fold_edge_indices, fold_edge_angles, folding_ratio, \
            division_count, fixed_face_index])

        verts_out = []
        for verts, edges, faces, edge_indices, edge_angles, \
                folding, step, fixed_face in zip(*meshes):
            # Scalar parameters may arrive wrapped in one-element lists.
            if isinstance(folding, (list, tuple)):
                folding = folding[0]
            if isinstance(step, (list, tuple)):
                step = step[0]
            if isinstance(fixed_face, (list, tuple)):
                fixed_face = fixed_face[0]

            # Fall back to the unfolded vertices when there is nothing to do.
            verts_o = verts
            # Initialize before the try block: if ObjectParams() itself
            # raises, the finally clause below would otherwise fail with
            # UnboundLocalError and mask the original exception.
            obj = None
            try:
                # Wrap object
                obj = ObjectParams(verts, edges, faces)

                # Extract crease lines
                crease_lines = CreaseLines(obj, edge_indices,
                                           edge_angles, folding)
                if edge_indices:
                    # Extract inside vertices
                    inside_vertices = InsideVertex.generate_inside_vertices( \
                        obj, crease_lines)

                    # Calculation loop to determine the final angles
                    FoldAngleCalculator.calc_fold_angle(step, crease_lines,
                                                        inside_vertices)
                    crease_lines.delta_angles = [cur_rho - angle for cur_rho, angle \
                        in zip(FoldAngleCalculator.current_rhos, crease_lines.angles)]

                    # Rotate each faces using final angles
                    FaceRotation.obj = obj
                    FaceRotation.inside_vertices = inside_vertices
                    FaceRotation.crease_lines = crease_lines
                    FaceRotation.fixed_face_index = int(fixed_face)
                    verts_o = FaceRotation.rotate_faces()
                verts_out.append(verts_o)
            finally:
                # Always release the wrapped mesh resources.
                if obj is not None:
                    obj.free()

        self.outputs['Vertices'].sv_set(verts_out)


def register():
    bpy.utils.register_class(SvRigidOrigamiNode)


def unregister():
    bpy.utils.unregister_class(SvRigidOrigamiNode)
gpl-3.0
hvanhovell/spark
python/pyspark/mllib/linalg/distributed.py
20
55246
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Package for distributed linear algebra. """ import sys if sys.version >= '3': long = int from py4j.java_gateway import JavaObject from pyspark import RDD, since from pyspark.mllib.common import callMLlibFunc, JavaModelWrapper from pyspark.mllib.linalg import _convert_to_vector, DenseMatrix, Matrix, QRDecomposition from pyspark.mllib.stat import MultivariateStatisticalSummary from pyspark.sql import DataFrame from pyspark.storagelevel import StorageLevel __all__ = ['BlockMatrix', 'CoordinateMatrix', 'DistributedMatrix', 'IndexedRow', 'IndexedRowMatrix', 'MatrixEntry', 'RowMatrix', 'SingularValueDecomposition'] class DistributedMatrix(object): """ Represents a distributively stored matrix backed by one or more RDDs. """ def numRows(self): """Get or compute the number of rows.""" raise NotImplementedError def numCols(self): """Get or compute the number of cols.""" raise NotImplementedError class RowMatrix(DistributedMatrix): """ Represents a row-oriented distributed Matrix with no meaningful row indices. :param rows: An RDD or DataFrame of vectors. If a DataFrame is provided, it must have a single vector typed column. :param numRows: Number of rows in the matrix. 
A non-positive value means unknown, at which point the number of rows will be determined by the number of records in the `rows` RDD. :param numCols: Number of columns in the matrix. A non-positive value means unknown, at which point the number of columns will be determined by the size of the first row. """ def __init__(self, rows, numRows=0, numCols=0): """ Note: This docstring is not shown publicly. Create a wrapper over a Java RowMatrix. Publicly, we require that `rows` be an RDD or DataFrame. However, for internal usage, `rows` can also be a Java RowMatrix object, in which case we can wrap it directly. This assists in clean matrix conversions. >>> rows = sc.parallelize([[1, 2, 3], [4, 5, 6]]) >>> mat = RowMatrix(rows) >>> mat_diff = RowMatrix(rows) >>> (mat_diff._java_matrix_wrapper._java_model == ... mat._java_matrix_wrapper._java_model) False >>> mat_same = RowMatrix(mat._java_matrix_wrapper._java_model) >>> (mat_same._java_matrix_wrapper._java_model == ... mat._java_matrix_wrapper._java_model) True """ if isinstance(rows, RDD): rows = rows.map(_convert_to_vector) java_matrix = callMLlibFunc("createRowMatrix", rows, long(numRows), int(numCols)) elif isinstance(rows, DataFrame): java_matrix = callMLlibFunc("createRowMatrix", rows, long(numRows), int(numCols)) elif (isinstance(rows, JavaObject) and rows.getClass().getSimpleName() == "RowMatrix"): java_matrix = rows else: raise TypeError("rows should be an RDD of vectors, got %s" % type(rows)) self._java_matrix_wrapper = JavaModelWrapper(java_matrix) @property def rows(self): """ Rows of the RowMatrix stored as an RDD of vectors. >>> mat = RowMatrix(sc.parallelize([[1, 2, 3], [4, 5, 6]])) >>> rows = mat.rows >>> rows.first() DenseVector([1.0, 2.0, 3.0]) """ return self._java_matrix_wrapper.call("rows") def numRows(self): """ Get or compute the number of rows. >>> rows = sc.parallelize([[1, 2, 3], [4, 5, 6], ... 
[7, 8, 9], [10, 11, 12]]) >>> mat = RowMatrix(rows) >>> print(mat.numRows()) 4 >>> mat = RowMatrix(rows, 7, 6) >>> print(mat.numRows()) 7 """ return self._java_matrix_wrapper.call("numRows") def numCols(self): """ Get or compute the number of cols. >>> rows = sc.parallelize([[1, 2, 3], [4, 5, 6], ... [7, 8, 9], [10, 11, 12]]) >>> mat = RowMatrix(rows) >>> print(mat.numCols()) 3 >>> mat = RowMatrix(rows, 7, 6) >>> print(mat.numCols()) 6 """ return self._java_matrix_wrapper.call("numCols") @since('2.0.0') def computeColumnSummaryStatistics(self): """ Computes column-wise summary statistics. :return: :class:`MultivariateStatisticalSummary` object containing column-wise summary statistics. >>> rows = sc.parallelize([[1, 2, 3], [4, 5, 6]]) >>> mat = RowMatrix(rows) >>> colStats = mat.computeColumnSummaryStatistics() >>> colStats.mean() array([ 2.5, 3.5, 4.5]) """ java_col_stats = self._java_matrix_wrapper.call("computeColumnSummaryStatistics") return MultivariateStatisticalSummary(java_col_stats) @since('2.0.0') def computeCovariance(self): """ Computes the covariance matrix, treating each row as an observation. .. note:: This cannot be computed on matrices with more than 65535 columns. >>> rows = sc.parallelize([[1, 2], [2, 1]]) >>> mat = RowMatrix(rows) >>> mat.computeCovariance() DenseMatrix(2, 2, [0.5, -0.5, -0.5, 0.5], 0) """ return self._java_matrix_wrapper.call("computeCovariance") @since('2.0.0') def computeGramianMatrix(self): """ Computes the Gramian matrix `A^T A`. .. note:: This cannot be computed on matrices with more than 65535 columns. >>> rows = sc.parallelize([[1, 2, 3], [4, 5, 6]]) >>> mat = RowMatrix(rows) >>> mat.computeGramianMatrix() DenseMatrix(3, 3, [17.0, 22.0, 27.0, 22.0, 29.0, 36.0, 27.0, 36.0, 45.0], 0) """ return self._java_matrix_wrapper.call("computeGramianMatrix") @since('2.0.0') def columnSimilarities(self, threshold=0.0): """ Compute similarities between columns of this matrix. 
The threshold parameter is a trade-off knob between estimate quality and computational cost. The default threshold setting of 0 guarantees deterministically correct results, but uses the brute-force approach of computing normalized dot products. Setting the threshold to positive values uses a sampling approach and incurs strictly less computational cost than the brute-force approach. However the similarities computed will be estimates. The sampling guarantees relative-error correctness for those pairs of columns that have similarity greater than the given similarity threshold. To describe the guarantee, we set some notation: * Let A be the smallest in magnitude non-zero element of this matrix. * Let B be the largest in magnitude non-zero element of this matrix. * Let L be the maximum number of non-zeros per row. For example, for {0,1} matrices: A=B=1. Another example, for the Netflix matrix: A=1, B=5 For those column pairs that are above the threshold, the computed similarity is correct to within 20% relative error with probability at least 1 - (0.981)^10/B^ The shuffle size is bounded by the *smaller* of the following two expressions: * O(n log(n) L / (threshold * A)) * O(m L^2^) The latter is the cost of the brute-force approach, so for non-zero thresholds, the cost is always cheaper than the brute-force approach. :param: threshold: Set to 0 for deterministic guaranteed correctness. Similarities above this threshold are estimated with the cost vs estimate quality trade-off described above. :return: An n x n sparse upper-triangular CoordinateMatrix of cosine similarities between columns of this matrix. >>> rows = sc.parallelize([[1, 2], [1, 5]]) >>> mat = RowMatrix(rows) >>> sims = mat.columnSimilarities() >>> sims.entries.first().value 0.91914503... 
""" java_sims_mat = self._java_matrix_wrapper.call("columnSimilarities", float(threshold)) return CoordinateMatrix(java_sims_mat) @since('2.0.0') def tallSkinnyQR(self, computeQ=False): """ Compute the QR decomposition of this RowMatrix. The implementation is designed to optimize the QR decomposition (factorization) for the RowMatrix of a tall and skinny shape. Reference: Paul G. Constantine, David F. Gleich. "Tall and skinny QR factorizations in MapReduce architectures" ([[https://doi.org/10.1145/1996092.1996103]]) :param: computeQ: whether to computeQ :return: QRDecomposition(Q: RowMatrix, R: Matrix), where Q = None if computeQ = false. >>> rows = sc.parallelize([[3, -6], [4, -8], [0, 1]]) >>> mat = RowMatrix(rows) >>> decomp = mat.tallSkinnyQR(True) >>> Q = decomp.Q >>> R = decomp.R >>> # Test with absolute values >>> absQRows = Q.rows.map(lambda row: abs(row.toArray()).tolist()) >>> absQRows.collect() [[0.6..., 0.0], [0.8..., 0.0], [0.0, 1.0]] >>> # Test with absolute values >>> abs(R.toArray()).tolist() [[5.0, 10.0], [0.0, 1.0]] """ decomp = JavaModelWrapper(self._java_matrix_wrapper.call("tallSkinnyQR", computeQ)) if computeQ: java_Q = decomp.call("Q") Q = RowMatrix(java_Q) else: Q = None R = decomp.call("R") return QRDecomposition(Q, R) @since('2.2.0') def computeSVD(self, k, computeU=False, rCond=1e-9): """ Computes the singular value decomposition of the RowMatrix. The given row matrix A of dimension (m X n) is decomposed into U * s * V'T where * U: (m X k) (left singular vectors) is a RowMatrix whose columns are the eigenvectors of (A X A') * s: DenseVector consisting of square root of the eigenvalues (singular values) in descending order. * v: (n X k) (right singular vectors) is a Matrix whose columns are the eigenvectors of (A' X A) For more specific details on implementation, please refer the Scala documentation. :param k: Number of leading singular values to keep (`0 < k <= n`). 
It might return less than k if there are numerically zero singular values or there are not enough Ritz values converged before the maximum number of Arnoldi update iterations is reached (in case that matrix A is ill-conditioned). :param computeU: Whether or not to compute U. If set to be True, then U is computed by A * V * s^-1 :param rCond: Reciprocal condition number. All singular values smaller than rCond * s[0] are treated as zero where s[0] is the largest singular value. :returns: :py:class:`SingularValueDecomposition` >>> rows = sc.parallelize([[3, 1, 1], [-1, 3, 1]]) >>> rm = RowMatrix(rows) >>> svd_model = rm.computeSVD(2, True) >>> svd_model.U.rows.collect() [DenseVector([-0.7071, 0.7071]), DenseVector([-0.7071, -0.7071])] >>> svd_model.s DenseVector([3.4641, 3.1623]) >>> svd_model.V DenseMatrix(3, 2, [-0.4082, -0.8165, -0.4082, 0.8944, -0.4472, 0.0], 0) """ j_model = self._java_matrix_wrapper.call( "computeSVD", int(k), bool(computeU), float(rCond)) return SingularValueDecomposition(j_model) @since('2.2.0') def computePrincipalComponents(self, k): """ Computes the k principal components of the given row matrix .. note:: This cannot be computed on matrices with more than 65535 columns. :param k: Number of principal components to keep. :returns: :py:class:`pyspark.mllib.linalg.DenseMatrix` >>> rows = sc.parallelize([[1, 2, 3], [2, 4, 5], [3, 6, 1]]) >>> rm = RowMatrix(rows) >>> # Returns the two principal components of rm >>> pca = rm.computePrincipalComponents(2) >>> pca DenseMatrix(3, 2, [-0.349, -0.6981, 0.6252, -0.2796, -0.5592, -0.7805], 0) >>> # Transform into new dimensions with the greatest variance. >>> rm.multiply(pca).rows.collect() # doctest: +NORMALIZE_WHITESPACE [DenseVector([0.1305, -3.7394]), DenseVector([-0.3642, -6.6983]), \ DenseVector([-4.6102, -4.9745])] """ return self._java_matrix_wrapper.call("computePrincipalComponents", k) @since('2.2.0') def multiply(self, matrix): """ Multiply this matrix by a local dense matrix on the right. 
:param matrix: a local dense matrix whose number of rows must match the number of columns of this matrix :returns: :py:class:`RowMatrix` >>> rm = RowMatrix(sc.parallelize([[0, 1], [2, 3]])) >>> rm.multiply(DenseMatrix(2, 2, [0, 2, 1, 3])).rows.collect() [DenseVector([2.0, 3.0]), DenseVector([6.0, 11.0])] """ if not isinstance(matrix, DenseMatrix): raise ValueError("Only multiplication with DenseMatrix " "is supported.") j_model = self._java_matrix_wrapper.call("multiply", matrix) return RowMatrix(j_model) class SingularValueDecomposition(JavaModelWrapper): """ Represents singular value decomposition (SVD) factors. .. versionadded:: 2.2.0 """ @property @since('2.2.0') def U(self): """ Returns a distributed matrix whose columns are the left singular vectors of the SingularValueDecomposition if computeU was set to be True. """ u = self.call("U") if u is not None: mat_name = u.getClass().getSimpleName() if mat_name == "RowMatrix": return RowMatrix(u) elif mat_name == "IndexedRowMatrix": return IndexedRowMatrix(u) else: raise TypeError("Expected RowMatrix/IndexedRowMatrix got %s" % mat_name) @property @since('2.2.0') def s(self): """ Returns a DenseVector with singular values in descending order. """ return self.call("s") @property @since('2.2.0') def V(self): """ Returns a DenseMatrix whose columns are the right singular vectors of the SingularValueDecomposition. """ return self.call("V") class IndexedRow(object): """ Represents a row of an IndexedRowMatrix. Just a wrapper over a (long, vector) tuple. :param index: The index for the given row. :param vector: The row in the matrix at the given index. 
""" def __init__(self, index, vector): self.index = long(index) self.vector = _convert_to_vector(vector) def __repr__(self): return "IndexedRow(%s, %s)" % (self.index, self.vector) def _convert_to_indexed_row(row): if isinstance(row, IndexedRow): return row elif isinstance(row, tuple) and len(row) == 2: return IndexedRow(*row) else: raise TypeError("Cannot convert type %s into IndexedRow" % type(row)) class IndexedRowMatrix(DistributedMatrix): """ Represents a row-oriented distributed Matrix with indexed rows. :param rows: An RDD of IndexedRows or (long, vector) tuples or a DataFrame consisting of a long typed column of indices and a vector typed column. :param numRows: Number of rows in the matrix. A non-positive value means unknown, at which point the number of rows will be determined by the max row index plus one. :param numCols: Number of columns in the matrix. A non-positive value means unknown, at which point the number of columns will be determined by the size of the first row. """ def __init__(self, rows, numRows=0, numCols=0): """ Note: This docstring is not shown publicly. Create a wrapper over a Java IndexedRowMatrix. Publicly, we require that `rows` be an RDD or DataFrame. However, for internal usage, `rows` can also be a Java IndexedRowMatrix object, in which case we can wrap it directly. This assists in clean matrix conversions. >>> rows = sc.parallelize([IndexedRow(0, [1, 2, 3]), ... IndexedRow(1, [4, 5, 6])]) >>> mat = IndexedRowMatrix(rows) >>> mat_diff = IndexedRowMatrix(rows) >>> (mat_diff._java_matrix_wrapper._java_model == ... mat._java_matrix_wrapper._java_model) False >>> mat_same = IndexedRowMatrix(mat._java_matrix_wrapper._java_model) >>> (mat_same._java_matrix_wrapper._java_model == ... mat._java_matrix_wrapper._java_model) True """ if isinstance(rows, RDD): rows = rows.map(_convert_to_indexed_row) # We use DataFrames for serialization of IndexedRows from # Python, so first convert the RDD to a DataFrame on this # side. 
This will convert each IndexedRow to a Row # containing the 'index' and 'vector' values, which can # both be easily serialized. We will convert back to # IndexedRows on the Scala side. java_matrix = callMLlibFunc("createIndexedRowMatrix", rows.toDF(), long(numRows), int(numCols)) elif isinstance(rows, DataFrame): java_matrix = callMLlibFunc("createIndexedRowMatrix", rows, long(numRows), int(numCols)) elif (isinstance(rows, JavaObject) and rows.getClass().getSimpleName() == "IndexedRowMatrix"): java_matrix = rows else: raise TypeError("rows should be an RDD of IndexedRows or (long, vector) tuples, " "got %s" % type(rows)) self._java_matrix_wrapper = JavaModelWrapper(java_matrix) @property def rows(self): """ Rows of the IndexedRowMatrix stored as an RDD of IndexedRows. >>> mat = IndexedRowMatrix(sc.parallelize([IndexedRow(0, [1, 2, 3]), ... IndexedRow(1, [4, 5, 6])])) >>> rows = mat.rows >>> rows.first() IndexedRow(0, [1.0,2.0,3.0]) """ # We use DataFrames for serialization of IndexedRows from # Java, so we first convert the RDD of rows to a DataFrame # on the Scala/Java side. Then we map each Row in the # DataFrame back to an IndexedRow on this side. rows_df = callMLlibFunc("getIndexedRows", self._java_matrix_wrapper._java_model) rows = rows_df.rdd.map(lambda row: IndexedRow(row[0], row[1])) return rows def numRows(self): """ Get or compute the number of rows. >>> rows = sc.parallelize([IndexedRow(0, [1, 2, 3]), ... IndexedRow(1, [4, 5, 6]), ... IndexedRow(2, [7, 8, 9]), ... IndexedRow(3, [10, 11, 12])]) >>> mat = IndexedRowMatrix(rows) >>> print(mat.numRows()) 4 >>> mat = IndexedRowMatrix(rows, 7, 6) >>> print(mat.numRows()) 7 """ return self._java_matrix_wrapper.call("numRows") def numCols(self): """ Get or compute the number of cols. >>> rows = sc.parallelize([IndexedRow(0, [1, 2, 3]), ... IndexedRow(1, [4, 5, 6]), ... IndexedRow(2, [7, 8, 9]), ... 
IndexedRow(3, [10, 11, 12])]) >>> mat = IndexedRowMatrix(rows) >>> print(mat.numCols()) 3 >>> mat = IndexedRowMatrix(rows, 7, 6) >>> print(mat.numCols()) 6 """ return self._java_matrix_wrapper.call("numCols") def columnSimilarities(self): """ Compute all cosine similarities between columns. >>> rows = sc.parallelize([IndexedRow(0, [1, 2, 3]), ... IndexedRow(6, [4, 5, 6])]) >>> mat = IndexedRowMatrix(rows) >>> cs = mat.columnSimilarities() >>> print(cs.numCols()) 3 """ java_coordinate_matrix = self._java_matrix_wrapper.call("columnSimilarities") return CoordinateMatrix(java_coordinate_matrix) @since('2.0.0') def computeGramianMatrix(self): """ Computes the Gramian matrix `A^T A`. .. note:: This cannot be computed on matrices with more than 65535 columns. >>> rows = sc.parallelize([IndexedRow(0, [1, 2, 3]), ... IndexedRow(1, [4, 5, 6])]) >>> mat = IndexedRowMatrix(rows) >>> mat.computeGramianMatrix() DenseMatrix(3, 3, [17.0, 22.0, 27.0, 22.0, 29.0, 36.0, 27.0, 36.0, 45.0], 0) """ return self._java_matrix_wrapper.call("computeGramianMatrix") def toRowMatrix(self): """ Convert this matrix to a RowMatrix. >>> rows = sc.parallelize([IndexedRow(0, [1, 2, 3]), ... IndexedRow(6, [4, 5, 6])]) >>> mat = IndexedRowMatrix(rows).toRowMatrix() >>> mat.rows.collect() [DenseVector([1.0, 2.0, 3.0]), DenseVector([4.0, 5.0, 6.0])] """ java_row_matrix = self._java_matrix_wrapper.call("toRowMatrix") return RowMatrix(java_row_matrix) def toCoordinateMatrix(self): """ Convert this matrix to a CoordinateMatrix. >>> rows = sc.parallelize([IndexedRow(0, [1, 0]), ... IndexedRow(6, [0, 5])]) >>> mat = IndexedRowMatrix(rows).toCoordinateMatrix() >>> mat.entries.take(3) [MatrixEntry(0, 0, 1.0), MatrixEntry(0, 1, 0.0), MatrixEntry(6, 0, 0.0)] """ java_coordinate_matrix = self._java_matrix_wrapper.call("toCoordinateMatrix") return CoordinateMatrix(java_coordinate_matrix) def toBlockMatrix(self, rowsPerBlock=1024, colsPerBlock=1024): """ Convert this matrix to a BlockMatrix. 
:param rowsPerBlock: Number of rows that make up each block. The blocks forming the final rows are not required to have the given number of rows. :param colsPerBlock: Number of columns that make up each block. The blocks forming the final columns are not required to have the given number of columns. >>> rows = sc.parallelize([IndexedRow(0, [1, 2, 3]), ... IndexedRow(6, [4, 5, 6])]) >>> mat = IndexedRowMatrix(rows).toBlockMatrix() >>> # This IndexedRowMatrix will have 7 effective rows, due to >>> # the highest row index being 6, and the ensuing >>> # BlockMatrix will have 7 rows as well. >>> print(mat.numRows()) 7 >>> print(mat.numCols()) 3 """ java_block_matrix = self._java_matrix_wrapper.call("toBlockMatrix", rowsPerBlock, colsPerBlock) return BlockMatrix(java_block_matrix, rowsPerBlock, colsPerBlock) @since('2.2.0') def computeSVD(self, k, computeU=False, rCond=1e-9): """ Computes the singular value decomposition of the IndexedRowMatrix. The given row matrix A of dimension (m X n) is decomposed into U * s * V'T where * U: (m X k) (left singular vectors) is a IndexedRowMatrix whose columns are the eigenvectors of (A X A') * s: DenseVector consisting of square root of the eigenvalues (singular values) in descending order. * v: (n X k) (right singular vectors) is a Matrix whose columns are the eigenvectors of (A' X A) For more specific details on implementation, please refer the scala documentation. :param k: Number of leading singular values to keep (`0 < k <= n`). It might return less than k if there are numerically zero singular values or there are not enough Ritz values converged before the maximum number of Arnoldi update iterations is reached (in case that matrix A is ill-conditioned). :param computeU: Whether or not to compute U. If set to be True, then U is computed by A * V * s^-1 :param rCond: Reciprocal condition number. All singular values smaller than rCond * s[0] are treated as zero where s[0] is the largest singular value. 
:returns: SingularValueDecomposition object >>> rows = [(0, (3, 1, 1)), (1, (-1, 3, 1))] >>> irm = IndexedRowMatrix(sc.parallelize(rows)) >>> svd_model = irm.computeSVD(2, True) >>> svd_model.U.rows.collect() # doctest: +NORMALIZE_WHITESPACE [IndexedRow(0, [-0.707106781187,0.707106781187]),\ IndexedRow(1, [-0.707106781187,-0.707106781187])] >>> svd_model.s DenseVector([3.4641, 3.1623]) >>> svd_model.V DenseMatrix(3, 2, [-0.4082, -0.8165, -0.4082, 0.8944, -0.4472, 0.0], 0) """ j_model = self._java_matrix_wrapper.call( "computeSVD", int(k), bool(computeU), float(rCond)) return SingularValueDecomposition(j_model) @since('2.2.0') def multiply(self, matrix): """ Multiply this matrix by a local dense matrix on the right. :param matrix: a local dense matrix whose number of rows must match the number of columns of this matrix :returns: :py:class:`IndexedRowMatrix` >>> mat = IndexedRowMatrix(sc.parallelize([(0, (0, 1)), (1, (2, 3))])) >>> mat.multiply(DenseMatrix(2, 2, [0, 2, 1, 3])).rows.collect() [IndexedRow(0, [2.0,3.0]), IndexedRow(1, [6.0,11.0])] """ if not isinstance(matrix, DenseMatrix): raise ValueError("Only multiplication with DenseMatrix " "is supported.") return IndexedRowMatrix(self._java_matrix_wrapper.call("multiply", matrix)) class MatrixEntry(object): """ Represents an entry of a CoordinateMatrix. Just a wrapper over a (long, long, float) tuple. :param i: The row index of the matrix. :param j: The column index of the matrix. :param value: The (i, j)th entry of the matrix, as a float. 
""" def __init__(self, i, j, value): self.i = long(i) self.j = long(j) self.value = float(value) def __repr__(self): return "MatrixEntry(%s, %s, %s)" % (self.i, self.j, self.value) def _convert_to_matrix_entry(entry): if isinstance(entry, MatrixEntry): return entry elif isinstance(entry, tuple) and len(entry) == 3: return MatrixEntry(*entry) else: raise TypeError("Cannot convert type %s into MatrixEntry" % type(entry)) class CoordinateMatrix(DistributedMatrix): """ Represents a matrix in coordinate format. :param entries: An RDD of MatrixEntry inputs or (long, long, float) tuples. :param numRows: Number of rows in the matrix. A non-positive value means unknown, at which point the number of rows will be determined by the max row index plus one. :param numCols: Number of columns in the matrix. A non-positive value means unknown, at which point the number of columns will be determined by the max row index plus one. """ def __init__(self, entries, numRows=0, numCols=0): """ Note: This docstring is not shown publicly. Create a wrapper over a Java CoordinateMatrix. Publicly, we require that `rows` be an RDD. However, for internal usage, `rows` can also be a Java CoordinateMatrix object, in which case we can wrap it directly. This assists in clean matrix conversions. >>> entries = sc.parallelize([MatrixEntry(0, 0, 1.2), ... MatrixEntry(6, 4, 2.1)]) >>> mat = CoordinateMatrix(entries) >>> mat_diff = CoordinateMatrix(entries) >>> (mat_diff._java_matrix_wrapper._java_model == ... mat._java_matrix_wrapper._java_model) False >>> mat_same = CoordinateMatrix(mat._java_matrix_wrapper._java_model) >>> (mat_same._java_matrix_wrapper._java_model == ... mat._java_matrix_wrapper._java_model) True """ if isinstance(entries, RDD): entries = entries.map(_convert_to_matrix_entry) # We use DataFrames for serialization of MatrixEntry entries # from Python, so first convert the RDD to a DataFrame on # this side. 
This will convert each MatrixEntry to a Row # containing the 'i', 'j', and 'value' values, which can # each be easily serialized. We will convert back to # MatrixEntry inputs on the Scala side. java_matrix = callMLlibFunc("createCoordinateMatrix", entries.toDF(), long(numRows), long(numCols)) elif (isinstance(entries, JavaObject) and entries.getClass().getSimpleName() == "CoordinateMatrix"): java_matrix = entries else: raise TypeError("entries should be an RDD of MatrixEntry entries or " "(long, long, float) tuples, got %s" % type(entries)) self._java_matrix_wrapper = JavaModelWrapper(java_matrix) @property def entries(self): """ Entries of the CoordinateMatrix stored as an RDD of MatrixEntries. >>> mat = CoordinateMatrix(sc.parallelize([MatrixEntry(0, 0, 1.2), ... MatrixEntry(6, 4, 2.1)])) >>> entries = mat.entries >>> entries.first() MatrixEntry(0, 0, 1.2) """ # We use DataFrames for serialization of MatrixEntry entries # from Java, so we first convert the RDD of entries to a # DataFrame on the Scala/Java side. Then we map each Row in # the DataFrame back to a MatrixEntry on this side. entries_df = callMLlibFunc("getMatrixEntries", self._java_matrix_wrapper._java_model) entries = entries_df.rdd.map(lambda row: MatrixEntry(row[0], row[1], row[2])) return entries def numRows(self): """ Get or compute the number of rows. >>> entries = sc.parallelize([MatrixEntry(0, 0, 1.2), ... MatrixEntry(1, 0, 2), ... MatrixEntry(2, 1, 3.7)]) >>> mat = CoordinateMatrix(entries) >>> print(mat.numRows()) 3 >>> mat = CoordinateMatrix(entries, 7, 6) >>> print(mat.numRows()) 7 """ return self._java_matrix_wrapper.call("numRows") def numCols(self): """ Get or compute the number of cols. >>> entries = sc.parallelize([MatrixEntry(0, 0, 1.2), ... MatrixEntry(1, 0, 2), ... 
MatrixEntry(2, 1, 3.7)]) >>> mat = CoordinateMatrix(entries) >>> print(mat.numCols()) 2 >>> mat = CoordinateMatrix(entries, 7, 6) >>> print(mat.numCols()) 6 """ return self._java_matrix_wrapper.call("numCols") @since('2.0.0') def transpose(self): """ Transpose this CoordinateMatrix. >>> entries = sc.parallelize([MatrixEntry(0, 0, 1.2), ... MatrixEntry(1, 0, 2), ... MatrixEntry(2, 1, 3.7)]) >>> mat = CoordinateMatrix(entries) >>> mat_transposed = mat.transpose() >>> print(mat_transposed.numRows()) 2 >>> print(mat_transposed.numCols()) 3 """ java_transposed_matrix = self._java_matrix_wrapper.call("transpose") return CoordinateMatrix(java_transposed_matrix) def toRowMatrix(self): """ Convert this matrix to a RowMatrix. >>> entries = sc.parallelize([MatrixEntry(0, 0, 1.2), ... MatrixEntry(6, 4, 2.1)]) >>> mat = CoordinateMatrix(entries).toRowMatrix() >>> # This CoordinateMatrix will have 7 effective rows, due to >>> # the highest row index being 6, but the ensuing RowMatrix >>> # will only have 2 rows since there are only entries on 2 >>> # unique rows. >>> print(mat.numRows()) 2 >>> # This CoordinateMatrix will have 5 columns, due to the >>> # highest column index being 4, and the ensuing RowMatrix >>> # will have 5 columns as well. >>> print(mat.numCols()) 5 """ java_row_matrix = self._java_matrix_wrapper.call("toRowMatrix") return RowMatrix(java_row_matrix) def toIndexedRowMatrix(self): """ Convert this matrix to an IndexedRowMatrix. >>> entries = sc.parallelize([MatrixEntry(0, 0, 1.2), ... MatrixEntry(6, 4, 2.1)]) >>> mat = CoordinateMatrix(entries).toIndexedRowMatrix() >>> # This CoordinateMatrix will have 7 effective rows, due to >>> # the highest row index being 6, and the ensuing >>> # IndexedRowMatrix will have 7 rows as well. >>> print(mat.numRows()) 7 >>> # This CoordinateMatrix will have 5 columns, due to the >>> # highest column index being 4, and the ensuing >>> # IndexedRowMatrix will have 5 columns as well. 
>>> print(mat.numCols()) 5 """ java_indexed_row_matrix = self._java_matrix_wrapper.call("toIndexedRowMatrix") return IndexedRowMatrix(java_indexed_row_matrix) def toBlockMatrix(self, rowsPerBlock=1024, colsPerBlock=1024): """ Convert this matrix to a BlockMatrix. :param rowsPerBlock: Number of rows that make up each block. The blocks forming the final rows are not required to have the given number of rows. :param colsPerBlock: Number of columns that make up each block. The blocks forming the final columns are not required to have the given number of columns. >>> entries = sc.parallelize([MatrixEntry(0, 0, 1.2), ... MatrixEntry(6, 4, 2.1)]) >>> mat = CoordinateMatrix(entries).toBlockMatrix() >>> # This CoordinateMatrix will have 7 effective rows, due to >>> # the highest row index being 6, and the ensuing >>> # BlockMatrix will have 7 rows as well. >>> print(mat.numRows()) 7 >>> # This CoordinateMatrix will have 5 columns, due to the >>> # highest column index being 4, and the ensuing >>> # BlockMatrix will have 5 columns as well. >>> print(mat.numCols()) 5 """ java_block_matrix = self._java_matrix_wrapper.call("toBlockMatrix", rowsPerBlock, colsPerBlock) return BlockMatrix(java_block_matrix, rowsPerBlock, colsPerBlock) def _convert_to_matrix_block_tuple(block): if (isinstance(block, tuple) and len(block) == 2 and isinstance(block[0], tuple) and len(block[0]) == 2 and isinstance(block[1], Matrix)): blockRowIndex = int(block[0][0]) blockColIndex = int(block[0][1]) subMatrix = block[1] return ((blockRowIndex, blockColIndex), subMatrix) else: raise TypeError("Cannot convert type %s into a sub-matrix block tuple" % type(block)) class BlockMatrix(DistributedMatrix): """ Represents a distributed matrix in blocks of local matrices. :param blocks: An RDD of sub-matrix blocks ((blockRowIndex, blockColIndex), sub-matrix) that form this distributed matrix. If multiple blocks with the same index exist, the results for operations like add and multiply will be unpredictable. 
    :param rowsPerBlock: Number of rows that make up each block.
                         The blocks forming the final rows are not
                         required to have the given number of rows.
    :param colsPerBlock: Number of columns that make up each block.
                         The blocks forming the final columns are not
                         required to have the given number of columns.
    :param numRows: Number of rows of this matrix. If the supplied
                    value is less than or equal to zero, the number
                    of rows will be calculated when `numRows` is
                    invoked.
    :param numCols: Number of columns of this matrix. If the supplied
                    value is less than or equal to zero, the number
                    of columns will be calculated when `numCols` is
                    invoked.
    """
    def __init__(self, blocks, rowsPerBlock, colsPerBlock, numRows=0, numCols=0):
        """
        Note: This docstring is not shown publicly.

        Create a wrapper over a Java BlockMatrix.

        Publicly, we require that `blocks` be an RDD.  However, for
        internal usage, `blocks` can also be a Java BlockMatrix
        object, in which case we can wrap it directly.  This
        assists in clean matrix conversions.

        >>> blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
        ...                          ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))])
        >>> mat = BlockMatrix(blocks, 3, 2)

        >>> mat_diff = BlockMatrix(blocks, 3, 2)
        >>> (mat_diff._java_matrix_wrapper._java_model ==
        ...  mat._java_matrix_wrapper._java_model)
        False

        >>> mat_same = BlockMatrix(mat._java_matrix_wrapper._java_model, 3, 2)
        >>> (mat_same._java_matrix_wrapper._java_model ==
        ...  mat._java_matrix_wrapper._java_model)
        True
        """
        if isinstance(blocks, RDD):
            blocks = blocks.map(_convert_to_matrix_block_tuple)
            # We use DataFrames for serialization of sub-matrix blocks
            # from Python, so first convert the RDD to a DataFrame on
            # this side. This will convert each sub-matrix block
            # tuple to a Row containing the 'blockRowIndex',
            # 'blockColIndex', and 'subMatrix' values, which can
            # each be easily serialized.  We will convert back to
            # ((blockRowIndex, blockColIndex), sub-matrix) tuples on
            # the Scala side.
            java_matrix = callMLlibFunc("createBlockMatrix", blocks.toDF(),
                                        int(rowsPerBlock), int(colsPerBlock),
                                        long(numRows), long(numCols))
        elif (isinstance(blocks, JavaObject)
              and blocks.getClass().getSimpleName() == "BlockMatrix"):
            java_matrix = blocks
        else:
            raise TypeError("blocks should be an RDD of sub-matrix blocks as "
                            "((int, int), matrix) tuples, got %s" % type(blocks))

        self._java_matrix_wrapper = JavaModelWrapper(java_matrix)

    @property
    def blocks(self):
        """
        The RDD of sub-matrix blocks
        ((blockRowIndex, blockColIndex), sub-matrix) that form this
        distributed matrix.

        >>> mat = BlockMatrix(
        ...     sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
        ...                     ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))]), 3, 2)
        >>> blocks = mat.blocks
        >>> blocks.first()
        ((0, 0), DenseMatrix(3, 2, [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], 0))

        """
        # We use DataFrames for serialization of sub-matrix blocks
        # from Java, so we first convert the RDD of blocks to a
        # DataFrame on the Scala/Java side. Then we map each Row in
        # the DataFrame back to a sub-matrix block on this side.
        blocks_df = callMLlibFunc("getMatrixBlocks", self._java_matrix_wrapper._java_model)
        blocks = blocks_df.rdd.map(lambda row: ((row[0][0], row[0][1]), row[1]))
        return blocks

    @property
    def rowsPerBlock(self):
        """
        Number of rows that make up each block.

        >>> blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
        ...                          ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))])
        >>> mat = BlockMatrix(blocks, 3, 2)
        >>> mat.rowsPerBlock
        3
        """
        return self._java_matrix_wrapper.call("rowsPerBlock")

    @property
    def colsPerBlock(self):
        """
        Number of columns that make up each block.

        >>> blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
        ...                          ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))])
        >>> mat = BlockMatrix(blocks, 3, 2)
        >>> mat.colsPerBlock
        2
        """
        return self._java_matrix_wrapper.call("colsPerBlock")

    @property
    def numRowBlocks(self):
        """
        Number of rows of blocks in the BlockMatrix.

        >>> blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
        ...                          ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))])
        >>> mat = BlockMatrix(blocks, 3, 2)
        >>> mat.numRowBlocks
        2
        """
        return self._java_matrix_wrapper.call("numRowBlocks")

    @property
    def numColBlocks(self):
        """
        Number of columns of blocks in the BlockMatrix.

        >>> blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
        ...                          ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))])
        >>> mat = BlockMatrix(blocks, 3, 2)
        >>> mat.numColBlocks
        1
        """
        return self._java_matrix_wrapper.call("numColBlocks")

    def numRows(self):
        """
        Get or compute the number of rows.

        >>> blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
        ...                          ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))])

        >>> mat = BlockMatrix(blocks, 3, 2)
        >>> print(mat.numRows())
        6

        >>> mat = BlockMatrix(blocks, 3, 2, 7, 6)
        >>> print(mat.numRows())
        7
        """
        return self._java_matrix_wrapper.call("numRows")

    def numCols(self):
        """
        Get or compute the number of cols.

        >>> blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
        ...                          ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))])

        >>> mat = BlockMatrix(blocks, 3, 2)
        >>> print(mat.numCols())
        2

        >>> mat = BlockMatrix(blocks, 3, 2, 7, 6)
        >>> print(mat.numCols())
        6
        """
        return self._java_matrix_wrapper.call("numCols")

    @since('2.0.0')
    def cache(self):
        """
        Caches the underlying RDD.
        """
        self._java_matrix_wrapper.call("cache")
        return self

    @since('2.0.0')
    def persist(self, storageLevel):
        """
        Persists the underlying RDD with the specified storage level.
        """
        if not isinstance(storageLevel, StorageLevel):
            raise TypeError("`storageLevel` should be a StorageLevel, got %s" % type(storageLevel))
        # Translate the Python StorageLevel to its JVM counterpart
        # before handing it to the Java BlockMatrix.
        javaStorageLevel = self._java_matrix_wrapper._sc._getJavaStorageLevel(storageLevel)
        self._java_matrix_wrapper.call("persist", javaStorageLevel)
        return self

    @since('2.0.0')
    def validate(self):
        """
        Validates the block matrix info against the matrix data (`blocks`)
        and throws an exception if any error is found.
        """
        self._java_matrix_wrapper.call("validate")

    def add(self, other):
        """
        Adds two block matrices together. The matrices must have the
        same size and matching `rowsPerBlock` and `colsPerBlock` values.
        If one of the sub matrix blocks that are being added is a
        SparseMatrix, the resulting sub matrix block will also be a
        SparseMatrix, even if it is being added to a DenseMatrix. If
        two dense sub matrix blocks are added, the output block will
        also be a DenseMatrix.

        >>> dm1 = Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])
        >>> dm2 = Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12])
        >>> sm = Matrices.sparse(3, 2, [0, 1, 3], [0, 1, 2], [7, 11, 12])
        >>> blocks1 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)])
        >>> blocks2 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)])
        >>> blocks3 = sc.parallelize([((0, 0), sm), ((1, 0), dm2)])
        >>> mat1 = BlockMatrix(blocks1, 3, 2)
        >>> mat2 = BlockMatrix(blocks2, 3, 2)
        >>> mat3 = BlockMatrix(blocks3, 3, 2)

        >>> mat1.add(mat2).toLocalMatrix()
        DenseMatrix(6, 2, [2.0, 4.0, 6.0, 14.0, 16.0, 18.0, 8.0, 10.0, 12.0, 20.0, 22.0, 24.0], 0)

        >>> mat1.add(mat3).toLocalMatrix()
        DenseMatrix(6, 2, [8.0, 2.0, 3.0, 14.0, 16.0, 18.0, 4.0, 16.0, 18.0, 20.0, 22.0, 24.0], 0)
        """
        if not isinstance(other, BlockMatrix):
            raise TypeError("Other should be a BlockMatrix, got %s" % type(other))

        other_java_block_matrix = other._java_matrix_wrapper._java_model
        java_block_matrix = self._java_matrix_wrapper.call("add", other_java_block_matrix)
        return BlockMatrix(java_block_matrix, self.rowsPerBlock, self.colsPerBlock)

    @since('2.0.0')
    def subtract(self, other):
        """
        Subtracts the given block matrix `other` from this block matrix:
        `this - other`. The matrices must have the same size and
        matching `rowsPerBlock` and `colsPerBlock` values.  If one of
        the sub matrix blocks that are being subtracted is a
        SparseMatrix, the resulting sub matrix block will also be a
        SparseMatrix, even if it is being subtracted from a DenseMatrix.
        If two dense sub matrix blocks are subtracted, the output block
        will also be a DenseMatrix.

        >>> dm1 = Matrices.dense(3, 2, [3, 1, 5, 4, 6, 2])
        >>> dm2 = Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12])
        >>> sm = Matrices.sparse(3, 2, [0, 1, 3], [0, 1, 2], [1, 2, 3])
        >>> blocks1 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)])
        >>> blocks2 = sc.parallelize([((0, 0), dm2), ((1, 0), dm1)])
        >>> blocks3 = sc.parallelize([((0, 0), sm), ((1, 0), dm2)])
        >>> mat1 = BlockMatrix(blocks1, 3, 2)
        >>> mat2 = BlockMatrix(blocks2, 3, 2)
        >>> mat3 = BlockMatrix(blocks3, 3, 2)

        >>> mat1.subtract(mat2).toLocalMatrix()
        DenseMatrix(6, 2, [-4.0, -7.0, -4.0, 4.0, 7.0, 4.0, -6.0, -5.0, -10.0, 6.0, 5.0, 10.0], 0)

        >>> mat2.subtract(mat3).toLocalMatrix()
        DenseMatrix(6, 2, [6.0, 8.0, 9.0, -4.0, -7.0, -4.0, 10.0, 9.0, 9.0, -6.0, -5.0, -10.0], 0)
        """
        if not isinstance(other, BlockMatrix):
            raise TypeError("Other should be a BlockMatrix, got %s" % type(other))

        other_java_block_matrix = other._java_matrix_wrapper._java_model
        java_block_matrix = self._java_matrix_wrapper.call("subtract", other_java_block_matrix)
        return BlockMatrix(java_block_matrix, self.rowsPerBlock, self.colsPerBlock)

    def multiply(self, other):
        """
        Left multiplies this BlockMatrix by `other`, another
        BlockMatrix. The `colsPerBlock` of this matrix must equal the
        `rowsPerBlock` of `other`. If `other` contains any SparseMatrix
        blocks, they will have to be converted to DenseMatrix blocks.
        The output BlockMatrix will only consist of DenseMatrix blocks.
        This may cause some performance issues until support for
        multiplying two sparse matrices is added.

        >>> dm1 = Matrices.dense(2, 3, [1, 2, 3, 4, 5, 6])
        >>> dm2 = Matrices.dense(2, 3, [7, 8, 9, 10, 11, 12])
        >>> dm3 = Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])
        >>> dm4 = Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12])
        >>> sm = Matrices.sparse(3, 2, [0, 1, 3], [0, 1, 2], [7, 11, 12])
        >>> blocks1 = sc.parallelize([((0, 0), dm1), ((0, 1), dm2)])
        >>> blocks2 = sc.parallelize([((0, 0), dm3), ((1, 0), dm4)])
        >>> blocks3 = sc.parallelize([((0, 0), sm), ((1, 0), dm4)])
        >>> mat1 = BlockMatrix(blocks1, 2, 3)
        >>> mat2 = BlockMatrix(blocks2, 3, 2)
        >>> mat3 = BlockMatrix(blocks3, 3, 2)

        >>> mat1.multiply(mat2).toLocalMatrix()
        DenseMatrix(2, 2, [242.0, 272.0, 350.0, 398.0], 0)

        >>> mat1.multiply(mat3).toLocalMatrix()
        DenseMatrix(2, 2, [227.0, 258.0, 394.0, 450.0], 0)
        """
        if not isinstance(other, BlockMatrix):
            raise TypeError("Other should be a BlockMatrix, got %s" % type(other))

        other_java_block_matrix = other._java_matrix_wrapper._java_model
        java_block_matrix = self._java_matrix_wrapper.call("multiply", other_java_block_matrix)
        return BlockMatrix(java_block_matrix, self.rowsPerBlock, self.colsPerBlock)

    @since('2.0.0')
    def transpose(self):
        """
        Transpose this BlockMatrix. Returns a new BlockMatrix
        instance sharing the same underlying data. Is a lazy operation.

        >>> blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
        ...                          ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))])
        >>> mat = BlockMatrix(blocks, 3, 2)

        >>> mat_transposed = mat.transpose()
        >>> mat_transposed.toLocalMatrix()
        DenseMatrix(2, 6, [1.0, 4.0, 2.0, 5.0, 3.0, 6.0, 7.0, 10.0, 8.0, 11.0, 9.0, 12.0], 0)
        """
        java_transposed_matrix = self._java_matrix_wrapper.call("transpose")
        # Note the swapped block dimensions: blocks are transposed too.
        return BlockMatrix(java_transposed_matrix, self.colsPerBlock, self.rowsPerBlock)

    def toLocalMatrix(self):
        """
        Collect the distributed matrix on the driver as a DenseMatrix.

        >>> blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
        ...                          ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))])
        >>> mat = BlockMatrix(blocks, 3, 2).toLocalMatrix()

        >>> # This BlockMatrix will have 6 effective rows, due to
        >>> # having two sub-matrix blocks stacked, each with 3 rows.
        >>> # The ensuing DenseMatrix will also have 6 rows.
        >>> print(mat.numRows)
        6

        >>> # This BlockMatrix will have 2 effective columns, due to
        >>> # having two sub-matrix blocks stacked, each with 2
        >>> # columns. The ensuing DenseMatrix will also have 2 columns.
        >>> print(mat.numCols)
        2
        """
        return self._java_matrix_wrapper.call("toLocalMatrix")

    def toIndexedRowMatrix(self):
        """
        Convert this matrix to an IndexedRowMatrix.

        >>> blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
        ...                          ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))])
        >>> mat = BlockMatrix(blocks, 3, 2).toIndexedRowMatrix()

        >>> # This BlockMatrix will have 6 effective rows, due to
        >>> # having two sub-matrix blocks stacked, each with 3 rows.
        >>> # The ensuing IndexedRowMatrix will also have 6 rows.
        >>> print(mat.numRows())
        6

        >>> # This BlockMatrix will have 2 effective columns, due to
        >>> # having two sub-matrix blocks stacked, each with 2 columns.
        >>> # The ensuing IndexedRowMatrix will also have 2 columns.
        >>> print(mat.numCols())
        2
        """
        java_indexed_row_matrix = self._java_matrix_wrapper.call("toIndexedRowMatrix")
        return IndexedRowMatrix(java_indexed_row_matrix)

    def toCoordinateMatrix(self):
        """
        Convert this matrix to a CoordinateMatrix.

        >>> blocks = sc.parallelize([((0, 0), Matrices.dense(1, 2, [1, 2])),
        ...                          ((1, 0), Matrices.dense(1, 2, [7, 8]))])
        >>> mat = BlockMatrix(blocks, 1, 2).toCoordinateMatrix()
        >>> mat.entries.take(3)
        [MatrixEntry(0, 0, 1.0), MatrixEntry(0, 1, 2.0), MatrixEntry(1, 0, 7.0)]
        """
        java_coordinate_matrix = self._java_matrix_wrapper.call("toCoordinateMatrix")
        return CoordinateMatrix(java_coordinate_matrix)


def _test():
    """Run this module's doctests against a local 2-core SparkSession."""
    import doctest
    import numpy
    from pyspark.sql import SparkSession
    from pyspark.mllib.linalg import Matrices
    import pyspark.mllib.linalg.distributed
    try:
        # Numpy 1.14+ changed its string format.
        numpy.set_printoptions(legacy='1.13')
    except TypeError:
        pass
    globs = pyspark.mllib.linalg.distributed.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("mllib.linalg.distributed tests")\
        .getOrCreate()
    globs['sc'] = spark.sparkContext
    globs['Matrices'] = Matrices
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    spark.stop()
    if failure_count:
        # NOTE(review): relies on a module-level ``import sys`` that is not
        # visible in this chunk -- confirm it is imported at the top of
        # the file.
        sys.exit(-1)


if __name__ == "__main__":
    _test()
apache-2.0
NickolausDS/oauthenticator
oauthenticator/tests/test_github.py
4
4715
import re import functools import json from io import BytesIO from pytest import fixture, mark from urllib.parse import urlparse, parse_qs from tornado.httpclient import HTTPRequest, HTTPResponse from tornado.httputil import HTTPHeaders from ..github import GitHubOAuthenticator from .mocks import setup_oauth_mock def user_model(username): """Return a user model""" return { 'email': 'dinosaurs@space', 'id': 5, 'login': username, 'name': 'Hoban Washburn', } @fixture def github_client(client): setup_oauth_mock(client, host=['github.com', 'api.github.com'], access_token_path='/login/oauth/access_token', user_path='/user', token_type='token', ) return client async def test_github(github_client): authenticator = GitHubOAuthenticator() handler = github_client.handler_for_user(user_model('wash')) user_info = await authenticator.authenticate(handler) name = user_info['name'] assert name == 'wash' auth_state = user_info['auth_state'] assert 'access_token' in auth_state assert auth_state == { 'access_token': auth_state['access_token'], 'github_user': { 'email': 'dinosaurs@space', 'id': 5, 'login': name, 'name': 'Hoban Washburn', } } def make_link_header(urlinfo, page): return {'Link': '<{}://{}{}?page={}>;rel="next"' .format(urlinfo.scheme, urlinfo.netloc, urlinfo.path, page)} async def test_org_whitelist(github_client): client = github_client authenticator = GitHubOAuthenticator() ## Mock Github API teams = { 'red': ['grif', 'simmons', 'donut', 'sarge', 'lopez'], 'blue': ['tucker', 'caboose', 'burns', 'sheila', 'texas'], } member_regex = re.compile(r'/orgs/(.*)/members') def team_members(paginate, request): urlinfo = urlparse(request.url) team = member_regex.match(urlinfo.path).group(1) if team not in teams: return HTTPResponse(request, 404) if not paginate: return [user_model(m) for m in teams[team]] else: page = parse_qs(urlinfo.query).get('page', ['1']) page = int(page[0]) return team_members_paginated( team, page, urlinfo, functools.partial(HTTPResponse, request)) def 
team_members_paginated(team, page, urlinfo, response): if page < len(teams[team]): headers = make_link_header(urlinfo, page + 1) elif page == len(teams[team]): headers = {} else: return response(400) headers.update({'Content-Type': 'application/json'}) ret = [user_model(teams[team][page - 1])] return response(200, headers=HTTPHeaders(headers), buffer=BytesIO(json.dumps(ret).encode('utf-8'))) membership_regex = re.compile(r'/orgs/(.*)/members/(.*)') def team_membership(request): urlinfo = urlparse(request.url) urlmatch = membership_regex.match(urlinfo.path) team = urlmatch.group(1) username = urlmatch.group(2) print('Request team = %s, username = %s' % (team, username)) if team not in teams: print('Team not found: team = %s' %(team)) return HTTPResponse(request, 404) if username not in teams[team]: print('Member not found: team = %s, username = %s' %(team, username)) return HTTPResponse(request, 404) return HTTPResponse(request, 204) ## Perform tests for paginate in (False, True): client_hosts = client.hosts['api.github.com'] client_hosts.append((membership_regex, team_membership)) client_hosts.append((member_regex, functools.partial(team_members, paginate))) authenticator.github_organization_whitelist = ['blue'] handler = client.handler_for_user(user_model('caboose')) user = await authenticator.authenticate(handler) assert user['name'] == 'caboose' handler = client.handler_for_user(user_model('donut')) user = await authenticator.authenticate(handler) assert user is None # reverse it, just to be safe authenticator.github_organization_whitelist = ['red'] handler = client.handler_for_user(user_model('caboose')) user = await authenticator.authenticate(handler) assert user is None handler = client.handler_for_user(user_model('donut')) user = await authenticator.authenticate(handler) assert user['name'] == 'donut' client_hosts.pop() client_hosts.pop()
bsd-3-clause
ironman771/xbmc
tools/EventClients/examples/python/example_action.py
10
1118
#!/usr/bin/python
"""Simple example showing how to send action events to XBMC.

Usage::

    example_action.py [builtin-action] [button-action]

If a second argument is given it is sent as a button action (gamepad map
"XG" style names); otherwise the first argument is sent as an
EXECBUILTIN action; with no arguments a default
"ActivateWindow(ShutdownMenu)" builtin is sent.
"""

import sys
sys.path.append("../../lib/python")

import time
from xbmcclient import XBMCClient, ACTION_EXECBUILTIN, ACTION_BUTTON


def main():
    host = "localhost"
    port = 9777

    # Create an XBMCClient object and connect.
    xbmc = XBMCClient("Example Remote", "../../icons/bluetooth.png")
    xbmc.connect()

    # The original used IndexError-driven fallbacks with a bare
    # ``except:`` (which would also swallow SystemExit /
    # KeyboardInterrupt).  Explicit argv length checks express the same
    # dispatch without exception control flow.
    if len(sys.argv) > 2:
        # Send a button press event (see PacketBUTTON doc for details).
        xbmc.send_action(sys.argv[2], ACTION_BUTTON)
    elif len(sys.argv) > 1:
        try:
            xbmc.send_action(sys.argv[1], ACTION_EXECBUILTIN)
        except Exception as e:
            # Best-effort example code: report the failure and fall back
            # to the default action rather than crashing.
            print(str(e))
            xbmc.send_action("ActivateWindow(ShutdownMenu)")
    else:
        xbmc.send_action("ActivateWindow(ShutdownMenu)")

    # Ok, we're done, close the connection.  Note that closing the
    # connection clears any repeat key that is active, so the actual
    # release-button event need not be sent.
    xbmc.close()


if __name__ == "__main__":
    main()
gpl-2.0
aaron-fz/neutron_full_sync
neutron/tests/unit/cisco/test_nexus_db.py
21
10090
# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import mock import testtools from neutron.db import api as db from neutron.plugins.cisco.common import cisco_exceptions as c_exc from neutron.plugins.cisco.common import config from neutron.plugins.cisco.db import nexus_db_v2 as nxdb from neutron.plugins.cisco.nexus import cisco_nexus_plugin_v2 from neutron.tests import base class CiscoNexusDbTest(base.BaseTestCase): """Unit tests for cisco.db.nexus_models_v2.NexusPortBinding model.""" NpbObj = collections.namedtuple('NpbObj', 'port vlan switch instance') def setUp(self): super(CiscoNexusDbTest, self).setUp() db.configure_db() self.session = db.get_session() self.addCleanup(db.clear_db) def _npb_test_obj(self, pnum, vnum, switch=None, instance=None): """Create a Nexus port binding test object from a pair of numbers.""" if pnum is 'router': port = pnum else: port = '1/%s' % str(pnum) vlan = str(vnum) if switch is None: switch = '10.9.8.7' if instance is None: instance = 'instance_%s_%s' % (str(pnum), str(vnum)) return self.NpbObj(port, vlan, switch, instance) def _assert_equal(self, npb, npb_obj): self.assertEqual(npb.port_id, npb_obj.port) self.assertEqual(int(npb.vlan_id), int(npb_obj.vlan)) self.assertEqual(npb.switch_ip, npb_obj.switch) self.assertEqual(npb.instance_id, npb_obj.instance) def _add_to_db(self, npbs): for npb in npbs: nxdb.add_nexusport_binding( npb.port, npb.vlan, npb.switch, 
npb.instance) def test_nexusportbinding_add_remove(self): npb11 = self._npb_test_obj(10, 100) npb = nxdb.add_nexusport_binding( npb11.port, npb11.vlan, npb11.switch, npb11.instance) self._assert_equal(npb, npb11) npb = nxdb.remove_nexusport_binding( npb11.port, npb11.vlan, npb11.switch, npb11.instance) self.assertEqual(len(npb), 1) self._assert_equal(npb[0], npb11) with testtools.ExpectedException(c_exc.NexusPortBindingNotFound): nxdb.remove_nexusport_binding( npb11.port, npb11.vlan, npb11.switch, npb11.instance) def test_nexusportbinding_get(self): npb11 = self._npb_test_obj(10, 100) npb21 = self._npb_test_obj(20, 100) npb22 = self._npb_test_obj(20, 200) self._add_to_db([npb11, npb21, npb22]) npb = nxdb.get_nexusport_binding( npb11.port, npb11.vlan, npb11.switch, npb11.instance) self.assertEqual(len(npb), 1) self._assert_equal(npb[0], npb11) npb = nxdb.get_nexusport_binding( npb21.port, npb21.vlan, npb21.switch, npb21.instance) self.assertEqual(len(npb), 1) self._assert_equal(npb[0], npb21) npb = nxdb.get_nexusport_binding( npb22.port, npb22.vlan, npb22.switch, npb22.instance) self.assertEqual(len(npb), 1) self._assert_equal(npb[0], npb22) with testtools.ExpectedException(c_exc.NexusPortBindingNotFound): nxdb.get_nexusport_binding( npb21.port, npb21.vlan, npb21.switch, "dummyInstance") def test_nexusvlanbinding_get(self): npb11 = self._npb_test_obj(10, 100) npb21 = self._npb_test_obj(20, 100) npb22 = self._npb_test_obj(20, 200) self._add_to_db([npb11, npb21, npb22]) npb_all_v100 = nxdb.get_nexusvlan_binding(npb11.vlan, npb11.switch) self.assertEqual(len(npb_all_v100), 2) npb_v200 = nxdb.get_nexusvlan_binding(npb22.vlan, npb22.switch) self.assertEqual(len(npb_v200), 1) self._assert_equal(npb_v200[0], npb22) with testtools.ExpectedException(c_exc.NexusPortBindingNotFound): nxdb.get_nexusvlan_binding(npb21.vlan, "dummySwitch") def test_nexusvmbinding_get(self): npb11 = self._npb_test_obj(10, 100) npb21 = self._npb_test_obj(20, 100) npb22 = self._npb_test_obj(20, 200) 
self._add_to_db([npb11, npb21, npb22]) npb = nxdb.get_nexusvm_bindings(npb21.vlan, npb21.instance)[0] self._assert_equal(npb, npb21) npb = nxdb.get_nexusvm_bindings(npb22.vlan, npb22.instance)[0] self._assert_equal(npb, npb22) with testtools.ExpectedException(c_exc.NexusPortBindingNotFound): nxdb.get_nexusvm_bindings(npb21.vlan, "dummyInstance") def test_nexusportvlanswitchbinding_get(self): npb11 = self._npb_test_obj(10, 100) npb21 = self._npb_test_obj(20, 100) self._add_to_db([npb11, npb21]) npb = nxdb.get_port_vlan_switch_binding( npb11.port, npb11.vlan, npb11.switch) self.assertEqual(len(npb), 1) self._assert_equal(npb[0], npb11) with testtools.ExpectedException(c_exc.NexusPortBindingNotFound): nxdb.get_port_vlan_switch_binding( npb21.port, npb21.vlan, "dummySwitch") def test_nexusportswitchbinding_get(self): npb11 = self._npb_test_obj(10, 100) npb21 = self._npb_test_obj(20, 100, switch='2.2.2.2') npb22 = self._npb_test_obj(20, 200, switch='2.2.2.2') self._add_to_db([npb11, npb21, npb22]) npb = nxdb.get_port_switch_bindings(npb11.port, npb11.switch) self.assertEqual(len(npb), 1) self._assert_equal(npb[0], npb11) npb_all_p20 = nxdb.get_port_switch_bindings(npb21.port, npb21.switch) self.assertEqual(len(npb_all_p20), 2) npb = nxdb.get_port_switch_bindings(npb21.port, "dummySwitch") self.assertIsNone(npb) def test_nexussvibinding_get(self): npbr1 = self._npb_test_obj('router', 100) npb21 = self._npb_test_obj(20, 100) self._add_to_db([npbr1, npb21]) npb_svi = nxdb.get_nexussvi_bindings() self.assertEqual(len(npb_svi), 1) self._assert_equal(npb_svi[0], npbr1) npbr2 = self._npb_test_obj('router', 200) self._add_to_db([npbr2]) npb_svi = nxdb.get_nexussvi_bindings() self.assertEqual(len(npb_svi), 2) def test_nexussviswitch_find(self): """Test Nexus switch selection for SVI placement.""" # Configure 2 Nexus switches nexus_switches = { ('1.1.1.1', 'username'): 'admin', ('1.1.1.1', 'password'): 'password1', ('1.1.1.1', 'host1'): '1/1', ('2.2.2.2', 'username'): 'admin', 
('2.2.2.2', 'password'): 'password2', ('2.2.2.2', 'host2'): '1/1', } nexus_plugin = cisco_nexus_plugin_v2.NexusPlugin() nexus_plugin._client = mock.Mock() nexus_plugin._client.nexus_switches = nexus_switches # Set the Cisco config module's first configured device IP address # according to the preceding switch config with mock.patch.object(config, 'first_device_ip', new='1.1.1.1'): # Enable round-robin mode with no SVIs configured on any of the # Nexus switches (i.e. no entries in the SVI database). The # plugin should select the first switch in the configuration. config.CONF.set_override('svi_round_robin', True, 'CISCO') switch_ip = nexus_plugin._find_switch_for_svi() self.assertEqual(switch_ip, '1.1.1.1') # Keep round-robin mode enabled, and add entries to the SVI # database. The plugin should select the switch with the least # number of entries in the SVI database. vlan = 100 npbr11 = self._npb_test_obj('router', vlan, switch='1.1.1.1', instance='instance11') npbr12 = self._npb_test_obj('router', vlan, switch='1.1.1.1', instance='instance12') npbr21 = self._npb_test_obj('router', vlan, switch='2.2.2.2', instance='instance21') self._add_to_db([npbr11, npbr12, npbr21]) switch_ip = nexus_plugin._find_switch_for_svi() self.assertEqual(switch_ip, '2.2.2.2') # Disable round-robin mode. The plugin should select the # first switch in the configuration. 
config.CONF.clear_override('svi_round_robin', 'CISCO') switch_ip = nexus_plugin._find_switch_for_svi() self.assertEqual(switch_ip, '1.1.1.1') def test_nexusbinding_update(self): npb11 = self._npb_test_obj(10, 100, switch='1.1.1.1', instance='test') npb21 = self._npb_test_obj(20, 100, switch='1.1.1.1', instance='test') self._add_to_db([npb11, npb21]) npb_all_v100 = nxdb.get_nexusvlan_binding(npb11.vlan, '1.1.1.1') self.assertEqual(len(npb_all_v100), 2) npb22 = self._npb_test_obj(20, 200, switch='1.1.1.1', instance='test') npb = nxdb.update_nexusport_binding(npb21.port, 200) self._assert_equal(npb, npb22) npb_all_v100 = nxdb.get_nexusvlan_binding(npb11.vlan, '1.1.1.1') self.assertEqual(len(npb_all_v100), 1) self._assert_equal(npb_all_v100[0], npb11) npb = nxdb.update_nexusport_binding(npb21.port, 0) self.assertIsNone(npb) npb33 = self._npb_test_obj(30, 300, switch='1.1.1.1', instance='test') with testtools.ExpectedException(c_exc.NexusPortBindingNotFound): nxdb.update_nexusport_binding(npb33.port, 200)
apache-2.0
ceph/teuthology
teuthology/task/internal/vm_setup.py
3
1825
import logging
import os
import subprocess

from teuthology.parallel import parallel
from teuthology.task import ansible
from teuthology.exceptions import CommandFailedError

log = logging.getLogger(__name__)


def vm_setup(ctx, config):
    """
    Look for virtual machines and handle their initialization.

    Each VM remote that is not yet marked ready (no /ceph-qa-ready file)
    gets the edit_sudoers.sh script piped into ``sudo sh``.  If the job
    has a kernel task but no explicit ansible.cephlab task, ansible is
    run over all VM hosts afterwards.
    """
    all_tasks = [list(x.keys())[0] for x in ctx.config['tasks']]
    # Kernel installs on VMs require the cephlab ansible setup unless the
    # job already scheduled it explicitly.
    need_ansible = False
    if 'kernel' in all_tasks and 'ansible.cephlab' not in all_tasks:
        need_ansible = True
    ansible_hosts = set()
    with parallel():
        editinfo = os.path.join(os.path.dirname(__file__), 'edit_sudoers.sh')
        for rem in ctx.cluster.remotes.keys():
            if rem.is_vm:
                ansible_hosts.add(rem.shortname)
                try:
                    # Ready marker present -> nothing to do for this VM.
                    rem.sh('test -e /ceph-qa-ready')
                except CommandFailedError:
                    _edit_sudoers(rem, editinfo)
    if need_ansible and ansible_hosts:
        log.info("Running ansible on %s", list(ansible_hosts))
        ansible_config = dict(
            hosts=list(ansible_hosts),
        )
        with ansible.CephLab(ctx, config=ansible_config):
            pass


def _edit_sudoers(rem, editinfo):
    """Pipe the edit_sudoers.sh script into ``sudo sh`` on *rem*."""
    p1 = subprocess.Popen(['cat', editinfo], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(
        [
            'ssh',
            '-o', 'StrictHostKeyChecking=no',
            '-t', '-t',
            str(rem),
            'sudo',
            'sh'
        ],
        stdin=p1.stdout,
        stdout=subprocess.PIPE
    )
    # Close our copy of p1's stdout so 'cat' receives SIGPIPE if ssh
    # exits early (the original leaked this handle).
    p1.stdout.close()
    _, err = p2.communicate()
    # Reap 'cat' so it does not linger as a zombie process.
    p1.wait()
    if err:
        log.error("Edit of /etc/sudoers failed: %s", err)
mit
nuclio/nuclio
hack/examples/python/facerecognizer/face.py
1
5037
# Copyright 2017 The Nuclio Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Uses Microsoft's Face API to extract face information from the # picture whose URL is submitted in the request body. The result is # returned as a table of face objects sorted by their center's # position in the given picture, left-to-right and then top-to-bottom. # # You will need a valid key from Microsoft: # https://azure.microsoft.com/en-gb/try/cognitive-services/?api=face-api # # Once a valid Face API key has been acquired, set it and the appropriate # regional base URL as the environment for this function # (in the config section). # # We can also configure the function inline - through a specially crafted # comment such as the below. This is functionally equivalent to creating # a function.yaml file. 
import os

import cognitive_face as cf
import tabulate
import inflection


def handler(context, event):
    """Detect faces in the image at the URL given in the request body.

    Returns a plain-text table of per-face attributes, with faces sorted
    by the center of their bounding box, left-to-right then
    top-to-bottom.  Misconfiguration or upstream errors yield 503, a
    missing URL yields 400.
    """
    # extract the stuff we need
    image_url = event.body.decode('utf-8').strip()
    key = os.environ.get('FACE_API_KEY')
    base_url = os.environ.get('FACE_API_BASE_URL')

    if key is None:
        context.logger.warn('Face API key not set, cannot continue')
        return _build_response(context,
                               'Function misconfigured: Face API key not set',
                               503)

    if base_url is None:
        context.logger.warn('Face API base URL not set, cannot continue')
        return _build_response(context,
                               'Function misconfigured: Face API base URL not set',
                               503)

    if not image_url:
        context.logger.warn('No URL given in request body')
        return _build_response(context, 'Image URL required', 400)

    # configure cognitive face wrapper
    cf.Key.set(key)
    cf.BaseUrl.set(base_url)

    # attempt to request using the provided info
    try:
        context.logger.info('Requesting detection from Face API: {0}'.format(image_url))
        detected_faces = cf.face.detect(image_url,
                                        face_id=False,
                                        attributes='age,gender,glasses,smile,emotion')
    except Exception as error:
        context.logger.warn('Face API error occurred: {0}'.format(error))
        return _build_response(context, 'Face API error occurred', 503)

    # map each raw detection to a flat display dict, then sort according
    # to center point, first x then y
    parsed_faces = [_parse_face(face) for face in detected_faces]
    parsed_faces.sort(key=lambda face: (face['x'], face['y']))

    # prepare the data for tabulation: one column per face, one row per
    # attribute, headed by each face's center position
    first_row = ('',) + tuple(face['position'] for face in parsed_faces)

    def make_row(name):
        # a named def instead of a lambda bound to a name (PEP 8 / E731)
        return (inflection.humanize(name),) + tuple(
            face[name] for face in parsed_faces)

    other_rows = [make_row(name) for name in [
        'gender', 'age', 'primary_emotion', 'glasses', 'smile']]

    # return the human-readable face data in a neat table format
    return _build_response(context,
                           tabulate.tabulate([first_row] + other_rows,
                                             headers='firstrow',
                                             tablefmt='fancy_grid',
                                             numalign='center',
                                             stralign='center'),
                           200)


def _parse_face(face):
    """Map one raw Face API detection to a flat dict of display values.

    Computes the bounding-box center (used for sorting and the
    "position" label) and cleans up the retrieved attributes for viewing
    comfort.
    """
    coordinates = face['faceRectangle']
    attributes = face['faceAttributes']

    center_x = coordinates['left'] + coordinates['width'] / 2
    center_y = coordinates['top'] + coordinates['height'] / 2

    # determine the primary emotion based on its weighing; max() with a
    # key is O(n) versus the original sort-then-take-last O(n log n)
    primary_emotion = max(attributes['emotion'].items(),
                          key=lambda item: item[1])[0]

    return {
        'x': center_x,
        'y': center_y,
        'position': '({0},{1})'.format(int(center_x), int(center_y)),
        'gender': inflection.humanize(attributes['gender']),
        'age': int(attributes['age']),
        'glasses': inflection.humanize(inflection.underscore(attributes['glasses'])),
        'primary_emotion': inflection.humanize(primary_emotion),
        'smile': '{0:.1f}%'.format(attributes['smile'] * 100),
    }


def _build_response(context, body, status_code):
    """Build a plain-text nuclio response with the given body and status."""
    return context.Response(body=body,
                            headers={},
                            content_type='text/plain',
                            status_code=status_code)
apache-2.0
crossbario/autobahn-testsuite
autobahntestsuite/autobahntestsuite/case/case9_6_5.py
2
1132
###############################################################################
##
## Copyright (c) Crossbar.io Technologies GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
##     http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################

from case9_6_1 import Case9_6_1


class Case9_6_5(Case9_6_1):
    """Fragmentation variant of Case9_6_1: 1M binary payload, 1k chops.

    All sending and verification logic is inherited from Case9_6_1; this
    subclass only overrides the chop size used when writing the payload.
    """

    DESCRIPTION = """Send binary message message with payload of length 1 * 2**20 (1M). Sent out data in chops of 1024 octets."""

    EXPECTATION = """Receive echo'ed text message (with payload as sent)."""

    def setChopSize(self):
        # 2 ** 10 octets == 1024 bytes per chop.
        self.chopsize = 2 ** 10
apache-2.0
programa-stic/barf-project
examples/misc/translate_code.py
1
1900
#! /usr/bin/env python

from __future__ import absolute_import
from __future__ import print_function

from barf.barf import BARF


def _translate_and_print(barf, start=None, end=None):
    """Translate a binary to REIL and SMT, printing each stage.

    Factors out the loop that was duplicated verbatim for the x86 and
    ARM samples in the original script.

    :param barf: an opened :class:`BARF` instance.
    :param start: optional start address to translate from.
    :param end: optional end address to translate to.
    """
    print("[+] Translating: x86 -> REIL -> SMT")

    for addr, asm_instr, reil_instrs in barf.translate(start=start, end=end):
        print("0x{0:08x} : {1}".format(addr, asm_instr))

        for reil_instr in reil_instrs:
            print("{0:14}{1}".format("", reil_instr))

            try:
                # Some instructions cannot be translated to SMT, i.e.
                # UNKN, UNDEF, JCC.  In those cases an exception is
                # raised and the instruction is skipped on purpose.
                smt_exprs = barf.smt_translator.translate(reil_instr)

                for smt_expr in smt_exprs:
                    print("{0:16}{1}".format("", smt_expr))
            except Exception:
                # Narrowed from a bare ``except`` so SystemExit and
                # KeyboardInterrupt still propagate.
                pass


if __name__ == "__main__":
    # x86
    # =======================================================================
    barf = BARF("./samples/bin/branch4.x86")
    _translate_and_print(barf)

    # ARM
    # =======================================================================
    barf = BARF("./samples/bin/branch4.arm")
    _translate_and_print(barf, start=0x000083c8, end=0x00008404)
bsd-2-clause
chjw8016/GreenOdoo7-haibao
openerp/addons/membership/wizard/membership_invoice.py
51
3250
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv import openerp.addons.decimal_precision as dp class membership_invoice(osv.osv_memory): """Membership Invoice""" _name = "membership.invoice" _description = "Membership Invoice" _columns = { 'product_id': fields.many2one('product.product','Membership', required=True), 'member_price': fields.float('Member Price', digits_compute= dp.get_precision('Product Price'), required=True), } def onchange_product(self, cr, uid, ids, product_id=False): """This function returns value of product's member price based on product id. 
""" if not product_id: return {'value': {'member_price': False}} return {'value': {'member_price': self.pool.get('product.product').price_get(cr, uid, [product_id])[product_id]}} def membership_invoice(self, cr, uid, ids, context=None): mod_obj = self.pool.get('ir.model.data') partner_obj = self.pool.get('res.partner') datas = {} if context is None: context = {} data = self.browse(cr, uid, ids, context=context) if data: data = data[0] datas = { 'membership_product_id': data.product_id.id, 'amount': data.member_price } invoice_list = partner_obj.create_membership_invoice(cr, uid, context.get('active_ids', []), datas=datas, context=context) try: search_view_id = mod_obj.get_object_reference(cr, uid, 'account', 'view_account_invoice_filter')[1] except ValueError: search_view_id = False try: form_view_id = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_form')[1] except ValueError: form_view_id = False return { 'domain': [('id', 'in', invoice_list)], 'name': 'Membership Invoices', 'view_type': 'form', 'view_mode': 'tree,form', 'res_model': 'account.invoice', 'type': 'ir.actions.act_window', 'views': [(False, 'tree'), (form_view_id, 'form')], 'search_view_id': search_view_id, } membership_invoice() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
mit
adykstra/mne-python
mne/__init__.py
1
5438
"""MNE software for MEG and EEG data analysis.""" # PEP0440 compatible formatted version, see: # https://www.python.org/dev/peps/pep-0440/ # # Generic release markers: # X.Y # X.Y.Z # For bugfix releases # # Admissible pre-release markers: # X.YaN # Alpha release # X.YbN # Beta release # X.YrcN # Release Candidate # X.Y # Final release # # Dev branch marker is: 'X.Y.devN' where N is an integer. # __version__ = '0.19.dev0' # have to import verbose first since it's needed by many things from .utils import (set_log_level, set_log_file, verbose, set_config, get_config, get_config_path, set_cache_dir, set_memmap_min_size, grand_average, sys_info, open_docs) from .io.pick import (pick_types, pick_channels, pick_channels_regexp, pick_channels_forward, pick_types_forward, pick_channels_cov, pick_channels_evoked, pick_info) from .io.base import concatenate_raws from .io.meas_info import create_info, Info from .io.proj import Projection from .io.kit import read_epochs_kit from .io.eeglab import read_epochs_eeglab from .io.reference import (set_eeg_reference, set_bipolar_reference, add_reference_channels) from .bem import (make_sphere_model, make_bem_model, make_bem_solution, read_bem_surfaces, write_bem_surfaces, read_bem_solution, write_bem_solution) from .cov import (read_cov, write_cov, Covariance, compute_raw_covariance, compute_covariance, whiten_evoked, make_ad_hoc_cov) from .event import (read_events, write_events, find_events, merge_events, pick_events, make_fixed_length_events, concatenate_events, find_stim_steps, AcqParserFIF) from .forward import (read_forward_solution, apply_forward, apply_forward_raw, average_forward_solutions, Forward, write_forward_solution, make_forward_solution, convert_forward_solution, make_field_map, make_forward_dipole, use_coil_def) from .source_estimate import (read_source_estimate, MixedSourceEstimate, SourceEstimate, VectorSourceEstimate, VolSourceEstimate, VolVectorSourceEstimate, grade_to_tris, spatial_src_connectivity, 
spatial_tris_connectivity, spatial_dist_connectivity, spatial_inter_hemi_connectivity, spatio_temporal_src_connectivity, spatio_temporal_tris_connectivity, spatio_temporal_dist_connectivity, extract_label_time_course) from .surface import (read_surface, write_surface, decimate_surface, read_tri, read_morph_map, get_head_surf, get_meg_helmet_surf) from .morph import (SourceMorph, read_source_morph, grade_to_vertices, compute_morph_matrix, compute_source_morph) from .source_space import (read_source_spaces, vertex_to_mni, head_to_mni, head_to_mri, write_source_spaces, setup_source_space, setup_volume_source_space, SourceSpaces, add_source_space_distances, morph_source_spaces, get_volume_labels_from_aseg, get_volume_labels_from_src) from .annotations import Annotations, read_annotations, events_from_annotations from .epochs import (BaseEpochs, Epochs, EpochsArray, read_epochs, concatenate_epochs) from .evoked import Evoked, EvokedArray, read_evokeds, write_evokeds, combine_evoked from .label import (read_label, label_sign_flip, write_label, stc_to_label, grow_labels, Label, split_label, BiHemiLabel, read_labels_from_annot, write_labels_to_annot, random_parcellation, morph_labels, labels_to_stc) from .misc import parse_config, read_reject_parameters from .coreg import (create_default_subject, scale_bem, scale_mri, scale_labels, scale_source_space) from .transforms import (read_trans, write_trans, transform_surface_to, Transform) from .proj import (read_proj, write_proj, compute_proj_epochs, compute_proj_evoked, compute_proj_raw, sensitivity_map) from .selection import read_selection from .dipole import read_dipole, Dipole, DipoleFixed, fit_dipole from .channels import equalize_channels, rename_channels, find_layout from .report import Report, open_report from .io import read_epochs_fieldtrip, read_evoked_fieldtrip from .rank import compute_rank from . import beamformer from . import channels from . import chpi from . import commands from . import connectivity from . 
import coreg from . import cuda from . import datasets from . import dipole from . import epochs from . import event from . import externals from . import io from . import filter from . import gui from . import minimum_norm from . import preprocessing from . import simulation from . import stats from . import time_frequency from . import viz from . import decoding # initialize logging set_log_level(None, False) set_log_file()
bsd-3-clause
jjx02230808/project0223
examples/cluster/plot_kmeans_assumptions.py
270
2040
""" ==================================== Demonstration of k-means assumptions ==================================== This example is meant to illustrate situations where k-means will produce unintuitive and possibly unexpected clusters. In the first three plots, the input data does not conform to some implicit assumption that k-means makes and undesirable clusters are produced as a result. In the last plot, k-means returns intuitive clusters despite unevenly sized blobs. """ print(__doc__) # Author: Phil Roth <mr.phil.roth@gmail.com> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.cluster import KMeans from sklearn.datasets import make_blobs plt.figure(figsize=(12, 12)) n_samples = 1500 random_state = 170 X, y = make_blobs(n_samples=n_samples, random_state=random_state) # Incorrect number of clusters y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X) plt.subplot(221) plt.scatter(X[:, 0], X[:, 1], c=y_pred) plt.title("Incorrect Number of Blobs") # Anisotropicly distributed data transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]] X_aniso = np.dot(X, transformation) y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso) plt.subplot(222) plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred) plt.title("Anisotropicly Distributed Blobs") # Different variance X_varied, y_varied = make_blobs(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state) y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied) plt.subplot(223) plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred) plt.title("Unequal Variance") # Unevenly sized blobs X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10])) y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered) plt.subplot(224) plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred) plt.title("Unevenly Sized Blobs") plt.show()
bsd-3-clause
DreamerKing/LightweightHtmlWidgets
publish-rc/v1.0/files/Ipy.Lib/ctypes/macholib/dyld.py
253
5341
###################################################################### # This file should be kept compatible with Python 2.3, see PEP 291. # ###################################################################### """ dyld emulation """ import os from framework import framework_info from dylib import dylib_info from itertools import * __all__ = [ 'dyld_find', 'framework_find', 'framework_info', 'dylib_info', ] # These are the defaults as per man dyld(1) # DEFAULT_FRAMEWORK_FALLBACK = [ os.path.expanduser("~/Library/Frameworks"), "/Library/Frameworks", "/Network/Library/Frameworks", "/System/Library/Frameworks", ] DEFAULT_LIBRARY_FALLBACK = [ os.path.expanduser("~/lib"), "/usr/local/lib", "/lib", "/usr/lib", ] def ensure_utf8(s): """Not all of PyObjC and Python understand unicode paths very well yet""" if isinstance(s, unicode): return s.encode('utf8') return s def dyld_env(env, var): if env is None: env = os.environ rval = env.get(var) if rval is None: return [] return rval.split(':') def dyld_image_suffix(env=None): if env is None: env = os.environ return env.get('DYLD_IMAGE_SUFFIX') def dyld_framework_path(env=None): return dyld_env(env, 'DYLD_FRAMEWORK_PATH') def dyld_library_path(env=None): return dyld_env(env, 'DYLD_LIBRARY_PATH') def dyld_fallback_framework_path(env=None): return dyld_env(env, 'DYLD_FALLBACK_FRAMEWORK_PATH') def dyld_fallback_library_path(env=None): return dyld_env(env, 'DYLD_FALLBACK_LIBRARY_PATH') def dyld_image_suffix_search(iterator, env=None): """For a potential path iterator, add DYLD_IMAGE_SUFFIX semantics""" suffix = dyld_image_suffix(env) if suffix is None: return iterator def _inject(iterator=iterator, suffix=suffix): for path in iterator: if path.endswith('.dylib'): yield path[:-len('.dylib')] + suffix + '.dylib' else: yield path + suffix yield path return _inject() def dyld_override_search(name, env=None): # If DYLD_FRAMEWORK_PATH is set and this dylib_name is a # framework name, use the first file that exists in the framework # 
path if any. If there is none go on to search the DYLD_LIBRARY_PATH # if any. framework = framework_info(name) if framework is not None: for path in dyld_framework_path(env): yield os.path.join(path, framework['name']) # If DYLD_LIBRARY_PATH is set then use the first file that exists # in the path. If none use the original name. for path in dyld_library_path(env): yield os.path.join(path, os.path.basename(name)) def dyld_executable_path_search(name, executable_path=None): # If we haven't done any searching and found a library and the # dylib_name starts with "@executable_path/" then construct the # library name. if name.startswith('@executable_path/') and executable_path is not None: yield os.path.join(executable_path, name[len('@executable_path/'):]) def dyld_default_search(name, env=None): yield name framework = framework_info(name) if framework is not None: fallback_framework_path = dyld_fallback_framework_path(env) for path in fallback_framework_path: yield os.path.join(path, framework['name']) fallback_library_path = dyld_fallback_library_path(env) for path in fallback_library_path: yield os.path.join(path, os.path.basename(name)) if framework is not None and not fallback_framework_path: for path in DEFAULT_FRAMEWORK_FALLBACK: yield os.path.join(path, framework['name']) if not fallback_library_path: for path in DEFAULT_LIBRARY_FALLBACK: yield os.path.join(path, os.path.basename(name)) def dyld_find(name, executable_path=None, env=None): """ Find a library or framework using dyld semantics """ name = ensure_utf8(name) executable_path = ensure_utf8(executable_path) for path in dyld_image_suffix_search(chain( dyld_override_search(name, env), dyld_executable_path_search(name, executable_path), dyld_default_search(name, env), ), env): if os.path.isfile(path): return path raise ValueError("dylib %s could not be found" % (name,)) def framework_find(fn, executable_path=None, env=None): """ Find a framework using dyld semantics in a very loose manner. 
Will take input such as: Python Python.framework Python.framework/Versions/Current """ try: return dyld_find(fn, executable_path=executable_path, env=env) except ValueError, e: pass fmwk_index = fn.rfind('.framework') if fmwk_index == -1: fmwk_index = len(fn) fn += '.framework' fn = os.path.join(fn, os.path.basename(fn[:fmwk_index])) try: return dyld_find(fn, executable_path=executable_path, env=env) except ValueError: raise e def test_dyld_find(): env = {} assert dyld_find('libSystem.dylib') == '/usr/lib/libSystem.dylib' assert dyld_find('System.framework/System') == '/System/Library/Frameworks/System.framework/System' if __name__ == '__main__': test_dyld_find()
gpl-3.0
drptbl/MITMf
core/responder/odict.py
7
3571
# NBT-NS/LLMNR Responder # Created by Laurent Gaffie # Copyright (C) 2014 Trustwave Holdings, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. #Packet class handling all packet generation (see odict.py). from UserDict import DictMixin class OrderedDict(dict, DictMixin): def __init__(self, *args, **kwds): if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) try: self.__end except AttributeError: self.clear() self.update(*args, **kwds) def clear(self): self.__end = end = [] end += [None, end, end] self.__map = {} dict.clear(self) def __setitem__(self, key, value): if key not in self: end = self.__end curr = end[1] curr[2] = end[1] = self.__map[key] = [key, curr, end] dict.__setitem__(self, key, value) def __delitem__(self, key): dict.__delitem__(self, key) key, prev, next = self.__map.pop(key) prev[2] = next next[1] = prev def __iter__(self): end = self.__end curr = end[2] while curr is not end: yield curr[0] curr = curr[2] def __reversed__(self): end = self.__end curr = end[1] while curr is not end: yield curr[0] curr = curr[1] def popitem(self, last=True): if not self: raise KeyError('dictionary is empty') if last: key = reversed(self).next() else: key = iter(self).next() value = self.pop(key) return key, value def __reduce__(self): items = [[k, self[k]] for k in self] tmp = self.__map, self.__end del self.__map, self.__end inst_dict = vars(self).copy() 
self.__map, self.__end = tmp if inst_dict: return (self.__class__, (items,), inst_dict) return self.__class__, (items,) def keys(self): return list(self) setdefault = DictMixin.setdefault update = DictMixin.update pop = DictMixin.pop values = DictMixin.values items = DictMixin.items iterkeys = DictMixin.iterkeys itervalues = DictMixin.itervalues iteritems = DictMixin.iteritems def __repr__(self): if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, self.items()) def copy(self): return self.__class__(self) @classmethod def fromkeys(cls, iterable, value=None): d = cls() for key in iterable: d[key] = value return d def __eq__(self, other): if isinstance(other, OrderedDict): return len(self)==len(other) and \ min(p==q for p, q in zip(self.items(), other.items())) return dict.__eq__(self, other) def __ne__(self, other): return not self == other
gpl-3.0
fernandog/Medusa
ext/boto/kinesis/exceptions.py
170
1611
# -*- coding: utf-8 -*- # Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # from boto.exception import BotoServerError class ProvisionedThroughputExceededException(BotoServerError): pass class LimitExceededException(BotoServerError): pass class ExpiredIteratorException(BotoServerError): pass class ResourceInUseException(BotoServerError): pass class ResourceNotFoundException(BotoServerError): pass class InvalidArgumentException(BotoServerError): pass class SubscriptionRequiredException(BotoServerError): pass
gpl-3.0
diedthreetimes/VCrash
pybindgen-0.15.0.795/.waf-1.5.9-0c853694b62ef4240caa9158a9f2573d/wafadmin/Tools/cflags.py
2
4110
#! /usr/bin/env python # encoding: utf-8 import Logs import Options import Utils class CompilerTraits(object): def get_warnings_flags(self,level): raise NotImplementedError def get_optimization_flags(self,level): raise NotImplementedError def get_debug_flags(self,level): raise NotImplementedError class GccTraits(CompilerTraits): def __init__(self): super(GccTraits,self).__init__() self.warnings_flags=[['-Wall'],['-Werror'],['-Wextra']] def get_warnings_flags(self,level): warnings=[] for l in range(level): if l<len(self.warnings_flags): warnings.extend(self.warnings_flags[l]) else: break return warnings def get_optimization_flags(self,level): if level==0: return['-O0'] elif level==1: return['-O'] elif level==2: return['-O2'] elif level==3: return['-O3'] def get_debug_flags(self,level): if level==0: return(['-g0'],['NDEBUG']) elif level==1: return(['-g'],[]) elif level>=2: return(['-ggdb','-g3'],['_DEBUG']) class IccTraits(CompilerTraits): def __init__(self): super(IccTraits,self).__init__() self.warnings_flags=[[],[],['-Wall']] def get_warnings_flags(self,level): warnings=[] for l in range(level): if l<len(self.warnings_flags): warnings.extend(self.warnings_flags[l]) else: break return warnings def get_optimization_flags(self,level): if level==0: return['-O0'] elif level==1: return['-O'] elif level==2: return['-O2'] elif level==3: return['-O3'] def get_debug_flags(self,level): if level==0: return(['-g0'],['NDEBUG']) elif level==1: return(['-g'],[]) elif level>=2: return(['-ggdb','-g3'],['_DEBUG']) class MsvcTraits(CompilerTraits): def __init__(self): super(MsvcTraits,self).__init__() self.warnings_flags=[['/W2'],['/WX'],['/Wall']] def get_warnings_flags(self,level): warnings=[] for l in range(level): if l<len(self.warnings_flags): warnings.extend(self.warnings_flags[l]) else: break return warnings def get_optimization_flags(self,level): if level==0: return['/Od'] elif level==1: return[] elif level==2: return['/O2'] elif level==3: return['/Ox'] def 
get_debug_flags(self,level): if level==0: return([],['NDEBUG']) elif level==1: return(['/ZI','/RTC1'],[]) elif level>=2: return(['/ZI','/RTC1'],['_DEBUG']) gcc=GccTraits() icc=IccTraits() msvc=MsvcTraits() compiler_mapping={'gcc':gcc,'g++':gcc,'msvc':msvc,'icc':icc,'icpc':icc,} profiles={'default':[2,1,1],'debug':[0,2,3],'release':[3,1,0],} default_profile='default' def set_options(opt): assert default_profile in profiles opt.add_option('-d','--build-profile',action='store',default=default_profile,help=("Specify the build profile. ""Build profiles control the default compilation flags"" used for C/C++ programs, if CCFLAGS/CXXFLAGS are not"" set set in the environment. [Allowed Values: %s]"%", ".join([repr(p)for p in profiles.keys()])),choices=profiles.keys(),dest='build_profile') def detect(conf): cc=conf.env['COMPILER_CC']or None cxx=conf.env['COMPILER_CXX']or None if not(cc or cxx): raise Utils.WafError("neither COMPILER_CC nor COMPILER_CXX are defined; ""maybe the compiler_cc or compiler_cxx tool has not been configured yet?") try: compiler=compiler_mapping[cc] except KeyError: try: compiler=compiler_mapping[cxx] except KeyError: Logs.warn("No compiler flags support for compiler %r or %r"%(cc,cxx)) return opt_level,warn_level,dbg_level=profiles[Options.options.build_profile] optimizations=compiler.get_optimization_flags(opt_level) debug,debug_defs=compiler.get_debug_flags(dbg_level) warnings=compiler.get_warnings_flags(warn_level) if cc and not conf.env['CCFLAGS']: conf.env.append_value('CCFLAGS',optimizations) conf.env.append_value('CCFLAGS',debug) conf.env.append_value('CCFLAGS',warnings) conf.env.append_value('CCDEFINES',debug_defs) if cxx and not conf.env['CXXFLAGS']: conf.env.append_value('CXXFLAGS',optimizations) conf.env.append_value('CXXFLAGS',debug) conf.env.append_value('CXXFLAGS',warnings) conf.env.append_value('CXXDEFINES',debug_defs)
gpl-2.0
tjanez/ansible
lib/ansible/modules/cloud/openstack/os_keystone_service.py
6
6355
#!/usr/bin/python # Copyright 2016 Sam Yaple # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- module: os_keystone_service short_description: Manage OpenStack Identity services extends_documentation_fragment: openstack author: "Sam Yaple (@SamYaple)" version_added: "2.2" description: - Create, update, or delete OpenStack Identity service. If a service with the supplied name already exists, it will be updated with the new description and enabled attributes. options: name: description: - Name of the service required: true description: description: - Description of the service required: false default: None enabled: description: - Is the service enabled required: false default: True service_type: description: - The type of service required: true state: description: - Should the resource be present or absent. choices: [present, absent] default: present requirements: - "python >= 2.6" - "shade" ''' EXAMPLES = ''' # Create a service for glance - os_keystone_service: cloud: mycloud state: present name: glance service_type: image description: OpenStack Image Service # Delete a service - os_keystone_service: cloud: mycloud state: absent name: glance service_type: image ''' RETURN = ''' service: description: Dictionary describing the service. 
returned: On success when I(state) is 'present' type: dictionary contains: id: description: Service ID. type: string sample: "3292f020780b4d5baf27ff7e1d224c44" name: description: Service name. type: string sample: "glance" service_type: description: Service type. type: string sample: "image" description: description: Service description. type: string sample: "OpenStack Image Service" enabled: description: Service status. type: boolean sample: True id: description: The service ID. returned: On success when I(state) is 'present' type: string sample: "3292f020780b4d5baf27ff7e1d224c44" ''' try: import shade HAS_SHADE = True except ImportError: HAS_SHADE = False from distutils.version import StrictVersion def _needs_update(module, service): if service.enabled != module.params['enabled']: return True if service.description is not None and \ service.description != module.params['description']: return True return False def _system_state_change(module, service): state = module.params['state'] if state == 'absent' and service: return True if state == 'present': if service is None: return True return _needs_update(module, service) return False def main(): argument_spec = openstack_full_argument_spec( description=dict(default=None), enabled=dict(default=True, type='bool'), name=dict(required=True), service_type=dict(required=True), state=dict(default='present', choices=['absent', 'present']), ) module_kwargs = openstack_module_kwargs() module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is required for this module') if StrictVersion(shade.__version__) < StrictVersion('1.6.0'): module.fail_json(msg="To utilize this module, the installed version of" "the shade library MUST be >=1.6.0") description = module.params['description'] enabled = module.params['enabled'] name = module.params['name'] state = module.params['state'] service_type = module.params['service_type'] try: cloud = 
shade.operator_cloud(**module.params) services = cloud.search_services(name_or_id=name, filters=dict(type=service_type)) if len(services) > 1: module.fail_json(msg='Service name %s and type %s are not unique' % (name, service_type)) elif len(services) == 1: service = services[0] else: service = None if module.check_mode: module.exit_json(changed=_system_state_change(module, service)) if state == 'present': if service is None: service = cloud.create_service(name=name, description=description, type=service_type, enabled=True) changed = True else: if _needs_update(module, service): service = cloud.update_service( service.id, name=name, type=service_type, enabled=enabled, description=description) changed = True else: changed = False module.exit_json(changed=changed, service=service, id=service.id) elif state == 'absent': if service is None: changed=False else: cloud.delete_service(service.id) changed=True module.exit_json(changed=changed) except shade.OpenStackCloudException as e: module.fail_json(msg=str(e)) from ansible.module_utils.basic import * from ansible.module_utils.openstack import * if __name__ == '__main__': main()
gpl-3.0
kimshinelove/naver-npm
node_modules/node-gyp/gyp/pylib/gyp/generator/gypd.py
1824
3474
# Copyright (c) 2011 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """gypd output module This module produces gyp input as its output. Output files are given the .gypd extension to avoid overwriting the .gyp files that they are generated from. Internal references to .gyp files (such as those found in "dependencies" sections) are not adjusted to point to .gypd files instead; unlike other paths, which are relative to the .gyp or .gypd file, such paths are relative to the directory from which gyp was run to create the .gypd file. This generator module is intended to be a sample and a debugging aid, hence the "d" for "debug" in .gypd. It is useful to inspect the results of the various merges, expansions, and conditional evaluations performed by gyp and to see a representation of what would be fed to a generator module. It's not advisable to rename .gypd files produced by this module to .gyp, because they will have all merges, expansions, and evaluations already performed and the relevant constructs not present in the output; paths to dependencies may be wrong; and various sections that do not belong in .gyp files such as such as "included_files" and "*_excluded" will be present. Output will also be stripped of comments. This is not intended to be a general-purpose gyp pretty-printer; for that, you probably just want to run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip comments but won't do all of the other things done to this module's output. The specific formatting of the output generated by this module is subject to change. """ import gyp.common import errno import os import pprint # These variables should just be spit back out as variable references. 
_generator_identity_variables = [ 'CONFIGURATION_NAME', 'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX', 'INTERMEDIATE_DIR', 'LIB_DIR', 'PRODUCT_DIR', 'RULE_INPUT_ROOT', 'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT', 'RULE_INPUT_NAME', 'RULE_INPUT_PATH', 'SHARED_INTERMEDIATE_DIR', 'SHARED_LIB_DIR', 'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX', 'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX', ] # gypd doesn't define a default value for OS like many other generator # modules. Specify "-D OS=whatever" on the command line to provide a value. generator_default_variables = { } # gypd supports multiple toolsets generator_supports_multiple_toolsets = True # TODO(mark): This always uses <, which isn't right. The input module should # notify the generator to tell it which phase it is operating in, and this # module should use < for the early phase and then switch to > for the late # phase. Bonus points for carrying @ back into the output too. for v in _generator_identity_variables: generator_default_variables[v] = '<(%s)' % v def GenerateOutput(target_list, target_dicts, data, params): output_files = {} for qualified_target in target_list: [input_file, target] = \ gyp.common.ParseQualifiedTarget(qualified_target)[0:2] if input_file[-4:] != '.gyp': continue input_file_stem = input_file[:-4] output_file = input_file_stem + params['options'].suffix + '.gypd' if not output_file in output_files: output_files[output_file] = input_file for output_file, input_file in output_files.iteritems(): output = open(output_file, 'w') pprint.pprint(data[input_file], output) output.close()
artistic-2.0
juanalfonsopr/odoo
openerp/modules/graph.py
260
7763
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## """ Modules dependency graph. """ import os, sys, imp from os.path import join as opj import itertools import zipimport import openerp import openerp.osv as osv import openerp.tools as tools import openerp.tools.osutil as osutil from openerp.tools.safe_eval import safe_eval as eval from openerp.tools.translate import _ import zipfile import openerp.release as release import re import base64 from zipfile import PyZipFile, ZIP_DEFLATED from cStringIO import StringIO import logging _logger = logging.getLogger(__name__) class Graph(dict): """ Modules dependency graph. The graph is a mapping from module name to Nodes. 
""" def add_node(self, name, info): max_depth, father = 0, None for d in info['depends']: n = self.get(d) or Node(d, self, None) # lazy creation, do not use default value for get() if n.depth >= max_depth: father = n max_depth = n.depth if father: return father.add_child(name, info) else: return Node(name, self, info) def update_from_db(self, cr): if not len(self): return # update the graph with values from the database (if exist) ## First, we set the default values for each package in graph additional_data = dict((key, {'id': 0, 'state': 'uninstalled', 'dbdemo': False, 'installed_version': None}) for key in self.keys()) ## Then we get the values from the database cr.execute('SELECT name, id, state, demo AS dbdemo, latest_version AS installed_version' ' FROM ir_module_module' ' WHERE name IN %s',(tuple(additional_data),) ) ## and we update the default values with values from the database additional_data.update((x['name'], x) for x in cr.dictfetchall()) for package in self.values(): for k, v in additional_data[package.name].items(): setattr(package, k, v) def add_module(self, cr, module, force=None): self.add_modules(cr, [module], force) def add_modules(self, cr, module_list, force=None): if force is None: force = [] packages = [] len_graph = len(self) for module in module_list: # This will raise an exception if no/unreadable descriptor file. # NOTE The call to load_information_from_description_file is already # done by db.initialize, so it is possible to not do it again here. 
info = openerp.modules.module.load_information_from_description_file(module) if info and info['installable']: packages.append((module, info)) # TODO directly a dict, like in get_modules_with_version else: _logger.warning('module %s: not installable, skipped', module) dependencies = dict([(p, info['depends']) for p, info in packages]) current, later = set([p for p, info in packages]), set() while packages and current > later: package, info = packages[0] deps = info['depends'] # if all dependencies of 'package' are already in the graph, add 'package' in the graph if reduce(lambda x, y: x and y in self, deps, True): if not package in current: packages.pop(0) continue later.clear() current.remove(package) node = self.add_node(package, info) for kind in ('init', 'demo', 'update'): if package in tools.config[kind] or 'all' in tools.config[kind] or kind in force: setattr(node, kind, True) else: later.add(package) packages.append((package, info)) packages.pop(0) self.update_from_db(cr) for package in later: unmet_deps = filter(lambda p: p not in self, dependencies[package]) _logger.error('module %s: Unmet dependencies: %s', package, ', '.join(unmet_deps)) result = len(self) - len_graph if result != len(module_list): _logger.warning('Some modules were not loaded.') return result def __iter__(self): level = 0 done = set(self.keys()) while done: level_modules = sorted((name, module) for name, module in self.items() if module.depth==level) for name, module in level_modules: done.remove(name) yield module level += 1 def __str__(self): return '\n'.join(str(n) for n in self if n.depth == 0) class Node(object): """ One module in the modules dependency graph. Node acts as a per-module singleton. A node is constructed via Graph.add_module() or Graph.add_modules(). Some of its fields are from ir_module_module (setted by Graph.update_from_db()). 
""" def __new__(cls, name, graph, info): if name in graph: inst = graph[name] else: inst = object.__new__(cls) graph[name] = inst return inst def __init__(self, name, graph, info): self.name = name self.graph = graph self.info = info or getattr(self, 'info', {}) if not hasattr(self, 'children'): self.children = [] if not hasattr(self, 'depth'): self.depth = 0 @property def data(self): return self.info def add_child(self, name, info): node = Node(name, self.graph, info) node.depth = self.depth + 1 if node not in self.children: self.children.append(node) for attr in ('init', 'update', 'demo'): if hasattr(self, attr): setattr(node, attr, True) self.children.sort(lambda x, y: cmp(x.name, y.name)) return node def __setattr__(self, name, value): super(Node, self).__setattr__(name, value) if name in ('init', 'update', 'demo'): tools.config[name][self.name] = 1 for child in self.children: setattr(child, name, value) if name == 'depth': for child in self.children: setattr(child, name, value + 1) def __iter__(self): return itertools.chain(iter(self.children), *map(iter, self.children)) def __str__(self): return self._pprint() def _pprint(self, depth=0): s = '%s\n' % self.name for c in self.children: s += '%s`-> %s' % (' ' * depth, c._pprint(depth+1)) return s # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
icio/github3.py
tests/integration/test_users.py
7
4156
"""Integration tests for the User class.""" import github3 from .helper import IntegrationHelper class TestUser(IntegrationHelper): """Integration tests for methods on the User class.""" def test_events(self): """Show that a user can retrieve a events performed by a user.""" cassette_name = self.cassette_name('events') with self.recorder.use_cassette(cassette_name): user = self.gh.user('sigmavirus24') events = list(user.events(25)) assert len(events) > 0 for event in events: assert isinstance(event, github3.events.Event) def test_followers(self): """Show that a user can retrieve any user's followers.""" cassette_name = self.cassette_name('followers') with self.recorder.use_cassette(cassette_name): user = self.gh.user('sigmavirus24') followers = list(user.followers(50)) assert len(followers) > 0 for follower in followers: assert isinstance(follower, github3.users.User) def test_following(self): """Show that a user can retrieve users that a user is following.""" cassette_name = self.cassette_name('following') with self.recorder.use_cassette(cassette_name): user = self.gh.user('sigmavirus24') following = list(user.following(50)) assert len(following) > 0 for person in following: assert isinstance(person, github3.users.User) def test_keys(self): """Show that a user can retrieve any user's public keys.""" cassette_name = self.cassette_name('keys') with self.recorder.use_cassette(cassette_name): user = self.gh.user('sigmavirus24') keys = list(user.keys()) assert len(keys) > 0 for key in keys: assert isinstance(key, github3.users.Key) def test_organization_events(self): """Show that a user can retrieve their events on an organization.""" self.basic_login() cassette_name = self.cassette_name('organization_events') with self.recorder.use_cassette(cassette_name): user = self.gh.user('sigmavirus24') events = list(user.organization_events('pdfkit', 25)) assert len(events) > 0 for event in events: assert isinstance(event, github3.events.Event) def test_organizations(self): 
"""Show that a user can retrieve any user's organizations.""" cassette_name = self.cassette_name('organizations') with self.recorder.use_cassette(cassette_name): u = self.gh.user('sigmavirus24') for o in u.organizations(number=25): assert isinstance(o, github3.orgs.Organization) def test_received_events(self): """Show that a user can retrieve any user's received events.""" cassette_name = self.cassette_name('received_events') with self.recorder.use_cassette(cassette_name): user = self.gh.user('sigmavirus24') events = list(user.received_events(number=25)) assert len(events) > 0 for event in events: assert isinstance(event, github3.events.Event) def test_starred_repositories(self): """Show that a user can retrieve the repositories starred by a user.""" cassette_name = self.cassette_name('starred_repositories') with self.recorder.use_cassette(cassette_name): user = self.gh.user('sigmavirus24') repos = list(user.starred_repositories(50)) assert len(repos) > 0 for starred in repos: assert isinstance(starred, github3.repos.Repository) def test_subscriptions(self): """Show that a user can retrieve the repos subscribed to by a user.""" cassette_name = self.cassette_name('subscriptions') with self.recorder.use_cassette(cassette_name): user = self.gh.user('sigmavirus24') repos = list(user.subscriptions()) assert len(repos) > 0 for repository in repos: assert isinstance(repository, github3.repos.Repository)
bsd-3-clause
simonwydooghe/ansible
test/units/modules/network/onyx/test_onyx_interface.py
68
5233
# # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from units.compat.mock import patch from ansible.modules.network.onyx import onyx_interface from units.modules.utils import set_module_args from .onyx_module import TestOnyxModule, load_fixture class TestOnyxInterfaceModule(TestOnyxModule): module = onyx_interface def setUp(self): super(TestOnyxInterfaceModule, self).setUp() self.mock_get_config = patch.object( onyx_interface.OnyxInterfaceModule, "_get_interfaces_config") self.get_config = self.mock_get_config.start() self.mock_get_interfaces_status = patch.object( onyx_interface.OnyxInterfaceModule, "_get_interfaces_status") self.get_interfaces_status = self.mock_get_interfaces_status.start() self.mock_get_interfaces_rates = patch.object( onyx_interface.OnyxInterfaceModule, "_get_interfaces_rates") self.get_interfaces_rates = self.mock_get_interfaces_rates.start() self.mock_load_config = patch( 'ansible.module_utils.network.onyx.onyx.load_config') self.load_config = self.mock_load_config.start() self.mock_get_version = patch.object( onyx_interface.OnyxInterfaceModule, "_get_os_version") self.get_version = self.mock_get_version.start() def tearDown(self): super(TestOnyxInterfaceModule, self).tearDown() self.mock_get_config.stop() self.mock_load_config.stop() def load_fixtures(self, commands=None, transport='cli'): config_file = 'onyx_interfaces_show.cfg' self.get_config.return_value = load_fixture(config_file) self.load_config.return_value = None self.get_version.return_value = "3.6.5000" def test_mtu_no_change(self): set_module_args(dict(name='Eth1/1', mtu=1500)) self.execute_module(changed=False) def test_mtu_change(self): set_module_args(dict(name='Eth1/1', mtu=1522)) commands = ['interface ethernet 1/1', 'mtu 1522 force', 'exit'] self.execute_module(changed=True, 
commands=commands) def test_speed_no_change(self): set_module_args(dict(name='Eth1/1', speed='40G')) self.execute_module(changed=False) def test_speed_change(self): set_module_args(dict(name='Eth1/1', speed='100G')) commands = ['interface ethernet 1/1', 'speed 100G force', 'exit'] self.execute_module(changed=True, commands=commands) def test_mtu_speed_change(self): set_module_args(dict(name='Eth1/1', speed='100G', mtu=1522)) commands = ['interface ethernet 1/1', 'speed 100G force', 'mtu 1522 force', 'exit'] self.execute_module(changed=True, commands=commands) def test_admin_state_no_change(self): set_module_args(dict(name='Eth1/1', enabled=True)) self.execute_module(changed=False) def test_admin_state_change(self): set_module_args(dict(name='Eth1/1', enabled=False)) commands = ['interface ethernet 1/1', 'shutdown', 'exit'] self.execute_module(changed=True, commands=commands) def test_add_loopback_if(self): set_module_args(dict(name='Loopback 1', description='Loopback test')) commands = ['interface loopback 1', 'description Loopback test', 'exit'] self.execute_module(changed=True, commands=commands) def test_add_vlan_if(self): set_module_args(dict(name='Vlan 101', description='Vlan test', enabled=True)) commands = ['interface vlan 101', 'description Vlan test', 'no shutdown', 'exit'] self.execute_module(changed=True, commands=commands) def test_remove_vlan_if(self): set_module_args(dict(name='Vlan 1002', state='absent')) commands = ['no interface vlan 1002'] self.execute_module(changed=True, commands=commands) def test_oper_state_check(self): set_module_args(dict(name='Eth1/1', enabled=True, state='down')) config_file = 'onyx_interfaces_status.cfg' self.get_interfaces_status.return_value = load_fixture(config_file) self.execute_module(changed=False) def test_vlan_oper_state_check(self): set_module_args(dict(name='Vlan 1002', state='down')) config_file = 'onyx_interfaces_status.cfg' self.get_interfaces_status.return_value = load_fixture(config_file) 
self.execute_module(changed=False) def test_rx_rate_check(self): set_module_args(dict(name='Eth1/1', enabled=True, rx_rate='ge(9000)')) config_file = 'onyx_interfaces_rates.cfg' self.get_interfaces_rates.return_value = load_fixture(config_file) self.execute_module(changed=False) def test_tx_rate_check(self): set_module_args(dict(name='Eth1/1', enabled=True, tx_rate='ge(10000)')) config_file = 'onyx_interfaces_rates.cfg' self.get_interfaces_rates.return_value = load_fixture(config_file) self.execute_module(changed=False)
gpl-3.0
yieldbot/herodb
herodb/test/server_tests.py
1
12456
import types from requests.exceptions import HTTPError from herodb.client import StoreClient from herodb.test.util import run_server, stop_server from nose import tools as nt import time client = None def setup_hero(): global client run_server() time.sleep(1) client = StoreClient('http://localhost:8081', 'test') client.create_store('test') def teardown_hero(): global client stop_server() client = None @nt.with_setup(setup=setup_hero, teardown=teardown_hero) def test_put(): sha = client.put("test", "foo", "foo") nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) nt.assert_equal(client.get('test', "foo", commit_sha=sha['sha']), "foo") client.get('test', 'foo', commit_sha=sha['sha']) sha = client.put('test', "a/b", "a/b") nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) nt.assert_equal(client.get('test', "a/b"), "a/b") sha = client.put('test', "a/b/c", "a/b/c") nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) @nt.with_setup(setup=setup_hero, teardown=teardown_hero) def test_delete(): sha = client.put('test', "foo", "foo") nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) sha = client.put('test', "a/b", "a/b") nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) sha = client.delete('test', "foo") nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) nt.assert_raises(HTTPError, client.get, 'test', "foo" ) nt.assert_equal(client.get('test', "a/b"), "a/b") sha = client.delete('test', "a/b") nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) nt.assert_raises(HTTPError, client.get, 'test', 'a/b') @nt.with_setup(setup=setup_hero, teardown=teardown_hero) def test_put_many(): sha = client.put('test', 'a/b', {'x': 1, 'y': 2}) nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) nt.assert_equal(client.get('test', "a/b/x"), 1) nt.assert_equal(client.get('test', "a/b/y"), 2) @nt.with_setup(setup=setup_hero, 
teardown=teardown_hero) def test_trees(): sha = client.put('test', 'foo', 'foo') nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) sha = client.put('test', 'a', {'b': 'a/b'}) nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) sha = client.put('test', 'x', {'y': {'z': 'x/y/z'}}) nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) d = client.trees('test') nt.assert_true('foo' in d) nt.assert_true('a' in d) nt.assert_equal(type(d['a']), types.DictType) nt.assert_true('b' in d['a']) nt.assert_equal(d['a']['b'], 'a/b') nt.assert_true('x' in d) nt.assert_equal(type(d['x']), types.DictType) nt.assert_true('y' in d['x']) nt.assert_equal(type(d['x']['y']), types.DictType) nt.assert_true('z' in d['x']['y']) nt.assert_equal(d['x']['y']['z'], 'x/y/z') @nt.with_setup(setup=setup_hero, teardown=teardown_hero) def test_serialization(): sha = client.put('test', 'int_attr', 1) nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) sha = client.put('test', 'bool_attr', True) nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) sha = client.put('test', 'string_attr', 'foobar') nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) check_type_and_value(client.get('test', 'int_attr'), 1, types.IntType) check_type_and_value(client.get('test', 'bool_attr'), True, types.BooleanType) check_type_and_value(client.get('test', 'string_attr'), 'foobar', types.UnicodeType) entries = { 'foo': 'foo', 'a/b': 'a/b', 'x/y/z': 'x/y/z' } sha = client.put('test', 'bar', entries, flatten_keys=False) nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) d = client.get('test', 'bar') nt.assert_equal(type(d), types.DictType) nt.assert_equal(d, entries) @nt.with_setup(setup=setup_hero, teardown=teardown_hero) def test_sparse_trees(): sha = client.put('test', 'a/1', {'x': 1}) nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) sha = client.put('test', 'b/1', 
{'x': 3}) nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) t = client.trees('test') nt.assert_true('a' in t) nt.assert_true('1' in t['a']) nt.assert_true('x' in t['a']['1']) nt.assert_equal(t['a']['1']['x'], 1) nt.assert_true('b' in t) nt.assert_true('1' in t['b']) nt.assert_true('x' in t['b']['1']) nt.assert_equal(t['b']['1']['x'], 3) t = client.trees('test', pattern='a') nt.assert_true('a' in t) nt.assert_true('1' in t['a']) nt.assert_true('x' in t['a']['1']) nt.assert_true('b' not in t) t = client.trees('test', pattern='a', max_level=1) nt.assert_true('a' not in t) t = client.trees('test', pattern='a', max_level=2) nt.assert_true('a' not in t) t = client.trees('test', pattern='a', max_level=3) nt.assert_true('a' in t) @nt.with_setup(setup=setup_hero, teardown=teardown_hero) def test_branch_merge(): sha = client.put('test', 'foo', 'bar') nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) nt.assert_equal(client.get('test', 'foo'), "bar") sha = client.put('test', 'foo', 'foo', branch='b1') nt.assert_equal(sha['sha'], client.get_branch('test', 'b1')['sha']) nt.assert_equal(client.get('test', 'foo', branch='b1'), "foo") sha = client.merge('test', 'b1') nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) nt.assert_equal(client.get('test', 'foo'), "foo") sha = client.delete('test', 'foo', branch='b1') nt.assert_equal(sha['sha'], client.get_branch('test', 'b1')['sha']) nt.assert_raises(HTTPError, client.get, 'test', 'foo', branch='b1') sha = client.merge('test', 'b1') nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) nt.assert_raises(HTTPError, client.get, 'test', 'foo') sha = client.put('test', 'bar', 'bar') nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) nt.assert_equal(client.get('test', 'bar'), "bar") sha = client.delete('test', 'bar', branch='b2') nt.assert_equal(sha['sha'], client.get_branch('test', 'b2')['sha']) nt.assert_equal(client.get('test', 'bar'), 
"bar") nt.assert_raises(HTTPError, client.get, 'test', 'bar', branch='b2') sha = client.merge('test', 'b2') nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) #nt.assert_equal(client.get('test', 'bar'), None) nt.assert_raises(HTTPError, client.get, 'test', 'bar') sha = client.create_branch('test', 'b3') nt.assert_equal(sha['sha'], client.get_branch('test', 'b3')['sha']) nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) sha = client.put('test', 'foo', 'baz', branch='b3') nt.assert_equal(sha['sha'], client.get_branch('test', 'b3')['sha']) sha = client.merge('test', 'b3') nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) @nt.with_setup(setup=setup_hero, teardown=teardown_hero) def test_sparse_puts(): sha = client.put('test', 'a', {'x': 1, 'y': 2, 'z': {'a': 1}}) nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) t = client.trees('test') nt.assert_true('a' in t) nt.assert_true('x' in t['a']) nt.assert_equal(t['a']['x'], 1) nt.assert_true('y' in t['a']) nt.assert_equal(t['a']['y'], 2) nt.assert_true('a' in t['a']['z']) nt.assert_equal(t['a']['z']['a'], 1) sha = client.put('test', 'a', {'x': 2}, overwrite=False) nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) t = client.trees('test') nt.assert_true('a' in t) nt.assert_true('x' in t['a']) nt.assert_equal(t['a']['x'], 2) nt.assert_true('y' in t['a']) nt.assert_equal(t['a']['y'], 2) nt.assert_true('z' in t['a']) nt.assert_true('a' in t['a']['z']) nt.assert_equal(t['a']['z']['a'], 1) sha = client.put('test', 'a', {'x': 3}, overwrite=True) nt.assert_equal(sha['sha'], client.get_branch('test', 'master')['sha']) t = client.trees('test') nt.assert_true('a' in t) nt.assert_true('x' in t['a']) nt.assert_equal(t['a']['x'], 3) nt.assert_true('y' not in t['a']) nt.assert_true('z' in t['a']) nt.assert_true('a' in t['a']['z']) nt.assert_equal(t['a']['z']['a'], 1) @nt.with_setup(setup=setup_hero, teardown=teardown_hero) def 
test_server_caching(): client.cache.enabled = False sha = client.put('test', 'foo', 'bar') # get verify_cache_stats(client.get_cache_stats(), 0, 0, 0) client.get('test', 'foo') verify_cache_stats(client.get_cache_stats(), 1, 0, 0) client.get('test', 'foo', commit_sha=sha['sha']) verify_cache_stats(client.get_cache_stats(), 2, 0, 1) client.get('test', 'foo', commit_sha=sha['sha']) verify_cache_stats(client.get_cache_stats(), 3, 1, 1) # keys client.reset_cache_stats() verify_cache_stats(client.get_cache_stats(), 0, 0, 0) client.keys('test') verify_cache_stats(client.get_cache_stats(), 1, 0, 0) client.keys('test', commit_sha=sha['sha']) verify_cache_stats(client.get_cache_stats(), 2, 0, 1) client.keys('test', commit_sha=sha['sha']) verify_cache_stats(client.get_cache_stats(), 3, 1, 1) # entries client.reset_cache_stats() verify_cache_stats(client.get_cache_stats(), 0, 0, 0) client.entries('test') verify_cache_stats(client.get_cache_stats(), 1, 0, 0) client.entries('test', commit_sha=sha['sha']) verify_cache_stats(client.get_cache_stats(), 2, 0, 1) client.entries('test', commit_sha=sha['sha']) verify_cache_stats(client.get_cache_stats(), 3, 1, 1) # trees client.reset_cache_stats() verify_cache_stats(client.get_cache_stats(), 0, 0, 0) client.trees('test') verify_cache_stats(client.get_cache_stats(), 1, 0, 0) client.trees('test', commit_sha=sha['sha']) verify_cache_stats(client.get_cache_stats(), 2, 0, 1) client.trees('test', commit_sha=sha['sha']) verify_cache_stats(client.get_cache_stats(), 3, 1, 1) @nt.with_setup(setup=setup_hero, teardown=teardown_hero) def test_client_caching(): sha = client.put('test', 'foo', 'bar') # get verify_cache_stats(client.get_local_cache_stats(), 0, 0, 0) client.get('test', 'foo') verify_cache_stats(client.get_local_cache_stats(), 1, 0, 0) client.get('test', 'foo', commit_sha=sha['sha']) verify_cache_stats(client.get_local_cache_stats(), 2, 0, 1) client.get('test', 'foo', commit_sha=sha['sha']) 
verify_cache_stats(client.get_local_cache_stats(), 3, 1, 1) # keys client.cache.reset_stats() verify_cache_stats(client.get_local_cache_stats(), 0, 0, 0) client.keys('test') verify_cache_stats(client.get_local_cache_stats(), 1, 0, 0) client.keys('test', commit_sha=sha['sha']) verify_cache_stats(client.get_local_cache_stats(), 2, 0, 1) client.keys('test', commit_sha=sha['sha']) verify_cache_stats(client.get_local_cache_stats(), 3, 1, 1) # entries client.cache.reset_stats() verify_cache_stats(client.get_local_cache_stats(), 0, 0, 0) client.entries('test') verify_cache_stats(client.get_local_cache_stats(), 1, 0, 0) client.entries('test', commit_sha=sha['sha']) verify_cache_stats(client.get_local_cache_stats(), 2, 0, 1) client.entries('test', commit_sha=sha['sha']) verify_cache_stats(client.get_local_cache_stats(), 3, 1, 1) # trees client.cache.reset_stats() verify_cache_stats(client.get_local_cache_stats(), 0, 0, 0) client.trees('test') verify_cache_stats(client.get_local_cache_stats(), 1, 0, 0) client.trees('test', commit_sha=sha['sha']) verify_cache_stats(client.get_local_cache_stats(), 2, 0, 1) client.trees('test', commit_sha=sha['sha']) verify_cache_stats(client.get_local_cache_stats(), 3, 1, 1) def verify_cache_stats(cache_stats, requests=None, hits=None, misses=None): if requests: nt.assert_equal(cache_stats['requests'], requests) if hits: nt.assert_equal(cache_stats['hits'], hits) if misses: nt.assert_equal(cache_stats['misses'], misses) def check_type_and_value(v, ev, et): nt.assert_equal(type(v), et) nt.assert_equal(v, ev)
gpl-3.0
AsherBond/MondocosmOS
gdal/swig/python/samples/attachpct.py
3
3133
#!/usr/bin/env python #****************************************************************************** # $Id: attachpct.py 18195 2009-12-06 20:24:39Z rouault $ # # Project: GDAL # Purpose: Simple command line program for translating ESRI .prj files # into WKT. # Author: Frank Warmerdam, warmerda@home.com # #****************************************************************************** # Copyright (c) 2000, Frank Warmerdam # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. #****************************************************************************** try: from osgeo import gdal except ImportError: import gdal import sys import string if len(sys.argv) < 3: print('Usage: attachpct.py <pctfile> <infile> <outfile>') sys.exit(1) # ============================================================================= # Get the PCT. 
# ============================================================================= ds = gdal.Open( sys.argv[1] ) ct = ds.GetRasterBand(1).GetRasterColorTable() if ct is None: print('No color table on file ', sys.argv[1]) sys.exit(1) ct = ct.Clone() ds = None # ============================================================================= # Create a MEM clone of the source file. # ============================================================================= src_ds = gdal.Open( sys.argv[2] ) mem_ds = gdal.GetDriverByName( 'MEM' ).CreateCopy( 'mem', src_ds ) # ============================================================================= # Assign the color table in memory. # ============================================================================= mem_ds.GetRasterBand(1).SetRasterColorTable( ct ) mem_ds.GetRasterBand(1).SetRasterColorInterpretation( gdal.GCI_PaletteIndex ) # ============================================================================= # Write the dataset to the output file. # ============================================================================= drv = gdal.GetDriverByName( 'GTiff' ) out_ds = drv.CreateCopy( sys.argv[3], mem_ds ) out_ds = None mem_ds = None src_ds = None
agpl-3.0
tmimori/frappe
frappe/core/doctype/version/test_version.py
5
1238
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # See license.txt from __future__ import unicode_literals import frappe import unittest, copy from frappe.test_runner import make_test_objects from frappe.core.doctype.version.version import get_diff class TestVersion(unittest.TestCase): def test_get_diff(self): test_records = make_test_objects('Event', reset = True) old_doc = frappe.get_doc("Event", test_records[0]) new_doc = copy.deepcopy(old_doc) old_doc.color = None new_doc.color = '#fafafa' diff = get_diff(old_doc, new_doc)['changed'] self.assertEquals(get_fieldnames(diff)[0], 'color') self.assertTrue(get_old_values(diff)[0] is None) self.assertEquals(get_new_values(diff)[0], '#fafafa') new_doc.starts_on = "2017-07-20" diff = get_diff(old_doc, new_doc)['changed'] self.assertEquals(get_fieldnames(diff)[0], 'starts_on') self.assertEquals(get_old_values(diff)[0], '01-01-2014 00:00:00') self.assertEquals(get_new_values(diff)[0], '07-20-2017 00:00:00') def get_fieldnames(change_array): return [d[0] for d in change_array] def get_old_values(change_array): return [d[1] for d in change_array] def get_new_values(change_array): return [d[2] for d in change_array]
mit
abhikpal/p5py
p5/sketch/Vispy3DRenderer/renderer3d.py
2
18575
# # Part of p5: A Python package based on Processing # Copyright (C) 2017-2019 Abhik Pal # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from numpy.linalg import inv from dataclasses import dataclass from sys import stderr import numpy as np import math from p5.pmath import matrix import builtins from vispy import gloo from vispy.gloo import Texture2D, Program from contextlib import contextmanager from p5.core.constants import Z_EPSILON from p5.core.geometry import Geometry from ..Vispy2DRenderer.shape import PShape from p5.pmath.matrix import translation_matrix from ..Vispy2DRenderer.openglrenderer import OpenGLRenderer, get_render_primitives, to_3x3, Style, COLOR_WHITE from .shaders3d import src_default, src_fbuffer, src_normal, src_phong from p5.core.material import BasicMaterial, NormalMaterial, BlinnPhongMaterial class GlslList: """List of objects to be used in glsl """ def __init__(self, max_size, obj_size, dtype): """Initialize GlslList max_size: The maximum size of the list obj_size: The length of an individual object dtype: The data type of this list """ list_shape = (max_size, obj_size) self.data = np.zeros(list_shape, dtype=dtype) self.size = 0 self.max_size = max_size def add(self, obj): if self.size == self.max_size: print("Too many instances of {} are added. 
Max size {}.".format(type(obj), self.max_size), file=stderr) return self.data[self.size] = obj self.size += 1 def clear(self): self.data = np.zeros_like(self.data) self.size = 0 @dataclass class Style3D(Style): ambient = np.array([0.2] * 3) diffuse = np.array([0.6] * 3) specular = np.array([0.8] * 3) shininess = 8 material = BasicMaterial(COLOR_WHITE) class Renderer3D(OpenGLRenderer): def __init__(self): super().__init__(src_fbuffer, src_default) self.style = Style3D() self.normal_prog = Program(src_normal.vert, src_normal.frag) self.phong_prog = Program(src_phong.vert, src_phong.frag) self.lookat_matrix = np.identity(4) # Camera position self.camera_pos = np.zeros(3) # Lights self.MAX_LIGHTS_PER_CATEGORY = 8 self.ambient_light_color = GlslList( self.MAX_LIGHTS_PER_CATEGORY, 3, np.float32) self.directional_light_dir = GlslList( self.MAX_LIGHTS_PER_CATEGORY, 3, np.float32) self.directional_light_color = GlslList( self.MAX_LIGHTS_PER_CATEGORY, 3, np.float32) self.directional_light_specular = GlslList( self.MAX_LIGHTS_PER_CATEGORY, 3, np.float32) self.point_light_color = GlslList( self.MAX_LIGHTS_PER_CATEGORY, 3, np.float32) self.point_light_pos = GlslList( self.MAX_LIGHTS_PER_CATEGORY, 3, np.float32) self.point_light_specular = GlslList( self.MAX_LIGHTS_PER_CATEGORY, 3, np.float32) self.const_falloff = GlslList( self.MAX_LIGHTS_PER_CATEGORY, 1, np.float32) self.linear_falloff = GlslList( self.MAX_LIGHTS_PER_CATEGORY, 1, np.float32) self.quadratic_falloff = GlslList( self.MAX_LIGHTS_PER_CATEGORY, 1, np.float32) self.curr_linear_falloff, self.curr_quadratic_falloff, self.curr_constant_falloff = 0.0, 0.0, 0.0 self.light_specular = np.array([0.0] * 3) def initialize_renderer(self): super().initialize_renderer() self.reset_view() def reset_view(self): self.viewport = ( 0, 0, int(builtins.width * builtins.pixel_x_density), int(builtins.height * builtins.pixel_y_density), ) self.texture_viewport = ( 0, 0, builtins.width, builtins.height, ) gloo.set_viewport(*self.viewport) 
# pylint: disable=no-member cz = (builtins.height / 2) / math.tan(math.radians(30)) self.projection_matrix = matrix.perspective_matrix( math.radians(60), builtins.width / builtins.height, 0.1 * cz, 10 * cz ) self.transform_matrix = np.identity(4) self._update_shader_transforms() self.fbuffer_tex_front = Texture2D( (builtins.height, builtins.width, 3)) self.fbuffer_tex_back = Texture2D((builtins.height, builtins.width, 3)) self.fbuffer.depth_buffer = gloo.RenderBuffer( (builtins.height, builtins.width)) for buf in [self.fbuffer_tex_front, self.fbuffer_tex_back]: self.fbuffer.color_buffer = buf with self.fbuffer: self.clear() def clear(self, color=True, depth=True): """Clear the renderer background.""" gloo.set_state(clear_color=self.style.background_color) # pylint: disable=no-member gloo.clear(color=color, depth=depth) # pylint: disable=no-member def clear_lights(self): self.ambient_light_color.clear() self.directional_light_color.clear() self.directional_light_dir.clear() self.directional_light_specular.clear() self.point_light_color.clear() self.point_light_pos.clear() self.point_light_specular.clear() self.const_falloff.clear() self.linear_falloff.clear() self.quadratic_falloff.clear() def _comm_toggles(self, state=True): gloo.set_state(blend=state) # pylint: disable=no-member gloo.set_state(depth_test=state) # pylint: disable=no-member if state: gloo.set_state(blend_func=('src_alpha', 'one_minus_src_alpha')) # pylint: disable=no-member gloo.set_state(depth_func='lequal') # pylint: disable=no-member def _update_shader_transforms(self): # Default shader self.default_prog['projection'] = self.projection_matrix.T.flatten() self.default_prog['perspective_matrix'] = self.lookat_matrix.T.flatten() # Normal shader self.normal_prog['projection'] = self.projection_matrix.T.flatten() self.normal_prog['perspective'] = self.lookat_matrix.T.flatten() # This is a no-op, meaning that the normals stay in world space, which # matches the behavior in p5.js normal_transform = 
np.identity(3) # I think the transformation below takes the vertices to camera space, but # the results are funky, so it's probably incorrect? - ziyaointl, 2020/07/20 # normal_transform = np.linalg.inv(self.projection_matrix[:3, :3] @ self.lookat_matrix[:3, :3]) self.normal_prog['normal_transform'] = normal_transform.flatten() # Blinn-Phong Shader self.phong_prog['projection'] = self.projection_matrix.T.flatten() self.phong_prog['perspective'] = self.lookat_matrix.T.flatten() @contextmanager def draw_loop(self): """The main draw loop context manager. """ self.transform_matrix = np.identity(4) self._update_shader_transforms() self.fbuffer.color_buffer = self.fbuffer_tex_back with self.fbuffer: gloo.set_viewport(*self.texture_viewport) # pylint: disable=no-member self._comm_toggles() self.fbuffer_prog['texture'] = self.fbuffer_tex_front self.fbuffer_prog.draw('triangle_strip') self.clear(color=False, depth=True) self.clear_lights() yield self.flush_geometry() self.transform_matrix = np.identity(4) gloo.set_viewport(*self.viewport) # pylint: disable=no-member self._comm_toggles(False) self.clear() self.fbuffer_prog['texture'] = self.fbuffer_tex_back self.fbuffer_prog.draw('triangle_strip') self.fbuffer_tex_front, self.fbuffer_tex_back = self.fbuffer_tex_back, self.fbuffer_tex_front def _add_to_draw_queue_simple(self, stype, vertices, idx, color): """Adds shape of stype to draw queue """ self.draw_queue.append((stype, (vertices, idx, color, None, None))) def tnormals(self, shape): """Obtain a list of vertex normals in world coordinates """ if isinstance(shape.material, BasicMaterial): # Basic shader doesn't need this return None return shape.vertex_normals @ np.linalg.inv( to_3x3(self.transform_matrix) @ to_3x3(shape.matrix)) def render(self, shape): if isinstance(shape, Geometry): n = len(shape.vertices) # Perform model transform # TODO: Investigate moving model transform from CPU to the GPU tverts = self._transform_vertices( np.hstack([shape.vertices, np.ones((n, 
1))]), shape.matrix, self.transform_matrix) tnormals = self.tnormals(shape) edges = shape.edges faces = shape.faces self.add_to_draw_queue( 'poly', tverts, edges, faces, self.style.fill_color, self.style.stroke_color, tnormals, self.style.material) elif isinstance(shape, PShape): fill = shape.fill.normalized if shape.fill else None stroke = shape.stroke.normalized if shape.stroke else None obj_list = get_render_primitives(shape) for obj in obj_list: stype, vertices, idx = obj # Transform vertices vertices = self._transform_vertices( np.hstack([vertices, np.ones((len(vertices), 1))]), shape._matrix, self.transform_matrix) # Add to draw queue self._add_to_draw_queue_simple( stype, vertices, idx, stroke if stype == 'lines' else fill) def shape(self, vertices, contours, shape_type, *args): """Render a PShape""" self.render(PShape(vertices=vertices, contours=contours, shape_type=shape_type)) def add_to_draw_queue(self, stype, vertices, edges, faces, fill=None, stroke=None, normals=None, material=None): """Add the given vertex data to the draw queue. :param stype: type of shape to be added. Should be one of {'poly', 'path', 'point'} :type stype: str :param vertices: (N, 3) array containing the vertices to be drawn. :type vertices: np.ndarray :param edges: (N, 2) array containing edges as tuples of indices into the vertex array. This can be None when not appropriate (eg. for points) :type edges: None | np.ndarray :param faces: (N, 3) array containing faces as tuples of indices into the vertex array. For 'point' and 'path' shapes, this can be None :type faces: np.ndarray :param fill: Fill color of the shape as a normalized RGBA tuple. When set to `None` the shape doesn't get a fill (default: None) :type fill: None | tuple :param stroke: Stroke color of the shape as a normalized RGBA tuple. 
When set to `None` the shape doesn't get stroke (default: None) :type stroke: None | tuple // TODO: Update documentation // TODO: Unite style-related attributes for both 2D and 3D under one material class """ fill_shape = self.style.fill_enabled and not (fill is None) stroke_shape = self.style.stroke_enabled and not (stroke is None) if fill_shape and stype not in ['point', 'path']: idx = np.array(faces, dtype=np.uint32).ravel() self.draw_queue.append( ["triangles", (vertices, idx, fill, normals, material)]) if stroke_shape: if stype == 'point': idx = np.arange(0, len(vertices), dtype=np.uint32) self.draw_queue.append( ["points", (vertices, idx, stroke, normals, material)]) else: idx = np.array(edges, dtype=np.uint32).ravel() self.draw_queue.append( ["lines", (vertices, idx, stroke, normals, material)]) def render_with_shaders(self, draw_type, draw_obj): vertices, idx, color, normals, material = draw_obj """Like render_default but is aware of shaders other than the basic one""" # 0. If material does not need normals nor extra info, strip them out # and use the method from superclass if material is None or isinstance(material, BasicMaterial) or draw_type in [ 'points', 'lines']: OpenGLRenderer.render_default(self, draw_type, [draw_obj[:3]]) return # 1. Get the number of vertices num_vertices = len(vertices) # 2. Create empty buffers based on the number of vertices. # data = np.zeros(num_vertices, dtype=[('position', np.float32, 3), ('normal', np.float32, 3)]) # 3. Loop through all the shapes in the geometry queue adding # it's information to the buffer. # draw_indices = [] data['position'][0:num_vertices, ] = np.array(vertices) draw_indices.append(idx) data['normal'][0:num_vertices, ] = np.array(normals) self.vertex_buffer.set_data(data) self.index_buffer.set_data(np.hstack(draw_indices)) if isinstance(material, NormalMaterial): # 4. Bind the buffer to the shader. # self.normal_prog.bind(self.vertex_buffer) # 5. 
Draw the shape using the proper shape type and get rid of # the buffers. # self.normal_prog.draw(draw_type, indices=self.index_buffer) elif isinstance(material, BlinnPhongMaterial): self.phong_prog.bind(self.vertex_buffer) self.phong_prog['u_cam_pos'] = self.camera_pos # Material attributes self.phong_prog['u_ambient_color'] = material.ambient self.phong_prog['u_diffuse_color'] = material.diffuse self.phong_prog['u_specular_color'] = material.specular self.phong_prog['u_shininess'] = material.shininess # Directional lights self.phong_prog['u_directional_light_count'] = self.directional_light_color.size self.phong_prog['u_directional_light_dir'] = self.directional_light_dir.data self.phong_prog['u_directional_light_color'] = self.directional_light_color.data self.phong_prog['u_directional_light_specular'] = self.directional_light_specular.data # Ambient lights self.phong_prog['u_ambient_light_count'] = self.ambient_light_color.size self.phong_prog['u_ambient_light_color'] = self.ambient_light_color.data # Point lights self.phong_prog['u_point_light_count'] = self.point_light_color.size self.phong_prog['u_point_light_color'] = self.point_light_color.data self.phong_prog['u_point_light_pos'] = self.point_light_pos.data self.phong_prog['u_point_light_specular'] = self.point_light_specular.data # Point light falloffs self.phong_prog['u_const_falloff'] = self.const_falloff.data self.phong_prog['u_linear_falloff'] = self.linear_falloff.data self.phong_prog['u_quadratic_falloff'] = self.quadratic_falloff.data # Draw self.phong_prog.draw(draw_type, indices=self.index_buffer) else: raise NotImplementedError("Material not implemented") def flush_geometry(self): """Flush all the shape geometry from the draw queue to the GPU. 
""" for index, shape in enumerate(self.draw_queue): current_shape, current_obj = self.draw_queue[index][0], self.draw_queue[index][1] # If current_shape is lines, bring it to the front by epsilon # to resolve z-fighting if current_shape == 'lines': # line_transform is used whenever we render lines to break ties in depth # We transform the points to camera space, move them by # Z_EPSILON, and them move them back to world space line_transform = inv( self.lookat_matrix).dot( translation_matrix( 0, 0, Z_EPSILON).dot( self.lookat_matrix)) vertices = current_obj[0] current_obj = (np.hstack([vertices, np.ones((vertices.shape[0], 1))]).dot(line_transform.T)[:, :3], *current_obj[1:]) self.render_with_shaders(current_shape, current_obj) self.draw_queue = [] def cleanup(self): super(Renderer3D, self).cleanup() self.normal_prog.delete() self.phong_prog.delete() def add_ambient_light(self, r, g, b): self.ambient_light_color.add(np.array((r, g, b))) def add_directional_light(self, r, g, b, x, y, z): self.directional_light_color.add(np.array((r, g, b))) self.directional_light_dir.add(np.array((x, y, z))) self.directional_light_specular.add(self.light_specular) def add_point_light(self, r, g, b, x, y, z): self.point_light_color.add(np.array((r, g, b))) self.point_light_pos.add(np.array((x, y, z))) self.point_light_specular.add(self.light_specular) self.const_falloff.add(self.curr_constant_falloff) self.linear_falloff.add(self.curr_linear_falloff) self.quadratic_falloff.add(self.curr_quadratic_falloff)
gpl-3.0
akehrer/Motome
Motome/Models/NoteModel.py
1
12624
# Import the future from __future__ import print_function from __future__ import unicode_literals from __future__ import absolute_import import datetime import hashlib import logging import os import re import shutil import zipfile import yaml from Motome.config import ZIP_EXTENSION, NOTE_EXTENSION, ENCODING, STATUS_TEMPLATE, HISTORY_FOLDER, YAML_BRACKET # Set up the logger logger = logging.getLogger(__name__) class NoteModel(object): """ The main note model contains note information and name conversions for a given note. It also handles reading and writing data to the note file. """ def __init__(self, filepath=None): self.filepath = filepath self.wordset = '' self.is_saved = True self._content = '' self._metadata = dict() self._history = [] self._last_seen = -1 def __repr__(self): return '<Note: {0}, Last Modified: {1}>'.format(self.notename, self.timestamp) def __getstate__(self): """ This is used when pickling to remove data we don't want to store """ state = self.__dict__.copy() state['_content'] = '' state['_history'] = [] state['is_saved'] = True return state def __eq__(self, other): if not isinstance(other, self.__class__): return False return self.filepath == other.filepath @property def content(self): if self.timestamp > self._last_seen or self._content == '': self._update_from_file() return self._content @content.setter def content(self, value): if value != self._content: self._content = value self.is_saved = False # self._save_to_file() @property def metadata(self): if self.timestamp > self._last_seen: self._update_from_file() return self._metadata @metadata.setter def metadata(self, value): """ The note's metadata setter, expects a dict :param value: dict of the new metadata """ self._metadata = value self.is_saved = False # self._save_to_file() @property def history(self): zip_filepath = self.historypath self._history = [] try: with zipfile.ZipFile(zip_filepath, 'r') as myzip: self._history = sorted(myzip.infolist(), key=lambda x: x.filename) except 
IOError: pass return self._history @property def pinned(self): try: if int(self.metadata['pinned']) > 0: return True else: return False except (KeyError, TypeError): return False @pinned.setter def pinned(self, value): if value: self._metadata['pinned'] = 1 else: self._metadata['pinned'] = 0 self.is_saved = False # self.save_to_file() @property def recorded(self): if len(self.history) == 0: return False else: two_sec = datetime.timedelta(seconds=2) dt = self.history[-1].date_time latest_dt = datetime.datetime(*dt) current_dt = datetime.datetime.fromtimestamp(self.timestamp) if abs(current_dt - latest_dt) < two_sec: return True else: return False @property def filename(self): try: return os.path.basename(self.filepath) except AttributeError: return None @property def notename(self): try: return os.path.basename(os.path.splitext(self.filepath)[0]) except AttributeError: return None @notename.setter def notename(self, value): """ Handles renaming the note so makes sure all the files get renamed too :param value: string of the new name """ basepath, ext = os.path.splitext(self.filepath) newname = value + ext newpath = ''.join([basepath[:-len(self.notename)], newname]) try: shutil.move(self.filepath, newpath) except OSError: logging.error('Note renaming error: %s to %s'%(self.notename, value)) return try: new_history = os.path.join(self.notedirectory, HISTORY_FOLDER, newname) + ZIP_EXTENSION shutil.move(self.historypath, new_history) except IOError: pass self.filepath = newpath @property def historypath(self): return os.path.join(self.notedirectory, HISTORY_FOLDER, self.filename) + ZIP_EXTENSION @property def notedirectory(self): return os.path.dirname(self.filepath) @property def safename(self): return self.safe_filename(self.notename) @property def unsafename(self): return self.safename.replace('_', ' ') @property def hashname(self): try: return hashlib.sha1(self.filepath.encode('UTF-8')).hexdigest() except AttributeError: return None @property def timestamp(self): 
try: return os.stat(self.filepath).st_mtime except OSError: return -1 @property def first_line(self): return self.content.split('\n', 1)[0] @property def title(self): try: return self.metadata['title'] except (TypeError, KeyError): return self.unsafename @property def urls(self): """ Get all the urls from the content :return: a list of (title, url) tuples found in the content """ url_re_compile = re.compile(r'\[([^\[]+)\]\(([^\)]+)\)', re.VERBOSE | re.MULTILINE) return url_re_compile.findall(self.content) def load_old_note(self, index): """ Load a note from the history :param index: the index value in the history list :returns: a tuple containing the unparsed note content, a date string ('YYYYMMDDHHMMSS') """ try: zip_filepath = self.historypath with zipfile.ZipFile(zip_filepath, 'r') as myzip: old_content_bytes = myzip.read(self.history[index]) old_content = old_content_bytes.decode(ENCODING) old_date = self.history[index].filename[:-(len(ZIP_EXTENSION)+1)] except Exception as e: logger.debug('[NoteModel/load_old_note] %s'%e) old_content = None old_date = None return old_content, old_date def record(self): """ Write the old file data to the zip archive """ history_dir = os.path.join(self.notedirectory, HISTORY_FOLDER) now = datetime.datetime.now().strftime('%Y%m%d%H%M%S') old_filename = now + NOTE_EXTENSION old_filepath = os.path.join(history_dir, old_filename) # create the history storage directory if not os.path.exists(history_dir): try: os.makedirs(history_dir) except OSError as e: logger.warning(e) return self.save_to_file() self.save_to_file(filepath=old_filepath) zip_filepath = self.historypath # self.filepath + ZIP_EXTENSION with zipfile.ZipFile(zip_filepath, 'a') as myzip: myzip.write(old_filepath, old_filename) os.remove(old_filepath) def rename(self): """ Renames the note using the metadata['title'] value """ if self._metadata['title'] == self.notename: return else: self.notename = self._metadata['title'] return self def remove(self): """ Deletes all the 
note's associated files and clears the object's properties :return: boolean of removal success """ ret = False paths = [self.filepath, self.historypath] for path in paths: if os.path.exists(path): try: os.remove(path) ret = True except OSError as e: logger.warning(e) if ret: # clear all info self.wordset = '' self._content = '' self._metadata = {} self._history = [] self._last_seen = -1 return ret def get_status(self): """ Create an html document of basic note status information :return: an html document of note status data """ dt = datetime.datetime.fromtimestamp(self.timestamp) html = STATUS_TEMPLATE.format(notename=self.notename, timestamp=dt.strftime('%c'), recorded=self._latest_record_date()) return html def _latest_record_date(self): """ Get a string of the last history record's datetime :return: A string representation of the date and time """ try: dt = self.history[-1].date_time latest_dt = datetime.datetime(*dt) return latest_dt.strftime('%c') except IndexError: return 'Never' def _update_from_file(self): """ Update the object's internal values from the file """ try: self._content, self._metadata = self.parse_note_content(self.enc_read(self.filepath)) self._last_seen = self.timestamp self.wordset = ' '.join(set(re.findall(r'\w+', self._content.lower()))) except IOError: # file not there or couldn't access it, things may be different self._last_seen = -1 def save_to_file(self, filepath=None): """ Save the content and metadata to the note file """ if filepath is None: filepath = self.filepath if not 'title' in self.metadata.keys(): self.metadata['title'] = self.notename if self.content[-1] == '\n': filedata = self.content else: filedata = self.content + '\n' # use safe_dump to prevent dumping non-standard YAML tags filedata += YAML_BRACKET + '\n' + yaml.safe_dump(self.metadata, default_flow_style=False) + YAML_BRACKET self.enc_write(filepath, filedata) self.is_saved = True @staticmethod def safe_filename(filename): """ Convert the filename into something 
more url safe :param filename: :return: safer filename string or None on failure """ # TODO: Look at a slugify module instead pattern = re.compile('[\W_]+') # find all words root, ext = os.path.splitext(os.path.basename(filename)) return pattern.sub('_', root) if ext is '' else ''.join([pattern.sub('_', root), ext]) @staticmethod def parse_note_content(data): """ Given a file's raw data, split it into its note content and metadata. :param data: file data :return: content str, metadata dict """ meta = dict() try: # find the metadata at the end of the document s = data.split(YAML_BRACKET) m = s[-2] content = ''.join(s[:-2]) meta = yaml.safe_load(m.strip()) # use safe_load to prevent loading non-standard YAML tags # sanity check, is it valid metadata? if meta is None or 'title' not in meta.keys(): meta = dict() content = data except IndexError: content = data except yaml.YAMLError: content = data return content, meta @staticmethod def enc_write(filepath, filedata): """ Encode and write data to a file (unicode inside, bytes outside) :param filepath: the path to the output file :param filedata: the data to write """ # encode things ufilepath = filepath.encode(ENCODING) ufiledata = filedata.encode(ENCODING) with open(ufilepath, mode='wb') as f: f.write(ufiledata) @staticmethod def enc_read(filepath): """ Read and decode data from a file (bytes outside, unicode inside) :param filepath: the path to the input file :return: decoded file data """ ufilepath = filepath.encode(ENCODING) with open(ufilepath, mode='rb') as f: data = f.read() try: return data.decode(ENCODING) except UnicodeDecodeError as e: logger.error('{0} with {1}'.format(e, filepath))
bsd-2-clause
Palasekm/Kaira
ptp/gencpp/library.py
1
4000
#
#    Copyright (C) 2011-2013 Stanislav Bohm
#
#    This file is part of Kaira.
#
#    Kaira is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, version 3 of the License, or
#    (at your option) any later version.
#
#    Kaira is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with Kaira. If not, see <http://www.gnu.org/licenses/>.
#

# This module emits the C++ source and header of a "library" build of a Kaira
# project: one callable C++ wrapper function per net, setters for project
# parameters, and a calib_init() entry point.  All output goes through the
# `builder` writer object (line/block_begin/block_end/if_begin).

import build
import buildnet
import writer  # NOTE(review): imported but not referenced in this module


def write_library_functions(builder):
    # Emit one wrapper per net, then the parameter setters and the
    # library-wide init function.
    for net in builder.project.nets:
        write_library_function(builder, net)
    write_parameters_setters(builder)
    write_library_init_function(builder, )


def write_parameters_setters(builder):
    # For each project parameter P, emit:
    #   void set_parameter_P(<type> value) { param::P.__set_value(value); }
    for p in builder.project.get_parameters():
        builder.line("void set_parameter_{0.name}({0.type} value)", p)
        builder.block_begin()
        builder.line("param::{0.name}.__set_value(value);", p)
        builder.block_end()


def write_library(builder, header_filename):
    # Emit the library implementation file: include the generated header,
    # then the runtime core, the wrappers, and user-defined functions.
    builder.line("#include \"{0}\"", header_filename)
    builder.emptyline()
    buildnet.write_core(builder)
    write_library_functions(builder)
    buildnet.write_user_functions(builder)


def get_library_function_declaration(net):
    # Build the C++ parameter list for a net's wrapper: one reference
    # parameter per input place, then one per output place.  An output place
    # whose name collides with an input is skipped -- that single reference
    # serves as an in/out parameter.
    inputs = net.get_input_places()
    input_names = set(place.interface_input for place in inputs)
    outputs = [ place for place in net.get_output_places()
                if place.interface_output not in input_names ]
    return ",".join([ "{0} &{1}".format(place.type, place.interface_input)
                      for place in inputs ] +
                    [ "{0} &{1}".format(place.type, place.interface_output)
                      for place in outputs ])


def write_library_header_file(builder):
    # Emit the public header: init entry point, parameter setters, and one
    # prototype per net wrapper.  close_guard=False because the include guard
    # is closed explicitly below, after the declarations.
    build.write_header_file(builder, close_guard=False)
    builder.emptyline()
    builder.line("void calib_init(int argc, char **argv);")
    for p in builder.project.get_parameters():
        builder.line("void set_parameter_{0.name}({0.type} {0.name});", p)
    for net in builder.project.nets:
        builder.line("void {0}({1});", net.name, get_library_function_declaration(net))
    build.write_header_file_close_guard(builder)


def write_library_init_function(builder):
    # calib_init() forwards argc/argv into the runtime's main setup.
    builder.line("void calib_init(int argc, char **argv)")
    builder.block_begin()
    buildnet.write_main_setup(builder)
    builder.block_end()


def write_library_function(builder, net, rpc=False):
    # Emit the C++ wrapper that runs one net to completion.  With rpc=True
    # the wrapper takes a packed argument buffer instead of typed references
    # and packs its results back out.
    if rpc:
        args = builder.expand("void *$data, ca::Packer &$packer")
    else:
        args = get_library_function_declaration(net)
    builder.line("void {0}({1})", net.name, args)
    builder.block_begin()
    if rpc:
        builder.line("ca::Unpacker $unpacker($data);")
    builder.line("ca::spawn_net({0});", net.get_index())
    builder.line("Net_{0} *$n = (Net_{0}*)ca::get_main_net();", net.id)
    # Feed every input place, either from the unpacker (rpc) or from the
    # caller's reference arguments.
    for place in net.get_input_places():
        if rpc:
            builder.line("$n->place_{0.id}.add(ca::unpack<{0.type}>($unpacker));", place)
        else:
            builder.line("$n->place_{0.id}.add({0.interface_input});", place)
    builder.line("$n->set_manual_delete();")
    builder.line("ca::main();")
    # After the net has run, each output place must hold a token; emit a
    # runtime check that aborts otherwise, then copy/pack the value out.
    for place in net.get_output_places():
        builder.if_begin("$n->place_{0.id}.is_empty()", place)
        builder.line('fprintf(stderr, "Token in output place of net {0} not found. Aborting.\\n");',
                     net.get_name())
        builder.line("exit(-1);")
        builder.block_end()
        if rpc:
            builder.line("ca::pack($packer, $n->place_{0.id}.begin()->value);", place)
        else:
            builder.line("{0.interface_output} = $n->place_{0.id}.begin()->value;", place)
    builder.line("delete $n;")
    builder.block_end()
gpl-3.0
fclesio/learning-space
Lightning Talk @Movile - ML with Scikit-Learn/Recipes/MeanShift.py
1
1580
# Mean Shift clustering demo, adapted from:
# http://scikit-learn.org/stable/auto_examples/cluster/plot_mean_shift.html#example-cluster-plot-mean-shift-py

print(__doc__)

from itertools import cycle

import numpy as np
import matplotlib.pyplot as plt

from sklearn.cluster import MeanShift, estimate_bandwidth
# FIX: `sklearn.datasets.samples_generator` was a private alias removed in
# scikit-learn 0.24; `make_blobs` is importable from `sklearn.datasets` in
# all supported versions.
from sklearn.datasets import make_blobs

###############################################################################
# Generate sample data: three Gaussian blobs around fixed centers.
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)

###############################################################################
# Compute clustering with MeanShift

# The bandwidth (flat-kernel radius) can be automatically estimated from a
# sample of the data.
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)

ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_

labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)

print("number of estimated clusters : %d" % n_clusters_)

###############################################################################
# Plot result: one color per cluster, with the cluster center highlighted.
plt.figure(1)
plt.clf()

colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
    my_members = labels == k
    cluster_center = cluster_centers[k]
    plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
    plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
             markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
gpl-2.0
GDGLima/contentbox
third_party/django/contrib/gis/management/commands/inspectdb.py
315
1466
from django.core.management.commands.inspectdb import Command as InspectDBCommand


class Command(InspectDBCommand):
    """inspectdb variant that recognizes geometry columns (GeoDjango)."""
    db_module = 'django.contrib.gis.db'
    # Maps table name -> list of geometry column names discovered during
    # introspection; consulted by get_meta() to decide which generated models
    # need a GeoManager.
    gis_tables = {}

    def get_field_type(self, connection, table_name, row):
        """Resolve the field type for *row*, specializing geometry columns."""
        field_type, field_params, field_notes = super(Command, self).get_field_type(
            connection, table_name, row)
        if field_type == 'GeometryField':
            geom_column = row[0]
            # Ask the spatial backend for the concrete geometry field type and
            # any additional keyword parameters it requires.
            field_type, geo_params = connection.introspection.get_geometry_type(
                table_name, geom_column)
            field_params.update(geo_params)
            # Record that this table is geographic.
            self.gis_tables.setdefault(table_name, []).append(geom_column)
        return field_type, field_params, field_notes

    def get_meta(self, table_name):
        """Return the model's Meta lines, adding a GeoManager for geographic tables."""
        meta_lines = super(Command, self).get_meta(table_name)
        if table_name in self.gis_tables:
            # Geographic tables get GeoManager as the default manager.
            meta_lines.insert(0, '        objects = models.GeoManager()')
        return meta_lines
apache-2.0
jean/python-docx
docx/table.py
12
12846
# encoding: utf-8

"""
The |Table| object and related proxy classes.
"""

from __future__ import absolute_import, print_function, unicode_literals

from .blkcntnr import BlockItemContainer
from .enum.style import WD_STYLE_TYPE
from .oxml.simpletypes import ST_Merge
from .shared import Inches, lazyproperty, Parented


class Table(Parented):
    """
    Proxy class for a WordprocessingML ``<w:tbl>`` element.
    """
    def __init__(self, tbl, parent):
        super(Table, self).__init__(parent)
        self._element = self._tbl = tbl

    def add_column(self, width):
        """
        Return a |_Column| object of *width*, newly added rightmost to the
        table.
        """
        # A column is a <w:gridCol> in the grid plus one new <w:tc> per row.
        tblGrid = self._tbl.tblGrid
        gridCol = tblGrid.add_gridCol()
        gridCol.w = width
        for tr in self._tbl.tr_lst:
            tc = tr.add_tc()
            tc.width = width
        return _Column(gridCol, self)

    def add_row(self):
        """
        Return a |_Row| instance, newly added bottom-most to the table.
        """
        # One new <w:tc> per grid column, each inheriting its column's width.
        tbl = self._tbl
        tr = tbl.add_tr()
        for gridCol in tbl.tblGrid.gridCol_lst:
            tc = tr.add_tc()
            tc.width = gridCol.w
        return _Row(tr, self)

    @property
    def alignment(self):
        """
        Read/write. A member of :ref:`WdRowAlignment` or None, specifying the
        positioning of this table between the page margins. |None| if no
        setting is specified, causing the effective value to be inherited
        from the style hierarchy.
        """
        return self._tblPr.alignment

    @alignment.setter
    def alignment(self, value):
        self._tblPr.alignment = value

    @property
    def autofit(self):
        """
        |True| if column widths can be automatically adjusted to improve the
        fit of cell contents. |False| if table layout is fixed. Column widths
        are adjusted in either case if total column width exceeds page width.
        Read/write boolean.
        """
        return self._tblPr.autofit

    @autofit.setter
    def autofit(self, value):
        self._tblPr.autofit = value

    def cell(self, row_idx, col_idx):
        """
        Return |_Cell| instance correponding to table cell at *row_idx*,
        *col_idx* intersection, where (0, 0) is the top, left-most cell.
        """
        # _cells is a flat row-major sequence over the layout grid.
        cell_idx = col_idx + (row_idx * self._column_count)
        return self._cells[cell_idx]

    def column_cells(self, column_idx):
        """
        Sequence of cells in the column at *column_idx* in this table.
        """
        cells = self._cells
        # Step through the flat grid by the column count to stay in-column.
        idxs = range(column_idx, len(cells), self._column_count)
        return [cells[idx] for idx in idxs]

    @lazyproperty
    def columns(self):
        """
        |_Columns| instance representing the sequence of columns in this
        table.
        """
        return _Columns(self._tbl, self)

    def row_cells(self, row_idx):
        """
        Sequence of cells in the row at *row_idx* in this table.
        """
        column_count = self._column_count
        start = row_idx * column_count
        end = start + column_count
        return self._cells[start:end]

    @lazyproperty
    def rows(self):
        """
        |_Rows| instance containing the sequence of rows in this table.
        """
        return _Rows(self._tbl, self)

    @property
    def style(self):
        """
        Read/write. A |_TableStyle| object representing the style applied to
        this table. The default table style for the document (often `Normal
        Table`) is returned if the table has no directly-applied style.
        Assigning |None| to this property removes any directly-applied table
        style causing it to inherit the default table style of the document.
        Note that the style name of a table style differs slightly from that
        displayed in the user interface; a hyphen, if it appears, must be
        removed. For example, `Light Shading - Accent 1` becomes `Light
        Shading Accent 1`.
        """
        style_id = self._tbl.tblStyle_val
        return self.part.get_style(style_id, WD_STYLE_TYPE.TABLE)

    @style.setter
    def style(self, style_or_name):
        style_id = self.part.get_style_id(
            style_or_name, WD_STYLE_TYPE.TABLE
        )
        self._tbl.tblStyle_val = style_id

    @property
    def table(self):
        """
        Provide child objects with reference to the |Table| object they
        belong to, without them having to know their direct parent is a
        |Table| object. This is the terminus of a series of `parent._table`
        calls from an arbitrary child through its ancestors.
        """
        return self

    @property
    def table_direction(self):
        """
        A member of :ref:`WdTableDirection` indicating the direction in which
        the table cells are ordered, e.g. `WD_TABLE_DIRECTION.LTR`. |None|
        indicates the value is inherited from the style hierarchy.
        """
        return self._element.bidiVisual_val

    @table_direction.setter
    def table_direction(self, value):
        self._element.bidiVisual_val = value

    @property
    def _cells(self):
        """
        A sequence of |_Cell| objects, one for each cell of the layout grid.
        If the table contains a span, one or more |_Cell| object references
        are repeated.
        """
        col_count = self._column_count
        cells = []
        for tc in self._tbl.iter_tcs():
            for grid_span_idx in range(tc.grid_span):
                if tc.vMerge == ST_Merge.CONTINUE:
                    # Vertical continuation: reuse the cell directly above,
                    # which is exactly col_count entries back in the flat list.
                    cells.append(cells[-col_count])
                elif grid_span_idx > 0:
                    # Horizontal span: repeat the cell just added.
                    cells.append(cells[-1])
                else:
                    cells.append(_Cell(tc, self))
        return cells

    @property
    def _column_count(self):
        """
        The number of grid columns in this table.
        """
        return self._tbl.col_count

    @property
    def _tblPr(self):
        # The table's <w:tblPr> properties element.
        return self._tbl.tblPr


class _Cell(BlockItemContainer):
    """
    Table cell
    """
    def __init__(self, tc, parent):
        super(_Cell, self).__init__(tc, parent)
        self._tc = tc

    def add_paragraph(self, text='', style=None):
        """
        Return a paragraph newly added to the end of the content in this
        cell. If present, *text* is added to the paragraph in a single run.
        If specified, the paragraph style *style* is applied. If *style* is
        not specified or is |None|, the result is as though the 'Normal'
        style was applied. Note that the formatting of text in a cell can be
        influenced by the table style. *text* can contain tab (``\\t``)
        characters, which are converted to the appropriate XML form for a
        tab. *text* can also include newline (``\\n``) or carriage return
        (``\\r``) characters, each of which is converted to a line break.
        """
        return super(_Cell, self).add_paragraph(text, style)

    def add_table(self, rows, cols):
        """
        Return a table newly added to this cell after any existing cell
        content, having *rows* rows and *cols* columns. An empty paragraph is
        added after the table because Word requires a paragraph element as
        the last element in every cell.
        """
        # Fall back to a 1-inch width when the cell has no explicit width.
        width = self.width if self.width is not None else Inches(1)
        table = super(_Cell, self).add_table(rows, cols, width)
        self.add_paragraph()
        return table

    def merge(self, other_cell):
        """
        Return a merged cell created by spanning the rectangular region
        having this cell and *other_cell* as diagonal corners. Raises
        |InvalidSpanError| if the cells do not define a rectangular region.
        """
        # The merge itself is delegated to the oxml layer.
        tc, tc_2 = self._tc, other_cell._tc
        merged_tc = tc.merge(tc_2)
        return _Cell(merged_tc, self._parent)

    @property
    def paragraphs(self):
        """
        List of paragraphs in the cell. A table cell is required to contain
        at least one block-level element and end with a paragraph. By
        default, a new cell contains a single paragraph. Read-only
        """
        return super(_Cell, self).paragraphs

    @property
    def tables(self):
        """
        List of tables in the cell, in the order they appear. Read-only.
        """
        return super(_Cell, self).tables

    @property
    def text(self):
        """
        The entire contents of this cell as a string of text. Assigning a
        string to this property replaces all existing content with a single
        paragraph containing the assigned text in a single run.
        """
        return '\n'.join(p.text for p in self.paragraphs)

    @text.setter
    def text(self, text):
        """
        Write-only. Set entire contents of cell to the string *text*. Any
        existing content or revisions are replaced.
        """
        # Clear, then rebuild as one paragraph containing one run.
        tc = self._tc
        tc.clear_content()
        p = tc.add_p()
        r = p.add_r()
        r.text = text

    @property
    def width(self):
        """
        The width of this cell in EMU, or |None| if no explicit width is set.
        """
        return self._tc.width

    @width.setter
    def width(self, value):
        self._tc.width = value


class _Column(Parented):
    """
    Table column
    """
    def __init__(self, gridCol, parent):
        super(_Column, self).__init__(parent)
        self._gridCol = gridCol

    @property
    def cells(self):
        """
        Sequence of |_Cell| instances corresponding to cells in this column.
        """
        return tuple(self.table.column_cells(self._index))

    @property
    def table(self):
        """
        Reference to the |Table| object this column belongs to.
        """
        return self._parent.table

    @property
    def width(self):
        """
        The width of this column in EMU, or |None| if no explicit width is
        set.
        """
        return self._gridCol.w

    @width.setter
    def width(self, value):
        self._gridCol.w = value

    @property
    def _index(self):
        """
        Index of this column in its table, starting from zero.
        """
        return self._gridCol.gridCol_idx


class _Columns(Parented):
    """
    Sequence of |_Column| instances corresponding to the columns in a table.
    Supports ``len()``, iteration and indexed access.
    """
    def __init__(self, tbl, parent):
        super(_Columns, self).__init__(parent)
        self._tbl = tbl

    def __getitem__(self, idx):
        """
        Provide indexed access, e.g. 'columns[0]'
        """
        try:
            gridCol = self._gridCol_lst[idx]
        except IndexError:
            msg = "column index [%d] is out of range" % idx
            raise IndexError(msg)
        return _Column(gridCol, self)

    def __iter__(self):
        for gridCol in self._gridCol_lst:
            yield _Column(gridCol, self)

    def __len__(self):
        return len(self._gridCol_lst)

    @property
    def table(self):
        """
        Reference to the |Table| object this column collection belongs to.
        """
        return self._parent.table

    @property
    def _gridCol_lst(self):
        """
        Sequence containing ``<w:gridCol>`` elements for this table, each
        representing a table column.
        """
        tblGrid = self._tbl.tblGrid
        return tblGrid.gridCol_lst


class _Row(Parented):
    """
    Table row
    """
    def __init__(self, tr, parent):
        super(_Row, self).__init__(parent)
        self._tr = tr

    @property
    def cells(self):
        """
        Sequence of |_Cell| instances corresponding to cells in this row.
        """
        return tuple(self.table.row_cells(self._index))

    @property
    def table(self):
        """
        Reference to the |Table| object this row belongs to.
        """
        return self._parent.table

    @property
    def _index(self):
        """
        Index of this row in its table, starting from zero.
        """
        return self._tr.tr_idx


class _Rows(Parented):
    """
    Sequence of |_Row| objects corresponding to the rows in a table.
    Supports ``len()``, iteration, indexed access, and slicing.
    """
    def __init__(self, tbl, parent):
        super(_Rows, self).__init__(parent)
        self._tbl = tbl

    def __getitem__(self, idx):
        """
        Provide indexed access, (e.g. 'rows[0]')
        """
        # Materializing the list also makes slicing work for free.
        return list(self)[idx]

    def __iter__(self):
        return (_Row(tr, self) for tr in self._tbl.tr_lst)

    def __len__(self):
        return len(self._tbl.tr_lst)

    @property
    def table(self):
        """
        Reference to the |Table| object this row collection belongs to.
        """
        return self._parent.table
mit
bradparks/sleepy-puppy
sleepypuppy/admin/collector/views.py
14
1736
# Copyright 2015 Netflix, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from flask.ext.admin.contrib.sqla import ModelView from models import GenericCollector from flask.ext import login from flask_wtf import Form class GenericCollectorView(ModelView): """ ModelView override of Flask Admin for Puppyscripts. """ # CSRF protection form_base_class = Form # Ensure user is authenticated def is_accessible(self): return login.current_user.is_authenticated() # No need to show the many/many relationship for payloads can_create = False can_edit = False list_template = 'generic_list.html' column_list = ( 'pub_date', 'payload', 'assessment', 'puppyscript_name', 'url', 'referrer', 'data' ) column_filters = ('id', 'assessment', 'payload', 'puppyscript_name', 'url', 'referrer') column_sortable_list = ( 'pub_date', 'payload', 'assessment', 'puppyscript_name', 'url', 'referrer' ) def __init__(self, session, **kwargs): super(GenericCollectorView, self).__init__(GenericCollector, session, **kwargs)
apache-2.0
vine/luigi
examples/elasticsearch_index.py
57
3399
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import datetime
import json

import luigi
from luigi.contrib.esindex import CopyToIndex


class FakeDocuments(luigi.Task):
    """
    Generates a local file containing 5 elements of data in JSON format.
    """

    #: the date parameter.
    date = luigi.DateParameter(default=datetime.date.today())

    def run(self):
        """
        Writes data in JSON format into the task's output target.

        The data objects have the following attributes:

        * `_id` is the default Elasticsearch id field,
        * `text`: the text,
        * `date`: the day when the data was created.
        """
        today = datetime.date.today()
        with self.output().open('w') as output:
            # One JSON document per line (line-delimited JSON), which is the
            # input format CopyToIndex expects.
            for i in range(5):
                output.write(json.dumps({'_id': i, 'text': 'Hi %s' % i,
                                         'date': str(today)}))
                output.write('\n')

    def output(self):
        """
        Returns the target output for this task.
        In this case, a successful execution of this task will create a file on the local filesystem.

        :return: the target output for this task.
        :rtype: object (:py:class:`luigi.target.Target`)
        """
        return luigi.LocalTarget(path='/tmp/_docs-%s.ldj' % self.date)


class IndexDocuments(CopyToIndex):
    """
    This task loads JSON data contained in a :py:class:`luigi.target.Target` into an ElasticSearch index.

    This task's input will the target returned by :py:meth:`~.FakeDocuments.output`.

    This class uses :py:meth:`luigi.contrib.esindex.CopyToIndex.run`.

    After running this task you can run:

    .. code-block:: console

        $ curl "localhost:9200/example_index/_search?pretty"

    to see the indexed documents.

    To see the update log, run

    .. code-block:: console

        $ curl "localhost:9200/update_log/_search?q=target_index:example_index&pretty"

    To cleanup both indexes run:

    .. code-block:: console

        $ curl -XDELETE "localhost:9200/example_index"
        $ curl -XDELETE "localhost:9200/update_log/_query?q=target_index:example_index"
    """

    #: date task parameter (default = today)
    date = luigi.DateParameter(default=datetime.date.today())

    #: the name of the index in ElasticSearch to be updated.
    index = 'example_index'

    #: the name of the document type.
    doc_type = 'greetings'

    #: the host running the ElasticSearch service.
    host = 'localhost'

    #: the port used by the ElasticSearch service.
    port = 9200

    def requires(self):
        """
        This task's dependencies:

        * :py:class:`~.FakeDocuments`

        :return: object (:py:class:`luigi.task.Task`)
        """
        return FakeDocuments()


if __name__ == "__main__":
    luigi.run(['--task', 'IndexDocuments'])
apache-2.0
ParticulateFlow/Palabos-PFM
scons/scons-local-2.1.0/SCons/Platform/os2.py
21
2229
"""SCons.Platform.os2

Platform-specific initialization for OS/2 systems.

There normally shouldn't be any need to import this module directly.  It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""

#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/os2.py 5357 2011/09/09 21:31:03 bdeegan"

import win32


def generate(env):
    """Populate *env* with the OS/2 construction variables."""
    # Make sure the execution-environment dict exists before tools touch it.
    if 'ENV' not in env:
        env['ENV'] = {}

    # Fixed platform settings, applied in order.
    platform_settings = (
        ('OBJPREFIX', ''),
        ('OBJSUFFIX', '.obj'),
        ('SHOBJPREFIX', '$OBJPREFIX'),
        ('SHOBJSUFFIX', '$OBJSUFFIX'),
        ('PROGPREFIX', ''),
        ('PROGSUFFIX', '.exe'),
        ('LIBPREFIX', ''),
        ('LIBSUFFIX', '.lib'),
        ('SHLIBPREFIX', ''),
        ('SHLIBSUFFIX', '.dll'),
        ('LIBPREFIXES', '$LIBPREFIX'),
        ('LIBSUFFIXES', ['$LIBSUFFIX', '$SHLIBSUFFIX']),
        ('HOST_OS', 'os2'),
    )
    for variable, value in platform_settings:
        env[variable] = value

    # Architecture detection is shared with the win32 platform module.
    env['HOST_ARCH'] = win32.get_architecture().arch

# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
agpl-3.0
square/pants
src/python/pants/backend/jvm/tasks/dependencies.py
2
2650
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
                        print_function, unicode_literals)

from twitter.common.collections import OrderedSet

from pants.backend.core.tasks.console_task import ConsoleTask
from pants.base.exceptions import TaskError
from pants.base.payload_field import JarsField, PythonRequirementsField


# XXX(pl): JVM/Python hairball violator
class Dependencies(ConsoleTask):
  """Generates a textual list (using the target format) for the dependency set of a target."""

  @staticmethod
  def _is_jvm(target):
    # NOTE(review): not referenced elsewhere in this file's visible code --
    # presumably used by subclasses; verify before removing.
    return target.is_jvm or target.is_jvm_app

  @classmethod
  def register_options(cls, register):
    # The two flags below are mutually exclusive; enforced in __init__.
    super(Dependencies, cls).register_options(register)
    register('--internal-only', default=False, action='store_true',
             help='Specifies that only internal dependencies should be included in the graph '
                  'output (no external jars).')
    register('--external-only', default=False, action='store_true',
             help='Specifies that only external dependencies should be included in the graph '
                  'output (only external jars).')

  def __init__(self, *args, **kwargs):
    super(Dependencies, self).__init__(*args, **kwargs)
    # Cache the output-filtering flags once at construction time.
    self.is_internal_only = self.get_options().internal_only
    self.is_external_only = self.get_options().external_only
    if self.is_internal_only and self.is_external_only:
      raise TaskError('At most one of --internal-only or --external-only can be selected.')

  def console_output(self, unused_method_argument):
    # Yield one line per dependency: internal deps as target addresses,
    # external deps as requirement/jar coordinate strings.
    for target in self.context.target_roots:
      # Walk the full transitive closure, preserving first-visit order.
      ordered_closure = OrderedSet()
      target.walk(ordered_closure.add)

      for tgt in ordered_closure:
        if not self.is_external_only:
          yield tgt.address.spec
        if not self.is_internal_only:
          # TODO(John Sirois): We need an external payload abstraction at which point knowledge
          # of jar and requirement payloads can go and this hairball will be untangled.
          if isinstance(tgt.payload.get_field('requirements'), PythonRequirementsField):
            for requirement in tgt.payload.requirements:
              yield str(requirement.requirement)
          elif isinstance(tgt.payload.get_field('jars'), JarsField):
            for jar in tgt.payload.jars:
              data = dict(org=jar.org, name=jar.name, rev=jar.rev)
              # Omit the :rev segment for jars with no revision.
              yield ('{org}:{name}:{rev}' if jar.rev else '{org}:{name}').format(**data)
apache-2.0
MOLSSI-BSE/basis_set_exchange
basis_set_exchange/tests/test_io.py
1
1467
""" Tests for the BSE IO functions """ # Most functionality is covered under other tests. # This tests the remainder import os import pytest from basis_set_exchange import fileio from .common_testvars import data_dir # yapf: disable @pytest.mark.parametrize('file_path', ['cc-pVDZ.0.table.json', 'CRENBL.0.table.json', 'dunning/cc-pVDZ.1.element.json', 'crenb/CRENBL.0.element.json', 'dunning/cc-pVDZ.1.json', 'crenb/CRENBL.0.json', 'crenb/CRENBL-ECP.0.json']) # yapf: enable def test_read_write_basis(file_path): # needed to be tested to make sure something isn't left # out of the sort lists, etc full_path = os.path.join(data_dir, file_path) full_path_new = full_path + '.new' data = fileio.read_json_basis(full_path) fileio.write_json_basis(full_path_new, data) os.remove(full_path_new) @pytest.mark.parametrize('file_path', ['REFERENCES.json']) def test_read_write_references(file_path): # needed to be tested to make sure something isn't left # out of the sort lists, etc full_path = os.path.join(data_dir, file_path) full_path_new = full_path + '.new' data = fileio.read_references(full_path) fileio.write_references(full_path_new, data) os.remove(full_path_new)
bsd-3-clause
pdufour/sqlalchemy
lib/sqlalchemy/orm/evaluator.py
60
4731
# orm/evaluator.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""In-Python evaluation of SQL expression constructs against plain objects.

``EvaluatorCompiler.process`` turns a Core expression tree into a
``lambda obj: value`` callable, applying SQL-style NULL (``None``)
propagation.  Constructs without a Python equivalent raise
``UnevaluatableError``.
"""

import operator
from ..sql import operators


class UnevaluatableError(Exception):
    """Raised when an expression has no in-Python evaluation."""


# Operators applied directly as Python binary operators, subject to
# NULL propagation (either operand None -> result None).
_straight_ops = set(getattr(operators, op)
                    for op in ('add', 'mul', 'sub',
                               'div',
                               'mod', 'truediv',
                               'lt', 'le', 'ne', 'gt', 'ge', 'eq'))

# Operators recognized but deliberately left unimplemented in Python.
_notimplemented_ops = set(getattr(operators, op)
                          for op in ('like_op', 'notlike_op', 'ilike_op',
                                     'notilike_op', 'between_op', 'in_op',
                                     'notin_op', 'endswith_op', 'concat_op'))


class EvaluatorCompiler(object):
    def __init__(self, target_cls=None):
        # When set, column expressions must map to this class (or a
        # superclass); otherwise evaluation is refused.
        self.target_cls = target_cls

    def process(self, clause):
        """Dispatch on the clause's visit name; return an evaluator callable."""
        meth = getattr(self, "visit_%s" % clause.__visit_name__, None)
        if not meth:
            raise UnevaluatableError(
                "Cannot evaluate %s" % type(clause).__name__)
        return meth(clause)

    def visit_grouping(self, clause):
        return self.process(clause.element)

    def visit_null(self, clause):
        return lambda obj: None

    def visit_false(self, clause):
        return lambda obj: False

    def visit_true(self, clause):
        return lambda obj: True

    def visit_column(self, clause):
        if 'parentmapper' in clause._annotations:
            parentmapper = clause._annotations['parentmapper']
            if self.target_cls and not issubclass(
                    self.target_cls, parentmapper.class_):
                raise UnevaluatableError(
                    "Can't evaluate criteria against alternate class %s" %
                    parentmapper.class_
                )
            key = parentmapper._columntoproperty[clause].key
        else:
            key = clause.key
        get_corresponding_attr = operator.attrgetter(key)
        return lambda obj: get_corresponding_attr(obj)

    def visit_clauselist(self, clause):
        evaluators = list(map(self.process, clause.clauses))
        if clause.operator is operators.or_:
            def evaluate(obj):
                # SQL three-valued OR: any True wins; else NULL if any
                # operand was NULL; else False.
                has_null = False
                for sub_evaluate in evaluators:
                    value = sub_evaluate(obj)
                    if value:
                        return True
                    has_null = has_null or value is None
                if has_null:
                    return None
                return False
        elif clause.operator is operators.and_:
            def evaluate(obj):
                # SQL three-valued AND: NULL short-circuits to NULL,
                # False to False.
                for sub_evaluate in evaluators:
                    value = sub_evaluate(obj)
                    if not value:
                        if value is None:
                            return None
                        return False
                return True
        else:
            raise UnevaluatableError(
                "Cannot evaluate clauselist with operator %s" %
                clause.operator)

        return evaluate

    def visit_binary(self, clause):
        eval_left, eval_right = list(map(self.process,
                                         [clause.left, clause.right]))
        # Renamed from 'operator' to avoid shadowing the stdlib
        # 'operator' module imported above.
        op = clause.operator
        if op is operators.is_:
            def evaluate(obj):
                return eval_left(obj) == eval_right(obj)
        elif op is operators.isnot:
            def evaluate(obj):
                return eval_left(obj) != eval_right(obj)
        elif op in _straight_ops:
            def evaluate(obj):
                left_val = eval_left(obj)
                right_val = eval_right(obj)
                if left_val is None or right_val is None:
                    return None
                # Bug fix: apply the operator to the values already
                # computed above; the original re-invoked eval_left /
                # eval_right, evaluating each operand (and any side
                # effects) twice.
                return op(left_val, right_val)
        else:
            raise UnevaluatableError(
                "Cannot evaluate %s with operator %s" %
                (type(clause).__name__, clause.operator))
        return evaluate

    def visit_unary(self, clause):
        eval_inner = self.process(clause.element)
        if clause.operator is operators.inv:
            def evaluate(obj):
                # NOT NULL is NULL in SQL semantics.
                value = eval_inner(obj)
                if value is None:
                    return None
                return not value
            return evaluate
        raise UnevaluatableError(
            "Cannot evaluate %s with operator %s" %
            (type(clause).__name__, clause.operator))

    def visit_bindparam(self, clause):
        val = clause.value
        return lambda obj: val
mit
oktayacikalin/pyglet
contrib/layout/layout/Plex/Regexps.py
30
14714
#======================================================================= # # Python Lexical Analyser # # Regular Expressions # #======================================================================= import array import string import types from sys import maxint import Errors # # Constants # BOL = 'bol' EOL = 'eol' EOF = 'eof' nl_code = ord('\n') # # Helper functions # def chars_to_ranges(s): """ Return a list of character codes consisting of pairs [code1a, code1b, code2a, code2b,...] which cover all the characters in |s|. """ char_list = list(s) char_list.sort() i = 0 n = len(char_list) result = [] while i < n: code1 = ord(char_list[i]) code2 = code1 + 1 i = i + 1 while i < n and code2 >= ord(char_list[i]): code2 = code2 + 1 i = i + 1 result.append(code1) result.append(code2) return result def uppercase_range(code1, code2): """ If the range of characters from code1 to code2-1 includes any lower case letters, return the corresponding upper case range. """ code3 = max(code1, ord('a')) code4 = min(code2, ord('z') + 1) if code3 < code4: d = ord('A') - ord('a') return (code3 + d, code4 + d) else: return None def lowercase_range(code1, code2): """ If the range of characters from code1 to code2-1 includes any upper case letters, return the corresponding lower case range. """ code3 = max(code1, ord('A')) code4 = min(code2, ord('Z') + 1) if code3 < code4: d = ord('a') - ord('A') return (code3 + d, code4 + d) else: return None def CodeRanges(code_list): """ Given a list of codes as returned by chars_to_ranges, return an RE which will match a character in any of the ranges. """ re_list = [] for i in xrange(0, len(code_list), 2): re_list.append(CodeRange(code_list[i], code_list[i + 1])) return apply(Alt, tuple(re_list)) def CodeRange(code1, code2): """ CodeRange(code1, code2) is an RE which matches any character with a code |c| in the range |code1| <= |c| < |code2|. 
""" if code1 <= nl_code < code2: return Alt(RawCodeRange(code1, nl_code), RawNewline, RawCodeRange(nl_code + 1, code2)) else: return RawCodeRange(code1, code2) # # Abstract classes # class RE: """RE is the base class for regular expression constructors. The following operators are defined on REs: re1 + re2 is an RE which matches |re1| followed by |re2| re1 | re2 is an RE which matches either |re1| or |re2| """ nullable = 1 # True if this RE can match 0 input symbols match_nl = 1 # True if this RE can match a string ending with '\n' str = None # Set to a string to override the class's __str__ result def build_machine(self, machine, initial_state, final_state, match_bol, nocase): """ This method should add states to |machine| to implement this RE, starting at |initial_state| and ending at |final_state|. If |match_bol| is true, the RE must be able to match at the beginning of a line. If nocase is true, upper and lower case letters should be treated as equivalent. """ raise exceptions.UnimplementedMethod("%s.build_machine not implemented" % self.__class__.__name__) def build_opt(self, m, initial_state, c): """ Given a state |s| of machine |m|, return a new state reachable from |s| on character |c| or epsilon. """ s = m.new_state() initial_state.link_to(s) initial_state.add_transition(c, s) return s def __add__(self, other): return Seq(self, other) def __or__(self, other): return Alt(self, other) def __str__(self): if self.str: return self.str else: return self.calc_str() def check_re(self, num, value): if not isinstance(value, RE): self.wrong_type(num, value, "Plex.RE instance") def check_string(self, num, value): if type(value) <> type(''): self.wrong_type(num, value, "string") def check_char(self, num, value): self.check_string(num, value) if len(value) <> 1: raise Errors.PlexValueError("Invalid value for argument %d of Plex.%s." 
"Expected a string of length 1, got: %s" % ( num, self.__class__.__name__, repr(value))) def wrong_type(self, num, value, expected): if type(value) == types.InstanceType: got = "%s.%s instance" % ( value.__class__.__module__, value.__class__.__name__) else: got = type(value).__name__ raise Errors.PlexTypeError("Invalid type for argument %d of Plex.%s " "(expected %s, got %s" % ( num, self.__class__.__name__, expected, got)) # # Primitive RE constructors # ------------------------- # # These are the basic REs from which all others are built. # ## class Char(RE): ## """ ## Char(c) is an RE which matches the character |c|. ## """ ## nullable = 0 ## def __init__(self, char): ## self.char = char ## self.match_nl = char == '\n' ## def build_machine(self, m, initial_state, final_state, match_bol, nocase): ## c = self.char ## if match_bol and c <> BOL: ## s1 = self.build_opt(m, initial_state, BOL) ## else: ## s1 = initial_state ## if c == '\n' or c == EOF: ## s1 = self.build_opt(m, s1, EOL) ## if len(c) == 1: ## code = ord(self.char) ## s1.add_transition((code, code+1), final_state) ## if nocase and is_letter_code(code): ## code2 = other_case_code(code) ## s1.add_transition((code2, code2+1), final_state) ## else: ## s1.add_transition(c, final_state) ## def calc_str(self): ## return "Char(%s)" % repr(self.char) def Char(c): """ Char(c) is an RE which matches the character |c|. """ if len(c) == 1: result = CodeRange(ord(c), ord(c) + 1) else: result = SpecialSymbol(c) result.str = "Char(%s)" % repr(c) return result class RawCodeRange(RE): """ RawCodeRange(code1, code2) is a low-level RE which matches any character with a code |c| in the range |code1| <= |c| < |code2|, where the range does not include newline. For internal use only. 
""" nullable = 0 match_nl = 0 range = None # (code, code) uppercase_range = None # (code, code) or None lowercase_range = None # (code, code) or None def __init__(self, code1, code2): self.range = (code1, code2) self.uppercase_range = uppercase_range(code1, code2) self.lowercase_range = lowercase_range(code1, code2) def build_machine(self, m, initial_state, final_state, match_bol, nocase): if match_bol: initial_state = self.build_opt(m, initial_state, BOL) initial_state.add_transition(self.range, final_state) if nocase: if self.uppercase_range: initial_state.add_transition(self.uppercase_range, final_state) if self.lowercase_range: initial_state.add_transition(self.lowercase_range, final_state) def calc_str(self): return "CodeRange(%d,%d)" % (self.code1, self.code2) class _RawNewline(RE): """ RawNewline is a low-level RE which matches a newline character. For internal use only. """ nullable = 0 match_nl = 1 def build_machine(self, m, initial_state, final_state, match_bol, nocase): if match_bol: initial_state = self.build_opt(m, initial_state, BOL) s = self.build_opt(m, initial_state, EOL) s.add_transition((nl_code, nl_code + 1), final_state) RawNewline = _RawNewline() class SpecialSymbol(RE): """ SpecialSymbol(sym) is an RE which matches the special input symbol |sym|, which is one of BOL, EOL or EOF. """ nullable = 0 match_nl = 0 sym = None def __init__(self, sym): self.sym = sym def build_machine(self, m, initial_state, final_state, match_bol, nocase): # Sequences 'bol bol' and 'bol eof' are impossible, so only need # to allow for bol if sym is eol if match_bol and self.sym == EOL: initial_state = self.build_opt(m, initial_state, BOL) initial_state.add_transition(self.sym, final_state) class Seq(RE): """Seq(re1, re2, re3...) 
is an RE which matches |re1| followed by |re2| followed by |re3|...""" def __init__(self, *re_list): nullable = 1 for i in xrange(len(re_list)): re = re_list[i] self.check_re(i, re) nullable = nullable and re.nullable self.re_list = re_list self.nullable = nullable i = len(re_list) match_nl = 0 while i: i = i - 1 re = re_list[i] if re.match_nl: match_nl = 1 break if not re.nullable: break self.match_nl = match_nl def build_machine(self, m, initial_state, final_state, match_bol, nocase): re_list = self.re_list if len(re_list) == 0: initial_state.link_to(final_state) else: s1 = initial_state n = len(re_list) for i in xrange(n): if i < n - 1: s2 = m.new_state() else: s2 = final_state re = re_list[i] re.build_machine(m, s1, s2, match_bol, nocase) s1 = s2 match_bol = re.match_nl or (match_bol and re.nullable) def calc_str(self): return "Seq(%s)" % string.join(map(str, self.re_list), ",") class Alt(RE): """Alt(re1, re2, re3...) is an RE which matches either |re1| or |re2| or |re3|...""" def __init__(self, *re_list): self.re_list = re_list nullable = 0 match_nl = 0 nullable_res = [] non_nullable_res = [] i = 1 for re in re_list: self.check_re(i, re) if re.nullable: nullable_res.append(re) nullable = 1 else: non_nullable_res.append(re) if re.match_nl: match_nl = 1 i = i + 1 self.nullable_res = nullable_res self.non_nullable_res = non_nullable_res self.nullable = nullable self.match_nl = match_nl def build_machine(self, m, initial_state, final_state, match_bol, nocase): for re in self.nullable_res: re.build_machine(m, initial_state, final_state, match_bol, nocase) if self.non_nullable_res: if match_bol: initial_state = self.build_opt(m, initial_state, BOL) for re in self.non_nullable_res: re.build_machine(m, initial_state, final_state, 0, nocase) def calc_str(self): return "Alt(%s)" % string.join(map(str, self.re_list), ",") class Rep1(RE): """Rep1(re) is an RE which matches one or more repetitions of |re|.""" def __init__(self, re): self.check_re(1, re) self.re = re 
self.nullable = re.nullable self.match_nl = re.match_nl def build_machine(self, m, initial_state, final_state, match_bol, nocase): s1 = m.new_state() s2 = m.new_state() initial_state.link_to(s1) self.re.build_machine(m, s1, s2, match_bol or self.re.match_nl, nocase) s2.link_to(s1) s2.link_to(final_state) def calc_str(self): return "Rep1(%s)" % self.re class SwitchCase(RE): """ SwitchCase(re, nocase) is an RE which matches the same strings as RE, but treating upper and lower case letters according to |nocase|. If |nocase| is true, case is ignored, otherwise it is not. """ re = None nocase = None def __init__(self, re, nocase): self.re = re self.nocase = nocase self.nullable = re.nullable self.match_nl = re.match_nl def build_machine(self, m, initial_state, final_state, match_bol, nocase): self.re.build_machine(m, initial_state, final_state, match_bol, self.nocase) def calc_str(self): if self.nocase: name = "NoCase" else: name = "Case" return "%s(%s)" % (name, self.re) # # Composite RE constructors # ------------------------- # # These REs are defined in terms of the primitive REs. # Empty = Seq() Empty.__doc__ = \ """ Empty is an RE which matches the empty string. """ Empty.str = "Empty" def Str1(s): """ Str1(s) is an RE which matches the literal string |s|. """ result = apply(Seq, tuple(map(Char, s))) result.str = "Str(%s)" % repr(s) return result def Str(*strs): """ Str(s) is an RE which matches the literal string |s|. Str(s1, s2, s3, ...) is an RE which matches any of |s1| or |s2| or |s3|... """ if len(strs) == 1: return Str1(strs[0]) else: result = apply(Alt, tuple(map(Str1, strs))) result.str = "Str(%s)" % string.join(map(repr, strs), ",") return result def Any(s): """ Any(s) is an RE which matches any character in the string |s|. 
""" #result = apply(Alt, tuple(map(Char, s))) result = CodeRanges(chars_to_ranges(s)) result.str = "Any(%s)" % repr(s) return result def AnyBut(s): """ AnyBut(s) is an RE which matches any character (including newline) which is not in the string |s|. """ ranges = chars_to_ranges(s) ranges.insert(0, -maxint) ranges.append(maxint) result = CodeRanges(ranges) result.str = "AnyBut(%s)" % repr(s) return result AnyChar = AnyBut("") AnyChar.__doc__ = \ """ AnyChar is an RE which matches any single character (including a newline). """ AnyChar.str = "AnyChar" def Range(s1, s2 = None): """ Range(c1, c2) is an RE which matches any single character in the range |c1| to |c2| inclusive. Range(s) where |s| is a string of even length is an RE which matches any single character in the ranges |s[0]| to |s[1]|, |s[2]| to |s[3]|,... """ if s2: result = CodeRange(ord(s1), ord(s2) + 1) result.str = "Range(%s,%s)" % (s1, s2) else: ranges = [] for i in range(0, len(s1), 2): ranges.append(CodeRange(ord(s1[i]), ord(s1[i+1]) + 1)) result = apply(Alt, tuple(ranges)) result.str = "Range(%s)" % repr(s1) return result def Opt(re): """ Opt(re) is an RE which matches either |re| or the empty string. """ result = Alt(re, Empty) result.str = "Opt(%s)" % re return result def Rep(re): """ Rep(re) is an RE which matches zero or more repetitions of |re|. """ result = Opt(Rep1(re)) result.str = "Rep(%s)" % re return result def NoCase(re): """ NoCase(re) is an RE which matches the same strings as RE, but treating upper and lower case letters as equivalent. """ return SwitchCase(re, nocase = 1) def Case(re): """ Case(re) is an RE which matches the same strings as RE, but treating upper and lower case letters as distinct, i.e. it cancels the effect of any enclosing NoCase(). """ return SwitchCase(re, nocase = 0) # # RE Constants # Bol = Char(BOL) Bol.__doc__ = \ """ Bol is an RE which matches the beginning of a line. 
""" Bol.str = "Bol" Eol = Char(EOL) Eol.__doc__ = \ """ Eol is an RE which matches the end of a line. """ Eol.str = "Eol" Eof = Char(EOF) Eof.__doc__ = \ """ Eof is an RE which matches the end of the file. """ Eof.str = "Eof"
bsd-3-clause
HellerCommaA/flask-materialize
flask_material/__init__.py
1
5756
#!/usr/bin/env python
# coding=utf8

__app_version__ = '0.1.1'
__material_version__ = '0.96.1'

import re

from flask import Blueprint, current_app, url_for

try:
    from wtforms.fields import HiddenField
except ImportError:
    def is_hidden_field_filter(field):
        raise RuntimeError('WTForms is not installed.')
else:
    def is_hidden_field_filter(field):
        # True for WTForms hidden fields; used from templates.
        return isinstance(field, HiddenField)


class CDN(object):
    """Base class for CDN objects."""

    def get_resource_url(self, filename):
        """Return resource url for filename."""
        raise NotImplementedError


class StaticCDN(CDN):
    """A CDN that serves content from the local application.

    :param static_endpoint: Endpoint to use.
    :param rev: If ``True``, honor ``MATERIAL_QUERYSTRING_REVVING``.
    """

    def __init__(self, static_endpoint='static', rev=False):
        self.static_endpoint = static_endpoint
        self.rev = rev

    def get_resource_url(self, filename):
        extra_args = {}

        if self.rev and current_app.config['MATERIAL_QUERYSTRING_REVVING']:
            # Bug fix: the original referenced the undefined name
            # ``__version__`` and raised NameError whenever revving was
            # active.  Use the bundled Materialize version as the
            # cache-busting query-string value.
            extra_args['material'] = __material_version__

        return url_for(self.static_endpoint, filename=filename, **extra_args)


class WebCDN(CDN):
    """Serves files from the Web.

    :param baseurl: The baseurl. Filenames are simply appended to this URL.
    """

    def __init__(self, baseurl):
        self.baseurl = baseurl

    def get_resource_url(self, filename):
        return self.baseurl + filename


class ConditionalCDN(CDN):
    """Serves files from one CDN or another, depending on whether a
    configuration value is set.

    :param confvar: Configuration variable to use.
    :param primary: CDN to use if the configuration variable is ``True``.
    :param fallback: CDN to use otherwise.
    """

    def __init__(self, confvar, primary, fallback):
        self.confvar = confvar
        self.primary = primary
        self.fallback = fallback

    def get_resource_url(self, filename):
        if current_app.config[self.confvar]:
            return self.primary.get_resource_url(filename)
        return self.fallback.get_resource_url(filename)


def material_find_resource(filename, cdn, use_minified=None, local=True):
    """Resource finding function, also available in templates.

    Tries to find a resource, will force SSL depending on
    ``MATERIAL_CDN_FORCE_SSL`` settings.

    :param filename: File to find a URL for.
    :param cdn: Name of the CDN to use.
    :param use_minified: If set to ``True``/``False``, use/don't use
                         minified. If ``None``, honors
                         ``MATERIAL_USE_MINIFIED``.
    :param local: If ``True``, uses the ``local``-CDN when
                  ``MATERIAL_SERVE_LOCAL`` is enabled. If ``False``, uses
                  the ``static``-CDN instead.
    :return: A URL.
    """
    config = current_app.config

    if config['MATERIAL_SERVE_LOCAL']:
        # Locally served files live under js/ unless a css/ or js/ prefix
        # is already present in the requested filename.
        if 'css/' not in filename and 'js/' not in filename:
            filename = 'js/' + filename

    if use_minified is None:
        use_minified = config['MATERIAL_USE_MINIFIED']

    if use_minified:
        filename = '%s.min.%s' % tuple(filename.rsplit('.', 1))

    cdns = current_app.extensions['material']['cdns']
    resource_url = cdns[cdn].get_resource_url(filename)

    if resource_url.startswith('//') and config['MATERIAL_CDN_FORCE_SSL']:
        resource_url = 'https:%s' % resource_url

    return resource_url


class Material(object):
    def __init__(self, app=None):
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        # Pinned third-party asset versions served via cdnjs.
        MATERIAL_VERSION = '0.96.1'
        JQUERY_VERSION = '1.11.3'
        HTML5SHIV_VERSION = '3.7.2'
        RESPONDJS_VERSION = '1.4.2'

        app.config.setdefault('MATERIAL_USE_MINIFIED', True)
        app.config.setdefault('MATERIAL_CDN_FORCE_SSL', False)

        app.config.setdefault('MATERIAL_QUERYSTRING_REVVING', True)
        app.config.setdefault('MATERIAL_SERVE_LOCAL', False)

        app.config.setdefault('MATERIAL_LOCAL_SUBDOMAIN', None)

        blueprint = Blueprint(
            'material',
            __name__,
            template_folder='templates',
            static_folder='static',
            static_url_path=app.static_url_path + '/material',
            subdomain=app.config['MATERIAL_LOCAL_SUBDOMAIN'])

        app.register_blueprint(blueprint)

        app.jinja_env.globals['material_is_hidden_field'] =\
            is_hidden_field_filter
        app.jinja_env.globals['material_find_resource'] =\
            material_find_resource

        if not hasattr(app, 'extensions'):
            app.extensions = {}

        local = StaticCDN('material.static', rev=True)
        static = StaticCDN()

        def lwrap(cdn, primary=static):
            return ConditionalCDN('MATERIAL_SERVE_LOCAL', primary, cdn)

        material = lwrap(
            WebCDN('//cdnjs.cloudflare.com/ajax/libs/materialize/%s/' %
                   MATERIAL_VERSION), local)

        jquery = lwrap(
            WebCDN('//cdnjs.cloudflare.com/ajax/libs/jquery/%s/' %
                   JQUERY_VERSION), local)

        html5shiv = lwrap(
            WebCDN('//cdnjs.cloudflare.com/ajax/libs/html5shiv/%s/' %
                   HTML5SHIV_VERSION))

        respondjs = lwrap(
            WebCDN('//cdnjs.cloudflare.com/ajax/libs/respond.js/%s/' %
                   RESPONDJS_VERSION))

        app.extensions['material'] = {
            'cdns': {
                'local': local,
                'static': static,
                'material': material,
                'jquery': jquery,
                'html5shiv': html5shiv,
                'respond.js': respondjs,
            },
        }
mit
vigilv/scikit-learn
examples/ensemble/plot_adaboost_multiclass.py
354
4124
""" ===================================== Multi-class AdaBoosted Decision Trees ===================================== This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can improve prediction accuracy on a multi-class problem. The classification dataset is constructed by taking a ten-dimensional standard normal distribution and defining three classes separated by nested concentric ten-dimensional spheres such that roughly equal numbers of samples are in each class (quantiles of the :math:`\chi^2` distribution). The performance of the SAMME and SAMME.R [1] algorithms are compared. SAMME.R uses the probability estimates to update the additive model, while SAMME uses the classifications only. As the example illustrates, the SAMME.R algorithm typically converges faster than SAMME, achieving a lower test error with fewer boosting iterations. The error of each algorithm on the test set after each boosting iteration is shown on the left, the classification error on the test set of each tree is shown in the middle, and the boost weight of each tree is shown on the right. All trees have a weight of one in the SAMME.R algorithm and therefore are not shown. .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009. 
""" print(__doc__) # Author: Noel Dawe <noel.dawe@gmail.com> # # License: BSD 3 clause from sklearn.externals.six.moves import zip import matplotlib.pyplot as plt from sklearn.datasets import make_gaussian_quantiles from sklearn.ensemble import AdaBoostClassifier from sklearn.metrics import accuracy_score from sklearn.tree import DecisionTreeClassifier X, y = make_gaussian_quantiles(n_samples=13000, n_features=10, n_classes=3, random_state=1) n_split = 3000 X_train, X_test = X[:n_split], X[n_split:] y_train, y_test = y[:n_split], y[n_split:] bdt_real = AdaBoostClassifier( DecisionTreeClassifier(max_depth=2), n_estimators=600, learning_rate=1) bdt_discrete = AdaBoostClassifier( DecisionTreeClassifier(max_depth=2), n_estimators=600, learning_rate=1.5, algorithm="SAMME") bdt_real.fit(X_train, y_train) bdt_discrete.fit(X_train, y_train) real_test_errors = [] discrete_test_errors = [] for real_test_predict, discrete_train_predict in zip( bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)): real_test_errors.append( 1. - accuracy_score(real_test_predict, y_test)) discrete_test_errors.append( 1. - accuracy_score(discrete_train_predict, y_test)) n_trees_discrete = len(bdt_discrete) n_trees_real = len(bdt_real) # Boosting might terminate early, but the following arrays are always # n_estimators long. 
We crop them to the actual number of trees here: discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete] real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real] discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete] plt.figure(figsize=(15, 5)) plt.subplot(131) plt.plot(range(1, n_trees_discrete + 1), discrete_test_errors, c='black', label='SAMME') plt.plot(range(1, n_trees_real + 1), real_test_errors, c='black', linestyle='dashed', label='SAMME.R') plt.legend() plt.ylim(0.18, 0.62) plt.ylabel('Test Error') plt.xlabel('Number of Trees') plt.subplot(132) plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors, "b", label='SAMME', alpha=.5) plt.plot(range(1, n_trees_real + 1), real_estimator_errors, "r", label='SAMME.R', alpha=.5) plt.legend() plt.ylabel('Error') plt.xlabel('Number of Trees') plt.ylim((.2, max(real_estimator_errors.max(), discrete_estimator_errors.max()) * 1.2)) plt.xlim((-20, len(bdt_discrete) + 20)) plt.subplot(133) plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights, "b", label='SAMME') plt.legend() plt.ylabel('Weight') plt.xlabel('Number of Trees') plt.ylim((0, discrete_estimator_weights.max() * 1.2)) plt.xlim((-20, n_trees_discrete + 20)) # prevent overlapping y-axis labels plt.subplots_adjust(wspace=0.25) plt.show()
bsd-3-clause
40223231/2015-cdb-g4-final-test-by-6-22
static/Brython3.1.1-20150328-091302/Lib/unittest/test/_test_warnings.py
858
2304
# helper module for test_runner.Test_TextTestRunner.test_warnings """ This module has a number of tests that raise different kinds of warnings. When the tests are run, the warnings are caught and their messages are printed to stdout. This module also accepts an arg that is then passed to unittest.main to affect the behavior of warnings. Test_TextTestRunner.test_warnings executes this script with different combinations of warnings args and -W flags and check that the output is correct. See #10535. """ import sys import unittest import warnings def warnfun(): warnings.warn('rw', RuntimeWarning) class TestWarnings(unittest.TestCase): # unittest warnings will be printed at most once per type (max one message # for the fail* methods, and one for the assert* methods) def test_assert(self): self.assertEquals(2+2, 4) self.assertEquals(2*2, 4) self.assertEquals(2**2, 4) def test_fail(self): self.failUnless(1) self.failUnless(True) def test_other_unittest(self): self.assertAlmostEqual(2+2, 4) self.assertNotAlmostEqual(4+4, 2) # these warnings are normally silenced, but they are printed in unittest def test_deprecation(self): warnings.warn('dw', DeprecationWarning) warnings.warn('dw', DeprecationWarning) warnings.warn('dw', DeprecationWarning) def test_import(self): warnings.warn('iw', ImportWarning) warnings.warn('iw', ImportWarning) warnings.warn('iw', ImportWarning) # user warnings should always be printed def test_warning(self): warnings.warn('uw') warnings.warn('uw') warnings.warn('uw') # these warnings come from the same place; they will be printed # only once by default or three times if the 'always' filter is used def test_function(self): warnfun() warnfun() warnfun() if __name__ == '__main__': with warnings.catch_warnings(record=True) as ws: # if an arg is provided pass it to unittest.main as 'warnings' if len(sys.argv) == 2: unittest.main(exit=False, warnings=sys.argv.pop()) else: unittest.main(exit=False) # print all the warning messages collected for w in ws: 
print(w.message)
gpl-3.0
tempredirect/zxing
cpp/scons/scons-local-2.0.0.final.0/SCons/Tool/rpm.py
34
4547
"""SCons.Tool.rpm Tool-specific initialization for rpm. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. The rpm tool calls the rpmbuild command. The first and only argument should a tar.gz consisting of the source file and a specfile. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
# __revision__ = "src/engine/SCons/Tool/rpm.py 5023 2010/06/14 22:05:46 scons" import os import re import shutil import subprocess import SCons.Builder import SCons.Node.FS import SCons.Util import SCons.Action import SCons.Defaults def get_cmd(source, env): tar_file_with_included_specfile = source if SCons.Util.is_List(source): tar_file_with_included_specfile = source[0] return "%s %s %s"%(env['RPM'], env['RPMFLAGS'], tar_file_with_included_specfile.abspath ) def build_rpm(target, source, env): # create a temporary rpm build root. tmpdir = os.path.join( os.path.dirname( target[0].abspath ), 'rpmtemp' ) if os.path.exists(tmpdir): shutil.rmtree(tmpdir) # now create the mandatory rpm directory structure. for d in ['RPMS', 'SRPMS', 'SPECS', 'BUILD']: os.makedirs( os.path.join( tmpdir, d ) ) # set the topdir as an rpmflag. env.Prepend( RPMFLAGS = '--define \'_topdir %s\'' % tmpdir ) # now call rpmbuild to create the rpm package. handle = subprocess.Popen(get_cmd(source, env), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) output = handle.stdout.read() status = handle.wait() if status: raise SCons.Errors.BuildError( node=target[0], errstr=output, filename=str(target[0]) ) else: # XXX: assume that LC_ALL=c is set while running rpmbuild output_files = re.compile( 'Wrote: (.*)' ).findall( output ) for output, input in zip( output_files, target ): rpm_output = os.path.basename(output) expected = os.path.basename(input.get_path()) assert expected == rpm_output, "got %s but expected %s" % (rpm_output, expected) shutil.copy( output, input.abspath ) # cleanup before leaving. 
shutil.rmtree(tmpdir) return status def string_rpm(target, source, env): try: return env['RPMCOMSTR'] except KeyError: return get_cmd(source, env) rpmAction = SCons.Action.Action(build_rpm, string_rpm) RpmBuilder = SCons.Builder.Builder(action = SCons.Action.Action('$RPMCOM', '$RPMCOMSTR'), source_scanner = SCons.Defaults.DirScanner, suffix = '$RPMSUFFIX') def generate(env): """Add Builders and construction variables for rpm to an Environment.""" try: bld = env['BUILDERS']['Rpm'] except KeyError: bld = RpmBuilder env['BUILDERS']['Rpm'] = bld env.SetDefault(RPM = 'LC_ALL=c rpmbuild') env.SetDefault(RPMFLAGS = SCons.Util.CLVar('-ta')) env.SetDefault(RPMCOM = rpmAction) env.SetDefault(RPMSUFFIX = '.rpm') def exists(env): return env.Detect('rpmbuild') # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
apache-2.0
ecosoft-odoo/odoo
addons/product_margin/wizard/product_margin.py
338
3457
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from openerp.osv import fields, osv from openerp.tools.translate import _ class product_margin(osv.osv_memory): _name = 'product.margin' _description = 'Product Margin' _columns = { 'from_date': fields.date('From'), 'to_date': fields.date('To'), 'invoice_state': fields.selection([ ('paid', 'Paid'), ('open_paid', 'Open and Paid'), ('draft_open_paid', 'Draft, Open and Paid'), ], 'Invoice State', select=True, required=True), } _defaults = { 'from_date': time.strftime('%Y-01-01'), 'to_date': time.strftime('%Y-12-31'), 'invoice_state': "open_paid", } def action_open_window(self, cr, uid, ids, context=None): """ @param cr: the current row, from the database cursor, @param uid: the current user’s ID for security checks, @param ids: the ID or list of IDs if we want more than one @return: """ context = dict(context or {}) def ref(module, xml_id): proxy = self.pool.get('ir.model.data') return proxy.get_object_reference(cr, uid, module, xml_id) model, search_view_id = ref('product', 'product_search_form_view') model, graph_view_id = 
ref('product_margin', 'view_product_margin_graph') model, form_view_id = ref('product_margin', 'view_product_margin_form') model, tree_view_id = ref('product_margin', 'view_product_margin_tree') #get the current product.margin object to obtain the values from it records = self.browse(cr, uid, ids, context=context) record = records[0] context.update(invoice_state=record.invoice_state) if record.from_date: context.update(date_from=record.from_date) if record.to_date: context.update(date_to=record.to_date) views = [ (tree_view_id, 'tree'), (form_view_id, 'form'), (graph_view_id, 'graph') ] return { 'name': _('Product Margins'), 'context': context, 'view_type': 'form', "view_mode": 'tree,form,graph', 'res_model': 'product.product', 'type': 'ir.actions.act_window', 'views': views, 'view_id': False, 'search_view_id': search_view_id, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
elmar-peise/python-lsf
lsf/esub.py
1
1740
#!/usr/bin/env python """Wrapper script with bsub functionality.""" from __future__ import print_function import sys import os import shlex import argparse from submitjob import submitjob from utility import color def esub(args, bsubargs, jobscript): """Wrapper script with bsub functionality.""" data = {"command": ""} scriptargs = [] for line in jobscript.splitlines(True): if line.startswith("#!"): data["command"] += line elif line.startswith("#BSUB "): scriptargs += shlex.split(line[6:].split("#")[0]) else: data["command"] += line.split("#")[0] bsubargs = scriptargs + bsubargs last = False cmd = False for arg in bsubargs: if cmd: data["command"] += " " + arg continue if arg[0] == "-": if last: data[last] = True last = arg else: if last: data[last] = arg last = False else: cmd = True data["command"] = arg if last: data[last] = True try: jobid = submitjob(data) print(jobid) except Exception as e: print(color(e.strerror, "r")) sys.exit(-1) def main(): """Main program entry point.""" parser = argparse.ArgumentParser( description="Wrapper for bsub." ) parser.add_argument_group("further arguments", description="are passed to bsub") args, bsubargs = parser.parse_known_args() jobscript = sys.stdin.read() try: esub(args, bsubargs, jobscript) except KeyboardInterrupt: pass if __name__ == "__main__": main()
mit
isb-cgc/ISB-CGC-data-proc
tcga_etl_pipeline/clin_bio/metadata/parse_clinical_metadata.py
1
2284
import os import sys import re import hashlib import json from cStringIO import StringIO import pandas as pd import logging from HTMLParser import HTMLParser import datetime import os.path from google.cloud import storage from lxml import etree from collections import Counter #-------------------------------------- # set default bucket #-------------------------------------- storage.set_default_bucket("isb-cgc") storage_conn = storage.get_connection() storage.set_default_connection(storage_conn) all_elements = {} #-------------------------------------- # get the bucket contents #-------------------------------------- bucket = storage.get_bucket('isb-cgc-open') for k in bucket.list_blobs(prefix="tcga/"): if '.xml' in k.name and 'clinical' in k.name: print k.name disease_type = k.name.split("/")[1] maf_data = StringIO() k.download_to_file(maf_data) maf_data.seek(0) tree = etree.parse(maf_data) root = tree.getroot() #this is the root; we can use it to find elements blank_elements = re.compile("^\\n\s*$") admin_element = root.findall('.//*/[@procurement_status="Completed"]', namespaces=root.nsmap) maf_data.close() # ------------------------------------ unique_elements = {} for i in admin_element: unique_elements[i.tag.split("}")[1]] = 1 for j in unique_elements: if disease_type in all_elements: all_elements[disease_type].append(j) else: all_elements[disease_type] = [] all_elements[disease_type].append(j) for dt in all_elements: c = dict(Counter(all_elements[dt])) df = pd.DataFrame(c.items(), columns=["item", "counts"]) df = df.sort(['counts'], ascending=[False]) df_stringIO = df.to_csv(sep='\t', index=False) # upload the file upload_bucket = storage.get_bucket('ptone-experiments') upload_blob = storage.blob.Blob('working-files/clinical_metadata/' + dt + ".counts.txt", bucket=upload_bucket) upload_blob.upload_from_string(df_stringIO) #for dt in all_elements: # c = dict(Counter(all_elements[dt])) # df = pd.DataFrame(c.items(), columns=["item", "counts"]) # for ele in 
df[df.counts >= round(int(df.counts.quantile(.70)))]['item']: # print ele #
apache-2.0
berendkleinhaneveld/VTK
ThirdParty/Twisted/twisted/python/test/test_versions.py
33
10680
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Tests for L{twisted.python.versions}. """ from __future__ import division, absolute_import import sys import operator from io import BytesIO from twisted.python.versions import getVersionString, IncomparableVersions from twisted.python.versions import Version, _inf from twisted.python.filepath import FilePath from twisted.trial.unittest import SynchronousTestCase as TestCase VERSION_4_ENTRIES = b"""\ <?xml version="1.0" encoding="utf-8"?> <wc-entries xmlns="svn:"> <entry committed-rev="18210" name="" committed-date="2006-09-21T04:43:09.542953Z" url="svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk/twisted" last-author="exarkun" kind="dir" uuid="bbbe8e31-12d6-0310-92fd-ac37d47ddeeb" repos="svn+ssh://svn.twistedmatrix.com/svn/Twisted" revision="18211"/> </wc-entries> """ VERSION_8_ENTRIES = b"""\ 8 dir 22715 svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk """ VERSION_9_ENTRIES = b"""\ 9 dir 22715 svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk """ VERSION_10_ENTRIES = b"""\ 10 dir 22715 svn+ssh://svn.twistedmatrix.com/svn/Twisted/trunk """ class VersionsTest(TestCase): def test_versionComparison(self): """ Versions can be compared for equality and order. """ va = Version("dummy", 1, 0, 0) vb = Version("dummy", 0, 1, 0) self.assertTrue(va > vb) self.assertTrue(vb < va) self.assertTrue(va >= vb) self.assertTrue(vb <= va) self.assertTrue(va != vb) self.assertTrue(vb == Version("dummy", 0, 1, 0)) self.assertTrue(vb == vb) def test_comparingPrereleasesWithReleases(self): """ Prereleases are always less than versions without prereleases. """ va = Version("whatever", 1, 0, 0, prerelease=1) vb = Version("whatever", 1, 0, 0) self.assertTrue(va < vb) self.assertFalse(va > vb) self.assertNotEquals(vb, va) def test_comparingPrereleases(self): """ The value specified as the prerelease is used in version comparisons. 
""" va = Version("whatever", 1, 0, 0, prerelease=1) vb = Version("whatever", 1, 0, 0, prerelease=2) self.assertTrue(va < vb) self.assertTrue(vb > va) self.assertTrue(va <= vb) self.assertTrue(vb >= va) self.assertTrue(va != vb) self.assertTrue(vb == Version("whatever", 1, 0, 0, prerelease=2)) self.assertTrue(va == va) def test_infComparison(self): """ L{_inf} is equal to L{_inf}. This is a regression test. """ self.assertEqual(_inf, _inf) def test_disallowBuggyComparisons(self): """ The package names of the Version objects need to be the same, """ self.assertRaises(IncomparableVersions, operator.eq, Version("dummy", 1, 0, 0), Version("dumym", 1, 0, 0)) def test_notImplementedComparisons(self): """ Comparing a L{Version} to some other object type results in C{NotImplemented}. """ va = Version("dummy", 1, 0, 0) vb = ("dummy", 1, 0, 0) # a tuple is not a Version object self.assertEqual(va.__cmp__(vb), NotImplemented) def test_repr(self): """ Calling C{repr} on a version returns a human-readable string representation of the version. """ self.assertEqual(repr(Version("dummy", 1, 2, 3)), "Version('dummy', 1, 2, 3)") def test_reprWithPrerelease(self): """ Calling C{repr} on a version with a prerelease returns a human-readable string representation of the version including the prerelease. """ self.assertEqual(repr(Version("dummy", 1, 2, 3, prerelease=4)), "Version('dummy', 1, 2, 3, prerelease=4)") def test_str(self): """ Calling C{str} on a version returns a human-readable string representation of the version. """ self.assertEqual(str(Version("dummy", 1, 2, 3)), "[dummy, version 1.2.3]") def test_strWithPrerelease(self): """ Calling C{str} on a version with a prerelease includes the prerelease. """ self.assertEqual(str(Version("dummy", 1, 0, 0, prerelease=1)), "[dummy, version 1.0.0pre1]") def testShort(self): self.assertEqual(Version('dummy', 1, 2, 3).short(), '1.2.3') def test_goodSVNEntries_4(self): """ Version should be able to parse an SVN format 4 entries file. 
""" version = Version("dummy", 1, 0, 0) self.assertEqual( version._parseSVNEntries_4(BytesIO(VERSION_4_ENTRIES)), b'18211') def test_goodSVNEntries_8(self): """ Version should be able to parse an SVN format 8 entries file. """ version = Version("dummy", 1, 0, 0) self.assertEqual( version._parseSVNEntries_8(BytesIO(VERSION_8_ENTRIES)), b'22715') def test_goodSVNEntries_9(self): """ Version should be able to parse an SVN format 9 entries file. """ version = Version("dummy", 1, 0, 0) self.assertEqual( version._parseSVNEntries_9(BytesIO(VERSION_9_ENTRIES)), b'22715') def test_goodSVNEntriesTenPlus(self): """ Version should be able to parse an SVN format 10 entries file. """ version = Version("dummy", 1, 0, 0) self.assertEqual( version._parseSVNEntriesTenPlus(BytesIO(VERSION_10_ENTRIES)), b'22715') def test_getVersionString(self): """ L{getVersionString} returns a string with the package name and the short version number. """ self.assertEqual( 'Twisted 8.0.0', getVersionString(Version('Twisted', 8, 0, 0))) def test_getVersionStringWithPrerelease(self): """ L{getVersionString} includes the prerelease, if any. """ self.assertEqual( getVersionString(Version("whatever", 8, 0, 0, prerelease=1)), "whatever 8.0.0pre1") def test_base(self): """ The L{base} method returns a very simple representation of the version. """ self.assertEqual(Version("foo", 1, 0, 0).base(), "1.0.0") def test_baseWithPrerelease(self): """ The base version includes 'preX' for versions with prereleases. """ self.assertEqual(Version("foo", 1, 0, 0, prerelease=8).base(), "1.0.0pre8") class FormatDiscoveryTests(TestCase): """ Tests which discover the parsing method based on the imported module name. """ def mktemp(self): return TestCase.mktemp(self).encode("utf-8") def setUp(self): """ Create a temporary directory with a package structure in it. 
""" self.entry = FilePath(self.mktemp()) self.preTestModules = sys.modules.copy() sys.path.append(self.entry.path.decode('utf-8')) pkg = self.entry.child(b"twisted_python_versions_package") pkg.makedirs() pkg.child(b"__init__.py").setContent( b"from twisted.python.versions import Version\n" b"version = Version('twisted_python_versions_package', 1, 0, 0)\n") self.svnEntries = pkg.child(b".svn") self.svnEntries.makedirs() def tearDown(self): """ Remove the imported modules and sys.path modifications. """ sys.modules.clear() sys.modules.update(self.preTestModules) sys.path.remove(self.entry.path.decode('utf-8')) def checkSVNFormat(self, formatVersion, entriesText, expectedRevision): """ Check for the given revision being detected after setting the SVN entries text and format version of the test directory structure. """ self.svnEntries.child(b"format").setContent(formatVersion + b"\n") self.svnEntries.child(b"entries").setContent(entriesText) self.assertEqual(self.getVersion()._getSVNVersion(), expectedRevision) def getVersion(self): """ Import and retrieve the Version object from our dynamically created package. """ import twisted_python_versions_package return twisted_python_versions_package.version def test_detectVersion4(self): """ Verify that version 4 format file will be properly detected and parsed. """ self.checkSVNFormat(b"4", VERSION_4_ENTRIES, b'18211') def test_detectVersion8(self): """ Verify that version 8 format files will be properly detected and parsed. """ self.checkSVNFormat(b"8", VERSION_8_ENTRIES, b'22715') def test_detectVersion9(self): """ Verify that version 9 format files will be properly detected and parsed. """ self.checkSVNFormat(b"9", VERSION_9_ENTRIES, b'22715') def test_unparseableEntries(self): """ Verify that the result is C{b"Unknown"} for an apparently supported version for which parsing of the entries file fails. 
""" self.checkSVNFormat(b"4", b"some unsupported stuff", b"Unknown") def test_detectVersion10(self): """ Verify that version 10 format files will be properly detected and parsed. Differing from previous formats, the version 10 format lacks a I{format} file and B{only} has the version information on the first line of the I{entries} file. """ self.svnEntries.child(b"entries").setContent(VERSION_10_ENTRIES) self.assertEqual(self.getVersion()._getSVNVersion(), b'22715') def test_detectUnknownVersion(self): """ Verify that a new version of SVN will result in the revision 'Unknown'. """ self.checkSVNFormat(b"some-random-new-version", b"ooga booga!", b'Unknown') def test_getVersionStringWithRevision(self): """ L{getVersionString} includes the discovered revision number. """ self.svnEntries.child(b"format").setContent(b"9\n") self.svnEntries.child(b"entries").setContent(VERSION_10_ENTRIES) version = getVersionString(self.getVersion()) self.assertEqual( "twisted_python_versions_package 1.0.0+r22715", version) self.assertTrue(isinstance(version, type("")))
bsd-3-clause
dineshkummarc/gitak-1.0
server/selenium-remote-control-1.0.3/selenium-python-client-driver-1.0.1/test_ajax_jsf.py
6
1808
""" Copyright 2006 ThoughtWorks, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from selenium import selenium import unittest import sys, time class TestAjaxJSF(unittest.TestCase): seleniumHost = 'localhost' seleniumPort = str(4444) #browserStartCommand = "c:\\program files\\internet explorer\\iexplore.exe" browserStartCommand = "*firefox" browserURL = "http://www.irian.at" def setUp(self): print "Using selenium server at " + self.seleniumHost + ":" + self.seleniumPort self.selenium = selenium(self.seleniumHost, self.seleniumPort, self.browserStartCommand, self.browserURL) self.selenium.start() def testKeyPress(self): selenium = self.selenium input_id = 'ac4' update_id = 'ac4update' selenium.open("http://www.irian.at/selenium-server/tests/html/ajax/ajax_autocompleter2_test.html") selenium.key_press(input_id, 74) time.sleep(0.5) selenium.key_press(input_id, 97) selenium.key_press(input_id, 110) time.sleep(0.5) self.failUnless('Jane Agnews' == selenium.get_text(update_id)) selenium.key_press(input_id, '\9') time.sleep(0.5) self.failUnless('Jane Agnews' == selenium.get_value(input_id)) def tearDown(self): self.selenium.stop() if __name__ == "__main__": unittest.main()
apache-2.0
sergeykolychev/mxnet
example/rcnn/rcnn/symbol/symbol_resnet.py
18
13105
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import mxnet as mx import proposal import proposal_target from rcnn.config import config eps = 2e-5 use_global_stats = True workspace = 512 res_deps = {'50': (3, 4, 6, 3), '101': (3, 4, 23, 3), '152': (3, 8, 36, 3), '200': (3, 24, 36, 3)} units = res_deps['101'] filter_list = [256, 512, 1024, 2048] def residual_unit(data, num_filter, stride, dim_match, name): bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name=name + '_bn1') act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1') conv1 = mx.sym.Convolution(data=act1, num_filter=int(num_filter * 0.25), kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True, workspace=workspace, name=name + '_conv1') bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name=name + '_bn2') act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2') conv2 = mx.sym.Convolution(data=act2, num_filter=int(num_filter * 0.25), kernel=(3, 3), stride=stride, pad=(1, 1), no_bias=True, workspace=workspace, name=name + '_conv2') bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name=name + '_bn3') act3 = 
mx.sym.Activation(data=bn3, act_type='relu', name=name + '_relu3') conv3 = mx.sym.Convolution(data=act3, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True, workspace=workspace, name=name + '_conv3') if dim_match: shortcut = data else: shortcut = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True, workspace=workspace, name=name + '_sc') sum = mx.sym.ElementWiseSum(*[conv3, shortcut], name=name + '_plus') return sum def get_resnet_conv(data): # res1 data_bn = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=eps, use_global_stats=use_global_stats, name='bn_data') conv0 = mx.sym.Convolution(data=data_bn, num_filter=64, kernel=(7, 7), stride=(2, 2), pad=(3, 3), no_bias=True, name="conv0", workspace=workspace) bn0 = mx.sym.BatchNorm(data=conv0, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name='bn0') relu0 = mx.sym.Activation(data=bn0, act_type='relu', name='relu0') pool0 = mx.symbol.Pooling(data=relu0, kernel=(3, 3), stride=(2, 2), pad=(1, 1), pool_type='max', name='pool0') # res2 unit = residual_unit(data=pool0, num_filter=filter_list[0], stride=(1, 1), dim_match=False, name='stage1_unit1') for i in range(2, units[0] + 1): unit = residual_unit(data=unit, num_filter=filter_list[0], stride=(1, 1), dim_match=True, name='stage1_unit%s' % i) # res3 unit = residual_unit(data=unit, num_filter=filter_list[1], stride=(2, 2), dim_match=False, name='stage2_unit1') for i in range(2, units[1] + 1): unit = residual_unit(data=unit, num_filter=filter_list[1], stride=(1, 1), dim_match=True, name='stage2_unit%s' % i) # res4 unit = residual_unit(data=unit, num_filter=filter_list[2], stride=(2, 2), dim_match=False, name='stage3_unit1') for i in range(2, units[2] + 1): unit = residual_unit(data=unit, num_filter=filter_list[2], stride=(1, 1), dim_match=True, name='stage3_unit%s' % i) return unit def get_resnet_train(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS): data = 
mx.symbol.Variable(name="data") im_info = mx.symbol.Variable(name="im_info") gt_boxes = mx.symbol.Variable(name="gt_boxes") rpn_label = mx.symbol.Variable(name='label') rpn_bbox_target = mx.symbol.Variable(name='bbox_target') rpn_bbox_weight = mx.symbol.Variable(name='bbox_weight') # shared convolutional layers conv_feat = get_resnet_conv(data) # RPN layers rpn_conv = mx.symbol.Convolution( data=conv_feat, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3") rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu") rpn_cls_score = mx.symbol.Convolution( data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score") rpn_bbox_pred = mx.symbol.Convolution( data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred") # prepare rpn data rpn_cls_score_reshape = mx.symbol.Reshape( data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape") # classification rpn_cls_prob = mx.symbol.SoftmaxOutput(data=rpn_cls_score_reshape, label=rpn_label, multi_output=True, normalization='valid', use_ignore=True, ignore_label=-1, name="rpn_cls_prob") # bounding box regression rpn_bbox_loss_ = rpn_bbox_weight * mx.symbol.smooth_l1(name='rpn_bbox_loss_', scalar=3.0, data=(rpn_bbox_pred - rpn_bbox_target)) rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss_, grad_scale=1.0 / config.TRAIN.RPN_BATCH_SIZE) # ROI proposal rpn_cls_act = mx.symbol.SoftmaxActivation( data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_act") rpn_cls_act_reshape = mx.symbol.Reshape( data=rpn_cls_act, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_act_reshape') if config.TRAIN.CXX_PROPOSAL: rois = mx.symbol.contrib.Proposal( cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois', feature_stride=config.RPN_FEAT_STRIDE, scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS), rpn_pre_nms_top_n=config.TRAIN.RPN_PRE_NMS_TOP_N, 
rpn_post_nms_top_n=config.TRAIN.RPN_POST_NMS_TOP_N, threshold=config.TRAIN.RPN_NMS_THRESH, rpn_min_size=config.TRAIN.RPN_MIN_SIZE) else: rois = mx.symbol.Custom( cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois', op_type='proposal', feat_stride=config.RPN_FEAT_STRIDE, scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS), rpn_pre_nms_top_n=config.TRAIN.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TRAIN.RPN_POST_NMS_TOP_N, threshold=config.TRAIN.RPN_NMS_THRESH, rpn_min_size=config.TRAIN.RPN_MIN_SIZE) # ROI proposal target gt_boxes_reshape = mx.symbol.Reshape(data=gt_boxes, shape=(-1, 5), name='gt_boxes_reshape') group = mx.symbol.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target', num_classes=num_classes, batch_images=config.TRAIN.BATCH_IMAGES, batch_rois=config.TRAIN.BATCH_ROIS, fg_fraction=config.TRAIN.FG_FRACTION) rois = group[0] label = group[1] bbox_target = group[2] bbox_weight = group[3] # Fast R-CNN roi_pool = mx.symbol.ROIPooling( name='roi_pool5', data=conv_feat, rois=rois, pooled_size=(14, 14), spatial_scale=1.0 / config.RCNN_FEAT_STRIDE) # res5 unit = residual_unit(data=roi_pool, num_filter=filter_list[3], stride=(2, 2), dim_match=False, name='stage4_unit1') for i in range(2, units[3] + 1): unit = residual_unit(data=unit, num_filter=filter_list[3], stride=(1, 1), dim_match=True, name='stage4_unit%s' % i) bn1 = mx.sym.BatchNorm(data=unit, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name='bn1') relu1 = mx.sym.Activation(data=bn1, act_type='relu', name='relu1') pool1 = mx.symbol.Pooling(data=relu1, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1') # classification cls_score = mx.symbol.FullyConnected(name='cls_score', data=pool1, num_hidden=num_classes) cls_prob = mx.symbol.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='batch') # bounding box regression bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=pool1, 
num_hidden=num_classes * 4) bbox_loss_ = bbox_weight * mx.symbol.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target)) bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / config.TRAIN.BATCH_ROIS) # reshape output label = mx.symbol.Reshape(data=label, shape=(config.TRAIN.BATCH_IMAGES, -1), name='label_reshape') cls_prob = mx.symbol.Reshape(data=cls_prob, shape=(config.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape') bbox_loss = mx.symbol.Reshape(data=bbox_loss, shape=(config.TRAIN.BATCH_IMAGES, -1, 4 * num_classes), name='bbox_loss_reshape') group = mx.symbol.Group([rpn_cls_prob, rpn_bbox_loss, cls_prob, bbox_loss, mx.symbol.BlockGrad(label)]) return group def get_resnet_test(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS): data = mx.symbol.Variable(name="data") im_info = mx.symbol.Variable(name="im_info") # shared convolutional layers conv_feat = get_resnet_conv(data) # RPN rpn_conv = mx.symbol.Convolution( data=conv_feat, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3") rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu") rpn_cls_score = mx.symbol.Convolution( data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score") rpn_bbox_pred = mx.symbol.Convolution( data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred") # ROI Proposal rpn_cls_score_reshape = mx.symbol.Reshape( data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape") rpn_cls_prob = mx.symbol.SoftmaxActivation( data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_prob") rpn_cls_prob_reshape = mx.symbol.Reshape( data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_reshape') if config.TEST.CXX_PROPOSAL: rois = mx.symbol.contrib.Proposal( cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois', feature_stride=config.RPN_FEAT_STRIDE, 
scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS), rpn_pre_nms_top_n=config.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TEST.RPN_POST_NMS_TOP_N, threshold=config.TEST.RPN_NMS_THRESH, rpn_min_size=config.TEST.RPN_MIN_SIZE) else: rois = mx.symbol.Custom( cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois', op_type='proposal', feat_stride=config.RPN_FEAT_STRIDE, scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS), rpn_pre_nms_top_n=config.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TEST.RPN_POST_NMS_TOP_N, threshold=config.TEST.RPN_NMS_THRESH, rpn_min_size=config.TEST.RPN_MIN_SIZE) # Fast R-CNN roi_pool = mx.symbol.ROIPooling( name='roi_pool5', data=conv_feat, rois=rois, pooled_size=(14, 14), spatial_scale=1.0 / config.RCNN_FEAT_STRIDE) # res5 unit = residual_unit(data=roi_pool, num_filter=filter_list[3], stride=(2, 2), dim_match=False, name='stage4_unit1') for i in range(2, units[3] + 1): unit = residual_unit(data=unit, num_filter=filter_list[3], stride=(1, 1), dim_match=True, name='stage4_unit%s' % i) bn1 = mx.sym.BatchNorm(data=unit, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name='bn1') relu1 = mx.sym.Activation(data=bn1, act_type='relu', name='relu1') pool1 = mx.symbol.Pooling(data=relu1, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1') # classification cls_score = mx.symbol.FullyConnected(name='cls_score', data=pool1, num_hidden=num_classes) cls_prob = mx.symbol.softmax(name='cls_prob', data=cls_score) # bounding box regression bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=pool1, num_hidden=num_classes * 4) # reshape output cls_prob = mx.symbol.Reshape(data=cls_prob, shape=(config.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape') bbox_pred = mx.symbol.Reshape(data=bbox_pred, shape=(config.TEST.BATCH_IMAGES, -1, 4 * num_classes), name='bbox_pred_reshape') # group output group = mx.symbol.Group([rois, cls_prob, bbox_pred]) 
return group
apache-2.0
EricCline/CEM_inc
env/lib/python2.7/site-packages/django/test/utils.py
78
11107
import re
import warnings
from xml.dom.minidom import parseString, Node

from django.conf import settings, UserSettingsHolder
from django.core import mail
from django.http import request
from django.template import Template, loader, TemplateDoesNotExist
from django.template.loaders import cached
from django.test.signals import template_rendered, setting_changed
from django.utils.encoding import force_str
from django.utils.functional import wraps
from django.utils import six
from django.utils.translation import deactivate

__all__ = (
    'Approximate', 'ContextList', 'get_runner', 'override_settings',
    'setup_test_environment', 'teardown_test_environment',
)

RESTORE_LOADERS_ATTR = '_original_template_source_loaders'


class Approximate(object):
    """Value wrapper that compares equal to numbers within ``places`` decimals."""

    def __init__(self, val, places=7):
        self.val = val
        self.places = places

    def __repr__(self):
        return repr(self.val)

    def __eq__(self, other):
        if self.val == other:
            return True
        return round(abs(self.val - other), self.places) == 0


class ContextList(list):
    """A wrapper that provides direct key access to context items contained
    in a list of context objects.
    """

    def __getitem__(self, key):
        if isinstance(key, six.string_types):
            # String keys are looked up in each subcontext, first match wins.
            for subcontext in self:
                if key in subcontext:
                    return subcontext[key]
            raise KeyError(key)
        return super(ContextList, self).__getitem__(key)

    def __contains__(self, key):
        try:
            self[key]
        except KeyError:
            return False
        return True


def instrumented_test_render(self, context):
    """
    An instrumented Template render method, providing a signal
    that can be intercepted by the test system Client.
    """
    template_rendered.send(sender=self, template=self, context=context)
    return self.nodelist.render(context)


def setup_test_environment():
    """Perform any global pre-test setup. This involves:

        - Installing the instrumented test renderer
        - Set the email backend to the locmem email backend.
        - Setting the active locale to match the LANGUAGE_CODE setting.
    """
    Template._original_render = Template._render
    Template._render = instrumented_test_render

    # Storing previous values in the settings module itself is problematic.
    # Store them in arbitrary (but related) modules instead. See #20636.
    mail._original_email_backend = settings.EMAIL_BACKEND
    settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'

    request._original_allowed_hosts = settings.ALLOWED_HOSTS
    settings.ALLOWED_HOSTS = ['*']

    mail.outbox = []

    deactivate()


def teardown_test_environment():
    """Perform any global post-test teardown. This involves:

        - Restoring the original test renderer
        - Restoring the email sending functions
    """
    Template._render = Template._original_render
    del Template._original_render

    settings.EMAIL_BACKEND = mail._original_email_backend
    del mail._original_email_backend

    settings.ALLOWED_HOSTS = request._original_allowed_hosts
    del request._original_allowed_hosts

    del mail.outbox


def get_warnings_state():
    """
    Returns an object containing the state of the warnings module.
    """
    # There is no public interface for doing this, but this implementation of
    # get_warnings_state and restore_warnings_state appears to work on Python
    # 2.4 to 2.7.
    return warnings.filters[:]


def restore_warnings_state(state):
    """
    Restores the state of the warnings module when passed an object that was
    returned by get_warnings_state().
    """
    warnings.filters = state[:]


def get_runner(settings, test_runner_class=None):
    """Resolve the dotted-path test runner class named by the settings."""
    if not test_runner_class:
        test_runner_class = settings.TEST_RUNNER

    test_path = test_runner_class.split('.')
    # Allow for Python 2.5 relative paths
    if len(test_path) > 1:
        test_module_name = '.'.join(test_path[:-1])
    else:
        test_module_name = '.'
    test_module = __import__(test_module_name, {}, {}, force_str(test_path[-1]))
    test_runner = getattr(test_module, test_path[-1])
    return test_runner


def setup_test_template_loader(templates_dict, use_cached_loader=False):
    """
    Changes Django to only find templates from within a dictionary (where each
    key is the template name and each value is the corresponding template
    content to return).

    Use meth:`restore_template_loaders` to restore the original loaders.
    """
    if hasattr(loader, RESTORE_LOADERS_ATTR):
        raise Exception("loader.%s already exists" % RESTORE_LOADERS_ATTR)

    def test_template_loader(template_name, template_dirs=None):
        "A custom template loader that loads templates from a dictionary."
        try:
            return (templates_dict[template_name], "test:%s" % template_name)
        except KeyError:
            raise TemplateDoesNotExist(template_name)

    if use_cached_loader:
        template_loader = cached.Loader(('test_template_loader',))
        template_loader._cached_loaders = (test_template_loader,)
    else:
        template_loader = test_template_loader

    setattr(loader, RESTORE_LOADERS_ATTR, loader.template_source_loaders)
    loader.template_source_loaders = (template_loader,)
    return template_loader


def restore_template_loaders():
    """
    Restores the original template loaders after
    :meth:`setup_test_template_loader` has been run.
    """
    loader.template_source_loaders = getattr(loader, RESTORE_LOADERS_ATTR)
    delattr(loader, RESTORE_LOADERS_ATTR)


class override_settings(object):
    """
    Acts as either a decorator, or a context manager. If it's a decorator it
    takes a function and returns a wrapped function. If it's a contextmanager
    it's used with the ``with`` statement. In either event entering/exiting
    are called before and after, respectively, the function/block is executed.
    """

    def __init__(self, **kwargs):
        self.options = kwargs
        self.wrapped = settings._wrapped

    def __enter__(self):
        self.enable()

    def __exit__(self, exc_type, exc_value, traceback):
        self.disable()

    def __call__(self, test_func):
        from django.test import SimpleTestCase
        if isinstance(test_func, type):
            if not issubclass(test_func, SimpleTestCase):
                raise Exception(
                    "Only subclasses of Django SimpleTestCase can be decorated "
                    "with override_settings")
            original_pre_setup = test_func._pre_setup
            original_post_teardown = test_func._post_teardown

            def _pre_setup(innerself):
                self.enable()
                original_pre_setup(innerself)

            def _post_teardown(innerself):
                original_post_teardown(innerself)
                self.disable()

            test_func._pre_setup = _pre_setup
            test_func._post_teardown = _post_teardown
            return test_func
        else:
            @wraps(test_func)
            def inner(*args, **kwargs):
                with self:
                    return test_func(*args, **kwargs)
            return inner

    def enable(self):
        # Layer the overrides on top of the current settings holder.
        override = UserSettingsHolder(settings._wrapped)
        for key, new_value in self.options.items():
            setattr(override, key, new_value)
        settings._wrapped = override
        for key, new_value in self.options.items():
            setting_changed.send(sender=settings._wrapped.__class__,
                                 setting=key, value=new_value)

    def disable(self):
        settings._wrapped = self.wrapped
        for key in self.options:
            new_value = getattr(settings, key, None)
            setting_changed.send(sender=settings._wrapped.__class__,
                                 setting=key, value=new_value)


def compare_xml(want, got):
    """Tries to do a 'xml-comparison' of want and got. Plain string
    comparison doesn't always work because, for example, attribute ordering
    should not be important. Comment nodes are not considered in the
    comparison.

    Based on http://codespeak.net/svn/lxml/trunk/src/lxml/doctestcompare.py
    """
    _norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')

    def norm_whitespace(v):
        return _norm_whitespace_re.sub(' ', v)

    def child_text(element):
        return ''.join([c.data for c in element.childNodes
                        if c.nodeType == Node.TEXT_NODE])

    def children(element):
        return [c for c in element.childNodes
                if c.nodeType == Node.ELEMENT_NODE]

    def norm_child_text(element):
        return norm_whitespace(child_text(element))

    def attrs_dict(element):
        return dict(element.attributes.items())

    def check_element(want_element, got_element):
        if want_element.tagName != got_element.tagName:
            return False
        if norm_child_text(want_element) != norm_child_text(got_element):
            return False
        if attrs_dict(want_element) != attrs_dict(got_element):
            return False
        want_children = children(want_element)
        got_children = children(got_element)
        if len(want_children) != len(got_children):
            return False
        for want, got in zip(want_children, got_children):
            if not check_element(want, got):
                return False
        return True

    def first_node(document):
        # Skip leading comment nodes when picking the document root.
        for node in document.childNodes:
            if node.nodeType != Node.COMMENT_NODE:
                return node

    want, got = strip_quotes(want, got)
    want = want.replace('\\n', '\n')
    got = got.replace('\\n', '\n')

    # If the string is not a complete xml document, we may need to add a
    # root element. This allow us to compare fragments, like "<foo/><bar/>"
    if not want.startswith('<?xml'):
        wrapper = '<root>%s</root>'
        want = wrapper % want
        got = wrapper % got

    # Parse the want and got strings, and compare the parsings.
    want_root = first_node(parseString(want))
    got_root = first_node(parseString(got))

    return check_element(want_root, got_root)


def strip_quotes(want, got):
    """
    Strip quotes of doctests output values:

    >>> strip_quotes("'foo'")
    "foo"
    >>> strip_quotes('"foo"')
    "foo"
    """
    def is_quoted_string(s):
        s = s.strip()
        return (len(s) >= 2
                and s[0] == s[-1]
                and s[0] in ('"', "'"))

    def is_quoted_unicode(s):
        s = s.strip()
        return (len(s) >= 3
                and s[0] == 'u'
                and s[1] == s[-1]
                and s[1] in ('"', "'"))

    if is_quoted_string(want) and is_quoted_string(got):
        want = want.strip()[1:-1]
        got = got.strip()[1:-1]
    elif is_quoted_unicode(want) and is_quoted_unicode(got):
        want = want.strip()[2:-1]
        got = got.strip()[2:-1]
    return want, got


def str_prefix(s):
    # Interpolate the unicode-literal prefix expected on this Python version.
    return s % {'_': '' if six.PY3 else 'u'}
mit
brian-yang/mozillians
vendor-local/lib/python/tablib/packages/odf3/math.py
56
1068
# -*- coding: utf-8 -*- # Copyright (C) 2006-2007 Søren Roug, European Environment Agency # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # Contributor(s): # from .namespaces import MATHNS from .element import Element # ODF 1.0 section 12.5 # Mathematical content is represented by MathML 2.0 # Autogenerated def Math(**args): return Element(qname = (MATHNS,'math'), **args)
bsd-3-clause
JohnGriffiths/nipype
nipype/interfaces/afni/tests/test_auto_Bandpass.py
9
1844
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.afni.preprocess import Bandpass def test_Bandpass_inputs(): input_map = dict(args=dict(argstr='%s', ), automask=dict(argstr='-automask', ), blur=dict(argstr='-blur %f', ), despike=dict(argstr='-despike', ), environ=dict(nohash=True, usedefault=True, ), highpass=dict(argstr='%f', mandatory=True, position=-3, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='%s', copyfile=False, mandatory=True, position=-1, ), localPV=dict(argstr='-localPV %f', ), lowpass=dict(argstr='%f', mandatory=True, position=-2, ), mask=dict(argstr='-mask %s', position=2, ), nfft=dict(argstr='-nfft %d', ), no_detrend=dict(argstr='-nodetrend', ), normalize=dict(argstr='-norm', ), notrans=dict(argstr='-notrans', ), orthogonalize_dset=dict(argstr='-dsort %s', ), orthogonalize_file=dict(argstr='-ort %s', ), out_file=dict(argstr='-prefix %s', genfile=True, name_source='in_file', name_template='%s_bp', position=1, ), outputtype=dict(), terminal_output=dict(nohash=True, ), tr=dict(argstr='-dt %f', ), ) inputs = Bandpass.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Bandpass_outputs(): output_map = dict(out_file=dict(), ) outputs = Bandpass.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value
bsd-3-clause
WizeCommerce/medusa
tests/test_client.py
1
2745
""" Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest2 import os from thrift_medusa.clients.client import Client from thrift_medusa.thrift.thrift_compiler import ThriftCompiler from thrift_medusa.utils.config import Config class BaseClientTests(unittest2.TestCase): def setUp(self): self.dict = {} self.client = self.__get_client__() # self.client.initialize() self.service_name = os.path.join(os.getcwd(), "../thrift/services/", "wizecommerce.services.example.thrift") def __get_client__(self): self.config = Config() self.config.reset_configuration() compiler = None for item in self.config.get_thrift_option("compilers"): compiler = ThriftCompiler(item) return Client([], compiler) def test_not_implemented_methods(self): client = self.__get_client__() self.assertRaises(NotImplementedError, client.__build_dependency__,"DummyFile") self.assertRaises(NotImplementedError, client.__build_client__, "service") self.assertRaises(NotImplementedError, client.check_version, **self.dict) self.assertRaises(NotImplementedError, client.__deploy_production_artifact__, self.dict, "boo") self.assertRaises(NotImplementedError, client.__deploy_local_artifact__, self.dict, "boo") self.assertRaises(NotImplementedError, client.finalize) self.assertRaises(NotImplementedError, client.initialize) def test_deploy_object(self): client = self.__get_client__() self.config.set_local(True) self.assertRaisesRegexp(NotImplementedError, ".*Deploy Local Artifact.*", client.deploy_object, self.dict, "dummy") client = 
self.__get_client__() self.config.set_local(False) self.assertRaisesRegexp(NotImplementedError, ".*Deploy Production Artifact.*", client.deploy_object, self.dict, "dummy") def test_sandbox(self): client = self.__get_client__() client.set_sandbox("DummySandbox") self.assertEquals("DummySandbox", client.get_sandbox()) if __name__ == "__main__": # Run unit tests suite = unittest2.TestLoader().loadTestsFromTestCase(BaseClientTests) unittest2.TextTestRunner(verbosity=2).run(suite)
apache-2.0
nrc/servo
tests/wpt/css-tests/tools/wptserve/wptserve/server.py
136
17886
import BaseHTTPServer
import errno
import os
import re
import socket
from SocketServer import ThreadingMixIn
import ssl
import sys
import threading
import time
import traceback
import types
import urlparse

import routes as default_routes
from logger import get_logger
from request import Server, Request
from response import Response
from router import Router
from utils import HTTPException


"""HTTP server designed for testing purposes.

The server is designed to provide flexibility in the way that
requests are handled, and to provide control both of exactly
what bytes are put on the wire for the response, and in the
timing of sending those bytes.

The server is based on the stdlib HTTPServer, but with some
notable differences in the way that requests are processed.
Overall processing is handled by a WebTestRequestHandler,
which is a subclass of BaseHTTPRequestHandler. This is responsible
for parsing the incoming request. A RequestRewriter is then
applied and may change the request data if it matches a
supplied rule.

Once the request data had been finalised, Request and Reponse
objects are constructed. These are used by the other parts of the
system to read information about the request and manipulate the
response.

Each request is handled by a particular handler function. The
mapping between Request and the appropriate handler is determined
by a Router. By default handlers are installed to interpret files
under the document root with .py extensions as executable python
files (see handlers.py for the api for such files), .asis files as
bytestreams to be sent literally and all other files to be served
statically.

The handler functions are responsible for either populating the
fields of the response object, which will then be written when the
handler returns, or for directly writing to the output stream.
"""


class RequestRewriter(object):
    def __init__(self, rules):
        """Object for rewriting the request path.

        :param rules: Initial rules to add; a list of three item tuples
                      (method, input_path, output_path), defined as for
                      register()
        """
        self.rules = {}
        for rule in reversed(rules):
            self.register(*rule)
        self.logger = get_logger()

    def register(self, methods, input_path, output_path):
        """Register a rewrite rule.

        :param methods: Set of methods this should match. "*" is a
                        special value indicating that all methods should
                        be matched.

        :param input_path: Path to match for the initial request.

        :param output_path: Path to replace the input path with in
                            the request.
        """
        if type(methods) in types.StringTypes:
            methods = [methods]
        self.rules[input_path] = (methods, output_path)

    def rewrite(self, request_handler):
        """Rewrite the path in a BaseHTTPRequestHandler instance, if
        it matches a rule.

        :param request_handler: BaseHTTPRequestHandler for which to
                                rewrite the request.
        """
        split_url = urlparse.urlsplit(request_handler.path)
        if split_url.path in self.rules:
            methods, destination = self.rules[split_url.path]
            if "*" in methods or request_handler.command in methods:
                self.logger.debug("Rewriting request path %s to %s" %
                                  (request_handler.path, destination))
                new_url = list(split_url)
                new_url[2] = destination
                new_url = urlparse.urlunsplit(new_url)
                request_handler.path = new_url


class WebTestServer(ThreadingMixIn, BaseHTTPServer.HTTPServer):
    allow_reuse_address = True
    acceptable_errors = (errno.EPIPE, errno.ECONNABORTED)
    request_queue_size = 2000

    # Ensure that we don't hang on shutdown waiting for requests
    daemon_threads = True

    def __init__(self, server_address, RequestHandlerClass, router, rewriter,
                 bind_hostname, config=None, use_ssl=False, key_file=None,
                 certificate=None, encrypt_after_connect=False, latency=None,
                 **kwargs):
        """Server for HTTP(s) Requests

        :param server_address: tuple of (server_name, port)

        :param RequestHandlerClass: BaseHTTPRequestHandler-like class to use
                                    for handling requests.

        :param router: Router instance to use for matching requests to handler
                       functions

        :param rewriter: RequestRewriter-like instance to use for preprocessing
                         requests before they are routed

        :param config: Dictionary holding environment configuration settings
                       for handlers to read, or None to use the default values.

        :param use_ssl: Boolean indicating whether the server should use SSL

        :param key_file: Path to key file to use if SSL is enabled.

        :param certificate: Path to certificate to use if SSL is enabled.

        :param encrypt_after_connect: For each connection, don't start encryption
                                      until a CONNECT message has been received.
                                      This enables the server to act as a
                                      self-proxy.

        :param bind_hostname: True to bind the server to both the hostname and
                              port specified in the server_address parameter.
                              False to bind the server only to the port in the
                              server_address parameter, but not to the hostname.

        :param latency: Delay in ms to wait before serving each response, or
                        callable that returns a delay in ms
        """
        self.router = router
        self.rewriter = rewriter

        self.scheme = "https" if use_ssl else "http"
        self.logger = get_logger()

        self.latency = latency

        if bind_hostname:
            hostname_port = server_address
        else:
            hostname_port = ("", server_address[1])

        # super doesn't work here because BaseHTTPServer.HTTPServer is old-style
        BaseHTTPServer.HTTPServer.__init__(self, hostname_port,
                                           RequestHandlerClass, **kwargs)

        if config is not None:
            Server.config = config
        else:
            self.logger.debug("Using default configuration")
            Server.config = {"host": server_address[0],
                             "domains": {"": server_address[0]},
                             "ports": {"http": [self.server_address[1]]}}

        self.key_file = key_file
        self.certificate = certificate
        self.encrypt_after_connect = use_ssl and encrypt_after_connect

        if use_ssl and not encrypt_after_connect:
            self.socket = ssl.wrap_socket(self.socket,
                                          keyfile=self.key_file,
                                          certfile=self.certificate,
                                          server_side=True)

    def handle_error(self, request, client_address):
        error = sys.exc_value

        if ((isinstance(error, socket.error) and
             isinstance(error.args, tuple) and
             error.args[0] in self.acceptable_errors) or
            (isinstance(error, IOError) and
             error.errno in self.acceptable_errors)):
            pass  # remote hang up before the result is sent
        else:
            self.logger.error(traceback.format_exc())


class WebTestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """RequestHandler for WebTestHttpd"""

    protocol_version = "HTTP/1.1"

    def handle_one_request(self):
        response = None
        self.logger = get_logger()
        try:
            self.close_connection = False
            request_line_is_valid = self.get_request_line()

            if self.close_connection:
                return

            request_is_valid = self.parse_request()
            if not request_is_valid:
                # parse_request() actually sends its own error responses
                return

            self.server.rewriter.rewrite(self)

            request = Request(self)
            response = Response(self, request)

            if request.method == "CONNECT":
                self.handle_connect(response)
                return

            if not request_line_is_valid:
                response.set_error(414)
                response.write()
                return

            self.logger.debug("%s %s" % (request.method, request.request_path))
            handler = self.server.router.get_handler(request)

            # If the handler we used for the request had a non-default base path
            # set update the doc_root of the request to reflect this
            if hasattr(handler, "base_path") and handler.base_path:
                request.doc_root = handler.base_path
            if hasattr(handler, "url_base") and handler.url_base != "/":
                request.url_base = handler.url_base

            if self.server.latency is not None:
                if callable(self.server.latency):
                    latency = self.server.latency()
                else:
                    latency = self.server.latency
                self.logger.warning("Latency enabled. Sleeping %i ms" % latency)
                time.sleep(latency / 1000.)

            if handler is None:
                response.set_error(404)
            else:
                try:
                    handler(request, response)
                except HTTPException as e:
                    response.set_error(e.code, e.message)
                except Exception as e:
                    if e.message:
                        err = [e.message]
                    else:
                        err = []
                    err.append(traceback.format_exc())
                    response.set_error(500, "\n".join(err))
            self.logger.debug("%i %s %s (%s) %i" % (response.status[0],
                                                    request.method,
                                                    request.request_path,
                                                    request.headers.get('Referer'),
                                                    request.raw_input.length))

            if not response.writer.content_written:
                response.write()

            # If we want to remove this in the future, a solution is needed for
            # scripts that produce a non-string iterable of content, since these
            # can't set a Content-Length header. A notable example of this kind of
            # problem is with the trickle pipe i.e. foo.js?pipe=trickle(d1)
            if response.close_connection:
                self.close_connection = True

            if not self.close_connection:
                # Ensure that the whole request has been read from the socket
                request.raw_input.read()

        except socket.timeout as e:
            self.log_error("Request timed out: %r", e)
            self.close_connection = True
            return

        except Exception as e:
            err = traceback.format_exc()
            if response:
                response.set_error(500, err)
                response.write()
            self.logger.error(err)

    def get_request_line(self):
        try:
            self.raw_requestline = self.rfile.readline(65537)
        except socket.error:
            self.close_connection = True
            return False
        if len(self.raw_requestline) > 65536:
            self.requestline = ''
            self.request_version = ''
            self.command = ''
            return False
        if not self.raw_requestline:
            self.close_connection = True
        return True

    def handle_connect(self, response):
        self.logger.debug("Got CONNECT")
        response.status = 200
        response.write()
        if self.server.encrypt_after_connect:
            self.logger.debug("Enabling SSL for connection")
            self.request = ssl.wrap_socket(self.connection,
                                           keyfile=self.server.key_file,
                                           certfile=self.server.certificate,
                                           server_side=True)
            self.setup()
        return


class WebTestHttpd(object):
    """
    :param host: Host from which to serve (default: 127.0.0.1)
    :param port: Port from which to serve (default: 8000)
    :param server_cls: Class to use for the server (default depends on ssl vs non-ssl)
    :param handler_cls: Class to use for the RequestHandler
    :param use_ssl: Use a SSL server if no explicit server_cls is supplied
    :param key_file: Path to key file to use if ssl is enabled
    :param certificate: Path to certificate file to use if ssl is enabled
    :param encrypt_after_connect: For each connection, don't start encryption
                                  until a CONNECT message has been received.
                                  This enables the server to act as a
                                  self-proxy.
    :param router_cls: Router class to use when matching URLs to handlers
    :param doc_root: Document root for serving files
    :param routes: List of routes with which to initialize the router
    :param rewriter_cls: Class to use for request rewriter
    :param rewrites: List of rewrites with which to initialize the rewriter_cls
    :param config: Dictionary holding environment configuration settings for
                   handlers to read, or None to use the default values.
    :param bind_hostname: Boolean indicating whether to bind server to hostname.
    :param latency: Delay in ms to wait before serving each response, or
                    callable that returns a delay in ms

    HTTP server designed for testing scenarios.

    Takes a router class which provides one method get_handler which takes a
    Request and returns a handler function.

    .. attribute:: host

      The host name or ip address of the server

    .. attribute:: port

      The port on which the server is running

    .. attribute:: router

      The Router object used to associate requests with resources for this server

    .. attribute:: rewriter

      The Rewriter object used for URL rewriting

    .. attribute:: use_ssl

      Boolean indicating whether the server is using ssl

    .. attribute:: started

      Boolean indicating whether the server is running
    """

    def __init__(self, host="127.0.0.1", port=8000,
                 server_cls=None, handler_cls=WebTestRequestHandler,
                 use_ssl=False, key_file=None, certificate=None,
                 encrypt_after_connect=False,
                 router_cls=Router, doc_root=os.curdir, routes=None,
                 rewriter_cls=RequestRewriter, bind_hostname=True,
                 rewrites=None, latency=None, config=None):

        if routes is None:
            routes = default_routes.routes

        self.host = host

        self.router = router_cls(doc_root, routes)
        self.rewriter = rewriter_cls(rewrites if rewrites is not None else [])

        self.use_ssl = use_ssl
        self.logger = get_logger()

        if server_cls is None:
            server_cls = WebTestServer

        if use_ssl:
            if key_file is not None:
                assert os.path.exists(key_file)
            assert certificate is not None and os.path.exists(certificate)

        try:
            self.httpd = server_cls((host, port),
                                    handler_cls,
                                    self.router,
                                    self.rewriter,
                                    config=config,
                                    bind_hostname=bind_hostname,
                                    use_ssl=use_ssl,
                                    key_file=key_file,
                                    certificate=certificate,
                                    encrypt_after_connect=encrypt_after_connect,
                                    latency=latency)
            self.started = False

            _host, self.port = self.httpd.socket.getsockname()
        except Exception:
            self.logger.error('Init failed! You may need to modify your hosts file. Refer to README.md.')
            raise

    def start(self, block=False):
        """Start the server.

        :param block: True to run the server on the current thread, blocking,
                      False to run on a separate thread."""
        self.logger.info("Starting http server on %s:%s" % (self.host, self.port))
        self.started = True
        if block:
            self.httpd.serve_forever()
        else:
            self.server_thread = threading.Thread(target=self.httpd.serve_forever)
            self.server_thread.setDaemon(True)  # don't hang on exit
            self.server_thread.start()

    def stop(self):
        """
        Stops the server.

        If the server is not running, this method has no effect.
        """
        if self.started:
            try:
                self.httpd.shutdown()
                self.httpd.server_close()
                self.server_thread.join()
                self.server_thread = None
                self.logger.info("Stopped http server on %s:%s" % (self.host, self.port))
            except AttributeError:
                pass
            self.started = False
        self.httpd = None

    def get_url(self, path="/", query=None, fragment=None):
        if not self.started:
            return None

        return urlparse.urlunsplit(("http" if not self.use_ssl else "https",
                                    "%s:%s" % (self.host, self.port),
                                    path, query, fragment))
mpl-2.0
unnikrishnankgs/va
venv/lib/python3.5/site-packages/django/contrib/gis/db/backends/spatialite/introspection.py
391
3131
from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.sqlite3.introspection import (
    DatabaseIntrospection, FlexibleFieldLookupDict,
)
from django.utils import six


class GeoFlexibleFieldLookupDict(FlexibleFieldLookupDict):
    """
    Subclass that updates the `base_data_types_reverse` dict with entries
    for the geometry field types.
    """
    base_data_types_reverse = FlexibleFieldLookupDict.base_data_types_reverse.copy()
    base_data_types_reverse.update({
        geom_name: 'GeometryField'
        for geom_name in (
            'point', 'linestring', 'polygon', 'multipoint',
            'multilinestring', 'multipolygon', 'geometrycollection',
        )
    })


class SpatiaLiteIntrospection(DatabaseIntrospection):
    data_types_reverse = GeoFlexibleFieldLookupDict()

    def get_geometry_type(self, table_name, geo_col):
        """
        Return a (field_type, field_params) pair describing the geometry
        column `geo_col` of `table_name`, based on the SpatiaLite
        `geometry_columns` metadata table.
        """
        cursor = self.connection.cursor()
        try:
            # SpatiaLite 4.0 renamed the metadata column from 'type' to
            # 'geometry_type'.
            if self.connection.ops.spatial_version < (4, 0, 0):
                type_col = 'type'
            else:
                type_col = 'geometry_type'
            cursor.execute('SELECT coord_dimension, srid, %s '
                           'FROM geometry_columns '
                           'WHERE f_table_name=%%s AND f_geometry_column=%%s' % type_col,
                           (table_name, geo_col))
            row = cursor.fetchone()
            if not row:
                raise Exception('Could not find a geometry column for "%s"."%s"' %
                                (table_name, geo_col))

            # OGRGeomType does not require GDAL and makes it easy to convert
            # from OGC geom type name to Django field.
            dim, srid, ogr_type = row
            if isinstance(ogr_type, six.integer_types) and ogr_type > 1000:
                # SpatiaLite >= 4 uses the SFSQL 1.2 offsets 1000 (Z),
                # 2000 (M), and 3000 (ZM) to flag higher-dimensional
                # coordinates (M not yet supported by Django); strip the
                # offset and set the 2.5D wkb bit instead.
                ogr_type = ogr_type % 1000 + OGRGeomType.wkb25bit
            field_type = OGRGeomType(ogr_type).django

            # Collect any GeometryField keyword arguments that differ from
            # the field defaults.
            field_params = {}
            if srid != 4326:
                field_params['srid'] = srid
            if (isinstance(dim, six.string_types) and 'Z' in dim) or dim == 3:
                field_params['dim'] = 3
        finally:
            cursor.close()

        return field_type, field_params

    def get_indexes(self, cursor, table_name):
        """Extend the base indexes with spatial indexes from geometry_columns."""
        indexes = super(SpatiaLiteIntrospection, self).get_indexes(cursor, table_name)
        cursor.execute('SELECT f_geometry_column '
                       'FROM geometry_columns '
                       'WHERE f_table_name=%s AND spatial_index_enabled=1',
                       (table_name,))
        for row in cursor.fetchall():
            indexes[row[0]] = {'primary_key': False, 'unique': False}
        return indexes
bsd-2-clause
supernova2468/gae-smart-relay
lib/werkzeug/__init__.py
296
7210
# -*- coding: utf-8 -*-
"""
    werkzeug
    ~~~~~~~~

    Werkzeug is the Swiss Army knife of Python web development.

    It provides useful classes and functions for any WSGI application to
    make the life of a python web developer much easier.  All of the
    provided classes are independent from each other so you can mix it with
    any other library.

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
from types import ModuleType
import sys

from werkzeug._compat import iteritems

# the version.  Usually set automatically by a script.
__version__ = '0.9.4'


# This import magic raises concerns quite often which is why the implementation
# and motivation is explained here in detail now.
#
# The majority of the functions and classes provided by Werkzeug work on the
# HTTP and WSGI layer.  There is no useful grouping for those which is why
# they are all importable from "werkzeug" instead of the modules where they are
# implemented.  The downside of that is, that now everything would be loaded at
# once, even if unused.
#
# The implementation of a lazy-loading module in this file replaces the
# werkzeug package when imported from within.  Attribute access to the werkzeug
# module will then lazily import from the modules that implement the objects.


# import mapping to objects in other modules
all_by_module = {
    'werkzeug.debug':       ['DebuggedApplication'],
    'werkzeug.local':       ['Local', 'LocalManager', 'LocalProxy',
                             'LocalStack', 'release_local'],
    'werkzeug.serving':     ['run_simple'],
    'werkzeug.test':        ['Client', 'EnvironBuilder', 'create_environ',
                             'run_wsgi_app'],
    'werkzeug.testapp':     ['test_app'],
    'werkzeug.exceptions':  ['abort', 'Aborter'],
    'werkzeug.urls':        ['url_decode', 'url_encode', 'url_quote',
                             'url_quote_plus', 'url_unquote',
                             'url_unquote_plus', 'url_fix', 'Href',
                             'iri_to_uri', 'uri_to_iri'],
    'werkzeug.formparser':  ['parse_form_data'],
    'werkzeug.utils':       ['escape', 'environ_property',
                             'append_slash_redirect', 'redirect',
                             'cached_property', 'import_string',
                             'dump_cookie', 'parse_cookie', 'unescape',
                             'format_string', 'find_modules',
                             'header_property', 'html', 'xhtml',
                             'HTMLBuilder', 'validate_arguments',
                             'ArgumentValidationError', 'bind_arguments',
                             'secure_filename'],
    'werkzeug.wsgi':        ['get_current_url', 'get_host', 'pop_path_info',
                             'peek_path_info', 'SharedDataMiddleware',
                             'DispatcherMiddleware', 'ClosingIterator',
                             'FileWrapper', 'make_line_iter', 'LimitedStream',
                             'responder', 'wrap_file', 'extract_path_info'],
    'werkzeug.datastructures': ['MultiDict', 'CombinedMultiDict', 'Headers',
                             'EnvironHeaders', 'ImmutableList',
                             'ImmutableDict', 'ImmutableMultiDict',
                             'TypeConversionDict',
                             'ImmutableTypeConversionDict', 'Accept',
                             'MIMEAccept', 'CharsetAccept', 'LanguageAccept',
                             'RequestCacheControl', 'ResponseCacheControl',
                             'ETags', 'HeaderSet', 'WWWAuthenticate',
                             'Authorization', 'FileMultiDict', 'CallbackDict',
                             'FileStorage', 'OrderedMultiDict',
                             'ImmutableOrderedMultiDict'],
    'werkzeug.useragents':  ['UserAgent'],
    'werkzeug.http':        ['parse_etags', 'parse_date', 'http_date',
                             'cookie_date', 'parse_cache_control_header',
                             'is_resource_modified', 'parse_accept_header',
                             'parse_set_header', 'quote_etag', 'unquote_etag',
                             'generate_etag', 'dump_header',
                             'parse_list_header', 'parse_dict_header',
                             'parse_authorization_header',
                             'parse_www_authenticate_header',
                             'remove_entity_headers', 'is_entity_header',
                             'remove_hop_by_hop_headers',
                             'parse_options_header', 'dump_options_header',
                             'is_hop_by_hop_header', 'unquote_header_value',
                             'quote_header_value', 'HTTP_STATUS_CODES'],
    'werkzeug.wrappers':    ['BaseResponse', 'BaseRequest', 'Request',
                             'Response', 'AcceptMixin', 'ETagRequestMixin',
                             'ETagResponseMixin', 'ResponseStreamMixin',
                             'CommonResponseDescriptorsMixin',
                             'UserAgentMixin', 'AuthorizationMixin',
                             'WWWAuthenticateMixin',
                             'CommonRequestDescriptorsMixin'],
    'werkzeug.security':    ['generate_password_hash', 'check_password_hash'],
    # the undocumented easteregg ;-)
    'werkzeug._internal':   ['_easteregg']
}

# modules that should be imported when accessed as attributes of werkzeug
attribute_modules = frozenset(['exceptions', 'routing', 'script'])

# reverse map: object name -> module that defines it (used by __getattr__)
object_origins = {}
for module, items in iteritems(all_by_module):
    for item in items:
        object_origins[item] = module


class module(ModuleType):
    """Automatically import objects from the modules."""

    def __getattr__(self, name):
        if name in object_origins:
            # Import the defining module and cache *all* of its exported
            # names on this module object so later lookups skip __getattr__.
            module = __import__(object_origins[name], None, None, [name])
            for extra_name in all_by_module[module.__name__]:
                setattr(self, extra_name, getattr(module, extra_name))
            return getattr(module, name)
        elif name in attribute_modules:
            __import__('werkzeug.' + name)
        return ModuleType.__getattribute__(self, name)

    def __dir__(self):
        """Just show what we want to show."""
        result = list(new_module.__all__)
        # Bug fix: '__path__' used to appear twice in this tuple, producing a
        # duplicate entry in dir(werkzeug); each dunder is listed exactly once.
        result.extend(('__file__', '__path__', '__doc__', '__all__',
                       '__docformat__', '__name__', '__package__',
                       '__version__'))
        return result

# keep a reference to this module so that it's not garbage collected
old_module = sys.modules['werkzeug']


# setup the new module and patch it into the dict of loaded modules
new_module = sys.modules['werkzeug'] = module('werkzeug')
new_module.__dict__.update({
    '__file__':         __file__,
    '__package__':      'werkzeug',
    '__path__':         __path__,
    '__doc__':          __doc__,
    '__version__':      __version__,
    '__all__':          tuple(object_origins) + tuple(attribute_modules),
    '__docformat__':    'restructuredtext en'
})


# Due to bootstrapping issues we need to import exceptions here.
# Don't ask :-(
__import__('werkzeug.exceptions')
mit
dim0/ansible-commander
lib/main/views.py
1
22186
# Copyright (c) 2013 AnsibleWorks, Inc. # # This file is part of Ansible Commander # # Ansible Commander is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from django.http import HttpResponse from django.views.decorators.csrf import csrf_exempt from lib.main.models import * from django.contrib.auth.models import User from lib.main.serializers import * from lib.main.rbac import * from django.core.exceptions import PermissionDenied from rest_framework import mixins from rest_framework import generics from rest_framework import permissions from rest_framework.response import Response from rest_framework import status import exceptions import datetime from base_views import * class OrganizationsList(BaseList): model = Organization serializer_class = OrganizationSerializer permission_classes = (CustomRbac,) # I can see the organizations if: # I am a superuser # I am an admin of the organization # I am a member of the organization def _get_queryset(self): ''' I can see organizations when I am a superuser, or I am an admin or user in that organization ''' base = Organization.objects if self.request.user.is_superuser: return base.all() return base.filter( admins__in = [ self.request.user ] ).distinct() | base.filter( users__in = [ self.request.user ] ).distinct() class OrganizationsDetail(BaseDetail): model = Organization serializer_class = OrganizationSerializer permission_classes = 
(CustomRbac,) class OrganizationsAuditTrailList(BaseSubList): model = AuditTrail serializer_class = AuditTrailSerializer permission_classes = (CustomRbac,) parent_model = Organization relationship = 'audit_trail' postable = False def _get_queryset(self): ''' to list tags in the organization, I must be a superuser or org admin ''' organization = Organization.objects.get(pk=self.kwargs['pk']) if not (self.request.user.is_superuser or self.request.user in organization.admins.all()): # FIXME: use: organization.can_user_administrate(self.request.user) raise PermissionDenied() return AuditTrail.objects.filter(organization_by_audit_trail__in = [ organization ]) class OrganizationsUsersList(BaseSubList): model = User serializer_class = UserSerializer permission_classes = (CustomRbac,) parent_model = Organization relationship = 'users' postable = True inject_primary_key_on_post_as = 'organization' def _get_queryset(self): ''' to list users in the organization, I must be a superuser or org admin ''' organization = Organization.objects.get(pk=self.kwargs['pk']) if not self.request.user.is_superuser and not self.request.user in organization.admins.all(): raise PermissionDenied() return User.objects.filter(organizations__in = [ organization ]) class OrganizationsAdminsList(BaseSubList): model = User serializer_class = UserSerializer permission_classes = (CustomRbac,) parent_model = Organization relationship = 'admins' postable = True inject_primary_key_on_post_as = 'organization' def _get_queryset(self): ''' to list admins in the organization, I must be a superuser or org admin ''' organization = Organization.objects.get(pk=self.kwargs['pk']) if not self.request.user.is_superuser and not self.request.user in organization.admins.all(): raise PermissionDenied() return User.objects.filter(admin_of_organizations__in = [ organization ]) class OrganizationsProjectsList(BaseSubList): model = Project serializer_class = ProjectSerializer permission_classes = (CustomRbac,) parent_model = 
Organization # for sub list relationship = 'projects' # " " postable = True inject_primary_key_on_post_as = 'organization' def _get_queryset(self): ''' to list projects in the organization, I must be a superuser or org admin ''' organization = Organization.objects.get(pk=self.kwargs['pk']) if not (self.request.user.is_superuser or self.request.user in organization.admins.all()): raise PermissionDenied() return Project.objects.filter(organizations__in = [ organization ]) class OrganizationsTagsList(BaseSubList): model = Tag serializer_class = TagSerializer permission_classes = (CustomRbac,) parent_model = Organization # for sub list relationship = 'tags' # " " postable = True inject_primary_key_on_post_as = 'organization' def _get_queryset(self): ''' to list tags in the organization, I must be a superuser or org admin ''' organization = Organization.objects.get(pk=self.kwargs['pk']) if not (self.request.user.is_superuser or self.request.user in organization.admins.all()): # FIXME: use: organization.can_user_administrate(self.request.user) raise PermissionDenied() return Tag.objects.filter(organization_by_tag__in = [ organization ]) class OrganizationsTeamsList(BaseSubList): model = Team serializer_class = TeamSerializer permission_classes = (CustomRbac,) parent_model = Organization relationship = 'teams' postable = True inject_primary_key_on_post_as = 'organization' severable = False def _get_queryset(self): ''' to list users in the organization, I must be a superuser or org admin ''' organization = Organization.objects.get(pk=self.kwargs['pk']) if not self.request.user.is_superuser and not self.request.user in organization.admins.all(): raise PermissionDenied() return Team.objects.filter(organization = organization) class TeamsList(BaseList): model = Team serializer_class = TeamSerializer permission_classes = (CustomRbac,) # I can see a team if: # I am a superuser # I am an admin of the organization that the team is # I am on that team def _get_queryset(self): ''' 
I can see organizations when I am a superuser, or I am an admin or user in that organization ''' base = Team.objects if self.request.user.is_superuser: return base.all() return base.filter( admins__in = [ self.request.user ] ).distinct() | base.filter( users__in = [ self.request.user ] ).distinct() class TeamsDetail(BaseDetail): model = Team serializer_class = TeamSerializer permission_classes = (CustomRbac,) class TeamsUsersList(BaseSubList): model = User serializer_class = UserSerializer permission_classes = (CustomRbac,) parent_model = Team relationship = 'users' postable = True inject_primary_key_on_post_as = 'team' severable = True def _get_queryset(self): # FIXME: audit all BaseSubLists to check for permissions on the original object too 'team members can see the whole team, as can org admins or superusers' team = Team.objects.get(pk=self.kwargs['pk']) base = team.users.all() if self.request.user.is_superuser or self.request.user in team.organization.admins.all(): return base if self.request.user in team.users.all(): return base raise PermissionDenied() class TeamsCredentialsList(BaseSubList): model = Credential serializer_class = CredentialSerializer permission_classes = (CustomRbac,) parent_model = Team relationship = 'credentials' postable = True inject_primary_key_on_post_as = 'team' def _get_queryset(self): team = Team.objects.get(pk=self.kwargs['pk']) if not Team.can_user_administrate(self.request.user, team): if not (self.request.user.is_superuser or self.request.user in team.users.all()): raise PermissionDenied() project_credentials = Credential.objects.filter( team = team ) return project_credentials.distinct() class ProjectsList(BaseList): model = Project serializer_class = ProjectSerializer permission_classes = (CustomRbac,) # I can see a project if # I am a superuser # I am an admin of the organization that contains the project # I am a member of a team that also contains the project def _get_queryset(self): ''' I can see organizations when I am a 
superuser, or I am an admin or user in that organization ''' base = Project.objects if self.request.user.is_superuser: return base.all() my_teams = Team.objects.filter(users__in = [ self.request.user]) my_orgs = Organization.objects.filter(admins__in = [ self.request.user ]) return base.filter( teams__in = my_teams ).distinct() | base.filter( organizations__in = my_orgs ).distinct() class ProjectsDetail(BaseDetail): model = Project serializer_class = ProjectSerializer permission_classes = (CustomRbac,) class ProjectsOrganizationsList(BaseSubList): model = Organization serializer_class = OrganizationSerializer permission_classes = (CustomRbac,) parent_model = Project relationship = 'organizations' postable = False def _get_queryset(self): project = Project.objects.get(pk=self.kwargs['pk']) if not self.request.user.is_superuser: raise PermissionDenied() return Organization.objects.filter(projects__in = [ project ]) class TagsDetail(BaseDetail): model = Tag serializer_class = TagSerializer permission_classes = (CustomRbac,) class UsersList(BaseList): model = User serializer_class = UserSerializer permission_classes = (CustomRbac,) def post(self, request, *args, **kwargs): password = request.DATA.get('password', None) result = super(UsersList, self).post(request, *args, **kwargs) if password: pk = result.data['id'] user = User.objects.get(pk=pk) user.set_password(password) user.save() return result def _get_queryset(self): ''' I can see user records when I'm a superuser, I'm that user, I'm their org admin, or I'm on a team with that user ''' base = User.objects if self.request.user.is_superuser: return base.all() mine = base.filter(pk = self.request.user.pk).distinct() admin_of = base.filter(organizations__in = self.request.user.admin_of_organizations.all()).distinct() same_team = base.filter(teams__in = self.request.user.teams.all()).distinct() return mine | admin_of | same_team class UsersMeList(BaseList): model = User serializer_class = UserSerializer 
permission_classes = (CustomRbac,) def post(self, request, *args, **kwargs): raise PermissionDenied() def _get_queryset(self): ''' a quick way to find my user record ''' return User.objects.filter(pk=self.request.user.pk) class UsersTeamsList(BaseSubList): model = Team serializer_class = TeamSerializer permission_classes = (CustomRbac,) parent_model = User relationship = 'teams' postable = False def _get_queryset(self): user = User.objects.get(pk=self.kwargs['pk']) if not UserHelper.can_user_administrate(self.request.user, user): raise PermissionDenied() return Team.objects.filter(users__in = [ user ]) class UsersProjectsList(BaseSubList): model = Project serializer_class = ProjectSerializer permission_classes = (CustomRbac,) parent_model = User relationship = 'teams' postable = False def _get_queryset(self): user = User.objects.get(pk=self.kwargs['pk']) if not UserHelper.can_user_administrate(self.request.user, user): raise PermissionDenied() teams = user.teams.all() return Project.objects.filter(teams__in = teams) class UsersCredentialsList(BaseSubList): model = Credential serializer_class = CredentialSerializer permission_classes = (CustomRbac,) parent_model = User relationship = 'credentials' postable = True inject_primary_key_on_post_as = 'user' def _get_queryset(self): user = User.objects.get(pk=self.kwargs['pk']) if not UserHelper.can_user_administrate(self.request.user, user): raise PermissionDenied() project_credentials = Credential.objects.filter( team__users__in = [ user ] ) return user.credentials.distinct() | project_credentials.distinct() class UsersOrganizationsList(BaseSubList): model = Organization serializer_class = OrganizationSerializer permission_classes = (CustomRbac,) parent_model = User relationship = 'organizations' postable = False def _get_queryset(self): user = User.objects.get(pk=self.kwargs['pk']) if not UserHelper.can_user_administrate(self.request.user, user): raise PermissionDenied() return Organization.objects.filter(users__in = [ 
user ]) class UsersAdminOrganizationsList(BaseSubList): model = Organization serializer_class = OrganizationSerializer permission_classes = (CustomRbac,) parent_model = User relationship = 'admin_of_organizations' postable = False def _get_queryset(self): user = User.objects.get(pk=self.kwargs['pk']) if not UserHelper.can_user_administrate(self.request.user, user): raise PermissionDenied() return Organization.objects.filter(admins__in = [ user ]) class UsersDetail(BaseDetail): model = User serializer_class = UserSerializer permission_classes = (CustomRbac,) def put_filter(self, request, *args, **kwargs): ''' make sure non-read-only fields that can only be edited by admins, are only edited by admins ''' obj = User.objects.get(pk=kwargs['pk']) if EditHelper.illegal_changes(request, obj, UserHelper): raise PermissionDenied() if 'password' in request.DATA: obj.set_password(request.DATA['password']) obj.save() request.DATA.pop('password') class CredentialsDetail(BaseDetail): model = Credential serializer_class = CredentialSerializer permission_classes = (CustomRbac,) class InventoryList(BaseList): model = Inventory serializer_class = InventorySerializer permission_classes = (CustomRbac,) def _filter_queryset(self, base): if self.request.user.is_superuser: return base.all() admin_of = base.filter(organization__admins__in = [ self.request.user ]).distinct() has_user_perms = base.filter( permissions__user__in = [ self.request.user ], permissions__permission_type__in = PERMISSION_TYPES_ALLOWING_INVENTORY_READ, ).distinct() has_team_perms = base.filter( permissions__team__in = self.request.user.teams.all(), permissions__permission_type__in = PERMISSION_TYPES_ALLOWING_INVENTORY_READ, ).distinct() return admin_of | has_user_perms | has_team_perms def _get_queryset(self): ''' I can see inventory when I'm a superuser, an org admin of the inventory, or I have permissions on it ''' base = Inventory.objects return self._filter_queryset(base) class InventoryDetail(BaseDetail): model 
= Inventory serializer_class = InventorySerializer permission_classes = (CustomRbac,) class HostsList(BaseList): model = Host serializer_class = HostSerializer permission_classes = (CustomRbac,) def _get_queryset(self): ''' I can see hosts when: I'm a superuser, or an organization admin of an inventory they are in or when I have allowing read permissions via a user or team on an inventory they are in ''' base = Host.objects if self.request.user.is_superuser: return base.all() admin_of = base.filter(inventory__organization__admins__in = [ self.request.user ]).distinct() has_user_perms = base.filter( inventory__permissions__user__in = [ self.request.user ], inventory__permissions__permission_type__in = PERMISSION_TYPES_ALLOWING_INVENTORY_READ, ).distinct() has_team_perms = base.filter( inventory__permissions__team__in = self.request.user.teams.all(), inventory__permissions__permission_type__in = PERMISSION_TYPES_ALLOWING_INVENTORY_READ, ).distinct() return admin_of | has_user_perms | has_team_perms class HostsDetail(BaseDetail): model = Host serializer_class = HostSerializer permission_classes = (CustomRbac,) class InventoryHostsList(BaseSubList): model = Host serializer_class = HostSerializer permission_classes = (CustomRbac,) # to allow the sub-aspect listing parent_model = Inventory relationship = 'hosts' # to allow posting to this resource to create resources postable = True # FIXME: go back and add these to other SubLists inject_primary_key_on_post_as = 'inventory' severable = False def _get_queryset(self): inventory = Inventory.objects.get(pk=self.kwargs['pk']) base = inventory.hosts # FIXME: verify that you can can_read permission on the inventory is required return base.all() class GroupsList(BaseList): model = Group serializer_class = GroupSerializer permission_classes = (CustomRbac,) def _get_queryset(self): ''' I can see groups when: I'm a superuser, or an organization admin of an inventory they are in or when I have allowing read permissions via a user or 
team on an inventory they are in ''' base = Groups.objects if self.request.user.is_superuser: return base.all() admin_of = base.filter(inventory__organization__admins__in = [ self.request.user ]).distinct() has_user_perms = base.filter( inventory__permissions__user__in = [ self.request.user ], inventory__permissions__permission_type__in = PERMISSION_TYPES_ALLOWING_INVENTORY_READ, ).distinct() has_team_perms = base.filter( inventory__permissions__team__in = self.request.user.teams.all(), inventory__permissions__permission_type__in = PERMISSION_TYPES_ALLOWING_INVENTORY_READ, ).distinct() return admin_of | has_user_perms | has_team_perms class GroupsChildrenList(BaseSubList): model = Group serializer_class = GroupSerializer permission_classes = (CustomRbac,) parent_model = Group relationship = 'children' postable = True inject_primary_key_on_post_as = 'parent' def _get_queryset(self): # FIXME: this is the mostly the same as GroupsList, share code similar to how done with Host and Group objects. 
parent = Group.objects.get(pk=self.kwargs['pk']) # FIXME: verify read permissions on this object are still required at a higher level base = parent.children if self.request.user.is_superuser: return base.all() admin_of = base.filter(inventory__organization__admins__in = [ self.request.user ]).distinct() has_user_perms = base.filter( inventory__permissions__user__in = [ self.request.user ], inventory__permissions__permission_type__in = PERMISSION_TYPES_ALLOWING_INVENTORY_READ, ).distinct() has_team_perms = base.filter( inventory__permissions__team__in = self.request.user.teams.all(), inventory__permissions__permission_type__in = PERMISSION_TYPES_ALLOWING_INVENTORY_READ, ).distinct() return admin_of | has_user_perms | has_team_perms class GroupsDetail(BaseDetail): model = Group serializer_class = GroupSerializer permission_classes = (CustomRbac,) class InventoryGroupsList(BaseSubList): model = Group serializer_class = GroupSerializer permission_classes = (CustomRbac,) # to allow the sub-aspect listing parent_model = Inventory relationship = 'groups' # to allow posting to this resource to create resources postable = True # FIXME: go back and add these to other SubLists inject_primary_key_on_post_as = 'inventory' severable = False def _get_queryset(self): # FIXME: share code with inventory filter queryset methods (make that a classmethod) inventory = Inventory.objects.get(pk=self.kwargs['pk']) base = inventory.groups # FIXME: verify that you can can_read permission on the inventory is required return base class GroupsVariableDetail(VariableBaseDetail): model = VariableData serializer_class = VariableDataSerializer permission_classes = (CustomRbac,) parent_model = Group reverse_relationship = 'variable_data' relationship = 'group' class HostsVariableDetail(VariableBaseDetail): model = VariableData serializer_class = VariableDataSerializer permission_classes = (CustomRbac,) parent_model = Host reverse_relationship = 'variable_data' relationship = 'host' class 
VariableDetail(BaseDetail): model = VariableData serializer_class = VariableDataSerializer permission_classes = (CustomRbac,) def put(self, request, *args, **kwargs): raise PermissionDenied()
agpl-3.0
kryztof/pit
smsread.py
1
4142
#!/usr/bin/python
"""Fetch SMS messages from a Huawei-style modem web API (192.168.8.1)
and feed them into a history container."""

import subprocess
import string
import urllib.request
import urllib.parse
import xml.etree.ElementTree as ET

from histelement import *
from histelementcontainer import *
from utils import *
import pygame


class SmsReader:
    """Reads SMS messages from the modem's HTTP API.

    Workflow: fetch a verification token, page through the SMS list,
    then parse each XML page into the history container. Progress is
    reported via pygame USEREVENT+2 events.
    """

    def __init__(self, historycontainer):
        self.token = -1           # API verification token; -1 while unknown/invalid
        self.smses = -1           # list of raw XML response pages; -1 before any fetch
        self.histcontainer = historycontainer
        self.reading = 0          # re-entrancy guard for fetch_smses()

    def fetch_smses(self):
        """Run the full token -> list -> parse workflow, posting progress events."""
        if self.reading == 1:
            dbgprint("Already busy reading sms messages")
            return
        dbgprint("TTTTTTTTTTTTTTTTTTTTTTTTTTTTT")
        dbgprint("Fetching sms!")
        self.reading = 1
        self.post_progress(10)
        self.get_token()
        self.post_progress(30)
        if self.token != -1:
            self.get_smses()
            self.post_progress(60)
            self.parse_sms_answer()
            self.post_progress(80)
        self.reading = 0
        self.post_progress(100)
        dbgprint("Fetching sms done!")
        dbgprint("TTTTTTTTTTTTTTTTTTTTTTTTTTTTT")

    def get_token(self):
        """Fetch the webserver token; leaves self.token == -1 on any failure."""
        dbgprint("GETTING TOKEN")
        # self.token = subprocess.check_output(['curl', '-s',
        # 'http://192.168.8.1/api/webserver/token'])
        try:
            url = 'http://192.168.8.1/api/webserver/token'
            request = urllib.request.Request(url)
            response = urllib.request.urlopen(request)
            root = ET.fromstring(response.read())
            self.token = root.find('token').text
        except Exception:
            # Bug fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; narrowed to Exception.
            dbgprint("ERROR reading token")
            self.token = -1

    # The request below was copied from the Chrome debug console (network
    # tab, filter "sms-list", "copy as cURL") when opening the SMS page,
    # then simplified:
    #
    # curl 'http://192.168.8.1/api/sms/sms-list'
    #   -H '__RequestVerificationToken: 1185367643'
    #   --data '<?xml version="1.0" encoding="UTF-8"?><request>...</request>'
    def get_smses(self):
        """Page through /api/sms/sms-list, storing each raw XML page in self.smses."""
        dbgprint("GETTING SMSES")
        if self.token == -1:
            dbgprint("ERROR invalid token")
            return -1
        xml = ('<?xml version="1.0" encoding="UTF-8"?><request>'
               '<PageIndex>{:d}</PageIndex><ReadCount>10</ReadCount>'
               '<BoxType>1</BoxType><SortType>0</SortType>'
               '<Ascending>0</Ascending><UnreadPreferred>0</UnreadPreferred>'
               '</request>')
        url = 'http://192.168.8.1/api/sms/sms-list'
        headers = {'__RequestVerificationToken': self.token,
                   'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'}
        page = 1
        maxpages = 100            # hard cap so a misbehaving API can't loop forever
        self.smses = []
        while True:
            data = xml.format(page).encode('utf-8')
            req = urllib.request.Request(url, data, headers)
            response = urllib.request.urlopen(req)
            response = response.read().decode('utf-8')
            if "error" in response:
                dbgprint("Error reading smses: ", response)
                break
            if "<Count>0</Count>" in response:
                break
            self.smses.append(response)
            page += 1
            if page == maxpages:
                break

    def parse_sms_answer(self):
        """Parse the fetched XML pages and add each message to the container.

        Returns 0 on success, -1 when there is nothing to parse.
        """
        dbgprint("PARSING SMS ANSWER")
        # Bug fix: self.smses starts out as -1, so len(self.smses) raised a
        # TypeError if this was called before a successful get_smses().
        if self.smses == -1 or len(self.smses) == 0:
            dbgprint("ERROR: len(self.smses) == 0")
            return -1
        for page in self.smses:
            root = ET.fromstring(page)
            for msg in root.iter('Messages'):
                for child in msg:
                    nr = child.find('Phone').text
                    content = child.find('Content').text
                    date = child.find('Date').text
                    index = child.find('Index').text
                    self.histcontainer.add_element_from_sms(nr, index, content, date)
        return 0

    def post_progress(self, percentage):
        """Post a pygame USEREVENT+2 event carrying the progress percentage."""
        smsprogressevent = pygame.event.Event(pygame.USEREVENT + 2,
                                              progress=percentage)
        pygame.event.post(smsprogressevent)

# if __name__ == '__main__':
#     smsReader = SmsReader()
#     smsReader.fetch_smses()
gpl-3.0
jonathanslenders/asyncssh
asyncssh/ed25519.py
1
3294
# Copyright (c) 2015 by Ron Frederick <ronf@timeheart.net>.
# All rights reserved.
#
# This program and the accompanying materials are made available under
# the terms of the Eclipse Public License v1.0 which accompanies this
# distribution and is available at:
#
#     http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
#     Ron Frederick - initial implementation, API, and documentation

"""Ed25519 public key encryption handler"""

from .packet import String, SSHPacket
from .public_key import SSHKey, SSHCertificateV01, KeyExportError
from .public_key import register_public_key_alg, register_certificate_alg


# Short variable names are used here, matching names in the spec
# pylint: disable=invalid-name

class _Ed25519Key(SSHKey):
    """Handler for Ed25519 public key encryption"""

    algorithm = b'ssh-ed25519'

    def __init__(self, vk, sk):
        # vk is the verify (public) key; sk is the signing (private) key,
        # or None for a public-only key.
        self._vk = vk
        self._sk = sk

    def __eq__(self, other):
        # This isn't protected access - both objects are _Ed25519Key instances
        # pylint: disable=protected-access
        if not isinstance(other, self.__class__):
            return False
        return (self._vk, self._sk) == (other._vk, other._sk)

    def __hash__(self):
        # Hash only on the public part; equal keys share the same vk.
        return hash(self._vk)

    @classmethod
    def make_private(cls, vk, sk):
        """Construct an Ed25519 private key"""

        return cls(vk, sk)

    @classmethod
    def make_public(cls, vk):
        """Construct an Ed25519 public key"""

        return cls(vk, None)

    @classmethod
    def decode_ssh_private(cls, packet):
        """Decode an SSH format Ed25519 private key"""

        # Two strings: verify key, then signing key (evaluated in order).
        return packet.get_string(), packet.get_string()

    @classmethod
    def decode_ssh_public(cls, packet):
        """Decode an SSH format Ed25519 public key"""

        return (packet.get_string(),)

    def encode_ssh_private(self):
        """Encode an SSH format Ed25519 private key"""

        if self._sk is None:
            raise KeyExportError('Key is not private')

        parts = (String(self.algorithm), String(self._vk), String(self._sk))
        return b''.join(parts)

    def encode_ssh_public(self):
        """Encode an SSH format Ed25519 public key"""

        return b''.join((String(self.algorithm), String(self._vk)))

    def sign(self, data):
        """Return a signature of the specified data using this key"""

        if self._sk is None:
            raise ValueError('Private key needed for signing')

        # crypto_sign returns signature + message; strip off the trailing
        # copy of the message to get the detached signature.
        # NOTE(review): for empty data, [:-0] yields b'' — quirk preserved.
        signed = libnacl.crypto_sign(data, self._sk)
        return b''.join((String(self.algorithm),
                         String(signed[:-len(data)])))

    def verify(self, data, sig):
        """Verify a signature of the specified data using this key"""

        packet = SSHPacket(sig)

        if packet.get_string() != self.algorithm:
            return False

        detached = packet.get_string()
        packet.check_end()

        try:
            # Reattach the message and open; compare against the original.
            return libnacl.crypto_sign_open(detached + data, self._vk) == data
        except ValueError:
            return False


try:
    import libnacl
except (ImportError, OSError):
    # libnacl is optional; without it, Ed25519 algorithms stay unregistered.
    pass
else:
    register_public_key_alg(b'ssh-ed25519', _Ed25519Key)

    register_certificate_alg(b'ssh-ed25519-cert-v01@openssh.com',
                             _Ed25519Key, SSHCertificateV01)
epl-1.0
akhmadMizkat/odoo
addons/delivery/models/delivery_price_rule.py
33
2283
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from openerp import models, fields, api import openerp.addons.decimal_precision as dp class PriceRule(models.Model): _name = "delivery.price.rule" _description = "Delivery Price Rules" _order = 'sequence, list_price' @api.depends('variable', 'operator', 'max_value', 'list_base_price', 'list_price', 'variable_factor') def _get_name(self): for rule in self: name = 'if %s %s %s then' % (rule.variable, rule.operator, rule.max_value) if rule.list_base_price and not rule.list_price: name = '%s fixed price %s' % (name, rule.list_base_price) elif rule.list_price and not rule.list_base_price: name = '%s %s times %s' % (name, rule.list_price, rule.variable_factor) else: name = '%s fixed price %s and %s times %s Extra' % (name, rule.list_base_price, rule.list_price, rule.variable_factor) rule.name = name name = fields.Char(compute='_get_name') sequence = fields.Integer(required=True, help="Gives the sequence order when calculating delivery carrier.", default=10) carrier_id = fields.Many2one('delivery.carrier', 'Carrier', required=True, ondelete='cascade') variable = fields.Selection([('weight', 'Weight'), ('volume', 'Volume'), ('wv', 'Weight * Volume'), ('price', 'Price'), ('quantity', 'Quantity')], 'Variable', required=True, default='weight') operator = fields.Selection([('==', '='), ('<=', '<='), ('<', '<'), ('>=', '>='), ('>', '>')], 'Operator', required=True, default='<=') max_value = fields.Float('Maximum Value', required=True) variable_factor = fields.Selection([('weight', 'Weight'), ('volume', 'Volume'), ('wv', 'Weight * Volume'), ('price', 'Price'), ('quantity', 'Quantity')], 'Variable Factor', required=True, default='weight') list_base_price = fields.Float(string='Sale Base Price', digits=dp.get_precision('Product Price'), required=True, default=0.0) list_price = fields.Float('Sale Price', digits=dp.get_precision('Product Price'), required=True, default=0.0) 
standard_price = fields.Float('Cost Price', digits=dp.get_precision('Product Price'), required=True, default=0.0)
gpl-3.0
tysonholub/twilio-python
twilio/rest/sync/v1/service/sync_stream/__init__.py
1
16807
# coding=utf-8 r""" This code was generated by \ / _ _ _| _ _ | (_)\/(_)(_|\/| |(/_ v1.0.0 / / """ from twilio.base import deserialize from twilio.base import values from twilio.base.instance_context import InstanceContext from twilio.base.instance_resource import InstanceResource from twilio.base.list_resource import ListResource from twilio.base.page import Page from twilio.rest.sync.v1.service.sync_stream.stream_message import StreamMessageList class SyncStreamList(ListResource): """ PLEASE NOTE that this class contains beta products that are subject to change. Use them with caution. """ def __init__(self, version, service_sid): """ Initialize the SyncStreamList :param Version version: Version that contains the resource :param service_sid: The SID of the Sync Service that the resource is associated with :returns: twilio.rest.sync.v1.service.sync_stream.SyncStreamList :rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamList """ super(SyncStreamList, self).__init__(version) # Path Solution self._solution = {'service_sid': service_sid, } self._uri = '/Services/{service_sid}/Streams'.format(**self._solution) def create(self, unique_name=values.unset, ttl=values.unset): """ Create a new SyncStreamInstance :param unicode unique_name: An application-defined string that uniquely identifies the resource :param unicode ttl: How long, in seconds, before the Stream expires and is deleted :returns: Newly created SyncStreamInstance :rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamInstance """ data = values.of({'UniqueName': unique_name, 'Ttl': ttl, }) payload = self._version.create( 'POST', self._uri, data=data, ) return SyncStreamInstance(self._version, payload, service_sid=self._solution['service_sid'], ) def stream(self, limit=None, page_size=None): """ Streams SyncStreamInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. 
The results are returned as a generator, so this operation is memory efficient. :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.sync.v1.service.sync_stream.SyncStreamInstance] """ limits = self._version.read_limits(limit, page_size) page = self.page(page_size=limits['page_size'], ) return self._version.stream(page, limits['limit'], limits['page_limit']) def list(self, limit=None, page_size=None): """ Lists SyncStreamInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.sync.v1.service.sync_stream.SyncStreamInstance] """ return list(self.stream(limit=limit, page_size=page_size, )) def page(self, page_token=values.unset, page_number=values.unset, page_size=values.unset): """ Retrieve a single page of SyncStreamInstance records from the API. 
Request is executed immediately :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of SyncStreamInstance :rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamPage """ params = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, }) response = self._version.page( 'GET', self._uri, params=params, ) return SyncStreamPage(self._version, response, self._solution) def get_page(self, target_url): """ Retrieve a specific page of SyncStreamInstance records from the API. Request is executed immediately :param str target_url: API-generated URL for the requested results page :returns: Page of SyncStreamInstance :rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamPage """ response = self._version.domain.twilio.request( 'GET', target_url, ) return SyncStreamPage(self._version, response, self._solution) def get(self, sid): """ Constructs a SyncStreamContext :param sid: The SID of the Stream resource to fetch :returns: twilio.rest.sync.v1.service.sync_stream.SyncStreamContext :rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamContext """ return SyncStreamContext(self._version, service_sid=self._solution['service_sid'], sid=sid, ) def __call__(self, sid): """ Constructs a SyncStreamContext :param sid: The SID of the Stream resource to fetch :returns: twilio.rest.sync.v1.service.sync_stream.SyncStreamContext :rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamContext """ return SyncStreamContext(self._version, service_sid=self._solution['service_sid'], sid=sid, ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Sync.V1.SyncStreamList>' class SyncStreamPage(Page): """ PLEASE NOTE that this class contains beta products that are subject to change. Use them with caution. 
""" def __init__(self, version, response, solution): """ Initialize the SyncStreamPage :param Version version: Version that contains the resource :param Response response: Response from the API :param service_sid: The SID of the Sync Service that the resource is associated with :returns: twilio.rest.sync.v1.service.sync_stream.SyncStreamPage :rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamPage """ super(SyncStreamPage, self).__init__(version, response) # Path Solution self._solution = solution def get_instance(self, payload): """ Build an instance of SyncStreamInstance :param dict payload: Payload response from the API :returns: twilio.rest.sync.v1.service.sync_stream.SyncStreamInstance :rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamInstance """ return SyncStreamInstance(self._version, payload, service_sid=self._solution['service_sid'], ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Sync.V1.SyncStreamPage>' class SyncStreamContext(InstanceContext): """ PLEASE NOTE that this class contains beta products that are subject to change. Use them with caution. 
""" def __init__(self, version, service_sid, sid): """ Initialize the SyncStreamContext :param Version version: Version that contains the resource :param service_sid: The SID of the Sync Service with the Sync Stream resource to fetch :param sid: The SID of the Stream resource to fetch :returns: twilio.rest.sync.v1.service.sync_stream.SyncStreamContext :rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamContext """ super(SyncStreamContext, self).__init__(version) # Path Solution self._solution = {'service_sid': service_sid, 'sid': sid, } self._uri = '/Services/{service_sid}/Streams/{sid}'.format(**self._solution) # Dependents self._stream_messages = None def fetch(self): """ Fetch a SyncStreamInstance :returns: Fetched SyncStreamInstance :rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamInstance """ params = values.of({}) payload = self._version.fetch( 'GET', self._uri, params=params, ) return SyncStreamInstance( self._version, payload, service_sid=self._solution['service_sid'], sid=self._solution['sid'], ) def delete(self): """ Deletes the SyncStreamInstance :returns: True if delete succeeds, False otherwise :rtype: bool """ return self._version.delete('delete', self._uri) def update(self, ttl=values.unset): """ Update the SyncStreamInstance :param unicode ttl: How long, in seconds, before the Stream expires and is deleted :returns: Updated SyncStreamInstance :rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamInstance """ data = values.of({'Ttl': ttl, }) payload = self._version.update( 'POST', self._uri, data=data, ) return SyncStreamInstance( self._version, payload, service_sid=self._solution['service_sid'], sid=self._solution['sid'], ) @property def stream_messages(self): """ Access the stream_messages :returns: twilio.rest.sync.v1.service.sync_stream.stream_message.StreamMessageList :rtype: twilio.rest.sync.v1.service.sync_stream.stream_message.StreamMessageList """ if self._stream_messages is None: self._stream_messages = StreamMessageList( 
self._version, service_sid=self._solution['service_sid'], stream_sid=self._solution['sid'], ) return self._stream_messages def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.Sync.V1.SyncStreamContext {}>'.format(context) class SyncStreamInstance(InstanceResource): """ PLEASE NOTE that this class contains beta products that are subject to change. Use them with caution. """ def __init__(self, version, payload, service_sid, sid=None): """ Initialize the SyncStreamInstance :returns: twilio.rest.sync.v1.service.sync_stream.SyncStreamInstance :rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamInstance """ super(SyncStreamInstance, self).__init__(version) # Marshaled Properties self._properties = { 'sid': payload.get('sid'), 'unique_name': payload.get('unique_name'), 'account_sid': payload.get('account_sid'), 'service_sid': payload.get('service_sid'), 'url': payload.get('url'), 'links': payload.get('links'), 'date_expires': deserialize.iso8601_datetime(payload.get('date_expires')), 'date_created': deserialize.iso8601_datetime(payload.get('date_created')), 'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')), 'created_by': payload.get('created_by'), } # Context self._context = None self._solution = {'service_sid': service_sid, 'sid': sid or self._properties['sid'], } @property def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. 
All instance actions are proxied to the context :returns: SyncStreamContext for this SyncStreamInstance :rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamContext """ if self._context is None: self._context = SyncStreamContext( self._version, service_sid=self._solution['service_sid'], sid=self._solution['sid'], ) return self._context @property def sid(self): """ :returns: The unique string that identifies the resource :rtype: unicode """ return self._properties['sid'] @property def unique_name(self): """ :returns: An application-defined string that uniquely identifies the resource :rtype: unicode """ return self._properties['unique_name'] @property def account_sid(self): """ :returns: The SID of the Account that created the resource :rtype: unicode """ return self._properties['account_sid'] @property def service_sid(self): """ :returns: The SID of the Sync Service that the resource is associated with :rtype: unicode """ return self._properties['service_sid'] @property def url(self): """ :returns: The absolute URL of the Message Stream resource :rtype: unicode """ return self._properties['url'] @property def links(self): """ :returns: The URLs of the Stream's nested resources :rtype: unicode """ return self._properties['links'] @property def date_expires(self): """ :returns: The ISO 8601 date and time in GMT when the Message Stream expires :rtype: datetime """ return self._properties['date_expires'] @property def date_created(self): """ :returns: The ISO 8601 date and time in GMT when the resource was created :rtype: datetime """ return self._properties['date_created'] @property def date_updated(self): """ :returns: The ISO 8601 date and time in GMT when the resource was last updated :rtype: datetime """ return self._properties['date_updated'] @property def created_by(self): """ :returns: The Identity of the Stream's creator :rtype: unicode """ return self._properties['created_by'] def fetch(self): """ Fetch a SyncStreamInstance :returns: Fetched 
SyncStreamInstance :rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamInstance """ return self._proxy.fetch() def delete(self): """ Deletes the SyncStreamInstance :returns: True if delete succeeds, False otherwise :rtype: bool """ return self._proxy.delete() def update(self, ttl=values.unset): """ Update the SyncStreamInstance :param unicode ttl: How long, in seconds, before the Stream expires and is deleted :returns: Updated SyncStreamInstance :rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamInstance """ return self._proxy.update(ttl=ttl, ) @property def stream_messages(self): """ Access the stream_messages :returns: twilio.rest.sync.v1.service.sync_stream.stream_message.StreamMessageList :rtype: twilio.rest.sync.v1.service.sync_stream.stream_message.StreamMessageList """ return self._proxy.stream_messages def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.Sync.V1.SyncStreamInstance {}>'.format(context)
mit
webgeodatavore/django
tests/utils_tests/test_timezone.py
149
7857
import copy import datetime import pickle import unittest from django.test import override_settings from django.utils import timezone try: import pytz except ImportError: pytz = None requires_pytz = unittest.skipIf(pytz is None, "this test requires pytz") if pytz is not None: CET = pytz.timezone("Europe/Paris") EAT = timezone.get_fixed_timezone(180) # Africa/Nairobi ICT = timezone.get_fixed_timezone(420) # Asia/Bangkok class TimezoneTests(unittest.TestCase): def test_localtime(self): now = datetime.datetime.utcnow().replace(tzinfo=timezone.utc) local_tz = timezone.LocalTimezone() local_now = timezone.localtime(now, local_tz) self.assertEqual(local_now.tzinfo, local_tz) def test_localtime_naive(self): with self.assertRaises(ValueError): timezone.localtime(datetime.datetime.now()) def test_localtime_out_of_range(self): local_tz = timezone.LocalTimezone() long_ago = datetime.datetime(1900, 1, 1, tzinfo=timezone.utc) try: timezone.localtime(long_ago, local_tz) except (OverflowError, ValueError) as exc: self.assertIn("install pytz", exc.args[0]) else: raise unittest.SkipTest("Failed to trigger an OverflowError or ValueError") def test_now(self): with override_settings(USE_TZ=True): self.assertTrue(timezone.is_aware(timezone.now())) with override_settings(USE_TZ=False): self.assertTrue(timezone.is_naive(timezone.now())) def test_override(self): default = timezone.get_default_timezone() try: timezone.activate(ICT) with timezone.override(EAT): self.assertIs(EAT, timezone.get_current_timezone()) self.assertIs(ICT, timezone.get_current_timezone()) with timezone.override(None): self.assertIs(default, timezone.get_current_timezone()) self.assertIs(ICT, timezone.get_current_timezone()) timezone.deactivate() with timezone.override(EAT): self.assertIs(EAT, timezone.get_current_timezone()) self.assertIs(default, timezone.get_current_timezone()) with timezone.override(None): self.assertIs(default, timezone.get_current_timezone()) self.assertIs(default, 
timezone.get_current_timezone()) finally: timezone.deactivate() def test_override_decorator(self): default = timezone.get_default_timezone() @timezone.override(EAT) def func_tz_eat(): self.assertIs(EAT, timezone.get_current_timezone()) @timezone.override(None) def func_tz_none(): self.assertIs(default, timezone.get_current_timezone()) try: timezone.activate(ICT) func_tz_eat() self.assertIs(ICT, timezone.get_current_timezone()) func_tz_none() self.assertIs(ICT, timezone.get_current_timezone()) timezone.deactivate() func_tz_eat() self.assertIs(default, timezone.get_current_timezone()) func_tz_none() self.assertIs(default, timezone.get_current_timezone()) finally: timezone.deactivate() def test_copy(self): self.assertIsInstance(copy.copy(timezone.UTC()), timezone.UTC) self.assertIsInstance(copy.copy(timezone.LocalTimezone()), timezone.LocalTimezone) def test_deepcopy(self): self.assertIsInstance(copy.deepcopy(timezone.UTC()), timezone.UTC) self.assertIsInstance(copy.deepcopy(timezone.LocalTimezone()), timezone.LocalTimezone) def test_pickling_unpickling(self): self.assertIsInstance(pickle.loads(pickle.dumps(timezone.UTC())), timezone.UTC) self.assertIsInstance(pickle.loads(pickle.dumps(timezone.LocalTimezone())), timezone.LocalTimezone) def test_is_aware(self): self.assertTrue(timezone.is_aware(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT))) self.assertFalse(timezone.is_aware(datetime.datetime(2011, 9, 1, 13, 20, 30))) def test_is_naive(self): self.assertFalse(timezone.is_naive(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT))) self.assertTrue(timezone.is_naive(datetime.datetime(2011, 9, 1, 13, 20, 30))) def test_make_aware(self): self.assertEqual( timezone.make_aware(datetime.datetime(2011, 9, 1, 13, 20, 30), EAT), datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)) with self.assertRaises(ValueError): timezone.make_aware(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), EAT) def test_make_naive(self): self.assertEqual( 
timezone.make_naive(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), EAT), datetime.datetime(2011, 9, 1, 13, 20, 30)) self.assertEqual( timezone.make_naive(datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT), EAT), datetime.datetime(2011, 9, 1, 13, 20, 30)) with self.assertRaises(ValueError): timezone.make_naive(datetime.datetime(2011, 9, 1, 13, 20, 30), EAT) @requires_pytz def test_make_aware2(self): self.assertEqual( timezone.make_aware(datetime.datetime(2011, 9, 1, 12, 20, 30), CET), CET.localize(datetime.datetime(2011, 9, 1, 12, 20, 30))) with self.assertRaises(ValueError): timezone.make_aware(CET.localize(datetime.datetime(2011, 9, 1, 12, 20, 30)), CET) @requires_pytz def test_make_aware_pytz(self): self.assertEqual( timezone.make_naive(CET.localize(datetime.datetime(2011, 9, 1, 12, 20, 30)), CET), datetime.datetime(2011, 9, 1, 12, 20, 30)) self.assertEqual( timezone.make_naive( pytz.timezone("Asia/Bangkok").localize(datetime.datetime(2011, 9, 1, 17, 20, 30)), CET ), datetime.datetime(2011, 9, 1, 12, 20, 30)) with self.assertRaises(ValueError): timezone.make_naive(datetime.datetime(2011, 9, 1, 12, 20, 30), CET) @requires_pytz def test_make_aware_pytz_ambiguous(self): # 2:30 happens twice, once before DST ends and once after ambiguous = datetime.datetime(2015, 10, 25, 2, 30) with self.assertRaises(pytz.AmbiguousTimeError): timezone.make_aware(ambiguous, timezone=CET) std = timezone.make_aware(ambiguous, timezone=CET, is_dst=False) dst = timezone.make_aware(ambiguous, timezone=CET, is_dst=True) self.assertEqual(std - dst, datetime.timedelta(hours=1)) self.assertEqual(std.tzinfo.utcoffset(std), datetime.timedelta(hours=1)) self.assertEqual(dst.tzinfo.utcoffset(dst), datetime.timedelta(hours=2)) @requires_pytz def test_make_aware_pytz_non_existent(self): # 2:30 never happened due to DST non_existent = datetime.datetime(2015, 3, 29, 2, 30) with self.assertRaises(pytz.NonExistentTimeError): timezone.make_aware(non_existent, timezone=CET) std = 
timezone.make_aware(non_existent, timezone=CET, is_dst=False) dst = timezone.make_aware(non_existent, timezone=CET, is_dst=True) self.assertEqual(std - dst, datetime.timedelta(hours=1)) self.assertEqual(std.tzinfo.utcoffset(std), datetime.timedelta(hours=1)) self.assertEqual(dst.tzinfo.utcoffset(dst), datetime.timedelta(hours=2)) # round trip to UTC then back to CET std = timezone.localtime(timezone.localtime(std, timezone.UTC()), CET) dst = timezone.localtime(timezone.localtime(dst, timezone.UTC()), CET) self.assertEqual((std.hour, std.minute), (3, 30)) self.assertEqual((dst.hour, dst.minute), (1, 30))
bsd-3-clause
letuananh/beautifulsoup
bs4-python2/dammit.py
408
29302
# -*- coding: utf-8 -*- """Beautiful Soup bonus library: Unicode, Dammit This library converts a bytestream to Unicode through any means necessary. It is heavily based on code from Mark Pilgrim's Universal Feed Parser. It works best on XML and XML, but it does not rewrite the XML or HTML to reflect a new encoding; that's the tree builder's job. """ import codecs from htmlentitydefs import codepoint2name import re import logging import string # Import a library to autodetect character encodings. chardet_type = None try: # First try the fast C implementation. # PyPI package: cchardet import cchardet def chardet_dammit(s): return cchardet.detect(s)['encoding'] except ImportError: try: # Fall back to the pure Python implementation # Debian package: python-chardet # PyPI package: chardet import chardet def chardet_dammit(s): return chardet.detect(s)['encoding'] #import chardet.constants #chardet.constants._debug = 1 except ImportError: # No chardet available. def chardet_dammit(s): return None # Available from http://cjkpython.i18n.org/. try: import iconv_codec except ImportError: pass xml_encoding_re = re.compile( '^<\?.*encoding=[\'"](.*?)[\'"].*\?>'.encode(), re.I) html_meta_re = re.compile( '<\s*meta[^>]+charset\s*=\s*["\']?([^>]*?)[ /;\'">]'.encode(), re.I) class EntitySubstitution(object): """Substitute XML or HTML entities for the corresponding characters.""" def _populate_class_variables(): lookup = {} reverse_lookup = {} characters_for_re = [] for codepoint, name in list(codepoint2name.items()): character = unichr(codepoint) if codepoint != 34: # There's no point in turning the quotation mark into # &quot;, unless it happens within an attribute value, which # is handled elsewhere. characters_for_re.append(character) lookup[character] = name # But we do want to turn &quot; into the quotation mark. 
reverse_lookup[name] = character re_definition = "[%s]" % "".join(characters_for_re) return lookup, reverse_lookup, re.compile(re_definition) (CHARACTER_TO_HTML_ENTITY, HTML_ENTITY_TO_CHARACTER, CHARACTER_TO_HTML_ENTITY_RE) = _populate_class_variables() CHARACTER_TO_XML_ENTITY = { "'": "apos", '"': "quot", "&": "amp", "<": "lt", ">": "gt", } BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|" "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)" ")") AMPERSAND_OR_BRACKET = re.compile("([<>&])") @classmethod def _substitute_html_entity(cls, matchobj): entity = cls.CHARACTER_TO_HTML_ENTITY.get(matchobj.group(0)) return "&%s;" % entity @classmethod def _substitute_xml_entity(cls, matchobj): """Used with a regular expression to substitute the appropriate XML entity for an XML special character.""" entity = cls.CHARACTER_TO_XML_ENTITY[matchobj.group(0)] return "&%s;" % entity @classmethod def quoted_attribute_value(self, value): """Make a value into a quoted XML attribute, possibly escaping it. Most strings will be quoted using double quotes. Bob's Bar -> "Bob's Bar" If a string contains double quotes, it will be quoted using single quotes. Welcome to "my bar" -> 'Welcome to "my bar"' If a string contains both single and double quotes, the double quotes will be escaped, and the string will be quoted using double quotes. Welcome to "Bob's Bar" -> "Welcome to &quot;Bob's bar&quot; """ quote_with = '"' if '"' in value: if "'" in value: # The string contains both single and double # quotes. Turn the double quotes into # entities. We quote the double quotes rather than # the single quotes because the entity name is # "&quot;" whether this is HTML or XML. If we # quoted the single quotes, we'd have to decide # between &apos; and &squot;. replace_with = "&quot;" value = value.replace('"', replace_with) else: # There are double quotes but no single quotes. # We can use single quotes to quote the attribute. 
quote_with = "'" return quote_with + value + quote_with @classmethod def substitute_xml(cls, value, make_quoted_attribute=False): """Substitute XML entities for special XML characters. :param value: A string to be substituted. The less-than sign will become &lt;, the greater-than sign will become &gt;, and any ampersands will become &amp;. If you want ampersands that appear to be part of an entity definition to be left alone, use substitute_xml_containing_entities() instead. :param make_quoted_attribute: If True, then the string will be quoted, as befits an attribute value. """ # Escape angle brackets and ampersands. value = cls.AMPERSAND_OR_BRACKET.sub( cls._substitute_xml_entity, value) if make_quoted_attribute: value = cls.quoted_attribute_value(value) return value @classmethod def substitute_xml_containing_entities( cls, value, make_quoted_attribute=False): """Substitute XML entities for special XML characters. :param value: A string to be substituted. The less-than sign will become &lt;, the greater-than sign will become &gt;, and any ampersands that are not part of an entity defition will become &amp;. :param make_quoted_attribute: If True, then the string will be quoted, as befits an attribute value. """ # Escape angle brackets, and ampersands that aren't part of # entities. value = cls.BARE_AMPERSAND_OR_BRACKET.sub( cls._substitute_xml_entity, value) if make_quoted_attribute: value = cls.quoted_attribute_value(value) return value @classmethod def substitute_html(cls, s): """Replace certain Unicode characters with named HTML entities. This differs from data.encode(encoding, 'xmlcharrefreplace') in that the goal is to make the result more readable (to those with ASCII displays) rather than to recover from errors. There's absolutely nothing wrong with a UTF-8 string containg a LATIN SMALL LETTER E WITH ACUTE, but replacing that character with "&eacute;" will make it more readable to some people. 
""" return cls.CHARACTER_TO_HTML_ENTITY_RE.sub( cls._substitute_html_entity, s) class EncodingDetector: """Suggests a number of possible encodings for a bytestring. Order of precedence: 1. Encodings you specifically tell EncodingDetector to try first (the override_encodings argument to the constructor). 2. An encoding declared within the bytestring itself, either in an XML declaration (if the bytestring is to be interpreted as an XML document), or in a <meta> tag (if the bytestring is to be interpreted as an HTML document.) 3. An encoding detected through textual analysis by chardet, cchardet, or a similar external library. 4. UTF-8. 5. Windows-1252. """ def __init__(self, markup, override_encodings=None, is_html=False): self.override_encodings = override_encodings or [] self.chardet_encoding = None self.is_html = is_html self.declared_encoding = None # First order of business: strip a byte-order mark. self.markup, self.sniffed_encoding = self.strip_byte_order_mark(markup) def _usable(self, encoding, tried): if encoding is not None: encoding = encoding.lower() if encoding not in tried: tried.add(encoding) return True return False @property def encodings(self): """Yield a number of encodings that might work for this markup.""" tried = set() for e in self.override_encodings: if self._usable(e, tried): yield e # Did the document originally start with a byte-order mark # that indicated its encoding? if self._usable(self.sniffed_encoding, tried): yield self.sniffed_encoding # Look within the document for an XML or HTML encoding # declaration. if self.declared_encoding is None: self.declared_encoding = self.find_declared_encoding( self.markup, self.is_html) if self._usable(self.declared_encoding, tried): yield self.declared_encoding # Use third-party character set detection to guess at the # encoding. 
if self.chardet_encoding is None: self.chardet_encoding = chardet_dammit(self.markup) if self._usable(self.chardet_encoding, tried): yield self.chardet_encoding # As a last-ditch effort, try utf-8 and windows-1252. for e in ('utf-8', 'windows-1252'): if self._usable(e, tried): yield e @classmethod def strip_byte_order_mark(cls, data): """If a byte-order mark is present, strip it and return the encoding it implies.""" encoding = None if (len(data) >= 4) and (data[:2] == b'\xfe\xff') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16be' data = data[2:] elif (len(data) >= 4) and (data[:2] == b'\xff\xfe') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16le' data = data[2:] elif data[:3] == b'\xef\xbb\xbf': encoding = 'utf-8' data = data[3:] elif data[:4] == b'\x00\x00\xfe\xff': encoding = 'utf-32be' data = data[4:] elif data[:4] == b'\xff\xfe\x00\x00': encoding = 'utf-32le' data = data[4:] return data, encoding @classmethod def find_declared_encoding(cls, markup, is_html=False, search_entire_document=False): """Given a document, tries to find its declared encoding. An XML encoding is declared at the beginning of the document. An HTML encoding is declared in a <meta> tag, hopefully near the beginning of the document. """ if search_entire_document: xml_endpos = html_endpos = len(markup) else: xml_endpos = 1024 html_endpos = max(2048, int(len(markup) * 0.05)) declared_encoding = None declared_encoding_match = xml_encoding_re.search(markup, endpos=xml_endpos) if not declared_encoding_match and is_html: declared_encoding_match = html_meta_re.search(markup, endpos=html_endpos) if declared_encoding_match is not None: declared_encoding = declared_encoding_match.groups()[0].decode( 'ascii') if declared_encoding: return declared_encoding.lower() return None class UnicodeDammit: """A class for detecting the encoding of a *ML document and converting it to a Unicode string. 
If the source encoding is windows-1252, can replace MS smart quotes with their HTML or XML equivalents.""" # This dictionary maps commonly seen values for "charset" in HTML # meta tags to the corresponding Python codec names. It only covers # values that aren't in Python's aliases and can't be determined # by the heuristics in find_codec. CHARSET_ALIASES = {"macintosh": "mac-roman", "x-sjis": "shift-jis"} ENCODINGS_WITH_SMART_QUOTES = [ "windows-1252", "iso-8859-1", "iso-8859-2", ] def __init__(self, markup, override_encodings=[], smart_quotes_to=None, is_html=False): self.smart_quotes_to = smart_quotes_to self.tried_encodings = [] self.contains_replacement_characters = False self.is_html = is_html self.detector = EncodingDetector(markup, override_encodings, is_html) # Short-circuit if the data is in Unicode to begin with. if isinstance(markup, unicode) or markup == '': self.markup = markup self.unicode_markup = unicode(markup) self.original_encoding = None return # The encoding detector may have stripped a byte-order mark. # Use the stripped markup from this point on. self.markup = self.detector.markup u = None for encoding in self.detector.encodings: markup = self.detector.markup u = self._convert_from(encoding) if u is not None: break if not u: # None of the encodings worked. As an absolute last resort, # try them again with character replacement. for encoding in self.detector.encodings: if encoding != "ascii": u = self._convert_from(encoding, "replace") if u is not None: logging.warning( "Some characters could not be decoded, and were " "replaced with REPLACEMENT CHARACTER.") self.contains_replacement_characters = True break # If none of that worked, we could at this point force it to # ASCII, but that would destroy so much data that I think # giving up is better. 
self.unicode_markup = u if not u: self.original_encoding = None def _sub_ms_char(self, match): """Changes a MS smart quote character to an XML or HTML entity, or an ASCII character.""" orig = match.group(1) if self.smart_quotes_to == 'ascii': sub = self.MS_CHARS_TO_ASCII.get(orig).encode() else: sub = self.MS_CHARS.get(orig) if type(sub) == tuple: if self.smart_quotes_to == 'xml': sub = '&#x'.encode() + sub[1].encode() + ';'.encode() else: sub = '&'.encode() + sub[0].encode() + ';'.encode() else: sub = sub.encode() return sub def _convert_from(self, proposed, errors="strict"): proposed = self.find_codec(proposed) if not proposed or (proposed, errors) in self.tried_encodings: return None self.tried_encodings.append((proposed, errors)) markup = self.markup # Convert smart quotes to HTML if coming from an encoding # that might have them. if (self.smart_quotes_to is not None and proposed in self.ENCODINGS_WITH_SMART_QUOTES): smart_quotes_re = b"([\x80-\x9f])" smart_quotes_compiled = re.compile(smart_quotes_re) markup = smart_quotes_compiled.sub(self._sub_ms_char, markup) try: #print "Trying to convert document to %s (errors=%s)" % ( # proposed, errors) u = self._to_unicode(markup, proposed, errors) self.markup = u self.original_encoding = proposed except Exception as e: #print "That didn't work!" #print e return None #print "Correct encoding: %s" % proposed return self.markup def _to_unicode(self, data, encoding, errors="strict"): '''Given a string and its encoding, decodes the string into Unicode. 
%encoding is a string recognized by encodings.aliases''' return unicode(data, encoding, errors) @property def declared_html_encoding(self): if not self.is_html: return None return self.detector.declared_encoding def find_codec(self, charset): value = (self._codec(self.CHARSET_ALIASES.get(charset, charset)) or (charset and self._codec(charset.replace("-", ""))) or (charset and self._codec(charset.replace("-", "_"))) or (charset and charset.lower()) or charset ) if value: return value.lower() return None def _codec(self, charset): if not charset: return charset codec = None try: codecs.lookup(charset) codec = charset except (LookupError, ValueError): pass return codec # A partial mapping of ISO-Latin-1 to HTML entities/XML numeric entities. MS_CHARS = {b'\x80': ('euro', '20AC'), b'\x81': ' ', b'\x82': ('sbquo', '201A'), b'\x83': ('fnof', '192'), b'\x84': ('bdquo', '201E'), b'\x85': ('hellip', '2026'), b'\x86': ('dagger', '2020'), b'\x87': ('Dagger', '2021'), b'\x88': ('circ', '2C6'), b'\x89': ('permil', '2030'), b'\x8A': ('Scaron', '160'), b'\x8B': ('lsaquo', '2039'), b'\x8C': ('OElig', '152'), b'\x8D': '?', b'\x8E': ('#x17D', '17D'), b'\x8F': '?', b'\x90': '?', b'\x91': ('lsquo', '2018'), b'\x92': ('rsquo', '2019'), b'\x93': ('ldquo', '201C'), b'\x94': ('rdquo', '201D'), b'\x95': ('bull', '2022'), b'\x96': ('ndash', '2013'), b'\x97': ('mdash', '2014'), b'\x98': ('tilde', '2DC'), b'\x99': ('trade', '2122'), b'\x9a': ('scaron', '161'), b'\x9b': ('rsaquo', '203A'), b'\x9c': ('oelig', '153'), b'\x9d': '?', b'\x9e': ('#x17E', '17E'), b'\x9f': ('Yuml', ''),} # A parochial partial mapping of ISO-Latin-1 to ASCII. Contains # horrors like stripping diacritical marks to turn á into a, but also # contains non-horrors like turning “ into ". 
MS_CHARS_TO_ASCII = { b'\x80' : 'EUR', b'\x81' : ' ', b'\x82' : ',', b'\x83' : 'f', b'\x84' : ',,', b'\x85' : '...', b'\x86' : '+', b'\x87' : '++', b'\x88' : '^', b'\x89' : '%', b'\x8a' : 'S', b'\x8b' : '<', b'\x8c' : 'OE', b'\x8d' : '?', b'\x8e' : 'Z', b'\x8f' : '?', b'\x90' : '?', b'\x91' : "'", b'\x92' : "'", b'\x93' : '"', b'\x94' : '"', b'\x95' : '*', b'\x96' : '-', b'\x97' : '--', b'\x98' : '~', b'\x99' : '(TM)', b'\x9a' : 's', b'\x9b' : '>', b'\x9c' : 'oe', b'\x9d' : '?', b'\x9e' : 'z', b'\x9f' : 'Y', b'\xa0' : ' ', b'\xa1' : '!', b'\xa2' : 'c', b'\xa3' : 'GBP', b'\xa4' : '$', #This approximation is especially parochial--this is the #generic currency symbol. b'\xa5' : 'YEN', b'\xa6' : '|', b'\xa7' : 'S', b'\xa8' : '..', b'\xa9' : '', b'\xaa' : '(th)', b'\xab' : '<<', b'\xac' : '!', b'\xad' : ' ', b'\xae' : '(R)', b'\xaf' : '-', b'\xb0' : 'o', b'\xb1' : '+-', b'\xb2' : '2', b'\xb3' : '3', b'\xb4' : ("'", 'acute'), b'\xb5' : 'u', b'\xb6' : 'P', b'\xb7' : '*', b'\xb8' : ',', b'\xb9' : '1', b'\xba' : '(th)', b'\xbb' : '>>', b'\xbc' : '1/4', b'\xbd' : '1/2', b'\xbe' : '3/4', b'\xbf' : '?', b'\xc0' : 'A', b'\xc1' : 'A', b'\xc2' : 'A', b'\xc3' : 'A', b'\xc4' : 'A', b'\xc5' : 'A', b'\xc6' : 'AE', b'\xc7' : 'C', b'\xc8' : 'E', b'\xc9' : 'E', b'\xca' : 'E', b'\xcb' : 'E', b'\xcc' : 'I', b'\xcd' : 'I', b'\xce' : 'I', b'\xcf' : 'I', b'\xd0' : 'D', b'\xd1' : 'N', b'\xd2' : 'O', b'\xd3' : 'O', b'\xd4' : 'O', b'\xd5' : 'O', b'\xd6' : 'O', b'\xd7' : '*', b'\xd8' : 'O', b'\xd9' : 'U', b'\xda' : 'U', b'\xdb' : 'U', b'\xdc' : 'U', b'\xdd' : 'Y', b'\xde' : 'b', b'\xdf' : 'B', b'\xe0' : 'a', b'\xe1' : 'a', b'\xe2' : 'a', b'\xe3' : 'a', b'\xe4' : 'a', b'\xe5' : 'a', b'\xe6' : 'ae', b'\xe7' : 'c', b'\xe8' : 'e', b'\xe9' : 'e', b'\xea' : 'e', b'\xeb' : 'e', b'\xec' : 'i', b'\xed' : 'i', b'\xee' : 'i', b'\xef' : 'i', b'\xf0' : 'o', b'\xf1' : 'n', b'\xf2' : 'o', b'\xf3' : 'o', b'\xf4' : 'o', b'\xf5' : 'o', b'\xf6' : 'o', b'\xf7' : '/', b'\xf8' : 'o', b'\xf9' : 'u', b'\xfa' : 'u', 
b'\xfb' : 'u', b'\xfc' : 'u', b'\xfd' : 'y', b'\xfe' : 'b', b'\xff' : 'y', } # A map used when removing rogue Windows-1252/ISO-8859-1 # characters in otherwise UTF-8 documents. # # Note that \x81, \x8d, \x8f, \x90, and \x9d are undefined in # Windows-1252. WINDOWS_1252_TO_UTF8 = { 0x80 : b'\xe2\x82\xac', # € 0x82 : b'\xe2\x80\x9a', # ‚ 0x83 : b'\xc6\x92', # ƒ 0x84 : b'\xe2\x80\x9e', # „ 0x85 : b'\xe2\x80\xa6', # … 0x86 : b'\xe2\x80\xa0', # † 0x87 : b'\xe2\x80\xa1', # ‡ 0x88 : b'\xcb\x86', # ˆ 0x89 : b'\xe2\x80\xb0', # ‰ 0x8a : b'\xc5\xa0', # Š 0x8b : b'\xe2\x80\xb9', # ‹ 0x8c : b'\xc5\x92', # Œ 0x8e : b'\xc5\xbd', # Ž 0x91 : b'\xe2\x80\x98', # ‘ 0x92 : b'\xe2\x80\x99', # ’ 0x93 : b'\xe2\x80\x9c', # “ 0x94 : b'\xe2\x80\x9d', # ” 0x95 : b'\xe2\x80\xa2', # • 0x96 : b'\xe2\x80\x93', # – 0x97 : b'\xe2\x80\x94', # — 0x98 : b'\xcb\x9c', # ˜ 0x99 : b'\xe2\x84\xa2', # ™ 0x9a : b'\xc5\xa1', # š 0x9b : b'\xe2\x80\xba', # › 0x9c : b'\xc5\x93', # œ 0x9e : b'\xc5\xbe', # ž 0x9f : b'\xc5\xb8', # Ÿ 0xa0 : b'\xc2\xa0', #   0xa1 : b'\xc2\xa1', # ¡ 0xa2 : b'\xc2\xa2', # ¢ 0xa3 : b'\xc2\xa3', # £ 0xa4 : b'\xc2\xa4', # ¤ 0xa5 : b'\xc2\xa5', # ¥ 0xa6 : b'\xc2\xa6', # ¦ 0xa7 : b'\xc2\xa7', # § 0xa8 : b'\xc2\xa8', # ¨ 0xa9 : b'\xc2\xa9', # © 0xaa : b'\xc2\xaa', # ª 0xab : b'\xc2\xab', # « 0xac : b'\xc2\xac', # ¬ 0xad : b'\xc2\xad', # ­ 0xae : b'\xc2\xae', # ® 0xaf : b'\xc2\xaf', # ¯ 0xb0 : b'\xc2\xb0', # ° 0xb1 : b'\xc2\xb1', # ± 0xb2 : b'\xc2\xb2', # ² 0xb3 : b'\xc2\xb3', # ³ 0xb4 : b'\xc2\xb4', # ´ 0xb5 : b'\xc2\xb5', # µ 0xb6 : b'\xc2\xb6', # ¶ 0xb7 : b'\xc2\xb7', # · 0xb8 : b'\xc2\xb8', # ¸ 0xb9 : b'\xc2\xb9', # ¹ 0xba : b'\xc2\xba', # º 0xbb : b'\xc2\xbb', # » 0xbc : b'\xc2\xbc', # ¼ 0xbd : b'\xc2\xbd', # ½ 0xbe : b'\xc2\xbe', # ¾ 0xbf : b'\xc2\xbf', # ¿ 0xc0 : b'\xc3\x80', # À 0xc1 : b'\xc3\x81', # Á 0xc2 : b'\xc3\x82', #  0xc3 : b'\xc3\x83', # à 0xc4 : b'\xc3\x84', # Ä 0xc5 : b'\xc3\x85', # Å 0xc6 : b'\xc3\x86', # Æ 0xc7 : b'\xc3\x87', # Ç 0xc8 : b'\xc3\x88', # È 0xc9 : 
        b'\xc3\x89', # É
                             0xca : b'\xc3\x8a', # Ê
                             0xcb : b'\xc3\x8b', # Ë
                             0xcc : b'\xc3\x8c', # Ì
                             0xcd : b'\xc3\x8d', # Í
                             0xce : b'\xc3\x8e', # Î
                             0xcf : b'\xc3\x8f', # Ï
                             0xd0 : b'\xc3\x90', # Ð
                             0xd1 : b'\xc3\x91', # Ñ
                             0xd2 : b'\xc3\x92', # Ò
                             0xd3 : b'\xc3\x93', # Ó
                             0xd4 : b'\xc3\x94', # Ô
                             0xd5 : b'\xc3\x95', # Õ
                             0xd6 : b'\xc3\x96', # Ö
                             0xd7 : b'\xc3\x97', # ×
                             0xd8 : b'\xc3\x98', # Ø
                             0xd9 : b'\xc3\x99', # Ù
                             0xda : b'\xc3\x9a', # Ú
                             0xdb : b'\xc3\x9b', # Û
                             0xdc : b'\xc3\x9c', # Ü
                             0xdd : b'\xc3\x9d', # Ý
                             0xde : b'\xc3\x9e', # Þ
                             0xdf : b'\xc3\x9f', # ß
                             0xe0 : b'\xc3\xa0', # à
                             0xe1 : b'\xc3\xa1', # á (fixed: value was truncated to the single byte b'\xa1', not valid UTF-8)
                             0xe2 : b'\xc3\xa2', # â
                             0xe3 : b'\xc3\xa3', # ã
                             0xe4 : b'\xc3\xa4', # ä
                             0xe5 : b'\xc3\xa5', # å
                             0xe6 : b'\xc3\xa6', # æ
                             0xe7 : b'\xc3\xa7', # ç
                             0xe8 : b'\xc3\xa8', # è
                             0xe9 : b'\xc3\xa9', # é
                             0xea : b'\xc3\xaa', # ê
                             0xeb : b'\xc3\xab', # ë
                             0xec : b'\xc3\xac', # ì
                             0xed : b'\xc3\xad', # í
                             0xee : b'\xc3\xae', # î
                             0xef : b'\xc3\xaf', # ï
                             0xf0 : b'\xc3\xb0', # ð
                             0xf1 : b'\xc3\xb1', # ñ
                             0xf2 : b'\xc3\xb2', # ò
                             0xf3 : b'\xc3\xb3', # ó
                             0xf4 : b'\xc3\xb4', # ô
                             0xf5 : b'\xc3\xb5', # õ
                             0xf6 : b'\xc3\xb6', # ö
                             0xf7 : b'\xc3\xb7', # ÷
                             0xf8 : b'\xc3\xb8', # ø
                             0xf9 : b'\xc3\xb9', # ù
                             0xfa : b'\xc3\xba', # ú
                             0xfb : b'\xc3\xbb', # û
                             0xfc : b'\xc3\xbc', # ü
                             0xfd : b'\xc3\xbd', # ý
                             0xfe : b'\xc3\xbe', # þ
                             }

    MULTIBYTE_MARKERS_AND_SIZES = [
        (0xc2, 0xdf, 2), # 2-byte characters start with a byte C2-DF
        (0xe0, 0xef, 3), # 3-byte characters start with E0-EF
        (0xf0, 0xf4, 4), # 4-byte characters start with F0-F4
        ]

    FIRST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[0][0]
    LAST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[-1][1]

    @classmethod
    def detwingle(cls, in_bytes, main_encoding="utf8",
                  embedded_encoding="windows-1252"):
        """Fix characters from one encoding embedded in some other encoding.

        Currently the only situation supported is Windows-1252 (or its
        subset ISO-8859-1), embedded in UTF-8.

        The input must be a bytestring. If you've already converted
        the document to Unicode, you're too late.
The output is a bytestring in which `embedded_encoding` characters have been converted to their `main_encoding` equivalents. """ if embedded_encoding.replace('_', '-').lower() not in ( 'windows-1252', 'windows_1252'): raise NotImplementedError( "Windows-1252 and ISO-8859-1 are the only currently supported " "embedded encodings.") if main_encoding.lower() not in ('utf8', 'utf-8'): raise NotImplementedError( "UTF-8 is the only currently supported main encoding.") byte_chunks = [] chunk_start = 0 pos = 0 while pos < len(in_bytes): byte = in_bytes[pos] if not isinstance(byte, int): # Python 2.x byte = ord(byte) if (byte >= cls.FIRST_MULTIBYTE_MARKER and byte <= cls.LAST_MULTIBYTE_MARKER): # This is the start of a UTF-8 multibyte character. Skip # to the end. for start, end, size in cls.MULTIBYTE_MARKERS_AND_SIZES: if byte >= start and byte <= end: pos += size break elif byte >= 0x80 and byte in cls.WINDOWS_1252_TO_UTF8: # We found a Windows-1252 character! # Save the string up to this point as a chunk. byte_chunks.append(in_bytes[chunk_start:pos]) # Now translate the Windows-1252 character into UTF-8 # and add it as another, one-byte chunk. byte_chunks.append(cls.WINDOWS_1252_TO_UTF8[byte]) pos += 1 chunk_start = pos else: # Go on to the next character. pos += 1 if chunk_start == 0: # The string is unchanged. return in_bytes else: # Store the final chunk. byte_chunks.append(in_bytes[chunk_start:]) return b''.join(byte_chunks)
mit
Yuudachimoe/HikariChun-RedBot
lib/youtube_dl/extractor/mofosex.py
39
2020
from __future__ import unicode_literals

from ..utils import (
    int_or_none,
    str_to_int,
    unified_strdate,
)
from .keezmovies import KeezMoviesIE


class MofosexIE(KeezMoviesIE):
    """Extractor for mofosex.com video pages.

    Inherits page download/format extraction from KeezMoviesIE
    (via _extract_info) and adds site-specific metadata scraping.
    """
    _VALID_URL = r'https?://(?:www\.)?mofosex\.com/videos/(?P<id>\d+)/(?P<display_id>[^/?#&.]+)\.html'
    _TESTS = [{
        'url': 'http://www.mofosex.com/videos/318131/amateur-teen-playing-and-masturbating-318131.html',
        'md5': '39a15853632b7b2e5679f92f69b78e91',
        'info_dict': {
            'id': '318131',
            'display_id': 'amateur-teen-playing-and-masturbating-318131',
            'ext': 'mp4',
            'title': 'amateur teen playing and masturbating',
            'thumbnail': r're:^https?://.*\.jpg$',
            'upload_date': '20121114',
            'view_count': int,
            'like_count': int,
            'dislike_count': int,
            'age_limit': 18,
        }
    }, {
        # This video is no longer available
        'url': 'http://www.mofosex.com/videos/5018/japanese-teen-music-video.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Return an info dict for the given URL.

        Base-class _extract_info supplies id/title/formats; the counts,
        upload date and thumbnail are scraped here. All scrapes are
        fatal=False, so any missing field becomes None rather than
        aborting extraction.
        """
        webpage, info = self._extract_info(url)

        view_count = str_to_int(self._search_regex(
            r'VIEWS:</span>\s*([\d,.]+)', webpage, 'view count', fatal=False))
        like_count = int_or_none(self._search_regex(
            r'id=["\']amountLikes["\'][^>]*>(\d+)', webpage,
            'like count', fatal=False))
        # Fixed copy-paste defect: this field was labeled 'like count',
        # which made the "unable to extract" warning name the wrong field.
        dislike_count = int_or_none(self._search_regex(
            r'id=["\']amountDislikes["\'][^>]*>(\d+)', webpage,
            'dislike count', fatal=False))
        upload_date = unified_strdate(self._html_search_regex(
            r'Added:</span>([^<]+)', webpage, 'upload date', fatal=False))

        info.update({
            'view_count': view_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'upload_date': upload_date,
            'thumbnail': self._og_search_thumbnail(webpage),
        })

        return info
gpl-3.0
DanielSBrown/osf.io
api/institutions/views.py
3
10565
from rest_framework import generics from rest_framework import permissions as drf_permissions from rest_framework import exceptions from rest_framework import status from rest_framework.response import Response from modularodm import Q from framework.auth.oauth_scopes import CoreScopes from website.models import Node, User, Institution from website.util import permissions as osf_permissions from api.base import permissions as base_permissions from api.base.filters import ODMFilterMixin from api.base.views import JSONAPIBaseView from api.base.serializers import JSONAPISerializer from api.base.utils import get_object_or_error, get_user_auth from api.base.pagination import MaxSizePagination from api.base.parsers import ( JSONAPIRelationshipParser, JSONAPIRelationshipParserForRegularJSON, ) from api.base.exceptions import RelationshipPostMakesNoChanges from api.nodes.serializers import NodeSerializer from api.users.serializers import UserSerializer from api.institutions.authentication import InstitutionAuthentication from api.institutions.serializers import InstitutionSerializer, InstitutionNodesRelationshipSerializer from api.institutions.permissions import UserIsAffiliated class InstitutionMixin(object): """Mixin with convenience method get_institution """ institution_lookup_url_kwarg = 'institution_id' def get_institution(self): inst = get_object_or_error( Institution, self.kwargs[self.institution_lookup_url_kwarg], display_name='institution' ) return inst class InstitutionList(JSONAPIBaseView, generics.ListAPIView, ODMFilterMixin): """ Paginated list of verified Institutions affiliated with COS ##Institution Attributes OSF Institutions have the "institutions" `type`. 
name type description ========================================================================= name string title of the institution id string unique identifier in the OSF logo_path string a path to the institution's static logo #This Request/Response """ permission_classes = ( drf_permissions.IsAuthenticatedOrReadOnly, base_permissions.TokenHasScope, ) required_read_scopes = [CoreScopes.INSTITUTION_READ] required_write_scopes = [CoreScopes.NULL] model_class = Institution pagination_class = MaxSizePagination serializer_class = InstitutionSerializer view_category = 'institutions' view_name = 'institution-list' ordering = ('name', ) def get_default_odm_query(self): return Q('_id', 'ne', None) # overrides ListAPIView def get_queryset(self): return Institution.find(self.get_query_from_request()) class InstitutionDetail(JSONAPIBaseView, generics.RetrieveAPIView, InstitutionMixin): """ Details about a given institution. ##Attributes OSF Institutions have the "institutions" `type`. name type description ========================================================================= name string title of the institution id string unique identifier in the OSF logo_path string a path to the institution's static logo ##Relationships ###Nodes List of nodes that have this institution as its primary institution. ###Users List of users that are affiliated with this institution. 
##Links self: the canonical api endpoint of this institution html: this institution's page on the OSF website #This Request/Response """ permission_classes = ( drf_permissions.IsAuthenticatedOrReadOnly, base_permissions.TokenHasScope, ) required_read_scopes = [CoreScopes.INSTITUTION_READ] required_write_scopes = [CoreScopes.NULL] model_class = Institution serializer_class = InstitutionSerializer view_category = 'institutions' view_name = 'institution-detail' # overrides RetrieveAPIView def get_object(self): return self.get_institution() class InstitutionNodeList(JSONAPIBaseView, ODMFilterMixin, generics.ListAPIView, InstitutionMixin): """Nodes that have selected an institution as their primary institution. ##Permissions Only public nodes or ones in which current user is a contributor. """ permission_classes = ( drf_permissions.IsAuthenticatedOrReadOnly, base_permissions.TokenHasScope, ) required_read_scopes = [CoreScopes.INSTITUTION_READ, CoreScopes.NODE_BASE_READ] required_write_scopes = [CoreScopes.NULL] model_class = Node serializer_class = NodeSerializer view_category = 'institutions' view_name = 'institution-nodes' ordering = ('-date_modified', ) base_node_query = ( Q('is_deleted', 'ne', True) & Q('is_folder', 'ne', True) & Q('is_registration', 'eq', False) & Q('parent_node', 'eq', None) & Q('is_public', 'eq', True) ) # overrides ODMFilterMixin def get_default_odm_query(self): return self.base_node_query # overrides RetrieveAPIView def get_queryset(self): inst = self.get_institution() query = self.get_query_from_request() return Node.find_by_institutions(inst, query) class InstitutionUserList(JSONAPIBaseView, ODMFilterMixin, generics.ListAPIView, InstitutionMixin): """Users that have been authenticated with the institution. 
""" permission_classes = ( drf_permissions.IsAuthenticatedOrReadOnly, base_permissions.TokenHasScope, ) required_read_scopes = [CoreScopes.INSTITUTION_READ, CoreScopes.USERS_READ] required_write_scopes = [CoreScopes.NULL] model_class = User serializer_class = UserSerializer view_category = 'institutions' view_name = 'institution-users' # overrides ODMFilterMixin def get_default_odm_query(self): inst = self.get_institution() query = Q('_affiliated_institutions', 'eq', inst.node) return query # overrides RetrieveAPIView def get_queryset(self): query = self.get_query_from_request() return User.find(query) class InstitutionAuth(JSONAPIBaseView, generics.CreateAPIView): permission_classes = ( drf_permissions.IsAuthenticated, base_permissions.TokenHasScope, ) serializer_class = JSONAPISerializer required_read_scopes = [CoreScopes.NULL] required_write_scopes = [CoreScopes.NULL] authentication_classes = (InstitutionAuthentication, ) view_category = 'institutions' view_name = 'institution-auth' def post(self, request, *args, **kwargs): return Response(status=status.HTTP_204_NO_CONTENT) class InstitutionRegistrationList(InstitutionNodeList): """Registrations have selected an institution as their primary institution. 
""" view_name = 'institution-registrations' base_node_query = ( Q('is_deleted', 'ne', True) & Q('is_folder', 'ne', True) & Q('is_registration', 'eq', True) & Q('is_public', 'eq', True) ) ordering = ('-date_modified', ) def get_queryset(self): inst = self.get_institution() query = self.get_query_from_request() nodes = Node.find_by_institutions(inst, query) return [node for node in nodes if not node.is_retracted] class InstitutionNodesRelationship(JSONAPIBaseView, generics.RetrieveDestroyAPIView, generics.CreateAPIView, InstitutionMixin): """ Relationship Endpoint for Institution -> Nodes Relationship Used to set, remove, update and retrieve the affiliated_institution of nodes with this institution ##Actions ###Create Method: POST URL: /links/self Query Params: <none> Body (JSON): { "data": [{ "type": "nodes", # required "id": <node_id> # required }] } Success: 201 This requires admin permissions on the nodes requested and for the user making the request to have the institution affiliated in their account. ###Destroy Method: DELETE URL: /links/self Query Params: <none> Body (JSON): { "data": [{ "type": "nodes", # required "id": <node_id> # required }] } Success: 204 This requires admin permissions in the nodes requested. 
""" permission_classes = ( drf_permissions.IsAuthenticatedOrReadOnly, base_permissions.TokenHasScope, UserIsAffiliated ) required_read_scopes = [CoreScopes.NULL] required_write_scopes = [CoreScopes.NULL] serializer_class = InstitutionNodesRelationshipSerializer parser_classes = (JSONAPIRelationshipParser, JSONAPIRelationshipParserForRegularJSON, ) view_category = 'institutions' view_name = 'institution-relationships-nodes' def get_object(self): inst = self.get_institution() auth = get_user_auth(self.request) nodes = [node for node in Node.find_by_institutions(inst, Q('is_registration', 'eq', False) & Q('is_deleted', 'ne', True)) if node.is_public or node.can_view(auth)] ret = { 'data': nodes, 'self': inst } self.check_object_permissions(self.request, ret) return ret def perform_destroy(self, instance): data = self.request.data['data'] user = self.request.user ids = [datum['id'] for datum in data] nodes = [] for id_ in ids: node = Node.load(id_) if not node.has_permission(user, osf_permissions.ADMIN): raise exceptions.PermissionDenied(detail='Admin permission on node {} required'.format(id_)) nodes.append(node) for node in nodes: node.remove_affiliated_institution(inst=instance['self'], user=user) node.save() def create(self, *args, **kwargs): try: ret = super(InstitutionNodesRelationship, self).create(*args, **kwargs) except RelationshipPostMakesNoChanges: return Response(status=status.HTTP_204_NO_CONTENT) return ret
apache-2.0
buntyke/GPy
GPy/kern/_src/todo/eq_ode1.py
19
23502
# Copyright (c) 2013, GPy Authors, see AUTHORS.txt # Licensed under the BSD 3-clause license (see LICENSE.txt) from kernpart import Kernpart import numpy as np from GPy.util.linalg import mdot, pdinv from GPy.util.ln_diff_erfs import ln_diff_erfs import pdb from scipy import weave class Eq_ode1(Kernpart): """ Covariance function for first order differential equation driven by an exponentiated quadratic covariance. This outputs of this kernel have the form .. math:: \frac{\text{d}y_j}{\text{d}t} = \sum_{i=1}^R w_{j,i} f_i(t-\delta_j) +\sqrt{\kappa_j}g_j(t) - d_jy_j(t) where :math:`R` is the rank of the system, :math:`w_{j,i}` is the sensitivity of the :math:`j`th output to the :math:`i`th latent function, :math:`d_j` is the decay rate of the :math:`j`th output and :math:`f_i(t)` and :math:`g_i(t)` are independent latent Gaussian processes goverened by an exponentiated quadratic covariance. :param output_dim: number of outputs driven by latent function. :type output_dim: int :param W: sensitivities of each output to the latent driving function. :type W: ndarray (output_dim x rank). :param rank: If rank is greater than 1 then there are assumed to be a total of rank latent forces independently driving the system, each with identical covariance. :type rank: int :param decay: decay rates for the first order system. :type decay: array of length output_dim. :param delay: delay between latent force and output response. :type delay: array of length output_dim. :param kappa: diagonal term that allows each latent output to have an independent component to the response. :type kappa: array of length output_dim. .. Note: see first order differential equation examples in GPy.examples.regression for some usage. 
""" def __init__(self,output_dim, W=None, rank=1, kappa=None, lengthscale=1.0, decay=None, delay=None): self.rank = rank self.input_dim = 1 self.name = 'eq_ode1' self.output_dim = output_dim self.lengthscale = lengthscale self.num_params = self.output_dim*self.rank + 1 + (self.output_dim - 1) if kappa is not None: self.num_params+=self.output_dim if delay is not None: assert delay.shape==(self.output_dim-1,) self.num_params+=self.output_dim-1 self.rank = rank if W is None: self.W = 0.5*np.random.randn(self.output_dim,self.rank)/np.sqrt(self.rank) else: assert W.shape==(self.output_dim,self.rank) self.W = W if decay is None: self.decay = np.ones(self.output_dim-1) if kappa is not None: assert kappa.shape==(self.output_dim,) self.kappa = kappa self.delay = delay self.is_normalized = True self.is_stationary = False self.gaussian_initial = False self._set_params(self._get_params()) def _get_params(self): param_list = [self.W.flatten()] if self.kappa is not None: param_list.append(self.kappa) param_list.append(self.decay) if self.delay is not None: param_list.append(self.delay) param_list.append(self.lengthscale) return np.hstack(param_list) def _set_params(self,x): assert x.size == self.num_params end = self.output_dim*self.rank self.W = x[:end].reshape(self.output_dim,self.rank) start = end self.B = np.dot(self.W,self.W.T) if self.kappa is not None: end+=self.output_dim self.kappa = x[start:end] self.B += np.diag(self.kappa) start=end end+=self.output_dim-1 self.decay = x[start:end] start=end if self.delay is not None: end+=self.output_dim-1 self.delay = x[start:end] start=end end+=1 self.lengthscale = x[start] self.sigma = np.sqrt(2)*self.lengthscale def _get_param_names(self): param_names = sum([['W%i_%i'%(i,j) for j in range(self.rank)] for i in range(self.output_dim)],[]) if self.kappa is not None: param_names += ['kappa_%i'%i for i in range(self.output_dim)] param_names += ['decay_%i'%i for i in range(1,self.output_dim)] if self.delay is not None: param_names += 
['delay_%i'%i for i in 1+range(1,self.output_dim)] param_names+= ['lengthscale'] return param_names def K(self,X,X2,target): if X.shape[1] > 2: raise ValueError('Input matrix for ode1 covariance should have at most two columns, one containing times, the other output indices') self._K_computations(X, X2) target += self._scale*self._K_dvar if self.gaussian_initial: # Add covariance associated with initial condition. t1_mat = self._t[self._rorder, None] t2_mat = self._t2[None, self._rorder2] target+=self.initial_variance * np.exp(- self.decay * (t1_mat + t2_mat)) def Kdiag(self,index,target): #target += np.diag(self.B)[np.asarray(index,dtype=np.int).flatten()] pass def _param_grad_helper(self,dL_dK,X,X2,target): # First extract times and indices. self._extract_t_indices(X, X2, dL_dK=dL_dK) self._dK_ode_dtheta(target) def _dK_ode_dtheta(self, target): """Do all the computations for the ode parts of the covariance function.""" t_ode = self._t[self._index>0] dL_dK_ode = self._dL_dK[self._index>0, :] index_ode = self._index[self._index>0]-1 if self._t2 is None: if t_ode.size==0: return t2_ode = t_ode dL_dK_ode = dL_dK_ode[:, self._index>0] index2_ode = index_ode else: t2_ode = self._t2[self._index2>0] dL_dK_ode = dL_dK_ode[:, self._index2>0] if t_ode.size==0 or t2_ode.size==0: return index2_ode = self._index2[self._index2>0]-1 h1 = self._compute_H(t_ode, index_ode, t2_ode, index2_ode, stationary=self.is_stationary, update_derivatives=True) #self._dK_ddelay = self._dh_ddelay self._dK_dsigma = self._dh_dsigma if self._t2 is None: h2 = h1 else: h2 = self._compute_H(t2_ode, index2_ode, t_ode, index_ode, stationary=self.is_stationary, update_derivatives=True) #self._dK_ddelay += self._dh_ddelay.T self._dK_dsigma += self._dh_dsigma.T # C1 = self.sensitivity # C2 = self.sensitivity # K = 0.5 * (h1 + h2.T) # var2 = C1*C2 # if self.is_normalized: # dk_dD1 = (sum(sum(dL_dK.*dh1_dD1)) + sum(sum(dL_dK.*dh2_dD1.T)))*0.5*var2 # dk_dD2 = (sum(sum(dL_dK.*dh1_dD2)) + 
sum(sum(dL_dK.*dh2_dD2.T)))*0.5*var2 # dk_dsigma = 0.5 * var2 * sum(sum(dL_dK.*dK_dsigma)) # dk_dC1 = C2 * sum(sum(dL_dK.*K)) # dk_dC2 = C1 * sum(sum(dL_dK.*K)) # else: # K = np.sqrt(np.pi) * K # dk_dD1 = (sum(sum(dL_dK.*dh1_dD1)) + * sum(sum(dL_dK.*K)) # dk_dC2 = self.sigma * C1 * sum(sum(dL_dK.*K)) # dk_dSim1Variance = dk_dC1 # Last element is the length scale. (dL_dK_ode[:, :, None]*self._dh_ddelay[:, None, :]).sum(2) target[-1] += (dL_dK_ode*self._dK_dsigma/np.sqrt(2)).sum() # # only pass the gradient with respect to the inverse width to one # # of the gradient vectors ... otherwise it is counted twice. # g1 = real([dk_dD1 dk_dinvWidth dk_dSim1Variance]) # g2 = real([dk_dD2 0 dk_dSim2Variance]) # return g1, g2""" def dKdiag_dtheta(self,dL_dKdiag,index,target): pass def gradients_X(self,dL_dK,X,X2,target): pass def _extract_t_indices(self, X, X2=None, dL_dK=None): """Extract times and output indices from the input matrix X. Times are ordered according to their index for convenience of computation, this ordering is stored in self._order and self.order2. These orderings are then mapped back to the original ordering (in X) using self._rorder and self._rorder2. """ # TODO: some fast checking here to see if this needs recomputing? self._t = X[:, 0] if not X.shape[1] == 2: raise ValueError('Input matrix for ode1 covariance should have two columns, one containing times, the other output indices') self._index = np.asarray(X[:, 1],dtype=np.int) # Sort indices so that outputs are in blocks for computational # convenience. 
self._order = self._index.argsort() self._index = self._index[self._order] self._t = self._t[self._order] self._rorder = self._order.argsort() # rorder is for reversing the order if X2 is None: self._t2 = None self._index2 = None self._order2 = self._order self._rorder2 = self._rorder else: if not X2.shape[1] == 2: raise ValueError('Input matrix for ode1 covariance should have two columns, one containing times, the other output indices') self._t2 = X2[:, 0] self._index2 = np.asarray(X2[:, 1],dtype=np.int) self._order2 = self._index2.argsort() self._index2 = self._index2[self._order2] self._t2 = self._t2[self._order2] self._rorder2 = self._order2.argsort() # rorder2 is for reversing order if dL_dK is not None: self._dL_dK = dL_dK[self._order, :] self._dL_dK = self._dL_dK[:, self._order2] def _K_computations(self, X, X2): """Perform main body of computations for the ode1 covariance function.""" # First extract times and indices. self._extract_t_indices(X, X2) self._K_compute_eq() self._K_compute_ode_eq() if X2 is None: self._K_eq_ode = self._K_ode_eq.T else: self._K_compute_ode_eq(transpose=True) self._K_compute_ode() if X2 is None: self._K_dvar = np.zeros((self._t.shape[0], self._t.shape[0])) else: self._K_dvar = np.zeros((self._t.shape[0], self._t2.shape[0])) # Reorder values of blocks for placing back into _K_dvar. 
self._K_dvar = np.vstack((np.hstack((self._K_eq, self._K_eq_ode)), np.hstack((self._K_ode_eq, self._K_ode)))) self._K_dvar = self._K_dvar[self._rorder, :] self._K_dvar = self._K_dvar[:, self._rorder2] if X2 is None: # Matrix giving scales of each output self._scale = np.zeros((self._t.size, self._t.size)) code=""" for(int i=0;i<N; i++){ scale_mat[i+i*N] = B[index[i]+output_dim*(index[i])]; for(int j=0; j<i; j++){ scale_mat[j+i*N] = B[index[i]+output_dim*index[j]]; scale_mat[i+j*N] = scale_mat[j+i*N]; } } """ scale_mat, B, index = self._scale, self.B, self._index N, output_dim = self._t.size, self.output_dim weave.inline(code,['index', 'scale_mat', 'B', 'N', 'output_dim']) else: self._scale = np.zeros((self._t.size, self._t2.size)) code = """ for(int i=0; i<N; i++){ for(int j=0; j<N2; j++){ scale_mat[i+j*N] = B[index[i]+output_dim*index2[j]]; } } """ scale_mat, B, index, index2 = self._scale, self.B, self._index, self._index2 N, N2, output_dim = self._t.size, self._t2.size, self.output_dim weave.inline(code, ['index', 'index2', 'scale_mat', 'B', 'N', 'N2', 'output_dim']) def _K_compute_eq(self): """Compute covariance for latent covariance.""" t_eq = self._t[self._index==0] if self._t2 is None: if t_eq.size==0: self._K_eq = np.zeros((0, 0)) return self._dist2 = np.square(t_eq[:, None] - t_eq[None, :]) else: t2_eq = self._t2[self._index2==0] if t_eq.size==0 or t2_eq.size==0: self._K_eq = np.zeros((t_eq.size, t2_eq.size)) return self._dist2 = np.square(t_eq[:, None] - t2_eq[None, :]) self._K_eq = np.exp(-self._dist2/(2*self.lengthscale*self.lengthscale)) if self.is_normalized: self._K_eq/=(np.sqrt(2*np.pi)*self.lengthscale) def _K_compute_ode_eq(self, transpose=False): """Compute the cross covariances between latent exponentiated quadratic and observed ordinary differential equations. 
:param transpose: if set to false the exponentiated quadratic is on the rows of the matrix and is computed according to self._t, if set to true it is on the columns and is computed according to self._t2 (default=False). :type transpose: bool""" if self._t2 is not None: if transpose: t_eq = self._t[self._index==0] t_ode = self._t2[self._index2>0] index_ode = self._index2[self._index2>0]-1 else: t_eq = self._t2[self._index2==0] t_ode = self._t[self._index>0] index_ode = self._index[self._index>0]-1 else: t_eq = self._t[self._index==0] t_ode = self._t[self._index>0] index_ode = self._index[self._index>0]-1 if t_ode.size==0 or t_eq.size==0: if transpose: self._K_eq_ode = np.zeros((t_eq.shape[0], t_ode.shape[0])) else: self._K_ode_eq = np.zeros((t_ode.shape[0], t_eq.shape[0])) return t_ode_mat = t_ode[:, None] t_eq_mat = t_eq[None, :] if self.delay is not None: t_ode_mat -= self.delay[index_ode, None] diff_t = (t_ode_mat - t_eq_mat) inv_sigma_diff_t = 1./self.sigma*diff_t decay_vals = self.decay[index_ode][:, None] half_sigma_d_i = 0.5*self.sigma*decay_vals if self.is_stationary: ln_part, signs = ln_diff_erfs(inf, half_sigma_d_i - inv_sigma_diff_t, return_sign=True) else: ln_part, signs = ln_diff_erfs(half_sigma_d_i + t_eq_mat/self.sigma, half_sigma_d_i - inv_sigma_diff_t, return_sign=True) sK = signs*np.exp(half_sigma_d_i*half_sigma_d_i - decay_vals*diff_t + ln_part) sK *= 0.5 if not self.is_normalized: sK *= np.sqrt(np.pi)*self.sigma if transpose: self._K_eq_ode = sK.T else: self._K_ode_eq = sK def _K_compute_ode(self): # Compute covariances between outputs of the ODE models. 
t_ode = self._t[self._index>0] index_ode = self._index[self._index>0]-1 if self._t2 is None: if t_ode.size==0: self._K_ode = np.zeros((0, 0)) return t2_ode = t_ode index2_ode = index_ode else: t2_ode = self._t2[self._index2>0] if t_ode.size==0 or t2_ode.size==0: self._K_ode = np.zeros((t_ode.size, t2_ode.size)) return index2_ode = self._index2[self._index2>0]-1 # When index is identical h = self._compute_H(t_ode, index_ode, t2_ode, index2_ode, stationary=self.is_stationary) if self._t2 is None: self._K_ode = 0.5 * (h + h.T) else: h2 = self._compute_H(t2_ode, index2_ode, t_ode, index_ode, stationary=self.is_stationary) self._K_ode = 0.5 * (h + h2.T) if not self.is_normalized: self._K_ode *= np.sqrt(np.pi)*self.sigma def _compute_diag_H(self, t, index, update_derivatives=False, stationary=False): """Helper function for computing H for the diagonal only. :param t: time input. :type t: array :param index: first output indices :type index: array of int. :param index: second output indices :type index: array of int. :param update_derivatives: whether or not to update the derivative portions (default False). :type update_derivatives: bool :param stationary: whether to compute the stationary version of the covariance (default False). 
:type stationary: bool""" """if delta_i~=delta_j: [h, dh_dD_i, dh_dD_j, dh_dsigma] = np.diag(simComputeH(t, index, t, index, update_derivatives=True, stationary=self.is_stationary)) else: Decay = self.decay[index] if self.delay is not None: t = t - self.delay[index] t_squared = t*t half_sigma_decay = 0.5*self.sigma*Decay [ln_part_1, sign1] = ln_diff_erfs(half_sigma_decay + t/self.sigma, half_sigma_decay) [ln_part_2, sign2] = ln_diff_erfs(half_sigma_decay, half_sigma_decay - t/self.sigma) h = (sign1*np.exp(half_sigma_decay*half_sigma_decay + ln_part_1 - log(Decay + D_j)) - sign2*np.exp(half_sigma_decay*half_sigma_decay - (Decay + D_j)*t + ln_part_2 - log(Decay + D_j))) sigma2 = self.sigma*self.sigma if update_derivatives: dh_dD_i = ((0.5*Decay*sigma2*(Decay + D_j)-1)*h + t*sign2*np.exp( half_sigma_decay*half_sigma_decay-(Decay+D_j)*t + ln_part_2 ) + self.sigma/np.sqrt(np.pi)* (-1 + np.exp(-t_squared/sigma2-Decay*t) + np.exp(-t_squared/sigma2-D_j*t) - np.exp(-(Decay + D_j)*t))) dh_dD_i = (dh_dD_i/(Decay+D_j)).real dh_dD_j = (t*sign2*np.exp( half_sigma_decay*half_sigma_decay-(Decay + D_j)*t+ln_part_2 ) -h) dh_dD_j = (dh_dD_j/(Decay + D_j)).real dh_dsigma = 0.5*Decay*Decay*self.sigma*h \ + 2/(np.sqrt(np.pi)*(Decay+D_j))\ *((-Decay/2) \ + (-t/sigma2+Decay/2)*np.exp(-t_squared/sigma2 - Decay*t) \ - (-t/sigma2-Decay/2)*np.exp(-t_squared/sigma2 - D_j*t) \ - Decay/2*np.exp(-(Decay+D_j)*t))""" pass def _compute_H(self, t, index, t2, index2, update_derivatives=False, stationary=False): """Helper function for computing part of the ode1 covariance function. :param t: first time input. :type t: array :param index: Indices of first output. :type index: array of int :param t2: second time input. :type t2: array :param index2: Indices of second output. :type index2: array of int :param update_derivatives: whether to update derivatives (default is False) :return h : result of this subcomponent of the kernel for the given values. 
:rtype: ndarray """ if stationary: raise NotImplementedError, "Error, stationary version of this covariance not yet implemented." # Vector of decays and delays associated with each output. Decay = self.decay[index] Decay2 = self.decay[index2] t_mat = t[:, None] t2_mat = t2[None, :] if self.delay is not None: Delay = self.delay[index] Delay2 = self.delay[index2] t_mat-=Delay[:, None] t2_mat-=Delay2[None, :] diff_t = (t_mat - t2_mat) inv_sigma_diff_t = 1./self.sigma*diff_t half_sigma_decay_i = 0.5*self.sigma*Decay[:, None] ln_part_1, sign1 = ln_diff_erfs(half_sigma_decay_i + t2_mat/self.sigma, half_sigma_decay_i - inv_sigma_diff_t, return_sign=True) ln_part_2, sign2 = ln_diff_erfs(half_sigma_decay_i, half_sigma_decay_i - t_mat/self.sigma, return_sign=True) h = sign1*np.exp(half_sigma_decay_i *half_sigma_decay_i -Decay[:, None]*diff_t+ln_part_1 -np.log(Decay[:, None] + Decay2[None, :])) h -= sign2*np.exp(half_sigma_decay_i*half_sigma_decay_i -Decay[:, None]*t_mat-Decay2[None, :]*t2_mat+ln_part_2 -np.log(Decay[:, None] + Decay2[None, :])) if update_derivatives: sigma2 = self.sigma*self.sigma # Update ith decay gradient dh_ddecay = ((0.5*Decay[:, None]*sigma2*(Decay[:, None] + Decay2[None, :])-1)*h + (-diff_t*sign1*np.exp( half_sigma_decay_i*half_sigma_decay_i-Decay[:, None]*diff_t+ln_part_1 ) +t_mat*sign2*np.exp( half_sigma_decay_i*half_sigma_decay_i-Decay[:, None]*t_mat - Decay2*t2_mat+ln_part_2)) +self.sigma/np.sqrt(np.pi)*( -np.exp( -diff_t*diff_t/sigma2 )+np.exp( -t2_mat*t2_mat/sigma2-Decay[:, None]*t_mat )+np.exp( -t_mat*t_mat/sigma2-Decay2[None, :]*t2_mat )-np.exp( -(Decay[:, None]*t_mat + Decay2[None, :]*t2_mat) ) )) self._dh_ddecay = (dh_ddecay/(Decay[:, None]+Decay2[None, :])).real # Update jth decay gradient dh_ddecay2 = (t2_mat*sign2 *np.exp( half_sigma_decay_i*half_sigma_decay_i -(Decay[:, None]*t_mat + Decay2[None, :]*t2_mat) +ln_part_2 ) -h) self._dh_ddecay2 = (dh_ddecay/(Decay[:, None] + Decay2[None, :])).real # Update sigma gradient self._dh_dsigma = 
(half_sigma_decay_i*Decay[:, None]*h + 2/(np.sqrt(np.pi) *(Decay[:, None]+Decay2[None, :])) *((-diff_t/sigma2-Decay[:, None]/2) *np.exp(-diff_t*diff_t/sigma2) + (-t2_mat/sigma2+Decay[:, None]/2) *np.exp(-t2_mat*t2_mat/sigma2-Decay[:, None]*t_mat) - (-t_mat/sigma2-Decay[:, None]/2) *np.exp(-t_mat*t_mat/sigma2-Decay2[None, :]*t2_mat) - Decay[:, None]/2 *np.exp(-(Decay[:, None]*t_mat+Decay2[None, :]*t2_mat)))) return h
mit
airtrick/zxing
cpp/scons/scons-local-2.0.0.final.0/SCons/Tool/dmd.py
34
8613
"""SCons.Tool.dmd Tool-specific initialization for the Digital Mars D compiler. (http://digitalmars.com/d) Coded by Andy Friesen (andy@ikagames.com) 15 November 2003 There are a number of problems with this script at this point in time. The one that irritates me the most is the Windows linker setup. The D linker doesn't have a way to add lib paths on the commandline, as far as I can see. You have to specify paths relative to the SConscript or use absolute paths. To hack around it, add '#/blah'. This will link blah.lib from the directory where SConstruct resides. Compiler variables: DC - The name of the D compiler to use. Defaults to dmd or gdmd, whichever is found. DPATH - List of paths to search for import modules. DVERSIONS - List of version tags to enable when compiling. DDEBUG - List of debug tags to enable when compiling. Linker related variables: LIBS - List of library files to link in. DLINK - Name of the linker to use. Defaults to dmd or gdmd. DLINKFLAGS - List of linker flags. Lib tool variables: DLIB - Name of the lib tool to use. Defaults to lib. DLIBFLAGS - List of flags to pass to the lib tool. LIBS - Same as for the linker. (libraries to pull into the .lib) """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/dmd.py 5023 2010/06/14 22:05:46 scons" import os import SCons.Action import SCons.Builder import SCons.Defaults import SCons.Scanner.D import SCons.Tool # Adapted from c++.py def isD(source): if not source: return 0 for s in source: if s.sources: ext = os.path.splitext(str(s.sources[0]))[1] if ext == '.d': return 1 return 0 smart_link = {} smart_lib = {} def generate(env): global smart_link global smart_lib static_obj, shared_obj = SCons.Tool.createObjBuilders(env) DAction = SCons.Action.Action('$DCOM', '$DCOMSTR') static_obj.add_action('.d', DAction) shared_obj.add_action('.d', DAction) static_obj.add_emitter('.d', SCons.Defaults.StaticObjectEmitter) shared_obj.add_emitter('.d', SCons.Defaults.SharedObjectEmitter) dc = env.Detect(['dmd', 'gdmd']) env['DC'] = dc env['DCOM'] = '$DC $_DINCFLAGS $_DVERFLAGS $_DDEBUGFLAGS $_DFLAGS -c -of$TARGET $SOURCES' env['_DINCFLAGS'] = '$( ${_concat(DINCPREFIX, DPATH, DINCSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)' env['_DVERFLAGS'] = '$( ${_concat(DVERPREFIX, DVERSIONS, DVERSUFFIX, __env__)} $)' env['_DDEBUGFLAGS'] = '$( ${_concat(DDEBUGPREFIX, DDEBUG, DDEBUGSUFFIX, __env__)} $)' env['_DFLAGS'] = '$( ${_concat(DFLAGPREFIX, DFLAGS, DFLAGSUFFIX, __env__)} $)' env['DPATH'] = ['#/'] env['DFLAGS'] = [] env['DVERSIONS'] = [] env['DDEBUG'] = [] if dc: # Add the path to the standard library. # This is merely for the convenience of the dependency scanner. 
dmd_path = env.WhereIs(dc) if dmd_path: x = dmd_path.rindex(dc) phobosDir = dmd_path[:x] + '/../src/phobos' if os.path.isdir(phobosDir): env.Append(DPATH = [phobosDir]) env['DINCPREFIX'] = '-I' env['DINCSUFFIX'] = '' env['DVERPREFIX'] = '-version=' env['DVERSUFFIX'] = '' env['DDEBUGPREFIX'] = '-debug=' env['DDEBUGSUFFIX'] = '' env['DFLAGPREFIX'] = '-' env['DFLAGSUFFIX'] = '' env['DFILESUFFIX'] = '.d' # Need to use the Digital Mars linker/lib on windows. # *nix can just use GNU link. if env['PLATFORM'] == 'win32': env['DLINK'] = '$DC' env['DLINKCOM'] = '$DLINK -of$TARGET $SOURCES $DFLAGS $DLINKFLAGS $_DLINKLIBFLAGS' env['DLIB'] = 'lib' env['DLIBCOM'] = '$DLIB $_DLIBFLAGS -c $TARGET $SOURCES $_DLINKLIBFLAGS' env['_DLINKLIBFLAGS'] = '$( ${_concat(DLIBLINKPREFIX, LIBS, DLIBLINKSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)' env['_DLIBFLAGS'] = '$( ${_concat(DLIBFLAGPREFIX, DLIBFLAGS, DLIBFLAGSUFFIX, __env__)} $)' env['DLINKFLAGS'] = [] env['DLIBLINKPREFIX'] = '' env['DLIBLINKSUFFIX'] = '.lib' env['DLIBFLAGPREFIX'] = '-' env['DLIBFLAGSUFFIX'] = '' env['DLINKFLAGPREFIX'] = '-' env['DLINKFLAGSUFFIX'] = '' SCons.Tool.createStaticLibBuilder(env) # Basically, we hijack the link and ar builders with our own. # these builders check for the presence of D source, and swap out # the system's defaults for the Digital Mars tools. If there's no D # source, then we silently return the previous settings. linkcom = env.get('LINKCOM') try: env['SMART_LINKCOM'] = smart_link[linkcom] except KeyError: def _smartLink(source, target, env, for_signature, defaultLinker=linkcom): if isD(source): # XXX I'm not sure how to add a $DLINKCOMSTR variable # so that it works with this _smartLink() logic, # and I don't have a D compiler/linker to try it out, # so we'll leave it alone for now. 
return '$DLINKCOM' else: return defaultLinker env['SMART_LINKCOM'] = smart_link[linkcom] = _smartLink arcom = env.get('ARCOM') try: env['SMART_ARCOM'] = smart_lib[arcom] except KeyError: def _smartLib(source, target, env, for_signature, defaultLib=arcom): if isD(source): # XXX I'm not sure how to add a $DLIBCOMSTR variable # so that it works with this _smartLib() logic, and # I don't have a D compiler/archiver to try it out, # so we'll leave it alone for now. return '$DLIBCOM' else: return defaultLib env['SMART_ARCOM'] = smart_lib[arcom] = _smartLib # It is worth noting that the final space in these strings is # absolutely pivotal. SCons sees these as actions and not generators # if it is not there. (very bad) env['ARCOM'] = '$SMART_ARCOM ' env['LINKCOM'] = '$SMART_LINKCOM ' else: # assuming linux linkcom = env.get('LINKCOM') try: env['SMART_LINKCOM'] = smart_link[linkcom] except KeyError: def _smartLink(source, target, env, for_signature, defaultLinker=linkcom, dc=dc): if isD(source): try: libs = env['LIBS'] except KeyError: libs = [] if 'phobos' not in libs and 'gphobos' not in libs: if dc is 'dmd': env.Append(LIBS = ['phobos']) elif dc is 'gdmd': env.Append(LIBS = ['gphobos']) if 'pthread' not in libs: env.Append(LIBS = ['pthread']) if 'm' not in libs: env.Append(LIBS = ['m']) return defaultLinker env['SMART_LINKCOM'] = smart_link[linkcom] = _smartLink env['LINKCOM'] = '$SMART_LINKCOM ' def exists(env): return env.Detect(['dmd', 'gdmd']) # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
apache-2.0
jerrylei98/Dailydos
tasks_utils.py
1
1150
import sqlite3 def get_full_sql(user): conn = sqlite3.connect("tasks.db") c = conn.cursor() temp = [] for row in c.execute('SELECT * FROM tasks where email = "' + user + '";'): temp.append(row) conn.close() return temp def get_tasks(user): temp = get_full_sql(user) i = 0 temp2 = [] while(i < len(temp)): temp2.append(temp[i][1]) i+=1 return temp2 def remove_tasks(task_list): if len(task_list) > 0: conn = sqlite3.connect("tasks.db") c = conn.cursor() for item in task_list: c.execute('DELETE FROM tasks WHERE task="' + item + '";') conn.commit() conn.close() def clear_tasks(user): conn = sqlite3.connect("tasks.db") c = conn.cursor() c.execute('DELETE FROM tasks WHERE email="' + user + '";') conn.commit() conn.close() #temp = ['cheese', 'try 3', 'try again 217'] #remove_tasks(temp) #print get_tasks("jerrylei98@gmail.com")
mit
Mokona/python-p4lib
test/functionnal/which.py
1
12209
#!/usr/bin/env python # Copyright (c) 2002-2005 ActiveState Corp. # See LICENSE.txt for license details. # Author: # Trent Mick (TrentM@ActiveState.com) # Home: # http://trentm.com/projects/which/ r"""Find the full path to commands. which(command, path=None, verbose=0, exts=None) Return the full path to the first match of the given command on the path. whichall(command, path=None, verbose=0, exts=None) Return a list of full paths to all matches of the given command on the path. whichgen(command, path=None, verbose=0, exts=None) Return a generator which will yield full paths to all matches of the given command on the path. By default the PATH environment variable is searched (as well as, on Windows, the AppPaths key in the registry), but a specific 'path' list to search may be specified as well. On Windows, the PATHEXT environment variable is applied as appropriate. If "verbose" is true then a tuple of the form (<fullpath>, <matched-where-description>) is returned for each match. The latter element is a textual description of where the match was found. For example: from PATH element 0 from HKLM\SOFTWARE\...\perl.exe """ _cmdlnUsage = """ Show the full path of commands. Usage: which [<options>...] [<command-name>...] Options: -h, --help Print this help and exit. -V, --version Print the version info and exit. -a, --all Print *all* matching paths. -v, --verbose Print out how matches were located and show near misses on stderr. -q, --quiet Just print out matches. I.e., do not print out near misses. -p <altpath>, --path=<altpath> An alternative path (list of directories) may be specified for searching. -e <exts>, --exts=<exts> Specify a list of extensions to consider instead of the usual list (';'-separate list, Windows only). Show the full path to the program that would be run for each given command name, if any. Which, like GNU's which, returns the number of failed arguments, or -1 when no <command-name> was given. 
Near misses include duplicates, non-regular files and (on Un*x) files without executable access. """ __revision__ = "$Id: which.py 430 2005-08-20 03:11:58Z trentm $" __version_info__ = (1, 1, 0) __version__ = '.'.join(map(str, __version_info__)) import os import sys import getopt import stat #---- exceptions class WhichError(Exception): pass #---- internal support stuff def _getRegisteredExecutable(exeName): """Windows allow application paths to be registered in the registry.""" registered = None if sys.platform.startswith('win'): if os.path.splitext(exeName)[1].lower() != '.exe': exeName += '.exe' import _winreg try: key = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\" +\ exeName value = _winreg.QueryValue(_winreg.HKEY_LOCAL_MACHINE, key) registered = (value, "from HKLM\\" + key) except _winreg.error: pass if registered and not os.path.exists(registered[0]): registered = None return registered def _samefile(fname1, fname2): if sys.platform.startswith('win'): return (os.path.normpath(os.path.normcase(fname1)) == os.path.normpath(os.path.normcase(fname2))) else: return os.path.samefile(fname1, fname2) def _cull(potential, matches, verbose=0): """Cull inappropriate matches. Possible reasons: - a duplicate of a previous match - not a disk file - not executable (non-Windows) If 'potential' is approved it is returned and added to 'matches'. Otherwise, None is returned. """ for match in matches: # don't yield duplicates if _samefile(potential[0], match[0]): if verbose: sys.stderr.write("duplicate: %s (%s)\n" % potential) return None else: if not stat.S_ISREG(os.stat(potential[0]).st_mode): if verbose: sys.stderr.write("not a regular file: %s (%s)\n" % potential) elif not os.access(potential[0], os.X_OK): if verbose: sys.stderr.write("no executable access: %s (%s)\n" % potential) else: matches.append(potential) return potential #---- module API def whichgen(command, path=None, verbose=0, exts=None): """Return a generator of full paths to the given command. 
"command" is a the name of the executable to search for. "path" is an optional alternate path list to search. The default it to use the PATH environment variable. "verbose", if true, will cause a 2-tuple to be returned for each match. The second element is a textual description of where the match was found. "exts" optionally allows one to specify a list of extensions to use instead of the standard list for this system. This can effectively be used as an optimization to, for example, avoid stat's of "foo.vbs" when searching for "foo" and you know it is not a VisualBasic script but ".vbs" is on PATHEXT. This option is only supported on Windows. This method returns a generator which yields either full paths to the given command or, if verbose, tuples of the form (<path to command>, <where path found>). """ matches = [] if path is None: usingGivenPath = 0 path = os.environ.get("PATH", "").split(os.pathsep) if sys.platform.startswith("win"): path.insert(0, os.curdir) # implied by Windows shell else: usingGivenPath = 1 # Windows has the concept of a list of extensions (PATHEXT env var). if sys.platform.startswith("win"): if exts is None: exts = os.environ.get("PATHEXT", "").split(os.pathsep) # If '.exe' is not in exts then obviously this is Win9x and # or a bogus PATHEXT, then use a reasonable default. for ext in exts: if ext.lower() == ".exe": break else: exts = ['.COM', '.EXE', '.BAT'] elif not isinstance(exts, list): raise TypeError("'exts' argument must be a list or None") else: if exts is not None: raise WhichError("'exts' argument is not supported on " "platform '%s'" % sys.platform) exts = [] # File name cannot have path separators because PATH lookup does not # work that way. 
if os.sep in command or os.altsep and os.altsep in command: pass else: for i in range(len(path)): dirName = path[i] # On windows the dirName *could* be quoted, drop the quotes if sys.platform.startswith("win") and len(dirName) >= 2\ and dirName[0] == '"' and dirName[-1] == '"': dirName = dirName[1:-1] for ext in [''] + exts: absName = os.path.abspath( os.path.normpath(os.path.join(dirName, command + ext))) if os.path.isfile(absName): if usingGivenPath: fromWhere = "from given path element %d" % i elif not sys.platform.startswith("win"): fromWhere = "from PATH element %d" % i elif i == 0: fromWhere = "from current directory" else: fromWhere = "from PATH element %d" % (i - 1) match = _cull((absName, fromWhere), matches, verbose) if match: if verbose: yield match else: yield match[0] match = _getRegisteredExecutable(command) if match is not None: match = _cull(match, matches, verbose) if match: if verbose: yield match else: yield match[0] def which(command, path=None, verbose=0, exts=None): """Return the full path to the first match of the given command on the path. "command" is a the name of the executable to search for. "path" is an optional alternate path list to search. The default it to use the PATH environment variable. "verbose", if true, will cause a 2-tuple to be returned. The second element is a textual description of where the match was found. "exts" optionally allows one to specify a list of extensions to use instead of the standard list for this system. This can effectively be used as an optimization to, for example, avoid stat's of "foo.vbs" when searching for "foo" and you know it is not a VisualBasic script but ".vbs" is on PATHEXT. This option is only supported on Windows. If no match is found for the command, a WhichError is raised. """ match_list = list(whichgen(command, path, verbose, exts)) if not match_list: raise WhichError("Could not find '%s' on the path." 
% command) else: return match_list[0] def whichall(command, path=None, verbose=0, exts=None): """Return a list of full paths to all matches of the given command on the path. "command" is a the name of the executable to search for. "path" is an optional alternate path list to search. The default it to use the PATH environment variable. "verbose", if true, will cause a 2-tuple to be returned for each match. The second element is a textual description of where the match was found. "exts" optionally allows one to specify a list of extensions to use instead of the standard list for this system. This can effectively be used as an optimization to, for example, avoid stat's of "foo.vbs" when searching for "foo" and you know it is not a VisualBasic script but ".vbs" is on PATHEXT. This option is only supported on Windows. """ return list(whichgen(command, path, verbose, exts)) #---- mainline def main(argv): all = 0 verbose = 0 altpath = None exts = None try: optlist, args = getopt.getopt(argv[1:], 'haVvqp:e:', ['help', 'all', 'version', 'verbose', 'quiet', 'path=', 'exts=']) except getopt.GetoptError as msg: sys.stderr.write("which: error: %s. 
Your invocation was: %s\n" % (msg, argv)) sys.stderr.write("Try 'which --help'.\n") return 1 for opt, optarg in optlist: if opt in ('-h', '--help'): print (_cmdlnUsage) return 0 elif opt in ('-V', '--version'): print ("which %s" % __version__) return 0 elif opt in ('-a', '--all'): all = 1 elif opt in ('-v', '--verbose'): verbose = 1 elif opt in ('-q', '--quiet'): verbose = 0 elif opt in ('-p', '--path'): if optarg: altpath = optarg.split(os.pathsep) else: altpath = [] elif opt in ('-e', '--exts'): if optarg: exts = optarg.split(os.pathsep) else: exts = [] if len(args) == 0: return -1 failures = 0 for arg in args: #print "debug: search for %r" % arg nmatches = 0 for match in whichgen(arg, path=altpath, verbose=verbose, exts=exts): if verbose: print ("%s (%s)" % match) else: print (match) nmatches += 1 if not all: break if not nmatches: failures += 1 return failures if __name__ == "__main__": sys.exit(main(sys.argv))
mit
jhayworth/config
.emacs.d/elpy/rpc-venv/local/lib/python2.7/site-packages/pip/_vendor/chardet/sjisprober.py
290
3774
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is mozilla.org code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .mbcharsetprober import MultiByteCharSetProber from .codingstatemachine import CodingStateMachine from .chardistribution import SJISDistributionAnalysis from .jpcntx import SJISContextAnalysis from .mbcssm import SJIS_SM_MODEL from .enums import ProbingState, MachineState class SJISProber(MultiByteCharSetProber): def __init__(self): super(SJISProber, self).__init__() self.coding_sm = CodingStateMachine(SJIS_SM_MODEL) self.distribution_analyzer = SJISDistributionAnalysis() self.context_analyzer = SJISContextAnalysis() self.reset() def reset(self): super(SJISProber, self).reset() self.context_analyzer.reset() @property def charset_name(self): return self.context_analyzer.charset_name @property def language(self): return "Japanese" def feed(self, byte_str): for i in range(len(byte_str)): coding_state = 
self.coding_sm.next_state(byte_str[i]) if coding_state == MachineState.ERROR: self.logger.debug('%s %s prober hit error at byte %s', self.charset_name, self.language, i) self._state = ProbingState.NOT_ME break elif coding_state == MachineState.ITS_ME: self._state = ProbingState.FOUND_IT break elif coding_state == MachineState.START: char_len = self.coding_sm.get_current_charlen() if i == 0: self._last_char[1] = byte_str[0] self.context_analyzer.feed(self._last_char[2 - char_len:], char_len) self.distribution_analyzer.feed(self._last_char, char_len) else: self.context_analyzer.feed(byte_str[i + 1 - char_len:i + 3 - char_len], char_len) self.distribution_analyzer.feed(byte_str[i - 1:i + 1], char_len) self._last_char[0] = byte_str[-1] if self.state == ProbingState.DETECTING: if (self.context_analyzer.got_enough_data() and (self.get_confidence() > self.SHORTCUT_THRESHOLD)): self._state = ProbingState.FOUND_IT return self.state def get_confidence(self): context_conf = self.context_analyzer.get_confidence() distrib_conf = self.distribution_analyzer.get_confidence() return max(context_conf, distrib_conf)
gpl-3.0
askhl/ase
ase/gui/colors.py
1
30815
# encoding: utf-8 """colors.py - select how to color the atoms in the GUI.""" import gtk from gettext import gettext as _ from ase.gui.widgets import pack, cancel_apply_ok, oops, help import ase from ase.data.colors import jmol_colors import numpy as np import colorsys named_colors = ('Green', 'Yellow', 'Blue', 'Red', 'Orange', 'Cyan', 'Magenta', 'Black', 'White', 'Grey', 'Violet', 'Brown', 'Navy') class ColorWindow(gtk.Window): "A window for selecting how to color the atoms." def __init__(self, gui): gtk.Window.__init__(self) self.gui = gui self.colormode = gui.colormode self.actual_colordata = None self.set_title(_("Colors")) vbox = gtk.VBox() self.add(vbox) vbox.show() # The main layout consists of two columns, the leftmost split in an upper and lower part. self.maintable = gtk.Table(2,2) pack(vbox, self.maintable) self.methodbox = gtk.VBox() self.methodbox.show() self.maintable.attach(self.methodbox, 0, 1, 0, 1) self.scalebox = gtk.VBox() self.scalebox.show() self.maintable.attach(self.scalebox, 0, 1, 1, 2) self.colorbox = gtk.Frame() self.colorbox.show() self.maintable.attach(self.colorbox, 1, 2, 0, 2, gtk.EXPAND) # Upper left: Choose how the atoms are colored. 
lbl = gtk.Label(_("Choose how the atoms are colored:")) pack(self.methodbox, [lbl]) self.radio_jmol = gtk.RadioButton(None, _('By atomic number, default "jmol" colors')) self.radio_atno = gtk.RadioButton(self.radio_jmol, _('By atomic number, user specified')) self.radio_tag = gtk.RadioButton(self.radio_jmol, _('By tag')) self.radio_force = gtk.RadioButton(self.radio_jmol, _('By force')) self.radio_velocity = gtk.RadioButton(self.radio_jmol, _('By velocity')) self.radio_charge = gtk.RadioButton(self.radio_jmol, _('By charge')) self.radio_coordination = gtk.RadioButton( self.radio_jmol, _('By coordination')) self.radio_manual = gtk.RadioButton(self.radio_jmol, _('Manually specified')) self.radio_same = gtk.RadioButton(self.radio_jmol, _('All the same color')) self.force_box = gtk.VBox() self.velocity_box = gtk.VBox() self.charge_box = gtk.VBox() for widget in (self.radio_jmol, self.radio_atno, self.radio_tag, self.radio_force, self.force_box, self.radio_velocity, self.radio_charge, self.charge_box, self.radio_coordination, self.velocity_box, self.radio_manual, self.radio_same): pack(self.methodbox, [widget]) if isinstance(widget, gtk.RadioButton): widget.connect('toggled', self.method_radio_changed) # Now fill in the box for additional information in case the force is used. self.force_label = gtk.Label(_("This should not be displayed!")) pack(self.force_box, [self.force_label]) self.force_min = gtk.Adjustment(0.0, 0.0, 100.0, 0.05) self.force_max = gtk.Adjustment(0.0, 0.0, 100.0, 0.05) self.force_steps = gtk.Adjustment(10, 2, 500, 1) force_apply = gtk.Button(_('Update')) force_apply.connect('clicked', self.set_force_colors) pack(self.force_box, [gtk.Label(_('Min: ')), gtk.SpinButton(self.force_min, 1.0, 2), gtk.Label(_(' Max: ')), gtk.SpinButton(self.force_max, 1.0, 2), gtk.Label(_(' Steps: ')), gtk.SpinButton(self.force_steps, 1, 0), gtk.Label(' '), force_apply]) self.force_box.hide() # Now fill in the box for additional information in case the velocity is used. 
self.velocity_label = gtk.Label("This should not be displayed!") pack(self.velocity_box, [self.velocity_label]) self.velocity_min = gtk.Adjustment(0.0, 0.0, 100.0, 0.005) self.velocity_max = gtk.Adjustment(0.0, 0.0, 100.0, 0.005) self.velocity_steps = gtk.Adjustment(10, 2, 500, 1) velocity_apply = gtk.Button(_('Update')) velocity_apply.connect('clicked', self.set_velocity_colors) pack(self.velocity_box, [gtk.Label(_('Min: ')), gtk.SpinButton(self.velocity_min, 1.0, 3), gtk.Label(_(' Max: ')), gtk.SpinButton(self.velocity_max, 1.0, 3), gtk.Label(_(' Steps: ')), gtk.SpinButton(self.velocity_steps, 1, 0), gtk.Label(' '), velocity_apply]) self.velocity_box.hide() # Now fill in the box for additional information in case # the charge is used. self.charge_label = gtk.Label(_("This should not be displayed!")) pack(self.charge_box, [self.charge_label]) self.charge_min = gtk.Adjustment(0.0, -100.0, 100.0, 0.05) self.charge_max = gtk.Adjustment(0.0, -100.0, 100.0, 0.05) self.charge_steps = gtk.Adjustment(10, 2, 500, 1) charge_apply = gtk.Button(_('Update')) charge_apply.connect('clicked', self.set_charge_colors) pack(self.charge_box, [gtk.Label(_('Min: ')), gtk.SpinButton(self.charge_min, 10.0, 2), gtk.Label(_(' Max: ')), gtk.SpinButton(self.charge_max, 10.0, 2), gtk.Label(_(' Steps: ')), gtk.SpinButton(self.charge_steps, 1, 0), gtk.Label(' '), charge_apply]) self.charge_box.hide() # Lower left: Create a color scale pack(self.scalebox, gtk.Label("")) lbl = gtk.Label(_('Create a color scale:')) pack(self.scalebox, [lbl]) color_scales = ( _('Black - white'), _('Black - red - yellow - white'), _('Black - green - white'), _('Black - blue - cyan'), _('Blue - white - red'), _('Hue'), _('Named colors') ) self.scaletype_created = None self.scaletype = gtk.combo_box_new_text() for s in color_scales: self.scaletype.append_text(s) self.createscale = gtk.Button(_("Create")) pack(self.scalebox, [self.scaletype, self.createscale]) self.createscale.connect('clicked', 
self.create_color_scale) # The actually colors are specified in a box possibly with scrollbars self.colorwin = gtk.ScrolledWindow() self.colorwin.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC) self.colorwin.show() self.colorbox.add(self.colorwin) self.colorwin.add_with_viewport(gtk.VBox()) # Dummy contents buts = cancel_apply_ok(cancel=lambda widget: self.destroy(), apply=self.apply, ok=self.ok) pack(vbox, [buts], end=True, bottom=True) # Make the initial setup of the colors self.color_errors = {} self.init_colors_from_gui() self.show() gui.register_vulnerable(self) def notify_atoms_changed(self): "Called by gui object when the atoms have changed." self.destroy() def init_colors_from_gui(self): cm = self.gui.colormode # Disallow methods if corresponding data is not available if not self.gui.images.T.any(): self.radio_tag.set_sensitive(False) if self.radio_tag.get_active() or cm == 'tag': self.radio_jmol.set_active(True) return else: self.radio_tag.set_sensitive(True) if np.isnan(self.gui.images.F).any() or not self.gui.images.F.any(): self.radio_force.set_sensitive(False) if self.radio_force.get_active() or cm == 'force': self.radio_jmol.set_active(True) return else: self.radio_force.set_sensitive(True) if np.isnan(self.gui.images.V).any() or not self.gui.images.V.any(): self.radio_velocity.set_sensitive(False) if self.radio_velocity.get_active() or cm == 'velocity': self.radio_jmol.set_active(True) return else: self.radio_velocity.set_sensitive(True) if not self.gui.images.q.any(): self.radio_charge.set_sensitive(False) else: self.radio_charge.set_sensitive(True) self.radio_manual.set_sensitive(self.gui.images.natoms <= 1000) # Now check what the current color mode is if cm == 'jmol': self.radio_jmol.set_active(True) self.set_jmol_colors() elif cm == 'atno': self.radio_atno.set_active(True) elif cm == 'tags': self.radio_tag.set_active(True) elif cm == 'force': self.radio_force.set_active(True) elif cm == 'velocity': self.radio_velocity.set_active(True) elif cm 
== 'charge': self.radio_charge.set_active(True) elif cm == 'coordination': self.radio_coordination.set_active(True) elif cm == 'manual': self.radio_manual.set_active(True) elif cm == 'same': self.radio_same.set_active(True) def method_radio_changed(self, widget=None): "Called when a radio button is changed." self.scaletype_created = None self.scaletype.set_active(-1) if not widget.get_active(): # Ignore most events when a button is turned off. if widget is self.radio_force: self.force_box.hide() if widget is self.radio_velocity: self.velocity_box.hide() return if widget is self.radio_jmol: self.set_jmol_colors() elif widget is self.radio_atno: self.set_atno_colors() elif widget is self.radio_tag: self.set_tag_colors() elif widget is self.radio_force: self.show_force_stuff() self.set_force_colors() elif widget is self.radio_velocity: self.show_velocity_stuff() self.set_velocity_colors() elif widget is self.radio_charge: self.show_charge_stuff() self.set_charge_colors() elif widget is self.radio_coordination: self.set_coordination_colors() elif widget is self.radio_manual: self.set_manual_colors() elif widget is self.radio_same: self.set_same_color() else: raise RuntimeError('Unknown widget in method_radio_changed') def make_jmol_colors(self): "Set the colors to the default jmol colors" self.colordata_z = [] hasfound = {} for z in self.gui.images.Z: if z not in hasfound: hasfound[z] = True self.colordata_z.append([z, jmol_colors[z]]) def set_jmol_colors(self): "We use the immutable jmol colors." self.make_jmol_colors() self.set_atno_colors() for entry in self.color_entries: entry.set_sensitive(False) self.colormode = 'jmol' def set_atno_colors(self): "We use user-specified per-element colors." if not hasattr(self, 'colordata_z'): # No initial colors. 
Use jmol colors self.make_jmol_colors() self.actual_colordata = self.colordata_z self.color_labels = ["%i (%s):" % (z, ase.data.chemical_symbols[z]) for z, col in self.colordata_z] self.make_colorwin() self.colormode = 'atno' def set_tag_colors(self): "We use per-tag colors." # Find which tags are in use tags = self.gui.images.T existingtags = range(tags.min(), tags.max()+1) if not hasattr(self, 'colordata_tags') or len(self.colordata_tags) != len(existingtags): colors = self.get_named_colors(len(existingtags)) self.colordata_tags = [[x, y] for x, y in zip(existingtags, colors)] self.actual_colordata = self.colordata_tags self.color_labels = [str(x)+':' for x, y in self.colordata_tags] self.make_colorwin() self.colormode = 'tags' def set_same_color(self): "All atoms have the same color" if not hasattr(self, 'colordata_same'): try: self.colordata_same = self.actual_colordata[0:1] except AttributeError: self.colordata_same = self.get_named_colors(1) self.actual_colordata = self.colordata_same self.actual_colordata[0][0] = 0 self.color_labels = ['all:'] self.make_colorwin() self.colormode = 'same' def set_force_colors(self, *args): "Use the forces as basis for the colors." 
borders = np.linspace(self.force_min.value, self.force_max.value, self.force_steps.value, endpoint=False) if self.scaletype_created is None: colors = self.new_color_scale([[0, [1,1,1]], [1, [0,0,1]]], len(borders)) elif (not hasattr(self, 'colordata_force') or len(self.colordata_force) != len(borders)): colors = self.get_color_scale(len(borders), self.scaletype_created) else: colors = [y for x, y in self.colordata_force] self.colordata_force = [[x, y] for x, y in zip(borders, colors)] self.actual_colordata = self.colordata_force self.color_labels = ["%.2f:" % x for x, y in self.colordata_force] self.make_colorwin() self.colormode = 'force' fmin = self.force_min.value fmax = self.force_max.value factor = self.force_steps.value / (fmax -fmin) self.colormode_force_data = (fmin, factor) def set_velocity_colors(self, *args): "Use the velocities as basis for the colors." borders = np.linspace(self.velocity_min.value, self.velocity_max.value, self.velocity_steps.value, endpoint=False) if self.scaletype_created is None: colors = self.new_color_scale([[0, [1,1,1]], [1, [1,0,0]]], len(borders)) elif (not hasattr(self, 'colordata_velocity') or len(self.colordata_velocity) != len(borders)): colors = self.get_color_scale(len(borders), self.scaletype_created) else: colors = [y for x, y in self.colordata_velocity] self.colordata_velocity = [[x, y] for x, y in zip(borders, colors)] self.actual_colordata = self.colordata_velocity self.color_labels = ["%.2f:" % x for x, y in self.colordata_velocity] self.make_colorwin() self.colormode = 'velocity' vmin = self.velocity_min.value vmax = self.velocity_max.value factor = self.velocity_steps.value / (vmax -vmin) self.colormode_velocity_data = (vmin, factor) def set_charge_colors(self, *args): "Use the charge as basis for the colors." 
borders = np.linspace(self.charge_min.value, self.charge_max.value, self.charge_steps.value, endpoint=False) if self.scaletype_created is None: colors = self.new_color_scale([[0, [1,1,1]], [1, [0,0,1]]], len(borders)) elif (not hasattr(self, 'colordata_charge') or len(self.colordata_charge) != len(borders)): colors = self.get_color_scale(len(borders), self.scaletype_created) else: colors = [y for x, y in self.colordata_charge] self.colordata_charge = [[x, y] for x, y in zip(borders, colors)] self.actual_colordata = self.colordata_charge self.color_labels = ["%.2f:" % x for x, y in self.colordata_charge] self.make_colorwin() self.colormode = 'charge' qmin = self.charge_min.value qmax = self.charge_max.value factor = self.charge_steps.value / (qmax - qmin) self.colormode_charge_data = (qmin, factor) def set_coordination_colors(self, *args): "Use coordination as basis for the colors." if not hasattr(self.gui, 'coordination'): self.gui.toggle_show_bonds(None) coords = self.gui.coordination existing = range(0, coords.max() + 1) if not hasattr(self, 'colordata_coordination'): colors = self.get_named_colors(len(named_colors)) self.colordata_coordination = [[x, y] for x, y in enumerate(colors)] self.actual_colordata = self.colordata_coordination self.color_labels = [(str(x) + ':') for x, y in self.colordata_coordination] self.make_colorwin() self.colormode = 'coordination' def set_manual_colors(self): "Set colors of all atoms from the last selection." # We cannot directly make np.arrays of the colors, as they may # be sequences of the same length, causing creation of a 2D # array of characters/numbers instead of a 1D array of # objects. 
colors = np.array([None] * self.gui.images.natoms) if self.colormode in ['atno', 'jmol', 'tags']: maxval = max([x for x, y in self.actual_colordata]) oldcolors = np.array([None] * (maxval+1)) for x, y in self.actual_colordata: oldcolors[x] = y if self.colormode == 'tags': colors[:] = oldcolors[self.gui.images.T[self.gui.frame]] else: colors[:] = oldcolors[self.gui.images.Z] elif self.colormode == 'force': oldcolors = np.array([None] * len(self.actual_colordata)) oldcolors[:] = [y for x, y in self.actual_colordata] F = self.gui.images.F[self.gui.frame] F = np.sqrt((F * F).sum(axis=-1)) nF = (F - self.colormode_force_data[0]) * self.colormode_force_data[1] nF = np.clip(nF.astype(int), 0, len(oldcolors)-1) colors[:] = oldcolors[nF] elif self.colormode == 'velocity': oldcolors = np.array([None] * len(self.actual_colordata)) oldcolors[:] = [y for x, y in self.actual_colordata] V = self.gui.images.V[self.gui.frame] V = np.sqrt((V * V).sum(axis=-1)) nV = (V - self.colormode_velocity_data[0]) * self.colormode_velocity_data[1] nV = np.clip(nV.astype(int), 0, len(oldcolors)-1) colors[:] = oldcolors[nV] elif self.colormode == 'charge': oldcolors = np.array([None] * len(self.actual_colordata)) oldcolors[:] = [y for x, y in self.actual_colordata] q = self.gui.images.q[self.gui.frame] nq = ((q - self.colormode_charge_data[0]) * self.colormode_charge_data[1]) nq = np.clip(nq.astype(int), 0, len(oldcolors)-1) ## print "nq = ", nq colors[:] = oldcolors[nq] elif self.colormode == 'coordination': oldcolors = np.array([None] * len(self.actual_colordata)) oldcolors[:] = [y for x, y in self.actual_colordata] print self.gui.images.bonds elif self.colormode == 'same': oldcolor = self.actual_colordata[0][1] if len(colors) == len(oldcolor): # Direct assignment would be e.g. one letter per atom. 
:-( colors[:] = [oldcolor] * len(colors) else: colors[:] = oldcolor elif self.colormode == 'manual': if self.actual_colordata is None: # import colors from gui, if they don't exist already colors = [y for x,y in self.gui.colordata] self.color_labels = ["%d:" % i for i in range(len(colors))] self.actual_colordata = [[i, x] for i, x in enumerate(colors)] self.make_colorwin() self.colormode = 'manual' def show_force_stuff(self): "Show and update widgets needed for selecting the force scale." self.force_box.show() F = np.sqrt(((self.gui.images.F*self.gui.images.dynamic[:,np.newaxis])**2).sum(axis=-1)) fmax = F.max() nimages = self.gui.images.nimages assert len(F) == nimages if nimages > 1: fmax_frame = self.gui.images.F[self.gui.frame].max() txt = _("Max force: %.2f (this frame), %.2f (all frames)") % (fmax_frame, fmax) else: txt = _("Max force: %.2f.") % (fmax,) self.force_label.set_text(txt) if self.force_max.value == 0.0: self.force_max.value = fmax def show_velocity_stuff(self): "Show and update widgets needed for selecting the velocity scale." self.velocity_box.show() V = np.sqrt((self.gui.images.V * self.gui.images.V).sum(axis=-1)) vmax = V.max() nimages = self.gui.images.nimages assert len(V) == nimages if nimages > 1: vmax_frame = self.gui.images.V[self.gui.frame].max() txt = _("Max velocity: %.2f (this frame), %.2f (all frames)") % (vmax_frame, vmax) else: txt = _("Max velocity: %.2f.") % (vmax,) self.velocity_label.set_text(txt) if self.velocity_max.value == 0.0: self.velocity_max.value = vmax def show_charge_stuff(self): "Show and update widgets needed for selecting the charge scale." 
self.charge_box.show() qmin = self.gui.images.q.min() qmax = self.gui.images.q.max() nimages = self.gui.images.nimages if nimages > 1: qmin_frame = self.gui.images.q[self.gui.frame].min() qmax_frame = self.gui.images.q[self.gui.frame].max() txt = (_('Min, max charge: %.2f, %.2f (this frame),' + '%.2f, %.2f (all frames)') % (qmin_frame, qmax_frame, qmin, qmax)) else: txt = _("Min, max charge: %.2f, %.2f.") % (qmin, qmax,) self.charge_label.set_text(txt) self.charge_max.value = qmax self.charge_min.value = qmin def make_colorwin(self): """Make the list of editable color entries. Uses self.actual_colordata and self.color_labels. Produces self.color_entries. """ assert len(self.actual_colordata) == len(self.color_labels) self.color_entries = [] old = self.colorwin.get_child() self.colorwin.remove(old) del old table = gtk.Table(len(self.actual_colordata)+1, 4) self.colorwin.add_with_viewport(table) table.show() self.color_display = [] for i in range(len(self.actual_colordata)): lbl = gtk.Label(self.color_labels[i]) entry = gtk.Entry(max=20) val = self.actual_colordata[i][1] error = False if not isinstance(val, str): assert len(val) == 3 intval = tuple(np.round(65535*np.array(val)).astype(int)) val = "%.3f, %.3f, %.3f" % tuple(val) clr = gtk.gdk.Color(*intval) else: try: clr = gtk.gdk.color_parse(val) except ValueError: error = True entry.set_text(val) blob = gtk.EventBox() space = gtk.Label space = gtk.Label(" ") space.show() blob.add(space) if error: space.set_text(_("ERROR")) else: blob.modify_bg(gtk.STATE_NORMAL, clr) table.attach(lbl, 0, 1, i, i+1, yoptions=0) table.attach(entry, 1, 2, i, i+1, yoptions=0) table.attach(blob, 2, 3, i, i+1, yoptions=0) lbl.show() entry.show() blob.show() entry.connect('changed', self.entry_changed, i) self.color_display.append(blob) self.color_entries.append(entry) def entry_changed(self, widget, index): """The user has changed a color.""" txt = widget.get_text() txtfields = txt.split(',') if len(txtfields) == 3: 
self.actual_colordata[index][1] = [float(x) for x in txtfields] val = tuple([int(65535*float(x)) for x in txtfields]) clr = gtk.gdk.Color(*val) else: self.actual_colordata[index][1] = txt try: clr = gtk.gdk.color_parse(txt) except ValueError: # Cannot parse the color displ = self.color_display[index] displ.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse('white')) displ.get_child().set_text(_("ERR")) self.color_errors[index] = (self.color_labels[index], txt) return self.color_display[index].get_child().set_text(" ") # Clear error message self.color_errors.pop(index, None) self.color_display[index].modify_bg(gtk.STATE_NORMAL, clr) def create_color_scale(self, *args): if self.radio_jmol.get_active(): self.radio_atno.set_active(1) n = len(self.color_entries) s = self.scaletype.get_active() scale = self.get_color_scale(n, s) self.scaletype_created = s for i in range(n): if isinstance(scale[i], str): self.color_entries[i].set_text(scale[i]) else: s = "%.3f, %.3f, %.3f" % tuple(scale[i]) self.color_entries[i].set_text(s) self.color_entries[i].activate() def get_color_scale(self, n, s): if s == 0: # Black - White scale = self.new_color_scale([[0, [0,0,0]], [1, [1,1,1]]], n) elif s == 1: # Black - Red - Yellow - White (STM colors) scale = self.new_color_scale([[0, [0,0,0]], [0.33, [1,0,0]], [0.67, [1,1,0]], [1, [1,1,1]]], n) elif s == 2: # Black - Green - White scale = self.new_color_scale([[0, [0,0,0]], [0.5, [0,0.9,0]], [0.75, [0.2,1.0,0.2]], [1, [1,1,1]]], n) elif s == 3: # Black - Blue - Cyan scale = self.new_color_scale([[0, [0,0,0]], [0.5, [0,0,1]], [1, [0,1,1]]], n) elif s == 4: # Blue - White - Red scale = self.new_color_scale([[0, [0,0,1]], [0.5, [1,1,1]], [2, [1,0,0]]], n) elif s == 5: # Hues hues = np.linspace(0.0, 1.0, n, endpoint=False) scale = ["%.3f, %.3f, %.3f" % colorsys.hls_to_rgb(h, 0.5, 1) for h in hues] elif s == 6: # Named colors scale = self.get_named_colors(n) else: scale = None return scale def new_color_scale(self, fixpoints, n): "Create a 
homogeneous color scale." x = np.array([a[0] for a in fixpoints], float) y = np.array([a[1] for a in fixpoints], float) assert y.shape[1] == 3 res = [] for a in np.linspace(0.0, 1.0, n, endpoint=True): n = x.searchsorted(a) if n == 0: v = y[0] # Before the start elif n == len(x): v = x[-1] # After the end else: x0 = x[n-1] x1 = x[n] y0 = y[n-1] y1 = y[n] v = y0 + (y1 - y0) / (x1 - x0) * (a - x0) res.append(v) return res def get_named_colors(self, n): if n <= len(named_colors): return named_colors[:n] else: return named_colors + ('Black',) * (n - len(named_colors)) def apply(self, *args): #if self.colormode in ['atno', 'jmol', 'tags']: # Color atoms according to an integer value number if self.color_errors: oops(_("Incorrect color specification"), "%s: %s" % self.color_errors.values()[0]) return False colordata = self.actual_colordata if self.colormode == 'force': # Use integers instead for border values colordata = [[i, x[1]] for i, x in enumerate(self.actual_colordata)] self.gui.colormode_force_data = self.colormode_force_data elif self.colormode == 'velocity': # Use integers instead for border values colordata = [[i, x[1]] for i, x in enumerate(self.actual_colordata)] self.gui.colormode_velocity_data = self.colormode_velocity_data elif self.colormode == 'charge': # Use integers instead for border values colordata = [[i, x[1]] for i, x in enumerate(self.actual_colordata)] self.gui.colormode_charge_data = self.colormode_charge_data maxval = max([x for x, y in colordata]) self.gui.colors = [None] * (maxval + 1) new = self.gui.drawing_area.window.new_gc alloc = self.gui.colormap.alloc_color for z, val in colordata: if isinstance(val, str): self.gui.colors[z] = new(alloc(val)) else: clr = tuple([int(65535*x) for x in val]) assert len(clr) == 3 self.gui.colors[z] = new(alloc(*clr)) self.gui.colormode = self.colormode self.gui.colordata = colordata self.gui.draw() return True def cancel(self, *args): self.destroy() def ok(self, *args): if self.apply(): self.destroy()
gpl-2.0
annarev/tensorflow
tensorflow/python/tpu/tpu_test_wrapper.py
17
7198
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Wrapper for Python TPU tests. The py_tpu_test macro will actually use this file as its main, building and executing the user-provided test file as a py_binary instead. This lets us do important work behind the scenes, without complicating the tests themselves. The main responsibilities of this file are: - Define standard set of model flags if test did not. This allows us to safely set flags at the Bazel invocation level using --test_arg. - Pick a random directory on GCS to use for each test case, and set it as the default value of --model_dir. This is similar to how Bazel provides each test with a fresh local directory in $TEST_TMPDIR. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import ast import importlib import os import sys import uuid from tensorflow.python.platform import flags from tensorflow.python.util import tf_inspect FLAGS = flags.FLAGS flags.DEFINE_string( 'wrapped_tpu_test_module_relative', None, 'The Python-style relative path to the user-given test. 
If test is in same ' 'directory as BUILD file as is common, then "test.py" would be ".test".') flags.DEFINE_string('test_dir_base', os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR'), 'GCS path to root directory for temporary test files.') flags.DEFINE_string( 'bazel_repo_root', 'tensorflow/python', 'Substring of a bazel filepath beginning the python absolute import path.') # List of flags which all TPU tests should accept. REQUIRED_FLAGS = ['tpu', 'zone', 'project', 'model_dir'] def maybe_define_flags(): """Defines any required flags that are missing.""" for f in REQUIRED_FLAGS: try: flags.DEFINE_string(f, None, 'flag defined by test lib') except flags.DuplicateFlagError: pass def set_random_test_dir(): """Pick a random GCS directory under --test_dir_base, set as --model_dir.""" path = os.path.join(FLAGS.test_dir_base, uuid.uuid4().hex) FLAGS.set_default('model_dir', path) def calculate_parent_python_path(test_filepath): """Returns the absolute import path for the containing directory. Args: test_filepath: The filepath which Bazel invoked (ex: /filesystem/path/tensorflow/tensorflow/python/tpu/tpu_test) Returns: Absolute import path of parent (ex: tensorflow.python.tpu). Raises: ValueError: if bazel_repo_root does not appear within test_filepath. """ # We find the last occurrence of bazel_repo_root, and drop everything before. split_path = test_filepath.rsplit(FLAGS.bazel_repo_root, 1) if len(split_path) < 2: raise ValueError('Filepath "%s" does not contain repo root "%s"' % (test_filepath, FLAGS.bazel_repo_root)) path = FLAGS.bazel_repo_root + split_path[1] # We drop the last portion of the path, which is the name of the test wrapper. path = path.rsplit('/', 1)[0] # We convert the directory separators into dots. return path.replace('/', '.') def import_user_module(): """Imports the flag-specified user test code. This runs all top-level statements in the user module, specifically flag definitions. Returns: The user test module. 
""" return importlib.import_module(FLAGS.wrapped_tpu_test_module_relative, calculate_parent_python_path(sys.argv[0])) def _is_test_class(obj): """Check if arbitrary object is a test class (not a test object!). Args: obj: An arbitrary object from within a module. Returns: True iff obj is a test class inheriting at some point from a module named "TestCase". This is because we write tests using different underlying test libraries. """ return (tf_inspect.isclass(obj) and 'TestCase' in (p.__name__ for p in tf_inspect.getmro(obj))) module_variables = vars() def move_test_classes_into_scope(wrapped_test_module): """Add all test classes defined in wrapped module to our module. The test runner works by inspecting the main module for TestCase classes, so by adding a module-level reference to the TestCase we cause it to execute the wrapped TestCase. Args: wrapped_test_module: The user-provided test code to run. """ for name, obj in wrapped_test_module.__dict__.items(): if _is_test_class(obj): module_variables['tpu_test_imported_%s' % name] = obj def run_user_main(wrapped_test_module): """Runs the "if __name__ == '__main__'" at the bottom of a module. TensorFlow practice is to have a main if at the bottom of the module which might call an API compat function before calling test.main(). Since this is a statement, not a function, we can't cleanly reference it, but we can inspect it from the user module and run it in the context of that module so all imports and variables are available to it. Args: wrapped_test_module: The user-provided test code to run. Raises: NotImplementedError: If main block was not found in module. This should not be caught, as it is likely an error on the user's part -- absltest is all too happy to report a successful status (and zero tests executed) if a user forgets to end a class with "test.main()". """ tree = ast.parse(tf_inspect.getsource(wrapped_test_module)) # Get string representation of just the condition `__name == "__main__"`. 
target = ast.dump(ast.parse('if __name__ == "__main__": pass').body[0].test) # `tree.body` is a list of top-level statements in the module, like imports # and class definitions. We search for our main block, starting from the end. for expr in reversed(tree.body): if isinstance(expr, ast.If) and ast.dump(expr.test) == target: break else: raise NotImplementedError( 'Could not find `if __name__ == "main":` block in %s.' % wrapped_test_module.__name__) # expr is defined because we would have raised an error otherwise. new_ast = ast.Module(body=expr.body, type_ignores=[]) # pylint:disable=undefined-loop-variable exec( # pylint:disable=exec-used compile(new_ast, '<ast>', 'exec'), globals(), wrapped_test_module.__dict__, ) if __name__ == '__main__': # Partially parse flags, since module to import is specified by flag. unparsed = FLAGS(sys.argv, known_only=True) user_module = import_user_module() maybe_define_flags() # Parse remaining flags. FLAGS(unparsed) set_random_test_dir() move_test_classes_into_scope(user_module) run_user_main(user_module)
apache-2.0
KyleAMoore/KanjiNani
Android/.buildozer/android/platform/build/dists/KanjiNani/crystax_python/crystax_python/site-packages/kivy/modules/recorder.py
25
2481
''' Recorder module =============== .. versionadded:: 1.1.0 Create an instance of :class:`~kivy.input.recorder.Recorder`, attach to the class, and bind some keys to record / play sequences: - F6: play the last record in a loop - F7: read the latest recording - F8: record input events Configuration ------------- .. |attrs| replace:: :attr:`~kivy.input.recorder.Recorder.record_attrs` .. |profile_mask| replace:: :attr:`~kivy.input.recorder.Recorder.record_profile_mask` :Parameters: `attrs`: str, defaults to |attrs| value. Attributes to record from the motion event `profile_mask`: str, defaults to |profile_mask| value. Mask for motion event profile. Used to filter which profile will appear in the fake motion event when replayed. `filename`: str, defaults to 'recorder.kvi' Name of the file to record / play with Usage ----- For normal module usage, please see the :mod:`~kivy.modules` documentation. ''' __all__ = ('start', 'stop') from kivy.logger import Logger from functools import partial def replay(recorder, *args): if recorder.play: return else: recorder.play = True def on_recorder_key(recorder, window, key, *largs): if key == 289: # F8 if recorder.play: Logger.error('Recorder: Cannot start recording while playing.') return recorder.record = not recorder.record elif key == 288: # F7 if recorder.record: Logger.error('Recorder: Cannot start playing while recording.') return recorder.play = not recorder.play elif key == 287: # F6 if recorder.play: recorder.unbind(play=replay) else: recorder.bind(play=replay) recorder.play = True def start(win, ctx): keys = {} # attributes value = ctx.config.get('attrs', None) if value is not None: keys['record_attrs'] = value.split(':') # profile mask value = ctx.config.get('profile_mask', None) if value is not None: keys['record_profile_mask'] = value.split(':') # filename value = ctx.config.get('filename', None) if value is not None: keys['filename'] = value from kivy.input.recorder import Recorder ctx.recorder = Recorder(window=win, 
**keys) win.bind(on_key_down=partial(on_recorder_key, ctx.recorder)) def stop(win, ctx): if hasattr(ctx, 'recorder'): ctx.recorder.release()
gpl-3.0
matthewfranglen/spark
python/pyspark/ml/linalg/__init__.py
20
39556
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ MLlib utilities for linear algebra. For dense vectors, MLlib uses the NumPy `array` type, so you can simply pass NumPy arrays around. For sparse vectors, users can construct a :class:`SparseVector` object from MLlib or pass SciPy `scipy.sparse` column vectors if SciPy is available in their environment. """ import sys import array import struct if sys.version >= '3': basestring = str xrange = range import copyreg as copy_reg long = int else: from itertools import izip as zip import copy_reg import numpy as np from pyspark import since from pyspark.sql.types import UserDefinedType, StructField, StructType, ArrayType, DoubleType, \ IntegerType, ByteType, BooleanType __all__ = ['Vector', 'DenseVector', 'SparseVector', 'Vectors', 'Matrix', 'DenseMatrix', 'SparseMatrix', 'Matrices'] if sys.version_info[:2] == (2, 7): # speed up pickling array in Python 2.7 def fast_pickle_array(ar): return array.array, (ar.typecode, ar.tostring()) copy_reg.pickle(array.array, fast_pickle_array) # Check whether we have SciPy. MLlib works without it too, but if we have it, some methods, # such as _dot and _serialize_double_vector, start to support scipy.sparse matrices. 
try: import scipy.sparse _have_scipy = True except: # No SciPy in environment, but that's okay _have_scipy = False def _convert_to_vector(l): if isinstance(l, Vector): return l elif type(l) in (array.array, np.array, np.ndarray, list, tuple, xrange): return DenseVector(l) elif _have_scipy and scipy.sparse.issparse(l): assert l.shape[1] == 1, "Expected column vector" # Make sure the converted csc_matrix has sorted indices. csc = l.tocsc() if not csc.has_sorted_indices: csc.sort_indices() return SparseVector(l.shape[0], csc.indices, csc.data) else: raise TypeError("Cannot convert type %s into Vector" % type(l)) def _vector_size(v): """ Returns the size of the vector. >>> _vector_size([1., 2., 3.]) 3 >>> _vector_size((1., 2., 3.)) 3 >>> _vector_size(array.array('d', [1., 2., 3.])) 3 >>> _vector_size(np.zeros(3)) 3 >>> _vector_size(np.zeros((3, 1))) 3 >>> _vector_size(np.zeros((1, 3))) Traceback (most recent call last): ... ValueError: Cannot treat an ndarray of shape (1, 3) as a vector """ if isinstance(v, Vector): return len(v) elif type(v) in (array.array, list, tuple, xrange): return len(v) elif type(v) == np.ndarray: if v.ndim == 1 or (v.ndim == 2 and v.shape[1] == 1): return len(v) else: raise ValueError("Cannot treat an ndarray of shape %s as a vector" % str(v.shape)) elif _have_scipy and scipy.sparse.issparse(v): assert v.shape[1] == 1, "Expected column vector" return v.shape[0] else: raise TypeError("Cannot treat type %s as a vector" % type(v)) def _format_float(f, digits=4): s = str(round(f, digits)) if '.' in s: s = s[:s.index('.') + 1 + digits] return s def _format_float_list(l): return [_format_float(x) for x in l] def _double_to_long_bits(value): if np.isnan(value): value = float('nan') # pack double into 64 bits, then unpack as long int return struct.unpack('Q', struct.pack('d', value))[0] class VectorUDT(UserDefinedType): """ SQL user-defined type (UDT) for Vector. 
""" @classmethod def sqlType(cls): return StructType([ StructField("type", ByteType(), False), StructField("size", IntegerType(), True), StructField("indices", ArrayType(IntegerType(), False), True), StructField("values", ArrayType(DoubleType(), False), True)]) @classmethod def module(cls): return "pyspark.ml.linalg" @classmethod def scalaUDT(cls): return "org.apache.spark.ml.linalg.VectorUDT" def serialize(self, obj): if isinstance(obj, SparseVector): indices = [int(i) for i in obj.indices] values = [float(v) for v in obj.values] return (0, obj.size, indices, values) elif isinstance(obj, DenseVector): values = [float(v) for v in obj] return (1, None, None, values) else: raise TypeError("cannot serialize %r of type %r" % (obj, type(obj))) def deserialize(self, datum): assert len(datum) == 4, \ "VectorUDT.deserialize given row with length %d but requires 4" % len(datum) tpe = datum[0] if tpe == 0: return SparseVector(datum[1], datum[2], datum[3]) elif tpe == 1: return DenseVector(datum[3]) else: raise ValueError("do not recognize type %r" % tpe) def simpleString(self): return "vector" class MatrixUDT(UserDefinedType): """ SQL user-defined type (UDT) for Matrix. 
""" @classmethod def sqlType(cls): return StructType([ StructField("type", ByteType(), False), StructField("numRows", IntegerType(), False), StructField("numCols", IntegerType(), False), StructField("colPtrs", ArrayType(IntegerType(), False), True), StructField("rowIndices", ArrayType(IntegerType(), False), True), StructField("values", ArrayType(DoubleType(), False), True), StructField("isTransposed", BooleanType(), False)]) @classmethod def module(cls): return "pyspark.ml.linalg" @classmethod def scalaUDT(cls): return "org.apache.spark.ml.linalg.MatrixUDT" def serialize(self, obj): if isinstance(obj, SparseMatrix): colPtrs = [int(i) for i in obj.colPtrs] rowIndices = [int(i) for i in obj.rowIndices] values = [float(v) for v in obj.values] return (0, obj.numRows, obj.numCols, colPtrs, rowIndices, values, bool(obj.isTransposed)) elif isinstance(obj, DenseMatrix): values = [float(v) for v in obj.values] return (1, obj.numRows, obj.numCols, None, None, values, bool(obj.isTransposed)) else: raise TypeError("cannot serialize type %r" % (type(obj))) def deserialize(self, datum): assert len(datum) == 7, \ "MatrixUDT.deserialize given row with length %d but requires 7" % len(datum) tpe = datum[0] if tpe == 0: return SparseMatrix(*datum[1:]) elif tpe == 1: return DenseMatrix(datum[1], datum[2], datum[5], datum[6]) else: raise ValueError("do not recognize type %r" % tpe) def simpleString(self): return "matrix" class Vector(object): __UDT__ = VectorUDT() """ Abstract class for DenseVector and SparseVector """ def toArray(self): """ Convert the vector into an numpy.ndarray :return: numpy.ndarray """ raise NotImplementedError class DenseVector(Vector): """ A dense vector represented by a value array. We use numpy array for storage and arithmetics will be delegated to the underlying numpy array. 
>>> v = Vectors.dense([1.0, 2.0]) >>> u = Vectors.dense([3.0, 4.0]) >>> v + u DenseVector([4.0, 6.0]) >>> 2 - v DenseVector([1.0, 0.0]) >>> v / 2 DenseVector([0.5, 1.0]) >>> v * u DenseVector([3.0, 8.0]) >>> u / v DenseVector([3.0, 2.0]) >>> u % 2 DenseVector([1.0, 0.0]) >>> -v DenseVector([-1.0, -2.0]) """ def __init__(self, ar): if isinstance(ar, bytes): ar = np.frombuffer(ar, dtype=np.float64) elif not isinstance(ar, np.ndarray): ar = np.array(ar, dtype=np.float64) if ar.dtype != np.float64: ar = ar.astype(np.float64) self.array = ar def __reduce__(self): return DenseVector, (self.array.tostring(),) def numNonzeros(self): """ Number of nonzero elements. This scans all active values and count non zeros """ return np.count_nonzero(self.array) def norm(self, p): """ Calculates the norm of a DenseVector. >>> a = DenseVector([0, -1, 2, -3]) >>> a.norm(2) 3.7... >>> a.norm(1) 6.0 """ return np.linalg.norm(self.array, p) def dot(self, other): """ Compute the dot product of two Vectors. We support (Numpy array, list, SparseVector, or SciPy sparse) and a target NumPy array that is either 1- or 2-dimensional. Equivalent to calling numpy.dot of the two vectors. >>> dense = DenseVector(array.array('d', [1., 2.])) >>> dense.dot(dense) 5.0 >>> dense.dot(SparseVector(2, [0, 1], [2., 1.])) 4.0 >>> dense.dot(range(1, 3)) 5.0 >>> dense.dot(np.array(range(1, 3))) 5.0 >>> dense.dot([1.,]) Traceback (most recent call last): ... AssertionError: dimension mismatch >>> dense.dot(np.reshape([1., 2., 3., 4.], (2, 2), order='F')) array([ 5., 11.]) >>> dense.dot(np.reshape([1., 2., 3.], (3, 1), order='F')) Traceback (most recent call last): ... 
AssertionError: dimension mismatch """ if type(other) == np.ndarray: if other.ndim > 1: assert len(self) == other.shape[0], "dimension mismatch" return np.dot(self.array, other) elif _have_scipy and scipy.sparse.issparse(other): assert len(self) == other.shape[0], "dimension mismatch" return other.transpose().dot(self.toArray()) else: assert len(self) == _vector_size(other), "dimension mismatch" if isinstance(other, SparseVector): return other.dot(self) elif isinstance(other, Vector): return np.dot(self.toArray(), other.toArray()) else: return np.dot(self.toArray(), other) def squared_distance(self, other): """ Squared distance of two Vectors. >>> dense1 = DenseVector(array.array('d', [1., 2.])) >>> dense1.squared_distance(dense1) 0.0 >>> dense2 = np.array([2., 1.]) >>> dense1.squared_distance(dense2) 2.0 >>> dense3 = [2., 1.] >>> dense1.squared_distance(dense3) 2.0 >>> sparse1 = SparseVector(2, [0, 1], [2., 1.]) >>> dense1.squared_distance(sparse1) 2.0 >>> dense1.squared_distance([1.,]) Traceback (most recent call last): ... AssertionError: dimension mismatch >>> dense1.squared_distance(SparseVector(1, [0,], [1.,])) Traceback (most recent call last): ... 
AssertionError: dimension mismatch """ assert len(self) == _vector_size(other), "dimension mismatch" if isinstance(other, SparseVector): return other.squared_distance(self) elif _have_scipy and scipy.sparse.issparse(other): return _convert_to_vector(other).squared_distance(self) if isinstance(other, Vector): other = other.toArray() elif not isinstance(other, np.ndarray): other = np.array(other) diff = self.toArray() - other return np.dot(diff, diff) def toArray(self): """ Returns the underlying numpy.ndarray """ return self.array @property def values(self): """ Returns the underlying numpy.ndarray """ return self.array def __getitem__(self, item): return self.array[item] def __len__(self): return len(self.array) def __str__(self): return "[" + ",".join([str(v) for v in self.array]) + "]" def __repr__(self): return "DenseVector([%s])" % (', '.join(_format_float(i) for i in self.array)) def __eq__(self, other): if isinstance(other, DenseVector): return np.array_equal(self.array, other.array) elif isinstance(other, SparseVector): if len(self) != other.size: return False return Vectors._equals(list(xrange(len(self))), self.array, other.indices, other.values) return False def __ne__(self, other): return not self == other def __hash__(self): size = len(self) result = 31 + size nnz = 0 i = 0 while i < size and nnz < 128: if self.array[i] != 0: result = 31 * result + i bits = _double_to_long_bits(self.array[i]) result = 31 * result + (bits ^ (bits >> 32)) nnz += 1 i += 1 return result def __getattr__(self, item): return getattr(self.array, item) def __neg__(self): return DenseVector(-self.array) def _delegate(op): def func(self, other): if isinstance(other, DenseVector): other = other.array return DenseVector(getattr(self.array, op)(other)) return func __add__ = _delegate("__add__") __sub__ = _delegate("__sub__") __mul__ = _delegate("__mul__") __div__ = _delegate("__div__") __truediv__ = _delegate("__truediv__") __mod__ = _delegate("__mod__") __radd__ = 
_delegate("__radd__") __rsub__ = _delegate("__rsub__") __rmul__ = _delegate("__rmul__") __rdiv__ = _delegate("__rdiv__") __rtruediv__ = _delegate("__rtruediv__") __rmod__ = _delegate("__rmod__") class SparseVector(Vector): """ A simple sparse vector class for passing data to MLlib. Users may alternatively pass SciPy's {scipy.sparse} data types. """ def __init__(self, size, *args): """ Create a sparse vector, using either a dictionary, a list of (index, value) pairs, or two separate arrays of indices and values (sorted by index). :param size: Size of the vector. :param args: Active entries, as a dictionary {index: value, ...}, a list of tuples [(index, value), ...], or a list of strictly increasing indices and a list of corresponding values [index, ...], [value, ...]. Inactive entries are treated as zeros. >>> SparseVector(4, {1: 1.0, 3: 5.5}) SparseVector(4, {1: 1.0, 3: 5.5}) >>> SparseVector(4, [(1, 1.0), (3, 5.5)]) SparseVector(4, {1: 1.0, 3: 5.5}) >>> SparseVector(4, [1, 3], [1.0, 5.5]) SparseVector(4, {1: 1.0, 3: 5.5}) >>> SparseVector(4, {1:1.0, 6:2.0}) Traceback (most recent call last): ... AssertionError: Index 6 is out of the size of vector with size=4 >>> SparseVector(4, {-1:1.0}) Traceback (most recent call last): ... AssertionError: Contains negative index -1 """ self.size = int(size) """ Size of the vector. """ assert 1 <= len(args) <= 2, "must pass either 2 or 3 arguments" if len(args) == 1: pairs = args[0] if type(pairs) == dict: pairs = pairs.items() pairs = sorted(pairs) self.indices = np.array([p[0] for p in pairs], dtype=np.int32) """ A list of indices corresponding to active entries. """ self.values = np.array([p[1] for p in pairs], dtype=np.float64) """ A list of values corresponding to active entries. 
""" else: if isinstance(args[0], bytes): assert isinstance(args[1], bytes), "values should be string too" if args[0]: self.indices = np.frombuffer(args[0], np.int32) self.values = np.frombuffer(args[1], np.float64) else: # np.frombuffer() doesn't work well with empty string in older version self.indices = np.array([], dtype=np.int32) self.values = np.array([], dtype=np.float64) else: self.indices = np.array(args[0], dtype=np.int32) self.values = np.array(args[1], dtype=np.float64) assert len(self.indices) == len(self.values), "index and value arrays not same length" for i in xrange(len(self.indices) - 1): if self.indices[i] >= self.indices[i + 1]: raise TypeError( "Indices %s and %s are not strictly increasing" % (self.indices[i], self.indices[i + 1])) if self.indices.size > 0: assert np.max(self.indices) < self.size, \ "Index %d is out of the size of vector with size=%d" \ % (np.max(self.indices), self.size) assert np.min(self.indices) >= 0, \ "Contains negative index %d" % (np.min(self.indices)) def numNonzeros(self): """ Number of nonzero elements. This scans all active values and count non zeros. """ return np.count_nonzero(self.values) def norm(self, p): """ Calculates the norm of a SparseVector. >>> a = SparseVector(4, [0, 1], [3., -4.]) >>> a.norm(1) 7.0 >>> a.norm(2) 5.0 """ return np.linalg.norm(self.values, p) def __reduce__(self): return ( SparseVector, (self.size, self.indices.tostring(), self.values.tostring())) def dot(self, other): """ Dot product with a SparseVector or 1- or 2-dimensional Numpy array. >>> a = SparseVector(4, [1, 3], [3.0, 4.0]) >>> a.dot(a) 25.0 >>> a.dot(array.array('d', [1., 2., 3., 4.])) 22.0 >>> b = SparseVector(4, [2], [1.0]) >>> a.dot(b) 0.0 >>> a.dot(np.array([[1, 1], [2, 2], [3, 3], [4, 4]])) array([ 22., 22.]) >>> a.dot([1., 2., 3.]) Traceback (most recent call last): ... AssertionError: dimension mismatch >>> a.dot(np.array([1., 2.])) Traceback (most recent call last): ... 
AssertionError: dimension mismatch >>> a.dot(DenseVector([1., 2.])) Traceback (most recent call last): ... AssertionError: dimension mismatch >>> a.dot(np.zeros((3, 2))) Traceback (most recent call last): ... AssertionError: dimension mismatch """ if isinstance(other, np.ndarray): if other.ndim not in [2, 1]: raise ValueError("Cannot call dot with %d-dimensional array" % other.ndim) assert len(self) == other.shape[0], "dimension mismatch" return np.dot(self.values, other[self.indices]) assert len(self) == _vector_size(other), "dimension mismatch" if isinstance(other, DenseVector): return np.dot(other.array[self.indices], self.values) elif isinstance(other, SparseVector): # Find out common indices. self_cmind = np.in1d(self.indices, other.indices, assume_unique=True) self_values = self.values[self_cmind] if self_values.size == 0: return 0.0 else: other_cmind = np.in1d(other.indices, self.indices, assume_unique=True) return np.dot(self_values, other.values[other_cmind]) else: return self.dot(_convert_to_vector(other)) def squared_distance(self, other): """ Squared distance from a SparseVector or 1-dimensional NumPy array. >>> a = SparseVector(4, [1, 3], [3.0, 4.0]) >>> a.squared_distance(a) 0.0 >>> a.squared_distance(array.array('d', [1., 2., 3., 4.])) 11.0 >>> a.squared_distance(np.array([1., 2., 3., 4.])) 11.0 >>> b = SparseVector(4, [2], [1.0]) >>> a.squared_distance(b) 26.0 >>> b.squared_distance(a) 26.0 >>> b.squared_distance([1., 2.]) Traceback (most recent call last): ... AssertionError: dimension mismatch >>> b.squared_distance(SparseVector(3, [1,], [1.0,])) Traceback (most recent call last): ... 
AssertionError: dimension mismatch """ assert len(self) == _vector_size(other), "dimension mismatch" if isinstance(other, np.ndarray) or isinstance(other, DenseVector): if isinstance(other, np.ndarray) and other.ndim != 1: raise Exception("Cannot call squared_distance with %d-dimensional array" % other.ndim) if isinstance(other, DenseVector): other = other.array sparse_ind = np.zeros(other.size, dtype=bool) sparse_ind[self.indices] = True dist = other[sparse_ind] - self.values result = np.dot(dist, dist) other_ind = other[~sparse_ind] result += np.dot(other_ind, other_ind) return result elif isinstance(other, SparseVector): result = 0.0 i, j = 0, 0 while i < len(self.indices) and j < len(other.indices): if self.indices[i] == other.indices[j]: diff = self.values[i] - other.values[j] result += diff * diff i += 1 j += 1 elif self.indices[i] < other.indices[j]: result += self.values[i] * self.values[i] i += 1 else: result += other.values[j] * other.values[j] j += 1 while i < len(self.indices): result += self.values[i] * self.values[i] i += 1 while j < len(other.indices): result += other.values[j] * other.values[j] j += 1 return result else: return self.squared_distance(_convert_to_vector(other)) def toArray(self): """ Returns a copy of this SparseVector as a 1-dimensional numpy.ndarray. 
""" arr = np.zeros((self.size,), dtype=np.float64) arr[self.indices] = self.values return arr def __len__(self): return self.size def __str__(self): inds = "[" + ",".join([str(i) for i in self.indices]) + "]" vals = "[" + ",".join([str(v) for v in self.values]) + "]" return "(" + ",".join((str(self.size), inds, vals)) + ")" def __repr__(self): inds = self.indices vals = self.values entries = ", ".join(["{0}: {1}".format(inds[i], _format_float(vals[i])) for i in xrange(len(inds))]) return "SparseVector({0}, {{{1}}})".format(self.size, entries) def __eq__(self, other): if isinstance(other, SparseVector): return other.size == self.size and np.array_equal(other.indices, self.indices) \ and np.array_equal(other.values, self.values) elif isinstance(other, DenseVector): if self.size != len(other): return False return Vectors._equals(self.indices, self.values, list(xrange(len(other))), other.array) return False def __getitem__(self, index): inds = self.indices vals = self.values if not isinstance(index, int): raise TypeError( "Indices must be of type integer, got type %s" % type(index)) if index >= self.size or index < -self.size: raise IndexError("Index %d out of bounds." % index) if index < 0: index += self.size if (inds.size == 0) or (index > inds.item(-1)): return 0. insert_index = np.searchsorted(inds, index) row_ind = inds[insert_index] if row_ind == index: return vals[insert_index] return 0. def __ne__(self, other): return not self.__eq__(other) def __hash__(self): result = 31 + self.size nnz = 0 i = 0 while i < len(self.values) and nnz < 128: if self.values[i] != 0: result = 31 * result + int(self.indices[i]) bits = _double_to_long_bits(self.values[i]) result = 31 * result + (bits ^ (bits >> 32)) nnz += 1 i += 1 return result class Vectors(object): """ Factory methods for working with vectors. .. note:: Dense vectors are simply represented as NumPy array objects, so there is no need to covert them for use in MLlib. 
For sparse vectors, the factory methods in this class create an MLlib-compatible type, or users can pass in SciPy's `scipy.sparse` column vectors. """ @staticmethod def sparse(size, *args): """ Create a sparse vector, using either a dictionary, a list of (index, value) pairs, or two separate arrays of indices and values (sorted by index). :param size: Size of the vector. :param args: Non-zero entries, as a dictionary, list of tuples, or two sorted lists containing indices and values. >>> Vectors.sparse(4, {1: 1.0, 3: 5.5}) SparseVector(4, {1: 1.0, 3: 5.5}) >>> Vectors.sparse(4, [(1, 1.0), (3, 5.5)]) SparseVector(4, {1: 1.0, 3: 5.5}) >>> Vectors.sparse(4, [1, 3], [1.0, 5.5]) SparseVector(4, {1: 1.0, 3: 5.5}) """ return SparseVector(size, *args) @staticmethod def dense(*elements): """ Create a dense vector of 64-bit floats from a Python list or numbers. >>> Vectors.dense([1, 2, 3]) DenseVector([1.0, 2.0, 3.0]) >>> Vectors.dense(1.0, 2.0) DenseVector([1.0, 2.0]) """ if len(elements) == 1 and not isinstance(elements[0], (float, int, long)): # it's list, numpy.array or other iterable object. elements = elements[0] return DenseVector(elements) @staticmethod def squared_distance(v1, v2): """ Squared distance between two vectors. a and b can be of type SparseVector, DenseVector, np.ndarray or array.array. >>> a = Vectors.sparse(4, [(0, 1), (3, 4)]) >>> b = Vectors.dense([2, 5, 4, 1]) >>> a.squared_distance(b) 51.0 """ v1, v2 = _convert_to_vector(v1), _convert_to_vector(v2) return v1.squared_distance(v2) @staticmethod def norm(vector, p): """ Find norm of the given vector. """ return _convert_to_vector(vector).norm(p) @staticmethod def zeros(size): return DenseVector(np.zeros(size)) @staticmethod def _equals(v1_indices, v1_values, v2_indices, v2_values): """ Check equality between sparse/dense vectors, v1_indices and v2_indices assume to be strictly increasing. 
""" v1_size = len(v1_values) v2_size = len(v2_values) k1 = 0 k2 = 0 all_equal = True while all_equal: while k1 < v1_size and v1_values[k1] == 0: k1 += 1 while k2 < v2_size and v2_values[k2] == 0: k2 += 1 if k1 >= v1_size or k2 >= v2_size: return k1 >= v1_size and k2 >= v2_size all_equal = v1_indices[k1] == v2_indices[k2] and v1_values[k1] == v2_values[k2] k1 += 1 k2 += 1 return all_equal class Matrix(object): __UDT__ = MatrixUDT() """ Represents a local matrix. """ def __init__(self, numRows, numCols, isTransposed=False): self.numRows = numRows self.numCols = numCols self.isTransposed = isTransposed def toArray(self): """ Returns its elements in a numpy.ndarray. """ raise NotImplementedError @staticmethod def _convert_to_array(array_like, dtype): """ Convert Matrix attributes which are array-like or buffer to array. """ if isinstance(array_like, bytes): return np.frombuffer(array_like, dtype=dtype) return np.asarray(array_like, dtype=dtype) class DenseMatrix(Matrix): """ Column-major dense matrix. """ def __init__(self, numRows, numCols, values, isTransposed=False): Matrix.__init__(self, numRows, numCols, isTransposed) values = self._convert_to_array(values, np.float64) assert len(values) == numRows * numCols self.values = values def __reduce__(self): return DenseMatrix, ( self.numRows, self.numCols, self.values.tostring(), int(self.isTransposed)) def __str__(self): """ Pretty printing of a DenseMatrix >>> dm = DenseMatrix(2, 2, range(4)) >>> print(dm) DenseMatrix([[ 0., 2.], [ 1., 3.]]) >>> dm = DenseMatrix(2, 2, range(4), isTransposed=True) >>> print(dm) DenseMatrix([[ 0., 1.], [ 2., 3.]]) """ # Inspired by __repr__ in scipy matrices. 
array_lines = repr(self.toArray()).splitlines() # We need to adjust six spaces which is the difference in number # of letters between "DenseMatrix" and "array" x = '\n'.join([(" " * 6 + line) for line in array_lines[1:]]) return array_lines[0].replace("array", "DenseMatrix") + "\n" + x def __repr__(self): """ Representation of a DenseMatrix >>> dm = DenseMatrix(2, 2, range(4)) >>> dm DenseMatrix(2, 2, [0.0, 1.0, 2.0, 3.0], False) """ # If the number of values are less than seventeen then return as it is. # Else return first eight values and last eight values. if len(self.values) < 17: entries = _format_float_list(self.values) else: entries = ( _format_float_list(self.values[:8]) + ["..."] + _format_float_list(self.values[-8:]) ) entries = ", ".join(entries) return "DenseMatrix({0}, {1}, [{2}], {3})".format( self.numRows, self.numCols, entries, self.isTransposed) def toArray(self): """ Return a numpy.ndarray >>> m = DenseMatrix(2, 2, range(4)) >>> m.toArray() array([[ 0., 2.], [ 1., 3.]]) """ if self.isTransposed: return np.asfortranarray( self.values.reshape((self.numRows, self.numCols))) else: return self.values.reshape((self.numRows, self.numCols), order='F') def toSparse(self): """Convert to SparseMatrix""" if self.isTransposed: values = np.ravel(self.toArray(), order='F') else: values = self.values indices = np.nonzero(values)[0] colCounts = np.bincount(indices // self.numRows) colPtrs = np.cumsum(np.hstack( (0, colCounts, np.zeros(self.numCols - colCounts.size)))) values = values[indices] rowIndices = indices % self.numRows return SparseMatrix(self.numRows, self.numCols, colPtrs, rowIndices, values) def __getitem__(self, indices): i, j = indices if i < 0 or i >= self.numRows: raise IndexError("Row index %d is out of range [0, %d)" % (i, self.numRows)) if j >= self.numCols or j < 0: raise IndexError("Column index %d is out of range [0, %d)" % (j, self.numCols)) if self.isTransposed: return self.values[i * self.numCols + j] else: return self.values[i + j * 
self.numRows] def __eq__(self, other): if (self.numRows != other.numRows or self.numCols != other.numCols): return False if isinstance(other, SparseMatrix): return np.all(self.toArray() == other.toArray()) self_values = np.ravel(self.toArray(), order='F') other_values = np.ravel(other.toArray(), order='F') return np.all(self_values == other_values) class SparseMatrix(Matrix): """Sparse Matrix stored in CSC format.""" def __init__(self, numRows, numCols, colPtrs, rowIndices, values, isTransposed=False): Matrix.__init__(self, numRows, numCols, isTransposed) self.colPtrs = self._convert_to_array(colPtrs, np.int32) self.rowIndices = self._convert_to_array(rowIndices, np.int32) self.values = self._convert_to_array(values, np.float64) if self.isTransposed: if self.colPtrs.size != numRows + 1: raise ValueError("Expected colPtrs of size %d, got %d." % (numRows + 1, self.colPtrs.size)) else: if self.colPtrs.size != numCols + 1: raise ValueError("Expected colPtrs of size %d, got %d." % (numCols + 1, self.colPtrs.size)) if self.rowIndices.size != self.values.size: raise ValueError("Expected rowIndices of length %d, got %d." % (self.rowIndices.size, self.values.size)) def __str__(self): """ Pretty printing of a SparseMatrix >>> sm1 = SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4]) >>> print(sm1) 2 X 2 CSCMatrix (0,0) 2.0 (1,0) 3.0 (1,1) 4.0 >>> sm1 = SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4], True) >>> print(sm1) 2 X 2 CSRMatrix (0,0) 2.0 (0,1) 3.0 (1,1) 4.0 """ spstr = "{0} X {1} ".format(self.numRows, self.numCols) if self.isTransposed: spstr += "CSRMatrix\n" else: spstr += "CSCMatrix\n" cur_col = 0 smlist = [] # Display first 16 values. 
if len(self.values) <= 16: zipindval = zip(self.rowIndices, self.values) else: zipindval = zip(self.rowIndices[:16], self.values[:16]) for i, (rowInd, value) in enumerate(zipindval): if self.colPtrs[cur_col + 1] <= i: cur_col += 1 if self.isTransposed: smlist.append('({0},{1}) {2}'.format( cur_col, rowInd, _format_float(value))) else: smlist.append('({0},{1}) {2}'.format( rowInd, cur_col, _format_float(value))) spstr += "\n".join(smlist) if len(self.values) > 16: spstr += "\n.." * 2 return spstr def __repr__(self): """ Representation of a SparseMatrix >>> sm1 = SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4]) >>> sm1 SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2.0, 3.0, 4.0], False) """ rowIndices = list(self.rowIndices) colPtrs = list(self.colPtrs) if len(self.values) <= 16: values = _format_float_list(self.values) else: values = ( _format_float_list(self.values[:8]) + ["..."] + _format_float_list(self.values[-8:]) ) rowIndices = rowIndices[:8] + ["..."] + rowIndices[-8:] if len(self.colPtrs) > 16: colPtrs = colPtrs[:8] + ["..."] + colPtrs[-8:] values = ", ".join(values) rowIndices = ", ".join([str(ind) for ind in rowIndices]) colPtrs = ", ".join([str(ptr) for ptr in colPtrs]) return "SparseMatrix({0}, {1}, [{2}], [{3}], [{4}], {5})".format( self.numRows, self.numCols, colPtrs, rowIndices, values, self.isTransposed) def __reduce__(self): return SparseMatrix, ( self.numRows, self.numCols, self.colPtrs.tostring(), self.rowIndices.tostring(), self.values.tostring(), int(self.isTransposed)) def __getitem__(self, indices): i, j = indices if i < 0 or i >= self.numRows: raise IndexError("Row index %d is out of range [0, %d)" % (i, self.numRows)) if j < 0 or j >= self.numCols: raise IndexError("Column index %d is out of range [0, %d)" % (j, self.numCols)) # If a CSR matrix is given, then the row index should be searched # for in ColPtrs, and the column index should be searched for in the # corresponding slice obtained from rowIndices. 
if self.isTransposed: j, i = i, j colStart = self.colPtrs[j] colEnd = self.colPtrs[j + 1] nz = self.rowIndices[colStart: colEnd] ind = np.searchsorted(nz, i) + colStart if ind < colEnd and self.rowIndices[ind] == i: return self.values[ind] else: return 0.0 def toArray(self): """ Return a numpy.ndarray """ A = np.zeros((self.numRows, self.numCols), dtype=np.float64, order='F') for k in xrange(self.colPtrs.size - 1): startptr = self.colPtrs[k] endptr = self.colPtrs[k + 1] if self.isTransposed: A[k, self.rowIndices[startptr:endptr]] = self.values[startptr:endptr] else: A[self.rowIndices[startptr:endptr], k] = self.values[startptr:endptr] return A def toDense(self): densevals = np.ravel(self.toArray(), order='F') return DenseMatrix(self.numRows, self.numCols, densevals) # TODO: More efficient implementation: def __eq__(self, other): return np.all(self.toArray() == other.toArray()) class Matrices(object): @staticmethod def dense(numRows, numCols, values): """ Create a DenseMatrix """ return DenseMatrix(numRows, numCols, values) @staticmethod def sparse(numRows, numCols, colPtrs, rowIndices, values): """ Create a SparseMatrix """ return SparseMatrix(numRows, numCols, colPtrs, rowIndices, values) def _test(): import doctest try: # Numpy 1.14+ changed it's string format. np.set_printoptions(legacy='1.13') except TypeError: pass (failure_count, test_count) = doctest.testmod(optionflags=doctest.ELLIPSIS) if failure_count: sys.exit(-1) if __name__ == "__main__": _test()
mit
mediatum/mediatum
core/styles.py
1
9708
""" mediatum - a multimedia content repository Copyright (C) 2010 Arne Seifert <seiferta@in.tum.de> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import codecs import logging from mediatumtal import tal import os import attr import core.config as config from core import app from core.transition import render_template, render_macro import glob from jinja2.loaders import FileSystemLoader, ChoiceLoader, PrefixLoader from jinja2.exceptions import TemplateNotFound from core.request_handler import addFileStore as _addFileStore from core.request_handler import addFileStorePath as _addFileStorePath full_styles_by_contenttype = {} list_styles = {} logg = logging.getLogger(__name__) class Theme(object): def __init__(self, name, path): self.name = name self.path = path self.style_path = os.path.join(path, "styles") def getImagePath(self): return self.path + "/img/" def activate(self): import core.webconfig # XXX: this is ugly, is there a better way? 
core.webconfig.theme = self theme_jinja_loader = self.make_jinja_loader() if theme_jinja_loader is not None: logg.info("adding jinja loader for theme") app.add_template_loader(theme_jinja_loader, 0) _addFileStore("/theme/", self.path + "/") _addFileStorePath("/css/", self.path + "/css/") _addFileStorePath("/img/", self.path + "/img/") _addFileStorePath("/js/", self.path + "/js/") def get_tal_template_path(self, filename): raise NotImplementedError("implement in subclasses!") def getTemplate(self, filename): return self.get_tal_template_path(filename) def render_template(self, template_name, context): if template_name.endswith((".j2.jade", ".j2.html")): # caller wants a jinja template return render_template(template_name, **context) elif template_name.endswith(".html"): # caller wants a TAL template tal_template = self.get_tal_template_path(template_name) return tal.getTAL(tal_template, context) else: raise TemplateNotFound("invalid template name (must end with .j2.jade, .j2.html or .html): " + template_name) def render_macro(self, template_name, macro_name, context): if template_name.endswith((".j2.jade", ".j2.html")): # caller wants a jinja template return render_macro(template_name, macro_name, **context) elif template_name.endswith(".html"): # caller wants a TAL template tal_template = self.get_tal_template_path(template_name) return tal.getTAL(tal_template, context, macro=macro_name) else: raise TemplateNotFound("invalid template name (must end with .j2.jade, .j2.html or .html): " + template_name) class DefaultTheme(Theme): PATH = "web/themes/mediatum" def __init__(self): super(DefaultTheme, self).__init__("default", DefaultTheme.PATH) @classmethod def make_jinja_loader(self): template_path = os.path.join(config.basedir, DefaultTheme.PATH, "templates") if os.path.isdir(template_path): return FileSystemLoader(template_path) @classmethod def get_tal_template_path(self, filename): relative_template_path = os.path.join(DefaultTheme.PATH, filename) if not 
os.path.exists(os.path.join(config.basedir, relative_template_path)): raise TemplateNotFound("TAL template {} not found".format(filename)) return relative_template_path class CustomTheme(Theme): def get_tal_template_path(self, filename): relative_template_path = self.path + filename if os.path.exists(os.path.join(config.basedir, relative_template_path)): return relative_template_path else: return DefaultTheme.get_tal_template_path(filename) def make_jinja_loader(self): template_path = os.path.join(config.basedir, self.path, "templates") default_jinja_loader = DefaultTheme.make_jinja_loader() prefix_default_loader = PrefixLoader({"mediatum": default_jinja_loader}) if os.path.isdir(template_path): # This loader first looks # at the CustomTheme template path, then the default template path, if nothing was found. return ChoiceLoader([ prefix_default_loader, FileSystemLoader(template_path), default_jinja_loader ]) else: # no template dir for this theme, only use templates from default theme return default_jinja_loader class FullStyle(object): def __init__(self, path="", template=None, contenttype="all", name="name", label="label", icon="icon", default="", description="", maskfield_separator=""): self.path = path self.type = type self.contenttype = contenttype self.name = name self.label = label self.icon = icon self.template = template self.default = default == "true" self.description = description self.maskfield_separator = maskfield_separator class TALFullStyle(FullStyle): def render_template(self, req, context): template_path = os.path.join(self.path, self.template) return tal.getTAL(template_path, context, request=req) class JinjaFullStyle(FullStyle): def render_template(self, req, context): template_path = os.path.join("styles", self.template) return render_template(template_path, **context) @attr.s class ListStyle(object): name = attr.ib() icon = attr.ib() label = attr.ib() path = attr.ib() template = attr.ib() description = attr.ib() maskfield_separator = 
attr.ib(default="") nodes_per_page = attr.ib(default=10, convert=int) def render_template(self, req, context): if self.template.endswith((".j2.jade", ".j2.html")): template_path = os.path.join("styles", self.template) return render_template(template_path, **context) else: template_path = os.path.join(self.path, self.template) return tal.getTAL(template_path, context, request=req) def readStyleConfig(filename): attrs = {} with codecs.open(filename, "rb", encoding='utf8') as fi: for line in fi: if line.find("#") < 0: line = line.split("=") key = line[0].strip() value = line[1].strip().replace("\r", "").replace("\n", "") attrs[key] = value attrs["path"] = os.path.dirname(filename) return attrs def make_style_from_config(attrs): style_type = attrs["type"] del attrs["type"] if style_type == "smallview": return ListStyle(**attrs) elif style_type == "bigview": template = attrs["template"] if template.endswith("j2.jade") or template.endswith("j2.html"): return JinjaFullStyle(**attrs) else: return TALFullStyle(**attrs) def _load_styles_from_path(dirpath): full_styles_from_path = {} list_styles_from_path = {} if os.path.exists(dirpath): config_filepaths = glob.glob(dirpath + "/*.cfg") for filepath in config_filepaths: style_config = readStyleConfig(filepath) style = make_style_from_config(style_config) if isinstance(style, ListStyle): list_styles_from_path[style.name] = style else: styles_for_type = full_styles_from_path.setdefault(style.contenttype, {}) styles_for_type[style.name] = style else: logg.warn("style path %s not found, ignoring", dirpath) return full_styles_from_path, list_styles_from_path def _load_all_styles(): from core import webconfig default_style_path = os.path.join(config.basedir, 'web/frontend/styles') default_full_styles, default_list_styles = _load_styles_from_path(default_style_path) theme_full_styles, theme_list_styles = _load_styles_from_path(webconfig.theme.style_path) # styles from a theme have higher priority 
full_styles_by_contenttype.update(default_full_styles) full_styles_by_contenttype.update(theme_full_styles) list_styles.update(default_list_styles) list_styles.update(theme_list_styles) def get_list_style(style_name): if not list_styles: _load_all_styles() return list_styles.get(style_name) def get_full_style(content_type, style_name): if not full_styles_by_contenttype: _load_all_styles() styles_for_content_type = full_styles_by_contenttype.get(content_type) if styles_for_content_type is None: raise Exception("no content styles defined for node type {}".format(content_type)) return styles_for_content_type.get(style_name, styles_for_content_type.values()[0]) def get_styles_for_contenttype(content_type): if not full_styles_by_contenttype: _load_all_styles() return full_styles_by_contenttype.get(content_type, {}).values()
gpl-3.0
morissette/devopsdays-hackathon-2016
venv/lib/python2.7/site-packages/flask/__init__.py
425
1674
# -*- coding: utf-8 -*- """ flask ~~~~~ A microframework based on Werkzeug. It's extensively documented and follows best practice patterns. :copyright: (c) 2011 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ __version__ = '0.10.1' # utilities we import from Werkzeug and Jinja2 that are unused # in the module but are exported as public interface. from werkzeug.exceptions import abort from werkzeug.utils import redirect from jinja2 import Markup, escape from .app import Flask, Request, Response from .config import Config from .helpers import url_for, flash, send_file, send_from_directory, \ get_flashed_messages, get_template_attribute, make_response, safe_join, \ stream_with_context from .globals import current_app, g, request, session, _request_ctx_stack, \ _app_ctx_stack from .ctx import has_request_context, has_app_context, \ after_this_request, copy_current_request_context from .module import Module from .blueprints import Blueprint from .templating import render_template, render_template_string # the signals from .signals import signals_available, template_rendered, request_started, \ request_finished, got_request_exception, request_tearing_down, \ appcontext_tearing_down, appcontext_pushed, \ appcontext_popped, message_flashed # We're not exposing the actual json module but a convenient wrapper around # it. from . import json # This was the only thing that flask used to export at one point and it had # a more generic name. jsonify = json.jsonify # backwards compat, goes away in 1.0 from .sessions import SecureCookieSession as Session json_available = True
gpl-3.0
melon-li/openstack-dashboard
openstack_dashboard/dashboards/admin/routers/urls.py
66
1108
# Copyright 2012, Nachi Ueno, NTT MCL, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.conf.urls import patterns from django.conf.urls import url from openstack_dashboard.dashboards.admin.routers import views ROUTER_URL = r'^(?P<router_id>[^/]+)/%s' urlpatterns = patterns( 'horizon.dashboards.admin.routers.views', url(r'^$', views.IndexView.as_view(), name='index'), url(ROUTER_URL % '$', views.DetailView.as_view(), name='detail'), url(ROUTER_URL % 'update', views.UpdateView.as_view(), name='update'), )
apache-2.0
ovnicraft/openerp-restaurant
mrp_repair/__openerp__.py
65
2540
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

# OpenERP addon manifest for the "Repairs Management" (mrp_repair) module.
# The server evaluates this dict when scanning the addons path.
{
    'name': 'Repairs Management',
    'version': '1.0',
    'category': 'Manufacturing',
    'description': """
The aim is to have a complete module to manage all products repairs.
====================================================================

The following topics should be covered by this module:
------------------------------------------------------

    * Add/remove products in the reparation
    * Impact for stocks
    * Invoicing (products and/or services)
    * Warranty concept
    * Repair quotation report
    * Notes for the technician and for the final customer
""",
    'author': 'OpenERP SA',
    'images': ['images/repair_order.jpeg'],
    # other addons that must be installed first
    'depends': ['mrp', 'sale', 'account'],
    # data files loaded on module installation/update
    'data': [
        'security/ir.model.access.csv',
        'security/mrp_repair_security.xml',
        'mrp_repair_data.xml',
        'mrp_repair_sequence.xml',
        'wizard/mrp_repair_cancel_view.xml',
        'wizard/mrp_repair_make_invoice_view.xml',
        'mrp_repair_view.xml',
        'mrp_repair_workflow.xml',
        'mrp_repair_report.xml',
        'views/report_mrprepairorder.xml',
    ],
    'demo': ['mrp_repair_demo.yml'],
    # YAML test scenarios run by the server test framework
    'test': ['test/mrp_repair_users.yml',
             'test/test_mrp_repair_noneinv.yml',
             'test/test_mrp_repair_b4inv.yml',
             'test/test_mrp_repair_afterinv.yml',
             'test/test_mrp_repair_cancel.yml',
             'test/test_mrp_repair_fee.yml',
             ],
    'installable': True,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
timokoola/finnkinotxt
botocore/vendored/requests/auth.py
413
6794
# -*- coding: utf-8 -*-

"""
requests.auth
~~~~~~~~~~~~~

This module contains the authentication handlers for Requests.
"""

import os
import re
import time
import hashlib

from base64 import b64encode

from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header, to_native_string
from .status_codes import codes

CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'


def _basic_auth_str(username, password):
    """Returns a Basic Auth string."""

    authstr = 'Basic ' + to_native_string(
        b64encode(('%s:%s' % (username, password)).encode('latin1')).strip()
    )

    return authstr


class AuthBase(object):
    """Base class that all auth implementations derive from"""

    def __call__(self, r):
        raise NotImplementedError('Auth hooks must be callable.')


class HTTPBasicAuth(AuthBase):
    """Attaches HTTP Basic Authentication to the given Request object."""
    def __init__(self, username, password):
        self.username = username
        self.password = password

    def __call__(self, r):
        r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
        return r


class HTTPProxyAuth(HTTPBasicAuth):
    """Attaches HTTP Proxy Authentication to a given Request object."""
    def __call__(self, r):
        r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
        return r


class HTTPDigestAuth(AuthBase):
    """Attaches HTTP Digest Authentication to the given Request object."""
    def __init__(self, username, password):
        self.username = username
        self.password = password
        # state carried between requests for nonce-count bookkeeping (RFC 2617)
        self.last_nonce = ''
        self.nonce_count = 0
        self.chal = {}
        self.pos = None
        self.num_401_calls = 1

    def build_digest_header(self, method, url):
        """Build the Authorization header value from the stored challenge.

        Returns None when the challenge uses an unsupported algorithm or qop.
        """

        realm = self.chal['realm']
        nonce = self.chal['nonce']
        qop = self.chal.get('qop')
        algorithm = self.chal.get('algorithm')
        opaque = self.chal.get('opaque')

        if algorithm is None:
            _algorithm = 'MD5'
        else:
            _algorithm = algorithm.upper()
        # lambdas assume digest modules are imported at the top level
        # BUG FIX: hash_utf8 was only assigned inside the MD5/SHA branches, so
        # an unsupported algorithm raised NameError at the guard below instead
        # of falling through to `return None`.
        hash_utf8 = None
        if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
            def md5_utf8(x):
                if isinstance(x, str):
                    x = x.encode('utf-8')
                return hashlib.md5(x).hexdigest()
            hash_utf8 = md5_utf8
        elif _algorithm == 'SHA':
            def sha_utf8(x):
                if isinstance(x, str):
                    x = x.encode('utf-8')
                return hashlib.sha1(x).hexdigest()
            hash_utf8 = sha_utf8

        KD = lambda s, d: hash_utf8("%s:%s" % (s, d))

        if hash_utf8 is None:
            return None

        # XXX not implemented yet
        entdig = None
        p_parsed = urlparse(url)
        #: path is request-uri defined in RFC 2616 which should not be empty
        path = p_parsed.path or "/"
        if p_parsed.query:
            path += '?' + p_parsed.query

        A1 = '%s:%s:%s' % (self.username, realm, self.password)
        A2 = '%s:%s' % (method, path)

        HA1 = hash_utf8(A1)
        HA2 = hash_utf8(A2)

        if nonce == self.last_nonce:
            self.nonce_count += 1
        else:
            self.nonce_count = 1
        ncvalue = '%08x' % self.nonce_count

        # client nonce built from counter, server nonce, time and random bytes
        s = str(self.nonce_count).encode('utf-8')
        s += nonce.encode('utf-8')
        s += time.ctime().encode('utf-8')
        s += os.urandom(8)

        cnonce = (hashlib.sha1(s).hexdigest()[:16])
        if _algorithm == 'MD5-SESS':
            HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))

        if qop is None:
            respdig = KD(HA1, "%s:%s" % (nonce, HA2))
        elif qop == 'auth' or 'auth' in qop.split(','):
            noncebit = "%s:%s:%s:%s:%s" % (
                nonce, ncvalue, cnonce, 'auth', HA2
            )
            respdig = KD(HA1, noncebit)
        else:
            # XXX handle auth-int.
            return None

        self.last_nonce = nonce

        # XXX should the partial digests be encoded too?
        base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
               'response="%s"' % (self.username, realm, nonce, path, respdig)
        if opaque:
            base += ', opaque="%s"' % opaque
        if algorithm:
            base += ', algorithm="%s"' % algorithm
        if entdig:
            base += ', digest="%s"' % entdig
        if qop:
            base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)

        return 'Digest %s' % (base)

    def handle_redirect(self, r, **kwargs):
        """Reset num_401_calls counter on redirects."""
        if r.is_redirect:
            self.num_401_calls = 1

    def handle_401(self, r, **kwargs):
        """Takes the given response and tries digest-auth, if needed."""

        if self.pos is not None:
            # Rewind the file position indicator of the body to where
            # it was to resend the request.
            r.request.body.seek(self.pos)
        num_401_calls = getattr(self, 'num_401_calls', 1)
        s_auth = r.headers.get('www-authenticate', '')

        # only retry once per request to avoid an infinite 401 loop
        if 'digest' in s_auth.lower() and num_401_calls < 2:

            self.num_401_calls += 1
            pat = re.compile(r'digest ', flags=re.IGNORECASE)
            self.chal = parse_dict_header(pat.sub('', s_auth, count=1))

            # Consume content and release the original connection
            # to allow our new request to reuse the same one.
            r.content
            r.raw.release_conn()
            prep = r.request.copy()
            extract_cookies_to_jar(prep._cookies, r.request, r.raw)
            prep.prepare_cookies(prep._cookies)

            prep.headers['Authorization'] = self.build_digest_header(
                prep.method, prep.url)
            _r = r.connection.send(prep, **kwargs)
            _r.history.append(r)
            _r.request = prep

            return _r

        self.num_401_calls = 1
        return r

    def __call__(self, r):
        # If we have a saved nonce, skip the 401
        if self.last_nonce:
            r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
        try:
            self.pos = r.body.tell()
        except AttributeError:
            # In the case of HTTPDigestAuth being reused and the body of
            # the previous request was a file-like object, pos has the
            # file position of the previous body. Ensure it's set to
            # None.
            self.pos = None
        r.register_hook('response', self.handle_401)
        r.register_hook('response', self.handle_redirect)
        return r
apache-2.0
bmander/dancecontraption
django/core/servers/basehttp.py
78
25168
""" BaseHTTPServer that implements the Python WSGI protocol (PEP 333, rev 1.21). Adapted from wsgiref.simple_server: http://svn.eby-sarna.com/wsgiref/ This is a simple server for use in testing or debugging Django apps. It hasn't been reviewed for security issues. Don't use it for production use. """ from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer import os import re import socket import sys import urllib import warnings from django.core.management.color import color_style from django.utils.http import http_date from django.utils._os import safe_join from django.contrib.staticfiles import handlers, views as static __version__ = "0.1" __all__ = ['WSGIServer','WSGIRequestHandler'] server_version = "WSGIServer/" + __version__ sys_version = "Python/" + sys.version.split()[0] software_version = server_version + ' ' + sys_version class WSGIServerException(Exception): pass class FileWrapper(object): """Wrapper to convert file-like objects to iterables""" def __init__(self, filelike, blksize=8192): self.filelike = filelike self.blksize = blksize if hasattr(filelike,'close'): self.close = filelike.close def __getitem__(self,key): data = self.filelike.read(self.blksize) if data: return data raise IndexError def __iter__(self): return self def next(self): data = self.filelike.read(self.blksize) if data: return data raise StopIteration # Regular expression that matches `special' characters in parameters, the # existence of which force quoting of the parameter value. tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]') def _formatparam(param, value=None, quote=1): """Convenience function to format and return a key=value pair. This will quote the value if needed or if quote is true. 
""" if value is not None and len(value) > 0: if quote or tspecials.search(value): value = value.replace('\\', '\\\\').replace('"', r'\"') return '%s="%s"' % (param, value) else: return '%s=%s' % (param, value) else: return param class Headers(object): """Manage a collection of HTTP response headers""" def __init__(self,headers): if not isinstance(headers, list): raise TypeError("Headers must be a list of name/value tuples") self._headers = headers def __len__(self): """Return the total number of headers, including duplicates.""" return len(self._headers) def __setitem__(self, name, val): """Set the value of a header.""" del self[name] self._headers.append((name, val)) def __delitem__(self,name): """Delete all occurrences of a header, if present. Does *not* raise an exception if the header is missing. """ name = name.lower() self._headers[:] = [kv for kv in self._headers if kv[0].lower()<>name] def __getitem__(self,name): """Get the first header value for 'name' Return None if the header is missing instead of raising an exception. Note that if the header appeared multiple times, the first exactly which occurrance gets returned is undefined. Use getall() to get all the values matching a header field name. """ return self.get(name) def has_key(self, name): """Return true if the message contains the header.""" return self.get(name) is not None __contains__ = has_key def get_all(self, name): """Return a list of all the values for the named field. These will be sorted in the order they appeared in the original header list or were added to this instance, and may contain duplicates. Any fields deleted and re-inserted are always appended to the header list. If no fields exist with the given name, returns an empty list. 
""" name = name.lower() return [kv[1] for kv in self._headers if kv[0].lower()==name] def get(self,name,default=None): """Get the first header value for 'name', or return 'default'""" name = name.lower() for k,v in self._headers: if k.lower()==name: return v return default def keys(self): """Return a list of all the header field names. These will be sorted in the order they appeared in the original header list, or were added to this instance, and may contain duplicates. Any fields deleted and re-inserted are always appended to the header list. """ return [k for k, v in self._headers] def values(self): """Return a list of all header values. These will be sorted in the order they appeared in the original header list, or were added to this instance, and may contain duplicates. Any fields deleted and re-inserted are always appended to the header list. """ return [v for k, v in self._headers] def items(self): """Get all the header fields and values. These will be sorted in the order they were in the original header list, or were added to this instance, and may contain duplicates. Any fields deleted and re-inserted are always appended to the header list. """ return self._headers[:] def __repr__(self): return "Headers(%s)" % `self._headers` def __str__(self): """str() returns the formatted headers, complete with end line, suitable for direct HTTP transmission.""" return '\r\n'.join(["%s: %s" % kv for kv in self._headers]+['','']) def setdefault(self,name,value): """Return first matching header value for 'name', or 'value' If there is no header named 'name', add a new header with name 'name' and value 'value'.""" result = self.get(name) if result is None: self._headers.append((name,value)) return value else: return result def add_header(self, _name, _value, **_params): """Extended header setting. _name is the header field to add. keyword arguments can be used to set additional parameters for the header field, with underscores converted to dashes. 
Normally the parameter will be added as key="value" unless value is None, in which case only the key will be added. Example: h.add_header('content-disposition', 'attachment', filename='bud.gif') Note that unlike the corresponding 'email.Message' method, this does *not* handle '(charset, language, value)' tuples: all values must be strings or None. """ parts = [] if _value is not None: parts.append(_value) for k, v in _params.items(): if v is None: parts.append(k.replace('_', '-')) else: parts.append(_formatparam(k.replace('_', '-'), v)) self._headers.append((_name, "; ".join(parts))) def guess_scheme(environ): """Return a guess for whether 'wsgi.url_scheme' should be 'http' or 'https' """ if environ.get("HTTPS") in ('yes','on','1'): return 'https' else: return 'http' _hop_headers = { 'connection':1, 'keep-alive':1, 'proxy-authenticate':1, 'proxy-authorization':1, 'te':1, 'trailers':1, 'transfer-encoding':1, 'upgrade':1 } def is_hop_by_hop(header_name): """Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header""" return header_name.lower() in _hop_headers class ServerHandler(object): """Manage the invocation of a WSGI application""" # Configuration parameters; can override per-subclass or per-instance wsgi_version = (1,0) wsgi_multithread = True wsgi_multiprocess = True wsgi_run_once = False origin_server = True # We are transmitting direct to client http_version = "1.0" # Version that should be used for response server_software = software_version # os_environ is used to supply configuration from the OS environment: # by default it's a copy of 'os.environ' as of import time, but you can # override this in e.g. your __init__ method. 
os_environ = dict(os.environ.items())

    # Collaborator classes
    wsgi_file_wrapper = FileWrapper     # set to None to disable
    headers_class = Headers             # must be a Headers-like class

    # Error handling (also per-subclass or per-instance)
    traceback_limit = None  # Print entire traceback to self.get_stderr()
    error_status = "500 INTERNAL SERVER ERROR"
    error_headers = [('Content-Type','text/plain')]

    # State variables (don't mess with these)
    status = result = None
    headers_sent = False
    headers = None
    bytes_sent = 0

    def __init__(self, stdin, stdout, stderr, environ, multithread=True,
                 multiprocess=False):
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.base_env = environ
        self.wsgi_multithread = multithread
        self.wsgi_multiprocess = multiprocess

    def run(self, application):
        """Invoke the application"""
        # Note to self: don't move the close()!  Asynchronous servers shouldn't
        # call close() from finish_response(), so if you close() anywhere but
        # the double-error branch here, you'll break asynchronous servers by
        # prematurely closing.  Async servers must return from 'run()' without
        # closing if there might still be output to iterate over.
        try:
            self.setup_environ()
            self.result = application(self.environ, self.start_response)
            self.finish_response()
        except:
            try:
                self.handle_error()
            except:
                # If we get an error handling an error, just give up already!
                self.close()
                raise   # ...and let the actual server figure it out.

    def setup_environ(self):
        """Set up the environment for one request"""

        env = self.environ = self.os_environ.copy()
        self.add_cgi_vars()

        env['wsgi.input']        = self.get_stdin()
        env['wsgi.errors']       = self.get_stderr()
        env['wsgi.version']      = self.wsgi_version
        env['wsgi.run_once']     = self.wsgi_run_once
        env['wsgi.url_scheme']   = self.get_scheme()
        env['wsgi.multithread']  = self.wsgi_multithread
        env['wsgi.multiprocess'] = self.wsgi_multiprocess

        if self.wsgi_file_wrapper is not None:
            env['wsgi.file_wrapper'] = self.wsgi_file_wrapper

        if self.origin_server and self.server_software:
            env.setdefault('SERVER_SOFTWARE',self.server_software)

    def finish_response(self):
        """
        Send any iterable data, then close self and the iterable

        Subclasses intended for use in asynchronous servers will want to
        redefine this method, such that it sets up callbacks in the event loop
        to iterate over the data, and to call 'self.close()' once the response
        is finished.
        """
        if not self.result_is_file() or not self.sendfile():
            for data in self.result:
                self.write(data)
            self.finish_content()
        self.close()

    def get_scheme(self):
        """Return the URL scheme being used"""
        return guess_scheme(self.environ)

    def set_content_length(self):
        """Compute Content-Length or switch to chunked encoding if possible"""
        try:
            blocks = len(self.result)
        except (TypeError, AttributeError, NotImplementedError):
            pass
        else:
            # only safe when the single block has already been counted in
            # bytes_sent (i.e. write() has seen the whole body)
            if blocks==1:
                self.headers['Content-Length'] = str(self.bytes_sent)
                return
        # XXX Try for chunked encoding if origin server and client is 1.1

    def cleanup_headers(self):
        """Make any necessary header changes or defaults

        Subclasses can extend this to add other defaults.
        """
        if 'Content-Length' not in self.headers:
            self.set_content_length()

    def start_response(self, status, headers,exc_info=None):
        """'start_response()' callable as specified by PEP 333"""

        if exc_info:
            try:
                if self.headers_sent:
                    # Re-raise original exception if headers sent
                    # (Python 2 three-argument raise keeps the traceback)
                    raise exc_info[0], exc_info[1], exc_info[2]
            finally:
                exc_info = None        # avoid dangling circular ref
        elif self.headers is not None:
            raise AssertionError("Headers already set!")

        assert isinstance(status, str),"Status must be a string"
        assert len(status)>=4,"Status must be at least 4 characters"
        assert int(status[:3]),"Status message must begin w/3-digit code"
        assert status[3]==" ", "Status message must have a space after code"
        if __debug__:
            for name,val in headers:
                assert isinstance(name, str),"Header names must be strings"
                assert isinstance(val, str),"Header values must be strings"
                assert not is_hop_by_hop(name),"Hop-by-hop headers not allowed"
        self.status = status
        self.headers = self.headers_class(headers)
        return self.write

    def send_preamble(self):
        """Transmit version/status/date/server, via self._write()"""
        if self.origin_server:
            if self.client_is_modern():
                self._write('HTTP/%s %s\r\n' % (self.http_version,self.status))
                if 'Date' not in self.headers:
                    self._write(
                        'Date: %s\r\n' % http_date()
                    )
                if self.server_software and 'Server' not in self.headers:
                    self._write('Server: %s\r\n' % self.server_software)
        else:
            # gateway (e.g. CGI) style response line
            self._write('Status: %s\r\n' % self.status)

    def write(self, data):
        """'write()' callable as specified by PEP 333"""

        assert isinstance(data, str), "write() argument must be string"

        if not self.status:
            raise AssertionError("write() before start_response()")

        elif not self.headers_sent:
            # Before the first output, send the stored headers
            self.bytes_sent = len(data)    # make sure we know content-length
            self.send_headers()
        else:
            self.bytes_sent += len(data)

        # XXX check Content-Length and truncate if too many bytes written?

        # If data is too large, socket will choke, so write chunks no larger
        # than 32MB at a time.
        length = len(data)
        if length > 33554432:
            offset = 0
            while offset < length:
                chunk_size = min(33554432, length)
                self._write(data[offset:offset+chunk_size])
                self._flush()
                offset += chunk_size
        else:
            self._write(data)
            self._flush()

    def sendfile(self):
        """Platform-specific file transmission

        Override this method in subclasses to support platform-specific
        file transmission.  It is only called if the application's
        return iterable ('self.result') is an instance of
        'self.wsgi_file_wrapper'.

        This method should return a true value if it was able to actually
        transmit the wrapped file-like object using a platform-specific
        approach.  It should return a false value if normal iteration
        should be used instead.  An exception can be raised to indicate
        that transmission was attempted, but failed.

        NOTE: this method should call 'self.send_headers()' if
        'self.headers_sent' is false and it is going to attempt direct
        transmission of the file1.
        """
        return False   # No platform-specific transmission by default

    def finish_content(self):
        """Ensure headers and content have both been sent"""
        if not self.headers_sent:
            self.headers['Content-Length'] = "0"
            self.send_headers()
        else:
            pass # XXX check if content-length was too short?

    def close(self):
        # log the request before tearing down per-request state
        try:
            self.request_handler.log_request(self.status.split(' ',1)[0], self.bytes_sent)
        finally:
            try:
                if hasattr(self.result,'close'):
                    self.result.close()
            finally:
                self.result = self.headers = self.status = self.environ = None
                self.bytes_sent = 0; self.headers_sent = False

    def send_headers(self):
        """Transmit headers to the client, via self._write()"""
        self.cleanup_headers()
        self.headers_sent = True
        if not self.origin_server or self.client_is_modern():
            self.send_preamble()
            self._write(str(self.headers))

    def result_is_file(self):
        """True if 'self.result' is an instance of 'self.wsgi_file_wrapper'"""
        wrapper = self.wsgi_file_wrapper
        return wrapper is not None and isinstance(self.result,wrapper)

    def client_is_modern(self):
        """True if client can accept status and headers"""
        return self.environ['SERVER_PROTOCOL'].upper() != 'HTTP/0.9'

    def log_exception(self,exc_info):
        """Log the 'exc_info' tuple in the server log

        Subclasses may override to retarget the output or change its format.
        """
        try:
            from traceback import print_exception
            stderr = self.get_stderr()
            print_exception(
                exc_info[0], exc_info[1], exc_info[2],
                self.traceback_limit, stderr
            )
            stderr.flush()
        finally:
            # break the traceback reference cycle
            exc_info = None

    def handle_error(self):
        """Log current error, and send error output to client if possible"""
        self.log_exception(sys.exc_info())
        if not self.headers_sent:
            self.result = self.error_output(self.environ, self.start_response)
            self.finish_response()
        # XXX else: attempt advanced recovery techniques for HTML or text?

    def error_output(self, environ, start_response):
        # default plain-text 500 body: the formatted traceback
        import traceback
        start_response(self.error_status, self.error_headers[:], sys.exc_info())
        return ['\n'.join(traceback.format_exception(*sys.exc_info()))]

    # Pure abstract methods; *must* be overridden in subclasses

    def _write(self,data):
        self.stdout.write(data)
        # rebind to the bound method to skip this indirection on later calls
        self._write = self.stdout.write

    def _flush(self):
        self.stdout.flush()
        self._flush = self.stdout.flush

    def get_stdin(self):
        return self.stdin

    def get_stderr(self):
        return self.stderr

    def add_cgi_vars(self):
        self.environ.update(self.base_env)

class WSGIServer(HTTPServer):

    """BaseHTTPServer that implements the Python WSGI protocol"""

    application = None

    def __init__(self, *args, **kwargs):
        # pop 'ipv6' before HTTPServer sees the kwargs
        if kwargs.pop('ipv6', False):
            self.address_family = socket.AF_INET6
        HTTPServer.__init__(self, *args, **kwargs)

    def server_bind(self):
        """Override server_bind to store the server name."""
        try:
            HTTPServer.server_bind(self)
        except Exception, e:
            # wrap bind errors (port in use etc.) in our own exception type
            raise WSGIServerException(e)
        self.setup_environ()

    def setup_environ(self):
        # Set up base environment
        env = self.base_environ = {}
        env['SERVER_NAME'] = self.server_name
        env['GATEWAY_INTERFACE'] = 'CGI/1.1'
        env['SERVER_PORT'] = str(self.server_port)
        env['REMOTE_HOST']=''
        env['CONTENT_LENGTH']=''
        env['SCRIPT_NAME'] = ''

    def get_app(self):
        return self.application

    def set_app(self,application):
        self.application = application

class WSGIRequestHandler(BaseHTTPRequestHandler):

    server_version = "WSGIServer/" + __version__

    def __init__(self, *args, **kwargs):
        from django.conf import settings
        self.admin_media_prefix = settings.ADMIN_MEDIA_PREFIX
        # We set self.path to avoid crashes in log_message() on unsupported
        # requests (like "OPTIONS").
        self.path = ''
        self.style = color_style()
        BaseHTTPRequestHandler.__init__(self, *args, **kwargs)

    def get_environ(self):
        # build the per-request WSGI environ from the base environ + headers
        env = self.server.base_environ.copy()
        env['SERVER_PROTOCOL'] = self.request_version
        env['REQUEST_METHOD'] = self.command
        if '?' in self.path:
            path,query = self.path.split('?',1)
        else:
            path,query = self.path,''

        env['PATH_INFO'] = urllib.unquote(path)
        env['QUERY_STRING'] = query
        env['REMOTE_ADDR'] = self.client_address[0]

        if self.headers.typeheader is None:
            env['CONTENT_TYPE'] = self.headers.type
        else:
            env['CONTENT_TYPE'] = self.headers.typeheader

        length = self.headers.getheader('content-length')
        if length:
            env['CONTENT_LENGTH'] = length

        # copy remaining headers into HTTP_* variables (CGI convention)
        for h in self.headers.headers:
            k,v = h.split(':',1)
            k=k.replace('-','_').upper(); v=v.strip()
            if k in env:
                continue                    # skip content length, type,etc.
            if 'HTTP_'+k in env:
                env['HTTP_'+k] += ','+v     # comma-separate multiple headers
            else:
                env['HTTP_'+k] = v
        return env

    def get_stderr(self):
        return sys.stderr

    def handle(self):
        """Handle a single HTTP request"""
        self.raw_requestline = self.rfile.readline()
        if not self.parse_request(): # An error code has been sent, just exit
            return
        handler = ServerHandler(self.rfile, self.wfile, self.get_stderr(),
                                self.get_environ())
        handler.request_handler = self      # backpointer for logging
        handler.run(self.server.get_app())

    def log_message(self, format, *args):
        # Don't bother logging requests for admin images or the favicon.
if self.path.startswith(self.admin_media_prefix) or self.path == '/favicon.ico': return msg = "[%s] %s\n" % (self.log_date_time_string(), format % args) # Utilize terminal colors, if available if args[1][0] == '2': # Put 2XX first, since it should be the common case msg = self.style.HTTP_SUCCESS(msg) elif args[1][0] == '1': msg = self.style.HTTP_INFO(msg) elif args[1] == '304': msg = self.style.HTTP_NOT_MODIFIED(msg) elif args[1][0] == '3': msg = self.style.HTTP_REDIRECT(msg) elif args[1] == '404': msg = self.style.HTTP_NOT_FOUND(msg) elif args[1][0] == '4': msg = self.style.HTTP_BAD_REQUEST(msg) else: # Any 5XX, or any other response msg = self.style.HTTP_SERVER_ERROR(msg) sys.stderr.write(msg) class AdminMediaHandler(handlers.StaticFilesHandler): """ WSGI middleware that intercepts calls to the admin media directory, as defined by the ADMIN_MEDIA_PREFIX setting, and serves those images. Use this ONLY LOCALLY, for development! This hasn't been tested for security and is not super efficient. This is pending for deprecation since 1.3. """ def get_base_dir(self): import django return os.path.join(django.__path__[0], 'contrib', 'admin', 'media') def get_base_url(self): from django.conf import settings from django.core.exceptions import ImproperlyConfigured if not settings.ADMIN_MEDIA_PREFIX: raise ImproperlyConfigured( "The ADMIN_MEDIA_PREFIX setting can't be empty " "when using the AdminMediaHandler, e.g. with runserver.") return settings.ADMIN_MEDIA_PREFIX def file_path(self, url): """ Returns the path to the media file on disk for the given URL. The passed URL is assumed to begin with ``self.base_url``. If the resulting file path is outside the media directory, then a ValueError is raised. 
""" relative_url = url[len(self.base_url[2]):] relative_path = urllib.url2pathname(relative_url) return safe_join(self.base_dir, relative_path) def serve(self, request): document_root, path = os.path.split(self.file_path(request.path)) return static.serve(request, path, document_root=document_root, insecure=True) def _should_handle(self, path): """ Checks if the path should be handled. Ignores the path if: * the host is provided as part of the base_url * the request's path isn't under the base path """ return path.startswith(self.base_url[2]) and not self.base_url[1] def run(addr, port, wsgi_handler, ipv6=False): server_address = (addr, port) httpd = WSGIServer(server_address, WSGIRequestHandler, ipv6=ipv6) httpd.set_app(wsgi_handler) httpd.serve_forever()
bsd-3-clause
xyzz/vcmi-build
project/jni/python/src/Lib/test/test_multibytecodec_support.py
55
12606
#!/usr/bin/env python # # test_multibytecodec_support.py # Common Unittest Routines for CJK codecs # import sys, codecs import unittest, re from test import test_support from StringIO import StringIO class TestBase: encoding = '' # codec name codec = None # codec tuple (with 4 elements) tstring = '' # string to test StreamReader codectests = None # must set. codec test tuple roundtriptest = 1 # set if roundtrip is possible with unicode has_iso10646 = 0 # set if this encoding contains whole iso10646 map xmlcharnametest = None # string to test xmlcharrefreplace unmappedunicode = u'\udeee' # a unicode codepoint that is not mapped. def setUp(self): if self.codec is None: self.codec = codecs.lookup(self.encoding) self.encode = self.codec.encode self.decode = self.codec.decode self.reader = self.codec.streamreader self.writer = self.codec.streamwriter self.incrementalencoder = self.codec.incrementalencoder self.incrementaldecoder = self.codec.incrementaldecoder def test_chunkcoding(self): for native, utf8 in zip(*[StringIO(f).readlines() for f in self.tstring]): u = self.decode(native)[0] self.assertEqual(u, utf8.decode('utf-8')) if self.roundtriptest: self.assertEqual(native, self.encode(u)[0]) def test_errorhandle(self): for source, scheme, expected in self.codectests: if type(source) == type(''): func = self.decode else: func = self.encode if expected: result = func(source, scheme)[0] self.assertEqual(result, expected) else: self.assertRaises(UnicodeError, func, source, scheme) def test_xmlcharrefreplace(self): if self.has_iso10646: return s = u"\u0b13\u0b23\u0b60 nd eggs" self.assertEqual( self.encode(s, "xmlcharrefreplace")[0], "&#2835;&#2851;&#2912; nd eggs" ) def test_customreplace_encode(self): if self.has_iso10646: return from htmlentitydefs import codepoint2name def xmlcharnamereplace(exc): if not isinstance(exc, UnicodeEncodeError): raise TypeError("don't know how to handle %r" % exc) l = [] for c in exc.object[exc.start:exc.end]: if ord(c) in codepoint2name: 
l.append(u"&%s;" % codepoint2name[ord(c)]) else: l.append(u"&#%d;" % ord(c)) return (u"".join(l), exc.end) codecs.register_error("test.xmlcharnamereplace", xmlcharnamereplace) if self.xmlcharnametest: sin, sout = self.xmlcharnametest else: sin = u"\xab\u211c\xbb = \u2329\u1234\u232a" sout = "&laquo;&real;&raquo; = &lang;&#4660;&rang;" self.assertEqual(self.encode(sin, "test.xmlcharnamereplace")[0], sout) def test_callback_wrong_objects(self): def myreplace(exc): return (ret, exc.end) codecs.register_error("test.cjktest", myreplace) for ret in ([1, 2, 3], [], None, object(), 'string', ''): self.assertRaises(TypeError, self.encode, self.unmappedunicode, 'test.cjktest') def test_callback_long_index(self): def myreplace(exc): return (u'x', long(exc.end)) codecs.register_error("test.cjktest", myreplace) self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh', 'test.cjktest'), ('abcdxefgh', 9)) def myreplace(exc): return (u'x', sys.maxint + 1) codecs.register_error("test.cjktest", myreplace) self.assertRaises(IndexError, self.encode, self.unmappedunicode, 'test.cjktest') def test_callback_None_index(self): def myreplace(exc): return (u'x', None) codecs.register_error("test.cjktest", myreplace) self.assertRaises(TypeError, self.encode, self.unmappedunicode, 'test.cjktest') def test_callback_backward_index(self): def myreplace(exc): if myreplace.limit > 0: myreplace.limit -= 1 return (u'REPLACED', 0) else: return (u'TERMINAL', exc.end) myreplace.limit = 3 codecs.register_error("test.cjktest", myreplace) self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh', 'test.cjktest'), ('abcdREPLACEDabcdREPLACEDabcdREPLACEDabcdTERMINALefgh', 9)) def test_callback_forward_index(self): def myreplace(exc): return (u'REPLACED', exc.end + 2) codecs.register_error("test.cjktest", myreplace) self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh', 'test.cjktest'), ('abcdREPLACEDgh', 9)) def test_callback_index_outofbound(self): def myreplace(exc): 
return (u'TERM', 100) codecs.register_error("test.cjktest", myreplace) self.assertRaises(IndexError, self.encode, self.unmappedunicode, 'test.cjktest') def test_incrementalencoder(self): UTF8Reader = codecs.getreader('utf-8') for sizehint in [None] + range(1, 33) + \ [64, 128, 256, 512, 1024]: istream = UTF8Reader(StringIO(self.tstring[1])) ostream = StringIO() encoder = self.incrementalencoder() while 1: if sizehint is not None: data = istream.read(sizehint) else: data = istream.read() if not data: break e = encoder.encode(data) ostream.write(e) self.assertEqual(ostream.getvalue(), self.tstring[0]) def test_incrementaldecoder(self): UTF8Writer = codecs.getwriter('utf-8') for sizehint in [None, -1] + range(1, 33) + \ [64, 128, 256, 512, 1024]: istream = StringIO(self.tstring[0]) ostream = UTF8Writer(StringIO()) decoder = self.incrementaldecoder() while 1: data = istream.read(sizehint) if not data: break else: u = decoder.decode(data) ostream.write(u) self.assertEqual(ostream.getvalue(), self.tstring[1]) def test_incrementalencoder_error_callback(self): inv = self.unmappedunicode e = self.incrementalencoder() self.assertRaises(UnicodeEncodeError, e.encode, inv, True) e.errors = 'ignore' self.assertEqual(e.encode(inv, True), '') e.reset() def tempreplace(exc): return (u'called', exc.end) codecs.register_error('test.incremental_error_callback', tempreplace) e.errors = 'test.incremental_error_callback' self.assertEqual(e.encode(inv, True), 'called') # again e.errors = 'ignore' self.assertEqual(e.encode(inv, True), '') def test_streamreader(self): UTF8Writer = codecs.getwriter('utf-8') for name in ["read", "readline", "readlines"]: for sizehint in [None, -1] + range(1, 33) + \ [64, 128, 256, 512, 1024]: istream = self.reader(StringIO(self.tstring[0])) ostream = UTF8Writer(StringIO()) func = getattr(istream, name) while 1: data = func(sizehint) if not data: break if name == "readlines": ostream.writelines(data) else: ostream.write(data) 
self.assertEqual(ostream.getvalue(), self.tstring[1]) def test_streamwriter(self): readfuncs = ('read', 'readline', 'readlines') UTF8Reader = codecs.getreader('utf-8') for name in readfuncs: for sizehint in [None] + range(1, 33) + \ [64, 128, 256, 512, 1024]: istream = UTF8Reader(StringIO(self.tstring[1])) ostream = self.writer(StringIO()) func = getattr(istream, name) while 1: if sizehint is not None: data = func(sizehint) else: data = func() if not data: break if name == "readlines": ostream.writelines(data) else: ostream.write(data) self.assertEqual(ostream.getvalue(), self.tstring[0]) if len(u'\U00012345') == 2: # ucs2 build _unichr = unichr def unichr(v): if v >= 0x10000: return _unichr(0xd800 + ((v - 0x10000) >> 10)) + \ _unichr(0xdc00 + ((v - 0x10000) & 0x3ff)) else: return _unichr(v) _ord = ord def ord(c): if len(c) == 2: return 0x10000 + ((_ord(c[0]) - 0xd800) << 10) + \ (ord(c[1]) - 0xdc00) else: return _ord(c) class TestBase_Mapping(unittest.TestCase): pass_enctest = [] pass_dectest = [] supmaps = [] def __init__(self, *args, **kw): unittest.TestCase.__init__(self, *args, **kw) self.open_mapping_file() # test it to report the error early def open_mapping_file(self): return test_support.open_urlresource(self.mapfileurl) def test_mapping_file(self): if self.mapfileurl.endswith('.xml'): self._test_mapping_file_ucm() else: self._test_mapping_file_plain() def _test_mapping_file_plain(self): unichrs = lambda s: u''.join(map(unichr, map(eval, s.split('+')))) urt_wa = {} for line in self.open_mapping_file(): if not line: break data = line.split('#')[0].strip().split() if len(data) != 2: continue csetval = eval(data[0]) if csetval <= 0x7F: csetch = chr(csetval & 0xff) elif csetval >= 0x1000000: csetch = chr(csetval >> 24) + chr((csetval >> 16) & 0xff) + \ chr((csetval >> 8) & 0xff) + chr(csetval & 0xff) elif csetval >= 0x10000: csetch = chr(csetval >> 16) + \ chr((csetval >> 8) & 0xff) + chr(csetval & 0xff) elif csetval >= 0x100: csetch = chr(csetval >> 8) + 
chr(csetval & 0xff) else: continue unich = unichrs(data[1]) if ord(unich) == 0xfffd or urt_wa.has_key(unich): continue urt_wa[unich] = csetch self._testpoint(csetch, unich) def _test_mapping_file_ucm(self): ucmdata = self.open_mapping_file().read() uc = re.findall('<a u="([A-F0-9]{4})" b="([0-9A-F ]+)"/>', ucmdata) for uni, coded in uc: unich = unichr(int(uni, 16)) codech = ''.join(chr(int(c, 16)) for c in coded.split()) self._testpoint(codech, unich) def test_mapping_supplemental(self): for mapping in self.supmaps: self._testpoint(*mapping) def _testpoint(self, csetch, unich): if (csetch, unich) not in self.pass_enctest: try: self.assertEqual(unich.encode(self.encoding), csetch) except UnicodeError, exc: self.fail('Encoding failed while testing %s -> %s: %s' % ( repr(unich), repr(csetch), exc.reason)) if (csetch, unich) not in self.pass_dectest: try: self.assertEqual(csetch.decode(self.encoding), unich) except UnicodeError, exc: self.fail('Decoding failed while testing %s -> %s: %s' % ( repr(csetch), repr(unich), exc.reason)) def load_teststring(encoding): from test import cjkencodings_test return cjkencodings_test.teststring[encoding]
lgpl-2.1
noname007/ycmd
cpp/ycm/tests/gmock/gtest/scripts/upload.py
2511
51024
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tool for uploading diffs from a version control system to the codereview app. Usage summary: upload.py [options] [-- diff_options] Diff options are passed to the diff command of the underlying system. Supported version control systems: Git Mercurial Subversion It is important for Git/Mercurial users to specify a tree/node/branch to diff against by using the '--rev' option. """ # This code is derived from appcfg.py in the App Engine SDK (open source), # and from ASPN recipe #146306. import cookielib import getpass import logging import md5 import mimetypes import optparse import os import re import socket import subprocess import sys import urllib import urllib2 import urlparse try: import readline except ImportError: pass # The logging verbosity: # 0: Errors only. # 1: Status messages. # 2: Info logs. # 3: Debug logs. verbosity = 1 # Max size of patch or base file. MAX_UPLOAD_SIZE = 900 * 1024 def GetEmail(prompt): """Prompts the user for their email address and returns it. The last used email address is saved to a file and offered up as a suggestion to the user. If the user presses enter without typing in anything the last used email address is used. If the user enters a new address, it is saved for next time we prompt. 
""" last_email_file_name = os.path.expanduser("~/.last_codereview_email_address") last_email = "" if os.path.exists(last_email_file_name): try: last_email_file = open(last_email_file_name, "r") last_email = last_email_file.readline().strip("\n") last_email_file.close() prompt += " [%s]" % last_email except IOError, e: pass email = raw_input(prompt + ": ").strip() if email: try: last_email_file = open(last_email_file_name, "w") last_email_file.write(email) last_email_file.close() except IOError, e: pass else: email = last_email return email def StatusUpdate(msg): """Print a status message to stdout. If 'verbosity' is greater than 0, print the message. Args: msg: The string to print. """ if verbosity > 0: print msg def ErrorExit(msg): """Print an error message to stderr and exit.""" print >>sys.stderr, msg sys.exit(1) class ClientLoginError(urllib2.HTTPError): """Raised to indicate there was an error authenticating with ClientLogin.""" def __init__(self, url, code, msg, headers, args): urllib2.HTTPError.__init__(self, url, code, msg, headers, None) self.args = args self.reason = args["Error"] class AbstractRpcServer(object): """Provides a common interface for a simple RPC server.""" def __init__(self, host, auth_function, host_override=None, extra_headers={}, save_cookies=False): """Creates a new HttpRpcServer. Args: host: The host to send requests to. auth_function: A function that takes no arguments and returns an (email, password) tuple when called. Will be called if authentication is required. host_override: The host header to send to the server (defaults to host). extra_headers: A dict of extra headers to append to every request. save_cookies: If True, save the authentication cookies to local disk. If False, use an in-memory cookiejar instead. Subclasses must implement this functionality. Defaults to False. 
""" self.host = host self.host_override = host_override self.auth_function = auth_function self.authenticated = False self.extra_headers = extra_headers self.save_cookies = save_cookies self.opener = self._GetOpener() if self.host_override: logging.info("Server: %s; Host: %s", self.host, self.host_override) else: logging.info("Server: %s", self.host) def _GetOpener(self): """Returns an OpenerDirector for making HTTP requests. Returns: A urllib2.OpenerDirector object. """ raise NotImplementedError() def _CreateRequest(self, url, data=None): """Creates a new urllib request.""" logging.debug("Creating request for: '%s' with payload:\n%s", url, data) req = urllib2.Request(url, data=data) if self.host_override: req.add_header("Host", self.host_override) for key, value in self.extra_headers.iteritems(): req.add_header(key, value) return req def _GetAuthToken(self, email, password): """Uses ClientLogin to authenticate the user, returning an auth token. Args: email: The user's email address password: The user's password Raises: ClientLoginError: If there was an error authenticating with ClientLogin. HTTPError: If there was some other form of HTTP error. Returns: The authentication token returned by ClientLogin. """ account_type = "GOOGLE" if self.host.endswith(".google.com"): # Needed for use inside Google. 
account_type = "HOSTED" req = self._CreateRequest( url="https://www.google.com/accounts/ClientLogin", data=urllib.urlencode({ "Email": email, "Passwd": password, "service": "ah", "source": "rietveld-codereview-upload", "accountType": account_type, }), ) try: response = self.opener.open(req) response_body = response.read() response_dict = dict(x.split("=") for x in response_body.split("\n") if x) return response_dict["Auth"] except urllib2.HTTPError, e: if e.code == 403: body = e.read() response_dict = dict(x.split("=", 1) for x in body.split("\n") if x) raise ClientLoginError(req.get_full_url(), e.code, e.msg, e.headers, response_dict) else: raise def _GetAuthCookie(self, auth_token): """Fetches authentication cookies for an authentication token. Args: auth_token: The authentication token returned by ClientLogin. Raises: HTTPError: If there was an error fetching the authentication cookies. """ # This is a dummy value to allow us to identify when we're successful. continue_location = "http://localhost/" args = {"continue": continue_location, "auth": auth_token} req = self._CreateRequest("http://%s/_ah/login?%s" % (self.host, urllib.urlencode(args))) try: response = self.opener.open(req) except urllib2.HTTPError, e: response = e if (response.code != 302 or response.info()["location"] != continue_location): raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg, response.headers, response.fp) self.authenticated = True def _Authenticate(self): """Authenticates the user. The authentication process works as follows: 1) We get a username and password from the user 2) We use ClientLogin to obtain an AUTH token for the user (see http://code.google.com/apis/accounts/AuthForInstalledApps.html). 3) We pass the auth token to /_ah/login on the server to obtain an authentication cookie. If login was successful, it tries to redirect us to the URL we provided. 
If we attempt to access the upload API without first obtaining an authentication cookie, it returns a 401 response and directs us to authenticate ourselves with ClientLogin. """ for i in range(3): credentials = self.auth_function() try: auth_token = self._GetAuthToken(credentials[0], credentials[1]) except ClientLoginError, e: if e.reason == "BadAuthentication": print >>sys.stderr, "Invalid username or password." continue if e.reason == "CaptchaRequired": print >>sys.stderr, ( "Please go to\n" "https://www.google.com/accounts/DisplayUnlockCaptcha\n" "and verify you are a human. Then try again.") break if e.reason == "NotVerified": print >>sys.stderr, "Account not verified." break if e.reason == "TermsNotAgreed": print >>sys.stderr, "User has not agreed to TOS." break if e.reason == "AccountDeleted": print >>sys.stderr, "The user account has been deleted." break if e.reason == "AccountDisabled": print >>sys.stderr, "The user account has been disabled." break if e.reason == "ServiceDisabled": print >>sys.stderr, ("The user's access to the service has been " "disabled.") break if e.reason == "ServiceUnavailable": print >>sys.stderr, "The service is not available; try again later." break raise self._GetAuthCookie(auth_token) return def Send(self, request_path, payload=None, content_type="application/octet-stream", timeout=None, **kwargs): """Sends an RPC and returns the response. Args: request_path: The path to send the request to, eg /api/appversion/create. payload: The body of the request, or None to send an empty request. content_type: The Content-Type header to use. timeout: timeout in seconds; default None i.e. no timeout. (Note: for large requests on OS X, the timeout doesn't work right.) kwargs: Any keyword arguments are converted into query string parameters. Returns: The response body, as a string. """ # TODO: Don't require authentication. Let the server say # whether it is necessary. 
if not self.authenticated: self._Authenticate() old_timeout = socket.getdefaulttimeout() socket.setdefaulttimeout(timeout) try: tries = 0 while True: tries += 1 args = dict(kwargs) url = "http://%s%s" % (self.host, request_path) if args: url += "?" + urllib.urlencode(args) req = self._CreateRequest(url=url, data=payload) req.add_header("Content-Type", content_type) try: f = self.opener.open(req) response = f.read() f.close() return response except urllib2.HTTPError, e: if tries > 3: raise elif e.code == 401: self._Authenticate() ## elif e.code >= 500 and e.code < 600: ## # Server Error - try again. ## continue else: raise finally: socket.setdefaulttimeout(old_timeout) class HttpRpcServer(AbstractRpcServer): """Provides a simplified RPC-style interface for HTTP requests.""" def _Authenticate(self): """Save the cookie jar after authentication.""" super(HttpRpcServer, self)._Authenticate() if self.save_cookies: StatusUpdate("Saving authentication cookies to %s" % self.cookie_file) self.cookie_jar.save() def _GetOpener(self): """Returns an OpenerDirector that supports cookies and ignores redirects. Returns: A urllib2.OpenerDirector object. """ opener = urllib2.OpenerDirector() opener.add_handler(urllib2.ProxyHandler()) opener.add_handler(urllib2.UnknownHandler()) opener.add_handler(urllib2.HTTPHandler()) opener.add_handler(urllib2.HTTPDefaultErrorHandler()) opener.add_handler(urllib2.HTTPSHandler()) opener.add_handler(urllib2.HTTPErrorProcessor()) if self.save_cookies: self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies") self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file) if os.path.exists(self.cookie_file): try: self.cookie_jar.load() self.authenticated = True StatusUpdate("Loaded authentication cookies from %s" % self.cookie_file) except (cookielib.LoadError, IOError): # Failed to load cookies - just ignore them. 
pass else: # Create an empty cookie file with mode 600 fd = os.open(self.cookie_file, os.O_CREAT, 0600) os.close(fd) # Always chmod the cookie file os.chmod(self.cookie_file, 0600) else: # Don't save cookies across runs of update.py. self.cookie_jar = cookielib.CookieJar() opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar)) return opener parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]") parser.add_option("-y", "--assume_yes", action="store_true", dest="assume_yes", default=False, help="Assume that the answer to yes/no questions is 'yes'.") # Logging group = parser.add_option_group("Logging options") group.add_option("-q", "--quiet", action="store_const", const=0, dest="verbose", help="Print errors only.") group.add_option("-v", "--verbose", action="store_const", const=2, dest="verbose", default=1, help="Print info level logs (default).") group.add_option("--noisy", action="store_const", const=3, dest="verbose", help="Print all logs.") # Review server group = parser.add_option_group("Review server options") group.add_option("-s", "--server", action="store", dest="server", default="codereview.appspot.com", metavar="SERVER", help=("The server to upload to. The format is host[:port]. " "Defaults to 'codereview.appspot.com'.")) group.add_option("-e", "--email", action="store", dest="email", metavar="EMAIL", default=None, help="The username to use. 
Will prompt if omitted.") group.add_option("-H", "--host", action="store", dest="host", metavar="HOST", default=None, help="Overrides the Host header sent with all RPCs.") group.add_option("--no_cookies", action="store_false", dest="save_cookies", default=True, help="Do not save authentication cookies to local disk.") # Issue group = parser.add_option_group("Issue options") group.add_option("-d", "--description", action="store", dest="description", metavar="DESCRIPTION", default=None, help="Optional description when creating an issue.") group.add_option("-f", "--description_file", action="store", dest="description_file", metavar="DESCRIPTION_FILE", default=None, help="Optional path of a file that contains " "the description when creating an issue.") group.add_option("-r", "--reviewers", action="store", dest="reviewers", metavar="REVIEWERS", default=None, help="Add reviewers (comma separated email addresses).") group.add_option("--cc", action="store", dest="cc", metavar="CC", default=None, help="Add CC (comma separated email addresses).") # Upload options group = parser.add_option_group("Patch options") group.add_option("-m", "--message", action="store", dest="message", metavar="MESSAGE", default=None, help="A message to identify the patch. " "Will prompt if omitted.") group.add_option("-i", "--issue", type="int", action="store", metavar="ISSUE", default=None, help="Issue number to which to add. 
Defaults to new issue.") group.add_option("--download_base", action="store_true", dest="download_base", default=False, help="Base files will be downloaded by the server " "(side-by-side diffs may not work on files with CRs).") group.add_option("--rev", action="store", dest="revision", metavar="REV", default=None, help="Branch/tree/revision to diff against (used by DVCS).") group.add_option("--send_mail", action="store_true", dest="send_mail", default=False, help="Send notification email to reviewers.") def GetRpcServer(options): """Returns an instance of an AbstractRpcServer. Returns: A new AbstractRpcServer, on which RPC calls can be made. """ rpc_server_class = HttpRpcServer def GetUserCredentials(): """Prompts the user for a username and password.""" email = options.email if email is None: email = GetEmail("Email (login for uploading to %s)" % options.server) password = getpass.getpass("Password for %s: " % email) return (email, password) # If this is the dev_appserver, use fake authentication. host = (options.host or options.server).lower() if host == "localhost" or host.startswith("localhost:"): email = options.email if email is None: email = "test@example.com" logging.info("Using debug user %s. Override with --email" % email) server = rpc_server_class( options.server, lambda: (email, "password"), host_override=options.host, extra_headers={"Cookie": 'dev_appserver_login="%s:False"' % email}, save_cookies=options.save_cookies) # Don't try to talk to ClientLogin. server.authenticated = True return server return rpc_server_class(options.server, GetUserCredentials, host_override=options.host, save_cookies=options.save_cookies) def EncodeMultipartFormData(fields, files): """Encode form fields for multipart/form-data. Args: fields: A sequence of (name, value) elements for regular form fields. files: A sequence of (name, filename, value) elements for data to be uploaded as files. Returns: (content_type, body) ready for httplib.HTTP instance. 
Source: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306 """ BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-' CRLF = '\r\n' lines = [] for (key, value) in fields: lines.append('--' + BOUNDARY) lines.append('Content-Disposition: form-data; name="%s"' % key) lines.append('') lines.append(value) for (key, filename, value) in files: lines.append('--' + BOUNDARY) lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename)) lines.append('Content-Type: %s' % GetContentType(filename)) lines.append('') lines.append(value) lines.append('--' + BOUNDARY + '--') lines.append('') body = CRLF.join(lines) content_type = 'multipart/form-data; boundary=%s' % BOUNDARY return content_type, body def GetContentType(filename): """Helper to guess the content-type from the filename.""" return mimetypes.guess_type(filename)[0] or 'application/octet-stream' # Use a shell for subcommands on Windows to get a PATH search. use_shell = sys.platform.startswith("win") def RunShellWithReturnCode(command, print_output=False, universal_newlines=True): """Executes a command and returns the output from stdout and the return code. Args: command: Command to execute. print_output: If True, the output is printed to stdout. If False, both stdout and stderr are ignored. universal_newlines: Use universal_newlines flag (default: True). 
Returns: Tuple (output, return code) """ logging.info("Running %s", command) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=use_shell, universal_newlines=universal_newlines) if print_output: output_array = [] while True: line = p.stdout.readline() if not line: break print line.strip("\n") output_array.append(line) output = "".join(output_array) else: output = p.stdout.read() p.wait() errout = p.stderr.read() if print_output and errout: print >>sys.stderr, errout p.stdout.close() p.stderr.close() return output, p.returncode def RunShell(command, silent_ok=False, universal_newlines=True, print_output=False): data, retcode = RunShellWithReturnCode(command, print_output, universal_newlines) if retcode: ErrorExit("Got error status from %s:\n%s" % (command, data)) if not silent_ok and not data: ErrorExit("No output from %s" % command) return data class VersionControlSystem(object): """Abstract base class providing an interface to the VCS.""" def __init__(self, options): """Constructor. Args: options: Command line options. """ self.options = options def GenerateDiff(self, args): """Return the current diff as a string. Args: args: Extra arguments to pass to the diff command. """ raise NotImplementedError( "abstract method -- subclass %s must override" % self.__class__) def GetUnknownFiles(self): """Return a list of files unknown to the VCS.""" raise NotImplementedError( "abstract method -- subclass %s must override" % self.__class__) def CheckForUnknownFiles(self): """Show an "are you sure?" prompt if there are unknown files.""" unknown_files = self.GetUnknownFiles() if unknown_files: print "The following files are not added to version control:" for line in unknown_files: print line prompt = "Are you sure to continue?(y/N) " answer = raw_input(prompt).strip() if answer != "y": ErrorExit("User aborted") def GetBaseFile(self, filename): """Get the content of the upstream version of a file. 
Returns: A tuple (base_content, new_content, is_binary, status) base_content: The contents of the base file. new_content: For text files, this is empty. For binary files, this is the contents of the new file, since the diff output won't contain information to reconstruct the current file. is_binary: True iff the file is binary. status: The status of the file. """ raise NotImplementedError( "abstract method -- subclass %s must override" % self.__class__) def GetBaseFiles(self, diff): """Helper that calls GetBase file for each file in the patch. Returns: A dictionary that maps from filename to GetBaseFile's tuple. Filenames are retrieved based on lines that start with "Index:" or "Property changes on:". """ files = {} for line in diff.splitlines(True): if line.startswith('Index:') or line.startswith('Property changes on:'): unused, filename = line.split(':', 1) # On Windows if a file has property changes its filename uses '\' # instead of '/'. filename = filename.strip().replace('\\', '/') files[filename] = self.GetBaseFile(filename) return files def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options, files): """Uploads the base files (and if necessary, the current ones as well).""" def UploadFile(filename, file_id, content, is_binary, status, is_base): """Uploads a file to the server.""" file_too_large = False if is_base: type = "base" else: type = "current" if len(content) > MAX_UPLOAD_SIZE: print ("Not uploading the %s file for %s because it's too large." 
% (type, filename)) file_too_large = True content = "" checksum = md5.new(content).hexdigest() if options.verbose > 0 and not file_too_large: print "Uploading %s file for %s" % (type, filename) url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id) form_fields = [("filename", filename), ("status", status), ("checksum", checksum), ("is_binary", str(is_binary)), ("is_current", str(not is_base)), ] if file_too_large: form_fields.append(("file_too_large", "1")) if options.email: form_fields.append(("user", options.email)) ctype, body = EncodeMultipartFormData(form_fields, [("data", filename, content)]) response_body = rpc_server.Send(url, body, content_type=ctype) if not response_body.startswith("OK"): StatusUpdate(" --> %s" % response_body) sys.exit(1) patches = dict() [patches.setdefault(v, k) for k, v in patch_list] for filename in patches.keys(): base_content, new_content, is_binary, status = files[filename] file_id_str = patches.get(filename) if file_id_str.find("nobase") != -1: base_content = None file_id_str = file_id_str[file_id_str.rfind("_") + 1:] file_id = int(file_id_str) if base_content != None: UploadFile(filename, file_id, base_content, is_binary, status, True) if new_content != None: UploadFile(filename, file_id, new_content, is_binary, status, False) def IsImage(self, filename): """Returns true if the filename has an image extension.""" mimetype = mimetypes.guess_type(filename)[0] if not mimetype: return False return mimetype.startswith("image/") class SubversionVCS(VersionControlSystem): """Implementation of the VersionControlSystem interface for Subversion.""" def __init__(self, options): super(SubversionVCS, self).__init__(options) if self.options.revision: match = re.match(r"(\d+)(:(\d+))?", self.options.revision) if not match: ErrorExit("Invalid Subversion revision %s." 
% self.options.revision) self.rev_start = match.group(1) self.rev_end = match.group(3) else: self.rev_start = self.rev_end = None # Cache output from "svn list -r REVNO dirname". # Keys: dirname, Values: 2-tuple (ouput for start rev and end rev). self.svnls_cache = {} # SVN base URL is required to fetch files deleted in an older revision. # Result is cached to not guess it over and over again in GetBaseFile(). required = self.options.download_base or self.options.revision is not None self.svn_base = self._GuessBase(required) def GuessBase(self, required): """Wrapper for _GuessBase.""" return self.svn_base def _GuessBase(self, required): """Returns the SVN base URL. Args: required: If true, exits if the url can't be guessed, otherwise None is returned. """ info = RunShell(["svn", "info"]) for line in info.splitlines(): words = line.split() if len(words) == 2 and words[0] == "URL:": url = words[1] scheme, netloc, path, params, query, fragment = urlparse.urlparse(url) username, netloc = urllib.splituser(netloc) if username: logging.info("Removed username from base URL") if netloc.endswith("svn.python.org"): if netloc == "svn.python.org": if path.startswith("/projects/"): path = path[9:] elif netloc != "pythondev@svn.python.org": ErrorExit("Unrecognized Python URL: %s" % url) base = "http://svn.python.org/view/*checkout*%s/" % path logging.info("Guessed Python base = %s", base) elif netloc.endswith("svn.collab.net"): if path.startswith("/repos/"): path = path[6:] base = "http://svn.collab.net/viewvc/*checkout*%s/" % path logging.info("Guessed CollabNet base = %s", base) elif netloc.endswith(".googlecode.com"): path = path + "/" base = urlparse.urlunparse(("http", netloc, path, params, query, fragment)) logging.info("Guessed Google Code base = %s", base) else: path = path + "/" base = urlparse.urlunparse((scheme, netloc, path, params, query, fragment)) logging.info("Guessed base = %s", base) return base if required: ErrorExit("Can't find URL in output from svn info") 
return None def GenerateDiff(self, args): cmd = ["svn", "diff"] if self.options.revision: cmd += ["-r", self.options.revision] cmd.extend(args) data = RunShell(cmd) count = 0 for line in data.splitlines(): if line.startswith("Index:") or line.startswith("Property changes on:"): count += 1 logging.info(line) if not count: ErrorExit("No valid patches found in output from svn diff") return data def _CollapseKeywords(self, content, keyword_str): """Collapses SVN keywords.""" # svn cat translates keywords but svn diff doesn't. As a result of this # behavior patching.PatchChunks() fails with a chunk mismatch error. # This part was originally written by the Review Board development team # who had the same problem (http://reviews.review-board.org/r/276/). # Mapping of keywords to known aliases svn_keywords = { # Standard keywords 'Date': ['Date', 'LastChangedDate'], 'Revision': ['Revision', 'LastChangedRevision', 'Rev'], 'Author': ['Author', 'LastChangedBy'], 'HeadURL': ['HeadURL', 'URL'], 'Id': ['Id'], # Aliases 'LastChangedDate': ['LastChangedDate', 'Date'], 'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'], 'LastChangedBy': ['LastChangedBy', 'Author'], 'URL': ['URL', 'HeadURL'], } def repl(m): if m.group(2): return "$%s::%s$" % (m.group(1), " " * len(m.group(3))) return "$%s$" % m.group(1) keywords = [keyword for name in keyword_str.split(" ") for keyword in svn_keywords.get(name, [])] return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content) def GetUnknownFiles(self): status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True) unknown_files = [] for line in status.split("\n"): if line and line[0] == "?": unknown_files.append(line) return unknown_files def ReadFile(self, filename): """Returns the contents of a file.""" file = open(filename, 'rb') result = "" try: result = file.read() finally: file.close() return result def GetStatus(self, filename): """Returns the status of a file.""" if not self.options.revision: status 
= RunShell(["svn", "status", "--ignore-externals", filename]) if not status: ErrorExit("svn status returned no output for %s" % filename) status_lines = status.splitlines() # If file is in a cl, the output will begin with # "\n--- Changelist 'cl_name':\n". See # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt if (len(status_lines) == 3 and not status_lines[0] and status_lines[1].startswith("--- Changelist")): status = status_lines[2] else: status = status_lines[0] # If we have a revision to diff against we need to run "svn list" # for the old and the new revision and compare the results to get # the correct status for a file. else: dirname, relfilename = os.path.split(filename) if dirname not in self.svnls_cache: cmd = ["svn", "list", "-r", self.rev_start, dirname or "."] out, returncode = RunShellWithReturnCode(cmd) if returncode: ErrorExit("Failed to get status for %s." % filename) old_files = out.splitlines() args = ["svn", "list"] if self.rev_end: args += ["-r", self.rev_end] cmd = args + [dirname or "."] out, returncode = RunShellWithReturnCode(cmd) if returncode: ErrorExit("Failed to run command %s" % cmd) self.svnls_cache[dirname] = (old_files, out.splitlines()) old_files, new_files = self.svnls_cache[dirname] if relfilename in old_files and relfilename not in new_files: status = "D " elif relfilename in old_files and relfilename in new_files: status = "M " else: status = "A " return status def GetBaseFile(self, filename): status = self.GetStatus(filename) base_content = None new_content = None # If a file is copied its status will be "A +", which signifies # "addition-with-history". See "svn st" for more information. We need to # upload the original file or else diff parsing will fail if the file was # edited. if status[0] == "A" and status[3] != "+": # We'll need to upload the new content if we're adding a binary file # since diff's output won't contain it. 
mimetype = RunShell(["svn", "propget", "svn:mime-type", filename], silent_ok=True) base_content = "" is_binary = mimetype and not mimetype.startswith("text/") if is_binary and self.IsImage(filename): new_content = self.ReadFile(filename) elif (status[0] in ("M", "D", "R") or (status[0] == "A" and status[3] == "+") or # Copied file. (status[0] == " " and status[1] == "M")): # Property change. args = [] if self.options.revision: url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start) else: # Don't change filename, it's needed later. url = filename args += ["-r", "BASE"] cmd = ["svn"] + args + ["propget", "svn:mime-type", url] mimetype, returncode = RunShellWithReturnCode(cmd) if returncode: # File does not exist in the requested revision. # Reset mimetype, it contains an error message. mimetype = "" get_base = False is_binary = mimetype and not mimetype.startswith("text/") if status[0] == " ": # Empty base content just to force an upload. base_content = "" elif is_binary: if self.IsImage(filename): get_base = True if status[0] == "M": if not self.rev_end: new_content = self.ReadFile(filename) else: url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end) new_content = RunShell(["svn", "cat", url], universal_newlines=True, silent_ok=True) else: base_content = "" else: get_base = True if get_base: if is_binary: universal_newlines = False else: universal_newlines = True if self.rev_start: # "svn cat -r REV delete_file.txt" doesn't work. cat requires # the full URL with "@REV" appended instead of using "-r" option. 
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start) base_content = RunShell(["svn", "cat", url], universal_newlines=universal_newlines, silent_ok=True) else: base_content = RunShell(["svn", "cat", filename], universal_newlines=universal_newlines, silent_ok=True) if not is_binary: args = [] if self.rev_start: url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start) else: url = filename args += ["-r", "BASE"] cmd = ["svn"] + args + ["propget", "svn:keywords", url] keywords, returncode = RunShellWithReturnCode(cmd) if keywords and not returncode: base_content = self._CollapseKeywords(base_content, keywords) else: StatusUpdate("svn status returned unexpected output: %s" % status) sys.exit(1) return base_content, new_content, is_binary, status[0:5] class GitVCS(VersionControlSystem): """Implementation of the VersionControlSystem interface for Git.""" def __init__(self, options): super(GitVCS, self).__init__(options) # Map of filename -> hash of base file. self.base_hashes = {} def GenerateDiff(self, extra_args): # This is more complicated than svn's GenerateDiff because we must convert # the diff output to include an svn-style "Index:" line as well as record # the hashes of the base files, so we can upload them along with our diff. if self.options.revision: extra_args = [self.options.revision] + extra_args gitdiff = RunShell(["git", "diff", "--full-index"] + extra_args) svndiff = [] filecount = 0 filename = None for line in gitdiff.splitlines(): match = re.match(r"diff --git a/(.*) b/.*$", line) if match: filecount += 1 filename = match.group(1) svndiff.append("Index: %s\n" % filename) else: # The "index" line in a git diff looks like this (long hashes elided): # index 82c0d44..b2cee3f 100755 # We want to save the left hash, as that identifies the base file. 
match = re.match(r"index (\w+)\.\.", line) if match: self.base_hashes[filename] = match.group(1) svndiff.append(line + "\n") if not filecount: ErrorExit("No valid patches found in output from git diff") return "".join(svndiff) def GetUnknownFiles(self): status = RunShell(["git", "ls-files", "--exclude-standard", "--others"], silent_ok=True) return status.splitlines() def GetBaseFile(self, filename): hash = self.base_hashes[filename] base_content = None new_content = None is_binary = False if hash == "0" * 40: # All-zero hash indicates no base file. status = "A" base_content = "" else: status = "M" base_content, returncode = RunShellWithReturnCode(["git", "show", hash]) if returncode: ErrorExit("Got error status from 'git show %s'" % hash) return (base_content, new_content, is_binary, status) class MercurialVCS(VersionControlSystem): """Implementation of the VersionControlSystem interface for Mercurial.""" def __init__(self, options, repo_dir): super(MercurialVCS, self).__init__(options) # Absolute path to repository (we can be in a subdir) self.repo_dir = os.path.normpath(repo_dir) # Compute the subdir cwd = os.path.normpath(os.getcwd()) assert cwd.startswith(self.repo_dir) self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/") if self.options.revision: self.base_rev = self.options.revision else: self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip() def _GetRelPath(self, filename): """Get relative path of a file according to the current directory, given its logical path in the repo.""" assert filename.startswith(self.subdir), filename return filename[len(self.subdir):].lstrip(r"\/") def GenerateDiff(self, extra_args): # If no file specified, restrict to the current subdir extra_args = extra_args or ["."] cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args data = RunShell(cmd, silent_ok=True) svndiff = [] filecount = 0 for line in data.splitlines(): m = re.match("diff --git a/(\S+) b/(\S+)", line) if m: # Modify line to make it look like 
as it comes from svn diff. # With this modification no changes on the server side are required # to make upload.py work with Mercurial repos. # NOTE: for proper handling of moved/copied files, we have to use # the second filename. filename = m.group(2) svndiff.append("Index: %s" % filename) svndiff.append("=" * 67) filecount += 1 logging.info(line) else: svndiff.append(line) if not filecount: ErrorExit("No valid patches found in output from hg diff") return "\n".join(svndiff) + "\n" def GetUnknownFiles(self): """Return a list of files unknown to the VCS.""" args = [] status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."], silent_ok=True) unknown_files = [] for line in status.splitlines(): st, fn = line.split(" ", 1) if st == "?": unknown_files.append(fn) return unknown_files def GetBaseFile(self, filename): # "hg status" and "hg cat" both take a path relative to the current subdir # rather than to the repo root, but "hg diff" has given us the full path # to the repo root. 
base_content = "" new_content = None is_binary = False oldrelpath = relpath = self._GetRelPath(filename) # "hg status -C" returns two lines for moved/copied files, one otherwise out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath]) out = out.splitlines() # HACK: strip error message about missing file/directory if it isn't in # the working copy if out[0].startswith('%s: ' % relpath): out = out[1:] if len(out) > 1: # Moved/copied => considered as modified, use old filename to # retrieve base contents oldrelpath = out[1].strip() status = "M" else: status, _ = out[0].split(' ', 1) if status != "A": base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath], silent_ok=True) is_binary = "\0" in base_content # Mercurial's heuristic if status != "R": new_content = open(relpath, "rb").read() is_binary = is_binary or "\0" in new_content if is_binary and base_content: # Fetch again without converting newlines base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath], silent_ok=True, universal_newlines=False) if not is_binary or not self.IsImage(relpath): new_content = None return base_content, new_content, is_binary, status # NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync. def SplitPatch(data): """Splits a patch into separate pieces for each file. Args: data: A string containing the output of svn diff. Returns: A list of 2-tuple (filename, text) where text is the svn diff output pertaining to filename. """ patches = [] filename = None diff = [] for line in data.splitlines(True): new_filename = None if line.startswith('Index:'): unused, new_filename = line.split(':', 1) new_filename = new_filename.strip() elif line.startswith('Property changes on:'): unused, temp_filename = line.split(':', 1) # When a file is modified, paths use '/' between directories, however # when a property is modified '\' is used on Windows. Make them the same # otherwise the file shows up twice. 
temp_filename = temp_filename.strip().replace('\\', '/') if temp_filename != filename: # File has property changes but no modifications, create a new diff. new_filename = temp_filename if new_filename: if filename and diff: patches.append((filename, ''.join(diff))) filename = new_filename diff = [line] continue if diff is not None: diff.append(line) if filename and diff: patches.append((filename, ''.join(diff))) return patches def UploadSeparatePatches(issue, rpc_server, patchset, data, options): """Uploads a separate patch for each file in the diff output. Returns a list of [patch_key, filename] for each file. """ patches = SplitPatch(data) rv = [] for patch in patches: if len(patch[1]) > MAX_UPLOAD_SIZE: print ("Not uploading the patch for " + patch[0] + " because the file is too large.") continue form_fields = [("filename", patch[0])] if not options.download_base: form_fields.append(("content_upload", "1")) files = [("data", "data.diff", patch[1])] ctype, body = EncodeMultipartFormData(form_fields, files) url = "/%d/upload_patch/%d" % (int(issue), int(patchset)) print "Uploading patch for " + patch[0] response_body = rpc_server.Send(url, body, content_type=ctype) lines = response_body.splitlines() if not lines or lines[0] != "OK": StatusUpdate(" --> %s" % response_body) sys.exit(1) rv.append([lines[1], patch[0]]) return rv def GuessVCS(options): """Helper to guess the version control system. This examines the current directory, guesses which VersionControlSystem we're using, and returns an instance of the appropriate class. Exit with an error if we can't figure it out. Returns: A VersionControlSystem instance. Exits if the VCS can't be guessed. """ # Mercurial has a command to get the base directory of a repository # Try running it, but don't die if we don't have hg installed. # NOTE: we try Mercurial first as it can sit on top of an SVN working copy. 
try: out, returncode = RunShellWithReturnCode(["hg", "root"]) if returncode == 0: return MercurialVCS(options, out.strip()) except OSError, (errno, message): if errno != 2: # ENOENT -- they don't have hg installed. raise # Subversion has a .svn in all working directories. if os.path.isdir('.svn'): logging.info("Guessed VCS = Subversion") return SubversionVCS(options) # Git has a command to test if you're in a git tree. # Try running it, but don't die if we don't have git installed. try: out, returncode = RunShellWithReturnCode(["git", "rev-parse", "--is-inside-work-tree"]) if returncode == 0: return GitVCS(options) except OSError, (errno, message): if errno != 2: # ENOENT -- they don't have git installed. raise ErrorExit(("Could not guess version control system. " "Are you in a working copy directory?")) def RealMain(argv, data=None): """The real main function. Args: argv: Command line arguments. data: Diff contents. If None (default) the diff is generated by the VersionControlSystem implementation returned by GuessVCS(). Returns: A 2-tuple (issue id, patchset id). The patchset id is None if the base files are not uploaded by this script (applies only to SVN checkouts). """ logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:" "%(lineno)s %(message)s ")) os.environ['LC_ALL'] = 'C' options, args = parser.parse_args(argv[1:]) global verbosity verbosity = options.verbose if verbosity >= 3: logging.getLogger().setLevel(logging.DEBUG) elif verbosity >= 2: logging.getLogger().setLevel(logging.INFO) vcs = GuessVCS(options) if isinstance(vcs, SubversionVCS): # base field is only allowed for Subversion. # Note: Fetching base files may become deprecated in future releases. 
base = vcs.GuessBase(options.download_base) else: base = None if not base and options.download_base: options.download_base = True logging.info("Enabled upload of base file") if not options.assume_yes: vcs.CheckForUnknownFiles() if data is None: data = vcs.GenerateDiff(args) files = vcs.GetBaseFiles(data) if verbosity >= 1: print "Upload server:", options.server, "(change with -s/--server)" if options.issue: prompt = "Message describing this patch set: " else: prompt = "New issue subject: " message = options.message or raw_input(prompt).strip() if not message: ErrorExit("A non-empty message is required") rpc_server = GetRpcServer(options) form_fields = [("subject", message)] if base: form_fields.append(("base", base)) if options.issue: form_fields.append(("issue", str(options.issue))) if options.email: form_fields.append(("user", options.email)) if options.reviewers: for reviewer in options.reviewers.split(','): if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1: ErrorExit("Invalid email address: %s" % reviewer) form_fields.append(("reviewers", options.reviewers)) if options.cc: for cc in options.cc.split(','): if "@" in cc and not cc.split("@")[1].count(".") == 1: ErrorExit("Invalid email address: %s" % cc) form_fields.append(("cc", options.cc)) description = options.description if options.description_file: if options.description: ErrorExit("Can't specify description and description_file") file = open(options.description_file, 'r') description = file.read() file.close() if description: form_fields.append(("description", description)) # Send a hash of all the base file so the server can determine if a copy # already exists in an earlier patchset. 
base_hashes = "" for file, info in files.iteritems(): if not info[0] is None: checksum = md5.new(info[0]).hexdigest() if base_hashes: base_hashes += "|" base_hashes += checksum + ":" + file form_fields.append(("base_hashes", base_hashes)) # If we're uploading base files, don't send the email before the uploads, so # that it contains the file status. if options.send_mail and options.download_base: form_fields.append(("send_mail", "1")) if not options.download_base: form_fields.append(("content_upload", "1")) if len(data) > MAX_UPLOAD_SIZE: print "Patch is large, so uploading file patches separately." uploaded_diff_file = [] form_fields.append(("separate_patches", "1")) else: uploaded_diff_file = [("data", "data.diff", data)] ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file) response_body = rpc_server.Send("/upload", body, content_type=ctype) patchset = None if not options.download_base or not uploaded_diff_file: lines = response_body.splitlines() if len(lines) >= 2: msg = lines[0] patchset = lines[1].strip() patches = [x.split(" ", 1) for x in lines[2:]] else: msg = response_body else: msg = response_body StatusUpdate(msg) if not response_body.startswith("Issue created.") and \ not response_body.startswith("Issue updated."): sys.exit(0) issue = msg[msg.rfind("/")+1:] if not uploaded_diff_file: result = UploadSeparatePatches(issue, rpc_server, patchset, data, options) if not options.download_base: patches = result if not options.download_base: vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files) if options.send_mail: rpc_server.Send("/" + issue + "/mail", payload="") return issue, patchset def main(): try: RealMain(sys.argv) except KeyboardInterrupt: print StatusUpdate("Interrupted.") sys.exit(1) if __name__ == "__main__": main()
gpl-3.0
Kast0rTr0y/ansible
lib/ansible/modules/database/vertica/vertica_role.py
20
8551
#!/usr/bin/python # -*- coding: utf-8 -*- # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = """ --- module: vertica_role version_added: '2.0' short_description: Adds or removes Vertica database roles and assigns roles to them. description: - Adds or removes Vertica database role and, optionally, assign other roles. options: name: description: - Name of the role to add or remove. required: true assigned_roles: description: - Comma separated list of roles to assign to the role. aliases: ['assigned_role'] required: false default: null state: description: - Whether to create C(present), drop C(absent) or lock C(locked) a role. required: false choices: ['present', 'absent'] default: present db: description: - Name of the Vertica database. required: false default: null cluster: description: - Name of the Vertica cluster. required: false default: localhost port: description: - Vertica cluster port to connect to. required: false default: 5433 login_user: description: - The username used to authenticate with. required: false default: dbadmin login_password: description: - The password used to authenticate with. required: false default: null notes: - The default authentication assumes that you are either logging in as or sudo'ing to the C(dbadmin) account on the host. 
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). requirements: [ 'unixODBC', 'pyodbc' ] author: "Dariusz Owczarek (@dareko)" """ EXAMPLES = """ - name: creating a new vertica role vertica_role: name=role_name db=db_name state=present - name: creating a new vertica role with other role assigned vertica_role: name=role_name assigned_role=other_role_name state=present """ try: import pyodbc except ImportError: pyodbc_found = False else: pyodbc_found = True from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.pycompat24 import get_exception class NotSupportedError(Exception): pass class CannotDropError(Exception): pass # module specific functions def get_role_facts(cursor, role=''): facts = {} cursor.execute(""" select r.name, r.assigned_roles from roles r where (? = '' or r.name ilike ?) 
""", role, role) while True: rows = cursor.fetchmany(100) if not rows: break for row in rows: role_key = row.name.lower() facts[role_key] = { 'name': row.name, 'assigned_roles': []} if row.assigned_roles: facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',') return facts def update_roles(role_facts, cursor, role, existing, required): for assigned_role in set(existing) - set(required): cursor.execute("revoke {0} from {1}".format(assigned_role, role)) for assigned_role in set(required) - set(existing): cursor.execute("grant {0} to {1}".format(assigned_role, role)) def check(role_facts, role, assigned_roles): role_key = role.lower() if role_key not in role_facts: return False if assigned_roles and cmp(sorted(assigned_roles), sorted(role_facts[role_key]['assigned_roles'])) != 0: return False return True def present(role_facts, cursor, role, assigned_roles): role_key = role.lower() if role_key not in role_facts: cursor.execute("create role {0}".format(role)) update_roles(role_facts, cursor, role, [], assigned_roles) role_facts.update(get_role_facts(cursor, role)) return True else: changed = False if assigned_roles and cmp(sorted(assigned_roles), sorted(role_facts[role_key]['assigned_roles'])) != 0: update_roles(role_facts, cursor, role, role_facts[role_key]['assigned_roles'], assigned_roles) changed = True if changed: role_facts.update(get_role_facts(cursor, role)) return changed def absent(role_facts, cursor, role, assigned_roles): role_key = role.lower() if role_key in role_facts: update_roles(role_facts, cursor, role, role_facts[role_key]['assigned_roles'], []) cursor.execute("drop role {0} cascade".format(role_facts[role_key]['name'])) del role_facts[role_key] return True else: return False # module logic def main(): module = AnsibleModule( argument_spec=dict( role=dict(required=True, aliases=['name']), assigned_roles=dict(default=None, aliases=['assigned_role']), state=dict(default='present', choices=['absent', 'present']), 
db=dict(default=None), cluster=dict(default='localhost'), port=dict(default='5433'), login_user=dict(default='dbadmin'), login_password=dict(default=None, no_log=True), ), supports_check_mode = True) if not pyodbc_found: module.fail_json(msg="The python pyodbc module is required.") role = module.params['role'] assigned_roles = [] if module.params['assigned_roles']: assigned_roles = module.params['assigned_roles'].split(',') assigned_roles = filter(None, assigned_roles) state = module.params['state'] db = '' if module.params['db']: db = module.params['db'] changed = False try: dsn = ( "Driver=Vertica;" "Server={0};" "Port={1};" "Database={2};" "User={3};" "Password={4};" "ConnectionLoadBalance={5}" ).format(module.params['cluster'], module.params['port'], db, module.params['login_user'], module.params['login_password'], 'true') db_conn = pyodbc.connect(dsn, autocommit=True) cursor = db_conn.cursor() except Exception: e = get_exception() module.fail_json(msg="Unable to connect to database: {0}.".format(e)) try: role_facts = get_role_facts(cursor) if module.check_mode: changed = not check(role_facts, role, assigned_roles) elif state == 'absent': try: changed = absent(role_facts, cursor, role, assigned_roles) except pyodbc.Error: e = get_exception() module.fail_json(msg=str(e)) elif state == 'present': try: changed = present(role_facts, cursor, role, assigned_roles) except pyodbc.Error: e = get_exception() module.fail_json(msg=str(e)) except NotSupportedError: e = get_exception() module.fail_json(msg=str(e), ansible_facts={'vertica_roles': role_facts}) except CannotDropError: e = get_exception() module.fail_json(msg=str(e), ansible_facts={'vertica_roles': role_facts}) except SystemExit: # avoid catching this on python 2.4 raise except Exception: e = get_exception() module.fail_json(msg=e) module.exit_json(changed=changed, role=role, ansible_facts={'vertica_roles': role_facts}) if __name__ == '__main__': main()
gpl-3.0
tytso/compute-image-packages
gcimagebundle/gcimagebundlelib/fs_copy.py
5
5513
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Creates a copy of specified directories/files."""
# FIX: the original docstring used a backslash ("directories\files"), which
# embeds a literal form-feed character via the \f escape sequence.

import logging
import os
import re

from gcimagebundlelib import manifest
from gcimagebundlelib import utils


class FsCopyError(Exception):
  """Error occurred in an fs copy operation."""


class InvalidFsCopyError(Exception):
  """Error when verification fails before fs copying."""


class FsCopy(object):
  """Specifies which files/directories must be copied.

  Collects sources, exclusions and options, then subclasses implement the
  actual bundling (see Bundleup, which is a no-op stub here).
  """

  def __init__(self):
    # Populate the required parameters with None so we can verify in
    # _Verify() that the caller supplied them all before bundling.
    self._output_tarfile = None
    self._srcs = []
    self._excludes = []
    self._key = None
    self._recursive = True
    self._fs_size = 0
    self._ignore_hard_links = False
    self._platform = None
    self._overwrite_list = []
    self._scratch_dir = '/tmp'
    self._disk = None
    self._manifest = manifest.ImageManifest(
        is_gce_instance=utils.IsRunningOnGCE())

  def SetTarfile(self, tar_file):
    """Sets tar file which will contain file system copy.

    Args:
      tar_file: path to a tar file.
    """
    self._output_tarfile = tar_file

  def AddDisk(self, disk):
    """Adds the disk which should be bundled.

    Args:
      disk: The block disk that needs to be bundled.
    """
    self._disk = disk

  def AddSource(self, src, arcname='', recursive=True):
    """Adds a source to be copied to the tar file.

    Args:
      src: path to directory/file to be copied.
      arcname: name of src in the tar archive. If arcname is empty, then
        instead of copying src itself only its content is copied.
      recursive: specifies if src directory should be copied recursively.

    Raises:
      ValueError: If src path doesn't exist.
    """
    if not os.path.exists(src):
      raise ValueError('invalid path')
    # Note that there is a fundamental asymmetry here as
    # abspath('/') => '/' while abspath('/usr/') => '/usr'.
    # This creates some subtleties elsewhere in the code.
    self._srcs.append((os.path.abspath(src), arcname, recursive))

  def AppendExcludes(self, excludes):
    """Adds a file/directory to be excluded from file copy.

    Args:
      excludes: A list of ExcludeSpec objects.
    """
    self._excludes.extend(excludes)

  def SetKey(self, key):
    """Sets a key to use to sign the archive digest.

    Args:
      key: key to use to sign the archive digest.
    """
    # The key is ignored for now.
    # TODO(user): sign the digest with the key
    self._key = key

  def SetPlatform(self, platform):
    """Sets the OS platform which is used to create an image.

    Args:
      platform: OS platform specific settings.
    """
    self._platform = platform
    logging.warning('overwrite list = %s',
                    ' '.join(platform.GetOverwriteList()))
    # Store overwrite entries without a leading slash so they can be matched
    # against archive-relative paths.
    self._overwrite_list = [re.sub('^/', '', x)
                            for x in platform.GetOverwriteList()]

  def _SetManifest(self, image_manifest):
    """For test only, allows to set a test manifest object."""
    self._manifest = image_manifest

  def SetScratchDirectory(self, directory):
    """Sets a directory used for storing intermediate results.

    Args:
      directory: scratch directory path.
    """
    self._scratch_dir = directory

  def IgnoreHardLinks(self):
    """Requests that hard links should not be copied as hard links."""
    # TODO(user): I don't see a reason for this option to exist. Currently
    # there is a difference in how this option is interpreted between FsTarball
    # and FsRawDisk. FsTarball only copies one hard link to an inode and
    # ignores the rest of them. FsRawDisk copies the content of a file that
    # hard link is pointing to instead of recreating a hard link. Either
    # option seems useless for creating a copy of a file system.
    self._ignore_hard_links = True

  def Verify(self):
    """Verify if we have all the components to build a tar."""
    self._Verify()

  def Bundleup(self):
    """Creates the tar image based on set parameters.

    Returns:
      the SHA1 digest of the the tar archive.
    """
    # Base-class stub; real bundling is done by subclasses.
    return (0, None)

  def _Verify(self):
    """Verifies the tar attributes.

    Raises:
      InvalidFsCopyError: If not all required parameters are set.
      FsCopyError: If a source file does not exist.
    """
    if not self._output_tarfile or not self._srcs or not self._key:
      raise InvalidFsCopyError('Incomplete copy spec')
    for (src, _, _) in self._srcs:
      if not os.path.exists(src):
        # FIX: error message said "does not exists".
        raise FsCopyError('%s does not exist' % src)

  def _ShouldExclude(self, filename):
    """Checks if a file/directory is excluded from a copy.

    Args:
      filename: a file/directory path.

    Returns:
      True if a file/directory shouldn't be copied, False otherwise.
    """
    for spec in self._excludes:
      if spec.ShouldExclude(filename):
        logging.info('tarfile: Excluded %s', filename)
        return True
    return False
apache-2.0
google-research/evoflow
evoflow/ops/dual_crossover.py
1
6303
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from evoflow.utils import slices2array
from evoflow.engine import OP
from evoflow import backend as B


class DualCrossover(OP):

    O_AUTOGRAPH = True
    O_XLA = False

    def __init__(self, population_fraction, max_crossover_probability,
                 **kwargs):
        """Perform Dual crossovers on a given population.

        Args:
            population_fraction (float): How many chromosomes
            should have a cross-over.

            max_crossover_probability (list(float)): What is the maximum
            fraction of the genes that will be affected by the crossover.

            debug (bool, optional): print debug information and function
            returns additional data.

        Returns:
            tensor: population with a crossover.

        See:
            https://en.wikipedia.org/wiki/Crossover_(genetic_algorithm)
        """
        if not (0 < population_fraction <= 1.0):
            raise ValueError("population_fraction must be in ]0. 1]")

        for val in max_crossover_probability:
            if not (0 < val <= 1.0):
                raise ValueError(
                    "max_crossover_probability values must be between ]0. "
                    "1]")

        self.population_fraction = population_fraction
        self.max_crossover_probability = max_crossover_probability
        super(DualCrossover, self).__init__(**kwargs)

    def call(self, population):
        # Mix genomes: crossover partners come from a shuffled copy of the
        # population.
        shuffled_population = B.copy(population)
        shuffled_population = B.shuffle(shuffled_population)

        # How many chromosomes to crossover.
        num_crossover_chromosomes = int(population.shape[0] *
                                        self.population_fraction)
        self.print_debug('num chromosomes', num_crossover_chromosomes)

        # Compute the shape of the crossed-over region: per dimension, a
        # random number of genes bounded by max_crossover_probability.
        mutations_shape = [num_crossover_chromosomes]
        for idx, frac in enumerate(self.max_crossover_probability):
            max_genes = int(population.shape[idx + 1] * frac + 1)
            if max_genes > 1:
                num_genes = B.randint(1, high=max_genes)
            else:
                num_genes = 1
            mutations_shape.append(num_genes)
        # FIX: removed a dead no-op self-assignment of mutations_shape here.
        self.print_debug("population_shape:", population.shape)
        self.print_debug("mutation_shape:", mutations_shape)

        # Compute the fancy indexing dynamically.
        # ! the start point must be randomized
        slices = [slice(0, num_crossover_chromosomes)]
        for idx, crossover_size in enumerate(mutations_shape[1:]):
            # ! making indexing explicit as its a huge pitfall
            mutation_dim = idx + 1
            max_start = population.shape[mutation_dim] - crossover_size + 1
            start = B.randint(0, max_start)
            slices.append(slice(start, crossover_size + start))
        slices = tuple(slices)
        tslices = slices2array(slices)
        self.print_debug('slices', slices)

        # Crossover: copy the selected region from the shuffled partners.
        cross_section = shuffled_population[slices]
        population = B.assign(population, cross_section, tslices)
        return population


class DualCrossover1D(DualCrossover):
    def __init__(self,
                 population_fraction=0.9,
                 max_crossover_probability=0.2,
                 **kwargs):
        if not isinstance(max_crossover_probability, float):
            raise ValueError('max_crossover_probability must be a float')

        super(DualCrossover1D, self).__init__(
            population_fraction=population_fraction,
            max_crossover_probability=[max_crossover_probability],
            **kwargs)


class DualCrossover2D(DualCrossover):
    def __init__(self,
                 population_fraction=0.9,
                 max_crossover_probability=(0.2, 0.2),
                 **kwargs):
        if len(max_crossover_probability) != 2:
            raise ValueError(
                'max_crossover_probability must be of form (x, y)')

        super(DualCrossover2D, self).__init__(
            population_fraction=population_fraction,
            max_crossover_probability=max_crossover_probability,
            **kwargs)


class DualCrossover3D(DualCrossover):
    def __init__(self,
                 population_fraction=0.9,
                 max_crossover_probability=(0.2, 0.2, 0.2),
                 **kwargs):
        if len(max_crossover_probability) != 3:
            raise ValueError(
                'max_crossover_probability must be of form (x, y, z)')

        super(DualCrossover3D, self).__init__(
            population_fraction=population_fraction,
            max_crossover_probability=max_crossover_probability,
            **kwargs)


if __name__ == '__main__':
    from copy import copy
    from evoflow.utils import op_optimization_benchmark
    # FIX: removed an unused `from termcolor import cprint` import.

    NUM_RUNS = 3
    pop_shape = (100, 200, 100)
    population = B.randint(0, 256, pop_shape)
    population_fraction = 0.5
    # FIX: was misnamed `max_reverse_probability` (copy-paste from the
    # Reverse op benchmark).
    max_crossover_probability = (0.5, 0.5)
    # FIX: was assigned to `OP`, shadowing the imported base class.
    bench_op = DualCrossover2D(population_fraction, max_crossover_probability,
                               optimization_level=0)
    op_optimization_benchmark(population, bench_op, NUM_RUNS).report()
    quit()

    GENOME_SHAPE = (6, 4, 4)
    population = B.randint(0, 256, GENOME_SHAPE)
    population_fraction = 0.5
    max_crossover_size_fraction = (0.5, 0.5)

    print(population.shape)
    original_population = copy(population)
    population = DualCrossover2D(population_fraction,
                                 max_crossover_size_fraction,
                                 debug=True)(population)

    # diff matrix
    diff = B.clip(abs(population - original_population), 0, 1)
    print(diff)
apache-2.0
prusnak/bitcoin
qa/rpc-tests/nodehandling.py
11
3834
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node handling.

Exercises the setban/listbanned/clearbanned RPCs (including banlist
persistence across restart) and the disconnectnode RPC against a small
4-node regtest network.
"""

from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *

import urllib.parse

class NodeHandlingTest (BitcoinTestFramework):

    def __init__(self):
        super().__init__()
        # 4 nodes, fully connected by the framework's default topology.
        self.num_nodes = 4
        self.setup_clean_chain = False

    def run_test(self):
        ###########################
        # setban/listbanned tests #
        ###########################
        assert_equal(len(self.nodes[2].getpeerinfo()), 4) #we should have 4 nodes at this point
        self.nodes[2].setban("127.0.0.1", "add")
        time.sleep(3) #wait till the nodes are disconnected
        assert_equal(len(self.nodes[2].getpeerinfo()), 0) #all nodes must be disconnected at this point
        assert_equal(len(self.nodes[2].listbanned()), 1)
        self.nodes[2].clearbanned()
        assert_equal(len(self.nodes[2].listbanned()), 0)
        self.nodes[2].setban("127.0.0.0/24", "add")
        assert_equal(len(self.nodes[2].listbanned()), 1)
        # This will throw an exception because 127.0.0.1 is within range 127.0.0.0/24
        assert_raises_jsonrpc(-23, "IP/Subnet already banned", self.nodes[2].setban, "127.0.0.1", "add")
        # This will throw an exception because 127.0.0.1/42 is not a real subnet
        assert_raises_jsonrpc(-30, "Error: Invalid IP/Subnet", self.nodes[2].setban, "127.0.0.1/42", "add")
        assert_equal(len(self.nodes[2].listbanned()), 1) #still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
        # This will throw an exception because 127.0.0.1 was not added above
        assert_raises_jsonrpc(-30, "Error: Unban failed", self.nodes[2].setban, "127.0.0.1", "remove")
        assert_equal(len(self.nodes[2].listbanned()), 1)
        self.nodes[2].setban("127.0.0.0/24", "remove")
        assert_equal(len(self.nodes[2].listbanned()), 0)
        self.nodes[2].clearbanned()
        assert_equal(len(self.nodes[2].listbanned()), 0)

        ##test persisted banlist
        # setban's optional third argument is the ban duration in seconds.
        self.nodes[2].setban("127.0.0.0/32", "add")
        self.nodes[2].setban("127.0.0.0/24", "add")
        self.nodes[2].setban("192.168.0.1", "add", 1) #ban for 1 second
        self.nodes[2].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) #ban for 1000 seconds
        listBeforeShutdown = self.nodes[2].listbanned()
        assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address']) #must be here
        time.sleep(2) #make 100% sure we expired 192.168.0.1 node time

        #stop node; the expired 192.168.0.1 entry must NOT survive the restart
        stop_node(self.nodes[2], 2)

        self.nodes[2] = start_node(2, self.options.tmpdir)
        listAfterShutdown = self.nodes[2].listbanned()
        assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
        assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
        assert_equal("/19" in listAfterShutdown[2]['address'], True)

        ###########################
        # RPC disconnectnode test #
        ###########################
        url = urllib.parse.urlparse(self.nodes[1].url)
        self.nodes[0].disconnectnode(url.hostname+":"+str(p2p_port(1)))
        time.sleep(2) #disconnecting a node needs a little bit of time
        for node in self.nodes[0].getpeerinfo():
            assert(node['addr'] != url.hostname+":"+str(p2p_port(1)))

        connect_nodes_bi(self.nodes,0,1) #reconnect the node
        found = False
        for node in self.nodes[0].getpeerinfo():
            if node['addr'] == url.hostname+":"+str(p2p_port(1)):
                found = True
        assert(found)

if __name__ == '__main__':
    NodeHandlingTest ().main ()
mit
openafs-contrib/afspy
afs/util/CacheManagerUtil.py
1
2003
"""
common cache-manager operations
"""

from afs.util.Executor import exec_wrapper
import ParseCacheManagerUtil as PM
from BaseUtil import BaseUtil


class CacheManagerUtil(BaseUtil):
    """Thin wrappers around the ``fs`` cache-manager commands.

    Each method builds a command list and returns it together with the
    parser callable that interprets the command's output; the actual
    execution is performed by the ``exec_wrapper`` decorator.
    """

    @exec_wrapper
    def flush_all(self, _cfg=None):
        """Force the AFS Cache Manager to discard all data."""
        CmdList = [_cfg.binaries["fs"], "flushall"]
        return CmdList, PM.parse_flushall

    @exec_wrapper
    def flush_volume(self, path, _cfg=None):
        """Force the Cache Manager to discard cached data from the volume
        containing ``path``."""
        CmdList = [_cfg.binaries["fs"], "flushvolume", "%s" % path]
        return CmdList, PM.parse_flushvolume

    @exec_wrapper
    def flush_mount(self, path, _cfg=None):
        """Force the Cache Manager to discard a mount point."""
        CmdList = [_cfg.binaries["fs"], "flushmount", "-path", "%s" % path]
        return CmdList, PM.parse_flushmount

    @exec_wrapper
    def flush(self, path, _cfg=None):
        """Force the Cache Manager to discard a cached file or directory."""
        CmdList = [_cfg.binaries["fs"], "flush", "-path", "%s" % path]
        return CmdList, PM.parse_flush

    @exec_wrapper
    def get_ws_cell(self, cache_manager, _cfg=None):
        """Return the name of the cell to which this machine belongs."""
        CmdList = [_cfg.binaries["fs"], "wscell"]
        return CmdList, PM.pull_ws_cell

    @exec_wrapper
    def get_cell_aliases(self, cache_manager, _cfg=None):
        """List defined cell aliases."""
        CmdList = [_cfg.binaries["fs"], "listaliases"]
        return CmdList, PM.pull_cell_aliases

    @exec_wrapper
    def new_cell_alias(self, cache_manager, alias, _cfg=None):
        """Set a new cell alias for the configured cell.

        BUG FIX: the original built ``"-alias" "%s" % alias`` (implicit
        string concatenation yielding a single ``-alias<value>`` token)
        and applied ``%`` to the bare string ``"-name"``, which raises
        TypeError at runtime. Flags and values are now separate list
        elements.
        """
        # NOTE(review): the subcommand name "newaliases" is kept from the
        # original -- confirm against the installed fs binary ("newalias"
        # in some OpenAFS releases).
        CmdList = [_cfg.binaries["fs"], "newaliases",
                   "-alias", "%s" % alias,
                   "-name", "%s" % _cfg.cell]
        return CmdList, PM.pull_cell_alias
bsd-2-clause
Edraak/edraak-platform
openedx/core/djangoapps/user_api/serializers.py
1
4358
"""
Django REST Framework serializers for the User API application
"""
from django.contrib.auth.models import User
from django.utils.timezone import now
from rest_framework import serializers

from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification, SSOVerification, ManualVerification

from .models import UserPreference


class UserSerializer(serializers.HyperlinkedModelSerializer):
    """
    Serializer that generates a representation of a User entity containing a subset of fields
    """
    name = serializers.SerializerMethodField()
    preferences = serializers.SerializerMethodField()

    def get_name(self, user):
        """
        Return the name attribute from the user's profile.

        NOTE(review): despite the original wording ("if profile exists else
        none"), this does not guard against a missing profile -- it raises
        if ``user.profile`` does not exist. Confirm callers always have one.
        """
        return user.profile.name

    def get_name_en(self, user):
        """
        Return the English name attribute from the user's profile.

        NOTE(review): 'name_en' is not listed in Meta.fields, so this method
        is never invoked by the serializer itself -- presumably kept for
        direct callers; verify before removing.
        """
        return user.profile.name_en

    def get_preferences(self, user):
        """
        Returns the set of preferences as a dict for the specified user
        """
        return UserPreference.get_all_preferences(user)

    class Meta(object):
        model = User
        # This list is the minimal set required by the notification service
        fields = ("id", "url", "email", "name", "username", "preferences")
        read_only_fields = ("id", "email", "username")


class UserPreferenceSerializer(serializers.HyperlinkedModelSerializer):
    """
    Serializer that generates a representation of a UserPreference entity.
    """
    user = UserSerializer()

    class Meta(object):
        model = UserPreference
        depth = 1
        fields = ('user', 'key', 'value', 'url')


class RawUserPreferenceSerializer(serializers.ModelSerializer):
    """
    Serializer that generates a raw representation of a user preference.

    Unlike UserPreferenceSerializer, the user is rendered as a primary key
    rather than a nested representation.
    """
    user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())

    class Meta(object):
        model = UserPreference
        depth = 1
        fields = ('user', 'key', 'value', 'url')


class ReadOnlyFieldsSerializerMixin(object):
    """
    Mixin for use with Serializers that provides a method
    `get_read_only_fields`, which returns a tuple of all read-only
    fields on the Serializer.
    """
    @classmethod
    def get_read_only_fields(cls):
        """
        Return all fields on this Serializer class which are read-only.
        Expects sub-classes implement Meta.explicit_read_only_fields,
        which is a tuple declaring read-only fields which were declared
        explicitly and thus could not be added to the usual
        cls.Meta.read_only_fields tuple.
        """
        return getattr(cls.Meta, 'read_only_fields', '') + getattr(cls.Meta, 'explicit_read_only_fields', '')

    @classmethod
    def get_writeable_fields(cls):
        """
        Return all fields on this serializer that are writeable.
        """
        all_fields = getattr(cls.Meta, 'fields', tuple())
        return tuple(set(all_fields) - set(cls.get_read_only_fields()))


class CountryTimeZoneSerializer(serializers.Serializer):  # pylint: disable=abstract-method
    """
    Serializer that generates a list of common time zones for a country
    """
    time_zone = serializers.CharField()
    description = serializers.CharField()


class IDVerificationSerializer(serializers.ModelSerializer):
    """
    Serializer that generates a representation of a user's ID verification
    status. Base class for the three concrete verification serializers below.
    """
    is_verified = serializers.SerializerMethodField()

    def get_is_verified(self, obj):
        """
        Return a boolean indicating if a the user is verified.
        """
        # Verified means approved AND not yet expired at the time of
        # serialization.
        return obj.status == 'approved' and obj.expiration_datetime > now()


class SoftwareSecurePhotoVerificationSerializer(IDVerificationSerializer):

    class Meta(object):
        fields = ('status', 'expiration_datetime', 'is_verified')
        model = SoftwareSecurePhotoVerification


class SSOVerificationSerializer(IDVerificationSerializer):

    class Meta(object):
        fields = ('status', 'expiration_datetime', 'is_verified')
        model = SSOVerification


class ManualVerificationSerializer(IDVerificationSerializer):

    class Meta(object):
        fields = ('status', 'expiration_datetime', 'is_verified')
        model = ManualVerification
agpl-3.0
raptorjr/xbmc
tools/EventClients/Clients/Kodi Send/kodi-send.py
137
2576
#!/usr/bin/python # # XBMC Media Center # XBMC Send # Copyright (c) 2009 team-xbmc # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. # import sys, os import getopt from socket import * try: from kodi.xbmcclient import * except: sys.path.append(os.path.join(os.path.realpath(os.path.dirname(__file__)), '../../lib/python')) from xbmcclient import * def usage(): print "kodi-send [OPTION] --action=ACTION" print 'Example' print '\tkodi-send --host=192.168.0.1 --port=9777 --action="Quit"' print "Options" print "\t-?, --help\t\t\tWill bring up this message" print "\t--host=HOST\t\t\tChoose what HOST to connect to (default=localhost)" print "\t--port=PORT\t\t\tChoose what PORT to connect to (default=9777)" print '\t--action=ACTION\t\t\tSends an action to XBMC, this option can be added multiple times to create a macro' pass def main(): try: opts, args = getopt.getopt(sys.argv[1:], "?pa:v", ["help", "host=", "port=", "action="]) except getopt.GetoptError, err: # print help information and exit: print str(err) # will print something like "option -a not recognized" usage() sys.exit(2) ip = "localhost" port = 9777 actions = [] verbose = False for o, a in opts: if o in ("-?", "--help"): usage() sys.exit() elif o == "--host": ip = a elif o == "--port": port = int(a) elif o in ("-a", "--action"): actions.append(a) else: assert False, "unhandled option" addr = 
(ip, port) sock = socket(AF_INET,SOCK_DGRAM) if len(actions) is 0: usage() sys.exit(0) for action in actions: print 'Sending action:', action packet = PacketACTION(actionmessage=action, actiontype=ACTION_BUTTON) packet.send(sock, addr) if __name__=="__main__": main()
gpl-2.0
arne-cl/pattern
examples/04-search/04-taxonomy.py
21
2576
import os, sys; sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))

from pattern.search import search, taxonomy, Classifier
from pattern.en import parsetree

# NOTE: Python 2 example script (print statements); statement order matters
# below because taxonomy.classify() yields the most recently added parent.

# The search module includes a Taxonomy class
# that can be used to define semantic word types.
# For example, consider that you want to extract flower names from a text.
# This would make search patterns somewhat unwieldy:
# search("rose|lily|daisy|daffodil|begonia", txt).
# A better approach is to use the taxonomy:
for flower in ("rose", "lily", "daisy", "daffodil", "begonia"):
    taxonomy.append(flower, type="flower")

print taxonomy.children("flower")
print taxonomy.parents("rose")
print taxonomy.classify("rose") # Yields the most recently added parent.
print

# Taxonomy terms can be included in a pattern by using uppercase:
t = parsetree("A field of white daffodils.", lemmata=True)
m = search("FLOWER", t)
print t
print m
print

# Another example: a term can belong to several types at once.
taxonomy.append("chicken", type="food")
taxonomy.append("chicken", type="bird")
taxonomy.append("penguin", type="bird")
taxonomy.append("bird", type="animal")
print taxonomy.parents("chicken")
print taxonomy.children("animal", recursive=True)
print search("FOOD", "I'm eating chicken.")
print

# The advantage is that the taxonomy can hold an entire hierarchy.
# For example, "flower" could be classified as "organism".
# Other organisms could be defined as well (insects, trees, mammals, ...)
# The ORGANISM constraint then matches everything that is an organism.

# A taxonomy entry can also be a proper name containing spaces
# (e.g. "windows vista", case insensitive).
# It will be detected as long as it is contained in a single chunk:
taxonomy.append("windows vista", type="operating system")
taxonomy.append("ubuntu", type="operating system")
t = parsetree("Which do you like more, Windows Vista, or Ubuntu?")
m = search("OPERATING_SYSTEM", t)
print t
print m
print m[0].constituents()
print

# Taxonomy entries cannot have wildcards (*),
# but you can use a classifier to simulate this.
# Classifiers are quite slow but useful in many ways.
# For example, a classifier could be written to dynamically
# retrieve word categories from WordNet.
def find_parents(word):
    # Classifier callback: return the list of parent types for a word,
    # matching any prefix instead of a literal taxonomy entry.
    if word.startswith(("mac os", "windows", "ubuntu")):
        return ["operating system"]

c = Classifier(parents=find_parents)
taxonomy.classifiers.append(c)

t = parsetree("I like Mac OS X 10.5 better than Windows XP or Ubuntu.")
m = search("OPERATING_SYSTEM", t)
print t
print m
print m[0].constituents()
print m[1].constituents()
print
bsd-3-clause
BabeNovelty/numpy
numpy/matrixlib/tests/test_defmatrix.py
130
14801
from __future__ import division, absolute_import, print_function import collections import numpy as np from numpy import matrix, asmatrix, bmat from numpy.testing import ( TestCase, run_module_suite, assert_, assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, assert_raises ) from numpy.matrixlib.defmatrix import matrix_power from numpy.matrixlib import mat class TestCtor(TestCase): def test_basic(self): A = np.array([[1, 2], [3, 4]]) mA = matrix(A) assert_(np.all(mA.A == A)) B = bmat("A,A;A,A") C = bmat([[A, A], [A, A]]) D = np.array([[1, 2, 1, 2], [3, 4, 3, 4], [1, 2, 1, 2], [3, 4, 3, 4]]) assert_(np.all(B.A == D)) assert_(np.all(C.A == D)) E = np.array([[5, 6], [7, 8]]) AEresult = matrix([[1, 2, 5, 6], [3, 4, 7, 8]]) assert_(np.all(bmat([A, E]) == AEresult)) vec = np.arange(5) mvec = matrix(vec) assert_(mvec.shape == (1, 5)) def test_exceptions(self): # Check for TypeError when called with invalid string data. assert_raises(TypeError, matrix, "invalid") def test_bmat_nondefault_str(self): A = np.array([[1, 2], [3, 4]]) B = np.array([[5, 6], [7, 8]]) Aresult = np.array([[1, 2, 1, 2], [3, 4, 3, 4], [1, 2, 1, 2], [3, 4, 3, 4]]) mixresult = np.array([[1, 2, 5, 6], [3, 4, 7, 8], [5, 6, 1, 2], [7, 8, 3, 4]]) assert_(np.all(bmat("A,A;A,A") == Aresult)) assert_(np.all(bmat("A,A;A,A", ldict={'A':B}) == Aresult)) assert_raises(TypeError, bmat, "A,A;A,A", gdict={'A':B}) assert_( np.all(bmat("A,A;A,A", ldict={'A':A}, gdict={'A':B}) == Aresult)) b2 = bmat("A,B;C,D", ldict={'A':A,'B':B}, gdict={'C':B,'D':A}) assert_(np.all(b2 == mixresult)) class TestProperties(TestCase): def test_sum(self): """Test whether matrix.sum(axis=1) preserves orientation. Fails in NumPy <= 0.9.6.2127. 
""" M = matrix([[1, 2, 0, 0], [3, 4, 0, 0], [1, 2, 1, 2], [3, 4, 3, 4]]) sum0 = matrix([8, 12, 4, 6]) sum1 = matrix([3, 7, 6, 14]).T sumall = 30 assert_array_equal(sum0, M.sum(axis=0)) assert_array_equal(sum1, M.sum(axis=1)) assert_equal(sumall, M.sum()) assert_array_equal(sum0, np.sum(M, axis=0)) assert_array_equal(sum1, np.sum(M, axis=1)) assert_equal(sumall, np.sum(M)) def test_prod(self): x = matrix([[1, 2, 3], [4, 5, 6]]) assert_equal(x.prod(), 720) assert_equal(x.prod(0), matrix([[4, 10, 18]])) assert_equal(x.prod(1), matrix([[6], [120]])) assert_equal(np.prod(x), 720) assert_equal(np.prod(x, axis=0), matrix([[4, 10, 18]])) assert_equal(np.prod(x, axis=1), matrix([[6], [120]])) y = matrix([0, 1, 3]) assert_(y.prod() == 0) def test_max(self): x = matrix([[1, 2, 3], [4, 5, 6]]) assert_equal(x.max(), 6) assert_equal(x.max(0), matrix([[4, 5, 6]])) assert_equal(x.max(1), matrix([[3], [6]])) assert_equal(np.max(x), 6) assert_equal(np.max(x, axis=0), matrix([[4, 5, 6]])) assert_equal(np.max(x, axis=1), matrix([[3], [6]])) def test_min(self): x = matrix([[1, 2, 3], [4, 5, 6]]) assert_equal(x.min(), 1) assert_equal(x.min(0), matrix([[1, 2, 3]])) assert_equal(x.min(1), matrix([[1], [4]])) assert_equal(np.min(x), 1) assert_equal(np.min(x, axis=0), matrix([[1, 2, 3]])) assert_equal(np.min(x, axis=1), matrix([[1], [4]])) def test_ptp(self): x = np.arange(4).reshape((2, 2)) assert_(x.ptp() == 3) assert_(np.all(x.ptp(0) == np.array([2, 2]))) assert_(np.all(x.ptp(1) == np.array([1, 1]))) def test_var(self): x = np.arange(9).reshape((3, 3)) mx = x.view(np.matrix) assert_equal(x.var(ddof=0), mx.var(ddof=0)) assert_equal(x.var(ddof=1), mx.var(ddof=1)) def test_basic(self): import numpy.linalg as linalg A = np.array([[1., 2.], [3., 4.]]) mA = matrix(A) assert_(np.allclose(linalg.inv(A), mA.I)) assert_(np.all(np.array(np.transpose(A) == mA.T))) assert_(np.all(np.array(np.transpose(A) == mA.H))) assert_(np.all(A == mA.A)) B = A + 2j*A mB = matrix(B) 
assert_(np.allclose(linalg.inv(B), mB.I)) assert_(np.all(np.array(np.transpose(B) == mB.T))) assert_(np.all(np.array(np.transpose(B).conj() == mB.H))) def test_pinv(self): x = matrix(np.arange(6).reshape(2, 3)) xpinv = matrix([[-0.77777778, 0.27777778], [-0.11111111, 0.11111111], [ 0.55555556, -0.05555556]]) assert_almost_equal(x.I, xpinv) def test_comparisons(self): A = np.arange(100).reshape(10, 10) mA = matrix(A) mB = matrix(A) + 0.1 assert_(np.all(mB == A+0.1)) assert_(np.all(mB == matrix(A+0.1))) assert_(not np.any(mB == matrix(A-0.1))) assert_(np.all(mA < mB)) assert_(np.all(mA <= mB)) assert_(np.all(mA <= mA)) assert_(not np.any(mA < mA)) assert_(not np.any(mB < mA)) assert_(np.all(mB >= mA)) assert_(np.all(mB >= mB)) assert_(not np.any(mB > mB)) assert_(np.all(mA == mA)) assert_(not np.any(mA == mB)) assert_(np.all(mB != mA)) assert_(not np.all(abs(mA) > 0)) assert_(np.all(abs(mB > 0))) def test_asmatrix(self): A = np.arange(100).reshape(10, 10) mA = asmatrix(A) A[0, 0] = -10 assert_(A[0, 0] == mA[0, 0]) def test_noaxis(self): A = matrix([[1, 0], [0, 1]]) assert_(A.sum() == matrix(2)) assert_(A.mean() == matrix(0.5)) def test_repr(self): A = matrix([[1, 0], [0, 1]]) assert_(repr(A) == "matrix([[1, 0],\n [0, 1]])") class TestCasting(TestCase): def test_basic(self): A = np.arange(100).reshape(10, 10) mA = matrix(A) mB = mA.copy() O = np.ones((10, 10), np.float64) * 0.1 mB = mB + O assert_(mB.dtype.type == np.float64) assert_(np.all(mA != mB)) assert_(np.all(mB == mA+0.1)) mC = mA.copy() O = np.ones((10, 10), np.complex128) mC = mC * O assert_(mC.dtype.type == np.complex128) assert_(np.all(mA != mB)) class TestAlgebra(TestCase): def test_basic(self): import numpy.linalg as linalg A = np.array([[1., 2.], [3., 4.]]) mA = matrix(A) B = np.identity(2) for i in range(6): assert_(np.allclose((mA ** i).A, B)) B = np.dot(B, A) Ainv = linalg.inv(A) B = np.identity(2) for i in range(6): assert_(np.allclose((mA ** -i).A, B)) B = np.dot(B, Ainv) assert_(np.allclose((mA * 
mA).A, np.dot(A, A))) assert_(np.allclose((mA + mA).A, (A + A))) assert_(np.allclose((3*mA).A, (3*A))) mA2 = matrix(A) mA2 *= 3 assert_(np.allclose(mA2.A, 3*A)) def test_pow(self): """Test raising a matrix to an integer power works as expected.""" m = matrix("1. 2.; 3. 4.") m2 = m.copy() m2 **= 2 mi = m.copy() mi **= -1 m4 = m2.copy() m4 **= 2 assert_array_almost_equal(m2, m**2) assert_array_almost_equal(m4, np.dot(m2, m2)) assert_array_almost_equal(np.dot(mi, m), np.eye(2)) def test_notimplemented(self): '''Check that 'not implemented' operations produce a failure.''' A = matrix([[1., 2.], [3., 4.]]) # __rpow__ try: 1.0**A except TypeError: pass else: self.fail("matrix.__rpow__ doesn't raise a TypeError") # __mul__ with something not a list, ndarray, tuple, or scalar try: A*object() except TypeError: pass else: self.fail("matrix.__mul__ with non-numeric object doesn't raise" "a TypeError") class TestMatrixReturn(TestCase): def test_instance_methods(self): a = matrix([1.0], dtype='f8') methodargs = { 'astype': ('intc',), 'clip': (0.0, 1.0), 'compress': ([1],), 'repeat': (1,), 'reshape': (1,), 'swapaxes': (0, 0), 'dot': np.array([1.0]), } excluded_methods = [ 'argmin', 'choose', 'dump', 'dumps', 'fill', 'getfield', 'getA', 'getA1', 'item', 'nonzero', 'put', 'putmask', 'resize', 'searchsorted', 'setflags', 'setfield', 'sort', 'partition', 'argpartition', 'take', 'tofile', 'tolist', 'tostring', 'tobytes', 'all', 'any', 'sum', 'argmax', 'argmin', 'min', 'max', 'mean', 'var', 'ptp', 'prod', 'std', 'ctypes', 'itemset', ] for attrib in dir(a): if attrib.startswith('_') or attrib in excluded_methods: continue f = getattr(a, attrib) if isinstance(f, collections.Callable): # reset contents of a a.astype('f8') a.fill(1.0) if attrib in methodargs: args = methodargs[attrib] else: args = () b = f(*args) assert_(type(b) is matrix, "%s" % attrib) assert_(type(a.real) is matrix) assert_(type(a.imag) is matrix) c, d = matrix([0.0]).nonzero() assert_(type(c) is np.ndarray) 
assert_(type(d) is np.ndarray) class TestIndexing(TestCase): def test_basic(self): x = asmatrix(np.zeros((3, 2), float)) y = np.zeros((3, 1), float) y[:, 0] = [0.8, 0.2, 0.3] x[:, 1] = y > 0.5 assert_equal(x, [[0, 1], [0, 0], [0, 0]]) class TestNewScalarIndexing(TestCase): def setUp(self): self.a = matrix([[1, 2], [3, 4]]) def test_dimesions(self): a = self.a x = a[0] assert_equal(x.ndim, 2) def test_array_from_matrix_list(self): a = self.a x = np.array([a, a]) assert_equal(x.shape, [2, 2, 2]) def test_array_to_list(self): a = self.a assert_equal(a.tolist(), [[1, 2], [3, 4]]) def test_fancy_indexing(self): a = self.a x = a[1, [0, 1, 0]] assert_(isinstance(x, matrix)) assert_equal(x, matrix([[3, 4, 3]])) x = a[[1, 0]] assert_(isinstance(x, matrix)) assert_equal(x, matrix([[3, 4], [1, 2]])) x = a[[[1], [0]], [[1, 0], [0, 1]]] assert_(isinstance(x, matrix)) assert_equal(x, matrix([[4, 3], [1, 2]])) def test_matrix_element(self): x = matrix([[1, 2, 3], [4, 5, 6]]) assert_equal(x[0][0], matrix([[1, 2, 3]])) assert_equal(x[0][0].shape, (1, 3)) assert_equal(x[0].shape, (1, 3)) assert_equal(x[:, 0].shape, (2, 1)) x = matrix(0) assert_equal(x[0, 0], 0) assert_equal(x[0], 0) assert_equal(x[:, 0].shape, x.shape) def test_scalar_indexing(self): x = asmatrix(np.zeros((3, 2), float)) assert_equal(x[0, 0], x[0][0]) def test_row_column_indexing(self): x = asmatrix(np.eye(2)) assert_array_equal(x[0,:], [[1, 0]]) assert_array_equal(x[1,:], [[0, 1]]) assert_array_equal(x[:, 0], [[1], [0]]) assert_array_equal(x[:, 1], [[0], [1]]) def test_boolean_indexing(self): A = np.arange(6) A.shape = (3, 2) x = asmatrix(A) assert_array_equal(x[:, np.array([True, False])], x[:, 0]) assert_array_equal(x[np.array([True, False, False]),:], x[0,:]) def test_list_indexing(self): A = np.arange(6) A.shape = (3, 2) x = asmatrix(A) assert_array_equal(x[:, [1, 0]], x[:, ::-1]) assert_array_equal(x[[2, 1, 0],:], x[::-1,:]) class TestPower(TestCase): def test_returntype(self): a = np.array([[0, 1], [0, 0]]) 
assert_(type(matrix_power(a, 2)) is np.ndarray) a = mat(a) assert_(type(matrix_power(a, 2)) is matrix) def test_list(self): assert_array_equal(matrix_power([[0, 1], [0, 0]], 2), [[0, 0], [0, 0]]) class TestShape(TestCase): def setUp(self): self.a = np.array([[1], [2]]) self.m = matrix([[1], [2]]) def test_shape(self): assert_equal(self.a.shape, (2, 1)) assert_equal(self.m.shape, (2, 1)) def test_numpy_ravel(self): assert_equal(np.ravel(self.a).shape, (2,)) assert_equal(np.ravel(self.m).shape, (2,)) def test_member_ravel(self): assert_equal(self.a.ravel().shape, (2,)) assert_equal(self.m.ravel().shape, (1, 2)) def test_member_flatten(self): assert_equal(self.a.flatten().shape, (2,)) assert_equal(self.m.flatten().shape, (1, 2)) def test_numpy_ravel_order(self): x = np.array([[1, 2, 3], [4, 5, 6]]) assert_equal(np.ravel(x), [1, 2, 3, 4, 5, 6]) assert_equal(np.ravel(x, order='F'), [1, 4, 2, 5, 3, 6]) assert_equal(np.ravel(x.T), [1, 4, 2, 5, 3, 6]) assert_equal(np.ravel(x.T, order='A'), [1, 2, 3, 4, 5, 6]) x = matrix([[1, 2, 3], [4, 5, 6]]) assert_equal(np.ravel(x), [1, 2, 3, 4, 5, 6]) assert_equal(np.ravel(x, order='F'), [1, 4, 2, 5, 3, 6]) assert_equal(np.ravel(x.T), [1, 4, 2, 5, 3, 6]) assert_equal(np.ravel(x.T, order='A'), [1, 2, 3, 4, 5, 6]) def test_matrix_ravel_order(self): x = matrix([[1, 2, 3], [4, 5, 6]]) assert_equal(x.ravel(), [[1, 2, 3, 4, 5, 6]]) assert_equal(x.ravel(order='F'), [[1, 4, 2, 5, 3, 6]]) assert_equal(x.T.ravel(), [[1, 4, 2, 5, 3, 6]]) assert_equal(x.T.ravel(order='A'), [[1, 2, 3, 4, 5, 6]]) def test_array_memory_sharing(self): assert_(np.may_share_memory(self.a, self.a.ravel())) assert_(not np.may_share_memory(self.a, self.a.flatten())) def test_matrix_memory_sharing(self): assert_(np.may_share_memory(self.m, self.m.ravel())) assert_(not np.may_share_memory(self.m, self.m.flatten())) if __name__ == "__main__": run_module_suite()
bsd-3-clause