repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
android-ia/platform_external_chromium_org
tools/telemetry/telemetry/core/gpu_device_unittest.py
33
1491
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import unittest from telemetry.core import gpu_device class TestGPUDevice(unittest.TestCase): def testConstruction(self): device = gpu_device.GPUDevice(1000, 2000, 'test_vendor', 'test_device') self.assertEquals(device.vendor_id, 1000) self.assertEquals(device.device_id, 2000) self.assertEquals(device.vendor_string, 'test_vendor') self.assertEquals(device.device_string, 'test_device') def testFromDict(self): dictionary = { 'vendor_id': 3000, 'device_id': 4000, 'vendor_string': 'test_vendor_2', 'device_string': 'test_device_2' } device = gpu_device.GPUDevice.FromDict(dictionary) self.assertEquals(device.vendor_id, 3000) self.assertEquals(device.device_id, 4000) self.assertEquals(device.vendor_string, 'test_vendor_2') self.assertEquals(device.device_string, 'test_device_2') def testMissingAttrsFromDict(self): data = { 'vendor_id': 1, 'device_id': 2, 'vendor_string': 'a', 'device_string': 'b' } for k in data: data_copy = data.copy() del data_copy[k] try: gpu_device.GPUDevice.FromDict(data_copy) self.fail('Should raise exception if attribute "%s" is missing' % k) except AssertionError: raise except: pass
bsd-3-clause
psa-lab/siteinterlock
siteinterlock/proflex_utils/tests/test_hether.py
1
4792
# Sebastian Raschka 2016 # # `siteinterlock` is a Python package for selecting near-native protein-ligand # docking poses based upon the hypothesis that interfacial rigidification # of both the protein and ligand prove to be important characteristics of # the native binding mode and are sensitive to the spatial coupling of # interactions and bond-rotational degrees of freedom in the interface. # # Copyright (C) 2016 Michigan State University # License: GPLv3 # # SiteInterlock was developed in the # Protein Structural Analysis & Design Laboratory # (http://www.kuhnlab.bmb.msu.edu) # Contact email: kuhnlab@msu.edu # # Package author: Sebastian Raschka <http://sebastianraschka.com> # import os import json from siteinterlock.proflex_utils import hether from siteinterlock.proflex_utils.hether import _read_pflexdataset from siteinterlock.proflex_utils.hether import _read_decomp from siteinterlock.proflex_utils.hether import _cluster_mapping from siteinterlock.proflex_utils.hether import _cluster_rigidity from siteinterlock.proflex_utils.hether import _rigidity_diff absdir = os.path.dirname(os.path.abspath(__file__)) N_ATOMS = None TETHER_CNT = None def test_read_pflexdataset(): rel_test_data = 'hether_test_data/hether_testcase_0/1RA1_H_proflexdataset' abs_test_data = os.path.join(absdir, rel_test_data) pflexd_atom_cnt, pflexd_calphas, pflexd_tether_cnt = \ _read_pflexdataset(abs_test_data) assert(pflexd_atom_cnt == 2489) assert(pflexd_tether_cnt == 291) assert(len(pflexd_calphas) == 159) assert(pflexd_calphas[:3] == [2, 21, 40]) def test_read_decomp(): rel_test_data = 'hether_test_data/hether_testcase_0/decomp_list' abs_test_data = os.path.join(absdir, rel_test_data) dd, n_residues, n_atoms = _read_decomp(abs_test_data) assert(len(dd) == 131) assert(len(dd[0]['clusters']) == 2780) assert(dd[0]['energy'] == -9.80474), dd[0]['energy'] assert(dd[0]['clusters'][:3] == [636, 635, 266]) assert(n_residues == 159) assert(n_atoms == 2780) def test_cluster_mapping(): rel_test_data 
= 'hether_test_data/hether_testcase_0/decomp_dict.json' abs_test_data = os.path.join(absdir, rel_test_data) with open(abs_test_data, 'r') as t: s = t.read() dd = json.loads(s) dd = dict((int(k), v) for k, v in dd.items()) rel_test_data = 'hether_test_data/hether_testcase_0/calphas.json' abs_test_data = os.path.join(absdir, rel_test_data) with open(abs_test_data, 'r') as t: s = t.read() calphas = json.loads(s) for i in dd.keys(): cluster_dict = _cluster_mapping(calpha_idx=calphas, cluster_idx=dd[i]['clusters']) assert(sum([i for i in cluster_dict.values()]) == 159) def test_cluster_rigidity(): rel_test_data = 'hether_test_data/hether_testcase_0/cluster_dict.json' abs_test_data = os.path.join(absdir, rel_test_data) with open(abs_test_data, 'r') as t: s = t.read() cluster_dict = json.loads(s) rig, n_clust = _cluster_rigidity(cluster_dict=cluster_dict) assert(round(rig, 3) == 0.849) assert(n_clust == 2) def test_hether(): rel_test_data = 'hether_test_data/hether_testcase_0/1RA1_H_proflexdataset' pflexdataset = os.path.join(absdir, rel_test_data) rel_test_data = 'hether_test_data/hether_testcase_0/decomp_list' decomplist = os.path.join(absdir, rel_test_data) energy, rigidity, n_cluster = hether(pflexdataset_file=pflexdataset, decomp_file=decomplist) assert(n_cluster == 2) assert(round(rigidity, 2) == 0.82) assert(energy == -0.711), energy def test_hether_1ahc(): rel_test_data = ('hether_test_data/hether_testcase_1ahc/' '1ahc_no_lig_proflexdataset') pflexdataset = os.path.join(absdir, rel_test_data) rel_test_data = 'hether_test_data/hether_testcase_1ahc/decomp_list' decomplist = os.path.join(absdir, rel_test_data) energy, rigidity, n_cluster = hether(pflexdataset_file=pflexdataset, decomp_file=decomplist) assert(n_cluster == 3) assert(round(rigidity, 2) == 0.76) assert(energy == -0.393) def test_hether_1a9x(): rel_test_data = ('hether_test_data/hether_testcase_1a9x/' '1A9X_nolig_proflexdataset') pflexdataset = os.path.join(absdir, rel_test_data) rel_test_data = 
'hether_test_data/hether_testcase_1a9x/decomp_list' decomplist = os.path.join(absdir, rel_test_data) energy, rigidity, n_cluster = hether(pflexdataset_file=pflexdataset, decomp_file=decomplist) assert(n_cluster == 5) assert(round(rigidity, 2) == 0.83) assert(energy == -0.292)
gpl-3.0
swayf/pyLoad
module/plugins/crypter/MediafireComFolder.py
2
2323
# -*- coding: utf-8 -*- import re from module.plugins.Crypter import Crypter from module.plugins.hoster.MediafireCom import checkHTMLHeader from module.common.json_layer import json_loads class MediafireComFolder(Crypter): __name__ = "MediafireComFolder" __type__ = "crypter" __pattern__ = r"http://(\w*\.)*mediafire\.com/(folder/|\?sharekey=|\?\w{13}($|[/#]))" __version__ = "0.14" __description__ = """Mediafire.com Folder Plugin""" __author_name__ = ("zoidberg") __author_mail__ = ("zoidberg@mujmail.cz") FOLDER_KEY_PATTERN = r"var afI= '(\w+)';" FILE_URL_PATTERN = '<meta property="og:url" content="http://www.mediafire.com/\?(\w+)"/>' def decrypt(self, pyfile): new_links = [] url, result = checkHTMLHeader(pyfile.url) self.logDebug('Location (%d): %s' % (result, url)) if result == 0: # load and parse html html = self.load(pyfile.url) found = re.search(self.FILE_URL_PATTERN, html) if found: # file page new_links.append("http://www.mediafire.com/file/%s" % found.group(1)) else: # folder page found = re.search(self.FOLDER_KEY_PATTERN, html) if found: folder_key = found.group(1) self.logDebug("FOLDER KEY: %s" % folder_key) json_resp = json_loads(self.load("http://www.mediafire.com/api/folder/get_info.php?folder_key=%s&response_format=json&version=1" % folder_key)) #self.logInfo(json_resp) if json_resp['response']['result'] == "Success": for link in json_resp['response']['folder_info']['files']: new_links.append("http://www.mediafire.com/file/%s" % link['quickkey']) else: self.fail(json_resp['response']['message']) elif result == 1: self.offline() else: new_links.append(url) if new_links: self.core.files.addLinks(new_links, self.pyfile.package().id) else: self.fail('Could not extract any links')
agpl-3.0
amisrs/angular-flask
angular_flask/lib/python2.7/site-packages/sqlalchemy/testing/suite/test_insert.py
8
5173
from .. import fixtures, config from ..config import requirements from .. import exclusions from ..assertions import eq_ from .. import engines from sqlalchemy import Integer, String, select, util from ..schema import Table, Column class LastrowidTest(fixtures.TablesTest): run_deletes = 'each' __requires__ = 'implements_get_lastrowid', 'autoincrement_insert' __engine_options__ = {"implicit_returning": False} @classmethod def define_tables(cls, metadata): Table('autoinc_pk', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50)) ) Table('manual_pk', metadata, Column('id', Integer, primary_key=True, autoincrement=False), Column('data', String(50)) ) def _assert_round_trip(self, table, conn): row = conn.execute(table.select()).first() eq_( row, (config.db.dialect.default_sequence_base, "some data") ) def test_autoincrement_on_insert(self): config.db.execute( self.tables.autoinc_pk.insert(), data="some data" ) self._assert_round_trip(self.tables.autoinc_pk, config.db) def test_last_inserted_id(self): r = config.db.execute( self.tables.autoinc_pk.insert(), data="some data" ) pk = config.db.scalar(select([self.tables.autoinc_pk.c.id])) eq_( r.inserted_primary_key, [pk] ) @exclusions.fails_if(lambda: util.pypy, "lastrowid not maintained after " "connection close") @requirements.dbapi_lastrowid def test_native_lastrowid_autoinc(self): r = config.db.execute( self.tables.autoinc_pk.insert(), data="some data" ) lastrowid = r.lastrowid pk = config.db.scalar(select([self.tables.autoinc_pk.c.id])) eq_( lastrowid, pk ) class InsertBehaviorTest(fixtures.TablesTest): run_deletes = 'each' @classmethod def define_tables(cls, metadata): Table('autoinc_pk', metadata, Column('id', Integer, primary_key=True, \ test_needs_autoincrement=True), Column('data', String(50)) ) def test_autoclose_on_insert(self): if requirements.returning.enabled: engine = engines.testing_engine( options={'implicit_returning': False}) else: engine = config.db 
r = engine.execute( self.tables.autoinc_pk.insert(), data="some data" ) assert r.closed assert r.is_insert assert not r.returns_rows @requirements.returning def test_autoclose_on_insert_implicit_returning(self): r = config.db.execute( self.tables.autoinc_pk.insert(), data="some data" ) assert r.closed assert r.is_insert assert not r.returns_rows @requirements.empty_inserts def test_empty_insert(self): r = config.db.execute( self.tables.autoinc_pk.insert(), ) assert r.closed r = config.db.execute( self.tables.autoinc_pk.select().\ where(self.tables.autoinc_pk.c.id != None) ) assert len(r.fetchall()) class ReturningTest(fixtures.TablesTest): run_deletes = 'each' __requires__ = 'returning', 'autoincrement_insert' __engine_options__ = {"implicit_returning": True} def _assert_round_trip(self, table, conn): row = conn.execute(table.select()).first() eq_( row, (config.db.dialect.default_sequence_base, "some data") ) @classmethod def define_tables(cls, metadata): Table('autoinc_pk', metadata, Column('id', Integer, primary_key=True, \ test_needs_autoincrement=True), Column('data', String(50)) ) def test_explicit_returning_pk(self): engine = config.db table = self.tables.autoinc_pk r = engine.execute( table.insert().returning( table.c.id), data="some data" ) pk = r.first()[0] fetched_pk = config.db.scalar(select([table.c.id])) eq_(fetched_pk, pk) def test_autoincrement_on_insert_implcit_returning(self): config.db.execute( self.tables.autoinc_pk.insert(), data="some data" ) self._assert_round_trip(self.tables.autoinc_pk, config.db) def test_last_inserted_id_implicit_returning(self): r = config.db.execute( self.tables.autoinc_pk.insert(), data="some data" ) pk = config.db.scalar(select([self.tables.autoinc_pk.c.id])) eq_( r.inserted_primary_key, [pk] ) __all__ = ('LastrowidTest', 'InsertBehaviorTest', 'ReturningTest')
mit
dongguangming/python-phonenumbers
python/phonenumbers/data/region_CU.py
11
1999
"""Auto-generated file, do not edit by hand. CU metadata""" from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata PHONE_METADATA_CU = PhoneMetadata(id='CU', country_code=53, international_prefix='119', general_desc=PhoneNumberDesc(national_number_pattern='[2-57]\\d{5,7}', possible_number_pattern='\\d{4,8}'), fixed_line=PhoneNumberDesc(national_number_pattern='2[1-4]\\d{5,6}|3(?:1\\d{6}|[23]\\d{4,6})|4(?:[125]\\d{5,6}|[36]\\d{6}|[78]\\d{4,6})|7\\d{6,7}', possible_number_pattern='\\d{4,8}', example_number='71234567'), mobile=PhoneNumberDesc(national_number_pattern='5\\d{7}', possible_number_pattern='\\d{8}', example_number='51234567'), toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'), premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'), shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'), personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'), voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'), pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'), uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'), voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'), no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'), national_prefix='0', national_prefix_for_parsing='0', number_format=[NumberFormat(pattern='(\\d)(\\d{6,7})', format='\\1 \\2', leading_digits_pattern=['7'], national_prefix_formatting_rule='(0\\1)'), NumberFormat(pattern='(\\d{2})(\\d{4,6})', format='\\1 \\2', leading_digits_pattern=['[2-4]'], national_prefix_formatting_rule='(0\\1)'), NumberFormat(pattern='(\\d)(\\d{7})', format='\\1 \\2', leading_digits_pattern=['5'], national_prefix_formatting_rule='0\\1')])
apache-2.0
MoKee/android_kernel_htc_villec2
tools/perf/scripts/python/check-perf-trace.py
11214
2503
# perf script event handlers, generated by perf script -g python # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # This script tests basic functionality such as flag and symbol # strings, common_xxx() calls back into perf, begin, end, unhandled # events, etc. Basically, if this script runs successfully and # displays expected results, Python scripting support should be ok. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from Core import * from perf_trace_context import * unhandled = autodict() def trace_begin(): print "trace_begin" pass def trace_end(): print_unhandled() def irq__softirq_entry(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, vec): print_header(event_name, common_cpu, common_secs, common_nsecs, common_pid, common_comm) print_uncommon(context) print "vec=%s\n" % \ (symbol_str("irq__softirq_entry", "vec", vec)), def kmem__kmalloc(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, call_site, ptr, bytes_req, bytes_alloc, gfp_flags): print_header(event_name, common_cpu, common_secs, common_nsecs, common_pid, common_comm) print_uncommon(context) print "call_site=%u, ptr=%u, bytes_req=%u, " \ "bytes_alloc=%u, gfp_flags=%s\n" % \ (call_site, ptr, bytes_req, bytes_alloc, flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)), def trace_unhandled(event_name, context, event_fields_dict): try: unhandled[event_name] += 1 except TypeError: unhandled[event_name] = 1 def print_header(event_name, cpu, secs, nsecs, pid, comm): print "%-20s %5u %05u.%09u %8u %-20s " % \ (event_name, cpu, secs, nsecs, pid, comm), # print trace fields not included in handler args def print_uncommon(context): print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \ % (common_pc(context), trace_flag_str(common_flags(context)), \ common_lock_depth(context)) def 
print_unhandled(): keys = unhandled.keys() if not keys: return print "\nunhandled events:\n\n", print "%-40s %10s\n" % ("event", "count"), print "%-40s %10s\n" % ("----------------------------------------", \ "-----------"), for event_name in keys: print "%-40s %10d\n" % (event_name, unhandled[event_name])
gpl-2.0
prehawk1999/xiamione
creator.py
1
6062
# -*- coding: UTF-8 -*- import os, time, shutil, urllib2 from mutagen.mp3 import MP3, HeaderNotFoundError from mutagen.id3 import ID3NoHeaderError from mutagen.id3 import ID3, TIT2, TIT3, TALB, TPE1, TRCK, TYER, TLAN, TPUB from mutagen.id3 import APIC, TCON, COMM, TDAT from xquery import TagQuery from errorinfo import * class Creator: def __init__(self, musicdir, path, tags=None): self.musicdir = musicdir self.lpath = path self.buffered = False self.cov_data = None self.art_data = None self.tags = tags self.done = False if tags is not None: self.ensureEnv(tags) def ensureEnv(self, tags=None): if not self.tags and tags == None: raise CreatorError(u'You dont put tags in!') elif tags is not None: self.tags = tags self.d_cov = os.path.join(self.musicdir, self.tags['TPE0'], self.tags['TALB']) self.f_art = os.path.join(self.musicdir, self.tags['TPE0'], self.tags['TPE0']) + '.jpg' self.f_cov = os.path.join(self.d_cov, self.tags['TALB']) + '.jpg' self.f_sng = os.path.join(self.d_cov, self.tags['TIT2']) + '.mp3' #ensure dir trees if not os.path.exists(self.d_cov): os.makedirs(self.d_cov) #ensure pictures if not os.path.exists(self.f_art) or os.path.getsize(self.f_art) < 500: data = self.downloadCover(self.tags['art_data'], self.f_art, 'art') self.art_data = data else: self.art_data = open(self.f_art, 'rb').read() if not os.path.exists(self.f_cov) or os.path.getsize(self.f_cov) < 500: data = self.downloadCover(self.tags['cov_data'], self.f_cov, 'cov') self.cov_data = data else: self.cov_data = open(self.f_cov, 'rb').read() #ensure that the file is complete if not self.buffered: self.waitCompletion() #return something~~~ if os.path.exists(self.lpath) and os.path.exists(self.f_cov) \ and os.path.exists(self.f_art): return u'Target: %s\nDestination: %s' % \ (os.path.basename(self.lpath), self.musicdir) def move(self): try: shutil.copy(self.lpath, self.f_sng) self.temp = self.lpath self.lpath = self.f_sng self.easyTags() self.done = True except Exception, e: return u'Failed to 
Move! %s' % e.message else: return u'%s -by- %s -from- %s' % (self.tags['TIT2'], self.tags['TPE1'], self.tags['TALB']) def clear(self): if self.done: os.remove(self.temp) print u'%s\t -BY- \t%s\t -FROM- \t%s <done!>' % (self.tags['TIT2'], self.tags['TPE1'], self.tags['TALB']) else: raise CreatorError('failed to transmitt! no move!') def downloadCover(self, url, path, mode='cov'): tiy = 10 while not os.path.exists(path) or os.path.getsize(path) < 500: if tiy <= 0: raise CreatorError( 'downloadCover: url cant be downloaded!').ahk_display() tiy -= 1 try: os.remove(path) except Exception: pass try: req = urllib2.Request(url) req.add_header('User-Agent', 'Mozilla/4.0 (compatible; MSIE 8.0; \ Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.5.30729; \ .NET CLR 3.0.4506.2152; .NET4.0C; .NET4.0E; Zune 4.7) LBBROWSER') res = urllib2.urlopen(req) data = res.read() except Exception: CreatorWarning('Failed to download pic!') with open(path, 'wb') as f: f.write(data) time.sleep(0.5) return data def easyTags(self): if not MP3(self.lpath).info.sketchy: try: audio = ID3(self.lpath) except ID3NoHeaderError: audio = ID3() audio.update_to_v23() audio.add(TIT2(encoding=3, text=[self.tags['TIT2']] )) audio.add(TALB(encoding=3, text=[self.tags['TALB']] )) audio.add(TPE1(encoding=3, text=[self.tags['TPE1']] )) audio.add(TRCK(encoding=3, text=[self.tags['TRCK']] )) audio.add(TYER(encoding=3, text=[self.tags['TYER']] )) audio.add(TLAN(encoding=3, text=[self.tags['TLAN']] )) audio.add(TPUB(encoding=3, text=[self.tags['TPUB']] )) audio.add(COMM(encoding=3, text=[self.tags['COMM']] )) if 'APIC:Cover' not in audio.keys() and self.cov_data: audio.add(APIC( encoding = 3, mime = 'image/jpg', type = 3, desc = u'Cover', data = self.cov_data + \ '\x00'*(len(self.cov_data)*3) )) audio.save(self.lpath) def waitCompletion(self): oz= os.stat(self.lpath).st_size cd = 10 while cd > 0: time.sleep(0.2) nz = os.stat(self.lpath).st_size if nz == oz: cd -= 1 else: oz = nz cd = 10 self.buffered = 
True if __name__ == '__main__': x = TagQuery('1769948048') x.expandInfo() print "****Module Testing, the result will be display below***" c = Creator('Exp', '1769948048_1879231_l.mp3', x.gettags()) print "---===---Testing tagging ---===---" print c.ensureEnv() print "---===---Testing file move---===---" print c.move()
mit
40423112/2017springcd_hw
plugin/liquid_tags/test_notebook.py
311
3042
import re from pelican.tests.support import unittest from . import notebook class TestNotebookTagRegex(unittest.TestCase): def get_argdict(self, markup): match = notebook.FORMAT.search(markup) if match: argdict = match.groupdict() src = argdict['src'] start = argdict['start'] end = argdict['end'] language = argdict['language'] return src, start, end, language return None def test_basic_notebook_tag(self): markup = u'path/to/thing.ipynb' src, start, end, language = self.get_argdict(markup) self.assertEqual(src, u'path/to/thing.ipynb') self.assertIsNone(start) self.assertIsNone(end) self.assertIsNone(language) def test_basic_notebook_tag_insensitive_to_whitespace(self): markup = u' path/to/thing.ipynb ' src, start, end, language = self.get_argdict(markup) self.assertEqual(src, u'path/to/thing.ipynb') self.assertIsNone(start) self.assertIsNone(end) self.assertIsNone(language) def test_notebook_tag_with_cells(self): markup = u'path/to/thing.ipynb cells[1:5]' src, start, end, language = self.get_argdict(markup) self.assertEqual(src, u'path/to/thing.ipynb') self.assertEqual(start, u'1') self.assertEqual(end, u'5') self.assertIsNone(language) def test_notebook_tag_with_alphanumeric_language(self): markup = u'path/to/thing.ipynb language[python3]' src, start, end, language = self.get_argdict(markup) self.assertEqual(src, u'path/to/thing.ipynb') self.assertIsNone(start) self.assertIsNone(end) self.assertEqual(language, u'python3') def test_notebook_tag_with_symbol_in_name_language(self): for short_name in [u'c++', u'cpp-objdump', u'c++-objdumb', u'cxx-objdump']: markup = u'path/to/thing.ipynb language[{}]'.format(short_name) src, start, end, language = self.get_argdict(markup) self.assertEqual(src, u'path/to/thing.ipynb') self.assertIsNone(start) self.assertIsNone(end) self.assertEqual(language, short_name) def test_notebook_tag_with_language_and_cells(self): markup = u'path/to/thing.ipynb cells[1:5] language[julia]' src, start, end, language = self.get_argdict(markup) 
self.assertEqual(src, u'path/to/thing.ipynb') self.assertEqual(start, u'1') self.assertEqual(end, u'5') self.assertEqual(language, u'julia') def test_notebook_tag_with_language_and_cells_and_weird_spaces(self): markup = u' path/to/thing.ipynb cells[1:5] language[julia] ' src, start, end, language = self.get_argdict(markup) self.assertEqual(src, u'path/to/thing.ipynb') self.assertEqual(start, u'1') self.assertEqual(end, u'5') self.assertEqual(language, u'julia') if __name__ == '__main__': unittest.main()
agpl-3.0
Jgarcia-IAS/SAT
openerp/addons-extra/odoo-pruebas/odoo-server/addons/base_report_designer/plugin/openerp_report_designer/bin/script/AddAttachment.py
384
11148
######################################################################### # # Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com # Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>). # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # See: http://www.gnu.org/licenses/lgpl.html # ############################################################################# import os import uno import unohelper import xmlrpclib import base64 from com.sun.star.task import XJobExecutor if __name__<>"package": from lib.gui import * from lib.error import ErrorDialog from lib.tools import * from LoginTest import * from lib.rpc import * database="test" uid = 3 class AddAttachment(unohelper.Base, XJobExecutor ): Kind = { 'PDF' : 'pdf', 'OpenOffice': 'sxw', } def __init__(self, ctx): self.ctx = ctx self.module = "openerp_report" self.version = "0.1" LoginTest() if not loginstatus and __name__=="package": exit(1) self.aSearchResult = [] desktop=getDesktop() oDoc2 = desktop.getCurrentComponent() docinfo=oDoc2.getDocumentInfo() global passwd self.password = passwd global url self.sock=RPCSession(url) if docinfo.getUserFieldValue(2) <> "" and docinfo.getUserFieldValue(3) <> "": self.win = DBModalDialog(60, 50, 180, 70, "Add Attachment to Server") self.win.addFixedText("lblResourceType", 2 , 5, 100, 10, "Select 
Appropriate Resource Type:") self.win.addComboListBox("lstResourceType", -2, 25, 176, 15,True) self.win.addButton('btnOkWithoutInformation', -2 , -5, 25 , 15,'OK' ,actionListenerProc = self.btnOkWithoutInformation_clicked ) else: self.win = DBModalDialog(60, 50, 180, 190, "Add Attachment to Server") self.win.addFixedText("lblModuleName",2 , 9, 42, 20, "Select Module:") self.win.addComboListBox("lstmodel", -2, 5, 134, 15,True) self.lstModel = self.win.getControl( "lstmodel" ) self.dModel = {} # Open a new connexion to the server ids = self.sock.execute(database, uid, self.password, 'ir.module.module', 'search', [('name','=','base_report_model'),('state', '=', 'installed')]) if not len(ids): # If the module 'base_report_model' is not installed, use the default model self.dModel = { "Partner":'res.partner', } else: ids =self.sock.execute(database, uid, self.password, 'base.report.model' , 'search', []) res = self.sock.execute(database, uid, self.password, 'base.report.model' , 'read', ids, ['name','model_id']) models = self.sock.execute(database, uid, self.password, 'ir.model' , 'read', map(lambda x:x['model_id'][0], res), ['model']) models = dict(map(lambda x:(x['id'],x['model']), models)) self.dModel = dict(map(lambda x: (x['name'],models[x['model_id'][0]]), res)) for item in self.dModel.keys(): self.lstModel.addItem(item, self.lstModel.getItemCount()) self.win.addFixedText("lblSearchName",2 , 25, 60, 10, "Enter Search String:") self.win.addEdit("txtSearchName", 2, 35, 149, 15,) self.win.addButton('btnSearch', -2 , 35, 25 , 15,'Search' ,actionListenerProc = self.btnSearch_clicked ) self.win.addFixedText("lblSearchRecord", 2 , 55, 60, 10, "Search Result:") self.win.addComboListBox("lstResource", -2, 65, 176, 70, False ) self.lstResource = self.win.getControl( "lstResource" ) self.win.addFixedText("lblResourceType", 2 , 137, 100, 20, "Select Appropriate Resource Type:") self.win.addComboListBox("lstResourceType", -2, 147, 176, 15,True ) 
self.win.addButton('btnOkWithInformation', -2 , -5, 25 , 15,'OK' ,actionListenerProc = self.btnOkWithInformation_clicked ) self.lstResourceType = self.win.getControl( "lstResourceType" ) for kind in self.Kind.keys(): self.lstResourceType.addItem( kind, self.lstResourceType.getItemCount() ) self.win.addButton('btnCancel', -2 - 27 , -5 , 30 , 15, 'Cancel' ,actionListenerProc = self.btnCancel_clicked ) self.win.doModalDialog("lstResourceType", self.Kind.keys()[0]) def btnSearch_clicked(self, oActionEvent): modelSelectedItem = self.win.getListBoxSelectedItem("lstmodel") if modelSelectedItem == "": return desktop=getDesktop() oDoc2 = desktop.getCurrentComponent() docinfo=oDoc2.getDocumentInfo() self.aSearchResult =self.sock.execute( database, uid, self.password, self.dModel[modelSelectedItem], 'name_search', self.win.getEditText("txtSearchName")) self.win.removeListBoxItems("lstResource", 0, self.win.getListBoxItemCount("lstResource")) if self.aSearchResult == []: ErrorDialog("No search result found.", "", "Search Error.") return for result in self.aSearchResult: self.lstResource.addItem(result[1],result[0]) def _send_attachment(self, name, data, res_model, res_id): desktop = getDesktop() oDoc2 = desktop.getCurrentComponent() docinfo = oDoc2.getDocumentInfo() params = { 'name': name, 'datas': base64.encodestring( data ), 'datas_fname': name, 'res_model' : res_model, 'res_id' : int(res_id), } return self.sock.execute( database, uid, self.password, 'ir.attachment', 'create', params ) def send_attachment(self, model, resource_id): desktop = getDesktop() oDoc2 = desktop.getCurrentComponent() docinfo = oDoc2.getDocumentInfo() if oDoc2.getURL() == "": ErrorDialog("You should save your file.", "", "Saving Error.") return None url = oDoc2.getURL() if self.Kind[self.win.getListBoxSelectedItem("lstResourceType")] == "pdf": url = self.doc2pdf(url[7:]) if url == None: ErrorDialog( "Problem in creating PDF.", "", "PDF Error.") return None url = url[7:] data = read_data_from_file( 
get_absolute_file_path( url ) ) return self._send_attachment( os.path.basename( url ), data, model, resource_id ) def btnOkWithoutInformation_clicked(self, oActionEvent): desktop = getDesktop() oDoc2 = desktop.getCurrentComponent() docinfo = oDoc2.getDocumentInfo() if self.win.getListBoxSelectedItem("lstResourceType") == "": ErrorDialog("You have to select a resource type.", "", "Selection Error." ) return res = self.send_attachment( docinfo.getUserFieldValue(3), docinfo.getUserFieldValue(2) ) self.win.endExecute() def btnOkWithInformation_clicked(self, oActionEvent): if self.win.getListBoxSelectedItem("lstResourceType") == "": ErrorDialog( "You have to select a resource type.", "", "Selection Error." ) return if self.win.getListBoxSelectedItem("lstResource") == "" or self.win.getListBoxSelectedItem("lstmodel") == "": ErrorDialog("You have to select Model and Resource.", "", "Selection Error.") return resourceid = None for s in self.aSearchResult: if s[1] == self.win.getListBoxSelectedItem("lstResource"): resourceid = s[0] break if resourceid == None: ErrorDialog("No resource is selected.", "", "Resource Error." 
) return res = self.send_attachment( self.dModel[self.win.getListBoxSelectedItem('lstmodel')], resourceid ) self.win.endExecute() def btnCancel_clicked(self, oActionEvent): self.win.endExecute() def doc2pdf(self, strFile): oDoc = None strFilterSubName = '' strUrl = convertToURL( strFile ) desktop = getDesktop() oDoc = desktop.loadComponentFromURL( strUrl, "_blank", 0, Array(self._MakePropertyValue("Hidden",True))) if oDoc: strFilterSubName = "" # select appropriate filter if oDoc.supportsService("com.sun.star.presentation.PresentationDocument"): strFilterSubName = "impress_pdf_Export" elif oDoc.supportsService("com.sun.star.sheet.SpreadsheetDocument"): strFilterSubName = "calc_pdf_Export" elif oDoc.supportsService("com.sun.star.text.WebDocument"): strFilterSubName = "writer_web_pdf_Export" elif oDoc.supportsService("com.sun.star.text.GlobalDocument"): strFilterSubName = "writer_globaldocument_pdf_Export" elif oDoc.supportsService("com.sun.star.text.TextDocument"): strFilterSubName = "writer_pdf_Export" elif oDoc.supportsService("com.sun.star.drawing.DrawingDocument"): strFilterSubName = "draw_pdf_Export" elif oDoc.supportsService("com.sun.star.formula.FormulaProperties"): strFilterSubName = "math_pdf_Export" elif oDoc.supportsService("com.sun.star.chart.ChartDocument"): strFilterSubName = "chart_pdf_Export" else: pass filename = len(strFilterSubName) > 0 and convertToURL( os.path.splitext( strFile )[0] + ".pdf" ) or None if len(strFilterSubName) > 0: oDoc.storeToURL( filename, Array(self._MakePropertyValue("FilterName", strFilterSubName ),self._MakePropertyValue("CompressMode", "1" ))) oDoc.close(True) # Can be None if len(strFilterSubName) <= 0 return filename def _MakePropertyValue(self, cName="", uValue=u""): oPropertyValue = createUnoStruct( "com.sun.star.beans.PropertyValue" ) if cName: oPropertyValue.Name = cName if uValue: oPropertyValue.Value = uValue return oPropertyValue if __name__<>"package" and __name__=="__main__": AddAttachment(None) elif 
__name__=="package": g_ImplementationHelper.addImplementation( AddAttachment, "org.openoffice.openerp.report.addattachment", ("com.sun.star.task.Job",),) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
petrutlucian94/cinder
cinder/volume/drivers/infortrend/eonstor_ds_cli/cli_factory.py
23
18449
# Copyright (c) 2015 Infortrend Technology, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Infortrend basic CLI factory. """ import abc from oslo_concurrency import processutils from oslo_log import log as logging import six from cinder.i18n import _LE from cinder import utils LOG = logging.getLogger(__name__) DEFAULT_RETRY_TIME = 5 def retry_cli(func): def inner(self, *args, **kwargs): total_retry_time = self.cli_retry_time if total_retry_time is None: total_retry_time = DEFAULT_RETRY_TIME retry_time = 0 while retry_time < total_retry_time: rc, out = func(self, *args, **kwargs) retry_time += 1 if rc == 0: break LOG.error(_LE( 'Retry %(retry)s times: %(method)s Failed ' '%(rc)s: %(reason)s'), { 'retry': retry_time, 'method': self.__class__.__name__, 'rc': rc, 'reason': out}) LOG.debug( 'Method: %(method)s Return Code: %(rc)s ' 'Output: %(out)s', { 'method': self.__class__.__name__, 'rc': rc, 'out': out}) return rc, out return inner def util_execute(command_line): content, err = utils.execute(command_line, shell=True) return content def strip_empty_in_list(list): result = [] for entry in list: entry = entry.strip() if entry != "": result.append(entry) return result def table_to_dict(table): tableHeader = table[0].split(" ") tableHeaderList = strip_empty_in_list(tableHeader) result = [] for i in range(len(table) - 2): if table[i + 2].strip() == "": break resultEntry = {} tableEntry = table[i + 2].split(" ") tableEntryList = 
strip_empty_in_list(tableEntry) for key, value in zip(tableHeaderList, tableEntryList): resultEntry[key] = value result.append(resultEntry) return result def content_lines_to_dict(content_lines): result = [] resultEntry = {} for content_line in content_lines: if content_line.strip() == "": result.append(resultEntry) resultEntry = {} continue split_entry = content_line.strip().split(": ", 1) resultEntry[split_entry[0]] = split_entry[1] return result @six.add_metaclass(abc.ABCMeta) class BaseCommand(object): """The BaseCommand abstract class.""" def __init__(self): super(BaseCommand, self).__init__() @abc.abstractmethod def execute(self, *args, **kwargs): pass class ExecuteCommand(BaseCommand): """The Common ExecuteCommand.""" def __init__(self, cli_conf): super(ExecuteCommand, self).__init__() self.cli_retry_time = cli_conf.get('cli_retry_time') @retry_cli def execute(self, *args, **kwargs): result = None rc = 0 try: result, err = utils.execute(*args, **kwargs) except processutils.ProcessExecutionError as pe: rc = pe.exit_code result = pe.stdout result = result.replace('\n', '\\n') LOG.error(_LE( 'Error on execute command. ' 'Error code: %(exit_code)d Error msg: %(result)s'), { 'exit_code': pe.exit_code, 'result': result}) return rc, result class CLIBaseCommand(BaseCommand): """The CLIBaseCommand class.""" def __init__(self, cli_conf): super(CLIBaseCommand, self).__init__() self.java = "java -jar" self.execute_file = cli_conf.get('path') self.ip = cli_conf.get('ip') self.password = cli_conf.get('password') self.cli_retry_time = cli_conf.get('cli_retry_time') self.command = "" self.parameters = () self.command_line = "" def _generate_command(self, parameters): """Generate execute Command. 
use java, execute, command, parameters.""" self.parameters = parameters parameters_line = ' '.join(parameters) if self.password: parameters_line = 'password=%s %s' % ( self.password, parameters_line) self.command_line = "{0} {1} {2} {3} {4}".format( self.java, self.execute_file, self.ip, self.command, parameters_line) return self.command_line def _parser(self, content=None): """The parser to parse command result. :param content: The parse Content :returns: parse result """ content = content.replace("\r", "") content = content.replace("\\/-", "") content = content.strip() LOG.debug(content) if content is not None: content_lines = content.split("\n") rc, out = self._parse_return(content_lines) if rc != 0: return rc, out else: return rc, content_lines return -1, None @retry_cli def execute(self, *args, **kwargs): command_line = self._generate_command(args) LOG.debug('Executing: %(command)s', {'command': command_line}) rc = 0 result = None try: content = self._execute(command_line) rc, result = self._parser(content) except processutils.ProcessExecutionError as pe: rc = -2 # prevent confusing with cli real rc result = pe.stdout result = result.replace('\n', '\\n') LOG.error(_LE( 'Error on execute %(command)s. 
' 'Error code: %(exit_code)d Error msg: %(result)s'), { 'command': command_line, 'exit_code': pe.exit_code, 'result': result}) return rc, result def _execute(self, command_line): return util_execute(command_line) def set_ip(self, ip): """Set the Raid's ip.""" self.ip = ip def _parse_return(self, content_lines): """Get the end of command line result.""" rc = 0 return_value = content_lines[-1].strip().split(' ', 1)[1] return_cli_result = content_lines[-2].strip().split(' ', 1)[1] rc = int(return_value, 16) return rc, return_cli_result class CreateLD(CLIBaseCommand): """The Create LD Command.""" def __init__(self, *args, **kwargs): super(CreateLD, self).__init__(*args, **kwargs) self.command = "create ld" class CreateLV(CLIBaseCommand): """The Create LV Command.""" def __init__(self, *args, **kwargs): super(CreateLV, self).__init__(*args, **kwargs) self.command = "create lv" class CreatePartition(CLIBaseCommand): """Create Partition. create part [LV-ID] [name] [size={partition-size}] [min={minimal-reserve-size}] [init={switch}] [tier={tier-level-list}] """ def __init__(self, *args, **kwargs): super(CreatePartition, self).__init__(*args, **kwargs) self.command = "create part" class DeletePartition(CLIBaseCommand): """Delete Partition. delete part [partition-ID] [-y] """ def __init__(self, *args, **kwargs): super(DeletePartition, self).__init__(*args, **kwargs) self.command = "delete part" class SetPartition(CLIBaseCommand): """Set Partition. set part [partition-ID] [name={partition-name}] [min={minimal-reserve-size}] set part expand [partition-ID] [size={expand-size}] set part purge [partition-ID] [number] [rule-type] set part reclaim [partition-ID] """ def __init__(self, *args, **kwargs): super(SetPartition, self).__init__(*args, **kwargs) self.command = "set part" class CreateMap(CLIBaseCommand): """Map the Partition on the channel. 
create map [part] [partition-ID] [Channel-ID] [Target-ID] [LUN-ID] [assign={assign-to}] """ def __init__(self, *args, **kwargs): super(CreateMap, self).__init__(*args, **kwargs) self.command = "create map" class DeleteMap(CLIBaseCommand): """Unmap the Partition on the channel. delete map [part] [partition-ID] [Channel-ID] [Target-ID] [LUN-ID] [-y] """ def __init__(self, *args, **kwargs): super(DeleteMap, self).__init__(*args, **kwargs) self.command = "delete map" class CreateSnapshot(CLIBaseCommand): """Create partition's Snapshot. create si [part] [partition-ID] """ def __init__(self, *args, **kwargs): super(CreateSnapshot, self).__init__(*args, **kwargs) self.command = "create si" class DeleteSnapshot(CLIBaseCommand): """Delete partition's Snapshot. delete si [snapshot-image-ID] [-y] """ def __init__(self, *args, **kwargs): super(DeleteSnapshot, self).__init__(*args, **kwargs) self.command = "delete si" class CreateReplica(CLIBaseCommand): """Create partition or snapshot's replica. create replica [name] [part | si] [source-volume-ID] [part] [target-volume-ID] [type={replication-mode}] [priority={level}] [desc={description}] [incremental={switch}] [timeout={value}] [compression={switch}] """ def __init__(self, *args, **kwargs): super(CreateReplica, self).__init__(*args, **kwargs) self.command = "create replica" class DeleteReplica(CLIBaseCommand): """Delete and terminate specific replication job. delete replica [volume-pair-ID] [-y] """ def __init__(self, *args, **kwargs): super(DeleteReplica, self).__init__(*args, **kwargs) self.command = "delete replica" class CreateIQN(CLIBaseCommand): """Create host iqn for CHAP or lun filter. create iqn [IQN] [IQN-alias-name] [user={username}] [password={secret}] [target={name}] [target-password={secret}] [ip={ip-address}] [mask={netmask-ip}] """ def __init__(self, *args, **kwargs): super(CreateIQN, self).__init__(*args, **kwargs) self.command = "create iqn" class DeleteIQN(CLIBaseCommand): """Delete host iqn by name. 
delete iqn [name] """ def __init__(self, *args, **kwargs): super(DeleteIQN, self).__init__(*args, **kwargs) self.command = "delete iqn" class ShowCommand(CLIBaseCommand): """Basic Show Command.""" def __init__(self, *args, **kwargs): super(ShowCommand, self).__init__(*args, **kwargs) self.param_detail = "-l" self.default_type = "table" self.start_key = "" def _parser(self, content=None): """Parse Table or Detail format into dict. # Table format ID Name LD-amount ---------------------- 123 LV-1 1 # Result { 'ID': '123', 'Name': 'LV-1', 'LD-amount': '1' } # Detail format ID: 5DE94FF775D81C30 Name: LV-1 LD-amount: 1 # Result { 'ID': '123', 'Name': 'LV-1', 'LD-amount': '1' } :param content: The parse Content. :returns: parse result """ rc, out = super(ShowCommand, self)._parser(content) # Error. if rc != 0: return rc, out # No content. if len(out) < 6: return rc, [] detect_type = self.detect_type() # Show detail content. if detect_type == "list": start_id = self.detect_detail_start_index(out) if start_id < 0: return rc, [] result = content_lines_to_dict(out[start_id:-2]) else: start_id = self.detect_table_start_index(out) if start_id < 0: return rc, [] result = table_to_dict(out[start_id:-3]) return rc, result def detect_type(self): if self.param_detail in self.parameters: detect_type = "list" else: detect_type = self.default_type return detect_type def detect_table_start_index(self, content): for i in range(3, len(content)): key = content[i].strip().split(' ') if self.start_key in key[0].strip(): return i return -1 def detect_detail_start_index(self, content): for i in range(3, len(content)): split_entry = content[i].strip().split(' ') if len(split_entry) >= 2 and ':' in split_entry[0]: return i return -1 class ShowLD(ShowCommand): """Show LD. show ld [index-list] """ def __init__(self, *args, **kwargs): super(ShowLD, self).__init__(*args, **kwargs) self.command = "show ld" class ShowLV(ShowCommand): """Show LV. 
show lv [lv={LV-IDs}] [-l] """ def __init__(self, *args, **kwargs): super(ShowLV, self).__init__(*args, **kwargs) self.command = "show lv" self.start_key = "ID" def detect_table_start_index(self, content): if "tier" in self.parameters: self.start_key = "LV-Name" for i in range(3, len(content)): key = content[i].strip().split(' ') if self.start_key in key[0].strip(): return i return -1 class ShowPartition(ShowCommand): """Show Partition. show part [part={partition-IDs} | lv={LV-IDs}] [-l] """ def __init__(self, *args, **kwargs): super(ShowPartition, self).__init__(*args, **kwargs) self.command = "show part" self.start_key = "ID" class ShowSnapshot(ShowCommand): """Show Snapshot. show si [si={snapshot-image-IDs} | part={partition-IDs} | lv={LV-IDs}] [-l] """ def __init__(self, *args, **kwargs): super(ShowSnapshot, self).__init__(*args, **kwargs) self.command = "show si" self.start_key = "Index" class ShowDevice(ShowCommand): """Show Device. show device """ def __init__(self, *args, **kwargs): super(ShowDevice, self).__init__(*args, **kwargs) self.command = "show device" self.start_key = "Index" class ShowChannel(ShowCommand): """Show Channel. show channel """ def __init__(self, *args, **kwargs): super(ShowChannel, self).__init__(*args, **kwargs) self.command = "show channel" self.start_key = "Ch" class ShowDisk(ShowCommand): """The Show Disk Command. show disk [disk-index-list | channel={ch}] """ def __init__(self, *args, **kwargs): super(ShowDisk, self).__init__(*args, **kwargs) self.command = "show disk" class ShowMap(ShowCommand): """Show Map. show map [part={partition-IDs} | channel={channel-IDs}] [-l] """ def __init__(self, *args, **kwargs): super(ShowMap, self).__init__(*args, **kwargs) self.command = "show map" self.start_key = "Ch" class ShowNet(ShowCommand): """Show IP network. 
show net [id={channel-IDs}] [-l] """ def __init__(self, *args, **kwargs): super(ShowNet, self).__init__(*args, **kwargs) self.command = "show net" self.start_key = "ID" class ShowLicense(ShowCommand): """Show License. show license """ def __init__(self, *args, **kwargs): super(ShowLicense, self).__init__(*args, **kwargs) self.command = "show license" self.start_key = "License" def _parser(self, content=None): """Parse License format. # License format License Amount(Partition/Subsystem) Expired ------------------------------------------------ EonPath --- True # Result { 'EonPath': { 'Amount': '---', 'Support': True } } :param content: The parse Content. :returns: parse result """ rc, out = super(ShowLicense, self)._parser(content) if rc != 0: return rc, out if len(out) > 0: result = {} for entry in out: if entry['Expired'] == '---' or entry['Expired'] == 'Expired': support = False else: support = True result[entry['License']] = { 'Amount': entry['Amount(Partition/Subsystem)'], 'Support': support } return rc, result return rc, [] class ShowReplica(ShowCommand): """Show information of all replication jobs or specific job. show replica [id={volume-pair-IDs}] [-l] id={volume-pair-IDs} """ def __init__(self, *args, **kwargs): super(ShowReplica, self).__init__(*args, **kwargs) self.command = 'show replica' class ShowWWN(ShowCommand): """Show Fibre network. show wwn """ def __init__(self, *args, **kwargs): super(ShowWWN, self).__init__(*args, **kwargs) self.command = "show wwn" self.start_key = "CH" class ShowIQN(ShowCommand): """Show iSCSI initiator IQN which is set by create iqn. show iqn """ LIST_START_LINE = "List of initiator IQN(s):" def __init__(self, *args, **kwargs): super(ShowIQN, self).__init__(*args, **kwargs) self.command = "show iqn" self.default_type = "list" def detect_detail_start_index(self, content): for i in range(3, len(content)): if content[i].strip() == self.LIST_START_LINE: return i + 2 return -1
apache-2.0
wolfgangz2013/rt-thread
bsp/CME_M7/rtconfig.py
15
3306
import os # toolchains options ARCH='arm' CPU='cortex-m3' CROSS_TOOL='keil' if os.getenv('RTT_CC'): CROSS_TOOL = os.getenv('RTT_CC') if CROSS_TOOL == 'gcc': PLATFORM = 'gcc' EXEC_PATH = r'C:\Program Files\GNU Tools ARM Embedded\4.8 2013q4\bin' elif CROSS_TOOL == 'keil': PLATFORM = 'armcc' EXEC_PATH = 'C:/Keil' elif CROSS_TOOL == 'iar': print('================ERROR============================') print('Not support iar yet!') print('=================================================') exit(0) if os.getenv('RTT_EXEC_PATH'): EXEC_PATH = os.getenv('RTT_EXEC_PATH') if os.getenv('RTT_ROOT'): RTT_ROOT = os.getenv('RTT_ROOT') else: RTT_ROOT = os.path.normpath(os.getcwd() + '/../..') BUILD = 'debug' if PLATFORM == 'gcc': # toolchains PREFIX = 'arm-none-eabi-' CC = PREFIX + 'gcc' AS = PREFIX + 'gcc' AR = PREFIX + 'ar' LINK = PREFIX + 'gcc' TARGET_EXT = 'elf' SIZE = PREFIX + 'size' OBJDUMP = PREFIX + 'objdump' OBJCPY = PREFIX + 'objcopy' DEVICE = ' -mcpu=cortex-m3 -mthumb -ffunction-sections -fdata-sections' CFLAGS = DEVICE + ' -g -Wall ' AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb ' LFLAGS = DEVICE + ' -lm -lgcc -lc' + ' -nostartfiles -Wl,--gc-sections,-Map=rtthread.map,-cref,-u,Reset_Handler -T CME_M7.ld' CPATH = '' LPATH = '' if BUILD == 'debug': CFLAGS += ' -O0 -gdwarf-2' AFLAGS += ' -gdwarf-2' else: CFLAGS += ' -O3' POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n' elif PLATFORM == 'armcc': # toolchains CC = 'armcc' AS = 'armasm' AR = 'armar' LINK = 'armlink' TARGET_EXT = 'axf' DEVICE = ' --cpu Cortex-M3' CFLAGS = DEVICE + ' --c99 --apcs=interwork' AFLAGS = DEVICE LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rtthread.map --scatter CME_M7.sct' LFLAGS += ' --keep *.o(.rti_fn.*) --keep *.o(FSymTab) --keep *.o(VSymTab)' EXEC_PATH += '/ARM/ARMCC/bin' if BUILD == 'debug': CFLAGS += ' -g -O0' AFLAGS += ' -g' else: CFLAGS += ' -O2' POST_ACTION = 'fromelf --bin $TARGET --output 
rtthread.bin \nfromelf -z $TARGET' elif PLATFORM == 'iar': # toolchains CC = 'iccarm' AS = 'iasmarm' AR = 'iarchive' LINK = 'ilinkarm' TARGET_EXT = 'out' DEVICE = ' -D USE_STDPERIPH_DRIVER' CFLAGS = DEVICE CFLAGS += ' --diag_suppress Pa050' CFLAGS += ' --no_cse' CFLAGS += ' --no_unroll' CFLAGS += ' --no_inline' CFLAGS += ' --no_code_motion' CFLAGS += ' --no_tbaa' CFLAGS += ' --no_clustering' CFLAGS += ' --no_scheduling' CFLAGS += ' --debug' CFLAGS += ' --endian=little' CFLAGS += ' --cpu=Cortex-M3' CFLAGS += ' -e' CFLAGS += ' --fpu=None' CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"' CFLAGS += ' -Ol' AFLAGS = '' AFLAGS += ' -s+' AFLAGS += ' -w+' AFLAGS += ' -r' AFLAGS += ' --cpu Cortex-M3' AFLAGS += ' --fpu None' LFLAGS = ' --config CME_M7.icf' LFLAGS += ' --semihosting' LFLAGS += ' --entry __iar_program_start' EXEC_PATH = EXEC_PATH + '/arm/bin/' POST_ACTION = ''
apache-2.0
alexylem/jarvis
recorders/snowboy/wavget.py
1
6979
#!/usr/bin/env python import os REC_DIR = os.path.dirname(os.path.abspath(__file__)) import sys SB_DIR=os.path.join (REC_DIR, '../../stt_engines/snowboy') sys.path.insert(0, SB_DIR) import snowboydecoder import snowboydetect import pyaudio import collections import time import logging import glob import struct logger = logging.getLogger("recorder") logger.setLevel(logging.INFO) logging.basicConfig() RESOURCE_FILE = os.path.join(SB_DIR, "resources","common.res") """ Wav """ WAV_FORMAT_PCM = 0x0001 """ Fixed from snowboy recording """ WAV_CHANNELS = 1 WAV_FRAMERATE = 16000 """ 16 bits """ WAV_SAMPWIDTH = 2 class WavGet(object): """ Snowboy decoder to save a wave. :param audio_gain: multiply input volume by this factor. :param trigger_ticks: ticks before triggering callback, tick is a sleep_time. [0] ticks_silence_before_detect: min silence ticks before detection [1] ticks_voice_detect: need this number of voice ticks [3] ticks_silence_after_detect: min silence ticks after detection """ def __init__(self, audio_gain=1, trigger_ticks=[-1,-1,-1]): def audio_callback(in_data, frame_count, time_info, status): self.ring_buffer.extend(in_data) play_data = chr(0) * len(in_data) return play_data, pyaudio.paContinue a_model=glob.glob( os.path.join(SB_DIR,"resources","*.[up]mdl") ); assert len(a_model) > 0, "Need at least one model in resources to proceed" self.detector = snowboydetect.SnowboyDetect( resource_filename=RESOURCE_FILE.encode(), model_str=a_model[0].encode()) self.detector.SetAudioGain( int(audio_gain) ) """ match or not - it does not matter """ self.detector.SetSensitivity("0.01".encode()) self.adata = [] self.trigger_ticks = trigger_ticks self.ring_buffer = snowboydecoder.RingBuffer( self.detector.NumChannels() * self.detector.SampleRate() * 5) self.audio = pyaudio.PyAudio() self.stream_in = self.audio.open( input=True, output=False, format=self.audio.get_format_from_width( self.detector.BitsPerSample() / 8), channels=self.detector.NumChannels(), 
rate=self.detector.SampleRate(), frames_per_buffer=2048, stream_callback=audio_callback) def _write(self, output_file): # Write wav header and data fh = open( output_file, 'wb' ) fh.write( b'RIFF' ) _data = b''.join(self.adata) _datalength = len(_data) _nframes = _datalength // (WAV_CHANNELS * WAV_SAMPWIDTH) fh.write(struct.pack('<L4s4sLHHLLHH4s', 36 + _datalength, b'WAVE', b'fmt ', 16, WAV_FORMAT_PCM, WAV_CHANNELS, WAV_FRAMERATE, WAV_CHANNELS * WAV_FRAMERATE * WAV_SAMPWIDTH, WAV_CHANNELS * WAV_SAMPWIDTH, WAV_SAMPWIDTH * 8, b'data')) fh.write(struct.pack('<L', _datalength)) fh.write(_data) fh.close() def start(self, output_file, track_mode, interrupt_check=lambda: False, sleep_time=0.03): """ Start the voice detector. For every `sleep_time` second it checks the audio buffer. :param output_file: output file 'wav'. Unlink if failed. :param interrupt_check: a function that returns True if the main loop needs to stop. :param float sleep_time: how much time in second every loop waits. :return: None """ if interrupt_check(): logger.debug("detect voice return") return if track_mode is None: track_mode = False tticks = None if track_mode == False: if os.path.isfile(output_file): # check output file already exists os.unlink(output_file) # delete tticks = self.trigger_ticks silence_before = 0 voice = 0 silence_after = 0 while True: if interrupt_check(): logger.debug("detect voice break") break data = self.ring_buffer.get() if len(data) == 0: time.sleep(sleep_time) continue ans = self.detector.RunDetection(data) """ track mode """ if track_mode: if ans == -1: logger.error("Error initializing streams or reading audio data") elif ans == -2: sys.stdout.write('_') sys.stdout.flush() elif ans >= 0: sys.stdout.write('|') sys.stdout.flush() continue """ store file mode """ if ans == -1: logger.error("Error initializing streams or reading audio data") elif ans == -2: """ Silence """ sys.stdout.write('_') sys.stdout.flush() if voice == 0 or silence_before < tticks[0]: silence_before 
+= 1 elif voice >= tticks[1]: """ Have enough voice to count silence_after """ silence_after += 1 if silence_after >= tticks[2]: break """ else ignore silence """ elif ans >= 0: """ Voice """ sys.stdout.write('|') sys.stdout.flush() if silence_before >= tticks[0]: """ Have enough silence to count voice """ silence_after = 0 voice += 1 if voice > 0: self.adata.append( data ); elif len(self.adata) <= 1: """ Always keep track of one block before voice activation to get a perfect sentence """ if len(self.adata)<1: self.adata.append( data ) else: self.adata[0] = data if track_mode == False: logger.info("Ticks status: " + `silence_before` + " " + `voice` + " " + `silence_after`) logger.debug("finished.") """ write content in wav """ self._write( output_file ) return def terminate(self): """ Terminate audio stream. Users cannot call start() again to detect. :return: None """ self.stream_in.stop_stream() self.stream_in.close() self.audio.terminate()
mit
WeblateOrg/weblate
weblate/checks/tests/test_utils.py
2
1729
# # Copyright © 2012 - 2021 Michal Čihař <michal@cihar.com> # # This file is part of Weblate <https://weblate.org/> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # from django.test import SimpleTestCase from weblate.checks.tests.test_checks import MockUnit from weblate.checks.utils import highlight_string class HightlightTestCase(SimpleTestCase): def test_simple(self): self.assertEqual( highlight_string( "simple {format} string", MockUnit(flags="python-brace-format") ), [(7, 15, "{format}")], ) def test_multi(self): self.assertEqual( highlight_string( "simple {format} %d string", MockUnit(flags="python-brace-format, python-format"), ), [(7, 15, "{format}"), (16, 18, "%d")], ) def test_overlap(self): self.assertEqual( highlight_string( 'nested <a href="{format}">string</a>', MockUnit(flags="python-brace-format"), ), [(7, 26, '<a href="{format}">'), (32, 36, "</a>")], )
gpl-3.0
ridfrustum/lettuce
tests/integration/django/dill/leaves/models.py
18
1285
from django.db import models class Garden(models.Model): name = models.CharField(max_length=100) area = models.IntegerField() raining = models.BooleanField() @property def howbig(self): if self.area < 50: return 'small' elif self.area < 150: return 'medium' else: return 'big' class Field(models.Model): name = models.CharField(max_length=100) class Fruit(models.Model): name = models.CharField(max_length=100) garden = models.ForeignKey(Garden) ripe_by = models.DateField() fields = models.ManyToManyField(Field) class Bee(models.Model): name = models.CharField(max_length=100) pollinated_fruit = models.ManyToManyField(Fruit, related_name='pollinated_by') class Goose(models.Model): name = models.CharField(max_length=100) class Meta: verbose_name_plural = "geese" class Harvester(models.Model): make = models.CharField(max_length=100) rego = models.CharField(max_length=100) class Panda(models.Model): """ Not part of a garden, but still an important part of any good application """ name = models.CharField(max_length=100) location = models.CharField(max_length=100)
gpl-3.0
chiffa/numpy
numpy/compat/py3k.py
105
2008
""" Python 3 compatibility tools. """ from __future__ import division, absolute_import, print_function __all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar', 'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested', 'asstr', 'open_latin1', 'long', 'basestring', 'sixu', 'integer_types'] import sys if sys.version_info[0] >= 3: import io long = int integer_types = (int,) basestring = str unicode = str bytes = bytes def asunicode(s): if isinstance(s, bytes): return s.decode('latin1') return str(s) def asbytes(s): if isinstance(s, bytes): return s return str(s).encode('latin1') def asstr(s): if isinstance(s, bytes): return s.decode('latin1') return str(s) def isfileobj(f): return isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)) def open_latin1(filename, mode='r'): return open(filename, mode=mode, encoding='iso-8859-1') def sixu(s): return s strchar = 'U' else: bytes = str long = long basestring = basestring unicode = unicode integer_types = (int, long) asbytes = str asstr = str strchar = 'S' def isfileobj(f): return isinstance(f, file) def asunicode(s): if isinstance(s, unicode): return s return str(s).decode('ascii') def open_latin1(filename, mode='r'): return open(filename, mode=mode) def sixu(s): return unicode(s, 'unicode_escape') def getexception(): return sys.exc_info()[1] def asbytes_nested(x): if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): return [asbytes_nested(y) for y in x] else: return asbytes(x) def asunicode_nested(x): if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): return [asunicode_nested(y) for y in x] else: return asunicode(x)
bsd-3-clause
osvalr/odoo
addons/website_event_track/models/event.py
300
8344
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv from openerp.tools.translate import _ from openerp.addons.website.models.website import slug import pytz class event_track_tag(osv.osv): _name = "event.track.tag" _order = 'name' _columns = { 'name': fields.char('Event Track Tag', translate=True) } class event_tag(osv.osv): _name = "event.tag" _order = 'name' _columns = { 'name': fields.char('Event Tag', translate=True) } # # Tracks: conferences # class event_track_stage(osv.osv): _name = "event.track.stage" _order = 'sequence' _columns = { 'name': fields.char('Track Stage', translate=True), 'sequence': fields.integer('Sequence') } _defaults = { 'sequence': 0 } class event_track_location(osv.osv): _name = "event.track.location" _columns = { 'name': fields.char('Track Rooms') } class event_track(osv.osv): _name = "event.track" _description = 'Event Tracks' _order = 'priority, date' _inherit = ['mail.thread', 'ir.needaction_mixin', 'website.seo.metadata'] def _website_url(self, cr, uid, ids, field_name, arg, context=None): res = dict.fromkeys(ids, '') for 
track in self.browse(cr, uid, ids, context=context): res[track.id] = "/event/%s/track/%s" % (slug(track.event_id), slug(track)) return res _columns = { 'name': fields.char('Track Title', required=True, translate=True), 'user_id': fields.many2one('res.users', 'Responsible'), 'speaker_ids': fields.many2many('res.partner', string='Speakers'), 'tag_ids': fields.many2many('event.track.tag', string='Tags'), 'stage_id': fields.many2one('event.track.stage', 'Stage'), 'description': fields.html('Track Description', translate=True), 'date': fields.datetime('Track Date'), 'duration': fields.float('Duration', digits=(16,2)), 'location_id': fields.many2one('event.track.location', 'Location'), 'event_id': fields.many2one('event.event', 'Event', required=True), 'color': fields.integer('Color Index'), 'priority': fields.selection([('3','Low'),('2','Medium (*)'),('1','High (**)'),('0','Highest (***)')], 'Priority', required=True), 'website_published': fields.boolean('Available in the website', copy=False), 'website_url': fields.function(_website_url, string="Website url", type="char"), 'image': fields.related('speaker_ids', 'image', type='binary', readonly=True) } def set_priority(self, cr, uid, ids, priority, context={}): return self.write(cr, uid, ids, {'priority' : priority}) def _default_stage_id(self, cr, uid, context={}): stage_obj = self.pool.get('event.track.stage') ids = stage_obj.search(cr, uid, [], context=context) return ids and ids[0] or False _defaults = { 'user_id': lambda self, cr, uid, ctx: uid, 'website_published': lambda self, cr, uid, ctx: False, 'duration': lambda *args: 1.5, 'stage_id': _default_stage_id, 'priority': '2' } def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None): stage_obj = self.pool.get('event.track.stage') result = stage_obj.name_search(cr, uid, '', context=context) return result, {} _group_by_full = { 'stage_id': _read_group_stage_ids, } # # Events # class event_event(osv.osv): 
_inherit = "event.event" def _list_tz(self,cr,uid, context=None): # put POSIX 'Etc/*' entries at the end to avoid confusing users - see bug 1086728 return [(tz,tz) for tz in sorted(pytz.all_timezones, key=lambda tz: tz if not tz.startswith('Etc/') else '_')] def _count_tracks(self, cr, uid, ids, field_name, arg, context=None): return { event.id: len(event.track_ids) for event in self.browse(cr, uid, ids, context=context) } def _get_tracks_tag_ids(self, cr, uid, ids, field_names, arg=None, context=None): res = dict((res_id, []) for res_id in ids) for event in self.browse(cr, uid, ids, context=context): for track in event.track_ids: res[event.id] += [tag.id for tag in track.tag_ids] res[event.id] = list(set(res[event.id])) return res _columns = { 'tag_ids': fields.many2many('event.tag', string='Tags'), 'track_ids': fields.one2many('event.track', 'event_id', 'Tracks', copy=True), 'sponsor_ids': fields.one2many('event.sponsor', 'event_id', 'Sponsorships', copy=True), 'blog_id': fields.many2one('blog.blog', 'Event Blog'), 'show_track_proposal': fields.boolean('Talks Proposals'), 'show_tracks': fields.boolean('Multiple Tracks'), 'show_blog': fields.boolean('News'), 'count_tracks': fields.function(_count_tracks, type='integer', string='Tracks'), 'tracks_tag_ids': fields.function(_get_tracks_tag_ids, type='one2many', relation='event.track.tag', string='Tags of Tracks'), 'allowed_track_tag_ids': fields.many2many('event.track.tag', string='Accepted Tags', help="List of available tags for track proposals."), 'timezone_of_event': fields.selection(_list_tz, 'Event Timezone', size=64), } _defaults = { 'show_track_proposal': False, 'show_tracks': False, 'show_blog': False, 'timezone_of_event':lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).tz, } def _get_new_menu_pages(self, cr, uid, event, context=None): context = context or {} result = super(event_event, self)._get_new_menu_pages(cr, uid, event, context=context) if event.show_tracks: result.append( 
(_('Talks'), '/event/%s/track' % slug(event))) result.append( (_('Agenda'), '/event/%s/agenda' % slug(event))) if event.blog_id: result.append( (_('News'), '/blogpost'+slug(event.blog_ig))) if event.show_track_proposal: result.append( (_('Talk Proposals'), '/event/%s/track_proposal' % slug(event))) return result # # Sponsors # class event_sponsors_type(osv.osv): _name = "event.sponsor.type" _order = "sequence" _columns = { "name": fields.char('Sponsor Type', required=True, translate=True), "sequence": fields.integer('Sequence') } class event_sponsors(osv.osv): _name = "event.sponsor" _order = "sequence" _columns = { 'event_id': fields.many2one('event.event', 'Event', required=True), 'sponsor_type_id': fields.many2one('event.sponsor.type', 'Sponsoring Type', required=True), 'partner_id': fields.many2one('res.partner', 'Sponsor/Customer', required=True), 'url': fields.text('Sponsor Website'), 'sequence': fields.related('sponsor_type_id', 'sequence', string='Sequence', store=True), 'image_medium': fields.related('partner_id', 'image_medium', string='Logo', type='binary') } def has_access_to_partner(self, cr, uid, ids, context=None): partner_ids = [sponsor.partner_id.id for sponsor in self.browse(cr, uid, ids, context=context)] return len(partner_ids) == self.pool.get("res.partner").search(cr, uid, [("id", "in", partner_ids)], count=True, context=context)
agpl-3.0
ZazieTheBeast/oscar
oscar/lib/python2.7/site-packages/pip/commands/show.py
142
5815
from __future__ import absolute_import from email.parser import FeedParser import logging import os from pip.basecommand import Command from pip.status_codes import SUCCESS, ERROR from pip._vendor import pkg_resources logger = logging.getLogger(__name__) class ShowCommand(Command): """Show information about one or more installed packages.""" name = 'show' usage = """ %prog [options] <package> ...""" summary = 'Show information about installed packages.' def __init__(self, *args, **kw): super(ShowCommand, self).__init__(*args, **kw) self.cmd_opts.add_option( '-f', '--files', dest='files', action='store_true', default=False, help='Show the full list of installed files for each package.') self.parser.insert_option_group(0, self.cmd_opts) def run(self, options, args): if not args: logger.warning('ERROR: Please provide a package name or names.') return ERROR query = args results = search_packages_info(query) if not print_results(results, options.files): return ERROR return SUCCESS def search_packages_info(query): """ Gather details from installed distributions. Print distribution name, version, location, and installed files. Installed files requires a pip generated 'installed-files.txt' in the distributions '.egg-info' directory. 
""" installed = dict( [(p.project_name.lower(), p) for p in pkg_resources.working_set]) query_names = [name.lower() for name in query] for dist in [installed[pkg] for pkg in query_names if pkg in installed]: package = { 'name': dist.project_name, 'version': dist.version, 'location': dist.location, 'requires': [dep.project_name for dep in dist.requires()], } file_list = None metadata = None if isinstance(dist, pkg_resources.DistInfoDistribution): # RECORDs should be part of .dist-info metadatas if dist.has_metadata('RECORD'): lines = dist.get_metadata_lines('RECORD') paths = [l.split(',')[0] for l in lines] paths = [os.path.join(dist.location, p) for p in paths] file_list = [os.path.relpath(p, dist.location) for p in paths] if dist.has_metadata('METADATA'): metadata = dist.get_metadata('METADATA') else: # Otherwise use pip's log for .egg-info's if dist.has_metadata('installed-files.txt'): paths = dist.get_metadata_lines('installed-files.txt') paths = [os.path.join(dist.egg_info, p) for p in paths] file_list = [os.path.relpath(p, dist.location) for p in paths] if dist.has_metadata('PKG-INFO'): metadata = dist.get_metadata('PKG-INFO') if dist.has_metadata('entry_points.txt'): entry_points = dist.get_metadata_lines('entry_points.txt') package['entry_points'] = entry_points installer = None if dist.has_metadata('INSTALLER'): for line in dist.get_metadata_lines('INSTALLER'): if line.strip(): installer = line.strip() break package['installer'] = installer # @todo: Should pkg_resources.Distribution have a # `get_pkg_info` method? 
feed_parser = FeedParser() feed_parser.feed(metadata) pkg_info_dict = feed_parser.close() for key in ('metadata-version', 'summary', 'home-page', 'author', 'author-email', 'license'): package[key] = pkg_info_dict.get(key) # It looks like FeedParser can not deal with repeated headers classifiers = [] for line in metadata.splitlines(): if not line: break # Classifier: License :: OSI Approved :: MIT License if line.startswith('Classifier: '): classifiers.append(line[len('Classifier: '):]) package['classifiers'] = classifiers if file_list: package['files'] = sorted(file_list) yield package def print_results(distributions, list_all_files): """ Print the informations from installed distributions found. """ results_printed = False for dist in distributions: results_printed = True logger.info("---") logger.info("Metadata-Version: %s", dist.get('metadata-version')) logger.info("Name: %s", dist['name']) logger.info("Version: %s", dist['version']) logger.info("Summary: %s", dist.get('summary')) logger.info("Home-page: %s", dist.get('home-page')) logger.info("Author: %s", dist.get('author')) logger.info("Author-email: %s", dist.get('author-email')) if dist['installer'] is not None: logger.info("Installer: %s", dist['installer']) logger.info("License: %s", dist.get('license')) logger.info("Location: %s", dist['location']) logger.info("Requires: %s", ', '.join(dist['requires'])) logger.info("Classifiers:") for classifier in dist['classifiers']: logger.info(" %s", classifier) if list_all_files: logger.info("Files:") if 'files' in dist: for line in dist['files']: logger.info(" %s", line.strip()) else: logger.info("Cannot locate installed-files.txt") if 'entry_points' in dist: logger.info("Entry-points:") for line in dist['entry_points']: logger.info(" %s", line.strip()) return results_printed
bsd-3-clause
pforret/python-for-android
python-build/python-libs/gdata/build/lib/gdata/tlslite/integration/XMLRPCTransport.py
271
5812
"""TLS Lite + xmlrpclib.""" import xmlrpclib import httplib from gdata.tlslite.integration.HTTPTLSConnection import HTTPTLSConnection from gdata.tlslite.integration.ClientHelper import ClientHelper class XMLRPCTransport(xmlrpclib.Transport, ClientHelper): """Handles an HTTPS transaction to an XML-RPC server.""" def __init__(self, username=None, password=None, sharedKey=None, certChain=None, privateKey=None, cryptoID=None, protocol=None, x509Fingerprint=None, x509TrustList=None, x509CommonName=None, settings=None): """Create a new XMLRPCTransport. An instance of this class can be passed to L{xmlrpclib.ServerProxy} to use TLS with XML-RPC calls:: from tlslite.api import XMLRPCTransport from xmlrpclib import ServerProxy transport = XMLRPCTransport(user="alice", password="abra123") server = ServerProxy("https://localhost", transport) For client authentication, use one of these argument combinations: - username, password (SRP) - username, sharedKey (shared-key) - certChain, privateKey (certificate) For server authentication, you can either rely on the implicit mutual authentication performed by SRP or shared-keys, or you can do certificate-based server authentication with one of these argument combinations: - cryptoID[, protocol] (requires cryptoIDlib) - x509Fingerprint - x509TrustList[, x509CommonName] (requires cryptlib_py) Certificate-based server authentication is compatible with SRP or certificate-based client authentication. It is not compatible with shared-keys. The constructor does not perform the TLS handshake itself, but simply stores these arguments for later. The handshake is performed only when this class needs to connect with the server. Thus you should be prepared to handle TLS-specific exceptions when calling methods of L{xmlrpclib.ServerProxy}. See the client handshake functions in L{tlslite.TLSConnection.TLSConnection} for details on which exceptions might be raised. @type username: str @param username: SRP or shared-key username. 
Requires the 'password' or 'sharedKey' argument. @type password: str @param password: SRP password for mutual authentication. Requires the 'username' argument. @type sharedKey: str @param sharedKey: Shared key for mutual authentication. Requires the 'username' argument. @type certChain: L{tlslite.X509CertChain.X509CertChain} or L{cryptoIDlib.CertChain.CertChain} @param certChain: Certificate chain for client authentication. Requires the 'privateKey' argument. Excludes the SRP or shared-key related arguments. @type privateKey: L{tlslite.utils.RSAKey.RSAKey} @param privateKey: Private key for client authentication. Requires the 'certChain' argument. Excludes the SRP or shared-key related arguments. @type cryptoID: str @param cryptoID: cryptoID for server authentication. Mutually exclusive with the 'x509...' arguments. @type protocol: str @param protocol: cryptoID protocol URI for server authentication. Requires the 'cryptoID' argument. @type x509Fingerprint: str @param x509Fingerprint: Hex-encoded X.509 fingerprint for server authentication. Mutually exclusive with the 'cryptoID' and 'x509TrustList' arguments. @type x509TrustList: list of L{tlslite.X509.X509} @param x509TrustList: A list of trusted root certificates. The other party must present a certificate chain which extends to one of these root certificates. The cryptlib_py module must be installed to use this parameter. Mutually exclusive with the 'cryptoID' and 'x509Fingerprint' arguments. @type x509CommonName: str @param x509CommonName: The end-entity certificate's 'CN' field must match this value. For a web server, this is typically a server name such as 'www.amazon.com'. Mutually exclusive with the 'cryptoID' and 'x509Fingerprint' arguments. Requires the 'x509TrustList' argument. @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} @param settings: Various settings which can be used to control the ciphersuites, certificate types, and SSL/TLS versions offered by the client. 
""" ClientHelper.__init__(self, username, password, sharedKey, certChain, privateKey, cryptoID, protocol, x509Fingerprint, x509TrustList, x509CommonName, settings) def make_connection(self, host): # create a HTTPS connection object from a host descriptor host, extra_headers, x509 = self.get_host_info(host) http = HTTPTLSConnection(host, None, self.username, self.password, self.sharedKey, self.certChain, self.privateKey, self.checker.cryptoID, self.checker.protocol, self.checker.x509Fingerprint, self.checker.x509TrustList, self.checker.x509CommonName, self.settings) http2 = httplib.HTTP() http2._setup(http) return http2
apache-2.0
microdee/IronHydra
src/IronHydra/Lib/distutils/command/install_lib.py
251
8338
"""distutils.command.install_lib Implements the Distutils 'install_lib' command (install all Python modules).""" __revision__ = "$Id$" import os import sys from distutils.core import Command from distutils.errors import DistutilsOptionError # Extension for Python source files. if hasattr(os, 'extsep'): PYTHON_SOURCE_EXTENSION = os.extsep + "py" else: PYTHON_SOURCE_EXTENSION = ".py" class install_lib(Command): description = "install all Python modules (extensions and pure Python)" # The byte-compilation options are a tad confusing. Here are the # possible scenarios: # 1) no compilation at all (--no-compile --no-optimize) # 2) compile .pyc only (--compile --no-optimize; default) # 3) compile .pyc and "level 1" .pyo (--compile --optimize) # 4) compile "level 1" .pyo only (--no-compile --optimize) # 5) compile .pyc and "level 2" .pyo (--compile --optimize-more) # 6) compile "level 2" .pyo only (--no-compile --optimize-more) # # The UI for this is two option, 'compile' and 'optimize'. # 'compile' is strictly boolean, and only decides whether to # generate .pyc files. 'optimize' is three-way (0, 1, or 2), and # decides both whether to generate .pyo files and what level of # optimization to use. 
user_options = [ ('install-dir=', 'd', "directory to install to"), ('build-dir=','b', "build directory (where to install from)"), ('force', 'f', "force installation (overwrite existing files)"), ('compile', 'c', "compile .py to .pyc [default]"), ('no-compile', None, "don't compile .py files"), ('optimize=', 'O', "also compile with optimization: -O1 for \"python -O\", " "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), ('skip-build', None, "skip the build steps"), ] boolean_options = ['force', 'compile', 'skip-build'] negative_opt = {'no-compile' : 'compile'} def initialize_options(self): # let the 'install' command dictate our installation directory self.install_dir = None self.build_dir = None self.force = 0 self.compile = None self.optimize = None self.skip_build = None def finalize_options(self): # Get all the information we need to install pure Python modules # from the umbrella 'install' command -- build (source) directory, # install (target) directory, and whether to compile .py files. self.set_undefined_options('install', ('build_lib', 'build_dir'), ('install_lib', 'install_dir'), ('force', 'force'), ('compile', 'compile'), ('optimize', 'optimize'), ('skip_build', 'skip_build'), ) if self.compile is None: self.compile = 1 if self.optimize is None: self.optimize = 0 if not isinstance(self.optimize, int): try: self.optimize = int(self.optimize) if self.optimize not in (0, 1, 2): raise AssertionError except (ValueError, AssertionError): raise DistutilsOptionError, "optimize must be 0, 1, or 2" def run(self): # Make sure we have built everything we need first self.build() # Install everything: simply dump the entire contents of the build # directory to the installation directory (that's the beauty of # having a build directory!) 
outfiles = self.install() # (Optionally) compile .py to .pyc if outfiles is not None and self.distribution.has_pure_modules(): self.byte_compile(outfiles) # -- Top-level worker functions ------------------------------------ # (called from 'run()') def build(self): if not self.skip_build: if self.distribution.has_pure_modules(): self.run_command('build_py') if self.distribution.has_ext_modules(): self.run_command('build_ext') def install(self): if os.path.isdir(self.build_dir): outfiles = self.copy_tree(self.build_dir, self.install_dir) else: self.warn("'%s' does not exist -- no Python modules to install" % self.build_dir) return return outfiles def byte_compile(self, files): if sys.dont_write_bytecode: self.warn('byte-compiling is disabled, skipping.') return from distutils.util import byte_compile # Get the "--root" directory supplied to the "install" command, # and use it as a prefix to strip off the purported filename # encoded in bytecode files. This is far from complete, but it # should at least generate usable bytecode in RPM distributions. 
install_root = self.get_finalized_command('install').root if self.compile: byte_compile(files, optimize=0, force=self.force, prefix=install_root, dry_run=self.dry_run) if self.optimize > 0: byte_compile(files, optimize=self.optimize, force=self.force, prefix=install_root, verbose=self.verbose, dry_run=self.dry_run) # -- Utility methods ----------------------------------------------- def _mutate_outputs(self, has_any, build_cmd, cmd_option, output_dir): if not has_any: return [] build_cmd = self.get_finalized_command(build_cmd) build_files = build_cmd.get_outputs() build_dir = getattr(build_cmd, cmd_option) prefix_len = len(build_dir) + len(os.sep) outputs = [] for file in build_files: outputs.append(os.path.join(output_dir, file[prefix_len:])) return outputs def _bytecode_filenames(self, py_filenames): bytecode_files = [] for py_file in py_filenames: # Since build_py handles package data installation, the # list of outputs can contain more than just .py files. # Make sure we only report bytecode for the .py files. ext = os.path.splitext(os.path.normcase(py_file))[1] if ext != PYTHON_SOURCE_EXTENSION: continue if self.compile: bytecode_files.append(py_file + "c") if self.optimize > 0: bytecode_files.append(py_file + "o") return bytecode_files # -- External interface -------------------------------------------- # (called by outsiders) def get_outputs(self): """Return the list of files that would be installed if this command were actually run. Not affected by the "dry-run" flag or whether modules have actually been built yet. 
""" pure_outputs = \ self._mutate_outputs(self.distribution.has_pure_modules(), 'build_py', 'build_lib', self.install_dir) if self.compile: bytecode_outputs = self._bytecode_filenames(pure_outputs) else: bytecode_outputs = [] ext_outputs = \ self._mutate_outputs(self.distribution.has_ext_modules(), 'build_ext', 'build_lib', self.install_dir) return pure_outputs + bytecode_outputs + ext_outputs def get_inputs(self): """Get the list of files that are input to this command, ie. the files that get installed as they are named in the build tree. The files in this list correspond one-to-one to the output filenames returned by 'get_outputs()'. """ inputs = [] if self.distribution.has_pure_modules(): build_py = self.get_finalized_command('build_py') inputs.extend(build_py.get_outputs()) if self.distribution.has_ext_modules(): build_ext = self.get_finalized_command('build_ext') inputs.extend(build_ext.get_outputs()) return inputs
mit
powlo/script.module.pydevd
lib/pydevd_attach_to_process/winappdbg/plugins/do_exchain.py
100
2307
#!~/.wine/drive_c/Python25/python.exe # -*- coding: utf-8 -*- # Command line debugger using WinAppDbg # Show exception handlers list # Copyright (c) 2009-2014, Mario Vilas # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice,this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
__revision__ = "$Id$" from winappdbg import HexDump, Table def do(self, arg): ".exchain - Show the SEH chain" thread = self.get_thread_from_prefix() print "Exception handlers for thread %d" % thread.get_tid() print table = Table() table.addRow("Block", "Function") bits = thread.get_bits() for (seh, seh_func) in thread.get_seh_chain(): if seh is not None: seh = HexDump.address(seh, bits) if seh_func is not None: seh_func = HexDump.address(seh_func, bits) table.addRow(seh, seh_func) print table.getOutput()
epl-1.0
CGATOxford/proj029
scripts/fastq2filteredfastq.py
1
3610
''' fastq2filteredfastq.py ======================= :Author: Nick Ilott :Release: $Id$ :Date: |today| :Tags: Python Purpose ------- filter a fastq file based on reads in a bam file. Usage ----- Example:: zcat in.fastq.gz | python fastq2filteredfastq.py --bamfile=x.bam Type:: python fastq2filteredfastq.py --help for command line help. Command line options -------------------- ''' import os import sys import re import optparse import pysam import CGATPipelines.Pipeline as P import CGAT.Experiment as E import CGAT.IOTools as IOTools import CGAT.Fastq as Fastq import CGAT.Experiment as E def main(argv=None): """script main. parses command line options in sys.argv, unless *argv* is given. """ if argv is None: argv = sys.argv # setup command line parser parser = E.OptionParser(version="%prog version: $Id$", usage=globals()["__doc__"]) parser.add_option("-b", "--bamfile", dest="bamfile", type="string", help="input bamfile to filter reads from") parser.add_option("-r", "--reads", dest="reads", type="choice", choices=("mapped", "unmapped"), help="type of read to keep") parser.add_option("-s", "--scriptsdir", dest="scriptsdir", type="string", help="CGAT scripts directory") parser.add_option("-i", "--invert", dest="invert", action="store_true", help="invert selection - if for example unmapped reads \ aren't output") parser.set_defaults(bamfile = None, reads = "mapped", scriptsdir = "/ifs/devel/nicki/cgat_git/cgat/scripts", invert = False) # add common options (-h/--help, ...) 
and parse command line (options, args) = E.Start(parser, argv=argv) c = E.Counter() c.input_alignments = 0 c.input_reads = 0 c.output_reads = 0 # output text file for reads TO KEEP bam = pysam.Samfile(options.bamfile, "rb") temp = P.getTempFile(".") E.info("iterating over bam file") for alignment in bam.fetch(until_eof = True): c.input_alignments += 1 if options.reads == "unmapped": if alignment.is_unmapped: #c.input_alignments += 1 temp.write(alignment.qname + "\n") elif options.reads == "mapped": if not alignment.is_unmapped: #c.input_alignments += 1 temp.write(alignment.qname + "\n") temp.close() tempname = temp.name E.info("filtering fastq file") # filter fastq file ids = set(IOTools.readList(IOTools.openFile(tempname).readlines())) c.input_alignments = len(ids) for fastq in Fastq.iterate(options.stdin): c.input_reads += 1 if fastq.identifier.endswith("/1") or fastq.identifier.endswith("/2"): identifier = fastq.identifier[:-2] elif len(fastq.identifier.split(" ")) == 2: identifier = fastq.identifier.split(" ")[0] else: identifier = fastq.identifier if not options.invert: if identifier in ids: c.output_reads += 1 options.stdout.write("%s\n" % fastq) else: if identifier in ids: continue c.output_reads += 1 options.stdout.write("%s\n" % fastq) E.info(c) os.unlink(tempname) # write footer and output benchmark information. E.Stop() if __name__ == "__main__": sys.exit(main(sys.argv))
bsd-3-clause
akutuzov/gensim
docs/src/conf.py
2
7182
# -*- coding: utf-8 -*- # # gensim documentation build configuration file, created by # sphinx-quickstart on Wed Mar 17 13:42:21 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.append(os.path.abspath('.')) # -- General configuration ----------------------------------------------------- html_theme = 'gensim_theme' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc'] autoclass_content = "both" # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'indextoc' # Additional templates that should be rendered to pages, maps page names to # template names. html_additional_pages = {'index': './_templates/indexcontent.html'} # General information about the project. project = u'gensim' copyright = u'2009-now, Radim Řehůřek <me(at)radimrehurek.com>' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.13.4.1' # The full version, including alpha/beta/rc tags. release = '0.13.4.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
#language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. #html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
#main_colour = "#ffbbbb" html_theme_options = { #"rightsidebar": "false", #"stickysidebar": "true", #"bodyfont": "'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', 'sans-serif'", #"headfont": "'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', 'sans-serif'", #"sidebarbgcolor": "fuckyou", #"footerbgcolor": "#771111", #"relbarbgcolor": "#993333", #"sidebartextcolor": "#000000", #"sidebarlinkcolor": "#330000", #"codebgcolor": "#fffff0", #"headtextcolor": "#000080", #"headbgcolor": "#f0f0ff", #"bgcolor": "#ffffff", } # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['.'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". html_title = "gensim" # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = '' # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = 'favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. 
html_sidebars = {} #{'index': ['download.html', 'globaltoc.html', 'searchbox.html', 'indexsidebar.html']} #html_sidebars = {'index': ['globaltoc.html', 'searchbox.html']} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False html_domain_indices = False # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'gensimdoc' html_show_sphinx = False # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'gensim.tex', u'gensim Documentation', u'Radim Řehůřek', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True
lgpl-2.1
Sodki/ansible
lib/ansible/plugins/callback/json.py
118
2439
# (c) 2016, Matt Martz <matt@sivel.net> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import json from ansible.plugins.callback import CallbackBase class CallbackModule(CallbackBase): CALLBACK_VERSION = 2.0 CALLBACK_TYPE = 'stdout' CALLBACK_NAME = 'json' def __init__(self, display=None): super(CallbackModule, self).__init__(display) self.results = [] def _new_play(self, play): return { 'play': { 'name': play.name, 'id': str(play._uuid) }, 'tasks': [] } def _new_task(self, task): return { 'task': { 'name': task.name, 'id': str(task._uuid) }, 'hosts': {} } def v2_playbook_on_play_start(self, play): self.results.append(self._new_play(play)) def v2_playbook_on_task_start(self, task, is_conditional): self.results[-1]['tasks'].append(self._new_task(task)) def v2_runner_on_ok(self, result, **kwargs): host = result._host self.results[-1]['tasks'][-1]['hosts'][host.name] = result._result def v2_playbook_on_stats(self, stats): """Display info about playbook statistics""" hosts = sorted(stats.processed.keys()) summary = {} for h in hosts: s = stats.summarize(h) summary[h] = s output = { 'plays': self.results, 'stats': summary } self._display.display(json.dumps(output, indent=4, sort_keys=True)) v2_runner_on_failed = v2_runner_on_ok v2_runner_on_unreachable = 
v2_runner_on_ok v2_runner_on_skipped = v2_runner_on_ok
gpl-3.0
liuzhaoguo/FreeROI
froi/gui/component/unused/volumedintensitydialog.py
6
2368
__author__ = 'zhouguangfu' # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from PyQt4.QtCore import * from PyQt4.QtGui import * import matplotlib.pyplot as plt from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar class VolumeIntensityDialog(QDialog): """ A dialog for action of voxel time point curve display. """ def __init__(self, model,parent=None): super(VolumeIntensityDialog, self).__init__(parent) self._model = model self._init_gui() self._create_actions() self._plot() def _init_gui(self): """ Initialize GUI. """ # a figure instance to plot on self.figure = plt.figure() # this is the Canvas Widget that displays the `figure` # it takes the `figure` instance as a parameter to __init__ self.canvas = FigureCanvas(self.figure) # this is the Navigation widget,it takes the Canvas widget and a parent self.toolbar = NavigationToolbar(self.canvas, self) # set the layout layout = QVBoxLayout() layout.addWidget(self.toolbar) layout.addWidget(self.canvas) self.setLayout(layout) def _create_actions(self): self._model.time_changed.connect(self._plot) def _plot(self): ''' plot time time point curve.''' volume_data = self._model.data(self._model.currentIndex(),Qt.UserRole + 5) if self._model.data(self._model.currentIndex(),Qt.UserRole + 8): data = volume_data[:,:,:,self._model.get_current_time_point()] self.points = data[data!=0] # self.points = volume_data[volume_data[:,:,:,self._model.get_current_time_point()]!=0l, # self._model.get_current_time_point()] else: self.points = volume_data[volume_data!=0] # create an axis ax = self.figure.add_subplot(111) ax.hold(False) ax.hist(self.points,50) plt.xlabel("Intensity") plt.ylabel("Number") plt.grid() self.canvas.draw() def closeEvent(self, QCloseEvent): self._model.time_changed.disconnect(self._plot)
bsd-3-clause
dronefly/dronefly.github.io
flask/lib/python2.7/site-packages/pip/_vendor/requests/auth.py
331
6123
# -*- coding: utf-8 -*- """ requests.auth ~~~~~~~~~~~~~ This module contains the authentication handlers for Requests. """ import os import re import time import hashlib from base64 import b64encode from .compat import urlparse, str from .cookies import extract_cookies_to_jar from .utils import parse_dict_header CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded' CONTENT_TYPE_MULTI_PART = 'multipart/form-data' def _basic_auth_str(username, password): """Returns a Basic Auth string.""" return 'Basic ' + b64encode(('%s:%s' % (username, password)).encode('latin1')).strip().decode('latin1') class AuthBase(object): """Base class that all auth implementations derive from""" def __call__(self, r): raise NotImplementedError('Auth hooks must be callable.') class HTTPBasicAuth(AuthBase): """Attaches HTTP Basic Authentication to the given Request object.""" def __init__(self, username, password): self.username = username self.password = password def __call__(self, r): r.headers['Authorization'] = _basic_auth_str(self.username, self.password) return r class HTTPProxyAuth(HTTPBasicAuth): """Attaches HTTP Proxy Authentication to a given Request object.""" def __call__(self, r): r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password) return r class HTTPDigestAuth(AuthBase): """Attaches HTTP Digest Authentication to the given Request object.""" def __init__(self, username, password): self.username = username self.password = password self.last_nonce = '' self.nonce_count = 0 self.chal = {} self.pos = None def build_digest_header(self, method, url): realm = self.chal['realm'] nonce = self.chal['nonce'] qop = self.chal.get('qop') algorithm = self.chal.get('algorithm') opaque = self.chal.get('opaque') if algorithm is None: _algorithm = 'MD5' else: _algorithm = algorithm.upper() # lambdas assume digest modules are imported at the top level if _algorithm == 'MD5' or _algorithm == 'MD5-SESS': def md5_utf8(x): if isinstance(x, str): x = 
x.encode('utf-8') return hashlib.md5(x).hexdigest() hash_utf8 = md5_utf8 elif _algorithm == 'SHA': def sha_utf8(x): if isinstance(x, str): x = x.encode('utf-8') return hashlib.sha1(x).hexdigest() hash_utf8 = sha_utf8 KD = lambda s, d: hash_utf8("%s:%s" % (s, d)) if hash_utf8 is None: return None # XXX not implemented yet entdig = None p_parsed = urlparse(url) path = p_parsed.path if p_parsed.query: path += '?' + p_parsed.query A1 = '%s:%s:%s' % (self.username, realm, self.password) A2 = '%s:%s' % (method, path) HA1 = hash_utf8(A1) HA2 = hash_utf8(A2) if nonce == self.last_nonce: self.nonce_count += 1 else: self.nonce_count = 1 ncvalue = '%08x' % self.nonce_count s = str(self.nonce_count).encode('utf-8') s += nonce.encode('utf-8') s += time.ctime().encode('utf-8') s += os.urandom(8) cnonce = (hashlib.sha1(s).hexdigest()[:16]) noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2) if _algorithm == 'MD5-SESS': HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce)) if qop is None: respdig = KD(HA1, "%s:%s" % (nonce, HA2)) elif qop == 'auth' or 'auth' in qop.split(','): respdig = KD(HA1, noncebit) else: # XXX handle auth-int. return None self.last_nonce = nonce # XXX should the partial digests be encoded too? base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \ 'response="%s"' % (self.username, realm, nonce, path, respdig) if opaque: base += ', opaque="%s"' % opaque if algorithm: base += ', algorithm="%s"' % algorithm if entdig: base += ', digest="%s"' % entdig if qop: base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce) return 'Digest %s' % (base) def handle_401(self, r, **kwargs): """Takes the given response and tries digest-auth, if needed.""" if self.pos is not None: # Rewind the file position indicator of the body to where # it was to resend the request. 
r.request.body.seek(self.pos) num_401_calls = getattr(self, 'num_401_calls', 1) s_auth = r.headers.get('www-authenticate', '') if 'digest' in s_auth.lower() and num_401_calls < 2: setattr(self, 'num_401_calls', num_401_calls + 1) pat = re.compile(r'digest ', flags=re.IGNORECASE) self.chal = parse_dict_header(pat.sub('', s_auth, count=1)) # Consume content and release the original connection # to allow our new request to reuse the same one. r.content r.raw.release_conn() prep = r.request.copy() extract_cookies_to_jar(prep._cookies, r.request, r.raw) prep.prepare_cookies(prep._cookies) prep.headers['Authorization'] = self.build_digest_header( prep.method, prep.url) _r = r.connection.send(prep, **kwargs) _r.history.append(r) _r.request = prep return _r setattr(self, 'num_401_calls', 1) return r def __call__(self, r): # If we have a saved nonce, skip the 401 if self.last_nonce: r.headers['Authorization'] = self.build_digest_header(r.method, r.url) try: self.pos = r.body.tell() except AttributeError: pass r.register_hook('response', self.handle_401) return r
apache-2.0
40223233/2015cd_midterm
static/Brython3.1.1-20150328-091302/Lib/datetime.py
628
75044
"""Concrete date/time and related types. See http://www.iana.org/time-zones/repository/tz-link.html for time zone and DST data sources. """ import time as _time import math as _math def _cmp(x, y): return 0 if x == y else 1 if x > y else -1 MINYEAR = 1 MAXYEAR = 9999 _MAXORDINAL = 3652059 # date.max.toordinal() # Utility functions, adapted from Python's Demo/classes/Dates.py, which # also assumes the current Gregorian calendar indefinitely extended in # both directions. Difference: Dates.py calls January 1 of year 0 day # number 1. The code here calls January 1 of year 1 day number 1. This is # to match the definition of the "proleptic Gregorian" calendar in Dershowitz # and Reingold's "Calendrical Calculations", where it's the base calendar # for all computations. See the book for algorithms for converting between # proleptic Gregorian ordinals and many other calendar systems. _DAYS_IN_MONTH = [None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] _DAYS_BEFORE_MONTH = [None] dbm = 0 for dim in _DAYS_IN_MONTH[1:]: _DAYS_BEFORE_MONTH.append(dbm) dbm += dim del dbm, dim def _is_leap(year): "year -> 1 if leap year, else 0." return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0) def _days_before_year(year): "year -> number of days before January 1st of year." y = year - 1 return y*365 + y//4 - y//100 + y//400 def _days_in_month(year, month): "year, month -> number of days in that month in that year." assert 1 <= month <= 12, month if month == 2 and _is_leap(year): return 29 return _DAYS_IN_MONTH[month] def _days_before_month(year, month): "year, month -> number of days in year preceding first day of month." assert 1 <= month <= 12, 'month must be in 1..12' return _DAYS_BEFORE_MONTH[month] + (month > 2 and _is_leap(year)) def _ymd2ord(year, month, day): "year, month, day -> ordinal, considering 01-Jan-0001 as day 1." 
assert 1 <= month <= 12, 'month must be in 1..12' dim = _days_in_month(year, month) assert 1 <= day <= dim, ('day must be in 1..%d' % dim) return (_days_before_year(year) + _days_before_month(year, month) + day) _DI400Y = _days_before_year(401) # number of days in 400 years _DI100Y = _days_before_year(101) # " " " " 100 " _DI4Y = _days_before_year(5) # " " " " 4 " # A 4-year cycle has an extra leap day over what we'd get from pasting # together 4 single years. assert _DI4Y == 4 * 365 + 1 # Similarly, a 400-year cycle has an extra leap day over what we'd get from # pasting together 4 100-year cycles. assert _DI400Y == 4 * _DI100Y + 1 # OTOH, a 100-year cycle has one fewer leap day than we'd get from # pasting together 25 4-year cycles. assert _DI100Y == 25 * _DI4Y - 1 def _ord2ymd(n): "ordinal -> (year, month, day), considering 01-Jan-0001 as day 1." # n is a 1-based index, starting at 1-Jan-1. The pattern of leap years # repeats exactly every 400 years. The basic strategy is to find the # closest 400-year boundary at or before n, then work with the offset # from that boundary to n. Life is much clearer if we subtract 1 from # n first -- then the values of n at 400-year boundaries are exactly # those divisible by _DI400Y: # # D M Y n n-1 # -- --- ---- ---------- ---------------- # 31 Dec -400 -_DI400Y -_DI400Y -1 # 1 Jan -399 -_DI400Y +1 -_DI400Y 400-year boundary # ... # 30 Dec 000 -1 -2 # 31 Dec 000 0 -1 # 1 Jan 001 1 0 400-year boundary # 2 Jan 001 2 1 # 3 Jan 001 3 2 # ... # 31 Dec 400 _DI400Y _DI400Y -1 # 1 Jan 401 _DI400Y +1 _DI400Y 400-year boundary n -= 1 n400, n = divmod(n, _DI400Y) year = n400 * 400 + 1 # ..., -399, 1, 401, ... # Now n is the (non-negative) offset, in days, from January 1 of year, to # the desired date. Now compute how many 100-year cycles precede n. # Note that it's possible for n100 to equal 4! In that case 4 full # 100-year cycles precede the desired day, which implies the desired # day is December 31 at the end of a 400-year cycle. 
n100, n = divmod(n, _DI100Y) # Now compute how many 4-year cycles precede it. n4, n = divmod(n, _DI4Y) # And now how many single years. Again n1 can be 4, and again meaning # that the desired day is December 31 at the end of the 4-year cycle. n1, n = divmod(n, 365) year += n100 * 100 + n4 * 4 + n1 if n1 == 4 or n100 == 4: assert n == 0 return year-1, 12, 31 # Now the year is correct, and n is the offset from January 1. We find # the month via an estimate that's either exact or one too large. leapyear = n1 == 3 and (n4 != 24 or n100 == 3) assert leapyear == _is_leap(year) month = (n + 50) >> 5 preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear) if preceding > n: # estimate is too large month -= 1 preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear) n -= preceding assert 0 <= n < _days_in_month(year, month) # Now the year and month are correct, and n is the offset from the # start of that month: we're done! return year, month, n+1 # Month and day names. For localized versions, see the calendar module. _MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"] _DAYNAMES = [None, "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] def _build_struct_time(y, m, d, hh, mm, ss, dstflag): wday = (_ymd2ord(y, m, d) + 6) % 7 dnum = _days_before_month(y, m) + d return _time.struct_time((y, m, d, hh, mm, ss, wday, dnum, dstflag)) def _format_time(hh, mm, ss, us): # Skip trailing microseconds when us==0. result = "%02d:%02d:%02d" % (hh, mm, ss) if us: result += ".%06d" % us return result # Correctly substitute for %z and %Z escapes in strftime formats. def _wrap_strftime(object, format, timetuple): # Don't call utcoffset() or tzname() unless actually needed. freplace = None # the string to use for %f zreplace = None # the string to use for %z Zreplace = None # the string to use for %Z # Scan format for %z and %Z escapes, replacing as needed. 
newformat = [] push = newformat.append i, n = 0, len(format) while i < n: ch = format[i] i += 1 if ch == '%': if i < n: ch = format[i] i += 1 if ch == 'f': if freplace is None: freplace = '%06d' % getattr(object, 'microsecond', 0) newformat.append(freplace) elif ch == 'z': if zreplace is None: zreplace = "" if hasattr(object, "utcoffset"): offset = object.utcoffset() if offset is not None: sign = '+' if offset.days < 0: offset = -offset sign = '-' h, m = divmod(offset, timedelta(hours=1)) assert not m % timedelta(minutes=1), "whole minute" m //= timedelta(minutes=1) zreplace = '%c%02d%02d' % (sign, h, m) assert '%' not in zreplace newformat.append(zreplace) elif ch == 'Z': if Zreplace is None: Zreplace = "" if hasattr(object, "tzname"): s = object.tzname() if s is not None: # strftime is going to have at this: escape % Zreplace = s.replace('%', '%%') newformat.append(Zreplace) else: push('%') push(ch) else: push('%') else: push(ch) newformat = "".join(newformat) return _time.strftime(newformat, timetuple) def _call_tzinfo_method(tzinfo, methname, tzinfoarg): if tzinfo is None: return None return getattr(tzinfo, methname)(tzinfoarg) # Just raise TypeError if the arg isn't None or a string. def _check_tzname(name): if name is not None and not isinstance(name, str): raise TypeError("tzinfo.tzname() must return None or string, " "not '%s'" % type(name)) # name is the offset-producing method, "utcoffset" or "dst". # offset is what it returned. # If offset isn't None or timedelta, raises TypeError. # If offset is None, returns None. # Else offset is checked for being in range, and a whole # of minutes. # If it is, its integer value is returned. Else ValueError is raised. 
def _check_utc_offset(name, offset): assert name in ("utcoffset", "dst") if offset is None: return if not isinstance(offset, timedelta): raise TypeError("tzinfo.%s() must return None " "or timedelta, not '%s'" % (name, type(offset))) if offset % timedelta(minutes=1) or offset.microseconds: raise ValueError("tzinfo.%s() must return a whole number " "of minutes, got %s" % (name, offset)) if not -timedelta(1) < offset < timedelta(1): raise ValueError("%s()=%s, must be must be strictly between" " -timedelta(hours=24) and timedelta(hours=24)" % (name, offset)) def _check_date_fields(year, month, day): if not isinstance(year, int): raise TypeError('int expected') if not MINYEAR <= year <= MAXYEAR: raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year) if not 1 <= month <= 12: raise ValueError('month must be in 1..12', month) dim = _days_in_month(year, month) if not 1 <= day <= dim: raise ValueError('day must be in 1..%d' % dim, day) def _check_time_fields(hour, minute, second, microsecond): if not isinstance(hour, int): raise TypeError('int expected') if not 0 <= hour <= 23: raise ValueError('hour must be in 0..23', hour) if not 0 <= minute <= 59: raise ValueError('minute must be in 0..59', minute) if not 0 <= second <= 59: raise ValueError('second must be in 0..59', second) if not 0 <= microsecond <= 999999: raise ValueError('microsecond must be in 0..999999', microsecond) def _check_tzinfo_arg(tz): if tz is not None and not isinstance(tz, tzinfo): raise TypeError("tzinfo argument must be None or of a tzinfo subclass") def _cmperror(x, y): raise TypeError("can't compare '%s' to '%s'" % ( type(x).__name__, type(y).__name__)) class timedelta: """Represent the difference between two datetime objects. 
Supported operators: - add, subtract timedelta - unary plus, minus, abs - compare to timedelta - multiply, divide by int In addition, datetime supports subtraction of two datetime objects returning a timedelta, and addition or subtraction of a datetime and a timedelta giving a datetime. Representation: (days, seconds, microseconds). Why? Because I felt like it. """ __slots__ = '_days', '_seconds', '_microseconds' def __new__(cls, days=0, seconds=0, microseconds=0, milliseconds=0, minutes=0, hours=0, weeks=0): # Doing this efficiently and accurately in C is going to be difficult # and error-prone, due to ubiquitous overflow possibilities, and that # C double doesn't have enough bits of precision to represent # microseconds over 10K years faithfully. The code here tries to make # explicit where go-fast assumptions can be relied on, in order to # guide the C implementation; it's way more convoluted than speed- # ignoring auto-overflow-to-long idiomatic Python could be. # XXX Check that all inputs are ints or floats. # Final values, all integer. # s and us fit in 32-bit signed ints; d isn't bounded. d = s = us = 0 # Normalize everything to days, seconds, microseconds. days += weeks*7 seconds += minutes*60 + hours*3600 microseconds += milliseconds*1000 # Get rid of all fractions, and normalize s and us. # Take a deep breath <wink>. 
if isinstance(days, float): dayfrac, days = _math.modf(days) daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.)) assert daysecondswhole == int(daysecondswhole) # can't overflow s = int(daysecondswhole) assert days == int(days) d = int(days) else: daysecondsfrac = 0.0 d = days assert isinstance(daysecondsfrac, float) assert abs(daysecondsfrac) <= 1.0 assert isinstance(d, int) assert abs(s) <= 24 * 3600 # days isn't referenced again before redefinition if isinstance(seconds, float): secondsfrac, seconds = _math.modf(seconds) assert seconds == int(seconds) seconds = int(seconds) secondsfrac += daysecondsfrac assert abs(secondsfrac) <= 2.0 else: secondsfrac = daysecondsfrac # daysecondsfrac isn't referenced again assert isinstance(secondsfrac, float) assert abs(secondsfrac) <= 2.0 assert isinstance(seconds, int) days, seconds = divmod(seconds, 24*3600) d += days s += int(seconds) # can't overflow assert isinstance(s, int) assert abs(s) <= 2 * 24 * 3600 # seconds isn't referenced again before redefinition usdouble = secondsfrac * 1e6 assert abs(usdouble) < 2.1e6 # exact value not critical # secondsfrac isn't referenced again if isinstance(microseconds, float): microseconds += usdouble microseconds = round(microseconds, 0) seconds, microseconds = divmod(microseconds, 1e6) assert microseconds == int(microseconds) assert seconds == int(seconds) days, seconds = divmod(seconds, 24.*3600.) 
assert days == int(days) assert seconds == int(seconds) d += int(days) s += int(seconds) # can't overflow assert isinstance(s, int) assert abs(s) <= 3 * 24 * 3600 else: seconds, microseconds = divmod(microseconds, 1000000) days, seconds = divmod(seconds, 24*3600) d += days s += int(seconds) # can't overflow assert isinstance(s, int) assert abs(s) <= 3 * 24 * 3600 microseconds = float(microseconds) microseconds += usdouble microseconds = round(microseconds, 0) assert abs(s) <= 3 * 24 * 3600 assert abs(microseconds) < 3.1e6 # Just a little bit of carrying possible for microseconds and seconds. assert isinstance(microseconds, float) assert int(microseconds) == microseconds us = int(microseconds) seconds, us = divmod(us, 1000000) s += seconds # cant't overflow assert isinstance(s, int) days, s = divmod(s, 24*3600) d += days assert isinstance(d, int) assert isinstance(s, int) and 0 <= s < 24*3600 assert isinstance(us, int) and 0 <= us < 1000000 self = object.__new__(cls) self._days = d self._seconds = s self._microseconds = us if abs(d) > 999999999: raise OverflowError("timedelta # of days is too large: %d" % d) return self def __repr__(self): if self._microseconds: return "%s(%d, %d, %d)" % ('datetime.' + self.__class__.__name__, self._days, self._seconds, self._microseconds) if self._seconds: return "%s(%d, %d)" % ('datetime.' + self.__class__.__name__, self._days, self._seconds) return "%s(%d)" % ('datetime.' 
+ self.__class__.__name__, self._days) def __str__(self): mm, ss = divmod(self._seconds, 60) hh, mm = divmod(mm, 60) s = "%d:%02d:%02d" % (hh, mm, ss) if self._days: def plural(n): return n, abs(n) != 1 and "s" or "" s = ("%d day%s, " % plural(self._days)) + s if self._microseconds: s = s + ".%06d" % self._microseconds return s def total_seconds(self): """Total seconds in the duration.""" return ((self.days * 86400 + self.seconds)*10**6 + self.microseconds) / 10**6 # Read-only field accessors @property def days(self): """days""" return self._days @property def seconds(self): """seconds""" return self._seconds @property def microseconds(self): """microseconds""" return self._microseconds def __add__(self, other): if isinstance(other, timedelta): # for CPython compatibility, we cannot use # our __class__ here, but need a real timedelta return timedelta(self._days + other._days, self._seconds + other._seconds, self._microseconds + other._microseconds) return NotImplemented __radd__ = __add__ def __sub__(self, other): if isinstance(other, timedelta): # for CPython compatibility, we cannot use # our __class__ here, but need a real timedelta return timedelta(self._days - other._days, self._seconds - other._seconds, self._microseconds - other._microseconds) return NotImplemented def __rsub__(self, other): if isinstance(other, timedelta): return -self + other return NotImplemented def __neg__(self): # for CPython compatibility, we cannot use # our __class__ here, but need a real timedelta return timedelta(-self._days, -self._seconds, -self._microseconds) def __pos__(self): return self def __abs__(self): if self._days < 0: return -self else: return self def __mul__(self, other): if isinstance(other, int): # for CPython compatibility, we cannot use # our __class__ here, but need a real timedelta return timedelta(self._days * other, self._seconds * other, self._microseconds * other) if isinstance(other, float): a, b = other.as_integer_ratio() return self * a / b return 
NotImplemented __rmul__ = __mul__ def _to_microseconds(self): return ((self._days * (24*3600) + self._seconds) * 1000000 + self._microseconds) def __floordiv__(self, other): if not isinstance(other, (int, timedelta)): return NotImplemented usec = self._to_microseconds() if isinstance(other, timedelta): return usec // other._to_microseconds() if isinstance(other, int): return timedelta(0, 0, usec // other) def __truediv__(self, other): if not isinstance(other, (int, float, timedelta)): return NotImplemented usec = self._to_microseconds() if isinstance(other, timedelta): return usec / other._to_microseconds() if isinstance(other, int): return timedelta(0, 0, usec / other) if isinstance(other, float): a, b = other.as_integer_ratio() return timedelta(0, 0, b * usec / a) def __mod__(self, other): if isinstance(other, timedelta): r = self._to_microseconds() % other._to_microseconds() return timedelta(0, 0, r) return NotImplemented def __divmod__(self, other): if isinstance(other, timedelta): q, r = divmod(self._to_microseconds(), other._to_microseconds()) return q, timedelta(0, 0, r) return NotImplemented # Comparisons of timedelta objects with other. 
def __eq__(self, other): if isinstance(other, timedelta): return self._cmp(other) == 0 else: return False def __ne__(self, other): if isinstance(other, timedelta): return self._cmp(other) != 0 else: return True def __le__(self, other): if isinstance(other, timedelta): return self._cmp(other) <= 0 else: _cmperror(self, other) def __lt__(self, other): if isinstance(other, timedelta): return self._cmp(other) < 0 else: _cmperror(self, other) def __ge__(self, other): if isinstance(other, timedelta): return self._cmp(other) >= 0 else: _cmperror(self, other) def __gt__(self, other): if isinstance(other, timedelta): return self._cmp(other) > 0 else: _cmperror(self, other) def _cmp(self, other): assert isinstance(other, timedelta) return _cmp(self._getstate(), other._getstate()) def __hash__(self): return hash(self._getstate()) def __bool__(self): return (self._days != 0 or self._seconds != 0 or self._microseconds != 0) # Pickle support. def _getstate(self): return (self._days, self._seconds, self._microseconds) def __reduce__(self): return (self.__class__, self._getstate()) timedelta.min = timedelta(-999999999) timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59, microseconds=999999) timedelta.resolution = timedelta(microseconds=1) class date: """Concrete date type. Constructors: __new__() fromtimestamp() today() fromordinal() Operators: __repr__, __str__ __cmp__, __hash__ __add__, __radd__, __sub__ (add/radd only with timedelta arg) Methods: timetuple() toordinal() weekday() isoweekday(), isocalendar(), isoformat() ctime() strftime() Properties (readonly): year, month, day """ __slots__ = '_year', '_month', '_day' def __new__(cls, year, month=None, day=None): """Constructor. 
Arguments: year, month, day (required, base 1) """ if (isinstance(year, bytes) and len(year) == 4 and 1 <= year[2] <= 12 and month is None): # Month is sane # Pickle support self = object.__new__(cls) self.__setstate(year) return self _check_date_fields(year, month, day) self = object.__new__(cls) self._year = year self._month = month self._day = day return self # Additional constructors @classmethod def fromtimestamp(cls, t): "Construct a date from a POSIX timestamp (like time.time())." y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t) return cls(y, m, d) @classmethod def today(cls): "Construct a date from time.time()." t = _time.time() return cls.fromtimestamp(t) @classmethod def fromordinal(cls, n): """Contruct a date from a proleptic Gregorian ordinal. January 1 of year 1 is day 1. Only the year, month and day are non-zero in the result. """ y, m, d = _ord2ymd(n) return cls(y, m, d) # Conversions to string def __repr__(self): """Convert to formal string, for repr(). >>> dt = datetime(2010, 1, 1) >>> repr(dt) 'datetime.datetime(2010, 1, 1, 0, 0)' >>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc) >>> repr(dt) 'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)' """ return "%s(%d, %d, %d)" % ('datetime.' + self.__class__.__name__, self._year, self._month, self._day) # XXX These shouldn't depend on time.localtime(), because that # clips the usable dates to [1970 .. 2038). At least ctime() is # easily done without using strftime() -- that's better too because # strftime("%c", ...) is locale specific. def ctime(self): "Return ctime() style string." weekday = self.toordinal() % 7 or 7 return "%s %s %2d 00:00:00 %04d" % ( _DAYNAMES[weekday], _MONTHNAMES[self._month], self._day, self._year) def strftime(self, fmt): "Format using strftime()." 
return _wrap_strftime(self, fmt, self.timetuple()) def __format__(self, fmt): if len(fmt) != 0: return self.strftime(fmt) return str(self) def isoformat(self): """Return the date formatted according to ISO. This is 'YYYY-MM-DD'. References: - http://www.w3.org/TR/NOTE-datetime - http://www.cl.cam.ac.uk/~mgk25/iso-time.html """ return "%04d-%02d-%02d" % (self._year, self._month, self._day) __str__ = isoformat # Read-only field accessors @property def year(self): """year (1-9999)""" return self._year @property def month(self): """month (1-12)""" return self._month @property def day(self): """day (1-31)""" return self._day # Standard conversions, __cmp__, __hash__ (and helpers) def timetuple(self): "Return local time tuple compatible with time.localtime()." return _build_struct_time(self._year, self._month, self._day, 0, 0, 0, -1) def toordinal(self): """Return proleptic Gregorian ordinal for the year, month and day. January 1 of year 1 is day 1. Only the year, month and day values contribute to the result. """ return _ymd2ord(self._year, self._month, self._day) def replace(self, year=None, month=None, day=None): """Return a new date with new values for the specified fields.""" if year is None: year = self._year if month is None: month = self._month if day is None: day = self._day _check_date_fields(year, month, day) return date(year, month, day) # Comparisons of date objects with other. 
def __eq__(self, other): if isinstance(other, date): return self._cmp(other) == 0 return NotImplemented def __ne__(self, other): if isinstance(other, date): return self._cmp(other) != 0 return NotImplemented def __le__(self, other): if isinstance(other, date): return self._cmp(other) <= 0 return NotImplemented def __lt__(self, other): if isinstance(other, date): return self._cmp(other) < 0 return NotImplemented def __ge__(self, other): if isinstance(other, date): return self._cmp(other) >= 0 return NotImplemented def __gt__(self, other): if isinstance(other, date): return self._cmp(other) > 0 return NotImplemented def _cmp(self, other): assert isinstance(other, date) y, m, d = self._year, self._month, self._day y2, m2, d2 = other._year, other._month, other._day return _cmp((y, m, d), (y2, m2, d2)) def __hash__(self): "Hash." return hash(self._getstate()) # Computations def __add__(self, other): "Add a date to a timedelta." if isinstance(other, timedelta): o = self.toordinal() + other.days if 0 < o <= _MAXORDINAL: return date.fromordinal(o) raise OverflowError("result out of range") return NotImplemented __radd__ = __add__ def __sub__(self, other): """Subtract two dates, or a date and a timedelta.""" if isinstance(other, timedelta): return self + timedelta(-other.days) if isinstance(other, date): days1 = self.toordinal() days2 = other.toordinal() return timedelta(days1 - days2) return NotImplemented def weekday(self): "Return day of the week, where Monday == 0 ... Sunday == 6." return (self.toordinal() + 6) % 7 # Day-of-the-week and week-of-the-year, according to ISO def isoweekday(self): "Return day of the week, where Monday == 1 ... Sunday == 7." # 1-Jan-0001 is a Monday return self.toordinal() % 7 or 7 def isocalendar(self): """Return a 3-tuple containing ISO year, week number, and weekday. The first ISO week of the year is the (Mon-Sun) week containing the year's first Thursday; everything else derives from that. The first week is 1; Monday is 1 ... Sunday is 7. 
ISO calendar algorithm taken from http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm """ year = self._year week1monday = _isoweek1monday(year) today = _ymd2ord(self._year, self._month, self._day) # Internally, week and day have origin 0 week, day = divmod(today - week1monday, 7) if week < 0: year -= 1 week1monday = _isoweek1monday(year) week, day = divmod(today - week1monday, 7) elif week >= 52: if today >= _isoweek1monday(year+1): year += 1 week = 0 return year, week+1, day+1 # Pickle support. def _getstate(self): yhi, ylo = divmod(self._year, 256) return bytes([yhi, ylo, self._month, self._day]), def __setstate(self, string): if len(string) != 4 or not (1 <= string[2] <= 12): raise TypeError("not enough arguments") yhi, ylo, self._month, self._day = string self._year = yhi * 256 + ylo def __reduce__(self): return (self.__class__, self._getstate()) _date_class = date # so functions w/ args named "date" can get at the class date.min = date(1, 1, 1) date.max = date(9999, 12, 31) date.resolution = timedelta(days=1) class tzinfo: """Abstract base class for time zone info classes. Subclasses must override the name(), utcoffset() and dst() methods. """ __slots__ = () def tzname(self, dt): "datetime -> string name of time zone." raise NotImplementedError("tzinfo subclass must override tzname()") def utcoffset(self, dt): "datetime -> minutes east of UTC (negative for west of UTC)" raise NotImplementedError("tzinfo subclass must override utcoffset()") def dst(self, dt): """datetime -> DST offset in minutes east of UTC. Return 0 if DST not in effect. utcoffset() must include the DST offset. """ raise NotImplementedError("tzinfo subclass must override dst()") def fromutc(self, dt): "datetime in UTC -> datetime in local time." 
if not isinstance(dt, datetime): raise TypeError("fromutc() requires a datetime argument") if dt.tzinfo is not self: raise ValueError("dt.tzinfo is not self") dtoff = dt.utcoffset() if dtoff is None: raise ValueError("fromutc() requires a non-None utcoffset() " "result") # See the long comment block at the end of this file for an # explanation of this algorithm. dtdst = dt.dst() if dtdst is None: raise ValueError("fromutc() requires a non-None dst() result") delta = dtoff - dtdst if delta: dt += delta dtdst = dt.dst() if dtdst is None: raise ValueError("fromutc(): dt.dst gave inconsistent " "results; cannot convert") return dt + dtdst # Pickle support. def __reduce__(self): getinitargs = getattr(self, "__getinitargs__", None) if getinitargs: args = getinitargs() else: args = () getstate = getattr(self, "__getstate__", None) if getstate: state = getstate() else: state = getattr(self, "__dict__", None) or None if state is None: return (self.__class__, args) else: return (self.__class__, args, state) _tzinfo_class = tzinfo class time: """Time with time zone. Constructors: __new__() Operators: __repr__, __str__ __cmp__, __hash__ Methods: strftime() isoformat() utcoffset() tzname() dst() Properties (readonly): hour, minute, second, microsecond, tzinfo """ def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None): """Constructor. 
Arguments: hour, minute (required) second, microsecond (default to zero) tzinfo (default to None) """ self = object.__new__(cls) if isinstance(hour, bytes) and len(hour) == 6: # Pickle support self.__setstate(hour, minute or None) return self _check_tzinfo_arg(tzinfo) _check_time_fields(hour, minute, second, microsecond) self._hour = hour self._minute = minute self._second = second self._microsecond = microsecond self._tzinfo = tzinfo return self # Read-only field accessors @property def hour(self): """hour (0-23)""" return self._hour @property def minute(self): """minute (0-59)""" return self._minute @property def second(self): """second (0-59)""" return self._second @property def microsecond(self): """microsecond (0-999999)""" return self._microsecond @property def tzinfo(self): """timezone info object""" return self._tzinfo # Standard conversions, __hash__ (and helpers) # Comparisons of time objects with other. def __eq__(self, other): if isinstance(other, time): return self._cmp(other, allow_mixed=True) == 0 else: return False def __ne__(self, other): if isinstance(other, time): return self._cmp(other, allow_mixed=True) != 0 else: return True def __le__(self, other): if isinstance(other, time): return self._cmp(other) <= 0 else: _cmperror(self, other) def __lt__(self, other): if isinstance(other, time): return self._cmp(other) < 0 else: _cmperror(self, other) def __ge__(self, other): if isinstance(other, time): return self._cmp(other) >= 0 else: _cmperror(self, other) def __gt__(self, other): if isinstance(other, time): return self._cmp(other) > 0 else: _cmperror(self, other) def _cmp(self, other, allow_mixed=False): assert isinstance(other, time) mytz = self._tzinfo ottz = other._tzinfo myoff = otoff = None if mytz is ottz: base_compare = True else: myoff = self.utcoffset() otoff = other.utcoffset() base_compare = myoff == otoff if base_compare: return _cmp((self._hour, self._minute, self._second, self._microsecond), (other._hour, other._minute, other._second, 
other._microsecond)) if myoff is None or otoff is None: if allow_mixed: return 2 # arbitrary non-zero value else: raise TypeError("cannot compare naive and aware times") myhhmm = self._hour * 60 + self._minute - myoff//timedelta(minutes=1) othhmm = other._hour * 60 + other._minute - otoff//timedelta(minutes=1) return _cmp((myhhmm, self._second, self._microsecond), (othhmm, other._second, other._microsecond)) def __hash__(self): """Hash.""" tzoff = self.utcoffset() if not tzoff: # zero or None return hash(self._getstate()[0]) h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff, timedelta(hours=1)) assert not m % timedelta(minutes=1), "whole minute" m //= timedelta(minutes=1) if 0 <= h < 24: return hash(time(h, m, self.second, self.microsecond)) return hash((h, m, self.second, self.microsecond)) # Conversion to string def _tzstr(self, sep=":"): """Return formatted timezone offset (+xx:xx) or None.""" off = self.utcoffset() if off is not None: if off.days < 0: sign = "-" off = -off else: sign = "+" hh, mm = divmod(off, timedelta(hours=1)) assert not mm % timedelta(minutes=1), "whole minute" mm //= timedelta(minutes=1) assert 0 <= hh < 24 off = "%s%02d%s%02d" % (sign, hh, sep, mm) return off def __repr__(self): """Convert to formal string, for repr().""" if self._microsecond != 0: s = ", %d, %d" % (self._second, self._microsecond) elif self._second != 0: s = ", %d" % self._second else: s = "" s= "%s(%d, %d%s)" % ('datetime.' + self.__class__.__name__, self._hour, self._minute, s) if self._tzinfo is not None: assert s[-1:] == ")" s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")" return s def isoformat(self): """Return the time formatted according to ISO. This is 'HH:MM:SS.mmmmmm+zz:zz', or 'HH:MM:SS+zz:zz' if self.microsecond == 0. """ s = _format_time(self._hour, self._minute, self._second, self._microsecond) tz = self._tzstr() if tz: s += tz return s __str__ = isoformat def strftime(self, fmt): """Format using strftime(). 
The date part of the timestamp passed
        to underlying strftime should not be used.
        """
        # The year must be >= 1000 else Python's strftime implementation
        # can raise a bogus exception.
        # A dummy date (1900-01-01) is supplied because a plain time has no
        # date component; only the hour/minute/second fields are meaningful.
        timetuple = (1900, 1, 1,
                     self._hour, self._minute, self._second,
                     0, 1, -1)
        return _wrap_strftime(self, fmt, timetuple)

    def __format__(self, fmt):
        # An empty format spec falls back to str() (ISO format); any
        # non-empty spec is interpreted as a strftime() format string.
        if len(fmt) != 0:
            return self.strftime(fmt)
        return str(self)

    # Timezone functions

    def utcoffset(self):
        """Return the offset east of UTC as a timedelta (negative west of
        UTC), or None if the time is naive (no tzinfo).

        NOTE(review): earlier docstring said "in minutes"; the value is a
        timedelta, as the divmod(off, timedelta(hours=1)) arithmetic in
        _tzstr() relies on and _check_utc_offset() validates.
        """
        if self._tzinfo is None:
            return None
        # A time object carries no date, so the tzinfo hook is queried
        # with dt=None.
        offset = self._tzinfo.utcoffset(None)
        _check_utc_offset("utcoffset", offset)
        return offset

    def tzname(self):
        """Return the timezone name, or None if the time is naive.

        Note that the name is 100% informational -- there's no requirement that
        it mean anything in particular. For example, "GMT", "UTC", "-500",
        "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
        """
        if self._tzinfo is None:
            return None
        name = self._tzinfo.tzname(None)
        _check_tzname(name)
        return name

    def dst(self):
        """Return the DST offset as a timedelta if DST is in effect, or
        None if the time is naive or the tzinfo declines to say
        (a zero timedelta is the conventional "not in effect" reply --
        see tzinfo.dst()).

        This is purely informational; the DST offset has already been added to
        the UTC offset returned by utcoffset() if applicable, so there's no
        need to consult dst() unless you're interested in displaying the DST
        info.
""" if self._tzinfo is None: return None offset = self._tzinfo.dst(None) _check_utc_offset("dst", offset) return offset def replace(self, hour=None, minute=None, second=None, microsecond=None, tzinfo=True): """Return a new time with new values for the specified fields.""" if hour is None: hour = self.hour if minute is None: minute = self.minute if second is None: second = self.second if microsecond is None: microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo _check_time_fields(hour, minute, second, microsecond) _check_tzinfo_arg(tzinfo) return time(hour, minute, second, microsecond, tzinfo) def __bool__(self): if self.second or self.microsecond: return True offset = self.utcoffset() or timedelta(0) return timedelta(hours=self.hour, minutes=self.minute) != offset # Pickle support. def _getstate(self): us2, us3 = divmod(self._microsecond, 256) us1, us2 = divmod(us2, 256) basestate = bytes([self._hour, self._minute, self._second, us1, us2, us3]) if self._tzinfo is None: return (basestate,) else: return (basestate, self._tzinfo) def __setstate(self, string, tzinfo): if len(string) != 6 or string[0] >= 24: raise TypeError("an integer is required") (self._hour, self._minute, self._second, us1, us2, us3) = string self._microsecond = (((us1 << 8) | us2) << 8) | us3 if tzinfo is None or isinstance(tzinfo, _tzinfo_class): self._tzinfo = tzinfo else: raise TypeError("bad tzinfo state arg %r" % tzinfo) def __reduce__(self): return (time, self._getstate()) _time_class = time # so functions w/ args named "time" can get at the class time.min = time(0, 0, 0) time.max = time(23, 59, 59, 999999) time.resolution = timedelta(microseconds=1) class datetime(date): """datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]]) The year, month and day arguments are required. tzinfo may be None, or an instance of a tzinfo subclass. The remaining arguments may be ints. 
""" __slots__ = date.__slots__ + ( '_hour', '_minute', '_second', '_microsecond', '_tzinfo') def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0, microsecond=0, tzinfo=None): if isinstance(year, bytes) and len(year) == 10: # Pickle support self = date.__new__(cls, year[:4]) self.__setstate(year, month) return self _check_tzinfo_arg(tzinfo) _check_time_fields(hour, minute, second, microsecond) self = date.__new__(cls, year, month, day) self._hour = hour self._minute = minute self._second = second self._microsecond = microsecond self._tzinfo = tzinfo return self # Read-only field accessors @property def hour(self): """hour (0-23)""" return self._hour @property def minute(self): """minute (0-59)""" return self._minute @property def second(self): """second (0-59)""" return self._second @property def microsecond(self): """microsecond (0-999999)""" return self._microsecond @property def tzinfo(self): """timezone info object""" return self._tzinfo @classmethod def fromtimestamp(cls, t, tz=None): """Construct a datetime from a POSIX timestamp (like time.time()). A timezone info object may be passed in as well. """ _check_tzinfo_arg(tz) converter = _time.localtime if tz is None else _time.gmtime t, frac = divmod(t, 1.0) us = int(frac * 1e6) # If timestamp is less than one microsecond smaller than a # full second, us can be rounded up to 1000000. In this case, # roll over to seconds, otherwise, ValueError is raised # by the constructor. if us == 1000000: t += 1 us = 0 y, m, d, hh, mm, ss, weekday, jday, dst = converter(t) ss = min(ss, 59) # clamp out leap seconds if the platform has them result = cls(y, m, d, hh, mm, ss, us, tz) if tz is not None: result = tz.fromutc(result) return result @classmethod def utcfromtimestamp(cls, t): "Construct a UTC datetime from a POSIX timestamp (like time.time())." t, frac = divmod(t, 1.0) us = int(frac * 1e6) # If timestamp is less than one microsecond smaller than a # full second, us can be rounded up to 1000000. 
In this case,
        # roll over to seconds, otherwise, ValueError is raised
        # by the constructor.
        if us == 1000000:
            t += 1
            us = 0
        y, m, d, hh, mm, ss, weekday, jday, dst = _time.gmtime(t)
        ss = min(ss, 59)    # clamp out leap seconds if the platform has them
        return cls(y, m, d, hh, mm, ss, us)

    # XXX This is supposed to do better than we *can* do by using time.time(),
    # XXX if the platform supports a more accurate way.  The C implementation
    # XXX uses gettimeofday on platforms that have it, but that isn't
    # XXX available from Python.  So now() may return different results
    # XXX across the implementations.
    @classmethod
    def now(cls, tz=None):
        "Construct a datetime from time.time() and optional time zone info."
        t = _time.time()
        return cls.fromtimestamp(t, tz)

    @classmethod
    def utcnow(cls):
        "Construct a UTC datetime from time.time()."
        t = _time.time()
        return cls.utcfromtimestamp(t)

    @classmethod
    def combine(cls, date, time):
        "Construct a datetime from a given date and a given time."
        if not isinstance(date, _date_class):
            raise TypeError("date argument must be a date instance")
        if not isinstance(time, _time_class):
            raise TypeError("time argument must be a time instance")
        return cls(date.year, date.month, date.day,
                   time.hour, time.minute, time.second, time.microsecond,
                   time.tzinfo)

    def timetuple(self):
        "Return local time tuple compatible with time.localtime()."
        # Map the tri-state dst() result onto the struct_time convention:
        # None (unknown) -> -1, truthy offset -> 1, zero offset -> 0.
        dst = self.dst()
        if dst is None:
            dst = -1
        elif dst:
            dst = 1
        else:
            dst = 0
        return _build_struct_time(self.year, self.month, self.day,
                                  self.hour, self.minute, self.second,
                                  dst)

    def timestamp(self):
        "Return POSIX timestamp as float"
        if self._tzinfo is None:
            # NOTE(review): the naive branch interprets the fields as local
            # time via mktime(), so values inside a DST fold/gap are
            # ambiguous and platform-dependent -- TODO confirm acceptable.
            # tm_isdst=-1 asks the platform to guess the DST flag.
            return _time.mktime((self.year, self.month, self.day,
                                 self.hour, self.minute, self.second,
                                 -1, -1, -1)) + self.microsecond / 1e6
        else:
            # Aware datetimes are exact: measure against the Unix epoch.
            return (self - _EPOCH).total_seconds()

    def utctimetuple(self):
        "Return UTC time tuple compatible with time.gmtime()."
offset = self.utcoffset() if offset: self -= offset y, m, d = self.year, self.month, self.day hh, mm, ss = self.hour, self.minute, self.second return _build_struct_time(y, m, d, hh, mm, ss, 0) def date(self): "Return the date part." return date(self._year, self._month, self._day) def time(self): "Return the time part, with tzinfo None." return time(self.hour, self.minute, self.second, self.microsecond) def timetz(self): "Return the time part, with same tzinfo." return time(self.hour, self.minute, self.second, self.microsecond, self._tzinfo) def replace(self, year=None, month=None, day=None, hour=None, minute=None, second=None, microsecond=None, tzinfo=True): """Return a new datetime with new values for the specified fields.""" if year is None: year = self.year if month is None: month = self.month if day is None: day = self.day if hour is None: hour = self.hour if minute is None: minute = self.minute if second is None: second = self.second if microsecond is None: microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo _check_date_fields(year, month, day) _check_time_fields(hour, minute, second, microsecond) _check_tzinfo_arg(tzinfo) return datetime(year, month, day, hour, minute, second, microsecond, tzinfo) def astimezone(self, tz=None): if tz is None: if self.tzinfo is None: raise ValueError("astimezone() requires an aware datetime") ts = (self - _EPOCH) // timedelta(seconds=1) localtm = _time.localtime(ts) local = datetime(*localtm[:6]) try: # Extract TZ data if available gmtoff = localtm.tm_gmtoff zone = localtm.tm_zone except AttributeError: # Compute UTC offset and compare with the value implied # by tm_isdst. If the values match, use the zone name # implied by tm_isdst. 
delta = local - datetime(*_time.gmtime(ts)[:6]) dst = _time.daylight and localtm.tm_isdst > 0 gmtoff = -(_time.altzone if dst else _time.timezone) if delta == timedelta(seconds=gmtoff): tz = timezone(delta, _time.tzname[dst]) else: tz = timezone(delta) else: tz = timezone(timedelta(seconds=gmtoff), zone) elif not isinstance(tz, tzinfo): raise TypeError("tz argument must be an instance of tzinfo") mytz = self.tzinfo if mytz is None: raise ValueError("astimezone() requires an aware datetime") if tz is mytz: return self # Convert self to UTC, and attach the new time zone object. myoffset = self.utcoffset() if myoffset is None: raise ValueError("astimezone() requires an aware datetime") utc = (self - myoffset).replace(tzinfo=tz) # Convert from UTC to tz's local time. return tz.fromutc(utc) # Ways to produce a string. def ctime(self): "Return ctime() style string." weekday = self.toordinal() % 7 or 7 return "%s %s %2d %02d:%02d:%02d %04d" % ( _DAYNAMES[weekday], _MONTHNAMES[self._month], self._day, self._hour, self._minute, self._second, self._year) def isoformat(self, sep='T'): """Return the time formatted according to ISO. This is 'YYYY-MM-DD HH:MM:SS.mmmmmm', or 'YYYY-MM-DD HH:MM:SS' if self.microsecond == 0. If self.tzinfo is not None, the UTC offset is also attached, giving 'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM' or 'YYYY-MM-DD HH:MM:SS+HH:MM'. Optional argument sep specifies the separator between date and time, default 'T'. 
""" s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day, sep) + _format_time(self._hour, self._minute, self._second, self._microsecond)) off = self.utcoffset() if off is not None: if off.days < 0: sign = "-" off = -off else: sign = "+" hh, mm = divmod(off, timedelta(hours=1)) assert not mm % timedelta(minutes=1), "whole minute" mm //= timedelta(minutes=1) s += "%s%02d:%02d" % (sign, hh, mm) return s def __repr__(self): """Convert to formal string, for repr().""" L = [self._year, self._month, self._day, # These are never zero self._hour, self._minute, self._second, self._microsecond] if L[-1] == 0: del L[-1] if L[-1] == 0: del L[-1] s = ", ".join(map(str, L)) s = "%s(%s)" % ('datetime.' + self.__class__.__name__, s) if self._tzinfo is not None: assert s[-1:] == ")" s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")" return s def __str__(self): "Convert to string, for str()." return self.isoformat(sep=' ') @classmethod def strptime(cls, date_string, format): 'string, format -> new datetime parsed from a string (like time.strptime()).' import _strptime return _strptime._strptime_datetime(cls, date_string, format) def utcoffset(self): """Return the timezone offset in minutes east of UTC (negative west of UTC).""" if self._tzinfo is None: return None offset = self._tzinfo.utcoffset(self) _check_utc_offset("utcoffset", offset) return offset def tzname(self): """Return the timezone name. Note that the name is 100% informational -- there's no requirement that it mean anything in particular. For example, "GMT", "UTC", "-500", "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies. """ name = _call_tzinfo_method(self._tzinfo, "tzname", self) _check_tzname(name) return name def dst(self): """Return 0 if DST is not in effect, or the DST offset (in minutes eastward) if DST is in effect. 
This is purely informational; the DST offset has already been added to the UTC offset returned by utcoffset() if applicable, so there's no need to consult dst() unless you're interested in displaying the DST info. """ if self._tzinfo is None: return None offset = self._tzinfo.dst(self) _check_utc_offset("dst", offset) return offset # Comparisons of datetime objects with other. def __eq__(self, other): if isinstance(other, datetime): return self._cmp(other, allow_mixed=True) == 0 elif not isinstance(other, date): return NotImplemented else: return False def __ne__(self, other): if isinstance(other, datetime): return self._cmp(other, allow_mixed=True) != 0 elif not isinstance(other, date): return NotImplemented else: return True def __le__(self, other): if isinstance(other, datetime): return self._cmp(other) <= 0 elif not isinstance(other, date): return NotImplemented else: _cmperror(self, other) def __lt__(self, other): if isinstance(other, datetime): return self._cmp(other) < 0 elif not isinstance(other, date): return NotImplemented else: _cmperror(self, other) def __ge__(self, other): if isinstance(other, datetime): return self._cmp(other) >= 0 elif not isinstance(other, date): return NotImplemented else: _cmperror(self, other) def __gt__(self, other): if isinstance(other, datetime): return self._cmp(other) > 0 elif not isinstance(other, date): return NotImplemented else: _cmperror(self, other) def _cmp(self, other, allow_mixed=False): assert isinstance(other, datetime) mytz = self._tzinfo ottz = other._tzinfo myoff = otoff = None if mytz is ottz: base_compare = True else: myoff = self.utcoffset() otoff = other.utcoffset() base_compare = myoff == otoff if base_compare: return _cmp((self._year, self._month, self._day, self._hour, self._minute, self._second, self._microsecond), (other._year, other._month, other._day, other._hour, other._minute, other._second, other._microsecond)) if myoff is None or otoff is None: if allow_mixed: return 2 # arbitrary non-zero value 
else: raise TypeError("cannot compare naive and aware datetimes") # XXX What follows could be done more efficiently... diff = self - other # this will take offsets into account if diff.days < 0: return -1 return diff and 1 or 0 def __add__(self, other): "Add a datetime and a timedelta." if not isinstance(other, timedelta): return NotImplemented delta = timedelta(self.toordinal(), hours=self._hour, minutes=self._minute, seconds=self._second, microseconds=self._microsecond) delta += other hour, rem = divmod(delta.seconds, 3600) minute, second = divmod(rem, 60) if 0 < delta.days <= _MAXORDINAL: return datetime.combine(date.fromordinal(delta.days), time(hour, minute, second, delta.microseconds, tzinfo=self._tzinfo)) raise OverflowError("result out of range") __radd__ = __add__ def __sub__(self, other): "Subtract two datetimes, or a datetime and a timedelta." if not isinstance(other, datetime): if isinstance(other, timedelta): return self + -other return NotImplemented days1 = self.toordinal() days2 = other.toordinal() secs1 = self._second + self._minute * 60 + self._hour * 3600 secs2 = other._second + other._minute * 60 + other._hour * 3600 base = timedelta(days1 - days2, secs1 - secs2, self._microsecond - other._microsecond) if self._tzinfo is other._tzinfo: return base myoff = self.utcoffset() otoff = other.utcoffset() if myoff == otoff: return base if myoff is None or otoff is None: raise TypeError("cannot mix naive and timezone-aware time") return base + otoff - myoff def __hash__(self): tzoff = self.utcoffset() if tzoff is None: return hash(self._getstate()[0]) days = _ymd2ord(self.year, self.month, self.day) seconds = self.hour * 3600 + self.minute * 60 + self.second return hash(timedelta(days, seconds, self.microsecond) - tzoff) # Pickle support. 
    def _getstate(self):
        # Pack the datetime into a 10-byte state: 2 bytes of year
        # (big-endian), then month, day, hour, minute, second, and
        # 3 bytes of microsecond (big-endian).
        yhi, ylo = divmod(self._year, 256)
        us2, us3 = divmod(self._microsecond, 256)
        us1, us2 = divmod(us2, 256)
        basestate = bytes([yhi, ylo, self._month, self._day,
                           self._hour, self._minute, self._second,
                           us1, us2, us3])
        if self._tzinfo is None:
            return (basestate,)
        else:
            return (basestate, self._tzinfo)

    def __setstate(self, string, tzinfo):
        # Inverse of _getstate: unpack the 10-byte state produced above.
        # Name-mangled (double underscore) so subclasses don't clash.
        (yhi, ylo, self._month, self._day, self._hour,
         self._minute, self._second, us1, us2, us3) = string
        self._year = yhi * 256 + ylo
        self._microsecond = (((us1 << 8) | us2) << 8) | us3
        if tzinfo is None or isinstance(tzinfo, _tzinfo_class):
            self._tzinfo = tzinfo
        else:
            raise TypeError("bad tzinfo state arg %r" % tzinfo)

    def __reduce__(self):
        # Pickle as (class, state) so unpickling round-trips subclasses.
        return (self.__class__, self._getstate())

# Class-level extremes and granularity, set after the class body because
# they are themselves datetime/timedelta instances.
datetime.min = datetime(1, 1, 1)
datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999)
datetime.resolution = timedelta(microseconds=1)


def _isoweek1monday(year):
    # Helper to calculate the day number of the Monday starting week 1
    # (ISO 8601: week 1 is the week containing the year's first Thursday).
    # XXX This could be done more efficiently
    THURSDAY = 3
    firstday = _ymd2ord(year, 1, 1)
    firstweekday = (firstday + 6) % 7 # See weekday() above
    week1monday = firstday - firstweekday
    if firstweekday > THURSDAY:
        week1monday += 7
    return week1monday


class timezone(tzinfo):
    """Fixed-offset concrete tzinfo subclass (whole minutes from UTC)."""

    __slots__ = '_offset', '_name'

    # Sentinel value to disallow None
    _Omitted = object()

    def __new__(cls, offset, name=_Omitted):
        if not isinstance(offset, timedelta):
            raise TypeError("offset must be a timedelta")
        if name is cls._Omitted:
            if not offset:
                # timezone(timedelta(0)) returns the shared UTC singleton.
                return cls.utc
            name = None
        elif not isinstance(name, str):
            raise TypeError("name must be a string")
        if not cls._minoffset <= offset <= cls._maxoffset:
            raise ValueError("offset must be a timedelta"
                             " strictly between -timedelta(hours=24) and"
                             " timedelta(hours=24).")
        if (offset.microseconds != 0 or
            offset.seconds % 60 != 0):
            raise ValueError("offset must be a timedelta"
                             " representing a whole number of minutes")
        return cls._create(offset, name)

    @classmethod
    def _create(cls, offset, name=None):
        # Internal constructor that bypasses the validation in __new__;
        # used to build the utc/min/max singletons below.
        self = tzinfo.__new__(cls)
        self._offset = offset
        self._name = name
        return self

    def __getinitargs__(self):
        """pickle support"""
        if self._name is None:
            return (self._offset,)
        return (self._offset, self._name)

    def __eq__(self, other):
        # NOTE(review): returns False (not NotImplemented) for non-timezone
        # operands, which blocks reflected comparisons — confirm whether
        # callers rely on this before changing it.
        if type(other) != timezone:
            return False
        return self._offset == other._offset

    def __hash__(self):
        return hash(self._offset)

    def __repr__(self):
        """Convert to formal string, for repr().

        >>> tz = timezone.utc
        >>> repr(tz)
        'datetime.timezone.utc'
        >>> tz = timezone(timedelta(hours=-5), 'EST')
        >>> repr(tz)
        "datetime.timezone(datetime.timedelta(-1, 68400), 'EST')"
        """
        if self is self.utc:
            return 'datetime.timezone.utc'
        if self._name is None:
            return "%s(%r)" % ('datetime.' + self.__class__.__name__,
                               self._offset)
        return "%s(%r, %r)" % ('datetime.' + self.__class__.__name__,
                               self._offset, self._name)

    def __str__(self):
        return self.tzname(None)

    def utcoffset(self, dt):
        if isinstance(dt, datetime) or dt is None:
            return self._offset
        raise TypeError("utcoffset() argument must be a datetime instance"
                        " or None")

    def tzname(self, dt):
        if isinstance(dt, datetime) or dt is None:
            if self._name is None:
                # Unnamed zones get a synthetic 'UTC+HH:MM' name.
                return self._name_from_offset(self._offset)
            return self._name
        raise TypeError("tzname() argument must be a datetime instance"
                        " or None")

    def dst(self, dt):
        # A fixed-offset zone never observes DST.
        if isinstance(dt, datetime) or dt is None:
            return None
        raise TypeError("dst() argument must be a datetime instance"
                        " or None")

    def fromutc(self, dt):
        if isinstance(dt, datetime):
            if dt.tzinfo is not self:
                raise ValueError("fromutc: dt.tzinfo "
                                 "is not self")
            return dt + self._offset
        raise TypeError("fromutc() argument must be a datetime instance"
                        " or None")

    # Largest/smallest representable fixed offsets (strictly within 24h).
    _maxoffset = timedelta(hours=23, minutes=59)
    _minoffset = -_maxoffset

    @staticmethod
    def _name_from_offset(delta):
        # Render a timedelta offset as 'UTC+HH:MM' / 'UTC-HH:MM'.
        if delta < timedelta(0):
            sign = '-'
            delta = -delta
        else:
            sign = '+'
        hours, rest = divmod(delta, timedelta(hours=1))
        minutes = rest // timedelta(minutes=1)
        return 'UTC{}{:02d}:{:02d}'.format(sign, hours, minutes)

timezone.utc = 
timezone._create(timedelta(0))
# Singleton zones at the extreme representable offsets (the assignment to
# timezone.utc is completed by the _create call on the line above).
timezone.min = timezone._create(timezone._minoffset)
timezone.max = timezone._create(timezone._maxoffset)
# The Unix epoch as an aware datetime in UTC.
_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)

"""
Some time zone algebra.  For a datetime x, let
    x.n = x stripped of its timezone -- its naive time.
    x.o = x.utcoffset(), and assuming that doesn't raise an exception or
      return None
    x.d = x.dst(), and assuming that doesn't raise an exception or
      return None
    x.s = x's standard offset, x.o - x.d

Now some derived rules, where k is a duration (timedelta).

1. x.o = x.s + x.d
   This follows from the definition of x.s.

2. If x and y have the same tzinfo member, x.s = y.s.
   This is actually a requirement, an assumption we need to make about
   sane tzinfo classes.

3. The naive UTC time corresponding to x is x.n - x.o.  This is again a
   requirement for a sane tzinfo class.

4. (x+k).s = x.s
   This follows from #2, and that datimetimetz+timedelta preserves tzinfo.

5. (x+k).n = x.n + k
   Again follows from how arithmetic is defined.

Now we can explain tz.fromutc(x).  Let's assume it's an interesting case
(meaning that the various tzinfo methods exist, and don't blow up or return
None when called).

The function wants to return a datetime y with timezone tz, equivalent to x.
x is already in UTC.

By #3, we want

    y.n - y.o = x.n                             [1]

The algorithm starts by attaching tz to x.n, and calling that y.  So
x.n = y.n at the start.  Then it wants to add a duration k to y, so that [1]
becomes true; in effect, we want to solve [2] for k:

   (y+k).n - (y+k).o = x.n                      [2]

By #1, this is the same as

   (y+k).n - ((y+k).s + (y+k).d) = x.n          [3]

By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start.
Substituting that into [3], x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving k - (y+k).s - (y+k).d = 0; rearranging, k = (y+k).s - (y+k).d; by #4, (y+k).s == y.s, so k = y.s - (y+k).d On the RHS, (y+k).d can't be computed directly, but y.s can be, and we approximate k by ignoring the (y+k).d term at first. Note that k can't be very large, since all offset-returning methods return a duration of magnitude less than 24 hours. For that reason, if y is firmly in std time, (y+k).d must be 0, so ignoring it has no consequence then. In any case, the new value is z = y + y.s [4] It's helpful to step back at look at [4] from a higher level: it's simply mapping from UTC to tz's standard time. At this point, if z.n - z.o = x.n [5] we have an equivalent time, and are almost done. The insecurity here is at the start of daylight time. Picture US Eastern for concreteness. The wall time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good sense then. The docs ask that an Eastern tzinfo class consider such a time to be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST on the day DST starts. We want to return the 1:MM EST spelling because that's the only spelling that makes sense on the local wall clock. In fact, if [5] holds at this point, we do have the standard-time spelling, but that takes a bit of proof. We first prove a stronger result. What's the difference between the LHS and RHS of [5]? Let diff = x.n - (z.n - z.o) [6] Now z.n = by [4] (y + y.s).n = by #5 y.n + y.s = since y.n = x.n x.n + y.s = since z and y are have the same tzinfo member, y.s = z.s by #2 x.n + z.s Plugging that back into [6] gives diff = x.n - ((x.n + z.s) - z.o) = expanding x.n - x.n - z.s + z.o = cancelling - z.s + z.o = by #2 z.d So diff = z.d. If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time spelling we wanted in the endcase described above. We're done. 
Contrarily, if z.d = 0, then we have a UTC equivalent, and are also done. If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to add to z (in effect, z is in tz's standard time, and we need to shift the local clock into tz's daylight time). Let z' = z + z.d = z + diff [7] and we can again ask whether z'.n - z'.o = x.n [8] If so, we're done. If not, the tzinfo class is insane, according to the assumptions we've made. This also requires a bit of proof. As before, let's compute the difference between the LHS and RHS of [8] (and skipping some of the justifications for the kinds of substitutions we've done several times already): diff' = x.n - (z'.n - z'.o) = replacing z'.n via [7] x.n - (z.n + diff - z'.o) = replacing diff via [6] x.n - (z.n + x.n - (z.n - z.o) - z'.o) = x.n - z.n - x.n + z.n - z.o + z'.o = cancel x.n - z.n + z.n - z.o + z'.o = cancel z.n - z.o + z'.o = #1 twice -z.s - z.d + z'.s + z'.d = z and z' have same tzinfo z'.d - z.d So z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal, we've found the UTC-equivalent so are done. In fact, we stop with [7] and return z', not bothering to compute z'.d. How could z.d and z'd differ? z' = z + z.d [7], so merely moving z' by a dst() offset, and starting *from* a time already in DST (we know z.d != 0), would have to change the result dst() returns: we start in DST, and moving a little further into it takes us out of DST. There isn't a sane case where this can happen. The closest it gets is at the end of DST, where there's an hour in UTC with no spelling in a hybrid tzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM UTC) because the docs insist on that, but 0:MM is taken as being in daylight time (4:MM UTC). There is no local time mapping to 5:MM UTC. The local clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in standard time. 
Since that's what the local clock *does*, we want to map both UTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous in local time, but so it goes -- it's the way the local clock works. When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0, so z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going. z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8] (correctly) concludes that z' is not UTC-equivalent to x. Because we know z.d said z was in daylight time (else [5] would have held and we would have stopped then), and we know z.d != z'.d (else [8] would have held and we have stopped then), and there are only 2 possible values dst() can return in Eastern, it follows that z'.d must be 0 (which it is in the example, but the reasoning doesn't depend on the example -- it depends on there being two possible dst() outcomes, one zero and the other non-zero). Therefore z' must be in standard time, and is the spelling we want in this case. Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is concerned (because it takes z' as being in standard time rather than the daylight time we intend here), but returning it gives the real-life "local clock repeats an hour" behavior when mapping the "unspellable" UTC hour into tz. When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with the 1:MM standard time spelling we want. So how can this break? One of the assumptions must be violated. Two possibilities: 1) [2] effectively says that y.s is invariant across all y belong to a given time zone. This isn't true if, for political reasons or continental drift, a region decides to change its base offset from UTC. 2) There may be versions of "double daylight" time where the tail end of the analysis gives up a step too early. I haven't thought about that enough to say. 
In any case, it's clear that the default fromutc() is strong enough to handle "almost all" time zones: so long as the standard offset is invariant, it doesn't matter if daylight time transition points change from year to year, or if daylight time is skipped in some years; it doesn't matter how large or small dst() may get within its bounds; and it doesn't even matter if some perverse time zone returns a negative dst()). So a breaking case must be pretty bizarre, and a tzinfo subclass can override fromutc() if it is. """ #brython does not have a _datetime, so lets comment this out for now. #try: # from _datetime import * #except ImportError: # pass #else: # # Clean up unused names # del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH, # _DI100Y, _DI400Y, _DI4Y, _MAXORDINAL, _MONTHNAMES, # _build_struct_time, _call_tzinfo_method, _check_date_fields, # _check_time_fields, _check_tzinfo_arg, _check_tzname, # _check_utc_offset, _cmp, _cmperror, _date_class, _days_before_month, # _days_before_year, _days_in_month, _format_time, _is_leap, # _isoweek1monday, _math, _ord2ymd, _time, _time_class, _tzinfo_class, # _wrap_strftime, _ymd2ord) # # XXX Since import * above excludes names that start with _, # # docstring does not get overwritten. In the future, it may be # # appropriate to maintain a single module level docstring and # # remove the following line. # #from _datetime import __doc__
gpl-2.0
stevenhwu/googletest
scripts/fuse_gtest_files.py
2577
8813
#!/usr/bin/env python # # Copyright 2009, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """fuse_gtest_files.py v0.2.0 Fuses Google Test source code into a .h file and a .cc file. SYNOPSIS fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR Scans GTEST_ROOT_DIR for Google Test source code, and generates two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc. Then you can build your tests by adding OUTPUT_DIR to the include search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. 
These two files contain everything you need to use Google Test. Hence you can "install" Google Test by copying them to wherever you want. GTEST_ROOT_DIR can be omitted and defaults to the parent directory of the directory holding this script. EXAMPLES ./fuse_gtest_files.py fused_gtest ./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest This tool is experimental. In particular, it assumes that there is no conditional inclusion of Google Test headers. Please report any problems to googletestframework@googlegroups.com. You can read http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for more information. """ __author__ = 'wan@google.com (Zhanyong Wan)' import os import re import sets import sys # We assume that this file is in the scripts/ directory in the Google # Test root directory. DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..') # Regex for matching '#include "gtest/..."'. INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"') # Regex for matching '#include "src/..."'. INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"') # Where to find the source seed files. GTEST_H_SEED = 'include/gtest/gtest.h' GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h' GTEST_ALL_CC_SEED = 'src/gtest-all.cc' # Where to put the generated files. GTEST_H_OUTPUT = 'gtest/gtest.h' GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc' def VerifyFileExists(directory, relative_path): """Verifies that the given file exists; aborts on failure. relative_path is the file path relative to the given directory. """ if not os.path.isfile(os.path.join(directory, relative_path)): print 'ERROR: Cannot find %s in directory %s.' % (relative_path, directory) print ('Please either specify a valid project root directory ' 'or omit it on the command line.') sys.exit(1) def ValidateGTestRootDir(gtest_root): """Makes sure gtest_root points to a valid gtest root directory. The function aborts the program on failure. 
""" VerifyFileExists(gtest_root, GTEST_H_SEED) VerifyFileExists(gtest_root, GTEST_ALL_CC_SEED) def VerifyOutputFile(output_dir, relative_path): """Verifies that the given output file path is valid. relative_path is relative to the output_dir directory. """ # Makes sure the output file either doesn't exist or can be overwritten. output_file = os.path.join(output_dir, relative_path) if os.path.exists(output_file): # TODO(wan@google.com): The following user-interaction doesn't # work with automated processes. We should provide a way for the # Makefile to force overwriting the files. print ('%s already exists in directory %s - overwrite it? (y/N) ' % (relative_path, output_dir)) answer = sys.stdin.readline().strip() if answer not in ['y', 'Y']: print 'ABORTED.' sys.exit(1) # Makes sure the directory holding the output file exists; creates # it and all its ancestors if necessary. parent_directory = os.path.dirname(output_file) if not os.path.isdir(parent_directory): os.makedirs(parent_directory) def ValidateOutputDir(output_dir): """Makes sure output_dir points to a valid output directory. The function aborts the program on failure. """ VerifyOutputFile(output_dir, GTEST_H_OUTPUT) VerifyOutputFile(output_dir, GTEST_ALL_CC_OUTPUT) def FuseGTestH(gtest_root, output_dir): """Scans folder gtest_root to generate gtest/gtest.h in output_dir.""" output_file = file(os.path.join(output_dir, GTEST_H_OUTPUT), 'w') processed_files = sets.Set() # Holds all gtest headers we've processed. def ProcessFile(gtest_header_path): """Processes the given gtest header file.""" # We don't process the same header twice. if gtest_header_path in processed_files: return processed_files.add(gtest_header_path) # Reads each line in the given gtest header. for line in file(os.path.join(gtest_root, gtest_header_path), 'r'): m = INCLUDE_GTEST_FILE_REGEX.match(line) if m: # It's '#include "gtest/..."' - let's process it recursively. 
ProcessFile('include/' + m.group(1)) else: # Otherwise we copy the line unchanged to the output file. output_file.write(line) ProcessFile(GTEST_H_SEED) output_file.close() def FuseGTestAllCcToFile(gtest_root, output_file): """Scans folder gtest_root to generate gtest/gtest-all.cc in output_file.""" processed_files = sets.Set() def ProcessFile(gtest_source_file): """Processes the given gtest source file.""" # We don't process the same #included file twice. if gtest_source_file in processed_files: return processed_files.add(gtest_source_file) # Reads each line in the given gtest source file. for line in file(os.path.join(gtest_root, gtest_source_file), 'r'): m = INCLUDE_GTEST_FILE_REGEX.match(line) if m: if 'include/' + m.group(1) == GTEST_SPI_H_SEED: # It's '#include "gtest/gtest-spi.h"'. This file is not # #included by "gtest/gtest.h", so we need to process it. ProcessFile(GTEST_SPI_H_SEED) else: # It's '#include "gtest/foo.h"' where foo is not gtest-spi. # We treat it as '#include "gtest/gtest.h"', as all other # gtest headers are being fused into gtest.h and cannot be # #included directly. # There is no need to #include "gtest/gtest.h" more than once. if not GTEST_H_SEED in processed_files: processed_files.add(GTEST_H_SEED) output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,)) else: m = INCLUDE_SRC_FILE_REGEX.match(line) if m: # It's '#include "src/foo"' - let's process it recursively. 
ProcessFile(m.group(1)) else: output_file.write(line) ProcessFile(GTEST_ALL_CC_SEED) def FuseGTestAllCc(gtest_root, output_dir): """Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir.""" output_file = file(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w') FuseGTestAllCcToFile(gtest_root, output_file) output_file.close() def FuseGTest(gtest_root, output_dir): """Fuses gtest.h and gtest-all.cc.""" ValidateGTestRootDir(gtest_root) ValidateOutputDir(output_dir) FuseGTestH(gtest_root, output_dir) FuseGTestAllCc(gtest_root, output_dir) def main(): argc = len(sys.argv) if argc == 2: # fuse_gtest_files.py OUTPUT_DIR FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1]) elif argc == 3: # fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR FuseGTest(sys.argv[1], sys.argv[2]) else: print __doc__ sys.exit(1) if __name__ == '__main__': main()
bsd-3-clause
tinkerinestudio/Tinkerine-Suite
TinkerineSuite/python/Lib/numpy/doc/subclassing.py
97
20160
""" ============================= Subclassing ndarray in python ============================= Credits ------- This page is based with thanks on the wiki page on subclassing by Pierre Gerard-Marchant - http://www.scipy.org/Subclasses. Introduction ------------ Subclassing ndarray is relatively simple, but it has some complications compared to other Python objects. On this page we explain the machinery that allows you to subclass ndarray, and the implications for implementing a subclass. ndarrays and object creation ============================ Subclassing ndarray is complicated by the fact that new instances of ndarray classes can come about in three different ways. These are: #. Explicit constructor call - as in ``MySubClass(params)``. This is the usual route to Python instance creation. #. View casting - casting an existing ndarray as a given subclass #. New from template - creating a new instance from a template instance. Examples include returning slices from a subclassed array, creating return types from ufuncs, and copying arrays. See :ref:`new-from-template` for more details The last two are characteristics of ndarrays - in order to support things like array slicing. The complications of subclassing ndarray are due to the mechanisms numpy has to support these latter two routes of instance creation. .. _view-casting: View casting ------------ *View casting* is the standard ndarray mechanism by which you take an ndarray of any subclass, and return a view of the array as another (specified) subclass: >>> import numpy as np >>> # create a completely useless ndarray subclass >>> class C(np.ndarray): pass >>> # create a standard ndarray >>> arr = np.zeros((3,)) >>> # take a view of it, as our useless subclass >>> c_arr = arr.view(C) >>> type(c_arr) <class 'C'> .. 
_new-from-template: Creating new from template -------------------------- New instances of an ndarray subclass can also come about by a very similar mechanism to :ref:`view-casting`, when numpy finds it needs to create a new instance from a template instance. The most obvious place this has to happen is when you are taking slices of subclassed arrays. For example: >>> v = c_arr[1:] >>> type(v) # the view is of type 'C' <class 'C'> >>> v is c_arr # but it's a new instance False The slice is a *view* onto the original ``c_arr`` data. So, when we take a view from the ndarray, we return a new ndarray, of the same class, that points to the data in the original. There are other points in the use of ndarrays where we need such views, such as copying arrays (``c_arr.copy()``), creating ufunc output arrays (see also :ref:`array-wrap`), and reducing methods (like ``c_arr.mean()``. Relationship of view casting and new-from-template -------------------------------------------------- These paths both use the same machinery. We make the distinction here, because they result in different input to your methods. Specifically, :ref:`view-casting` means you have created a new instance of your array type from any potential subclass of ndarray. :ref:`new-from-template` means you have created a new instance of your class from a pre-existing instance, allowing you - for example - to copy across attributes that are particular to your subclass. Implications for subclassing ---------------------------- If we subclass ndarray, we need to deal not only with explicit construction of our array type, but also :ref:`view-casting` or :ref:`new-from-template`. Numpy has the machinery to do this, and this machinery that makes subclassing slightly non-standard. There are two aspects to the machinery that ndarray uses to support views and new-from-template in subclasses. 
The first is the use of the ``ndarray.__new__`` method for the main work of object initialization, rather then the more usual ``__init__`` method. The second is the use of the ``__array_finalize__`` method to allow subclasses to clean up after the creation of views and new instances from templates. A brief Python primer on ``__new__`` and ``__init__`` ===================================================== ``__new__`` is a standard Python method, and, if present, is called before ``__init__`` when we create a class instance. See the `python __new__ documentation <http://docs.python.org/reference/datamodel.html#object.__new__>`_ for more detail. For example, consider the following Python code: .. testcode:: class C(object): def __new__(cls, *args): print 'Cls in __new__:', cls print 'Args in __new__:', args return object.__new__(cls, *args) def __init__(self, *args): print 'type(self) in __init__:', type(self) print 'Args in __init__:', args meaning that we get: >>> c = C('hello') Cls in __new__: <class 'C'> Args in __new__: ('hello',) type(self) in __init__: <class 'C'> Args in __init__: ('hello',) When we call ``C('hello')``, the ``__new__`` method gets its own class as first argument, and the passed argument, which is the string ``'hello'``. After python calls ``__new__``, it usually (see below) calls our ``__init__`` method, with the output of ``__new__`` as the first argument (now a class instance), and the passed arguments following. As you can see, the object can be initialized in the ``__new__`` method or the ``__init__`` method, or both, and in fact ndarray does not have an ``__init__`` method, because all the initialization is done in the ``__new__`` method. Why use ``__new__`` rather than just the usual ``__init__``? Because in some cases, as for ndarray, we want to be able to return an object of some other class. Consider the following: .. 
testcode:: class D(C): def __new__(cls, *args): print 'D cls is:', cls print 'D args in __new__:', args return C.__new__(C, *args) def __init__(self, *args): # we never get here print 'In D __init__' meaning that: >>> obj = D('hello') D cls is: <class 'D'> D args in __new__: ('hello',) Cls in __new__: <class 'C'> Args in __new__: ('hello',) >>> type(obj) <class 'C'> The definition of ``C`` is the same as before, but for ``D``, the ``__new__`` method returns an instance of class ``C`` rather than ``D``. Note that the ``__init__`` method of ``D`` does not get called. In general, when the ``__new__`` method returns an object of class other than the class in which it is defined, the ``__init__`` method of that class is not called. This is how subclasses of the ndarray class are able to return views that preserve the class type. When taking a view, the standard ndarray machinery creates the new ndarray object with something like:: obj = ndarray.__new__(subtype, shape, ... where ``subdtype`` is the subclass. Thus the returned view is of the same class as the subclass, rather than being of class ``ndarray``. That solves the problem of returning views of the same type, but now we have a new problem. The machinery of ndarray can set the class this way, in its standard methods for taking views, but the ndarray ``__new__`` method knows nothing of what we have done in our own ``__new__`` method in order to set attributes, and so on. (Aside - why not call ``obj = subdtype.__new__(...`` then? Because we may not have a ``__new__`` method with the same call signature). The role of ``__array_finalize__`` ================================== ``__array_finalize__`` is the mechanism that numpy provides to allow subclasses to handle the various ways that new instances get created. Remember that subclass instances can come about in these three ways: #. explicit constructor call (``obj = MySubClass(params)``). 
This will call the usual sequence of ``MySubClass.__new__`` then (if it exists) ``MySubClass.__init__``. #. :ref:`view-casting` #. :ref:`new-from-template` Our ``MySubClass.__new__`` method only gets called in the case of the explicit constructor call, so we can't rely on ``MySubClass.__new__`` or ``MySubClass.__init__`` to deal with the view casting and new-from-template. It turns out that ``MySubClass.__array_finalize__`` *does* get called for all three methods of object creation, so this is where our object creation housekeeping usually goes. * For the explicit constructor call, our subclass will need to create a new ndarray instance of its own class. In practice this means that we, the authors of the code, will need to make a call to ``ndarray.__new__(MySubClass,...)``, or do view casting of an existing array (see below) * For view casting and new-from-template, the equivalent of ``ndarray.__new__(MySubClass,...`` is called, at the C level. The arguments that ``__array_finalize__`` recieves differ for the three methods of instance creation above. The following code allows us to look at the call sequences and arguments: .. 
testcode:: import numpy as np class C(np.ndarray): def __new__(cls, *args, **kwargs): print 'In __new__ with class %s' % cls return np.ndarray.__new__(cls, *args, **kwargs) def __init__(self, *args, **kwargs): # in practice you probably will not need or want an __init__ # method for your subclass print 'In __init__ with class %s' % self.__class__ def __array_finalize__(self, obj): print 'In array_finalize:' print ' self type is %s' % type(self) print ' obj type is %s' % type(obj) Now: >>> # Explicit constructor >>> c = C((10,)) In __new__ with class <class 'C'> In array_finalize: self type is <class 'C'> obj type is <type 'NoneType'> In __init__ with class <class 'C'> >>> # View casting >>> a = np.arange(10) >>> cast_a = a.view(C) In array_finalize: self type is <class 'C'> obj type is <type 'numpy.ndarray'> >>> # Slicing (example of new-from-template) >>> cv = c[:1] In array_finalize: self type is <class 'C'> obj type is <class 'C'> The signature of ``__array_finalize__`` is:: def __array_finalize__(self, obj): ``ndarray.__new__`` passes ``__array_finalize__`` the new object, of our own class (``self``) as well as the object from which the view has been taken (``obj``). As you can see from the output above, the ``self`` is always a newly created instance of our subclass, and the type of ``obj`` differs for the three instance creation methods: * When called from the explicit constructor, ``obj`` is ``None`` * When called from view casting, ``obj`` can be an instance of any subclass of ndarray, including our own. * When called in new-from-template, ``obj`` is another instance of our own subclass, that we might use to update the new ``self`` instance. Because ``__array_finalize__`` is the only method that always sees new instances being created, it is the sensible place to fill in instance defaults for new object attributes, among other tasks. This may be clearer with an example. 
Simple example - adding an extra attribute to ndarray ----------------------------------------------------- .. testcode:: import numpy as np class InfoArray(np.ndarray): def __new__(subtype, shape, dtype=float, buffer=None, offset=0, strides=None, order=None, info=None): # Create the ndarray instance of our type, given the usual # ndarray input arguments. This will call the standard # ndarray constructor, but return an object of our type. # It also triggers a call to InfoArray.__array_finalize__ obj = np.ndarray.__new__(subtype, shape, dtype, buffer, offset, strides, order) # set the new 'info' attribute to the value passed obj.info = info # Finally, we must return the newly created object: return obj def __array_finalize__(self, obj): # ``self`` is a new object resulting from # ndarray.__new__(InfoArray, ...), therefore it only has # attributes that the ndarray.__new__ constructor gave it - # i.e. those of a standard ndarray. # # We could have got to the ndarray.__new__ call in 3 ways: # From an explicit constructor - e.g. InfoArray(): # obj is None # (we're in the middle of the InfoArray.__new__ # constructor, and self.info will be set when we return to # InfoArray.__new__) if obj is None: return # From view casting - e.g arr.view(InfoArray): # obj is arr # (type(obj) can be InfoArray) # From new-from-template - e.g infoarr[:3] # type(obj) is InfoArray # # Note that it is here, rather than in the __new__ method, # that we set the default value for 'info', because this # method sees all creation of default objects - with the # InfoArray.__new__ constructor, but also with # arr.view(InfoArray). 
self.info = getattr(obj, 'info', None) # We do not need to return anything Using the object looks like this: >>> obj = InfoArray(shape=(3,)) # explicit constructor >>> type(obj) <class 'InfoArray'> >>> obj.info is None True >>> obj = InfoArray(shape=(3,), info='information') >>> obj.info 'information' >>> v = obj[1:] # new-from-template - here - slicing >>> type(v) <class 'InfoArray'> >>> v.info 'information' >>> arr = np.arange(10) >>> cast_arr = arr.view(InfoArray) # view casting >>> type(cast_arr) <class 'InfoArray'> >>> cast_arr.info is None True This class isn't very useful, because it has the same constructor as the bare ndarray object, including passing in buffers and shapes and so on. We would probably prefer the constructor to be able to take an already formed ndarray from the usual numpy calls to ``np.array`` and return an object. Slightly more realistic example - attribute added to existing array ------------------------------------------------------------------- Here is a class that takes a standard ndarray that already exists, casts as our type, and adds an extra attribute. .. testcode:: import numpy as np class RealisticInfoArray(np.ndarray): def __new__(cls, input_array, info=None): # Input array is an already formed ndarray instance # We first cast to be our class type obj = np.asarray(input_array).view(cls) # add the new attribute to the created instance obj.info = info # Finally, we must return the newly created object: return obj def __array_finalize__(self, obj): # see InfoArray.__array_finalize__ for comments if obj is None: return self.info = getattr(obj, 'info', None) So: >>> arr = np.arange(5) >>> obj = RealisticInfoArray(arr, info='information') >>> type(obj) <class 'RealisticInfoArray'> >>> obj.info 'information' >>> v = obj[1:] >>> type(v) <class 'RealisticInfoArray'> >>> v.info 'information' .. 
_array-wrap: ``__array_wrap__`` for ufuncs ------------------------------------------------------- ``__array_wrap__`` gets called at the end of numpy ufuncs and other numpy functions, to allow a subclass to set the type of the return value and update attributes and metadata. Let's show how this works with an example. First we make the same subclass as above, but with a different name and some print statements: .. testcode:: import numpy as np class MySubClass(np.ndarray): def __new__(cls, input_array, info=None): obj = np.asarray(input_array).view(cls) obj.info = info return obj def __array_finalize__(self, obj): print 'In __array_finalize__:' print ' self is %s' % repr(self) print ' obj is %s' % repr(obj) if obj is None: return self.info = getattr(obj, 'info', None) def __array_wrap__(self, out_arr, context=None): print 'In __array_wrap__:' print ' self is %s' % repr(self) print ' arr is %s' % repr(out_arr) # then just call the parent return np.ndarray.__array_wrap__(self, out_arr, context) We run a ufunc on an instance of our new array: >>> obj = MySubClass(np.arange(5), info='spam') In __array_finalize__: self is MySubClass([0, 1, 2, 3, 4]) obj is array([0, 1, 2, 3, 4]) >>> arr2 = np.arange(5)+1 >>> ret = np.add(arr2, obj) In __array_wrap__: self is MySubClass([0, 1, 2, 3, 4]) arr is array([1, 3, 5, 7, 9]) In __array_finalize__: self is MySubClass([1, 3, 5, 7, 9]) obj is MySubClass([0, 1, 2, 3, 4]) >>> ret MySubClass([1, 3, 5, 7, 9]) >>> ret.info 'spam' Note that the ufunc (``np.add``) has called the ``__array_wrap__`` method of the input with the highest ``__array_priority__`` value, in this case ``MySubClass.__array_wrap__``, with arguments ``self`` as ``obj``, and ``out_arr`` as the (ndarray) result of the addition. In turn, the default ``__array_wrap__`` (``ndarray.__array_wrap__``) has cast the result to class ``MySubClass``, and called ``__array_finalize__`` - hence the copying of the ``info`` attribute. This has all happened at the C level. 
But, we could do anything we wanted: .. testcode:: class SillySubClass(np.ndarray): def __array_wrap__(self, arr, context=None): return 'I lost your data' >>> arr1 = np.arange(5) >>> obj = arr1.view(SillySubClass) >>> arr2 = np.arange(5) >>> ret = np.multiply(obj, arr2) >>> ret 'I lost your data' So, by defining a specific ``__array_wrap__`` method for our subclass, we can tweak the output from ufuncs. The ``__array_wrap__`` method requires ``self``, then an argument - which is the result of the ufunc - and an optional parameter *context*. This parameter is returned by some ufuncs as a 3-element tuple: (name of the ufunc, argument of the ufunc, domain of the ufunc). ``__array_wrap__`` should return an instance of its containing class. See the masked array subclass for an implementation. In addition to ``__array_wrap__``, which is called on the way out of the ufunc, there is also an ``__array_prepare__`` method which is called on the way into the ufunc, after the output arrays are created but before any computation has been performed. The default implementation does nothing but pass through the array. ``__array_prepare__`` should not attempt to access the array data or resize the array, it is intended for setting the output array type, updating attributes and metadata, and performing any checks based on the input that may be desired before computation begins. Like ``__array_wrap__``, ``__array_prepare__`` must return an ndarray or subclass thereof or raise an error. Extra gotchas - custom ``__del__`` methods and ndarray.base ----------------------------------------------------------- One of the problems that ndarray solves is keeping track of memory ownership of ndarrays and their views. Consider the case where we have created an ndarray, ``arr`` and have taken a slice with ``v = arr[1:]``. The two objects are looking at the same memory. 
Numpy keeps track of where the data came from for a particular array or view, with the ``base`` attribute: >>> # A normal ndarray, that owns its own data >>> arr = np.zeros((4,)) >>> # In this case, base is None >>> arr.base is None True >>> # We take a view >>> v1 = arr[1:] >>> # base now points to the array that it derived from >>> v1.base is arr True >>> # Take a view of a view >>> v2 = v1[1:] >>> # base points to the view it derived from >>> v2.base is v1 True In general, if the array owns its own memory, as for ``arr`` in this case, then ``arr.base`` will be None - there are some exceptions to this - see the numpy book for more details. The ``base`` attribute is useful in being able to tell whether we have a view or the original array. This in turn can be useful if we need to know whether or not to do some specific cleanup when the subclassed array is deleted. For example, we may only want to do the cleanup if the original array is deleted, but not the views. For an example of how this can work, have a look at the ``memmap`` class in ``numpy.core``. """
agpl-3.0
peterfpeterson/mantid
Framework/PythonInterface/plugins/algorithms/LoadCIF.py
3
20264
# Mantid Repository : https://github.com/mantidproject/mantid # # Copyright &copy; 2018 ISIS Rutherford Appleton Laboratory UKRI, # NScD Oak Ridge National Laboratory, European Spallation Source, # Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS # SPDX - License - Identifier: GPL - 3.0 + # pylint: disable=no-init,too-few-public-methods from mantid.kernel import * from mantid.simpleapi import * from mantid.api import * from mantid.geometry import SpaceGroupFactory, CrystalStructure, UnitCell import re import numpy as np # pylint: disable=invalid-name def removeErrorEstimateFromNumber(numberString): errorBegin = numberString.find('(') if errorBegin == -1: return numberString return numberString[:errorBegin] def getFloatOrNone(strValue): try: return float(strValue) except ValueError: return None def convertBtoU(bIso): if bIso is None: return None return bIso / (8.0 * np.pi * np.pi) class SpaceGroupBuilder(object): """ Helper class that extracts the space group from CIF data provided by PyCifRW. For testing purposes, dictionaries with the appropriate data can be passed in as well, so the source of the parsed data is replaceable. """ string_keys = ['_space_group_name_h-m_alt', '_symmetry_space_group_name_h-m'] number_keys = ['_space_group_it_number', '_symmetry_int_tables_number'] def __init__(self, cifData=None): if cifData is not None: self.spaceGroup = self._getSpaceGroup(cifData) def _getSpaceGroup(self, cifData): try: return self._getSpaceGroupFromString(cifData) # pylint: disable=unused-variable except (RuntimeError, ValueError): try: return self._getSpaceGroupFromNumber(cifData) # pylint: disable=unused-variable,invalid-name except RuntimeError: raise RuntimeError( 'Can not create space group from supplied CIF-file. 
You could try to modify the HM-symbol ' 'to contain spaces between the components.\n' 'Keys to look for: _space_group_name_H-M_alt, _symmetry_space_group_name_H-M') def _getSpaceGroupFromString(self, cifData): # Try two possibilities for space group symbol. If neither is present, throw a RuntimeError. rawSpaceGroupSymbol = [str(cifData[x]) for x in self.string_keys if x in cifData.keys()] if len(rawSpaceGroupSymbol) == 0: raise RuntimeError('No space group symbol in CIF.') cleanSpaceGroupSymbol = self._getCleanSpaceGroupSymbol(rawSpaceGroupSymbol[0]) # If the symbol is not registered, throw as well. return SpaceGroupFactory.createSpaceGroup(cleanSpaceGroupSymbol).getHMSymbol() def _getCleanSpaceGroupSymbol(self, rawSpaceGroupSymbol): # Remove :1 and :H from the symbol. Those are not required at the moment because they are the default. # Also substitute 'R' and 'Z' endings used by ICSD to indicate alternative origin choice or settings mappings = {':[1Hh]':'', ' S$':'', ' H$':'', ' Z$':' :2', ' R$':' :r'} for k, v in mappings.items(): rawSpaceGroupSymbol = re.sub(k, v, rawSpaceGroupSymbol) return rawSpaceGroupSymbol.strip() def _getSpaceGroupFromNumber(self, cifData): spaceGroupNumber = [int(cifData[x]) for x in self.number_keys if x in cifData.keys()] if len(spaceGroupNumber) == 0: raise RuntimeError('No space group symbol in CIF.') possibleSpaceGroupSymbols = SpaceGroupFactory.subscribedSpaceGroupSymbols(spaceGroupNumber[0]) if len(possibleSpaceGroupSymbols) != 1: raise RuntimeError( 'Can not use space group number to determine space group for no. {0}'.format(spaceGroupNumber)) return SpaceGroupFactory.createSpaceGroup(possibleSpaceGroupSymbols[0]).getHMSymbol() class UnitCellBuilder(object): """ Helper class that builds a unit cell from CIF data provided by PyCifRW. For testing purposes, dictionaries with the appropriate data can be passed in as well, so the source of the parsed data is replaceable. 
""" def __init__(self, cifData=None): if cifData is not None: self.unitCell = self._getUnitCell(cifData) def _getUnitCell(self, cifData): unitCellComponents = ['_cell_length_a', '_cell_length_b', '_cell_length_c', '_cell_angle_alpha', '_cell_angle_beta', '_cell_angle_gamma'] unitCellValueMap = dict([(str(x), removeErrorEstimateFromNumber(str(cifData[x]))) if x in cifData.keys() else (str(x), None) for x in unitCellComponents]) if unitCellValueMap['_cell_length_a'] is None: raise RuntimeError('The a-parameter of the unit cell is not specified in the supplied CIF.\n' 'Key to look for: _cell_length_a') replacementMap = { '_cell_length_b': str(unitCellValueMap['_cell_length_a']), '_cell_length_c': str(unitCellValueMap['_cell_length_a']), '_cell_angle_alpha': '90.0', '_cell_angle_beta': '90.0', '_cell_angle_gamma': '90.0'} unitCellValues = [ unitCellValueMap[str(key)] if unitCellValueMap[str(key)] is not None else replacementMap[str(key)] for key in unitCellComponents] return ' '.join(unitCellValues) class AtomListBuilder(object): """ Helper class that builds a list of atoms from CIF data provided by PyCifRW. For testing purposes, dictionaries with the appropriate data can be passed in as well, so the source of the parsed data is replaceable. 
""" def __init__(self, cifData=None, unitCell=None): if cifData is not None: self.atomList = self._getAtoms(cifData, unitCell) def _getAtoms(self, cifData, unitCell=None): labels = self._getLabels(cifData) atomCoordinates = self._getAtomCoordinates(cifData, labels) occupancies = self._getOccupancies(cifData, labels) atomSymbols = self._getAtomSymbols(cifData, labels) isotropicUs = self._getIsotropicUs(cifData, labels, unitCell) atomLines = [] for atomLabel in labels: stringAtomLine = [str(x) for x in ( atomSymbols[atomLabel], atomCoordinates[atomLabel], occupancies[atomLabel], isotropicUs[atomLabel]) if x is not None] cleanLine = [stringAtomLine[0]] + [removeErrorEstimateFromNumber(x) for x in list(stringAtomLine[1:])] atomLines.append(' '.join(cleanLine)) return ';'.join(atomLines) def _getLabels(self, cifData): try: return cifData['_atom_site_label'] except KeyError: # If there are no atomic coordinates specified, there is really no point in continuing with replacement labels. if '_atom_site_fract_x' not in cifData.keys(): raise RuntimeError( 'Too much information missing from CIF-file. Does it contain a loop_ that defines atoms?') return [str(x) for x in range(len(cifData['_atom_site_fract_x']))] def _getAtomCoordinates(self, cifData, labels): coordinateFields = ['_atom_site_fract_x', '_atom_site_fract_y', '_atom_site_fract_z'] for field in coordinateFields: if field not in cifData.keys(): raise RuntimeError( 'Mandatory field {0} not found in CIF-file.' 
'Please check the atomic position definitions.'.format(field)) # Return a dict like { 'label1': 'x y z', 'label2': 'x y z' } return dict( [(label, ' '.join([removeErrorEstimateFromNumber(c) for c in (x, y, z)])) for label, x, y, z in zip(labels, *[cifData[field] for field in coordinateFields])]) def _getOccupancies(self, cifData, labels): occupancyField = '_atom_site_occupancy' occupancies = [] if occupancyField in cifData.keys(): occupancies += cifData[occupancyField] else: occupancies += ['1.0'] * len(labels) return dict(list(zip(labels, occupancies))) def _getAtomSymbols(self, cifData, labels): rawAtomSymbols = [cifData[x] for x in ['_atom_site_type_symbol', '_atom_site_label'] if x in cifData.keys()] if len(rawAtomSymbols) == 0: raise RuntimeError('Cannot determine atom types, both _atom_site_type_symbol and _atom_site_label are ' 'missing.') # Return a dict like { 'label1': 'Element1', ... } extracted from either _atom_site_type_symbol or _atom_site_label return dict( [(label, self._getCleanAtomSymbol(x)) for label, x in zip(labels, rawAtomSymbols[0])]) def _getCleanAtomSymbol(self, atomSymbol): nonCharacterRe = re.compile('[^a-z]', re.IGNORECASE) return re.sub(nonCharacterRe, '', atomSymbol) def _getIsotropicUs(self, cifData, labels, unitCell): keyUIso = '_atom_site_u_iso_or_equiv' keyBIso = '_atom_site_b_iso_or_equiv' # Try to get a list of isotropic U-values, replace invalid ones by None isotropicUs = [] if keyUIso in cifData.keys(): isotropicUNoErrors = [removeErrorEstimateFromNumber(u) for u in cifData[keyUIso]] isotropicUs += [getFloatOrNone(u) for u in isotropicUNoErrors] elif keyBIso in cifData.keys(): isotropicBsNoErrors = [removeErrorEstimateFromNumber(b) for b in cifData[keyBIso]] isotropicUs += [convertBtoU(getFloatOrNone(b)) for b in isotropicBsNoErrors] else: isotropicUs += [None] * len(labels) isotropicUMap = dict(list(zip(labels, isotropicUs))) # If there are None-objects in the list, try to get the equivalent U-values if None in isotropicUs: 
try: anisoLabels = self._get_ansitropic_labels(cifData) equivalentUMap = self._getEquivalentUs(cifData, anisoLabels, unitCell) for key, uIso in isotropicUMap.items(): if uIso is None and key in equivalentUMap: isotropicUMap[key] = equivalentUMap[key] except RuntimeError: pass # Return dict like { 'label1': 'U_iso_or_equiv', ... } return isotropicUMap def _getEquivalentUs(self, cifData, labels, unitCell): anisotropicParameters = self._getAnisotropicParametersU(cifData, labels) sumWeights = self._getMetricDependentWeights(unitCell) # Return U_equiv calculated according to [Fischer & Tillmanns, Acta Cryst C44, p775, 10.1107/S0108270187012745] # in a dict like { 'label1': 'U_equiv1' ... }. Invalid matrices (containing None) are excluded. return dict([(label, np.around(np.sum(np.multiply(uMatrix, sumWeights)) / 3., decimals=5)) for label, uMatrix in anisotropicParameters.items() if uMatrix.dtype.type != np.object_]) def _getAnisotropicParametersU(self, cifData, labels): # Try to extract U or if that fails, B. 
try: return self._getTensors(cifData, labels, ['_atom_site_aniso_u_11', '_atom_site_aniso_u_12', '_atom_site_aniso_u_13', '_atom_site_aniso_u_22', '_atom_site_aniso_u_23', '_atom_site_aniso_u_33']) except RuntimeError: bTensors = self._getTensors(cifData, labels, ['_atom_site_aniso_b_11', '_atom_site_aniso_b_12', '_atom_site_aniso_b_13', '_atom_site_aniso_b_22', '_atom_site_aniso_b_23', '_atom_site_aniso_b_33']) return dict([(label, convertBtoU(bTensor)) for label, bTensor in bTensors.items()]) def _get_ansitropic_labels(self, cifData): anisoLabel = '_atom_site_aniso_label' if anisoLabel not in cifData.keys(): raise RuntimeError('Mandatory field \'_atom_site_aniso_label\' is missing.') anisoLabels = cifData[anisoLabel] return anisoLabels def _getTensors(self, cifData, labels, keys): values = [] for key in keys: if key not in cifData.keys(): raise RuntimeError('Can not construct tensor with missing element \'{0}\'.'.format(key)) else: values.append([getFloatOrNone(removeErrorEstimateFromNumber(x)) for x in cifData[key]]) # Return a 3x3-matrix for each label based on the assumption that u_j,i == u_i,j return dict([(label, np.array([[u11, u12, u13], [u12, u22, u23], [u13, u23, u33]])) for label, u11, u12, u13, u22, u23, u33 in zip(labels, *values)]) def _getMetricDependentWeights(self, unitCell): metricTensor = unitCell.getG() reciprocalMatrix = self._getReciprocalLengthSquaredMatrix(unitCell) return np.multiply(metricTensor, reciprocalMatrix) def _getReciprocalLengthSquaredMatrix(self, unitCell): reciprocalLengthVector = np.array([[unitCell.astar(), unitCell.bstar(), unitCell.cstar()]]) return np.dot(reciprocalLengthVector.transpose(), reciprocalLengthVector) class CrystalStructureBuilder(object): """ This helper class simplifies the creation of CrystalStructure-objects from CIF-files. It uses the helper classes defined above. 
""" def __init__(self, cif_data=None): if cif_data is not None: self.spaceGroup = SpaceGroupBuilder(cif_data).spaceGroup self.unitCell = UnitCellBuilder(cif_data).unitCell self.atoms = AtomListBuilder(cif_data, UnitCell(*[float(removeErrorEstimateFromNumber(x)) for x in self.unitCell.split()])).atomList def getCrystalStructure(self): return CrystalStructure(self.unitCell, self.spaceGroup, self.atoms) class UBMatrixBuilder(object): ub_matrix_keys = ['_diffrn_orient_matrix_ub_11', '_diffrn_orient_matrix_ub_12', '_diffrn_orient_matrix_ub_13', '_diffrn_orient_matrix_ub_21', '_diffrn_orient_matrix_ub_22', '_diffrn_orient_matrix_ub_23', '_diffrn_orient_matrix_ub_31', '_diffrn_orient_matrix_ub_32', '_diffrn_orient_matrix_ub_33'] def __init__(self, cif_data=None): if cif_data is not None: self._ubMatrix = self._getUBMatrix(cif_data) def getUBMatrix(self): return self._ubMatrix def _getUBMatrix(self, cifData): ubValues = [str(cifData[key]) if key in cifData.keys() else None for key in self.ub_matrix_keys] if None in ubValues: raise RuntimeError('Can not load UB matrix from CIF, values are missing.') return ','.join(ubValues) class LoadCIF(PythonAlgorithm): def category(self): return "Diffraction\\DataHandling" def name(self): return "LoadCIF" def summary(self): return "This algorithm loads a CIF file using the PyCifRW package and assigns a CrystalStructure to the sample of the workspace." 
def PyInit(self): self.declareProperty( WorkspaceProperty(name='Workspace', defaultValue='', direction=Direction.InOut), doc='Workspace into which the crystal structure is placed.') self.declareProperty( FileProperty(name='InputFile', defaultValue='', action=FileAction.Load, extensions=['cif']), doc='A CIF file containing a crystal structure.') self.declareProperty('LoadUBMatrix', False, direction=Direction.Input, doc='Load UB-matrix from CIF file if available.') def PyExec(self): try: self._loadFromCif() except ImportError: raise RuntimeError('This algorithm requires an additional Python package: PyCifRW' ' (https://pypi.python.org/pypi/PyCifRW/4.1)') def _loadFromCif(self): from CifFile import ReadCif cifFileUrl = self._getFileUrl() workspace = self.getProperty('Workspace').value # Try to parse cif file using PyCifRW parsedCifFile = ReadCif(cifFileUrl) cif_data = self._data_with_space_group_keys(parsedCifFile) self._setCrystalStructureFromCifFile(workspace, cif_data) ubOption = self.getProperty('LoadUBMatrix').value if ubOption: self._check_has_a_ub_matrix_key(cif_data) self._setUBMatrixFromCifFile(workspace, cif_data) def _data_with_space_group_keys(self, cif_file): """ Returns the cif data which contains at least one of the required SpaceGroupBuilder keys. :param cif_file: The parsed cif file to check for keys. :return: The Data section containing at least one of the required SpaceGroupBuilder keys. """ for data_key in cif_file.keys(): cif_data = cif_file[data_key] if self._has_a_space_group_key(cif_data): return cif_data raise RuntimeError(f"Could not find any Space Group keys. Missing one of the following: " f"{str(SpaceGroupBuilder.string_keys + SpaceGroupBuilder.number_keys)}") def _has_a_space_group_key(self, cif_data): """ Returns true if the cif data contains at least one Space Group key. :param cif_data: The cif data to check for a Space Group key. :return: True if the cif data contains at least one Space Group key. 
""" space_group_keys = SpaceGroupBuilder.string_keys + SpaceGroupBuilder.number_keys for key in space_group_keys: if key in cif_data.keys(): return True return False def _check_has_a_ub_matrix_key(self, cif_data): """ Checks to see if the cif data contains at least one UB Matrix key. Raises if it does not. :param cif_data: The cif data to check for a UB Matrix key. :return: None """ for key in UBMatrixBuilder.ub_matrix_keys: if key in cif_data.keys(): return raise RuntimeError(f"Could not find any UB Matrix keys. Missing one of the following: " f"{UBMatrixBuilder.ub_matrix_keys}") def _getFileUrl(self): # ReadCif requires a URL, windows path specs seem to confuse urllib, # so the pathname is converted to a URL before passing it to ReadCif. # pylint: disable=no-name-in-module try: from urllib import pathname2url except ImportError: from urllib.request import pathname2url cifFileName = self.getProperty('InputFile').value return pathname2url(cifFileName) def _setCrystalStructureFromCifFile(self, workspace, cif_data): crystalStructure = self._getCrystalStructureFromCifFile(cif_data) workspace.sample().setCrystalStructure(crystalStructure) def _getCrystalStructureFromCifFile(self, cif_data): builder = CrystalStructureBuilder(cif_data) crystalStructure = builder.getCrystalStructure() self.log().information('''Loaded the following crystal structure: Unit cell: {0} Space group: {1} Atoms: {2} '''.format(builder.unitCell, builder.spaceGroup, '\n '.join(builder.atoms.split(';')))) return crystalStructure def _setUBMatrixFromCifFile(self, workspace, cifFile): ubMatrix = self._getUBMatrixFromCifFile(cifFile) setUBAlgorithm = self.createChildAlgorithm('SetUB') setUBAlgorithm.setProperty('Workspace', workspace) setUBAlgorithm.setProperty('UB', ubMatrix) setUBAlgorithm.execute() def _getUBMatrixFromCifFile(self, cifFile): builder = UBMatrixBuilder(cifFile) return builder.getUBMatrix() AlgorithmFactory.subscribe(LoadCIF)
gpl-3.0
rbbratta/virt-test
libvirt/tests/src/virsh_cmd/domain/virsh_save.py
1
2450
import os from autotest.client.shared import error from virttest import libvirt_vm, virsh def run_virsh_save(test, params, env): """ Test command: virsh save. The command can save the RAM state of a running domain. 1.Prepare test environment. 2.When the libvirtd == "off", stop the libvirtd service. 3.Run virsh save command with assigned options. 4.Recover test environment.(If the libvirtd service is stopped ,start the libvirtd service.) 5.Confirm the test result. """ vm_name = params.get("main_vm") vm = env.get_vm(params["main_vm"]) vm.verify_alive() domid = vm.get_id().strip() domuuid = vm.get_uuid().strip() savefile = params.get("save_file", "save.file") # If savefile is not an abs path, join it to test.tmpdir if os.path.dirname(savefile) is "": savefile = os.path.join(test.tmpdir, savefile) pre_vm_state = params.get("save_pre_vm_state", "null") libvirtd = params.get("save_libvirtd") extra_param = params.get("save_extra_param") vm_ref = params.get("save_vm_ref") # prepare the environment if vm_ref == "name" and pre_vm_state == "paused": virsh.suspend(vm_name) elif vm_ref == "name" and pre_vm_state == "shut off": virsh.destroy(vm_name) # set the option if vm_ref == "id": vm_ref = domid elif vm_ref == "hex_id": vm_ref = hex(int(domid)) elif vm_ref == "uuid": vm_ref = domuuid elif vm_ref == "save_invalid_id" or vm_ref == "save_invalid_uuid": vm_ref = params.get(vm_ref) elif vm_ref.find("name") != -1 or vm_ref == "extra_param": savefile = "%s %s" % (savefile, extra_param) if vm_ref == "only_name": savefile = " " vm_ref = vm_name if libvirtd == "off": libvirt_vm.libvirtd_stop() status = virsh.save(vm_ref, savefile, ignore_status=True).exit_status # recover libvirtd service start if libvirtd == "off": libvirt_vm.libvirtd_start() # cleanup if os.path.exists(savefile): virsh.restore(savefile) os.remove(savefile) # check status_error status_error = params.get("save_status_error") if status_error == "yes": if status == 0: raise error.TestFail("Run successfully with wrong 
command!") elif status_error == "no": if status != 0: raise error.TestFail("Run failed with right command")
gpl-2.0
TangXT/edx-platform
lms/djangoapps/instructor/tests/test_access.py
30
6315
""" Test instructor.access """ from nose.tools import raises from student.tests.factories import UserFactory from xmodule.modulestore.tests.factories import CourseFactory from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from django.test.utils import override_settings from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE from student.roles import CourseBetaTesterRole, CourseStaffRole from django_comment_common.models import (Role, FORUM_ROLE_MODERATOR) from instructor.access import (allow_access, revoke_access, list_with_level, update_forum_role) @override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE) class TestInstructorAccessList(ModuleStoreTestCase): """ Test access listings. """ def setUp(self): self.course = CourseFactory.create() self.instructors = [UserFactory.create() for _ in xrange(4)] for user in self.instructors: allow_access(self.course, user, 'instructor') self.beta_testers = [UserFactory.create() for _ in xrange(4)] for user in self.beta_testers: allow_access(self.course, user, 'beta') def test_list_instructors(self): instructors = list_with_level(self.course, 'instructor') self.assertEqual(set(instructors), set(self.instructors)) def test_list_beta(self): beta_testers = list_with_level(self.course, 'beta') self.assertEqual(set(beta_testers), set(self.beta_testers)) @override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE) class TestInstructorAccessAllow(ModuleStoreTestCase): """ Test access allow. """ def setUp(self): self.course = CourseFactory.create() def test_allow(self): user = UserFactory() allow_access(self.course, user, 'staff') self.assertTrue(CourseStaffRole(self.course.id).has_user(user)) def test_allow_twice(self): user = UserFactory() allow_access(self.course, user, 'staff') allow_access(self.course, user, 'staff') self.assertTrue(CourseStaffRole(self.course.id).has_user(user)) def test_allow_beta(self): """ Test allow beta against list beta. 
""" user = UserFactory() allow_access(self.course, user, 'beta') self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(user)) @raises(ValueError) def test_allow_badlevel(self): user = UserFactory() allow_access(self.course, user, 'robot-not-a-level') @raises(Exception) def test_allow_noneuser(self): user = None allow_access(self.course, user, 'staff') @override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE) class TestInstructorAccessRevoke(ModuleStoreTestCase): """ Test access revoke. """ def setUp(self): self.course = CourseFactory.create() self.staff = [UserFactory.create() for _ in xrange(4)] for user in self.staff: allow_access(self.course, user, 'staff') self.beta_testers = [UserFactory.create() for _ in xrange(4)] for user in self.beta_testers: allow_access(self.course, user, 'beta') def test_revoke(self): user = self.staff[0] revoke_access(self.course, user, 'staff') self.assertFalse(CourseStaffRole(self.course.id).has_user(user)) def test_revoke_twice(self): user = self.staff[0] revoke_access(self.course, user, 'staff') self.assertFalse(CourseStaffRole(self.course.id).has_user(user)) def test_revoke_beta(self): user = self.beta_testers[0] revoke_access(self.course, user, 'beta') self.assertFalse(CourseBetaTesterRole(self.course.id).has_user(user)) @raises(ValueError) def test_revoke_badrolename(self): user = UserFactory() revoke_access(self.course, user, 'robot-not-a-level') @override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE) class TestInstructorAccessForum(ModuleStoreTestCase): """ Test forum access control. 
""" def setUp(self): self.course = CourseFactory.create() self.mod_role = Role.objects.create( course_id=self.course.id, name=FORUM_ROLE_MODERATOR ) self.moderators = [UserFactory.create() for _ in xrange(4)] for user in self.moderators: self.mod_role.users.add(user) def test_allow(self): user = UserFactory.create() update_forum_role(self.course.id, user, FORUM_ROLE_MODERATOR, 'allow') self.assertIn(user, self.mod_role.users.all()) def test_allow_twice(self): user = UserFactory.create() update_forum_role(self.course.id, user, FORUM_ROLE_MODERATOR, 'allow') self.assertIn(user, self.mod_role.users.all()) update_forum_role(self.course.id, user, FORUM_ROLE_MODERATOR, 'allow') self.assertIn(user, self.mod_role.users.all()) @raises(Role.DoesNotExist) def test_allow_badrole(self): user = UserFactory.create() update_forum_role(self.course.id, user, 'robot-not-a-real-role', 'allow') def test_revoke(self): user = self.moderators[0] update_forum_role(self.course.id, user, FORUM_ROLE_MODERATOR, 'revoke') self.assertNotIn(user, self.mod_role.users.all()) def test_revoke_twice(self): user = self.moderators[0] update_forum_role(self.course.id, user, FORUM_ROLE_MODERATOR, 'revoke') self.assertNotIn(user, self.mod_role.users.all()) update_forum_role(self.course.id, user, FORUM_ROLE_MODERATOR, 'revoke') self.assertNotIn(user, self.mod_role.users.all()) def test_revoke_notallowed(self): user = UserFactory() update_forum_role(self.course.id, user, FORUM_ROLE_MODERATOR, 'revoke') self.assertNotIn(user, self.mod_role.users.all()) @raises(Role.DoesNotExist) def test_revoke_badrole(self): user = self.moderators[0] update_forum_role(self.course.id, user, 'robot-not-a-real-role', 'allow') @raises(ValueError) def test_bad_mode(self): user = UserFactory() update_forum_role(self.course.id, user, FORUM_ROLE_MODERATOR, 'robot-not-a-mode')
agpl-3.0
chengduoZH/Paddle
python/paddle/fluid/tests/unittests/test_parallel_executor_drop_scope.py
4
2783
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import unittest import paddle.fluid as fluid import numpy import os class TestParallelExecutorDropExeScope(unittest.TestCase): def check_drop_scope(self, use_cuda=True): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() if not use_cuda: os.environ['CPU_NUM'] = str(2) train_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): data = fluid.layers.data(name='X', shape=[1], dtype='float32') hidden = fluid.layers.fc(input=data, size=10) loss = fluid.layers.mean(hidden) test_program = fluid.default_main_program().clone(for_test=True) fluid.optimizer.SGD(learning_rate=0.01).minimize(loss) exe = fluid.Executor(place) exe.run(startup_program) exec_strateg = fluid.ExecutionStrategy() exec_strateg.num_iteration_per_drop_scope = 10 train_exe = fluid.ParallelExecutor( use_cuda=use_cuda, main_program=train_program, loss_name=loss.name, exec_strategy=exec_strateg) test_exe = fluid.ParallelExecutor( use_cuda=use_cuda, main_program=test_program, share_vars_from=train_exe, exec_strategy=exec_strateg) x = numpy.random.random(size=(10, 1)).astype('float32') train_exe.run(feed={"X": x}, fetch_list=[loss.name]) test_exe.run(feed={"X": x}, fetch_list=[loss.name]) assert train_exe._need_create_local_exe_scopes() == False assert test_exe._need_create_local_exe_scopes() == False # 
drop the local execution scope immediately train_exe.drop_local_exe_scopes() test_exe.drop_local_exe_scopes() assert train_exe._need_create_local_exe_scopes() assert test_exe._need_create_local_exe_scopes() def test_drop_scope(self): self.check_drop_scope(use_cuda=False) if fluid.core.is_compiled_with_cuda(): self.check_drop_scope(use_cuda=True) if __name__ == '__main__': unittest.main()
apache-2.0
Bindupriya/nuxeo-drive
nuxeo-drive-client/nxdrive/osi/darwin/darwin.py
6
8399
''' @author: Remi Cattiau ''' from nxdrive.osi import AbstractOSIntegration import os import urllib2 from nxdrive.logging_config import get_logger from nxdrive.utils import normalized_path from nxdrive.manager import Manager log = get_logger(__name__) import objc from Foundation import * import AppKit from AppKit import * def serviceSelector(fn): # this is the signature of service selectors return objc.selector(fn, signature="v@:@@o^@") class RightClickService(NSObject): @serviceSelector def macRightClick_userData_error_(self, pboard, data, error): log.trace("macRightClick has been called") try: types = pboard.types() pboardString = None if NSURLPboardType in types: pboardArray = pboard.propertyListForType_(NSURLPboardType) log.error("Retrieve property list stuff %r", pboardArray) for value in pboardArray: if value is None or value == "": continue # TODO Replug prompt_metadata on this one url = Foundation.NSURL.URLWithString_(value) if url is None: if value.startswith("file://"): value = value[7:] value = urllib2.unquote(value) else: value = url.path() log.debug("Should open : %s", value) from PyQt4.QtCore import QCoreApplication QCoreApplication.instance().show_metadata(value) return None except Exception as e: log.exception(e) class DarwinIntegration(AbstractOSIntegration): ''' classdocs ''' NXDRIVE_SCHEME = 'nxdrive' NDRIVE_AGENT_TEMPLATE = """\ <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <dict> <key>Label</key> <string>org.nuxeo.drive.agentlauncher</string> <key>RunAtLoad</key> <true/> <key>Program</key> <string>%s</string> </dict> </plist> """ def _get_agent_file(self): agents_folder = os.path.expanduser('~/Library/LaunchAgents') agent_filepath = os.path.join(agents_folder, self._manager.get_cf_bundle_identifier() + '.plist') return agent_filepath def register_startup(self): """Register the Nuxeo Drive.app as a user Launch Agent 
http://developer.apple.com/library/mac/#documentation/MacOSX/Conceptual/BPSystemStartup/Chapters/CreatingLaunchdJobs.html """ agent_filepath = self._get_agent_file() agents_folder = os.path.dirname(agent_filepath) exe_path = self._manager.find_exe_path() log.debug("Registering '%s' for startup in: '%s'", exe_path, agent_filepath) if not os.path.exists(agents_folder): log.debug("Making launch agent folder %s", agents_folder) os.makedirs(agents_folder) log.debug("Writing launch agent file %s", agent_filepath) with open(agent_filepath, 'wb') as f: f.write(self.NDRIVE_AGENT_TEMPLATE % exe_path) def unregister_startup(self): agent_filepath = self._get_agent_file() if os.path.exists(agent_filepath): os.remove(agent_filepath) def _register_services(self): serviceProvider = RightClickService.alloc().init() NSRegisterServicesProvider(serviceProvider, self._manager.get_appname()) # Refresh services AppKit.NSUpdateDynamicServices() def register_contextual_menu(self): # Register the service that handle the right click self._register_services() def unregister_contextual_menu(self): # Specified in the Bundle plist pass def register_protocol_handlers(self): """Register the URL scheme listener using PyObjC""" try: from Foundation import NSBundle from LaunchServices import LSSetDefaultHandlerForURLScheme except ImportError: log.warning("Cannot register %r scheme: missing OSX Foundation module", self.NXDRIVE_SCHEME) return bundle_id = NSBundle.mainBundle().bundleIdentifier() if bundle_id == 'org.python.python': log.debug("Skipping URL scheme registration as this program " " was launched from the Python OSX app bundle") return LSSetDefaultHandlerForURLScheme(self.NXDRIVE_SCHEME, bundle_id) log.debug("Registered bundle '%s' for URL scheme '%s'", bundle_id, self.NXDRIVE_SCHEME) def unregister_protocol_handlers(self): # Dont unregister, should be removed when Bundle removed pass def is_partition_supported(self, folder): if folder is None: return False result = False to_delete = not 
os.path.exists(folder) try: if to_delete: os.mkdir(folder) if not os.access(folder, os.W_OK): import stat os.chmod(folder, stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IRUSR | stat.S_IWGRP | stat.S_IWUSR) import xattr attr = "drive-test" xattr.setxattr(folder, attr, attr) if xattr.getxattr(folder, attr) == attr: result = True xattr.removexattr(folder, attr) finally: try: if to_delete: os.rmdir(folder) except: pass return result def register_folder_link(self, folder_path, name=None): try: from LaunchServices import LSSharedFileListInsertItemURL from LaunchServices import kLSSharedFileListItemBeforeFirst from LaunchServices import CFURLCreateWithString except ImportError: log.warning("PyObjC package is not installed:" " skipping favorite link creation") return folder_path = normalized_path(folder_path) if name is None: name = self._manager.get_appname() lst = self._get_favorite_list() if lst is None: log.warning("Could not fetch the Finder favorite list.") return url = CFURLCreateWithString(None, "file://" + urllib2.quote(folder_path), None) if url is None: log.warning("Could not generate valid favorite URL for: %s", folder_path) return # Register the folder as favorite if not already there item = LSSharedFileListInsertItemURL( lst, kLSSharedFileListItemBeforeFirst, name, None, url, {}, []) if item is not None: log.debug("Registered new favorite in Finder for: %s", folder_path) def unregister_folder_link(self, name): try: from LaunchServices import LSSharedFileListItemRemove except ImportError: log.warning("PyObjC package is not installed:" " skipping favorite link creation") return if name is None: name = self._manager.get_appname() lst = self._get_favorite_list() if lst is None: log.warning("Could not fetch the Finder favorite list.") return item = self._find_item_in_list(lst, name) if item is None: log.warning("Unable to find the favorite list item") return LSSharedFileListItemRemove(lst, item) def _get_favorite_list(self): from LaunchServices import 
LSSharedFileListCreate from LaunchServices import kLSSharedFileListFavoriteItems return LSSharedFileListCreate(None, kLSSharedFileListFavoriteItems, None) def _find_item_in_list(self, lst, name): from LaunchServices import LSSharedFileListCopySnapshot from LaunchServices import LSSharedFileListItemCopyDisplayName for item in LSSharedFileListCopySnapshot(lst, None)[0]: if name == LSSharedFileListItemCopyDisplayName(item): return item return None
lgpl-2.1
farodin91/servo
tests/wpt/css-tests/tools/html5lib/html5lib/tests/test_treeadapters.py
451
1852
from __future__ import absolute_import, division, unicode_literals from . import support # flake8: noqa import html5lib from html5lib.treeadapters import sax from html5lib.treewalkers import getTreeWalker def test_to_sax(): handler = support.TracingSaxHandler() tree = html5lib.parse("""<html xml:lang="en"> <title>Directory Listing</title> <a href="/"><b/></p> """, treebuilder="etree") walker = getTreeWalker("etree") sax.to_sax(walker(tree), handler) expected = [ 'startDocument', ('startElementNS', ('http://www.w3.org/1999/xhtml', 'html'), 'html', {(None, 'xml:lang'): 'en'}), ('startElementNS', ('http://www.w3.org/1999/xhtml', 'head'), 'head', {}), ('startElementNS', ('http://www.w3.org/1999/xhtml', 'title'), 'title', {}), ('characters', 'Directory Listing'), ('endElementNS', ('http://www.w3.org/1999/xhtml', 'title'), 'title'), ('characters', '\n '), ('endElementNS', ('http://www.w3.org/1999/xhtml', 'head'), 'head'), ('startElementNS', ('http://www.w3.org/1999/xhtml', 'body'), 'body', {}), ('startElementNS', ('http://www.w3.org/1999/xhtml', 'a'), 'a', {(None, 'href'): '/'}), ('startElementNS', ('http://www.w3.org/1999/xhtml', 'b'), 'b', {}), ('startElementNS', ('http://www.w3.org/1999/xhtml', 'p'), 'p', {}), ('endElementNS', ('http://www.w3.org/1999/xhtml', 'p'), 'p'), ('characters', '\n '), ('endElementNS', ('http://www.w3.org/1999/xhtml', 'b'), 'b'), ('endElementNS', ('http://www.w3.org/1999/xhtml', 'a'), 'a'), ('endElementNS', ('http://www.w3.org/1999/xhtml', 'body'), 'body'), ('endElementNS', ('http://www.w3.org/1999/xhtml', 'html'), 'html'), 'endDocument', ] assert expected == handler.visited
mpl-2.0
E-lection/Booth
application.py
1
11264
# $ pip install --upgrade -r requirements.txt # $ python -m flask run from api_key_verification import BOOTH_KEY from flask import Flask, render_template, request, redirect, session from flask_wtf import FlaskForm as Form from forms import LoginForm from forms import PinForm from wtforms import StringField import flask_login from flask_login import LoginManager, UserMixin, \ login_required, login_user, logout_user import urllib, urllib2 import json import models as db from passlib.apps import custom_app_context as pwd_context import requests import string import random from Crypto.Cipher import PKCS1_OAEP from Crypto.PublicKey import RSA import base64 from random import shuffle application = Flask(__name__) application.config['TEMPLATES_AUTO_RELOAD'] = True application.secret_key = 'development key' login_manager = LoginManager() login_manager.init_app(application) login_manager.login_view = "login" # Booth User model class User(UserMixin): def __init__(self, id, username, station_id, vote_url, public_key): self.id = id self.station_id = station_id self.vote_url = vote_url self.public_key = public_key def __repr__(self): return "%s/%d" % (self.username, self.station_id) @application.template_filter('randomise') def randomise(s): return shuffle(s) # Displays login page for the clerk to set up the booth @application.route('/login', methods=['GET', 'POST']) def login(): form = LoginForm(request.form) # If someone has tried to log in if request.method == 'POST': username = request.form['username'] password = request.form['password'] valid_user = get_valid_user(username, password) if valid_user: user = User(valid_user[0], username, valid_user[1], valid_user[2], valid_user[3]) login_user(user) session['candidates_json'] = None session['voter_active'] = False session['voted_candidate'] = None session['vote_sent'] = False session['cancel'] = False return redirect('') else: return render_template('login.html', error_message="Login unsuccessful.", form=form) return 
render_template('login.html', form=form) # Checks user user_id and station_id for that usename and password def get_valid_user(username, password): users = db.retrieveUsers() user_id = -1 station_id = -1 for user in users: if user[1] == username: password_hash = user[2] if pwd_context.verify(password, password_hash): user_id = user[0] station_id = user[3] vote_url = user[4] public_key = user[5] return (user_id, station_id, vote_url, public_key) break #TODO: Error if there is no user matching return None @application.route("/logout") @login_required def logout(): logout_user() return redirect('/login') # handle login failed @application.errorhandler(401) def page_not_found(e): return render_template('login.html', error_message="Login unsuccessful.", form=form) # callback to reload the user object @login_manager.user_loader def load_user(userid): users = db.retrieveUsers() users_with_id = filter(lambda x: x[0] == int(userid), users) if users_with_id: user = users_with_id[0] return User(user[0], user[1], user[3], user[4], user[5]) else: return None # Once logged in successfully, use booth app to vote @application.route('/', methods=['GET']) @login_required def enter_pin(): if session['voter_active']: return redirect('/cast-vote') form = PinForm(request.form) return render_template('enter_pin.html', form=form, not_banner=True) @application.route('/', methods=['POST']) @login_required def verify_pin(): if session['voter_active']: return redirect('/cast-vote') form = PinForm(request.form) if form.validate_on_submit(): pin = request.form['voterpin'] session['voterpin'] = pin papiResponse = getPapiResponse(pin) success = papiResponse['valid_pin'] if success: # matching entry found voted = papiResponse['already_voted'] if voted: return render_template('enter_pin.html', error_message="You've already voted. 
PIN already used", form=form, not_banner=True) else: session['voter_active'] = True session['vote_sent'] = False session['candidates_json'] = None session['voting_error'] = None return redirect('/cast-vote') else: # no matching entry in database, try again return render_template('enter_pin.html', error_message="Invalid Voter PIN", form=form, not_banner=True) return render_template('enter_pin.html', error_message="Invalid voter pin entered", form=form, not_banner=True) # Checks if the voter is logged in and loads candidate options @application.route('/cast-vote', methods=['GET']) @login_required def choose_candidate(): if not session['voter_active']: return redirect('') else: if not session['candidates_json']: session['candidates_json'] = getCandidatesJson() if len(session['candidates_json']['candidates']) == 0: session['voter_active'] = False form = PinForm(request.form) return render_template('enter_pin.html', error_message="No running candidates found for this constituency.", form=form, not_banner=True) return render_template('cast_vote.html', candidates=session['candidates_json']['candidates']) @application.route('/cast-vote', methods=['POST']) @login_required def cast_vote(): if session['voter_active']: candidate_id = int(request.json['candidate_id']) if candidate_id == 0: session['voted_candidate'] = 'SPOILT' elif candidate_id == -1: # session['candidates_json'] = None session['voted_candidate'] = 'CANCEL' else: session['voted_candidate'] = getCandidateWithPK(candidate_id, session['candidates_json']['candidates']) return 'OK' else: return redirect('') @application.route('/confirm-vote', methods=['GET']) @login_required def show_candidate(): if session['voter_active'] and session['voted_candidate']: return render_template('confirm_vote.html', candidate=session['voted_candidate']) else: return redirect('') @application.route('/confirm-vote', methods=['POST']) @login_required def confirm_vote(): confirm = int(request.json['confirm']) if confirm and 
session['voter_active'] and session['voted_candidate']: if session['voted_candidate'] == 'CANCEL': session['cancel'] = True session['voter_active'] = False session['voted_candidate'] = None return 'OK' # TODO: What do we send in case of spoilt ballot resultsResp = sendEncryptedVote(session['voted_candidate'], flask_login.current_user.vote_url, flask_login.current_user.public_key, session['voterpin'], flask_login.current_user.station_id) if resultsResp: if resultsResp['success']: # Voting successful session['vote_sent'] = True else: session['voting_error'] = resultsResp['error'] session['voter_active'] = False session['voted_candidate'] = None else: session['vote_sent'] = False return 'OK' @application.route('/youve-voted') @login_required def youve_voted(): if session['cancel']: session['cancel'] = False form = PinForm(request.form) return render_template('enter_pin.html', form=form, not_banner=True) if session['voting_error']: error_message = session['voting_error'] session['voting_error'] = None form = PinForm(request.form) return render_template('enter_pin.html', error_message=error_message, form=form, not_banner=True) # Voting unsuccessful, retry (should redirect to enter pin?), we have the voted_candidate with us though if session['voter_active'] and session['voted_candidate'] and (not session['vote_sent']): return redirect('/cast-vote') session['vote_sent'] = False return render_template('youve_voted.html') # Gets PAPI resposne for a voter pin def getPapiResponse(pin): station_id = "/station_id/" + urllib.quote(str(flask_login.current_user.station_id)) pin = "/pin_code/" + urllib.quote(pin) url = "http://pins.eelection.co.uk/verify_pin_code_and_check_eligibility"+station_id+pin try: request = urllib2.Request(url) request.add_header("Authorization", BOOTH_KEY) dbresult = urllib2.urlopen(request).read() except: return None return json.loads(dbresult) # Gets the list of candidates for that station def createCandidatesURL(): station_id = "/" + 
urllib.quote(str(flask_login.current_user.station_id)) url = "http://voting.eelection.co.uk/get_candidates"+station_id return url # Sets candidates_json to the correct stuff for that station def getCandidatesJson(): request = urllib2.Request(createCandidatesURL()) request.add_header("Authorization", BOOTH_KEY); dbresult = urllib2.urlopen(request).read() resultjson = json.loads(dbresult) return resultjson def sendVote(voted_candidate): url = "http://results.eelection.co.uk/vote/" response = requests.post(url=url, data=json.dumps(voted_candidate), headers={'Authorization': BOOTH_KEY}) if response.status_code==200: resultJson = json.loads(response.text) return resultJson else: # Coudn't contact results server return None def sendEncryptedVote(voted_candidate, vote_url, public_key, voter_pin, station_id): secret = id_generator() vote_url += 'vote_encrypted/' voted_candidate['secret'] = secret vote = json.dumps(voted_candidate) public_key = public_key.replace('\\n', '\n') key = RSA.importKey(public_key) cipher = PKCS1_OAEP.new(key) encrypted_vote = base64.b64encode(cipher.encrypt(vote)) data = {'pin_code': voter_pin, 'station_id': station_id, 'encrypted_vote': encrypted_vote} response = requests.post(url=vote_url, data=json.dumps(data), headers={'Authorization': BOOTH_KEY}) if response.status_code==200: resultJson = json.loads(response.text) return resultJson else: # Coudn't contact results server return None def getCandidateWithPK(pk, candidates): global candidates_json for candidate in candidates: if candidate['pk'] == pk: return candidate['fields'] return None def id_generator(size=4, chars=string.ascii_uppercase + string.digits): return ''.join(random.choice(chars) for _ in range(size)) if __name__ == "__main__": # Setting debug to True enables debug output. This line should be # removed before deploying a production app. application.debug = True application.run()
gpl-3.0
leansoft/edx-platform
pavelib/utils/test/utils.py
74
2717
""" Helper functions for test tasks """ from paver.easy import sh, task, cmdopts from pavelib.utils.envs import Env import os import subprocess MONGO_PORT_NUM = int(os.environ.get('EDXAPP_TEST_MONGO_PORT', '27017')) MONGO_HOST = os.environ.get('EDXAPP_TEST_MONGO_HOST', 'localhost') __test__ = False # do not collect @task def clean_test_files(): """ Clean fixture files used by tests and .pyc files """ sh("git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads") sh("find . -type f -name \"*.pyc\" -not -path './.git/*' -delete") sh("rm -rf test_root/log/auto_screenshots/*") sh("rm -rf /tmp/mako_[cl]ms") def clean_dir(directory): """ Clean coverage files, to ensure that we don't use stale data to generate reports. """ # We delete the files but preserve the directory structure # so that coverage.py has a place to put the reports. sh('find {dir} -type f -delete'.format(dir=directory)) @task @cmdopts([ ('skip_clean', 'C', 'skip cleaning repository before running tests'), ]) def clean_reports_dir(options): """ Clean coverage files, to ensure that we don't use stale data to generate reports. """ if getattr(options, 'skip_clean', False): print('--skip_clean is set, skipping...') return # We delete the files but preserve the directory structure # so that coverage.py has a place to put the reports. reports_dir = Env.REPORT_DIR.makedirs_p() clean_dir(reports_dir) @task def clean_mongo(): """ Clean mongo test databases """ sh("mongo {host}:{port} {repo_root}/scripts/delete-mongo-test-dbs.js".format( host=MONGO_HOST, port=MONGO_PORT_NUM, repo_root=Env.REPO_ROOT, )) def check_firefox_version(): """ Check that firefox is the correct version. 
""" expected_firefox_ver = "Mozilla Firefox 28.0" firefox_ver = subprocess.check_output("firefox --version", shell=True).strip() if firefox_ver != expected_firefox_ver: raise Exception( 'Required firefox version not found.\n' 'Expected: {expected_version}; Actual: {actual_version}.\n\n' 'As the vagrant user in devstack, run the following:\n\n' '\t$ sudo wget -O /tmp/firefox_28.deb https://s3.amazonaws.com/vagrant.testeng.edx.org/firefox_28.0%2Bbuild2-0ubuntu0.12.04.1_amd64.deb\n' '\t$ sudo apt-get remove firefox\n\n' '\t$ sudo gdebi -nq /tmp/firefox_28.deb\n\n' 'Confirm the new version:\n' '\t$ firefox --version\n' '\t{expected_version}'.format(actual_version=firefox_ver, expected_version=expected_firefox_ver) )
agpl-3.0
newerthcom/savagerebirth
libs/python-2.72/Lib/plat-mac/Carbon/Sound.py
81
13258
# Generated from 'Sound.h' def FOUR_CHAR_CODE(x): return x soundListRsrc = FOUR_CHAR_CODE('snd ') kSimpleBeepID = 1 # rate48khz = (long)0xBB800000 # rate44khz = (long)0xAC440000 rate32khz = 0x7D000000 rate22050hz = 0x56220000 rate22khz = 0x56EE8BA3 rate16khz = 0x3E800000 rate11khz = 0x2B7745D1 rate11025hz = 0x2B110000 rate8khz = 0x1F400000 sampledSynth = 5 squareWaveSynth = 1 waveTableSynth = 3 MACE3snthID = 11 MACE6snthID = 13 kMiddleC = 60 kNoVolume = 0 kFullVolume = 0x0100 stdQLength = 128 dataOffsetFlag = 0x8000 kUseOptionalOutputDevice = -1 notCompressed = 0 fixedCompression = -1 variableCompression = -2 twoToOne = 1 eightToThree = 2 threeToOne = 3 sixToOne = 4 sixToOnePacketSize = 8 threeToOnePacketSize = 16 stateBlockSize = 64 leftOverBlockSize = 32 firstSoundFormat = 0x0001 secondSoundFormat = 0x0002 dbBufferReady = 0x00000001 dbLastBuffer = 0x00000004 sysBeepDisable = 0x0000 sysBeepEnable = (1 << 0) sysBeepSynchronous = (1 << 1) unitTypeNoSelection = 0xFFFF unitTypeSeconds = 0x0000 stdSH = 0x00 extSH = 0xFF cmpSH = 0xFE nullCmd = 0 quietCmd = 3 flushCmd = 4 reInitCmd = 5 waitCmd = 10 pauseCmd = 11 resumeCmd = 12 callBackCmd = 13 syncCmd = 14 availableCmd = 24 versionCmd = 25 volumeCmd = 46 getVolumeCmd = 47 clockComponentCmd = 50 getClockComponentCmd = 51 scheduledSoundCmd = 52 linkSoundComponentsCmd = 53 soundCmd = 80 bufferCmd = 81 rateMultiplierCmd = 86 getRateMultiplierCmd = 87 initCmd = 1 freeCmd = 2 totalLoadCmd = 26 loadCmd = 27 freqDurationCmd = 40 restCmd = 41 freqCmd = 42 ampCmd = 43 timbreCmd = 44 getAmpCmd = 45 waveTableCmd = 60 phaseCmd = 61 rateCmd = 82 continueCmd = 83 doubleBufferCmd = 84 getRateCmd = 85 sizeCmd = 90 convertCmd = 91 waveInitChannelMask = 0x07 waveInitChannel0 = 0x04 waveInitChannel1 = 0x05 waveInitChannel2 = 0x06 waveInitChannel3 = 0x07 initChan0 = waveInitChannel0 initChan1 = waveInitChannel1 initChan2 = waveInitChannel2 initChan3 = waveInitChannel3 outsideCmpSH = 0 insideCmpSH = 1 aceSuccess = 0 aceMemFull = 1 aceNilBlock 
= 2 aceBadComp = 3 aceBadEncode = 4 aceBadDest = 5 aceBadCmd = 6 initChanLeft = 0x0002 initChanRight = 0x0003 initNoInterp = 0x0004 initNoDrop = 0x0008 initMono = 0x0080 initStereo = 0x00C0 initMACE3 = 0x0300 initMACE6 = 0x0400 initPanMask = 0x0003 initSRateMask = 0x0030 initStereoMask = 0x00C0 initCompMask = 0xFF00 siActiveChannels = FOUR_CHAR_CODE('chac') siActiveLevels = FOUR_CHAR_CODE('lmac') siAGCOnOff = FOUR_CHAR_CODE('agc ') siAsync = FOUR_CHAR_CODE('asyn') siAVDisplayBehavior = FOUR_CHAR_CODE('avdb') siChannelAvailable = FOUR_CHAR_CODE('chav') siCompressionAvailable = FOUR_CHAR_CODE('cmav') siCompressionChannels = FOUR_CHAR_CODE('cpct') siCompressionFactor = FOUR_CHAR_CODE('cmfa') siCompressionHeader = FOUR_CHAR_CODE('cmhd') siCompressionNames = FOUR_CHAR_CODE('cnam') siCompressionParams = FOUR_CHAR_CODE('evaw') siCompressionSampleRate = FOUR_CHAR_CODE('cprt') siCompressionType = FOUR_CHAR_CODE('comp') siContinuous = FOUR_CHAR_CODE('cont') siDecompressionParams = FOUR_CHAR_CODE('wave') siDeviceBufferInfo = FOUR_CHAR_CODE('dbin') siDeviceConnected = FOUR_CHAR_CODE('dcon') siDeviceIcon = FOUR_CHAR_CODE('icon') siDeviceName = FOUR_CHAR_CODE('name') siEQSpectrumBands = FOUR_CHAR_CODE('eqsb') siEQSpectrumLevels = FOUR_CHAR_CODE('eqlv') siEQSpectrumOnOff = FOUR_CHAR_CODE('eqlo') siEQSpectrumResolution = FOUR_CHAR_CODE('eqrs') siEQToneControlGain = FOUR_CHAR_CODE('eqtg') siEQToneControlOnOff = FOUR_CHAR_CODE('eqtc') siHardwareBalance = FOUR_CHAR_CODE('hbal') siHardwareBalanceSteps = FOUR_CHAR_CODE('hbls') siHardwareBass = FOUR_CHAR_CODE('hbas') siHardwareBassSteps = FOUR_CHAR_CODE('hbst') siHardwareBusy = FOUR_CHAR_CODE('hwbs') siHardwareFormat = FOUR_CHAR_CODE('hwfm') siHardwareMute = FOUR_CHAR_CODE('hmut') siHardwareMuteNoPrefs = FOUR_CHAR_CODE('hmnp') siHardwareTreble = FOUR_CHAR_CODE('htrb') siHardwareTrebleSteps = FOUR_CHAR_CODE('hwts') siHardwareVolume = FOUR_CHAR_CODE('hvol') siHardwareVolumeSteps = FOUR_CHAR_CODE('hstp') siHeadphoneMute = 
FOUR_CHAR_CODE('pmut') siHeadphoneVolume = FOUR_CHAR_CODE('pvol') siHeadphoneVolumeSteps = FOUR_CHAR_CODE('hdst') siInputAvailable = FOUR_CHAR_CODE('inav') siInputGain = FOUR_CHAR_CODE('gain') siInputSource = FOUR_CHAR_CODE('sour') siInputSourceNames = FOUR_CHAR_CODE('snam') siLevelMeterOnOff = FOUR_CHAR_CODE('lmet') siModemGain = FOUR_CHAR_CODE('mgai') siMonitorAvailable = FOUR_CHAR_CODE('mnav') siMonitorSource = FOUR_CHAR_CODE('mons') siNumberChannels = FOUR_CHAR_CODE('chan') siOptionsDialog = FOUR_CHAR_CODE('optd') siOSTypeInputSource = FOUR_CHAR_CODE('inpt') siOSTypeInputAvailable = FOUR_CHAR_CODE('inav') siOutputDeviceName = FOUR_CHAR_CODE('onam') siPlayThruOnOff = FOUR_CHAR_CODE('plth') siPostMixerSoundComponent = FOUR_CHAR_CODE('psmx') siPreMixerSoundComponent = FOUR_CHAR_CODE('prmx') siQuality = FOUR_CHAR_CODE('qual') siRateMultiplier = FOUR_CHAR_CODE('rmul') siRecordingQuality = FOUR_CHAR_CODE('qual') siSampleRate = FOUR_CHAR_CODE('srat') siSampleRateAvailable = FOUR_CHAR_CODE('srav') siSampleSize = FOUR_CHAR_CODE('ssiz') siSampleSizeAvailable = FOUR_CHAR_CODE('ssav') siSetupCDAudio = FOUR_CHAR_CODE('sucd') siSetupModemAudio = FOUR_CHAR_CODE('sumd') siSlopeAndIntercept = FOUR_CHAR_CODE('flap') siSoundClock = FOUR_CHAR_CODE('sclk') siUseThisSoundClock = FOUR_CHAR_CODE('sclc') siSpeakerMute = FOUR_CHAR_CODE('smut') siSpeakerVolume = FOUR_CHAR_CODE('svol') siSSpCPULoadLimit = FOUR_CHAR_CODE('3dll') siSSpLocalization = FOUR_CHAR_CODE('3dif') siSSpSpeakerSetup = FOUR_CHAR_CODE('3dst') siStereoInputGain = FOUR_CHAR_CODE('sgai') siSubwooferMute = FOUR_CHAR_CODE('bmut') siTerminalType = FOUR_CHAR_CODE('ttyp') siTwosComplementOnOff = FOUR_CHAR_CODE('twos') siVendorProduct = FOUR_CHAR_CODE('vpro') siVolume = FOUR_CHAR_CODE('volu') siVoxRecordInfo = FOUR_CHAR_CODE('voxr') siVoxStopInfo = FOUR_CHAR_CODE('voxs') siWideStereo = FOUR_CHAR_CODE('wide') siSupportedExtendedFlags = FOUR_CHAR_CODE('exfl') siRateConverterRollOffSlope = FOUR_CHAR_CODE('rcdb') siOutputLatency = 
FOUR_CHAR_CODE('olte') siCloseDriver = FOUR_CHAR_CODE('clos') siInitializeDriver = FOUR_CHAR_CODE('init') siPauseRecording = FOUR_CHAR_CODE('paus') siUserInterruptProc = FOUR_CHAR_CODE('user') # kInvalidSource = (long)0xFFFFFFFF kNoSource = FOUR_CHAR_CODE('none') kCDSource = FOUR_CHAR_CODE('cd ') kExtMicSource = FOUR_CHAR_CODE('emic') kSoundInSource = FOUR_CHAR_CODE('sinj') kRCAInSource = FOUR_CHAR_CODE('irca') kTVFMTunerSource = FOUR_CHAR_CODE('tvfm') kDAVInSource = FOUR_CHAR_CODE('idav') kIntMicSource = FOUR_CHAR_CODE('imic') kMediaBaySource = FOUR_CHAR_CODE('mbay') kModemSource = FOUR_CHAR_CODE('modm') kPCCardSource = FOUR_CHAR_CODE('pcm ') kZoomVideoSource = FOUR_CHAR_CODE('zvpc') kDVDSource = FOUR_CHAR_CODE('dvda') kMicrophoneArray = FOUR_CHAR_CODE('mica') kNoSoundComponentType = FOUR_CHAR_CODE('****') kSoundComponentType = FOUR_CHAR_CODE('sift') kSoundComponentPPCType = FOUR_CHAR_CODE('nift') kRate8SubType = FOUR_CHAR_CODE('ratb') kRate16SubType = FOUR_CHAR_CODE('ratw') kConverterSubType = FOUR_CHAR_CODE('conv') kSndSourceSubType = FOUR_CHAR_CODE('sour') kMixerType = FOUR_CHAR_CODE('mixr') kMixer8SubType = FOUR_CHAR_CODE('mixb') kMixer16SubType = FOUR_CHAR_CODE('mixw') kSoundInputDeviceType = FOUR_CHAR_CODE('sinp') kWaveInSubType = FOUR_CHAR_CODE('wavi') kWaveInSnifferSubType = FOUR_CHAR_CODE('wisn') kSoundOutputDeviceType = FOUR_CHAR_CODE('sdev') kClassicSubType = FOUR_CHAR_CODE('clas') kASCSubType = FOUR_CHAR_CODE('asc ') kDSPSubType = FOUR_CHAR_CODE('dsp ') kAwacsSubType = FOUR_CHAR_CODE('awac') kGCAwacsSubType = FOUR_CHAR_CODE('awgc') kSingerSubType = FOUR_CHAR_CODE('sing') kSinger2SubType = FOUR_CHAR_CODE('sng2') kWhitSubType = FOUR_CHAR_CODE('whit') kSoundBlasterSubType = FOUR_CHAR_CODE('sbls') kWaveOutSubType = FOUR_CHAR_CODE('wavo') kWaveOutSnifferSubType = FOUR_CHAR_CODE('wosn') kDirectSoundSubType = FOUR_CHAR_CODE('dsnd') kDirectSoundSnifferSubType = FOUR_CHAR_CODE('dssn') kUNIXsdevSubType = FOUR_CHAR_CODE('un1x') kUSBSubType = FOUR_CHAR_CODE('usb 
') kBlueBoxSubType = FOUR_CHAR_CODE('bsnd') kSoundCompressor = FOUR_CHAR_CODE('scom') kSoundDecompressor = FOUR_CHAR_CODE('sdec') kAudioComponentType = FOUR_CHAR_CODE('adio') kAwacsPhoneSubType = FOUR_CHAR_CODE('hphn') kAudioVisionSpeakerSubType = FOUR_CHAR_CODE('telc') kAudioVisionHeadphoneSubType = FOUR_CHAR_CODE('telh') kPhilipsFaderSubType = FOUR_CHAR_CODE('tvav') kSGSToneSubType = FOUR_CHAR_CODE('sgs0') kSoundEffectsType = FOUR_CHAR_CODE('snfx') kEqualizerSubType = FOUR_CHAR_CODE('eqal') kSSpLocalizationSubType = FOUR_CHAR_CODE('snd3') kSoundNotCompressed = FOUR_CHAR_CODE('NONE') k8BitOffsetBinaryFormat = FOUR_CHAR_CODE('raw ') k16BitBigEndianFormat = FOUR_CHAR_CODE('twos') k16BitLittleEndianFormat = FOUR_CHAR_CODE('sowt') kFloat32Format = FOUR_CHAR_CODE('fl32') kFloat64Format = FOUR_CHAR_CODE('fl64') k24BitFormat = FOUR_CHAR_CODE('in24') k32BitFormat = FOUR_CHAR_CODE('in32') k32BitLittleEndianFormat = FOUR_CHAR_CODE('23ni') kMACE3Compression = FOUR_CHAR_CODE('MAC3') kMACE6Compression = FOUR_CHAR_CODE('MAC6') kCDXA4Compression = FOUR_CHAR_CODE('cdx4') kCDXA2Compression = FOUR_CHAR_CODE('cdx2') kIMACompression = FOUR_CHAR_CODE('ima4') kULawCompression = FOUR_CHAR_CODE('ulaw') kALawCompression = FOUR_CHAR_CODE('alaw') kMicrosoftADPCMFormat = 0x6D730002 kDVIIntelIMAFormat = 0x6D730011 kDVAudioFormat = FOUR_CHAR_CODE('dvca') kQDesignCompression = FOUR_CHAR_CODE('QDMC') kQDesign2Compression = FOUR_CHAR_CODE('QDM2') kQUALCOMMCompression = FOUR_CHAR_CODE('Qclp') kOffsetBinary = k8BitOffsetBinaryFormat kTwosComplement = k16BitBigEndianFormat kLittleEndianFormat = k16BitLittleEndianFormat kMPEGLayer3Format = 0x6D730055 kFullMPEGLay3Format = FOUR_CHAR_CODE('.mp3') k16BitNativeEndianFormat = k16BitLittleEndianFormat k16BitNonNativeEndianFormat = k16BitBigEndianFormat k16BitNativeEndianFormat = k16BitBigEndianFormat k16BitNonNativeEndianFormat = k16BitLittleEndianFormat k8BitRawIn = (1 << 0) k8BitTwosIn = (1 << 1) k16BitIn = (1 << 2) kStereoIn = (1 << 3) k8BitRawOut = (1 
<< 8) k8BitTwosOut = (1 << 9) k16BitOut = (1 << 10) kStereoOut = (1 << 11) kReverse = (1L << 16) kRateConvert = (1L << 17) kCreateSoundSource = (1L << 18) kVMAwareness = (1L << 21) kHighQuality = (1L << 22) kNonRealTime = (1L << 23) kSourcePaused = (1 << 0) kPassThrough = (1L << 16) kNoSoundComponentChain = (1L << 17) kNoMixing = (1 << 0) kNoSampleRateConversion = (1 << 1) kNoSampleSizeConversion = (1 << 2) kNoSampleFormatConversion = (1 << 3) kNoChannelConversion = (1 << 4) kNoDecompression = (1 << 5) kNoVolumeConversion = (1 << 6) kNoRealtimeProcessing = (1 << 7) kScheduledSource = (1 << 8) kNonInterleavedBuffer = (1 << 9) kNonPagingMixer = (1 << 10) kSoundConverterMixer = (1 << 11) kPagingMixer = (1 << 12) kVMAwareMixer = (1 << 13) kExtendedSoundData = (1 << 14) kBestQuality = (1 << 0) kInputMask = 0x000000FF kOutputMask = 0x0000FF00 kOutputShift = 8 kActionMask = 0x00FF0000 kSoundComponentBits = 0x00FFFFFF kAudioFormatAtomType = FOUR_CHAR_CODE('frma') kAudioEndianAtomType = FOUR_CHAR_CODE('enda') kAudioVBRAtomType = FOUR_CHAR_CODE('vbra') kAudioTerminatorAtomType = 0 kAVDisplayHeadphoneRemove = 0 kAVDisplayHeadphoneInsert = 1 kAVDisplayPlainTalkRemove = 2 kAVDisplayPlainTalkInsert = 3 audioAllChannels = 0 audioLeftChannel = 1 audioRightChannel = 2 audioUnmuted = 0 audioMuted = 1 audioDoesMono = (1L << 0) audioDoesStereo = (1L << 1) audioDoesIndependentChannels = (1L << 2) siCDQuality = FOUR_CHAR_CODE('cd ') siBestQuality = FOUR_CHAR_CODE('best') siBetterQuality = FOUR_CHAR_CODE('betr') siGoodQuality = FOUR_CHAR_CODE('good') siNoneQuality = FOUR_CHAR_CODE('none') siDeviceIsConnected = 1 siDeviceNotConnected = 0 siDontKnowIfConnected = -1 siReadPermission = 0 siWritePermission = 1 kSoundConverterDidntFillBuffer = (1 << 0) kSoundConverterHasLeftOverData = (1 << 1) kExtendedSoundSampleCountNotValid = 1L << 0 kExtendedSoundBufferSizeValid = 1L << 1 kScheduledSoundDoScheduled = 1 << 0 kScheduledSoundDoCallBack = 1 << 1 kScheduledSoundExtendedHdr = 1 << 2 
kSoundComponentInitOutputDeviceSelect = 0x0001 kSoundComponentSetSourceSelect = 0x0002 kSoundComponentGetSourceSelect = 0x0003 kSoundComponentGetSourceDataSelect = 0x0004 kSoundComponentSetOutputSelect = 0x0005 kSoundComponentAddSourceSelect = 0x0101 kSoundComponentRemoveSourceSelect = 0x0102 kSoundComponentGetInfoSelect = 0x0103 kSoundComponentSetInfoSelect = 0x0104 kSoundComponentStartSourceSelect = 0x0105 kSoundComponentStopSourceSelect = 0x0106 kSoundComponentPauseSourceSelect = 0x0107 kSoundComponentPlaySourceBufferSelect = 0x0108 kAudioGetVolumeSelect = 0x0000 kAudioSetVolumeSelect = 0x0001 kAudioGetMuteSelect = 0x0002 kAudioSetMuteSelect = 0x0003 kAudioSetToDefaultsSelect = 0x0004 kAudioGetInfoSelect = 0x0005 kAudioGetBassSelect = 0x0006 kAudioSetBassSelect = 0x0007 kAudioGetTrebleSelect = 0x0008 kAudioSetTrebleSelect = 0x0009 kAudioGetOutputDeviceSelect = 0x000A kAudioMuteOnEventSelect = 0x0081 kDelegatedSoundComponentSelectors = 0x0100 kSndInputReadAsyncSelect = 0x0001 kSndInputReadSyncSelect = 0x0002 kSndInputPauseRecordingSelect = 0x0003 kSndInputResumeRecordingSelect = 0x0004 kSndInputStopRecordingSelect = 0x0005 kSndInputGetStatusSelect = 0x0006 kSndInputGetDeviceInfoSelect = 0x0007 kSndInputSetDeviceInfoSelect = 0x0008 kSndInputInitHardwareSelect = 0x0009
gpl-2.0
xinwu/horizon
openstack_dashboard/contrib/sahara/content/data_processing/nodegroup_templates/workflows/copy.py
12
4414
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from django.utils.translation import ugettext_lazy as _

from horizon import exceptions

from openstack_dashboard.contrib.sahara.api import sahara as saharaclient

import openstack_dashboard.contrib.sahara.content.data_processing. \
    nodegroup_templates.workflows.create as create_flow

LOG = logging.getLogger(__name__)


class CopyNodegroupTemplate(create_flow.ConfigureNodegroupTemplate):
    """Workflow that pre-fills the "create node group template" form with
    the values of an existing template, so the user can save a copy.

    The copy keeps the source template's plugin/version, flavor, storage,
    security and process selections; only the name gets a "-copy" suffix.
    """

    success_message = _("Node Group Template copy %s created")

    def __init__(self, request, context_seed, entry_point, *args, **kwargs):
        # context_seed["template_id"] identifies the template being copied.
        self.template_id = context_seed["template_id"]
        self.template = saharaclient.nodegroup_template_get(request,
                                                            self.template_id)
        self._set_configs_to_copy(self.template.node_configs)

        plugin = self.template.plugin_name
        hadoop_version = self.template.hadoop_version

        # The parent workflow reads plugin_name/hadoop_version from the GET
        # parameters, so they must be injected BEFORE super().__init__ runs.
        # request.GET is immutable by default; copy it first.
        request.GET = request.GET.copy()
        request.GET.update(
            {"plugin_name": plugin, "hadoop_version": hadoop_version})

        super(CopyNodegroupTemplate, self).__init__(request, context_seed,
                                                    entry_point, *args,
                                                    **kwargs)

        # Locate the form fields of the general and security steps built by
        # the parent workflow so their initial values can be overridden.
        g_fields = None
        s_fields = None
        for step in self.steps:
            if isinstance(step, create_flow.GeneralConfig):
                g_fields = step.action.fields
            if isinstance(step, create_flow.SecurityConfig):
                s_fields = step.action.fields

        g_fields["nodegroup_name"].initial = self.template.name + "-copy"
        g_fields["description"].initial = self.template.description
        g_fields["flavor"].initial = self.template.flavor_id

        # Availability zone attributes are optional on older templates, so
        # both assignments must stay behind hasattr() guards.
        if hasattr(self.template, "availability_zone"):
            g_fields["availability_zone"].initial = (
                self.template.availability_zone)

        if hasattr(self.template, "volumes_availability_zone"):
            g_fields["volumes_availability_zone"].initial = \
                self.template.volumes_availability_zone

        storage = "cinder_volume" if self.template.volumes_per_node > 0 \
            else "ephemeral_drive"
        volumes_per_node = self.template.volumes_per_node
        volumes_size = self.template.volumes_size
        g_fields["storage"].initial = storage
        g_fields["volumes_per_node"].initial = volumes_per_node
        g_fields["volumes_size"].initial = volumes_size
        # BUG FIX: a second, unguarded assignment of
        # g_fields["volumes_availability_zone"].initial used to follow here,
        # defeating the hasattr() guard above and raising AttributeError for
        # templates without the attribute. The guarded assignment already
        # covers it, so the duplicate was removed.

        if self.template.floating_ip_pool:
            g_fields['floating_ip_pool'].initial = (
                self.template.floating_ip_pool)

        s_fields["security_autogroup"].initial = (
            self.template.auto_security_group)
        if self.template.security_groups:
            s_fields["security_groups"].initial = dict(
                [(sg, sg) for sg in self.template.security_groups])

        # Processes are keyed as "service:process" in the form; resolve the
        # owning service for each process from the plugin metadata.
        processes_dict = dict()
        try:
            plugin_details = saharaclient.plugin_get_version_details(
                request,
                plugin,
                hadoop_version)
            plugin_node_processes = plugin_details.node_processes
        except Exception:
            plugin_node_processes = dict()
            exceptions.handle(request,
                              _("Unable to fetch plugin details."))
        for process in self.template.node_processes:
            # need to know the service
            _service = None
            for service, processes in plugin_node_processes.items():
                if process in processes:
                    _service = service
                    break
            processes_dict["%s:%s" % (_service, process)] = process
        g_fields["processes"].initial = processes_dict
apache-2.0
jcoady9/youtube-dl
youtube_dl/extractor/nationalgeographic.py
8
6390
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from .adobepass import AdobePassIE
from ..utils import (
    smuggle_url,
    url_basename,
    update_url_query,
    get_element_by_class,
)


class NationalGeographicVideoIE(InfoExtractor):
    """Extractor for single videos on video.nationalgeographic.com.

    Scrapes the page for a ThePlatform media GUID and delegates the actual
    download to the ThePlatform extractor via a url_transparent result.
    """
    IE_NAME = 'natgeo:video'
    _VALID_URL = r'https?://video\.nationalgeographic\.com/.*?'

    _TESTS = [
        {
            'url': 'http://video.nationalgeographic.com/video/news/150210-news-crab-mating-vin?source=featuredvideo',
            'md5': '730855d559abbad6b42c2be1fa584917',
            'info_dict': {
                'id': '0000014b-70a1-dd8c-af7f-f7b559330001',
                'ext': 'mp4',
                'title': 'Mating Crabs Busted by Sharks',
                'description': 'md5:16f25aeffdeba55aaa8ec37e093ad8b3',
                'timestamp': 1423523799,
                'upload_date': '20150209',
                'uploader': 'NAGS',
            },
            'add_ie': ['ThePlatform'],
        },
        {
            'url': 'http://video.nationalgeographic.com/wild/when-sharks-attack/the-real-jaws',
            'md5': '6a3105eb448c070503b3105fb9b320b5',
            'info_dict': {
                'id': 'ngc-I0IauNSWznb_UV008GxSbwY35BZvgi2e',
                'ext': 'mp4',
                'title': 'The Real Jaws',
                'description': 'md5:8d3e09d9d53a85cd397b4b21b2c77be6',
                'timestamp': 1433772632,
                'upload_date': '20150608',
                'uploader': 'NAGS',
            },
            'add_ie': ['ThePlatform'],
        },
    ]

    def _real_extract(self, url):
        # The last path component is only used as a display id for logging.
        name = url_basename(url)

        webpage = self._download_webpage(url, name)
        # The GUID lives in a data attribute of the player container element.
        guid = self._search_regex(
            r'id="(?:videoPlayer|player-container)"[^>]+data-guid="([^"]+)"',
            webpage, 'guid')

        return {
            '_type': 'url_transparent',
            'ie_key': 'ThePlatform',
            # smuggle force_smil_url so ThePlatformIE treats this as a SMIL feed.
            'url': smuggle_url(
                'http://link.theplatform.com/s/ngs/media/guid/2423130747/%s?mbr=true' % guid,
                {'force_smil_url': True}),
            'id': guid,
        }


class NationalGeographicIE(AdobePassIE):
    """Extractor for episode/video pages on channel.nationalgeographic.com.

    Some videos are gated behind a TV-provider login; those are handled via
    the AdobePass authentication flow before delegating to ThePlatform.
    """
    IE_NAME = 'natgeo'
    _VALID_URL = r'https?://channel\.nationalgeographic\.com/(?:wild/)?[^/]+/(?:videos|episodes)/(?P<id>[^/?]+)'

    _TESTS = [
        {
            'url': 'http://channel.nationalgeographic.com/the-story-of-god-with-morgan-freeman/videos/uncovering-a-universal-knowledge/',
            'md5': '518c9aa655686cf81493af5cc21e2a04',
            'info_dict': {
                'id': 'vKInpacll2pC',
                'ext': 'mp4',
                'title': 'Uncovering a Universal Knowledge',
                'description': 'md5:1a89148475bf931b3661fcd6ddb2ae3a',
                'timestamp': 1458680907,
                'upload_date': '20160322',
                'uploader': 'NEWA-FNG-NGTV',
            },
            'add_ie': ['ThePlatform'],
        },
        {
            'url': 'http://channel.nationalgeographic.com/wild/destination-wild/videos/the-stunning-red-bird-of-paradise/',
            'md5': 'c4912f656b4cbe58f3e000c489360989',
            'info_dict': {
                'id': 'Pok5lWCkiEFA',
                'ext': 'mp4',
                'title': 'The Stunning Red Bird of Paradise',
                'description': 'md5:7bc8cd1da29686be4d17ad1230f0140c',
                'timestamp': 1459362152,
                'upload_date': '20160330',
                'uploader': 'NEWA-FNG-NGTV',
            },
            'add_ie': ['ThePlatform'],
        },
        {
            'url': 'http://channel.nationalgeographic.com/the-story-of-god-with-morgan-freeman/episodes/the-power-of-miracles/',
            'only_matching': True,
        }
    ]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # The page embeds a ThePlatform release URL in an inline script.
        release_url = self._search_regex(
            r'video_auth_playlist_url\s*=\s*"([^"]+)"',
            webpage, 'release url')
        query = {
            'mbr': 'true',
            'switch': 'http',
        }
        # When the page flags the video as auth-only, obtain an AdobePass
        # token for the "natgeo" requestor and pass it along in the query.
        is_auth = self._search_regex(r'video_is_auth\s*=\s*"([^"]+)"', webpage, 'is auth', fatal=False)
        if is_auth == 'auth':
            auth_resource_id = self._search_regex(
                r"video_auth_resourceId\s*=\s*'([^']+)'",
                webpage, 'auth resource id')
            query['auth'] = self._extract_mvpd_auth(url, display_id, 'natgeo', auth_resource_id)

        return {
            '_type': 'url_transparent',
            'ie_key': 'ThePlatform',
            'url': smuggle_url(
                update_url_query(release_url, query),
                {'force_smil_url': True}),
            'display_id': display_id,
        }


class NationalGeographicEpisodeGuideIE(InfoExtractor):
    """Playlist extractor for a show's episode-guide page.

    Produces one playlist per (show, season); the season is whichever one is
    selected in the page's season dropdown (controlled by the ?s= parameter).
    """
    IE_NAME = 'natgeo:episodeguide'
    _VALID_URL = r'https?://channel\.nationalgeographic\.com/(?:wild/)?(?P<id>[^/]+)/episode-guide'
    _TESTS = [
        {
            'url': 'http://channel.nationalgeographic.com/the-story-of-god-with-morgan-freeman/episode-guide/',
            'info_dict': {
                'id': 'the-story-of-god-with-morgan-freeman-season-1',
                'title': 'The Story of God with Morgan Freeman - Season 1',
            },
            'playlist_mincount': 6,
        },
        {
            'url': 'http://channel.nationalgeographic.com/underworld-inc/episode-guide/?s=2',
            'info_dict': {
                'id': 'underworld-inc-season-2',
                'title': 'Underworld, Inc. - Season 2',
            },
            'playlist_mincount': 7,
        },
    ]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        show = get_element_by_class('show', webpage)
        selected_season = self._search_regex(
            r'<div[^>]+class="select-seasons[^"]*".*?<a[^>]*>(.*?)</a>',
            webpage, 'selected season')
        # Each episode tile links to a page handled by NationalGeographicIE;
        # hrefs may be protocol-relative, hence _proto_relative_url.
        entries = [
            self.url_result(self._proto_relative_url(entry_url), 'NationalGeographic')
            for entry_url in re.findall('(?s)<div[^>]+class="col-inner"[^>]*?>.*?<a[^>]+href="([^"]+)"', webpage)]
        return self.playlist_result(
            entries, '%s-%s' % (display_id, selected_season.lower().replace(' ', '-')),
            '%s - %s' % (show, selected_season))
unlicense
objmagic/heron
heron/tools/tracker/src/python/handlers/memoryhistogramhandler.py
10
3183
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' memoryhistogramhandler.py '''
import json
import traceback

import tornado.gen
# FIX: tornado.httpclient is used below (AsyncHTTPClient, HTTPError) but was
# never imported explicitly; it only worked when another module happened to
# import it first. Import it directly.
import tornado.httpclient
import tornado.web

from heron.common.src.python.utils.log import Log
from heron.tools.tracker.src.python import utils
from heron.tools.tracker.src.python.handlers import BaseHandler
from heron.tools.tracker.src.python.handlers.pidhandler import getInstancePid


class MemoryHistogramHandler(BaseHandler):
  """
  URL - /topologies/histo?cluster=<cluster>&topology=<topology> \
        &environ=<environment>&instance=<instance>
  Parameters:
   - cluster - Name of the cluster.
   - role - (optional) Role used to submit the topology.
   - environ - Running environment.
   - topology - Name of topology (Note: Case sensitive. Can only
                include [a-zA-Z0-9-_]+)
   - instance - Instance Id

  Resturns a histogram of top in memory object.
  The response JSON is a dict with following format:
  {
     'command': Full command executed at server.
     'stdout': Text on stdout of executing the command.
     'stderr': <optional> Text on stderr.
  }
  """

  # pylint: disable=attribute-defined-outside-init
  def initialize(self, tracker):
    """ initialize """
    # Tracker instance is injected by the tornado Application routing table.
    self.tracker = tracker

  @tornado.gen.coroutine
  def get(self):
    """Handle GET: resolve the topology, then fetch the histogram."""
    try:
      cluster = self.get_argument_cluster()
      role = self.get_argument_role()
      environ = self.get_argument_environ()
      topology_name = self.get_argument_topology()
      instance = self.get_argument_instance()
      topology_info = self.tracker.getTopologyInfo(topology_name, cluster, role, environ)
      ret = yield self.getInstanceMemoryHistogram(topology_info, instance)
      self.write_success_response(ret)
    except Exception as e:
      # Log the full traceback but return only the message to the client.
      Log.debug(traceback.format_exc())
      self.write_error_response(e)

  # pylint: disable=no-self-use
  @tornado.gen.coroutine
  def getInstanceMemoryHistogram(self, topology_info, instance_id):
    """
    Fetches Instance top memory item as histogram.

    First resolves the instance's JVM pid via the pid handler, then asks the
    heron-shell on that host for a jmap-style histogram of the process.
    """
    pid_response = yield getInstancePid(topology_info, instance_id)
    try:
      http_client = tornado.httpclient.AsyncHTTPClient()
      pid_json = json.loads(pid_response)
      pid = pid_json['stdout'].strip()
      if pid == '':
        raise Exception('Failed to get pid')
      endpoint = utils.make_shell_endpoint(topology_info, instance_id)
      url = "%s/histo/%s" % (endpoint, pid)
      response = yield http_client.fetch(url)
      Log.debug("HTTP call for url: %s", url)
      # tornado.gen.Return is how a py2 tornado coroutine returns a value.
      raise tornado.gen.Return(response.body)
    except tornado.httpclient.HTTPError as e:
      raise Exception(str(e))
apache-2.0
xupingmao/xnote
handlers/settings/settings.py
1
7271
# -*- coding: utf-8 -*-
# @author xupingmao
# @since 2017/02/19
# @modified 2021/05/05 17:43:23
import web
import time
import os
import sys
import platform
import xutils
import logging
import json
import threading
import re
import xtemplate
import xconfig
import xauth
import xtables
import xmanager
from logging.handlers import TimedRotatingFileHandler
from xutils import sqlite3, Storage, cacheutil
from xtemplate import T

try:
    import psutil
except ImportError:
    # psutil is optional; memory statistics are simply unavailable without it.
    psutil = None

# Link to edit the init script from the settings page.
INIT_SCRIPT_URL = "/code/edit?type=script&path=" + str(xconfig.INIT_SCRIPT)

# Config keys that are stored per-user (everything else is system-wide).
USER_CONFIG_KEY_SET = set([
    "TODO_MODE",
    "SIMPLE_MODE",
    "HOME_PATH",
    "LANG",
    "THEME",
    "FONT_SCALE",
])


def get_xnote_version():
    """Return the contents of version.txt, or '' if it cannot be read."""
    try:
        return xutils.readfile("version.txt")
    except:
        return ""


class Item:
    """Simple key/value row rendered on the settings page."""

    def __init__(self, key, value):
        self.key = key
        self.value = value


class SettingsHandler:
    """Renders the main settings page with runtime/system information."""

    @xauth.login_required()
    def GET(self):
        # NOTE(review): memory statistics are currently not collected
        # (psutil usage was removed), so sys_mem_total stays 0.
        sys_mem_total = 0
        thread_cnt = len(threading.enumerate())

        item_list = [
            Item('软件版本', get_xnote_version()),
            Item('sqlite版本', sqlite3.sqlite_version if sqlite3 != None else '')
        ]

        return xtemplate.render("settings/page/settings.html",
                                show_aside=False,
                                html_title=T("设置"),
                                item_list=item_list,
                                sys_mem_total=xutils.format_size(sys_mem_total),
                                thread_cnt=thread_cnt,
                                xconfig=xconfig,
                                xnote_version=get_xnote_version(),
                                start_time=xconfig.START_TIME,
                                init_script_url=INIT_SCRIPT_URL)


class StorageHandler:
    """Per-user key/value configuration backed by the database."""

    @xauth.login_required()
    def GET(self):
        key = xutils.get_argument("key")
        db = xtables.get_storage_table()
        config = db.select_first(where=dict(key=key, user=xauth.get_current_name()))
        if config is None:
            config = Storage(key=key, value="")
        return xtemplate.render("system/properties.html",
                                action="/system/storage",
                                show_aside=False,
                                config=config)

    @xauth.login_required()
    def POST(self):
        key = xutils.get_argument("key")
        value = xutils.get_argument("value")
        user = xauth.get_current_name()
        db = xtables.get_storage_table()
        config = db.select_first(where=dict(key=key, user=user))
        # Upsert: insert on first write, update afterwards.
        if config is None:
            db.insert(user=user,
                      key=key,
                      value=value,
                      ctime=xutils.format_datetime(),
                      mtime=xutils.format_datetime())
        else:
            db.update(value=value,
                      mtime=xutils.format_datetime(),
                      where=dict(key=key, user=user))
        config = Storage(key=key, value=value)
        return xtemplate.render("system/properties.html",
                                action="/system/storage",
                                show_aside=False,
                                config=config)


DEFAULT_SETTINGS = '''
# 导航配置
[NAV_LIST]
About = /code/wiki/README.md

# 索引目录
[INDEX_DIRS]

'''


class PropertiesHandler:
    """Per-user key/value configuration backed by the cache."""

    @xauth.login_required()
    def GET(self):
        key = xutils.get_argument("key")
        user = xauth.get_current_name()
        default_value = ""
        if key == "settings":
            default_value = DEFAULT_SETTINGS
        config = Storage(key=key,
                         value=xutils.cache_get("%s@prop_%s" % (user, key), default_value))
        if config is None:
            config = Storage(key=key, value="")
        return xtemplate.render("system/properties.html",
                                show_aside=False,
                                config=config)

    @xauth.login_required()
    def POST(self):
        key = xutils.get_argument("key")
        value = xutils.get_argument("value")
        user = xauth.get_current_name()
        xutils.cache_put("%s@prop_%s" % (user, key), value)
        if key == "settings":
            self.update_settings(value)
        config = Storage(key=key, value=value)
        return xtemplate.render("system/properties.html",
                                show_aside=False,
                                config=config)

    def update_settings(self, config_text):
        """Parse the INI-style settings text and rebuild the nav list."""
        from xutils import ConfigParser

        nav_list = []

        cf = ConfigParser()
        cf.read_string(config_text)
        options = cf.options('NAV_LIST')
        for option in options:
            value = cf.get('NAV_LIST', option)
            nav_list.append(Storage(name=option, url=value))

        # Apply the navigation list.
        xconfig.NAV_LIST = nav_list


@xauth.login_required()
def set_user_config(key, value):
    """Persist a per-user config value (only keys in USER_CONFIG_KEY_SET)."""
    if key not in USER_CONFIG_KEY_SET:
        return
    user = xauth.current_user()
    if user.config is None:
        user.config = Storage()
    user.config[key] = value
    xauth.update_user(user["name"], user)


@xauth.login_required("admin")
def set_sys_config(key, value):
    """Persist a system-wide config value (admin only)."""
    setattr(xconfig, key, value)
    cacheutil.hset('sys.config', key, value)


class ConfigHandler:
    """AJAX endpoint that updates a single config key."""

    @xauth.login_required()
    def POST(self):
        key = xutils.get_argument("key")
        value = xutils.get_argument("value")
        # Renamed from `type` to avoid shadowing the builtin.
        value_type = xutils.get_argument("type")

        xutils.info("UpdateConfig", "%s,%s,%s" % (value_type, key, value))

        if value_type == "int":
            value = int(value)

        if value_type == "bool":
            value = value.lower() in ("true", "yes", "on")

        if key == "BASE_TEMPLATE":
            xmanager.reload()
        if key in ("DEV_MODE", "DEBUG"):
            xconfig.DEBUG = value
            xconfig.DEV_MODE = value
            web.config.debug = value
        if key in ("RECENT_SEARCH_LIMIT", "RECENT_SIZE", "PAGE_SIZE", "TRASH_EXPIRE"):
            value = int(value)

        if key in USER_CONFIG_KEY_SET:
            set_user_config(key, value)
        else:
            set_sys_config(key, value)
        return dict(code="success")


class HomeEntrySettingsHandler:
    """Placeholder for the home-entry settings page (not implemented yet)."""

    @xauth.login_required()
    def GET(self):
        pass


@xmanager.listen("sys.reload")
def on_reload(ctx=None):
    """Reload persisted system config values into xconfig on sys.reload."""
    keys = (
        "THEME", 'FS_HIDE_FILES',
        'OPTION_STYLE', 'PAGE_OPEN',
        'RECENT_SEARCH_LIMIT',
        "PAGE_SIZE", "RECENT_SIZE",
        "RECORD_LOCATION",
        "TRASH_EXPIRE",
        "PAGE_WIDTH",
        "FS_VIEW_MODE",
        "HIDE_DICT_ENTRY"
    )
    for key in keys:
        value = cacheutil.hget('sys.config', key)
        xutils.trace("HGET", "key=%s, value=%s" % (key, value))
        if value is not None:
            setattr(xconfig, key, value)

    path = os.path.join(xconfig.SCRIPTS_DIR, "user.css")
    if not os.path.exists(path):
        return
    xconfig.USER_CSS = xutils.readfile(path)

    # Multi-theme support is disabled for now.
    # xconfig.THEME = "left"


xurls = (
    r"/settings/index", SettingsHandler,
    r"/settings/entry", HomeEntrySettingsHandler,
    r"/system/settings", SettingsHandler,
    r"/system/properties", PropertiesHandler,
    r"/system/storage", StorageHandler,
    r"/system/config", ConfigHandler,
)
gpl-3.0
JakeLowey/HackRPI2
django/contrib/localflavor/il/forms.py
317
2192
""" Israeli-specific form helpers """ import re from django.core.exceptions import ValidationError from django.core.validators import EMPTY_VALUES from django.forms.fields import RegexField, Field, EMPTY_VALUES from django.utils.checksums import luhn from django.utils.translation import ugettext_lazy as _ # Israeli ID numbers consist of up to 8 digits followed by a checksum digit. # Numbers which are shorter than 8 digits are effectively left-zero-padded. # The checksum digit is occasionally separated from the number by a hyphen, # and is calculated using the luhn algorithm. # # Relevant references: # # (hebrew) http://he.wikipedia.org/wiki/%D7%9E%D7%A1%D7%A4%D7%A8_%D7%96%D7%94%D7%95%D7%AA_(%D7%99%D7%A9%D7%A8%D7%90%D7%9C) # (hebrew) http://he.wikipedia.org/wiki/%D7%A1%D7%A4%D7%A8%D7%AA_%D7%91%D7%99%D7%A7%D7%95%D7%A8%D7%AA id_number_re = re.compile(r'^(?P<number>\d{1,8})-?(?P<check>\d)$') class ILPostalCodeField(RegexField): """ A form field that validates its input as an Israeli postal code. Valid form is XXXXX where X represents integer. """ default_error_messages = { 'invalid': _(u'Enter a postal code in the format XXXXX'), } def __init__(self, *args, **kwargs): super(ILPostalCodeField, self).__init__(r'^\d{5}$', *args, **kwargs) def clean(self, value): if value not in EMPTY_VALUES: value = value.replace(" ", "") return super(ILPostalCodeField, self).clean(value) class ILIDNumberField(Field): """ A form field that validates its input as an Israeli identification number. Valid form is per the Israeli ID specification. """ default_error_messages = { 'invalid': _(u'Enter a valid ID number.'), } def clean(self, value): value = super(ILIDNumberField, self).clean(value) if value in EMPTY_VALUES: return u'' match = id_number_re.match(value) if not match: raise ValidationError(self.error_messages['invalid']) value = match.group('number') + match.group('check') if not luhn(value): raise ValidationError(self.error_messages['invalid']) return value
mit
killbug2004/volatility
volatility/plugins/gui/screenshot.py
58
3878
# Volatility # Copyright (C) 2007-2013 Volatility Foundation # Copyright (C) 2010,2011,2012 Michael Hale Ligh <michael.ligh@mnin.org> # Copyright (C) 2009 Brendan Dolan-Gavitt # # This file is part of Volatility. # # Volatility is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # Volatility is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Volatility. If not, see <http://www.gnu.org/licenses/>. # import os import volatility.plugins.gui.windowstations as windowstations import volatility.debug as debug try: from PIL import Image, ImageDraw has_pil = True except ImportError: has_pil = False class Screenshot(windowstations.WndScan): """Save a pseudo-screenshot based on GDI windows""" def __init__(self, config, *args, **kwargs): windowstations.WndScan.__init__(self, config, *args, **kwargs) config.add_option("DUMP-DIR", short_option = 'D', type = "string", help = "Output directory", action = "store") def draw_text(self, draw, text, left, top, fill = "Black"): """Label windows in the screen shot""" lines = text.split('\x0d\x0a') for line in lines: draw.text( (left, top), line, fill = fill) _, height = draw.textsize(line) top += height def render_text(self, outfd, data): if not has_pil: debug.error("Please install PIL") if not self._config.DUMP_DIR or not os.path.isdir(self._config.DUMP_DIR): debug.error("Please supply an existing --dump-dir") seen = [] for window_station in data: for desktop in window_station.desktops(): offset = desktop.PhysicalAddress if offset in seen: continue seen.append(offset) # The foreground window win = 
desktop.DeskInfo.spwnd # Some desktops don't have any windows if not win: debug.warning("{0}\{1}\{2} has no windows\n".format( desktop.dwSessionId, window_station.Name, desktop.Name)) continue im = Image.new("RGB", (win.rcWindow.right + 1, win.rcWindow.bottom + 1), "White") draw = ImageDraw.Draw(im) # Traverse windows, visible only for win, _level in desktop.windows( win = win, filter = lambda x : 'WS_VISIBLE' in str(x.style)): draw.rectangle(win.rcWindow.get_tup(), outline = "Black", fill = "White") draw.rectangle(win.rcClient.get_tup(), outline = "Black", fill = "White") ## Create labels for the windows self.draw_text(draw, str(win.strName or ''), win.rcWindow.left + 2, win.rcWindow.top) file_name = "session_{0}.{1}.{2}.png".format( desktop.dwSessionId, window_station.Name, desktop.Name) file_name = os.path.join(self._config.DUMP_DIR, file_name) try: im.save(file_name, "PNG") result = "Wrote {0}".format(file_name) except SystemError, why: result = why outfd.write("{0}\n".format(result))
gpl-2.0
ruibarreira/linuxtrail
usr/lib/python2.7/re.py
131
13423
#
# Secret Labs' Regular Expression Engine
#
# re-compatible interface for the sre matching engine
#
# Copyright (c) 1998-2001 by Secret Labs AB.  All rights reserved.
#
# This version of the SRE library can be redistributed under CNRI's
# Python 1.6 license.  For any other use, please contact Secret Labs
# AB (info@pythonware.com).
#
# Portions of this engine have been developed in cooperation with
# CNRI.  Hewlett-Packard provided funding for 1.6 integration and
# other compatibility work.
#
# NOTE(review): this appears to be a vendored/distro copy of CPython 2.7's
# stdlib `re` module (with a locale-aware pattern cache); it should track
# upstream and not be patched locally — confirm before editing.

r"""Support for regular expressions (RE).

This module provides regular expression matching operations similar to
those found in Perl.  It supports both 8-bit and Unicode strings; both
the pattern and the strings being processed can contain null bytes and
characters outside the US ASCII range.

Regular expressions can contain both special and ordinary characters.
Most ordinary characters, like "A", "a", or "0", are the simplest
regular expressions; they simply match themselves.  You can
concatenate ordinary characters, so last matches the string 'last'.

The special characters are:
    "."      Matches any character except a newline.
    "^"      Matches the start of the string.
    "$"      Matches the end of the string or just before the newline at
             the end of the string.
    "*"      Matches 0 or more (greedy) repetitions of the preceding RE.
             Greedy means that it will match as many repetitions as possible.
    "+"      Matches 1 or more (greedy) repetitions of the preceding RE.
    "?"      Matches 0 or 1 (greedy) of the preceding RE.
    *?,+?,?? Non-greedy versions of the previous three special characters.
    {m,n}    Matches from m to n repetitions of the preceding RE.
    {m,n}?   Non-greedy version of the above.
    "\\"     Either escapes special characters or signals a special sequence.
    []       Indicates a set of characters.
             A "^" as the first character indicates a complementing set.
    "|"      A|B, creates an RE that will match either A or B.
    (...)    Matches the RE inside the parentheses.
             The contents can be retrieved or matched later in the string.
    (?iLmsux) Set the I, L, M, S, U, or X flag for the RE (see below).
    (?:...)  Non-grouping version of regular parentheses.
    (?P<name>...) The substring matched by the group is accessible by name.
    (?P=name)     Matches the text matched earlier by the group named name.
    (?#...)  A comment; ignored.
    (?=...)  Matches if ... matches next, but doesn't consume the string.
    (?!...)  Matches if ... doesn't match next.
    (?<=...) Matches if preceded by ... (must be fixed length).
    (?<!...) Matches if not preceded by ... (must be fixed length).
    (?(id/name)yes|no) Matches yes pattern if the group with id/name matched,
                       the (optional) no pattern otherwise.

The special sequences consist of "\\" and a character from the list
below.  If the ordinary character is not on the list, then the
resulting RE will match the second character.
    \number  Matches the contents of the group of the same number.
    \A       Matches only at the start of the string.
    \Z       Matches only at the end of the string.
    \b       Matches the empty string, but only at the start or end of a word.
    \B       Matches the empty string, but not at the start or end of a word.
    \d       Matches any decimal digit; equivalent to the set [0-9].
    \D       Matches any non-digit character; equivalent to the set [^0-9].
    \s       Matches any whitespace character; equivalent to [ \t\n\r\f\v].
    \S       Matches any non-whitespace character; equiv. to [^ \t\n\r\f\v].
    \w       Matches any alphanumeric character; equivalent to [a-zA-Z0-9_].
             With LOCALE, it will match the set [0-9_] plus characters defined
             as letters for the current locale.
    \W       Matches the complement of \w.
    \\       Matches a literal backslash.

This module exports the following functions:
    match    Match a regular expression pattern to the beginning of a string.
    search   Search a string for the presence of a pattern.
    sub      Substitute occurrences of a pattern found in a string.
    subn     Same as sub, but also return the number of substitutions made.
    split    Split a string by the occurrences of a pattern.
    findall  Find all occurrences of a pattern in a string.
    finditer Return an iterator yielding a match object for each match.
    compile  Compile a pattern into a RegexObject.
    purge    Clear the regular expression cache.
    escape   Backslash all non-alphanumerics in a string.

Some of the functions in this module takes flags as optional parameters:
    I  IGNORECASE  Perform case-insensitive matching.
    L  LOCALE      Make \w, \W, \b, \B, dependent on the current locale.
    M  MULTILINE   "^" matches the beginning of lines (after a newline)
                   as well as the string.
                   "$" matches the end of lines (before a newline) as well
                   as the end of the string.
    S  DOTALL      "." matches any character at all, including the newline.
    X  VERBOSE     Ignore whitespace and comments for nicer looking RE's.
    U  UNICODE     Make \w, \W, \b, \B, dependent on the Unicode locale.

This module also defines an exception 'error'.

"""

import sys
import sre_compile
import sre_parse

# _locale is used by the cache below to invalidate LOCALE-dependent
# patterns when the C locale changes; it is optional.
try:
    import _locale
except ImportError:
    _locale = None

# public symbols
__all__ = [ "match", "search", "sub", "subn", "split", "findall",
    "compile", "purge", "template", "escape", "I", "L", "M", "S", "X",
    "U", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "VERBOSE",
    "UNICODE", "error" ]

__version__ = "2.2.1"

# flags
I = IGNORECASE = sre_compile.SRE_FLAG_IGNORECASE # ignore case
L = LOCALE = sre_compile.SRE_FLAG_LOCALE # assume current 8-bit locale
U = UNICODE = sre_compile.SRE_FLAG_UNICODE # assume unicode locale
M = MULTILINE = sre_compile.SRE_FLAG_MULTILINE # make anchors look for newline
S = DOTALL = sre_compile.SRE_FLAG_DOTALL # make dot match newline
X = VERBOSE = sre_compile.SRE_FLAG_VERBOSE # ignore whitespace and comments

# sre extensions (experimental, don't rely on these)
T = TEMPLATE = sre_compile.SRE_FLAG_TEMPLATE # disable backtracking
DEBUG = sre_compile.SRE_FLAG_DEBUG # dump pattern after compilation

# sre exception
error = sre_compile.error

# --------------------------------------------------------------------
# public interface

def match(pattern, string, flags=0):
    """Try to apply the pattern at the start of the string, returning
    a match object, or None if no match was found."""
    return _compile(pattern, flags).match(string)

def search(pattern, string, flags=0):
    """Scan through string looking for a match to the pattern, returning
    a match object, or None if no match was found."""
    return _compile(pattern, flags).search(string)

def sub(pattern, repl, string, count=0, flags=0):
    """Return the string obtained by replacing the leftmost
    non-overlapping occurrences of the pattern in string by the
    replacement repl.  repl can be either a string or a callable;
    if a string, backslash escapes in it are processed.  If it is
    a callable, it's passed the match object and must return
    a replacement string to be used."""
    return _compile(pattern, flags).sub(repl, string, count)

def subn(pattern, repl, string, count=0, flags=0):
    """Return a 2-tuple containing (new_string, number).
    new_string is the string obtained by replacing the leftmost
    non-overlapping occurrences of the pattern in the source
    string by the replacement repl.  number is the number of
    substitutions that were made. repl can be either a string or a
    callable; if a string, backslash escapes in it are processed.
    If it is a callable, it's passed the match object and must
    return a replacement string to be used."""
    return _compile(pattern, flags).subn(repl, string, count)

def split(pattern, string, maxsplit=0, flags=0):
    """Split the source string by the occurrences of the pattern,
    returning a list containing the resulting substrings."""
    return _compile(pattern, flags).split(string, maxsplit)

def findall(pattern, string, flags=0):
    """Return a list of all non-overlapping matches in the string.

    If one or more groups are present in the pattern, return a
    list of groups; this will be a list of tuples if the pattern
    has more than one group.

    Empty matches are included in the result."""
    return _compile(pattern, flags).findall(string)

if sys.hexversion >= 0x02020000:
    # finditer was introduced in Python 2.2; guard kept for 2.1 compat.
    __all__.append("finditer")
    def finditer(pattern, string, flags=0):
        """Return an iterator over all non-overlapping matches in the
        string.  For each match, the iterator returns a match object.

        Empty matches are included in the result."""
        return _compile(pattern, flags).finditer(string)

def compile(pattern, flags=0):
    "Compile a regular expression pattern, returning a pattern object."
    return _compile(pattern, flags)

def purge():
    "Clear the regular expression cache"
    _cache.clear()
    _cache_repl.clear()

def template(pattern, flags=0):
    "Compile a template pattern, returning a pattern object"
    return _compile(pattern, flags|T)

_alphanum = frozenset(
    "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")

def escape(pattern):
    "Escape all non-alphanumeric characters in pattern."
    s = list(pattern)
    alphanum = _alphanum
    for i, c in enumerate(pattern):
        if c not in alphanum:
            if c == "\000":
                s[i] = "\\000"
            else:
                s[i] = "\\" + c
    # pattern[:0] keeps the result's type (str vs unicode) equal to the input's.
    return pattern[:0].join(s)

# --------------------------------------------------------------------
# internals

# Pattern cache: maps (type, pattern, flags) -> (compiled pattern, locale).
# The stored locale is checked on lookup so LOCALE patterns are recompiled
# when the C locale changes (distro-patched variant of the upstream cache).
_cache = {}
_cache_repl = {}

_pattern_type = type(sre_compile.compile("", 0))

_MAXCACHE = 100

def _compile(*key):
    # internal: compile pattern
    pattern, flags = key
    bypass_cache = flags & DEBUG
    if not bypass_cache:
        cachekey = (type(key[0]),) + key
        try:
            p, loc = _cache[cachekey]
            if loc is None or loc == _locale.setlocale(_locale.LC_CTYPE):
                return p
        except KeyError:
            pass
    if isinstance(pattern, _pattern_type):
        if flags:
            raise ValueError('Cannot process flags argument with a compiled pattern')
        return pattern
    if not sre_compile.isstring(pattern):
        raise TypeError, "first argument must be string or compiled pattern"
    try:
        p = sre_compile.compile(pattern, flags)
    except error, v:
        raise error, v # invalid expression
    if not bypass_cache:
        # Whole-cache clear on overflow: simple, but discards hot entries too.
        if len(_cache) >= _MAXCACHE:
            _cache.clear()
        if p.flags & LOCALE:
            if not _locale:
                return p
            loc = _locale.setlocale(_locale.LC_CTYPE)
        else:
            loc = None
        _cache[cachekey] = p, loc
    return p

def _compile_repl(*key):
    # internal: compile replacement pattern
    p = _cache_repl.get(key)
    if p is not None:
        return p
    repl, pattern = key
    try:
        p = sre_parse.parse_template(repl, pattern)
    except error, v:
        raise error, v # invalid expression
    if len(_cache_repl) >= _MAXCACHE:
        _cache_repl.clear()
    _cache_repl[key] = p
    return p

def _expand(pattern, match, template):
    # internal: match.expand implementation hook
    template = sre_parse.parse_template(template, pattern)
    return sre_parse.expand_template(template, match)

def _subx(pattern, template):
    # internal: pattern.sub/subn implementation helper
    template = _compile_repl(template, pattern)
    if not template[0] and len(template[1]) == 1:
        # literal replacement
        return template[1][0]
    def filter(match, template=template):
        return sre_parse.expand_template(template, match)
    return filter

# register myself for pickling

import copy_reg

def _pickle(p):
    # Pickle compiled patterns as (pattern string, flags) and recompile on load.
    return _compile, (p.pattern, p.flags)

copy_reg.pickle(_pattern_type, _pickle, _compile)

# --------------------------------------------------------------------
# experimental stuff (see python-dev discussions for details)

class Scanner:
    # Tokenizer over a (pattern, action) lexicon; each phrase becomes one
    # numbered group in a single alternation so m.lastindex identifies
    # which rule matched.
    def __init__(self, lexicon, flags=0):
        from sre_constants import BRANCH, SUBPATTERN
        self.lexicon = lexicon
        # combine phrases into a compound pattern
        p = []
        s = sre_parse.Pattern()
        s.flags = flags
        for phrase, action in lexicon:
            p.append(sre_parse.SubPattern(s, [
                (SUBPATTERN, (len(p)+1, sre_parse.parse(phrase, flags))),
                ]))
        s.groups = len(p)+1
        p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
        self.scanner = sre_compile.compile(p)
    def scan(self, string):
        result = []
        append = result.append
        match = self.scanner.scanner(string).match
        i = 0
        while 1:
            m = match()
            if not m:
                break
            j = m.end()
            # Stop on empty matches to guarantee progress.
            if i == j:
                break
            action = self.lexicon[m.lastindex-1][1]
            if hasattr(action, '__call__'):
                self.match = m
                action = action(self, m.group())
            if action is not None:
                append(action)
            i = j
        # Return collected tokens plus the unscanned remainder of the string.
        return result, string[i:]
gpl-3.0
swharden/ROI-Analysis-Pipeline
pyLS/old/processFolders.py
1
3097
from pyLineScan import LineScan import glob import os from PIL import Image import matplotlib.pyplot as plt import datetime def analyzeSubfolders(folderParent,overwrite=False): """ given a parent directly, perform automated linescan analysis on all sub-folders. Output data is saved in each linescan folder's 'results' sub-folder. """ folderParent=os.path.abspath(folderParent) print("analyzing all linescans in",folderParent) linescanFolders=sorted(os.listdir(folderParent)) for i,name in enumerate(linescanFolders): if not name.startswith("LineScan-"): continue folderLinescan=os.path.join(folderParent,name) print("PROCESSING LINESCAN %d OF %d: %s"%(i+1,len(linescanFolders),name)) folderOutput=os.path.join(folderLinescan,"analysis") if not os.path.exists(folderOutput): os.mkdir(folderOutput) if overwrite or not os.path.exists(os.path.join(folderOutput,"fig_01_img.png")): print(" analyzing linescan data...") LS=LineScan(folderLinescan,baseline=None) LS.allFigures() plt.close('all') if overwrite or not os.path.exists(os.path.join(folderOutput,"ref.png")): refFigures=glob.glob(folderLinescan+"/References/*Window2*.tif") if len(refFigures): print(" generating reference figure...") im=Image.open(refFigures[0]) im.save(os.path.join(folderOutput,"ref.png")) def index(folderParent): """make index.html and stick it in the parent directory.""" timestamp=datetime.datetime.now().strftime("%I:%M %p on %B %d, %Y") folders=os.listdir(folderParent) out="<html><style>" out+=""" img{ margin: 10px; border: 1px solid black; box-shadow: 5px 5px 10px rgba(0, 0, 0, .2); } """ out+="</style><body>" out+="<b style='font-size: 300%%'>boshLS</b><br><i>automatic linescan index generated at %s</i><hr><br>"%timestamp for folder in sorted(folders): if not folder.startswith("LineScan-"): continue path=os.path.abspath(folderParent+"/"+folder) rel=folderParent+"/"+folder out+="<div style='background-color: #336699; color: white; padding: 10px; page-break-before: always;'>" out+="<span style='font-size: 
200%%; font-weight: bold;'>%s</span><br>"%folder out+="<code>%s</code></div>"%path for fname in sorted(glob.glob(folderParent+"/"+folder+"/analysis/*.png")): fname=os.path.basename(fname) out+='<a href="%s/analysis/%s"><img src="%s/analysis/%s" height=300></a>'%(rel,fname,rel,fname) out+="<br>"*6 out+="</code></body></html>" fileOut=os.path.abspath(folderParent+"/index.html") with open(fileOut,'w') as f: f.write(out) print("saved",fileOut) if __name__=="__main__": #folderParent='../data/linescan/realistic/' folderParent=r'X:\Data\SCOTT\2017-06-16 OXT-Tom\2p' analyzeSubfolders(folderParent,overwrite=False) index(folderParent) print("DONE")
mit
ChristopherOlson/ArduHeli
Tools/autotest/param_metadata/htmlemit.py
28
2757
#!/usr/bin/env python """ Emit docs in a form acceptable to the old Ardupilot wordpress docs site """ from param import known_param_fields from emit import Emit import cgi class HtmlEmit(Emit): def __init__(self): Emit.__init__(self) html_fname = 'Parameters.html' self.f = open(html_fname, mode='w') self.preamble = """<!-- Dynamically generated list of documented parameters This page was generated using Tools/autotest/param_metadata/param_parse.py DO NOT EDIT --> <h3 style="text-align: center">Complete Parameter List</h3> <hr /> <p>This is a complete list of the parameters which can be set via the MAVLink protocol in the EEPROM of your APM to control vehicle behaviour. This list is automatically generated from the latest ardupilot source code, and so may contain parameters which are not yet in the stable released versions of the code.</p> <!-- add auto-generated table of contents with "Table of Contents Plus" plugin --> [toc exclude="Complete Parameter List"] """ self.t = '' def escape(self, s): s = s.replace(' ', '-') s = s.replace(':', '-') s = s.replace('(', '') s = s.replace(')', '') return s def close(self): self.f.write(self.preamble) self.f.write(self.t) self.f.close() def start_libraries(self): pass def emit(self, g, f): tag = '%s Parameters' % g.name t = '\n\n<h1>%s</h1>\n' % tag for param in g.params: if not hasattr(param, 'DisplayName') or not hasattr(param, 'Description'): continue d = param.__dict__ tag = '%s (%s)' % (param.DisplayName, param.name) t += '\n\n<h2>%s</h2>' % tag if d.get('User', None) == 'Advanced': t += '<em>Note: This parameter is for advanced users</em><br>' t += "\n\n<p>%s</p>\n" % cgi.escape(param.Description) t += "<ul>\n" for field in param.__dict__.keys(): if field not in ['name', 'DisplayName', 'Description', 'User'] and field in known_param_fields: if field == 'Values' and Emit.prog_values_field.match(param.__dict__[field]): values = (param.__dict__[field]).split(',') t += "<table><th>Value</th><th>Meaning</th>\n" for value in 
values: v = value.split(':') t += "<tr><td>%s</td><td>%s</td></tr>\n" % (v[0], v[1]) t += "</table>\n" else: t += "<li>%s: %s</li>\n" % (field, cgi.escape(param.__dict__[field])) t += "</ul>\n" self.t += t
gpl-3.0
eduNEXT/edx-platform
lms/djangoapps/course_home_api/course_metadata/v1/serializers.py
3
1572
# pylint: disable=abstract-method """ Course Home Course Metadata Serializers. Returns Course Metadata used for all Course Home pages. """ from django.urls import reverse from django.utils.translation import ugettext as _ from rest_framework import serializers from lms.djangoapps.course_home_api.mixins import VerifiedModeSerializerMixin class CourseTabSerializer(serializers.Serializer): """ Serializer for the Course Home Tabs """ tab_id = serializers.CharField() title = serializers.SerializerMethodField() url = serializers.SerializerMethodField() def get_title(self, tab): title = tab.title or tab.get('name', '') return _(title) # pylint: disable=translation-of-non-string def get_url(self, tab): request = self.context.get('request') return request.build_absolute_uri(tab.link_func(self.context.get('course'), reverse)) class CourseHomeMetadataSerializer(VerifiedModeSerializerMixin, serializers.Serializer): """ Serializer for the Course Home Course Metadata """ course_id = serializers.CharField() username = serializers.CharField() is_enrolled = serializers.BooleanField() is_self_paced = serializers.BooleanField() is_staff = serializers.BooleanField() number = serializers.CharField() org = serializers.CharField() original_user_is_staff = serializers.BooleanField() tabs = CourseTabSerializer(many=True) title = serializers.CharField() can_load_courseware = serializers.BooleanField() celebrations = serializers.DictField()
agpl-3.0
sogelink/ansible
lib/ansible/module_utils/network.py
28
7031
# This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Copyright (c) 2015 Peter Sprygada, <psprygada@ansible.com> # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.basic import env_fallback from ansible.module_utils.netcli import Cli from ansible.module_utils._text import to_native from ansible.module_utils.six import iteritems NET_TRANSPORT_ARGS = dict( host=dict(required=True), port=dict(type='int'), username=dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])), password=dict(no_log=True, fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD'])), ssh_keyfile=dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'), authorize=dict(default=False, fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'), auth_pass=dict(no_log=True, fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS'])), provider=dict(type='dict', no_log=True), transport=dict(choices=list()), timeout=dict(default=10, type='int') ) NET_CONNECTION_ARGS = dict() NET_CONNECTIONS = dict() def _transitional_argument_spec(): argument_spec = {} for key, value in iteritems(NET_TRANSPORT_ARGS): value['required'] = False argument_spec[key] = value return argument_spec def to_list(val): if isinstance(val, (list, tuple)): return list(val) elif val is not None: return [val] else: return list() class ModuleStub(object): def __init__(self, argument_spec, fail_json): self.params = dict() for key, value in argument_spec.items(): self.params[key] = value.get('default') self.fail_json = fail_json class NetworkError(Exception): def __init__(self, msg, **kwargs): super(NetworkError, self).__init__(msg) self.kwargs = kwargs class Config(object): def __init__(self, connection): self.connection = connection def __call__(self, commands, **kwargs): lines = to_list(commands) return self.connection.configure(lines, **kwargs) def load_config(self, commands, **kwargs): commands = to_list(commands) return self.connection.load_config(commands, **kwargs) def get_config(self, **kwargs): return self.connection.get_config(**kwargs) def save_config(self): return 
self.connection.save_config() class NetworkModule(AnsibleModule): def __init__(self, *args, **kwargs): connect_on_load = kwargs.pop('connect_on_load', True) argument_spec = NET_TRANSPORT_ARGS.copy() argument_spec['transport']['choices'] = NET_CONNECTIONS.keys() argument_spec.update(NET_CONNECTION_ARGS.copy()) if kwargs.get('argument_spec'): argument_spec.update(kwargs['argument_spec']) kwargs['argument_spec'] = argument_spec super(NetworkModule, self).__init__(*args, **kwargs) self.connection = None self._cli = None self._config = None try: transport = self.params['transport'] or '__default__' cls = NET_CONNECTIONS[transport] self.connection = cls() except KeyError: self.fail_json(msg='Unknown transport or no default transport specified') except (TypeError, NetworkError) as exc: self.fail_json(msg=to_native(exc), exception=traceback.format_exc()) if connect_on_load: self.connect() @property def cli(self): if not self.connected: self.connect() if self._cli: return self._cli self._cli = Cli(self.connection) return self._cli @property def config(self): if not self.connected: self.connect() if self._config: return self._config self._config = Config(self.connection) return self._config @property def connected(self): return self.connection._connected def _load_params(self): super(NetworkModule, self)._load_params() provider = self.params.get('provider') or dict() for key, value in provider.items(): for args in [NET_TRANSPORT_ARGS, NET_CONNECTION_ARGS]: if key in args: if self.params.get(key) is None and value is not None: self.params[key] = value def connect(self): try: if not self.connected: self.connection.connect(self.params) if self.params['authorize']: self.connection.authorize(self.params) self.log('connected to %s:%s using %s' % (self.params['host'], self.params['port'], self.params['transport'])) except NetworkError as exc: self.fail_json(msg=to_native(exc), exception=traceback.format_exc()) def disconnect(self): try: if self.connected: 
self.connection.disconnect() self.log('disconnected from %s' % self.params['host']) except NetworkError as exc: self.fail_json(msg=to_native(exc), exception=traceback.format_exc()) def register_transport(transport, default=False): def register(cls): NET_CONNECTIONS[transport] = cls if default: NET_CONNECTIONS['__default__'] = cls return cls return register def add_argument(key, value): NET_CONNECTION_ARGS[key] = value
gpl-3.0
glennw/servo
tests/wpt/css-tests/tools/pywebsocket/src/mod_pywebsocket/http_header_util.py
694
6905
# Copyright 2011, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Utilities for parsing and formatting headers that follow the grammar defined in HTTP RFC http://www.ietf.org/rfc/rfc2616.txt. 
""" import urlparse _SEPARATORS = '()<>@,;:\\"/[]?={} \t' def _is_char(c): """Returns true iff c is in CHAR as specified in HTTP RFC.""" return ord(c) <= 127 def _is_ctl(c): """Returns true iff c is in CTL as specified in HTTP RFC.""" return ord(c) <= 31 or ord(c) == 127 class ParsingState(object): def __init__(self, data): self.data = data self.head = 0 def peek(state, pos=0): """Peeks the character at pos from the head of data.""" if state.head + pos >= len(state.data): return None return state.data[state.head + pos] def consume(state, amount=1): """Consumes specified amount of bytes from the head and returns the consumed bytes. If there's not enough bytes to consume, returns None. """ if state.head + amount > len(state.data): return None result = state.data[state.head:state.head + amount] state.head = state.head + amount return result def consume_string(state, expected): """Given a parsing state and a expected string, consumes the string from the head. Returns True if consumed successfully. Otherwise, returns False. """ pos = 0 for c in expected: if c != peek(state, pos): return False pos += 1 consume(state, pos) return True def consume_lws(state): """Consumes a LWS from the head. Returns True if any LWS is consumed. Otherwise, returns False. LWS = [CRLF] 1*( SP | HT ) """ original_head = state.head consume_string(state, '\r\n') pos = 0 while True: c = peek(state, pos) if c == ' ' or c == '\t': pos += 1 else: if pos == 0: state.head = original_head return False else: consume(state, pos) return True def consume_lwses(state): """Consumes *LWS from the head.""" while consume_lws(state): pass def consume_token(state): """Consumes a token from the head. Returns the token or None if no token was found. 
""" pos = 0 while True: c = peek(state, pos) if c is None or c in _SEPARATORS or _is_ctl(c) or not _is_char(c): if pos == 0: return None return consume(state, pos) else: pos += 1 def consume_token_or_quoted_string(state): """Consumes a token or a quoted-string, and returns the token or unquoted string. If no token or quoted-string was found, returns None. """ original_head = state.head if not consume_string(state, '"'): return consume_token(state) result = [] expect_quoted_pair = False while True: if not expect_quoted_pair and consume_lws(state): result.append(' ') continue c = consume(state) if c is None: # quoted-string is not enclosed with double quotation state.head = original_head return None elif expect_quoted_pair: expect_quoted_pair = False if _is_char(c): result.append(c) else: # Non CHAR character found in quoted-pair state.head = original_head return None elif c == '\\': expect_quoted_pair = True elif c == '"': return ''.join(result) elif _is_ctl(c): # Invalid character %r found in qdtext state.head = original_head return None else: result.append(c) def quote_if_necessary(s): """Quotes arbitrary string into quoted-string.""" quote = False if s == '': return '""' result = [] for c in s: if c == '"' or c in _SEPARATORS or _is_ctl(c) or not _is_char(c): quote = True if c == '"' or _is_ctl(c): result.append('\\' + c) else: result.append(c) if quote: return '"' + ''.join(result) + '"' else: return ''.join(result) def parse_uri(uri): """Parse absolute URI then return host, port and resource.""" parsed = urlparse.urlsplit(uri) if parsed.scheme != 'wss' and parsed.scheme != 'ws': # |uri| must be a relative URI. # TODO(toyoshim): Should validate |uri|. return None, None, uri if parsed.hostname is None: return None, None, None port = None try: port = parsed.port except ValueError, e: # port property cause ValueError on invalid null port description like # 'ws://host:/path'. 
return None, None, None if port is None: if parsed.scheme == 'ws': port = 80 else: port = 443 path = parsed.path if not path: path += '/' if parsed.query: path += '?' + parsed.query if parsed.fragment: path += '#' + parsed.fragment return parsed.hostname, port, path try: urlparse.uses_netloc.index('ws') except ValueError, e: # urlparse in Python2.5.1 doesn't have 'ws' and 'wss' entries. urlparse.uses_netloc.append('ws') urlparse.uses_netloc.append('wss') # vi:sts=4 sw=4 et
mpl-2.0
wwj718/edx-video
common/djangoapps/cache_toolbox/relation.py
239
3483
""" Caching instances via ``related_name`` -------------------------------------- ``cache_relation`` adds utility methods to a model to obtain ``related_name`` instances via the cache. Usage ~~~~~ :: from django.db import models from django.contrib.auth.models import User class Foo(models.Model): user = models.OneToOneField( User, primary_key=True, related_name='foo', ) name = models.CharField(max_length=20) cache_relation(User.foo) :: >>> user = User.objects.get(pk=1) >>> user.foo_cache # Cache miss - hits the database <Foo: > >>> user = User.objects.get(pk=1) >>> user.foo_cache # Cache hit - no database access <Foo: > >>> user = User.objects.get(pk=2) >>> user.foo # Regular lookup - hits the database <Foo: > >>> user.foo_cache # Special-case: Will not hit cache or database. <Foo: > Accessing ``user_instance.foo_cache`` (note the "_cache" suffix) will now obtain the related ``Foo`` instance via the cache. Accessing the original ``user_instance.foo`` attribute will perform the lookup as normal. Invalidation ~~~~~~~~~~~~ Upon saving (or deleting) the instance, the cache is cleared. For example:: >>> user = User.objects.get(pk=1) >>> foo = user.foo_cache # (Assume cache hit from previous session) >>> foo.name = "New name" >>> foo.save() # Cache is cleared on save >>> user = User.objects.get(pk=1) >>> user.foo_cache # Cache miss. <Foo: > Manual invalidation may also be performed using the following methods:: >>> user_instance.foo_cache_clear() >>> User.foo_cache_clear_fk(user_instance_pk) Manual invalidation is required if you use ``.update()`` methods which the ``post_save`` and ``post_delete`` hooks cannot intercept. Support ~~~~~~~ ``cache_relation`` currently only works with ``OneToOneField`` fields. Support for regular ``ForeignKey`` fields is planned. 
""" from django.db.models.signals import post_save, post_delete from .core import get_instance, delete_instance def cache_relation(descriptor, timeout=None): rel = descriptor.related related_name = '%s_cache' % rel.field.related_query_name() @property def get(self): # Always use the cached "real" instance if available try: return getattr(self, descriptor.cache_name) except AttributeError: pass # Lookup cached instance try: return getattr(self, '_%s_cache' % related_name) except AttributeError: pass # import logging # log = logging.getLogger("tracking") # log.info( "DEBUG: "+str(str(rel.model)+"/"+str(self.pk) )) instance = get_instance(rel.model, self.pk, timeout) setattr(self, '_%s_cache' % related_name, instance) return instance setattr(rel.parent_model, related_name, get) # Clearing cache def clear(self): delete_instance(rel.model, self) @classmethod def clear_pk(cls, *instances_or_pk): delete_instance(rel.model, *instances_or_pk) def clear_cache(sender, instance, *args, **kwargs): delete_instance(rel.model, instance) setattr(rel.parent_model, '%s_clear' % related_name, clear) setattr(rel.parent_model, '%s_clear_pk' % related_name, clear_pk) post_save.connect(clear_cache, sender=rel.model, weak=False) post_delete.connect(clear_cache, sender=rel.model, weak=False)
agpl-3.0
glewarne/Note2Core_v3_kernel_N710x
arch/ia64/scripts/unwcheck.py
13143
1714
#!/usr/bin/python # # Usage: unwcheck.py FILE # # This script checks the unwind info of each function in file FILE # and verifies that the sum of the region-lengths matches the total # length of the function. # # Based on a shell/awk script originally written by Harish Patil, # which was converted to Perl by Matthew Chapman, which was converted # to Python by David Mosberger. # import os import re import sys if len(sys.argv) != 2: print "Usage: %s FILE" % sys.argv[0] sys.exit(2) readelf = os.getenv("READELF", "readelf") start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]") rlen_pattern = re.compile(".*rlen=([0-9]+)") def check_func (func, slots, rlen_sum): if slots != rlen_sum: global num_errors num_errors += 1 if not func: func = "[%#x-%#x]" % (start, end) print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum) return num_funcs = 0 num_errors = 0 func = False slots = 0 rlen_sum = 0 for line in os.popen("%s -u %s" % (readelf, sys.argv[1])): m = start_pattern.match(line) if m: check_func(func, slots, rlen_sum) func = m.group(1) start = long(m.group(2), 16) end = long(m.group(3), 16) slots = 3 * (end - start) / 16 rlen_sum = 0L num_funcs += 1 else: m = rlen_pattern.match(line) if m: rlen_sum += long(m.group(1)) check_func(func, slots, rlen_sum) if num_errors == 0: print "No errors detected in %u functions." % num_funcs else: if num_errors > 1: err="errors" else: err="error" print "%u %s detected in %u functions." % (num_errors, err, num_funcs) sys.exit(1)
gpl-2.0
labcodes/django
django/contrib/auth/admin.py
29
8711
from django.conf import settings from django.conf.urls import url from django.contrib import admin, messages from django.contrib.admin.options import IS_POPUP_VAR from django.contrib.admin.utils import unquote from django.contrib.auth import update_session_auth_hash from django.contrib.auth.forms import ( AdminPasswordChangeForm, UserChangeForm, UserCreationForm, ) from django.contrib.auth.models import Group, User from django.core.exceptions import PermissionDenied from django.db import router, transaction from django.http import Http404, HttpResponseRedirect from django.template.response import TemplateResponse from django.urls import reverse from django.utils.decorators import method_decorator from django.utils.html import escape from django.utils.translation import gettext, gettext_lazy as _ from django.views.decorators.csrf import csrf_protect from django.views.decorators.debug import sensitive_post_parameters csrf_protect_m = method_decorator(csrf_protect) sensitive_post_parameters_m = method_decorator(sensitive_post_parameters()) @admin.register(Group) class GroupAdmin(admin.ModelAdmin): search_fields = ('name',) ordering = ('name',) filter_horizontal = ('permissions',) def formfield_for_manytomany(self, db_field, request=None, **kwargs): if db_field.name == 'permissions': qs = kwargs.get('queryset', db_field.remote_field.model.objects) # Avoid a major performance hit resolving permission names which # triggers a content_type load: kwargs['queryset'] = qs.select_related('content_type') return super().formfield_for_manytomany(db_field, request=request, **kwargs) @admin.register(User) class UserAdmin(admin.ModelAdmin): add_form_template = 'admin/auth/user/add_form.html' change_user_password_template = None fieldsets = ( (None, {'fields': ('username', 'password')}), (_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}), (_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser', 'groups', 'user_permissions')}), (_('Important 
dates'), {'fields': ('last_login', 'date_joined')}), ) add_fieldsets = ( (None, { 'classes': ('wide',), 'fields': ('username', 'password1', 'password2'), }), ) form = UserChangeForm add_form = UserCreationForm change_password_form = AdminPasswordChangeForm list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff') list_filter = ('is_staff', 'is_superuser', 'is_active', 'groups') search_fields = ('username', 'first_name', 'last_name', 'email') ordering = ('username',) filter_horizontal = ('groups', 'user_permissions',) def get_fieldsets(self, request, obj=None): if not obj: return self.add_fieldsets return super().get_fieldsets(request, obj) def get_form(self, request, obj=None, **kwargs): """ Use special form during user creation """ defaults = {} if obj is None: defaults['form'] = self.add_form defaults.update(kwargs) return super().get_form(request, obj, **defaults) def get_urls(self): return [ url( r'^(.+)/password/$', self.admin_site.admin_view(self.user_change_password), name='auth_user_password_change', ), ] + super().get_urls() def lookup_allowed(self, lookup, value): # See #20078: we don't want to allow any lookups involving passwords. if lookup.startswith('password'): return False return super().lookup_allowed(lookup, value) @sensitive_post_parameters_m @csrf_protect_m def add_view(self, request, form_url='', extra_context=None): with transaction.atomic(using=router.db_for_write(self.model)): return self._add_view(request, form_url, extra_context) def _add_view(self, request, form_url='', extra_context=None): # It's an error for a user to have add permission but NOT change # permission for users. If we allowed such users to add users, they # could create superusers, which would mean they would essentially have # the permission to change users. To avoid the problem entirely, we # disallow users from adding users if they don't have change # permission. 
if not self.has_change_permission(request): if self.has_add_permission(request) and settings.DEBUG: # Raise Http404 in debug mode so that the user gets a helpful # error message. raise Http404( 'Your user does not have the "Change user" permission. In ' 'order to add users, Django requires that your user ' 'account have both the "Add user" and "Change user" ' 'permissions set.') raise PermissionDenied if extra_context is None: extra_context = {} username_field = self.model._meta.get_field(self.model.USERNAME_FIELD) defaults = { 'auto_populated_fields': (), 'username_help_text': username_field.help_text, } extra_context.update(defaults) return super().add_view(request, form_url, extra_context) @sensitive_post_parameters_m def user_change_password(self, request, id, form_url=''): if not self.has_change_permission(request): raise PermissionDenied user = self.get_object(request, unquote(id)) if user is None: raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % { 'name': self.model._meta.verbose_name, 'key': escape(id), }) if request.method == 'POST': form = self.change_password_form(user, request.POST) if form.is_valid(): form.save() change_message = self.construct_change_message(request, form, None) self.log_change(request, user, change_message) msg = gettext('Password changed successfully.') messages.success(request, msg) update_session_auth_hash(request, form.user) return HttpResponseRedirect( reverse( '%s:%s_%s_change' % ( self.admin_site.name, user._meta.app_label, user._meta.model_name, ), args=(user.pk,), ) ) else: form = self.change_password_form(user) fieldsets = [(None, {'fields': list(form.base_fields)})] adminForm = admin.helpers.AdminForm(form, fieldsets, {}) context = { 'title': _('Change password: %s') % escape(user.get_username()), 'adminForm': adminForm, 'form_url': form_url, 'form': form, 'is_popup': (IS_POPUP_VAR in request.POST or IS_POPUP_VAR in request.GET), 'add': True, 'change': False, 'has_delete_permission': False, 
'has_change_permission': True, 'has_absolute_url': False, 'opts': self.model._meta, 'original': user, 'save_as': False, 'show_save': True, } context.update(self.admin_site.each_context(request)) request.current_app = self.admin_site.name return TemplateResponse( request, self.change_user_password_template or 'admin/auth/user/change_password.html', context, ) def response_add(self, request, obj, post_url_continue=None): """ Determine the HttpResponse for the add_view stage. It mostly defers to its superclass implementation but is customized because the User model has a slightly different workflow. """ # We should allow further modification of the user just added i.e. the # 'Save' button should behave like the 'Save and continue editing' # button except in two scenarios: # * The user has pressed the 'Save and add another' button # * We are adding a user in a popup if '_addanother' not in request.POST and IS_POPUP_VAR not in request.POST: request.POST = request.POST.copy() request.POST['_continue'] = 1 return super().response_add(request, obj, post_url_continue)
bsd-3-clause
replicatorg/ReplicatorG
skein_engines/skeinforge-47/skeinforge_application/skeinforge_utilities/skeinforge_craft.py
4
9113
""" Craft is a script to access the plugins which craft a gcode file. The plugin buttons which are commonly used are bolded and the ones which are rarely used have normal font weight. """ from __future__ import absolute_import #Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module. import __init__ from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret from fabmetheus_utilities import archive from fabmetheus_utilities import euclidean from fabmetheus_utilities import gcodec from fabmetheus_utilities import settings from skeinforge_application.skeinforge_utilities import skeinforge_analyze from skeinforge_application.skeinforge_utilities import skeinforge_polyfile from skeinforge_application.skeinforge_utilities import skeinforge_profile import os import sys import time __author__ = 'Enrique Perez (perez_enrique@yahoo.com)' __date__ = '$Date: 2008/21/04 $' __license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html' def getChainText( fileName, procedure ): "Get a crafted shape file." text='' if fileName.endswith('.gcode') or fileName.endswith('.svg'): text = archive.getFileText(fileName) procedures = getProcedures( procedure, text ) return getChainTextFromProcedures( fileName, procedures, text ) def getChainTextFromProcedures(fileName, procedures, text): 'Get a crafted shape file from a list of procedures.' lastProcedureTime = time.time() for procedure in procedures: craftModule = getCraftModule(procedure) if craftModule != None: text = craftModule.getCraftedText(fileName, text) if text == '': print('Warning, the text was not recognized in getChainTextFromProcedures in skeinforge_craft for') print(fileName) return '' if gcodec.isProcedureDone( text, procedure ): print('%s procedure took %s.' 
% (procedure.capitalize(), euclidean.getDurationString(time.time() - lastProcedureTime))) lastProcedureTime = time.time() return text def getCraftModule(pluginName): 'Get craft module.' return archive.getModuleWithDirectoryPath(getPluginsDirectoryPath(), pluginName) def getCraftPreferences(pluginName): 'Get craft preferences.' return settings.getReadRepository(getCraftModule(pluginName).getNewRepository()).preferences def getCraftValue(preferenceName, preferences): "Get craft preferences value." for preference in preferences: if preference.name.startswith(preferenceName): return preference.value return None def getLastModule(): "Get the last tool." craftSequence = getReadCraftSequence() if len( craftSequence ) < 1: return None return getCraftModule( craftSequence[-1] ) def getNewRepository(): 'Get new repository.' return CraftRepository() def getPluginFileNames(): "Get craft plugin fileNames." craftSequence = getReadCraftSequence() craftSequence.sort() return craftSequence def getPluginsDirectoryPath(): "Get the plugins directory path." return archive.getCraftPluginsDirectoryPath() def getProcedures( procedure, text ): "Get the procedures up to and including the given procedure." craftSequence = getReadCraftSequence() sequenceIndexPlusOneFromText = getSequenceIndexPlusOneFromText(text) sequenceIndexFromProcedure = getSequenceIndexFromProcedure(procedure) return craftSequence[ sequenceIndexPlusOneFromText : sequenceIndexFromProcedure + 1 ] def getReadCraftSequence(): "Get profile sequence." return skeinforge_profile.getCraftTypePluginModule().getCraftSequence() def getSequenceIndexFromProcedure(procedure): "Get the profile sequence index of the procedure. Return None if the procedure is not in the sequence" craftSequence = getReadCraftSequence() if procedure not in craftSequence: return 0 return craftSequence.index(procedure) def getSequenceIndexPlusOneFromText(fileText): "Get the profile sequence index of the file plus one. 
Return zero if the procedure is not in the file" craftSequence = getReadCraftSequence() for craftSequenceIndex in xrange( len( craftSequence ) - 1, - 1, - 1 ): procedure = craftSequence[ craftSequenceIndex ] if gcodec.isProcedureDone( fileText, procedure ): return craftSequenceIndex + 1 return 0 def writeChainTextWithNounMessage(fileName, procedure, shouldAnalyze=True): 'Get and write a crafted shape file.' print('') print('The %s tool is parsing the file:' % procedure) print(os.path.basename(fileName)) print('') startTime = time.time() fileNameSuffix = fileName[: fileName.rfind('.')] + '_' + procedure + '.gcode' craftText = getChainText(fileName, procedure) if craftText == '': print('Warning, there was no text output in writeChainTextWithNounMessage in skeinforge_craft for:') print(fileName) return archive.writeFileText(fileNameSuffix, craftText) window = None if shouldAnalyze: window = skeinforge_analyze.writeOutput(fileName, fileNameSuffix, fileNameSuffix, True, craftText) print('') print('The %s tool has created the file:' % procedure) print(fileNameSuffix) print('') print('It took %s to craft the file.' % euclidean.getDurationString(time.time() - startTime)) return window def writeOutput(fileName, shouldAnalyze=True): "Craft a gcode file with the last module." pluginModule = getLastModule() if pluginModule != None: return pluginModule.writeOutput(fileName, shouldAnalyze) def writeSVGTextWithNounMessage(fileName, repository, shouldAnalyze=True): 'Get and write an svg text and print messages.' 
print('') print('The %s tool is parsing the file:' % repository.lowerName) print(os.path.basename(fileName)) print('') startTime = time.time() fileNameSuffix = fileName[: fileName.rfind('.')] + '_' + repository.lowerName + '.svg' craftText = getChainText(fileName, repository.lowerName) if craftText == '': return archive.writeFileText(fileNameSuffix, craftText) print('') print('The %s tool has created the file:' % repository.lowerName) print(fileNameSuffix) print('') print('It took %s to craft the file.' % euclidean.getDurationString(time.time() - startTime)) if shouldAnalyze: settings.getReadRepository(repository) settings.openSVGPage(fileNameSuffix, repository.svgViewer.value) class CraftRadioButtonsSaveListener: "A class to update the craft radio buttons." def addToDialog( self, gridPosition ): "Add this to the dialog." euclidean.addElementToListDictionaryIfNotThere( self, self.repository.repositoryDialog, settings.globalProfileSaveListenerListTable ) self.gridPosition = gridPosition.getCopy() self.gridPosition.row = gridPosition.rowStart self.gridPosition.increment() self.setRadioButtons() def getFromRadioPlugins( self, radioPlugins, repository ): "Initialize." self.name = 'CraftRadioButtonsSaveListener' self.radioPlugins = radioPlugins self.repository = repository repository.displayEntities.append(self) return self def save(self): "Profile has been saved and craft radio plugins should be updated." self.setRadioButtons() def setRadioButtons(self): "Profile has been saved and craft radio plugins should be updated." 
activeRadioPlugins = [] craftSequence = skeinforge_profile.getCraftTypePluginModule().getCraftSequence() gridPosition = self.gridPosition.getCopy() isRadioPluginSelected = False settings.getReadRepository(self.repository) for radioPlugin in self.radioPlugins: if radioPlugin.name in craftSequence: activeRadioPlugins.append(radioPlugin) radioPlugin.incrementGridPosition(gridPosition) if radioPlugin.value: radioPlugin.setSelect() isRadioPluginSelected = True else: radioPlugin.radiobutton.grid_remove() if not isRadioPluginSelected: radioPluginNames = self.repository.importantFileNames + [activeRadioPlugins[0].name] settings.getSelectedRadioPlugin(radioPluginNames , activeRadioPlugins).setSelect() self.repository.pluginFrame.update() class CraftRepository: "A class to handle the craft settings." def __init__(self): "Set the default settings, execute title & settings fileName." skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_utilities.skeinforge_craft.html', self) self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Craft', self, '') self.importantFileNames = ['carve', 'chop', 'feed', 'flow', 'lift', 'raft', 'speed'] allCraftNames = archive.getPluginFileNamesFromDirectoryPath(getPluginsDirectoryPath()) self.radioPlugins = settings.getRadioPluginsAddPluginFrame(getPluginsDirectoryPath(), self.importantFileNames, allCraftNames, self) CraftRadioButtonsSaveListener().getFromRadioPlugins(self.radioPlugins, self) self.executeTitle = 'Craft' def execute(self): "Craft button has been clicked." fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode( self.fileNameInput.value, [], self.fileNameInput.wasCancelled ) for fileName in fileNames: writeOutput(fileName) def main(): "Write craft output." writeOutput(' '.join(sys.argv[1 :]), False) if __name__ == "__main__": main()
gpl-2.0
vnsofthe/odoo-dev
addons/crm/wizard/crm_lead_to_opportunity.py
146
13701
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv from openerp.tools.translate import _ import re class crm_lead2opportunity_partner(osv.osv_memory): _name = 'crm.lead2opportunity.partner' _description = 'Lead To Opportunity Partner' _inherit = 'crm.partner.binding' _columns = { 'name': fields.selection([ ('convert', 'Convert to opportunity'), ('merge', 'Merge with existing opportunities') ], 'Conversion Action', required=True), 'opportunity_ids': fields.many2many('crm.lead', string='Opportunities'), 'user_id': fields.many2one('res.users', 'Salesperson', select=True), 'section_id': fields.many2one('crm.case.section', 'Sales Team', select=True), } def onchange_action(self, cr, uid, ids, action, context=None): return {'value': {'partner_id': False if action != 'exist' else self._find_matching_partner(cr, uid, context=context)}} def _get_duplicated_leads(self, cr, uid, partner_id, email, include_lost=False, context=None): """ Search for opportunities that have the same partner and that arent done or cancelled """ return 
self.pool.get('crm.lead')._get_duplicated_leads_by_emails(cr, uid, partner_id, email, include_lost=include_lost, context=context) def default_get(self, cr, uid, fields, context=None): """ Default get for name, opportunity_ids. If there is an exisitng partner link to the lead, find all existing opportunities links with this partner to merge all information together """ lead_obj = self.pool.get('crm.lead') res = super(crm_lead2opportunity_partner, self).default_get(cr, uid, fields, context=context) if context.get('active_id'): tomerge = [int(context['active_id'])] partner_id = res.get('partner_id') lead = lead_obj.browse(cr, uid, int(context['active_id']), context=context) email = lead.partner_id and lead.partner_id.email or lead.email_from tomerge.extend(self._get_duplicated_leads(cr, uid, partner_id, email, include_lost=True, context=context)) tomerge = list(set(tomerge)) if 'action' in fields and not res.get('action'): res.update({'action' : partner_id and 'exist' or 'create'}) if 'partner_id' in fields: res.update({'partner_id' : partner_id}) if 'name' in fields: res.update({'name' : len(tomerge) >= 2 and 'merge' or 'convert'}) if 'opportunity_ids' in fields and len(tomerge) >= 2: res.update({'opportunity_ids': tomerge}) if lead.user_id: res.update({'user_id': lead.user_id.id}) if lead.section_id: res.update({'section_id': lead.section_id.id}) return res def on_change_user(self, cr, uid, ids, user_id, section_id, context=None): """ When changing the user, also set a section_id or restrict section id to the ones user_id is member of. 
""" if user_id: if section_id: user_in_section = self.pool.get('crm.case.section').search(cr, uid, [('id', '=', section_id), '|', ('user_id', '=', user_id), ('member_ids', '=', user_id)], context=context, count=True) else: user_in_section = False if not user_in_section: result = self.pool['crm.lead'].on_change_user(cr, uid, ids, user_id, context=context) section_id = result.get('value') and result['value'].get('section_id') and result['value']['section_id'] or False return {'value': {'section_id': section_id}} def view_init(self, cr, uid, fields, context=None): """ Check some preconditions before the wizard executes. """ if context is None: context = {} lead_obj = self.pool.get('crm.lead') for lead in lead_obj.browse(cr, uid, context.get('active_ids', []), context=context): if lead.probability == 100: raise osv.except_osv(_("Warning!"), _("Closed/Dead leads cannot be converted into opportunities.")) return False def _convert_opportunity(self, cr, uid, ids, vals, context=None): if context is None: context = {} lead = self.pool.get('crm.lead') res = False lead_ids = vals.get('lead_ids', []) team_id = vals.get('section_id', False) partner_id = vals.get('partner_id') data = self.browse(cr, uid, ids, context=context)[0] leads = lead.browse(cr, uid, lead_ids, context=context) for lead_id in leads: partner_id = self._create_partner(cr, uid, lead_id.id, data.action, partner_id or lead_id.partner_id.id, context=context) res = lead.convert_opportunity(cr, uid, [lead_id.id], partner_id, [], False, context=context) user_ids = vals.get('user_ids', False) if context.get('no_force_assignation'): leads_to_allocate = [lead_id.id for lead_id in leads if not lead_id.user_id] else: leads_to_allocate = lead_ids if user_ids: lead.allocate_salesman(cr, uid, leads_to_allocate, user_ids, team_id=team_id, context=context) return res def action_apply(self, cr, uid, ids, context=None): """ Convert lead to opportunity or merge lead and opportunity and open the freshly created opportunity view. 
""" if context is None: context = {} lead_obj = self.pool['crm.lead'] w = self.browse(cr, uid, ids, context=context)[0] opp_ids = [o.id for o in w.opportunity_ids] vals = { 'section_id': w.section_id.id, } if w.partner_id: vals['partner_id'] = w.partner_id.id if w.name == 'merge': lead_id = lead_obj.merge_opportunity(cr, uid, opp_ids, context=context) lead_ids = [lead_id] lead = lead_obj.read(cr, uid, lead_id, ['type', 'user_id'], context=context) if lead['type'] == "lead": context = dict(context, active_ids=lead_ids) vals.update({'lead_ids': lead_ids, 'user_ids': [w.user_id.id]}) self._convert_opportunity(cr, uid, ids, vals, context=context) elif not context.get('no_force_assignation') or not lead['user_id']: vals.update({'user_id': w.user_id.id}) lead_obj.write(cr, uid, lead_id, vals, context=context) else: lead_ids = context.get('active_ids', []) vals.update({'lead_ids': lead_ids, 'user_ids': [w.user_id.id]}) self._convert_opportunity(cr, uid, ids, vals, context=context) return self.pool.get('crm.lead').redirect_opportunity_view(cr, uid, lead_ids[0], context=context) def _create_partner(self, cr, uid, lead_id, action, partner_id, context=None): """ Create partner based on action. 
:return dict: dictionary organized as followed: {lead_id: partner_assigned_id} """ #TODO this method in only called by crm_lead2opportunity_partner #wizard and would probably diserve to be refactored or at least #moved to a better place if context is None: context = {} lead = self.pool.get('crm.lead') if action == 'each_exist_or_create': ctx = dict(context) ctx['active_id'] = lead_id partner_id = self._find_matching_partner(cr, uid, context=ctx) action = 'create' res = lead.handle_partner_assignation(cr, uid, [lead_id], action, partner_id, context=context) return res.get(lead_id) class crm_lead2opportunity_mass_convert(osv.osv_memory): _name = 'crm.lead2opportunity.partner.mass' _description = 'Mass Lead To Opportunity Partner' _inherit = 'crm.lead2opportunity.partner' _columns = { 'user_ids': fields.many2many('res.users', string='Salesmen'), 'section_id': fields.many2one('crm.case.section', 'Sales Team', select=True), 'deduplicate': fields.boolean('Apply deduplication', help='Merge with existing leads/opportunities of each partner'), 'action': fields.selection([ ('each_exist_or_create', 'Use existing partner or create'), ('nothing', 'Do not link to a customer') ], 'Related Customer', required=True), 'force_assignation': fields.boolean('Force assignation', help='If unchecked, this will leave the salesman of duplicated opportunities'), } _defaults = { 'deduplicate': True, } def default_get(self, cr, uid, fields, context=None): res = super(crm_lead2opportunity_mass_convert, self).default_get(cr, uid, fields, context) if 'partner_id' in fields: # avoid forcing the partner of the first lead as default res['partner_id'] = False if 'action' in fields: res['action'] = 'each_exist_or_create' if 'name' in fields: res['name'] = 'convert' if 'opportunity_ids' in fields: res['opportunity_ids'] = False return res def on_change_action(self, cr, uid, ids, action, context=None): vals = {} if action != 'exist': vals = {'value': {'partner_id': False}} return vals def 
on_change_deduplicate(self, cr, uid, ids, deduplicate, context=None): if context is None: context = {} active_leads = self.pool['crm.lead'].browse(cr, uid, context['active_ids'], context=context) partner_ids = [(lead.partner_id.id, lead.partner_id and lead.partner_id.email or lead.email_from) for lead in active_leads] partners_duplicated_leads = {} for partner_id, email in partner_ids: duplicated_leads = self._get_duplicated_leads(cr, uid, partner_id, email) if len(duplicated_leads) > 1: partners_duplicated_leads.setdefault((partner_id, email), []).extend(duplicated_leads) leads_with_duplicates = [] for lead in active_leads: lead_tuple = (lead.partner_id.id, lead.partner_id.email if lead.partner_id else lead.email_from) if len(partners_duplicated_leads.get(lead_tuple, [])) > 1: leads_with_duplicates.append(lead.id) return {'value': {'opportunity_ids': leads_with_duplicates}} def _convert_opportunity(self, cr, uid, ids, vals, context=None): """ When "massively" (more than one at a time) converting leads to opportunities, check the salesteam_id and salesmen_ids and update the values before calling super. 
""" if context is None: context = {} data = self.browse(cr, uid, ids, context=context)[0] salesteam_id = data.section_id and data.section_id.id or False salesmen_ids = [] if data.user_ids: salesmen_ids = [x.id for x in data.user_ids] vals.update({'user_ids': salesmen_ids, 'section_id': salesteam_id}) return super(crm_lead2opportunity_mass_convert, self)._convert_opportunity(cr, uid, ids, vals, context=context) def mass_convert(self, cr, uid, ids, context=None): data = self.browse(cr, uid, ids, context=context)[0] ctx = dict(context) if data.name == 'convert' and data.deduplicate: merged_lead_ids = [] remaining_lead_ids = [] lead_selected = context.get('active_ids', []) for lead_id in lead_selected: if lead_id not in merged_lead_ids: lead = self.pool['crm.lead'].browse(cr, uid, lead_id, context=context) duplicated_lead_ids = self._get_duplicated_leads(cr, uid, lead.partner_id.id, lead.partner_id and lead.partner_id.email or lead.email_from) if len(duplicated_lead_ids) > 1: lead_id = self.pool.get('crm.lead').merge_opportunity(cr, uid, duplicated_lead_ids, False, False, context=context) merged_lead_ids.extend(duplicated_lead_ids) remaining_lead_ids.append(lead_id) active_ids = set(context.get('active_ids', [])) active_ids = active_ids.difference(merged_lead_ids) active_ids = active_ids.union(remaining_lead_ids) ctx['active_ids'] = list(active_ids) ctx['no_force_assignation'] = context.get('no_force_assignation', not data.force_assignation) return self.action_apply(cr, uid, ids, context=ctx) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
chungg/aodh
aodh/api/controllers/v2/utils.py
1
11984
# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance <licensing@enovance.com> # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime import inspect from oslo_utils import timeutils import pecan import wsme from aodh.api.controllers.v2 import base from aodh.api import rbac def get_auth_project(on_behalf_of=None): # when an alarm is created by an admin on behalf of another tenant # we must ensure for: # - threshold alarm, that an implicit query constraint on project_id is # added so that admin-level visibility on statistics is not leaked # - combination alarm, that alarm ids verification is scoped to # alarms owned by the alarm project. # hence for null auth_project (indicating admin-ness) we check if # the creating tenant differs from the tenant on whose behalf the # alarm is being created auth_project = rbac.get_limited_to_project(pecan.request.headers) created_by = pecan.request.headers.get('X-Project-Id') is_admin = auth_project is None if is_admin and on_behalf_of != created_by: auth_project = on_behalf_of return auth_project def sanitize_query(query, db_func, on_behalf_of=None): """Check the query. See if: 1) the request is coming from admin - then allow full visibility 2) non-admin - make sure that the query includes the requester's project. 
""" q = copy.copy(query) auth_project = get_auth_project(on_behalf_of) if auth_project: _verify_query_segregation(q, auth_project) proj_q = [i for i in q if i.field == 'project_id'] valid_keys = inspect.getargspec(db_func)[0] if not proj_q and 'on_behalf_of' not in valid_keys: # The user is restricted, but they didn't specify a project # so add it for them. q.append(base.Query(field='project_id', op='eq', value=auth_project)) return q def _verify_query_segregation(query, auth_project=None): """Ensure non-admin queries are not constrained to another project.""" auth_project = (auth_project or rbac.get_limited_to_project(pecan.request.headers)) if not auth_project: return for q in query: if q.field in ('project', 'project_id') and auth_project != q.value: raise base.ProjectNotAuthorized(q.value) def validate_query(query, db_func, internal_keys=None, allow_timestamps=True): """Validates the syntax of the query and verifies the query. Verification check if the query request is authorized for the included project. 
:param query: Query expression that should be validated :param db_func: the function on the storage level, of which arguments will form the valid_keys list, which defines the valid fields for a query expression :param internal_keys: internally used field names, that should not be used for querying :param allow_timestamps: defines whether the timestamp-based constraint is applicable for this query or not :raises InvalidInput: if an operator is not supported for a given field :raises InvalidInput: if timestamp constraints are allowed, but search_offset was included without timestamp constraint :raises: UnknownArgument: if a field name is not a timestamp field, nor in the list of valid keys """ internal_keys = internal_keys or [] _verify_query_segregation(query) valid_keys = inspect.getargspec(db_func)[0] if 'alarm_type' in valid_keys: valid_keys.remove('alarm_type') valid_keys.append('type') internal_timestamp_keys = ['end_timestamp', 'start_timestamp', 'end_timestamp_op', 'start_timestamp_op'] if 'start_timestamp' in valid_keys: internal_keys += internal_timestamp_keys valid_keys += ['timestamp', 'search_offset'] internal_keys.append('self') internal_keys.append('metaquery') valid_keys = set(valid_keys) - set(internal_keys) translation = {'user_id': 'user', 'project_id': 'project', 'resource_id': 'resource'} has_timestamp_query = _validate_timestamp_fields(query, 'timestamp', ('lt', 'le', 'gt', 'ge'), allow_timestamps) has_search_offset_query = _validate_timestamp_fields(query, 'search_offset', 'eq', allow_timestamps) if has_search_offset_query and not has_timestamp_query: raise wsme.exc.InvalidInput('field', 'search_offset', "search_offset cannot be used without " + "timestamp") def _is_field_metadata(field): return (field.startswith('metadata.') or field.startswith('resource_metadata.')) for i in query: if i.field not in ('timestamp', 'search_offset'): key = translation.get(i.field, i.field) operator = i.op if key in valid_keys or _is_field_metadata(i.field): if 
operator == 'eq': if key == 'enabled': i._get_value_as_type('boolean') elif _is_field_metadata(key): i._get_value_as_type() else: raise wsme.exc.InvalidInput('op', i.op, 'unimplemented operator for ' '%s' % i.field) else: msg = ("unrecognized field in query: %s, " "valid keys: %s") % (query, sorted(valid_keys)) raise wsme.exc.UnknownArgument(key, msg) def _validate_timestamp_fields(query, field_name, operator_list, allow_timestamps): """Validates the timestamp related constraints in a query if there are any. :param query: query expression that may contain the timestamp fields :param field_name: timestamp name, which should be checked (timestamp, search_offset) :param operator_list: list of operators that are supported for that timestamp, which was specified in the parameter field_name :param allow_timestamps: defines whether the timestamp-based constraint is applicable to this query or not :returns: True, if there was a timestamp constraint, containing a timestamp field named as defined in field_name, in the query and it was allowed and syntactically correct. :returns: False, if there wasn't timestamp constraint, containing a timestamp field named as defined in field_name, in the query :raises InvalidInput: if an operator is unsupported for a given timestamp field :raises UnknownArgument: if the timestamp constraint is not allowed in the query """ for item in query: if item.field == field_name: # If *timestamp* or *search_offset* field was specified in the # query, but timestamp is not supported on that resource, on # which the query was invoked, then raise an exception. 
if not allow_timestamps: raise wsme.exc.UnknownArgument(field_name, "not valid for " + "this resource") if item.op not in operator_list: raise wsme.exc.InvalidInput('op', item.op, 'unimplemented operator for %s' % item.field) return True return False def query_to_kwargs(query, db_func, internal_keys=None, allow_timestamps=True): validate_query(query, db_func, internal_keys=internal_keys, allow_timestamps=allow_timestamps) query = sanitize_query(query, db_func) translation = {'user_id': 'user', 'project_id': 'project', 'resource_id': 'resource', 'type': 'alarm_type'} stamp = {} metaquery = {} kwargs = {} for i in query: if i.field == 'timestamp': if i.op in ('lt', 'le'): stamp['end_timestamp'] = i.value stamp['end_timestamp_op'] = i.op elif i.op in ('gt', 'ge'): stamp['start_timestamp'] = i.value stamp['start_timestamp_op'] = i.op else: if i.op == 'eq': if i.field == 'search_offset': stamp['search_offset'] = i.value elif i.field == 'enabled': kwargs[i.field] = i._get_value_as_type('boolean') elif i.field.startswith('metadata.'): metaquery[i.field] = i._get_value_as_type() elif i.field.startswith('resource_metadata.'): metaquery[i.field[9:]] = i._get_value_as_type() else: key = translation.get(i.field, i.field) kwargs[key] = i.value if metaquery and 'metaquery' in inspect.getargspec(db_func)[0]: kwargs['metaquery'] = metaquery if stamp: kwargs.update(_get_query_timestamps(stamp)) return kwargs def _get_query_timestamps(args=None): """Return any optional timestamp information in the request. Determine the desired range, if any, from the GET arguments. Set up the query range using the specified offset. [query_start ... start_timestamp ... end_timestamp ... 
query_end] Returns a dictionary containing: start_timestamp: First timestamp to use for query start_timestamp_op: First timestamp operator to use for query end_timestamp: Final timestamp to use for query end_timestamp_op: Final timestamp operator to use for query """ if args is None: return {} search_offset = int(args.get('search_offset', 0)) def _parse_timestamp(timestamp): if not timestamp: return None try: iso_timestamp = timeutils.parse_isotime(timestamp) iso_timestamp = iso_timestamp.replace(tzinfo=None) except ValueError: raise wsme.exc.InvalidInput('timestamp', timestamp, 'invalid timestamp format') return iso_timestamp start_timestamp = _parse_timestamp(args.get('start_timestamp')) end_timestamp = _parse_timestamp(args.get('end_timestamp')) start_timestamp = start_timestamp - datetime.timedelta( minutes=search_offset) if start_timestamp else None end_timestamp = end_timestamp + datetime.timedelta( minutes=search_offset) if end_timestamp else None return {'start_timestamp': start_timestamp, 'end_timestamp': end_timestamp, 'start_timestamp_op': args.get('start_timestamp_op'), 'end_timestamp_op': args.get('end_timestamp_op')}
apache-2.0
linkmyth/advance_medical
node_modules/node-gyp/gyp/pylib/gyp/generator/eclipse.py
1825
17014
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """GYP backend that generates Eclipse CDT settings files. This backend DOES NOT generate Eclipse CDT projects. Instead, it generates XML files that can be imported into an Eclipse CDT project. The XML file contains a list of include paths and symbols (i.e. defines). Because a full .cproject definition is not created by this generator, it's not possible to properly define the include dirs and symbols for each file individually. Instead, one set of includes/symbols is generated for the entire project. This works fairly well (and is a vast improvement in general), but may still result in a few indexer issues here and there. This generator has no automated tests, so expect it to be broken. """ from xml.sax.saxutils import escape import os.path import subprocess import gyp import gyp.common import gyp.msvs_emulation import shlex import xml.etree.cElementTree as ET generator_wants_static_library_dependencies_adjusted = False generator_default_variables = { } for dirname in ['INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']: # Some gyp steps fail if these are empty(!), so we convert them to variables generator_default_variables[dirname] = '$' + dirname for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME', 'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT', 'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX', 'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX', 'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX', 'CONFIGURATION_NAME']: generator_default_variables[unused] = '' # Include dirs will occasionally use the SHARED_INTERMEDIATE_DIR variable as # part of the path when dealing with generated headers. This value will be # replaced dynamically for each configuration. 
generator_default_variables['SHARED_INTERMEDIATE_DIR'] = \ '$SHARED_INTERMEDIATE_DIR' def CalculateVariables(default_variables, params): generator_flags = params.get('generator_flags', {}) for key, val in generator_flags.items(): default_variables.setdefault(key, val) flavor = gyp.common.GetFlavor(params) default_variables.setdefault('OS', flavor) if flavor == 'win': # Copy additional generator configuration data from VS, which is shared # by the Eclipse generator. import gyp.generator.msvs as msvs_generator generator_additional_non_configuration_keys = getattr(msvs_generator, 'generator_additional_non_configuration_keys', []) generator_additional_path_sections = getattr(msvs_generator, 'generator_additional_path_sections', []) gyp.msvs_emulation.CalculateCommonVariables(default_variables, params) def CalculateGeneratorInputInfo(params): """Calculate the generator specific info that gets fed to input (called by gyp).""" generator_flags = params.get('generator_flags', {}) if generator_flags.get('adjust_static_libraries', False): global generator_wants_static_library_dependencies_adjusted generator_wants_static_library_dependencies_adjusted = True def GetAllIncludeDirectories(target_list, target_dicts, shared_intermediate_dirs, config_name, params, compiler_path): """Calculate the set of include directories to be used. Returns: A list including all the include_dir's specified for every target followed by any include directories that were added as cflag compiler options. """ gyp_includes_set = set() compiler_includes_list = [] # Find compiler's default include dirs. if compiler_path: command = shlex.split(compiler_path) command.extend(['-E', '-xc++', '-v', '-']) proc = subprocess.Popen(args=command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) output = proc.communicate()[1] # Extract the list of include dirs from the output, which has this format: # ... # #include "..." 
search starts here: # #include <...> search starts here: # /usr/include/c++/4.6 # /usr/local/include # End of search list. # ... in_include_list = False for line in output.splitlines(): if line.startswith('#include'): in_include_list = True continue if line.startswith('End of search list.'): break if in_include_list: include_dir = line.strip() if include_dir not in compiler_includes_list: compiler_includes_list.append(include_dir) flavor = gyp.common.GetFlavor(params) if flavor == 'win': generator_flags = params.get('generator_flags', {}) for target_name in target_list: target = target_dicts[target_name] if config_name in target['configurations']: config = target['configurations'][config_name] # Look for any include dirs that were explicitly added via cflags. This # may be done in gyp files to force certain includes to come at the end. # TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and # remove this. if flavor == 'win': msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags) cflags = msvs_settings.GetCflags(config_name) else: cflags = config['cflags'] for cflag in cflags: if cflag.startswith('-I'): include_dir = cflag[2:] if include_dir not in compiler_includes_list: compiler_includes_list.append(include_dir) # Find standard gyp include dirs. if config.has_key('include_dirs'): include_dirs = config['include_dirs'] for shared_intermediate_dir in shared_intermediate_dirs: for include_dir in include_dirs: include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR', shared_intermediate_dir) if not os.path.isabs(include_dir): base_dir = os.path.dirname(target_name) include_dir = base_dir + '/' + include_dir include_dir = os.path.abspath(include_dir) gyp_includes_set.add(include_dir) # Generate a list that has all the include dirs. 
all_includes_list = list(gyp_includes_set) all_includes_list.sort() for compiler_include in compiler_includes_list: if not compiler_include in gyp_includes_set: all_includes_list.append(compiler_include) # All done. return all_includes_list def GetCompilerPath(target_list, data, options): """Determine a command that can be used to invoke the compiler. Returns: If this is a gyp project that has explicit make settings, try to determine the compiler from that. Otherwise, see if a compiler was specified via the CC_target environment variable. """ # First, see if the compiler is configured in make's settings. build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0]) make_global_settings_dict = data[build_file].get('make_global_settings', {}) for key, value in make_global_settings_dict: if key in ['CC', 'CXX']: return os.path.join(options.toplevel_dir, value) # Check to see if the compiler was specified as an environment variable. for key in ['CC_target', 'CC', 'CXX']: compiler = os.environ.get(key) if compiler: return compiler return 'gcc' def GetAllDefines(target_list, target_dicts, data, config_name, params, compiler_path): """Calculate the defines for a project. Returns: A dict that includes explict defines declared in gyp files along with all of the default defines that the compiler uses. """ # Get defines declared in the gyp files. 
all_defines = {} flavor = gyp.common.GetFlavor(params) if flavor == 'win': generator_flags = params.get('generator_flags', {}) for target_name in target_list: target = target_dicts[target_name] if flavor == 'win': msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags) extra_defines = msvs_settings.GetComputedDefines(config_name) else: extra_defines = [] if config_name in target['configurations']: config = target['configurations'][config_name] target_defines = config['defines'] else: target_defines = [] for define in target_defines + extra_defines: split_define = define.split('=', 1) if len(split_define) == 1: split_define.append('1') if split_define[0].strip() in all_defines: # Already defined continue all_defines[split_define[0].strip()] = split_define[1].strip() # Get default compiler defines (if possible). if flavor == 'win': return all_defines # Default defines already processed in the loop above. if compiler_path: command = shlex.split(compiler_path) command.extend(['-E', '-dM', '-']) cpp_proc = subprocess.Popen(args=command, cwd='.', stdin=subprocess.PIPE, stdout=subprocess.PIPE) cpp_output = cpp_proc.communicate()[0] cpp_lines = cpp_output.split('\n') for cpp_line in cpp_lines: if not cpp_line.strip(): continue cpp_line_parts = cpp_line.split(' ', 2) key = cpp_line_parts[1] if len(cpp_line_parts) >= 3: val = cpp_line_parts[2] else: val = '1' all_defines[key] = val return all_defines def WriteIncludePaths(out, eclipse_langs, include_dirs): """Write the includes section of a CDT settings export file.""" out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' 
\ 'settingswizards.IncludePaths">\n') out.write(' <language name="holder for library settings"></language>\n') for lang in eclipse_langs: out.write(' <language name="%s">\n' % lang) for include_dir in include_dirs: out.write(' <includepath workspace_path="false">%s</includepath>\n' % include_dir) out.write(' </language>\n') out.write(' </section>\n') def WriteMacros(out, eclipse_langs, defines): """Write the macros section of a CDT settings export file.""" out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \ 'settingswizards.Macros">\n') out.write(' <language name="holder for library settings"></language>\n') for lang in eclipse_langs: out.write(' <language name="%s">\n' % lang) for key in sorted(defines.iterkeys()): out.write(' <macro><name>%s</name><value>%s</value></macro>\n' % (escape(key), escape(defines[key]))) out.write(' </language>\n') out.write(' </section>\n') def GenerateOutputForConfig(target_list, target_dicts, data, params, config_name): options = params['options'] generator_flags = params.get('generator_flags', {}) # build_dir: relative path from source root to our output files. # e.g. "out/Debug" build_dir = os.path.join(generator_flags.get('output_dir', 'out'), config_name) toplevel_build = os.path.join(options.toplevel_dir, build_dir) # Ninja uses out/Debug/gen while make uses out/Debug/obj/gen as the # SHARED_INTERMEDIATE_DIR. Include both possible locations. 
shared_intermediate_dirs = [os.path.join(toplevel_build, 'obj', 'gen'), os.path.join(toplevel_build, 'gen')] GenerateCdtSettingsFile(target_list, target_dicts, data, params, config_name, os.path.join(toplevel_build, 'eclipse-cdt-settings.xml'), options, shared_intermediate_dirs) GenerateClasspathFile(target_list, target_dicts, options.toplevel_dir, toplevel_build, os.path.join(toplevel_build, 'eclipse-classpath.xml')) def GenerateCdtSettingsFile(target_list, target_dicts, data, params, config_name, out_name, options, shared_intermediate_dirs): gyp.common.EnsureDirExists(out_name) with open(out_name, 'w') as out: out.write('<?xml version="1.0" encoding="UTF-8"?>\n') out.write('<cdtprojectproperties>\n') eclipse_langs = ['C++ Source File', 'C Source File', 'Assembly Source File', 'GNU C++', 'GNU C', 'Assembly'] compiler_path = GetCompilerPath(target_list, data, options) include_dirs = GetAllIncludeDirectories(target_list, target_dicts, shared_intermediate_dirs, config_name, params, compiler_path) WriteIncludePaths(out, eclipse_langs, include_dirs) defines = GetAllDefines(target_list, target_dicts, data, config_name, params, compiler_path) WriteMacros(out, eclipse_langs, defines) out.write('</cdtprojectproperties>\n') def GenerateClasspathFile(target_list, target_dicts, toplevel_dir, toplevel_build, out_name): '''Generates a classpath file suitable for symbol navigation and code completion of Java code (such as in Android projects) by finding all .java and .jar files used as action inputs.''' gyp.common.EnsureDirExists(out_name) result = ET.Element('classpath') def AddElements(kind, paths): # First, we need to normalize the paths so they are all relative to the # toplevel dir. 
rel_paths = set() for path in paths: if os.path.isabs(path): rel_paths.add(os.path.relpath(path, toplevel_dir)) else: rel_paths.add(path) for path in sorted(rel_paths): entry_element = ET.SubElement(result, 'classpathentry') entry_element.set('kind', kind) entry_element.set('path', path) AddElements('lib', GetJavaJars(target_list, target_dicts, toplevel_dir)) AddElements('src', GetJavaSourceDirs(target_list, target_dicts, toplevel_dir)) # Include the standard JRE container and a dummy out folder AddElements('con', ['org.eclipse.jdt.launching.JRE_CONTAINER']) # Include a dummy out folder so that Eclipse doesn't use the default /bin # folder in the root of the project. AddElements('output', [os.path.join(toplevel_build, '.eclipse-java-build')]) ET.ElementTree(result).write(out_name) def GetJavaJars(target_list, target_dicts, toplevel_dir): '''Generates a sequence of all .jars used as inputs.''' for target_name in target_list: target = target_dicts[target_name] for action in target.get('actions', []): for input_ in action['inputs']: if os.path.splitext(input_)[1] == '.jar' and not input_.startswith('$'): if os.path.isabs(input_): yield input_ else: yield os.path.join(os.path.dirname(target_name), input_) def GetJavaSourceDirs(target_list, target_dicts, toplevel_dir): '''Generates a sequence of all likely java package root directories.''' for target_name in target_list: target = target_dicts[target_name] for action in target.get('actions', []): for input_ in action['inputs']: if (os.path.splitext(input_)[1] == '.java' and not input_.startswith('$')): dir_ = os.path.dirname(os.path.join(os.path.dirname(target_name), input_)) # If there is a parent 'src' or 'java' folder, navigate up to it - # these are canonical package root names in Chromium. This will # break if 'src' or 'java' exists in the package structure. This # could be further improved by inspecting the java file for the # package name if this proves to be too fragile in practice. 
parent_search = dir_ while os.path.basename(parent_search) not in ['src', 'java']: parent_search, _ = os.path.split(parent_search) if not parent_search or parent_search == toplevel_dir: # Didn't find a known root, just return the original path yield dir_ break else: yield parent_search def GenerateOutput(target_list, target_dicts, data, params): """Generate an XML settings file that can be imported into a CDT project.""" if params['options'].generator_output: raise NotImplementedError("--generator_output not implemented for eclipse") user_config = params.get('generator_flags', {}).get('config', None) if user_config: GenerateOutputForConfig(target_list, target_dicts, data, params, user_config) else: config_names = target_dicts[target_list[0]]['configurations'].keys() for config_name in config_names: GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
mit
mrquim/repository.mrquim
repo/script.module.youtube.dl/lib/youtube_dl/extractor/porn91.py
40
1888
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( parse_duration, int_or_none, ExtractorError, ) class Porn91IE(InfoExtractor): IE_NAME = '91porn' _VALID_URL = r'(?:https?://)(?:www\.|)91porn\.com/.+?\?viewkey=(?P<id>[\w\d]+)' _TEST = { 'url': 'http://91porn.com/view_video.php?viewkey=7e42283b4f5ab36da134', 'md5': '7fcdb5349354f40d41689bd0fa8db05a', 'info_dict': { 'id': '7e42283b4f5ab36da134', 'title': '18岁大一漂亮学妹,水嫩性感,再爽一次!', 'ext': 'mp4', 'duration': 431, 'age_limit': 18, } } def _real_extract(self, url): video_id = self._match_id(url) self._set_cookie('91porn.com', 'language', 'cn_CN') webpage = self._download_webpage( 'http://91porn.com/view_video.php?viewkey=%s' % video_id, video_id) if '作为游客,你每天只可观看10个视频' in webpage: raise ExtractorError('91 Porn says: Daily limit 10 videos exceeded', expected=True) title = self._search_regex( r'<div id="viewvideo-title">([^<]+)</div>', webpage, 'title') title = title.replace('\n', '') info_dict = self._parse_html5_media_entries(url, webpage, video_id)[0] duration = parse_duration(self._search_regex( r'时长:\s*</span>\s*(\d+:\d+)', webpage, 'duration', fatal=False)) comment_count = int_or_none(self._search_regex( r'留言:\s*</span>\s*(\d+)', webpage, 'comment count', fatal=False)) info_dict.update({ 'id': video_id, 'title': title, 'duration': duration, 'comment_count': comment_count, 'age_limit': self._rta_search(webpage), }) return info_dict
gpl-2.0
ipab-rad/baxter_mill
src/baxter_mill/baxter_controller.py
1
5679
import sys
import random

import rospkg
import rospy

from std_msgs.msg import (
    String,
)

from sensor_msgs.msg import (
    Image,
)

import baxter_interface

import cv
import cv_bridge


class BaxterController(object):
    """Controls one Baxter arm for playing Nine Men's Morris (mill).

    Python 2 / ROS code: the controller loads pre-calibrated joint
    positions for the board cells and the piece-picking slots from a
    per-limb config file, and exposes pick/release motion primitives.
    """

    def __init__(self, limb):
        # `limb` is "left" or "right" (used to pick the per-limb config file
        # and to index self.acceptable).
        self._rp = rospkg.RosPack()
        self._config_path = self._rp.get_path('baxter_mill') + '/config/'
        self._config_file_path = self._config_path + limb + '_positions.config'
        # NOTE(review): face images are loaded from the `learn_play` package
        # while the config comes from `baxter_mill` — looks like a copy-paste
        # leftover from the learn_play project; confirm the images really
        # live there, otherwise get_path will raise.
        self._images_path = self._rp.get_path('learn_play') + '/share/images/'
        self._good_face_path = self._images_path + "good_face.jpg"
        self._angry_face_path = self._images_path + "angry_face.jpg"
        self._cheeky_face_path = self._images_path + "cheeky_face.jpg"
        self._limb = limb
        self._baxter_limb = baxter_interface.Limb(self._limb)
        self._baxter_gripper = baxter_interface.Gripper(self._limb)
        self._baxter_head = baxter_interface.Head()
        # Center the head before starting.
        self._baxter_head.set_pan(0.0)
        print "Calibrating gripper..."
        self._baxter_gripper.calibrate()
        self._baxter_gripper.set_holding_force(50.0)

        # Board cells reachable by each limb ('left') plus the nine
        # piece-picking slots ('positions'). Cell names are file/rank style
        # (column letter a-g + row digit 1-7); only the 24 valid mill points
        # are listed.
        self.acceptable = {'left': ['a1', 'a4', 'a7',
                                    'b2', 'b4', 'b6',
                                    'c3', 'c4', 'c5',
                                    'd1', 'd2', 'd3', 'd5', 'd6', 'd7',
                                    'e3', 'e4', 'e5',
                                    'f2', 'f4', 'f6',
                                    'g1', 'g4', 'g7'],
                           'positions': ['p1', 'p2', 'p3', 'p4', 'p5',
                                         'p6', 'p7', 'p8', 'p9']}
        # name -> (grasp pose, approach pose) maps filled by _read_config().
        self._mill_pos = {}
        self._picking_pos = {}
        self.picked = False
        self._is_pickable = False
        self._nodded = False

        if (not self._check_config()):
            exit(0)
        self._read_config(self._config_file_path)

    def _get_mill_pos(self, x, y, limb):
        """Map 0-based grid indices (x=column, y=row) to a cell name.

        Returns e.g. 'a1' for (0, 0), or None when the cell is not a
        valid mill point for this limb.
        """
        alph = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
        mill_x = alph[x]
        mill = mill_x + str(y+1)
        if mill in self.acceptable[limb]:
            return mill
        else:
            return None

    def _check_config(self):
        """Interactively confirm the arm has been calibrated.

        Loops until the user answers y/n; returns True to proceed,
        False to abort startup.
        """
        ri = ""
        while (1):
            ri = raw_input("Have you calibrated the arm? [y/n] ")
            if ri.lower() == "y":
                print "Awesome! Carry on."
                return True
            elif ri.lower() == "n":
                print ">> run `rosrun baxter_mill calibrate.py` first! <<"
                return False

    def _read_config(self, file):
        """
        Read positions from config file.

        The file is a sequence of `name=value` lines produced by the
        calibration script, consumed strictly in order: first the nine
        picking slots p1..p9, then the neutral pose, then one entry per
        valid mill cell in (x, y) scan order.

        NOTE(review): values are parsed with eval(); the config file is
        assumed to be trusted calibration output written by calibrate.py.
        """
        print "Reading positions from file."
        f = open(file, 'r')
        lines = f.readlines()
        splitted = [line.split("=") for line in lines]
        for x in range(1, 10):
            t = "p" + str(x)
            self._picking_pos[t] = eval(splitted.pop(0)[1])
        self._neutral_pos = eval(splitted.pop(0)[1])
        for x in range(7):
            for y in range(7):
                t = self._get_mill_pos(x, y, self._limb)
                if not t:
                    # Not a valid mill point -> no entry in the config.
                    continue
                self._mill_pos[t] = eval(splitted.pop(0)[1])
        print "Positions are in memory."
        f.close()

    def send_image(self, path):
        """
        Send the image located at the specified path to the head
        display on Baxter.

        @param path: path to the image file to load and send
        """
        img = cv.LoadImage(path)
        msg = cv_bridge.CvBridge().cv_to_imgmsg(img, encoding="bgr8")
        pub = rospy.Publisher('/robot/xdisplay', Image, latch=True)
        pub.publish(msg)
        # Sleep to allow for image to be published.
        # rospy.sleep(1)

    def head_turn(self, direction=-1):
        """Pan the head to one side and back to center.

        -1 = left, 1 = right
        """
        self._baxter_head.set_pan(direction*0.8, 50)
        self._baxter_head.set_pan(0.0, 10)

    def gripper_open(self, percentage):
        """Open the gripper to `percentage` of its range.

        Returns the command result, or False when percentage >= 100
        (values of 100+ are rejected; presumably a hardware-limit guard
        — confirm against the gripper API).
        """
        if percentage < 100:
            return self._baxter_gripper.command_position(percentage)
        else:
            return False

    def pick(self, pos):
        """Pick a piece from `pos` ('pN' picking slot or a mill cell).

        Motion sequence: neutral -> approach pose (index 1) -> grasp
        pose (index 0) -> close gripper -> retreat to approach pose.
        """
        self._baxter_limb.move_to_joint_positions(self._neutral_pos)
        if pos[0] == "p":
            self._baxter_limb.move_to_joint_positions(self._picking_pos[(pos)][1])
            self._baxter_limb.move_to_joint_positions(self._picking_pos[(pos)][0])
            self._baxter_gripper.close()
            rospy.sleep(0.2)
            self._baxter_limb.move_to_joint_positions(self._picking_pos[(pos)][1])
        else:
            self._baxter_limb.move_to_joint_positions(self._mill_pos[(pos)][1])
            self._baxter_limb.move_to_joint_positions(self._mill_pos[(pos)][0])
            self._baxter_gripper.close()
            rospy.sleep(0.2)
            self._baxter_limb.move_to_joint_positions(self._mill_pos[(pos)][1])

    def release(self, pos):
        """Place the held piece at `pos`, mirroring pick() but opening
        the gripper at the grasp pose, then return to neutral."""
        self._baxter_limb.move_to_joint_positions(self._neutral_pos)
        if pos[0] == "p":
            self._baxter_limb.move_to_joint_positions(self._picking_pos[(pos)][1])
            self._baxter_limb.move_to_joint_positions(self._picking_pos[(pos)][0])
            self.gripper_open(70)
            rospy.sleep(0.2)
            self._baxter_limb.move_to_joint_positions(self._picking_pos[(pos)][1])
        else:
            self._baxter_limb.move_to_joint_positions(self._mill_pos[(pos)][1])
            self._baxter_limb.move_to_joint_positions(self._mill_pos[(pos)][0])
            self.gripper_open(70)
            rospy.sleep(0.2)
            self._baxter_limb.move_to_joint_positions(self._mill_pos[(pos)][1])
        self._baxter_limb.move_to_joint_positions(self._neutral_pos)

    def go_neutral(self):
        """Move the arm to the calibrated neutral (rest) pose."""
        self._baxter_limb.move_to_joint_positions(self._neutral_pos)
mit
atreyv/sympy
sympy/integrals/rationaltools.py
53
10050
"""This module implements tools for integrating rational functions. """ from __future__ import print_function, division from sympy import S, Symbol, symbols, I, log, atan, \ roots, RootSum, Lambda, cancel, Dummy from sympy.polys import Poly, resultant, ZZ from sympy.polys.polytools import count_roots from sympy.core.compatibility import range def ratint(f, x, **flags): """Performs indefinite integration of rational functions. Given a field :math:`K` and a rational function :math:`f = p/q`, where :math:`p` and :math:`q` are polynomials in :math:`K[x]`, returns a function :math:`g` such that :math:`f = g'`. >>> from sympy.integrals.rationaltools import ratint >>> from sympy.abc import x >>> ratint(36/(x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2), x) (12*x + 6)/(x**2 - 1) + 4*log(x - 2) - 4*log(x + 1) References ========== .. [Bro05] M. Bronstein, Symbolic Integration I: Transcendental Functions, Second Edition, Springer-Verlag, 2005, pp. 35-70 See Also ======== sympy.integrals.integrals.Integral.doit ratint_logpart, ratint_ratpart """ if type(f) is not tuple: p, q = f.as_numer_denom() else: p, q = f p, q = Poly(p, x, composite=False, field=True), Poly(q, x, composite=False, field=True) coeff, p, q = p.cancel(q) poly, p = p.div(q) result = poly.integrate(x).as_expr() if p.is_zero: return coeff*result g, h = ratint_ratpart(p, q, x) P, Q = h.as_numer_denom() P = Poly(P, x) Q = Poly(Q, x) q, r = P.div(Q) result += g + q.integrate(x).as_expr() if not r.is_zero: symbol = flags.get('symbol', 't') if not isinstance(symbol, Symbol): t = Dummy(symbol) else: t = symbol.as_dummy() L = ratint_logpart(r, Q, x, t) real = flags.get('real') if real is None: if type(f) is not tuple: atoms = f.atoms() else: p, q = f atoms = p.atoms() | q.atoms() for elt in atoms - set([x]): if not elt.is_real: real = False break else: real = True eps = S(0) if not real: for h, q in L: eps += RootSum( q, Lambda(t, t*log(h.as_expr())), quadratic=True) else: for h, q in L: R = log_to_real(h, q, x, t) if R is 
not None: eps += R else: eps += RootSum( q, Lambda(t, t*log(h.as_expr())), quadratic=True) result += eps return coeff*result def ratint_ratpart(f, g, x): """ Horowitz-Ostrogradsky algorithm. Given a field K and polynomials f and g in K[x], such that f and g are coprime and deg(f) < deg(g), returns fractions A and B in K(x), such that f/g = A' + B and B has square-free denominator. Examples ======== >>> from sympy.integrals.rationaltools import ratint_ratpart >>> from sympy.abc import x, y >>> from sympy import Poly >>> ratint_ratpart(Poly(1, x, domain='ZZ'), ... Poly(x + 1, x, domain='ZZ'), x) (0, 1/(x + 1)) >>> ratint_ratpart(Poly(1, x, domain='EX'), ... Poly(x**2 + y**2, x, domain='EX'), x) (0, 1/(x**2 + y**2)) >>> ratint_ratpart(Poly(36, x, domain='ZZ'), ... Poly(x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2, x, domain='ZZ'), x) ((12*x + 6)/(x**2 - 1), 12/(x**2 - x - 2)) See Also ======== ratint, ratint_logpart """ from sympy import solve f = Poly(f, x) g = Poly(g, x) u, v, _ = g.cofactors(g.diff()) n = u.degree() m = v.degree() A_coeffs = [ Dummy('a' + str(n - i)) for i in range(0, n) ] B_coeffs = [ Dummy('b' + str(m - i)) for i in range(0, m) ] C_coeffs = A_coeffs + B_coeffs A = Poly(A_coeffs, x, domain=ZZ[C_coeffs]) B = Poly(B_coeffs, x, domain=ZZ[C_coeffs]) H = f - A.diff()*v + A*(u.diff()*v).quo(u) - B*u result = solve(H.coeffs(), C_coeffs) A = A.as_expr().subs(result) B = B.as_expr().subs(result) rat_part = cancel(A/u.as_expr(), x) log_part = cancel(B/v.as_expr(), x) return rat_part, log_part def ratint_logpart(f, g, x, t=None): """ Lazard-Rioboo-Trager algorithm. 
Given a field K and polynomials f and g in K[x], such that f and g are coprime, deg(f) < deg(g) and g is square-free, returns a list of tuples (s_i, q_i) of polynomials, for i = 1..n, such that s_i in K[t, x] and q_i in K[t], and: ___ ___ d f d \ ` \ ` -- - = -- ) ) a log(s_i(a, x)) dx g dx /__, /__, i=1..n a | q_i(a) = 0 Examples ======== >>> from sympy.integrals.rationaltools import ratint_logpart >>> from sympy.abc import x >>> from sympy import Poly >>> ratint_logpart(Poly(1, x, domain='ZZ'), ... Poly(x**2 + x + 1, x, domain='ZZ'), x) [(Poly(x + 3*_t/2 + 1/2, x, domain='QQ[_t]'), ...Poly(3*_t**2 + 1, _t, domain='ZZ'))] >>> ratint_logpart(Poly(12, x, domain='ZZ'), ... Poly(x**2 - x - 2, x, domain='ZZ'), x) [(Poly(x - 3*_t/8 - 1/2, x, domain='QQ[_t]'), ...Poly(-_t**2 + 16, _t, domain='ZZ'))] See Also ======== ratint, ratint_ratpart """ f, g = Poly(f, x), Poly(g, x) t = t or Dummy('t') a, b = g, f - g.diff()*Poly(t, x) res, R = resultant(a, b, includePRS=True) res = Poly(res, t, composite=False) assert res, "BUG: resultant(%s, %s) can't be zero" % (a, b) R_map, H = {}, [] for r in R: R_map[r.degree()] = r def _include_sign(c, sqf): if (c < 0) == True: h, k = sqf[0] sqf[0] = h*c, k C, res_sqf = res.sqf_list() _include_sign(C, res_sqf) for q, i in res_sqf: _, q = q.primitive() if g.degree() == i: H.append((g, q)) else: h = R_map[i] h_lc = Poly(h.LC(), t, field=True) c, h_lc_sqf = h_lc.sqf_list(all=True) _include_sign(c, h_lc_sqf) for a, j in h_lc_sqf: h = h.quo(Poly(a.gcd(q)**j, x)) inv, coeffs = h_lc.invert(q), [S(1)] for coeff in h.coeffs()[1:]: T = (inv*coeff).rem(q) coeffs.append(T.as_expr()) h = Poly(dict(list(zip(h.monoms(), coeffs))), x) H.append((h, q)) return H def log_to_atan(f, g): """ Convert complex logarithms to real arctangents. 
Given a real field K and polynomials f and g in K[x], with g != 0, returns a sum h of arctangents of polynomials in K[x], such that: dh d f + I g -- = -- I log( ------- ) dx dx f - I g Examples ======== >>> from sympy.integrals.rationaltools import log_to_atan >>> from sympy.abc import x >>> from sympy import Poly, sqrt, S >>> log_to_atan(Poly(x, x, domain='ZZ'), Poly(1, x, domain='ZZ')) 2*atan(x) >>> log_to_atan(Poly(x + S(1)/2, x, domain='QQ'), ... Poly(sqrt(3)/2, x, domain='EX')) 2*atan(2*sqrt(3)*x/3 + sqrt(3)/3) See Also ======== log_to_real """ if f.degree() < g.degree(): f, g = -g, f f = f.to_field() g = g.to_field() p, q = f.div(g) if q.is_zero: return 2*atan(p.as_expr()) else: s, t, h = g.gcdex(-f) u = (f*s + g*t).quo(h) A = 2*atan(u.as_expr()) return A + log_to_atan(s, t) def log_to_real(h, q, x, t): """ Convert complex logarithms to real functions. Given real field K and polynomials h in K[t,x] and q in K[t], returns real function f such that: ___ df d \ ` -- = -- ) a log(h(a, x)) dx dx /__, a | q(a) = 0 Examples ======== >>> from sympy.integrals.rationaltools import log_to_real >>> from sympy.abc import x, y >>> from sympy import Poly, sqrt, S >>> log_to_real(Poly(x + 3*y/2 + S(1)/2, x, domain='QQ[y]'), ... Poly(3*y**2 + 1, y, domain='ZZ'), x, y) 2*sqrt(3)*atan(2*sqrt(3)*x/3 + sqrt(3)/3)/3 >>> log_to_real(Poly(x**2 - 1, x, domain='ZZ'), ... 
Poly(-2*y + 1, y, domain='ZZ'), x, y) log(x**2 - 1)/2 See Also ======== log_to_atan """ from sympy import collect u, v = symbols('u,v', cls=Dummy) H = h.as_expr().subs({t: u + I*v}).expand() Q = q.as_expr().subs({t: u + I*v}).expand() H_map = collect(H, I, evaluate=False) Q_map = collect(Q, I, evaluate=False) a, b = H_map.get(S(1), S(0)), H_map.get(I, S(0)) c, d = Q_map.get(S(1), S(0)), Q_map.get(I, S(0)) R = Poly(resultant(c, d, v), u) R_u = roots(R, filter='R') if len(R_u) != R.count_roots(): return None result = S(0) for r_u in R_u.keys(): C = Poly(c.subs({u: r_u}), v) R_v = roots(C, filter='R') if len(R_v) != C.count_roots(): return None for r_v in R_v: if not r_v.is_positive: continue D = d.subs({u: r_u, v: r_v}) if D.evalf(chop=True) != 0: continue A = Poly(a.subs({u: r_u, v: r_v}), x) B = Poly(b.subs({u: r_u, v: r_v}), x) AB = (A**2 + B**2).as_expr() result += r_u*log(AB) + r_v*log_to_atan(A, B) R_q = roots(q, filter='R') if len(R_q) != q.count_roots(): return None for r in R_q.keys(): result += r*log(h.as_expr().subs(t, r)) return result
bsd-3-clause
segsignal/bitcoin
contrib/devtools/github-merge.py
46
10860
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

# This script will locally construct a merge commit for a pull request on a
# github repository, inspect it, sign it and optionally push it.

# The following temporary branches are created/overwritten and deleted:
# * pull/$PULL/base (the current master we're merging onto)
# * pull/$PULL/head (the current state of the remote pull request)
# * pull/$PULL/merge (github's merge)
# * pull/$PULL/local-merge (our merge)

# In case of a clean merge that is accepted by the user, the local branch with
# name $BRANCH is overwritten with the merged result, and optionally pushed.

from __future__ import division,print_function,unicode_literals
import os
from sys import stdin,stdout,stderr
import argparse
import subprocess
import json,codecs
# Python 3 / Python 2 compatibility for the HTTP client.
try:
    from urllib.request import Request,urlopen
except:
    from urllib2 import Request,urlopen

# External tools (can be overridden using environment)
GIT = os.getenv('GIT','git')
BASH = os.getenv('BASH','bash')

# OS specific configuration for terminal attributes
ATTR_RESET = ''
ATTR_PR = ''
COMMIT_FORMAT = '%h %s (%an)%d'
if os.name == 'posix': # if posix, assume we can use basic terminal escapes
    ATTR_RESET = '\033[0m'
    ATTR_PR = '\033[1;36m'
    COMMIT_FORMAT = '%C(bold blue)%h%Creset %s %C(cyan)(%an)%Creset%C(green)%d%Creset'

def git_config_get(option, default=None):
    '''
    Get named configuration option from git repository.

    Returns `default` when the option is unset (git exits non-zero).
    '''
    try:
        return subprocess.check_output([GIT,'config','--get',option]).rstrip().decode('utf-8')
    except subprocess.CalledProcessError as e:
        return default

def retrieve_pr_info(repo,pull):
    '''
    Retrieve pull request information from github.
    Return None if no title can be found, or an error happens.
    '''
    try:
        req = Request("https://api.github.com/repos/"+repo+"/pulls/"+pull)
        result = urlopen(req)
        reader = codecs.getreader('utf-8')
        obj = json.load(reader(result))
        return obj
    except Exception as e:
        print('Warning: unable to retrieve pull information from github: %s' % e)
        return None

def ask_prompt(text):
    '''Print `text` to stderr and return one stripped line from stdin.'''
    print(text,end=" ",file=stderr)
    stderr.flush()
    reply = stdin.readline().rstrip()
    print("",file=stderr)
    return reply

def parse_arguments():
    '''Parse the PULL id (required) and optional BRANCH arguments.'''
    epilog = '''
        In addition, you can set the following git configuration variables:
        githubmerge.repository (mandatory),
        user.signingkey (mandatory),
        githubmerge.host (default: git@github.com),
        githubmerge.branch (no default),
        githubmerge.testcmd (default: none).
    '''
    parser = argparse.ArgumentParser(description='Utility to merge, sign and push github pull requests',
            epilog=epilog)
    parser.add_argument('pull', metavar='PULL', type=int, nargs=1,
        help='Pull request ID to merge')
    parser.add_argument('branch', metavar='BRANCH', type=str, nargs='?',
        default=None, help='Branch to merge against (default: githubmerge.branch setting, or base branch for pull, or \'master\')')
    return parser.parse_args()

def main():
    '''Drive the whole merge flow: fetch, merge, test, review, sign, push.

    Exit codes: 1 config/signing errors, 3 fetch/checkout failures,
    4 merge failures, 5 test failure, 6 diff rejected, 7 merge rejected.
    '''
    # Extract settings from git repo
    repo = git_config_get('githubmerge.repository')
    host = git_config_get('githubmerge.host','git@github.com')
    opt_branch = git_config_get('githubmerge.branch',None)
    testcmd = git_config_get('githubmerge.testcmd')
    signingkey = git_config_get('user.signingkey')
    if repo is None:
        print("ERROR: No repository configured. Use this command to set:", file=stderr)
        print("git config githubmerge.repository <owner>/<repo>", file=stderr)
        exit(1)
    if signingkey is None:
        print("ERROR: No GPG signing key set. Set one using:",file=stderr)
        print("git config --global user.signingkey <key>",file=stderr)
        exit(1)

    host_repo = host+":"+repo # shortcut for push/pull target

    # Extract settings from command line
    args = parse_arguments()
    pull = str(args.pull[0])

    # Receive pull information from github
    info = retrieve_pr_info(repo,pull)
    if info is None:
        exit(1)
    title = info['title']
    # precedence order for destination branch argument:
    #   - command line argument
    #   - githubmerge.branch setting
    #   - base branch for pull (as retrieved from github)
    #   - 'master'
    branch = args.branch or opt_branch or info['base']['ref'] or 'master'

    # Initialize source branches
    head_branch = 'pull/'+pull+'/head'
    base_branch = 'pull/'+pull+'/base'
    merge_branch = 'pull/'+pull+'/merge'
    local_merge_branch = 'pull/'+pull+'/local-merge'

    devnull = open(os.devnull,'w')
    try:
        subprocess.check_call([GIT,'checkout','-q',branch])
    except subprocess.CalledProcessError as e:
        print("ERROR: Cannot check out branch %s." % (branch), file=stderr)
        exit(3)
    try:
        subprocess.check_call([GIT,'fetch','-q',host_repo,'+refs/pull/'+pull+'/*:refs/heads/pull/'+pull+'/*'])
    except subprocess.CalledProcessError as e:
        print("ERROR: Cannot find pull request #%s on %s." % (pull,host_repo), file=stderr)
        exit(3)
    # `git log -1` on each ref is used purely as an existence check.
    try:
        subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+head_branch], stdout=devnull, stderr=stdout)
    except subprocess.CalledProcessError as e:
        print("ERROR: Cannot find head of pull request #%s on %s." % (pull,host_repo), file=stderr)
        exit(3)
    try:
        subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+merge_branch], stdout=devnull, stderr=stdout)
    except subprocess.CalledProcessError as e:
        print("ERROR: Cannot find merge of pull request #%s on %s." % (pull,host_repo), file=stderr)
        exit(3)
    try:
        subprocess.check_call([GIT,'fetch','-q',host_repo,'+refs/heads/'+branch+':refs/heads/'+base_branch])
    except subprocess.CalledProcessError as e:
        print("ERROR: Cannot find branch %s on %s." % (branch,host_repo), file=stderr)
        exit(3)
    subprocess.check_call([GIT,'checkout','-q',base_branch])
    subprocess.call([GIT,'branch','-q','-D',local_merge_branch], stderr=devnull)
    subprocess.check_call([GIT,'checkout','-q','-b',local_merge_branch])

    try:
        # Create unsigned merge commit.
        if title:
            firstline = 'Merge #%s: %s' % (pull,title)
        else:
            firstline = 'Merge #%s' % (pull,)
        message = firstline + '\n\n'
        message += subprocess.check_output([GIT,'log','--no-merges','--topo-order','--pretty=format:%h %s (%an)',base_branch+'..'+head_branch]).decode('utf-8')
        try:
            subprocess.check_call([GIT,'merge','-q','--commit','--no-edit','--no-ff','-m',message.encode('utf-8'),head_branch])
        except subprocess.CalledProcessError as e:
            print("ERROR: Cannot be merged cleanly.",file=stderr)
            subprocess.check_call([GIT,'merge','--abort'])
            exit(4)
        # If HEAD's subject isn't our merge message, git fast-forwarded or
        # skipped the commit (e.g. already merged).
        logmsg = subprocess.check_output([GIT,'log','--pretty=format:%s','-n','1']).decode('utf-8')
        if logmsg.rstrip() != firstline.rstrip():
            print("ERROR: Creating merge failed (already merged?).",file=stderr)
            exit(4)

        print('%s#%s%s %s %sinto %s%s' % (ATTR_RESET+ATTR_PR,pull,ATTR_RESET,title,ATTR_RESET+ATTR_PR,branch,ATTR_RESET))
        subprocess.check_call([GIT,'log','--graph','--topo-order','--pretty=format:'+COMMIT_FORMAT,base_branch+'..'+head_branch])
        print()
        # Run test command if configured.
        if testcmd:
            # Go up to the repository's root.
            toplevel = subprocess.check_output([GIT,'rev-parse','--show-toplevel']).strip()
            os.chdir(toplevel)
            if subprocess.call(testcmd,shell=True):
                print("ERROR: Running %s failed." % testcmd,file=stderr)
                exit(5)

            # Show the created merge.
            diff = subprocess.check_output([GIT,'diff',merge_branch+'..'+local_merge_branch])
            subprocess.check_call([GIT,'diff',base_branch+'..'+local_merge_branch])
            if diff:
                print("WARNING: merge differs from github!",file=stderr)
                reply = ask_prompt("Type 'ignore' to continue.")
                if reply.lower() == 'ignore':
                    print("Difference with github ignored.",file=stderr)
                else:
                    exit(6)
            reply = ask_prompt("Press 'd' to accept the diff.")
            if reply.lower() == 'd':
                print("Diff accepted.",file=stderr)
            else:
                print("ERROR: Diff rejected.",file=stderr)
                exit(6)
        else:
            # Verify the result manually.
            print("Dropping you on a shell so you can try building/testing the merged source.",file=stderr)
            print("Run 'git diff HEAD~' to show the changes being merged.",file=stderr)
            print("Type 'exit' when done.",file=stderr)
            if os.path.isfile('/etc/debian_version'): # Show pull number on Debian default prompt
                os.putenv('debian_chroot',pull)
            subprocess.call([BASH,'-i'])
            reply = ask_prompt("Type 'm' to accept the merge.")
            if reply.lower() == 'm':
                print("Merge accepted.",file=stderr)
            else:
                print("ERROR: Merge rejected.",file=stderr)
                exit(7)

        # Sign the merge commit.
        reply = ask_prompt("Type 's' to sign off on the merge.")
        if reply == 's':
            try:
                subprocess.check_call([GIT,'commit','-q','--gpg-sign','--amend','--no-edit'])
            except subprocess.CalledProcessError as e:
                print("Error signing, exiting.",file=stderr)
                exit(1)
        else:
            print("Not signing off on merge, exiting.",file=stderr)
            exit(1)

        # Put the result in branch.
        subprocess.check_call([GIT,'checkout','-q',branch])
        subprocess.check_call([GIT,'reset','-q','--hard',local_merge_branch])
    finally:
        # Clean up temporary branches.
        subprocess.call([GIT,'checkout','-q',branch])
        subprocess.call([GIT,'branch','-q','-D',head_branch],stderr=devnull)
        subprocess.call([GIT,'branch','-q','-D',base_branch],stderr=devnull)
        subprocess.call([GIT,'branch','-q','-D',merge_branch],stderr=devnull)
        subprocess.call([GIT,'branch','-q','-D',local_merge_branch],stderr=devnull)

    # Push the result.
    reply = ask_prompt("Type 'push' to push the result to %s, branch %s." % (host_repo,branch))
    if reply.lower() == 'push':
        subprocess.check_call([GIT,'push',host_repo,'refs/heads/'+branch])

if __name__ == '__main__':
    main()
mit
wang1352083/pythontool
python-2.7.12-lib/test/test_os.py
3
35098
# As a test suite for the os module, this is woefully inadequate, but this # does add tests for a few functions which have been determined to be more # portable than they had been thought to be. import os import errno import unittest import warnings import sys import signal import subprocess import sysconfig import textwrap import time try: import resource except ImportError: resource = None from test import test_support from test.script_helper import assert_python_ok import mmap import uuid warnings.filterwarnings("ignore", "tempnam", RuntimeWarning, __name__) warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning, __name__) # Tests creating TESTFN class FileTests(unittest.TestCase): def setUp(self): if os.path.exists(test_support.TESTFN): os.unlink(test_support.TESTFN) tearDown = setUp def test_access(self): f = os.open(test_support.TESTFN, os.O_CREAT|os.O_RDWR) os.close(f) self.assertTrue(os.access(test_support.TESTFN, os.W_OK)) def test_closerange(self): first = os.open(test_support.TESTFN, os.O_CREAT|os.O_RDWR) # We must allocate two consecutive file descriptors, otherwise # it will mess up other file descriptors (perhaps even the three # standard ones). 
second = os.dup(first) try: retries = 0 while second != first + 1: os.close(first) retries += 1 if retries > 10: # XXX test skipped self.skipTest("couldn't allocate two consecutive fds") first, second = second, os.dup(second) finally: os.close(second) # close a fd that is open, and one that isn't os.closerange(first, first + 2) self.assertRaises(OSError, os.write, first, "a") @test_support.cpython_only def test_rename(self): path = unicode(test_support.TESTFN) old = sys.getrefcount(path) self.assertRaises(TypeError, os.rename, path, 0) new = sys.getrefcount(path) self.assertEqual(old, new) class TemporaryFileTests(unittest.TestCase): def setUp(self): self.files = [] os.mkdir(test_support.TESTFN) def tearDown(self): for name in self.files: os.unlink(name) os.rmdir(test_support.TESTFN) def check_tempfile(self, name): # make sure it doesn't already exist: self.assertFalse(os.path.exists(name), "file already exists for temporary file") # make sure we can create the file open(name, "w") self.files.append(name) @unittest.skipUnless(hasattr(os, 'tempnam'), 'test needs os.tempnam()') def test_tempnam(self): with warnings.catch_warnings(): warnings.filterwarnings("ignore", "tempnam", RuntimeWarning, r"test_os$") warnings.filterwarnings("ignore", "tempnam", DeprecationWarning) self.check_tempfile(os.tempnam()) name = os.tempnam(test_support.TESTFN) self.check_tempfile(name) name = os.tempnam(test_support.TESTFN, "pfx") self.assertTrue(os.path.basename(name)[:3] == "pfx") self.check_tempfile(name) @unittest.skipUnless(hasattr(os, 'tmpfile'), 'test needs os.tmpfile()') def test_tmpfile(self): # As with test_tmpnam() below, the Windows implementation of tmpfile() # attempts to create a file in the root directory of the current drive. # On Vista and Server 2008, this test will always fail for normal users # as writing to the root directory requires elevated privileges. 
With # XP and below, the semantics of tmpfile() are the same, but the user # running the test is more likely to have administrative privileges on # their account already. If that's the case, then os.tmpfile() should # work. In order to make this test as useful as possible, rather than # trying to detect Windows versions or whether or not the user has the # right permissions, just try and create a file in the root directory # and see if it raises a 'Permission denied' OSError. If it does, then # test that a subsequent call to os.tmpfile() raises the same error. If # it doesn't, assume we're on XP or below and the user running the test # has administrative privileges, and proceed with the test as normal. with warnings.catch_warnings(): warnings.filterwarnings("ignore", "tmpfile", DeprecationWarning) if sys.platform == 'win32': name = '\\python_test_os_test_tmpfile.txt' if os.path.exists(name): os.remove(name) try: fp = open(name, 'w') except IOError, first: # open() failed, assert tmpfile() fails in the same way. # Although open() raises an IOError and os.tmpfile() raises an # OSError(), 'args' will be (13, 'Permission denied') in both # cases. try: fp = os.tmpfile() except OSError, second: self.assertEqual(first.args, second.args) else: self.fail("expected os.tmpfile() to raise OSError") return else: # open() worked, therefore, tmpfile() should work. Close our # dummy file and proceed with the test as normal. fp.close() os.remove(name) fp = os.tmpfile() fp.write("foobar") fp.seek(0,0) s = fp.read() fp.close() self.assertTrue(s == "foobar") @unittest.skipUnless(hasattr(os, 'tmpnam'), 'test needs os.tmpnam()') def test_tmpnam(self): with warnings.catch_warnings(): warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning, r"test_os$") warnings.filterwarnings("ignore", "tmpnam", DeprecationWarning) name = os.tmpnam() if sys.platform in ("win32",): # The Windows tmpnam() seems useless. 
From the MS docs: # # The character string that tmpnam creates consists of # the path prefix, defined by the entry P_tmpdir in the # file STDIO.H, followed by a sequence consisting of the # digit characters '0' through '9'; the numerical value # of this string is in the range 1 - 65,535. Changing the # definitions of L_tmpnam or P_tmpdir in STDIO.H does not # change the operation of tmpnam. # # The really bizarre part is that, at least under MSVC6, # P_tmpdir is "\\". That is, the path returned refers to # the root of the current drive. That's a terrible place to # put temp files, and, depending on privileges, the user # may not even be able to open a file in the root directory. self.assertFalse(os.path.exists(name), "file already exists for temporary file") else: self.check_tempfile(name) # Test attributes on return values from os.*stat* family. class StatAttributeTests(unittest.TestCase): def setUp(self): os.mkdir(test_support.TESTFN) self.fname = os.path.join(test_support.TESTFN, "f1") f = open(self.fname, 'wb') f.write("ABC") f.close() def tearDown(self): os.unlink(self.fname) os.rmdir(test_support.TESTFN) @unittest.skipUnless(hasattr(os, 'stat'), 'test needs os.stat()') def test_stat_attributes(self): import stat result = os.stat(self.fname) # Make sure direct access works self.assertEqual(result[stat.ST_SIZE], 3) self.assertEqual(result.st_size, 3) # Make sure all the attributes are there members = dir(result) for name in dir(stat): if name[:3] == 'ST_': attr = name.lower() if name.endswith("TIME"): def trunc(x): return int(x) else: def trunc(x): return x self.assertEqual(trunc(getattr(result, attr)), result[getattr(stat, name)]) self.assertIn(attr, members) try: result[200] self.fail("No exception raised") except IndexError: pass # Make sure that assignment fails try: result.st_mode = 1 self.fail("No exception raised") except (AttributeError, TypeError): pass try: result.st_rdev = 1 self.fail("No exception raised") except (AttributeError, TypeError): pass 
try: result.parrot = 1 self.fail("No exception raised") except AttributeError: pass # Use the stat_result constructor with a too-short tuple. try: result2 = os.stat_result((10,)) self.fail("No exception raised") except TypeError: pass # Use the constructor with a too-long tuple. try: result2 = os.stat_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14)) except TypeError: pass @unittest.skipUnless(hasattr(os, 'statvfs'), 'test needs os.statvfs()') def test_statvfs_attributes(self): try: result = os.statvfs(self.fname) except OSError, e: # On AtheOS, glibc always returns ENOSYS if e.errno == errno.ENOSYS: self.skipTest('glibc always returns ENOSYS on AtheOS') # Make sure direct access works self.assertEqual(result.f_bfree, result[3]) # Make sure all the attributes are there. members = ('bsize', 'frsize', 'blocks', 'bfree', 'bavail', 'files', 'ffree', 'favail', 'flag', 'namemax') for value, member in enumerate(members): self.assertEqual(getattr(result, 'f_' + member), result[value]) # Make sure that assignment really fails try: result.f_bfree = 1 self.fail("No exception raised") except TypeError: pass try: result.parrot = 1 self.fail("No exception raised") except AttributeError: pass # Use the constructor with a too-short tuple. try: result2 = os.statvfs_result((10,)) self.fail("No exception raised") except TypeError: pass # Use the constructor with a too-long tuple. try: result2 = os.statvfs_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14)) except TypeError: pass def test_utime_dir(self): delta = 1000000 st = os.stat(test_support.TESTFN) # round to int, because some systems may support sub-second # time stamps in stat, but not in utime. 
os.utime(test_support.TESTFN, (st.st_atime, int(st.st_mtime-delta))) st2 = os.stat(test_support.TESTFN) self.assertEqual(st2.st_mtime, int(st.st_mtime-delta)) # Restrict tests to Win32, since there is no guarantee other # systems support centiseconds def get_file_system(path): if sys.platform == 'win32': root = os.path.splitdrive(os.path.abspath(path))[0] + '\\' import ctypes kernel32 = ctypes.windll.kernel32 buf = ctypes.create_string_buffer("", 100) if kernel32.GetVolumeInformationA(root, None, 0, None, None, None, buf, len(buf)): return buf.value @unittest.skipUnless(sys.platform == "win32", "Win32 specific tests") @unittest.skipUnless(get_file_system(test_support.TESTFN) == "NTFS", "requires NTFS") def test_1565150(self): t1 = 1159195039.25 os.utime(self.fname, (t1, t1)) self.assertEqual(os.stat(self.fname).st_mtime, t1) @unittest.skipUnless(sys.platform == "win32", "Win32 specific tests") @unittest.skipUnless(get_file_system(test_support.TESTFN) == "NTFS", "requires NTFS") def test_large_time(self): t1 = 5000000000 # some day in 2128 os.utime(self.fname, (t1, t1)) self.assertEqual(os.stat(self.fname).st_mtime, t1) @unittest.skipUnless(sys.platform == "win32", "Win32 specific tests") def test_1686475(self): # Verify that an open file can be stat'ed try: os.stat(r"c:\pagefile.sys") except WindowsError, e: if e.errno == 2: # file does not exist; cannot run test self.skipTest(r'c:\pagefile.sys does not exist') self.fail("Could not stat pagefile.sys") from test import mapping_tests class EnvironTests(mapping_tests.BasicTestMappingProtocol): """check that os.environ object conform to mapping protocol""" type2test = None def _reference(self): return {"KEY1":"VALUE1", "KEY2":"VALUE2", "KEY3":"VALUE3"} def _empty_mapping(self): os.environ.clear() return os.environ def setUp(self): self.__save = dict(os.environ) os.environ.clear() def tearDown(self): os.environ.clear() os.environ.update(self.__save) # Bug 1110478 def test_update2(self): if os.path.exists("/bin/sh"): 
os.environ.update(HELLO="World") with os.popen("/bin/sh -c 'echo $HELLO'") as popen: value = popen.read().strip() self.assertEqual(value, "World") # On FreeBSD < 7 and OS X < 10.6, unsetenv() doesn't return a value (issue # #13415). @unittest.skipIf(sys.platform.startswith(('freebsd', 'darwin')), "due to known OS bug: see issue #13415") def test_unset_error(self): if sys.platform == "win32": # an environment variable is limited to 32,767 characters key = 'x' * 50000 self.assertRaises(ValueError, os.environ.__delitem__, key) else: # "=" is not allowed in a variable name key = 'key=' self.assertRaises(OSError, os.environ.__delitem__, key) class WalkTests(unittest.TestCase): """Tests for os.walk().""" def test_traversal(self): import os from os.path import join # Build: # TESTFN/ # TEST1/ a file kid and two directory kids # tmp1 # SUB1/ a file kid and a directory kid # tmp2 # SUB11/ no kids # SUB2/ a file kid and a dirsymlink kid # tmp3 # link/ a symlink to TESTFN.2 # TEST2/ # tmp4 a lone file walk_path = join(test_support.TESTFN, "TEST1") sub1_path = join(walk_path, "SUB1") sub11_path = join(sub1_path, "SUB11") sub2_path = join(walk_path, "SUB2") tmp1_path = join(walk_path, "tmp1") tmp2_path = join(sub1_path, "tmp2") tmp3_path = join(sub2_path, "tmp3") link_path = join(sub2_path, "link") t2_path = join(test_support.TESTFN, "TEST2") tmp4_path = join(test_support.TESTFN, "TEST2", "tmp4") # Create stuff. os.makedirs(sub11_path) os.makedirs(sub2_path) os.makedirs(t2_path) for path in tmp1_path, tmp2_path, tmp3_path, tmp4_path: f = file(path, "w") f.write("I'm " + path + " and proud of it. Blame test_os.\n") f.close() if hasattr(os, "symlink"): os.symlink(os.path.abspath(t2_path), link_path) sub2_tree = (sub2_path, ["link"], ["tmp3"]) else: sub2_tree = (sub2_path, [], ["tmp3"]) # Walk top-down. all = list(os.walk(walk_path)) self.assertEqual(len(all), 4) # We can't know which order SUB1 and SUB2 will appear in. 
# Not flipped: TESTFN, SUB1, SUB11, SUB2 # flipped: TESTFN, SUB2, SUB1, SUB11 flipped = all[0][1][0] != "SUB1" all[0][1].sort() self.assertEqual(all[0], (walk_path, ["SUB1", "SUB2"], ["tmp1"])) self.assertEqual(all[1 + flipped], (sub1_path, ["SUB11"], ["tmp2"])) self.assertEqual(all[2 + flipped], (sub11_path, [], [])) self.assertEqual(all[3 - 2 * flipped], sub2_tree) # Prune the search. all = [] for root, dirs, files in os.walk(walk_path): all.append((root, dirs, files)) # Don't descend into SUB1. if 'SUB1' in dirs: # Note that this also mutates the dirs we appended to all! dirs.remove('SUB1') self.assertEqual(len(all), 2) self.assertEqual(all[0], (walk_path, ["SUB2"], ["tmp1"])) self.assertEqual(all[1], sub2_tree) # Walk bottom-up. all = list(os.walk(walk_path, topdown=False)) self.assertEqual(len(all), 4) # We can't know which order SUB1 and SUB2 will appear in. # Not flipped: SUB11, SUB1, SUB2, TESTFN # flipped: SUB2, SUB11, SUB1, TESTFN flipped = all[3][1][0] != "SUB1" all[3][1].sort() self.assertEqual(all[3], (walk_path, ["SUB1", "SUB2"], ["tmp1"])) self.assertEqual(all[flipped], (sub11_path, [], [])) self.assertEqual(all[flipped + 1], (sub1_path, ["SUB11"], ["tmp2"])) self.assertEqual(all[2 - 2 * flipped], sub2_tree) if hasattr(os, "symlink"): # Walk, following symlinks. for root, dirs, files in os.walk(walk_path, followlinks=True): if root == link_path: self.assertEqual(dirs, []) self.assertEqual(files, ["tmp4"]) break else: self.fail("Didn't follow symlink with followlinks=True") def tearDown(self): # Tear everything down. This is a decent use for bottom-up on # Windows, which doesn't have a recursive delete command. The # (not so) subtlety is that rmdir will fail unless the dir's # kids are removed first, so bottom up is essential. 
for root, dirs, files in os.walk(test_support.TESTFN, topdown=False): for name in files: os.remove(os.path.join(root, name)) for name in dirs: dirname = os.path.join(root, name) if not os.path.islink(dirname): os.rmdir(dirname) else: os.remove(dirname) os.rmdir(test_support.TESTFN) class MakedirTests (unittest.TestCase): def setUp(self): os.mkdir(test_support.TESTFN) def test_makedir(self): base = test_support.TESTFN path = os.path.join(base, 'dir1', 'dir2', 'dir3') os.makedirs(path) # Should work path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4') os.makedirs(path) # Try paths with a '.' in them self.assertRaises(OSError, os.makedirs, os.curdir) path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4', 'dir5', os.curdir) os.makedirs(path) path = os.path.join(base, 'dir1', os.curdir, 'dir2', 'dir3', 'dir4', 'dir5', 'dir6') os.makedirs(path) def tearDown(self): path = os.path.join(test_support.TESTFN, 'dir1', 'dir2', 'dir3', 'dir4', 'dir5', 'dir6') # If the tests failed, the bottom-most directory ('../dir6') # may not have been created, so we look for the outermost directory # that exists. while not os.path.exists(path) and path != test_support.TESTFN: path = os.path.dirname(path) os.removedirs(path) class DevNullTests (unittest.TestCase): def test_devnull(self): f = file(os.devnull, 'w') f.write('hello') f.close() f = file(os.devnull, 'r') self.assertEqual(f.read(), '') f.close() class URandomTests (unittest.TestCase): def test_urandom_length(self): self.assertEqual(len(os.urandom(0)), 0) self.assertEqual(len(os.urandom(1)), 1) self.assertEqual(len(os.urandom(10)), 10) self.assertEqual(len(os.urandom(100)), 100) self.assertEqual(len(os.urandom(1000)), 1000) def test_urandom_value(self): data1 = os.urandom(16) data2 = os.urandom(16) self.assertNotEqual(data1, data2) def get_urandom_subprocess(self, count): # We need to use repr() and eval() to avoid line ending conversions # under Windows. 
code = '\n'.join(( 'import os, sys', 'data = os.urandom(%s)' % count, 'sys.stdout.write(repr(data))', 'sys.stdout.flush()', 'print >> sys.stderr, (len(data), data)')) cmd_line = [sys.executable, '-c', code] p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() self.assertEqual(p.wait(), 0, (p.wait(), err)) out = eval(out) self.assertEqual(len(out), count, err) return out def test_urandom_subprocess(self): data1 = self.get_urandom_subprocess(16) data2 = self.get_urandom_subprocess(16) self.assertNotEqual(data1, data2) HAVE_GETENTROPY = (sysconfig.get_config_var('HAVE_GETENTROPY') == 1) @unittest.skipIf(HAVE_GETENTROPY, "getentropy() does not use a file descriptor") class URandomFDTests(unittest.TestCase): @unittest.skipUnless(resource, "test requires the resource module") def test_urandom_failure(self): # Check urandom() failing when it is not able to open /dev/random. # We spawn a new process to make the test more robust (if getrlimit() # failed to restore the file descriptor limit after this, the whole # test suite would crash; this actually happened on the OS X Tiger # buildbot). 
code = """if 1: import errno import os import resource soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE) resource.setrlimit(resource.RLIMIT_NOFILE, (1, hard_limit)) try: os.urandom(16) except OSError as e: assert e.errno == errno.EMFILE, e.errno else: raise AssertionError("OSError not raised") """ assert_python_ok('-c', code) class ExecvpeTests(unittest.TestCase): def test_execvpe_with_bad_arglist(self): self.assertRaises(ValueError, os.execvpe, 'notepad', [], None) @unittest.skipUnless(sys.platform == "win32", "Win32 specific tests") class Win32ErrorTests(unittest.TestCase): def test_rename(self): self.assertRaises(WindowsError, os.rename, test_support.TESTFN, test_support.TESTFN+".bak") def test_remove(self): self.assertRaises(WindowsError, os.remove, test_support.TESTFN) def test_chdir(self): self.assertRaises(WindowsError, os.chdir, test_support.TESTFN) def test_mkdir(self): f = open(test_support.TESTFN, "w") try: self.assertRaises(WindowsError, os.mkdir, test_support.TESTFN) finally: f.close() os.unlink(test_support.TESTFN) def test_utime(self): self.assertRaises(WindowsError, os.utime, test_support.TESTFN, None) def test_chmod(self): self.assertRaises(WindowsError, os.chmod, test_support.TESTFN, 0) class TestInvalidFD(unittest.TestCase): singles = ["fchdir", "fdopen", "dup", "fdatasync", "fstat", "fstatvfs", "fsync", "tcgetpgrp", "ttyname"] #singles.append("close") #We omit close because it doesn'r raise an exception on some platforms def get_single(f): def helper(self): if hasattr(os, f): self.check(getattr(os, f)) return helper for f in singles: locals()["test_"+f] = get_single(f) def check(self, f, *args): try: f(test_support.make_bad_fd(), *args) except OSError as e: self.assertEqual(e.errno, errno.EBADF) else: self.fail("%r didn't raise a OSError with a bad file descriptor" % f) @unittest.skipUnless(hasattr(os, 'isatty'), 'test needs os.isatty()') def test_isatty(self): self.assertEqual(os.isatty(test_support.make_bad_fd()), False) 
@unittest.skipUnless(hasattr(os, 'closerange'), 'test needs os.closerange()') def test_closerange(self): fd = test_support.make_bad_fd() # Make sure none of the descriptors we are about to close are # currently valid (issue 6542). for i in range(10): try: os.fstat(fd+i) except OSError: pass else: break if i < 2: raise unittest.SkipTest( "Unable to acquire a range of invalid file descriptors") self.assertEqual(os.closerange(fd, fd + i-1), None) @unittest.skipUnless(hasattr(os, 'dup2'), 'test needs os.dup2()') def test_dup2(self): self.check(os.dup2, 20) @unittest.skipUnless(hasattr(os, 'fchmod'), 'test needs os.fchmod()') def test_fchmod(self): self.check(os.fchmod, 0) @unittest.skipUnless(hasattr(os, 'fchown'), 'test needs os.fchown()') def test_fchown(self): self.check(os.fchown, -1, -1) @unittest.skipUnless(hasattr(os, 'fpathconf'), 'test needs os.fpathconf()') def test_fpathconf(self): self.check(os.fpathconf, "PC_NAME_MAX") @unittest.skipUnless(hasattr(os, 'ftruncate'), 'test needs os.ftruncate()') def test_ftruncate(self): self.check(os.ftruncate, 0) @unittest.skipUnless(hasattr(os, 'lseek'), 'test needs os.lseek()') def test_lseek(self): self.check(os.lseek, 0, 0) @unittest.skipUnless(hasattr(os, 'read'), 'test needs os.read()') def test_read(self): self.check(os.read, 1) @unittest.skipUnless(hasattr(os, 'tcsetpgrp'), 'test needs os.tcsetpgrp()') def test_tcsetpgrpt(self): self.check(os.tcsetpgrp, 0) @unittest.skipUnless(hasattr(os, 'write'), 'test needs os.write()') def test_write(self): self.check(os.write, " ") @unittest.skipIf(sys.platform == "win32", "Posix specific tests") class PosixUidGidTests(unittest.TestCase): @unittest.skipUnless(hasattr(os, 'setuid'), 'test needs os.setuid()') def test_setuid(self): if os.getuid() != 0: self.assertRaises(os.error, os.setuid, 0) self.assertRaises(OverflowError, os.setuid, 1<<32) @unittest.skipUnless(hasattr(os, 'setgid'), 'test needs os.setgid()') def test_setgid(self): if os.getuid() != 0: 
self.assertRaises(os.error, os.setgid, 0) self.assertRaises(OverflowError, os.setgid, 1<<32) @unittest.skipUnless(hasattr(os, 'seteuid'), 'test needs os.seteuid()') def test_seteuid(self): if os.getuid() != 0: self.assertRaises(os.error, os.seteuid, 0) self.assertRaises(OverflowError, os.seteuid, 1<<32) @unittest.skipUnless(hasattr(os, 'setegid'), 'test needs os.setegid()') def test_setegid(self): if os.getuid() != 0: self.assertRaises(os.error, os.setegid, 0) self.assertRaises(OverflowError, os.setegid, 1<<32) @unittest.skipUnless(hasattr(os, 'setreuid'), 'test needs os.setreuid()') def test_setreuid(self): if os.getuid() != 0: self.assertRaises(os.error, os.setreuid, 0, 0) self.assertRaises(OverflowError, os.setreuid, 1<<32, 0) self.assertRaises(OverflowError, os.setreuid, 0, 1<<32) @unittest.skipUnless(hasattr(os, 'setreuid'), 'test needs os.setreuid()') def test_setreuid_neg1(self): # Needs to accept -1. We run this in a subprocess to avoid # altering the test runner's process state (issue8045). subprocess.check_call([ sys.executable, '-c', 'import os,sys;os.setreuid(-1,-1);sys.exit(0)']) @unittest.skipUnless(hasattr(os, 'setregid'), 'test needs os.setregid()') def test_setregid(self): if os.getuid() != 0: self.assertRaises(os.error, os.setregid, 0, 0) self.assertRaises(OverflowError, os.setregid, 1<<32, 0) self.assertRaises(OverflowError, os.setregid, 0, 1<<32) @unittest.skipUnless(hasattr(os, 'setregid'), 'test needs os.setregid()') def test_setregid_neg1(self): # Needs to accept -1. We run this in a subprocess to avoid # altering the test runner's process state (issue8045). subprocess.check_call([ sys.executable, '-c', 'import os,sys;os.setregid(-1,-1);sys.exit(0)']) @unittest.skipUnless(sys.platform == "win32", "Win32 specific tests") class Win32KillTests(unittest.TestCase): def _kill(self, sig): # Start sys.executable as a subprocess and communicate from the # subprocess to the parent that the interpreter is ready. 
When it # becomes ready, send *sig* via os.kill to the subprocess and check # that the return code is equal to *sig*. import ctypes from ctypes import wintypes import msvcrt # Since we can't access the contents of the process' stdout until the # process has exited, use PeekNamedPipe to see what's inside stdout # without waiting. This is done so we can tell that the interpreter # is started and running at a point where it could handle a signal. PeekNamedPipe = ctypes.windll.kernel32.PeekNamedPipe PeekNamedPipe.restype = wintypes.BOOL PeekNamedPipe.argtypes = (wintypes.HANDLE, # Pipe handle ctypes.POINTER(ctypes.c_char), # stdout buf wintypes.DWORD, # Buffer size ctypes.POINTER(wintypes.DWORD), # bytes read ctypes.POINTER(wintypes.DWORD), # bytes avail ctypes.POINTER(wintypes.DWORD)) # bytes left msg = "running" proc = subprocess.Popen([sys.executable, "-c", "import sys;" "sys.stdout.write('{}');" "sys.stdout.flush();" "input()".format(msg)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) self.addCleanup(proc.stdout.close) self.addCleanup(proc.stderr.close) self.addCleanup(proc.stdin.close) count, max = 0, 100 while count < max and proc.poll() is None: # Create a string buffer to store the result of stdout from the pipe buf = ctypes.create_string_buffer(len(msg)) # Obtain the text currently in proc.stdout # Bytes read/avail/left are left as NULL and unused rslt = PeekNamedPipe(msvcrt.get_osfhandle(proc.stdout.fileno()), buf, ctypes.sizeof(buf), None, None, None) self.assertNotEqual(rslt, 0, "PeekNamedPipe failed") if buf.value: self.assertEqual(msg, buf.value) break time.sleep(0.1) count += 1 else: self.fail("Did not receive communication from the subprocess") os.kill(proc.pid, sig) self.assertEqual(proc.wait(), sig) def test_kill_sigterm(self): # SIGTERM doesn't mean anything special, but make sure it works self._kill(signal.SIGTERM) def test_kill_int(self): # os.kill on Windows can take an int which gets set as the exit code self._kill(100) 
def _kill_with_event(self, event, name): tagname = "test_os_%s" % uuid.uuid1() m = mmap.mmap(-1, 1, tagname) m[0] = '0' # Run a script which has console control handling enabled. proc = subprocess.Popen([sys.executable, os.path.join(os.path.dirname(__file__), "win_console_handler.py"), tagname], creationflags=subprocess.CREATE_NEW_PROCESS_GROUP) # Let the interpreter startup before we send signals. See #3137. count, max = 0, 20 while count < max and proc.poll() is None: if m[0] == '1': break time.sleep(0.5) count += 1 else: self.fail("Subprocess didn't finish initialization") os.kill(proc.pid, event) # proc.send_signal(event) could also be done here. # Allow time for the signal to be passed and the process to exit. time.sleep(0.5) if not proc.poll(): # Forcefully kill the process if we weren't able to signal it. os.kill(proc.pid, signal.SIGINT) self.fail("subprocess did not stop on {}".format(name)) @unittest.skip("subprocesses aren't inheriting Ctrl+C property") def test_CTRL_C_EVENT(self): from ctypes import wintypes import ctypes # Make a NULL value by creating a pointer with no argument. NULL = ctypes.POINTER(ctypes.c_int)() SetConsoleCtrlHandler = ctypes.windll.kernel32.SetConsoleCtrlHandler SetConsoleCtrlHandler.argtypes = (ctypes.POINTER(ctypes.c_int), wintypes.BOOL) SetConsoleCtrlHandler.restype = wintypes.BOOL # Calling this with NULL and FALSE causes the calling process to # handle Ctrl+C, rather than ignore it. This property is inherited # by subprocesses. SetConsoleCtrlHandler(NULL, 0) self._kill_with_event(signal.CTRL_C_EVENT, "CTRL_C_EVENT") def test_CTRL_BREAK_EVENT(self): self._kill_with_event(signal.CTRL_BREAK_EVENT, "CTRL_BREAK_EVENT") def test_main(): test_support.run_unittest( FileTests, TemporaryFileTests, StatAttributeTests, EnvironTests, WalkTests, MakedirTests, DevNullTests, URandomTests, URandomFDTests, ExecvpeTests, Win32ErrorTests, TestInvalidFD, PosixUidGidTests, Win32KillTests ) if __name__ == "__main__": test_main()
mit
mahmoud/womp
womp/utils.py
1
1208
# -*- coding: utf-8 -*- def get_decoded_kwargs(args): import sys kwargs = dict(args._get_kwargs()) for k, v in kwargs.items(): if not isinstance(v, unicode): try: kwargs[k] = v.decode(sys.stdin.encoding) except AttributeError: pass return kwargs def rotated_sequence(seq, start_index): n = len(seq) for i in xrange(n): yield seq[(i + start_index) % n] def get_max_width(table, index): """Get the maximum width of the given column index""" return max([len(str(row[index])) for row in table]) def pprint_table(table): """Prints out a table of data, padded for alignment @param out: Output stream (file-like object) @param table: The table to print. A list of lists. Each row must have the same number of columns. """ col_paddings = [] for i in range(len(table[0])): col_paddings.append(get_max_width(table, i)) for row in table: # left col print str(row[0]).ljust(col_paddings[0] + 1), # rest of the cols for i in range(1, len(row)): col = str(row[i]).rjust(col_paddings[i] + 2) print col, print
gpl-3.0
sergiohgz/incubator-airflow
tests/impersonation.py
15
4984
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # from __future__ import print_function import errno import os import subprocess import unittest import logging from airflow import jobs, models from airflow.utils.state import State from airflow.utils.timezone import datetime DEV_NULL = '/dev/null' TEST_DAG_FOLDER = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'dags') DEFAULT_DATE = datetime(2015, 1, 1) TEST_USER = 'airflow_test_user' logger = logging.getLogger(__name__) # TODO(aoen): Adding/remove a user as part of a test is very bad (especially if the user # already existed to begin with on the OS), this logic should be moved into a test # that is wrapped in a container like docker so that the user can be safely added/removed. # When this is done we can also modify the sudoers file to ensure that useradd will work # without any manual modification of the sudoers file by the agent that is running these # tests. 
class ImpersonationTest(unittest.TestCase):
    """End-to-end checks that unix-user impersonation (``run_as_user``) works.

    Requires a linux host where the current user can run ``sudo useradd`` /
    ``sudo userdel`` without a password prompt; otherwise the tests skip.
    """

    def setUp(self):
        self.dagbag = models.DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        logger.info('Loaded DAGS:')
        logger.info(self.dagbag.dagbag_report())

        try:
            subprocess.check_output(['sudo', 'useradd', '-m', TEST_USER, '-g',
                                     str(os.getegid())])
        except OSError as e:
            if e.errno == errno.ENOENT:
                # 'useradd' (or 'sudo') is not present on this host at all.
                raise unittest.SkipTest(
                    "The 'useradd' command did not exist so unable to test "
                    "impersonation; Skipping Test. These tests can only be run on a "
                    "linux host that supports 'useradd'."
                )
            else:
                raise unittest.SkipTest(
                    "The 'useradd' command exited non-zero; Skipping tests. Does the "
                    "current user have permission to run 'useradd' without a password "
                    "prompt (check sudoers file)?"
                )
        except subprocess.CalledProcessError:
            # BUGFIX: a non-zero exit status raises CalledProcessError, not
            # OSError, so the "exited non-zero" skip branch above was
            # unreachable and the suite errored out instead of skipping.
            raise unittest.SkipTest(
                "The 'useradd' command exited non-zero; Skipping tests. Does the "
                "current user have permission to run 'useradd' without a password "
                "prompt (check sudoers file)?"
            )

    def tearDown(self):
        # Remove the test user (and its home dir) created in setUp.
        subprocess.check_output(['sudo', 'userdel', '-r', TEST_USER])

    def run_backfill(self, dag_id, task_id):
        """Backfill *dag_id* for DEFAULT_DATE and assert *task_id* succeeded."""
        dag = self.dagbag.get_dag(dag_id)
        dag.clear()

        jobs.BackfillJob(
            dag=dag,
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE).run()

        ti = models.TaskInstance(
            task=dag.get_task(task_id),
            execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        self.assertEqual(ti.state, State.SUCCESS)

    def test_impersonation(self):
        """
        Tests that impersonating a unix user works
        """
        self.run_backfill(
            'test_impersonation',
            'test_impersonated_user'
        )

    def test_no_impersonation(self):
        """
        If default_impersonation=None, tests that the job is run
        as the current user (which will be a sudoer)
        """
        self.run_backfill(
            'test_no_impersonation',
            'test_superuser',
        )

    def test_default_impersonation(self):
        """
        If default_impersonation=TEST_USER, tests that the job defaults
        to running as TEST_USER for a test without run_as_user set
        """
        os.environ['AIRFLOW__CORE__DEFAULT_IMPERSONATION'] = TEST_USER
        try:
            self.run_backfill(
                'test_default_impersonation',
                'test_deelevated_user'
            )
        finally:
            # Always restore the environment so other tests are unaffected.
            del os.environ['AIRFLOW__CORE__DEFAULT_IMPERSONATION']

    def test_impersonation_custom(self):
        """
        Tests that impersonation using a unix user works with custom packages in
        PYTHONPATH
        """
        # PYTHONPATH is already set in script triggering tests
        assert 'PYTHONPATH' in os.environ

        self.run_backfill(
            'impersonation_with_custom_pkg',
            'exec_python_fn'
        )

    def test_impersonation_subdag(self):
        """
        Tests that impersonation using a subdag correctly passes the right configuration
        :return:
        """
        self.run_backfill(
            'impersonation_subdag',
            'test_subdag_operation'
        )
apache-2.0
joshuajan/odoo
addons/mass_mailing/__openerp__.py
45
2174
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>
#
##############################################################################

# Module manifest for the Mass Mailing Campaigns addon.
{
    'name': 'Mass Mailing Campaigns',
    'summary': 'Design, send and track emails',
    'description': """
Easily send mass mailing to your leads, opportunities or customers. Track
marketing campaigns performance to improve conversion rates. Design
professional emails and reuse templates in a few clicks.
    """,
    'version': '2.0',
    'author': 'OpenERP',
    'website': 'http://www.openerp.com',
    'category': 'Marketing',
    'depends': [
        'mail',
        'email_template',
        'marketing',
        'web_kanban_gauge',
        'web_kanban_sparkline',
        'website_mail',
    ],
    'data': [
        'data/mail_data.xml',
        'data/mass_mailing_data.xml',
        'wizard/mail_compose_message_view.xml',
        'wizard/test_mailing.xml',
        'views/mass_mailing.xml',
        'views/res_config.xml',
        'views/res_partner.xml',
        'views/email_template.xml',
        'views/website_mass_mailing.xml',
        'views/snippets.xml',
        'security/ir.model.access.csv',
        # BUGFIX: 'views/mass_mailing.xml' was listed a second time here;
        # duplicate entries make the server load/update the same view
        # definitions twice on every module install/upgrade.
    ],
    'qweb': [],
    'demo': [
        'data/mass_mailing_demo.xml',
    ],
    'installable': True,
    'auto_install': False,
}
agpl-3.0
adamhaney/airflow
airflow/migrations/versions/86770d1215c0_add_kubernetes_scheduler_uniqueness.py
7
1792
# flake8: noqa
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

"""add kubernetes scheduler uniqueness

Revision ID: 86770d1215c0
Revises: 27c6a30d7c24
Create Date: 2018-04-03 15:31:20.814328

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '86770d1215c0'
down_revision = '27c6a30d7c24'
branch_labels = None
depends_on = None

RESOURCE_TABLE = "kube_worker_uuid"


def upgrade():
    """Create the single-row kube_worker_uuid table.

    The one_row_id column (with a CHECK constraint where supported)
    guarantees the table can never hold more than one row.
    """
    columns_and_constraints = [
        sa.Column("one_row_id", sa.Boolean, server_default=sa.true(),
                  primary_key=True),
        sa.Column("worker_uuid", sa.String(255))
    ]

    conn = op.get_bind()

    # alembic creates an invalid SQL for mssql dialect
    # BUGFIX: the original test was `name not in ('mssql')` -- parenthesized
    # string, not a tuple -- which performed substring matching against
    # "mssql" instead of a membership test; compare directly instead.
    if conn.dialect.name != 'mssql':
        columns_and_constraints.append(
            sa.CheckConstraint("one_row_id", name="kube_worker_one_row_id"))

    table = op.create_table(
        RESOURCE_TABLE,
        *columns_and_constraints
    )

    # Seed the single row so updates (rather than inserts) are used at runtime.
    op.bulk_insert(table, [
        {"worker_uuid": ""}
    ])


def downgrade():
    """Drop the kube_worker_uuid table."""
    op.drop_table(RESOURCE_TABLE)
apache-2.0
VillarrealA/pyoptools
pyoptools/misc/pmisc/misc.py
9
18011
#!/usr/bin/env python # -*- coding: UTF-8 -*- import numpy as N from numpy import array, sin, cos, float64, dot, float_, sqrt, ceil, floor, dot, \ meshgrid, zeros, zeros_like, where, nan, pi, isnan, nonzero, rint, \ linspace, arange, argwhere from numpy.ma import is_masked, MaskedArray from numpy.ma import array as ma_array #from enthought.traits.api import Trait, TraitHandler from scipy import interpolate from pylab import griddata, meshgrid '''Auxiliary functions and classes ''' #~ class TraitUnitVector(TraitHandler): #~ ''' Class to define unit vector trait #~ #~ Description: #~ #~ This class defines a unit vector. If the value assigned is not a unit #~ vector, it gets automaticaly normalized #~ ''' #~ #~ def validate(self, object, name, value): #~ try: #~ avalue=array(value) #~ except: #~ self.error(object, name, value) #~ #~ if len(avalue.shape)!=1 or avalue.shape[0]!=3: #~ return self.error(object, name, avalue) #~ #~ avalue=array(avalue/sqrt(dot(avalue,avalue))) #~ return avalue #~ #~ # Trait to define a unit vector based on the unit vector trait #~ UnitVector = Trait(array([0,0,1], float_),TraitUnitVector()) #~ print "Nota: Hay que revisar las convenciones de las rotaciones para que queden\n\r "\ #~ "consistentes en x,y,z. Me parece que hay un error en el signo de la \n\r rotacion"\ #~ "al rededor de alguno de los ejes. Modulo misc \n\r"\ #~ "si no estoy mal el error esta en la rotacion respecto a los ejez Y y Z" def rot_x(tx): '''Returns the transformation matrix for a rotation around the X axis ''' return array([[1.,0. ,0. ], [0.,cos(tx),-sin(tx)], [0.,sin(tx), cos(tx)]]).astype(float64) def rot_y(ty): '''Returns the transformation matrix for a rotation around the Y axis ''' return array([[ cos(ty),0. ,sin(ty) ], [ 0. ,1 ,0. ], [-sin(ty),0. ,cos(ty) ]]).astype(float64) def rot_z(tz): '''Returns the transformation matrix for a rotation around the Z axis ''' return array([[ cos(tz),-sin(tz),0. ], [ sin(tz), cos(tz),0. ], [ 0. ,0. ,1. 
]]).astype(float64) #~ def rot_mat(r): #~ '''Returns the transformation matrix for a rotation around the Z,Y,X axes #~ #~ The rotation is made first around the Z axis, then around the Y axis, and #~ finally around the X axis. #~ #~ Parameters #~ #~ r= (rx,ry,rz) #~ ''' #~ #~ c=cos(r) #~ s=sin(r) #~ #~ rx=array([[1. , 0., 0.], #~ [0. , c[0],-s[0]], #~ [0. , s[0], c[0]]]) #~ #~ ry=array([[ c[1], 0., s[1]], #~ [ 0., 1., 0.], #~ [-s[1], 0., c[1]]]) #~ #~ #~ rz=array([[ c[2],-s[2], 0.], #~ [ s[2], c[2], 0.], #~ [ 0., 0., 1.]]) #~ #~ #~ tm=dot(rz,dot(ry,rx)) #~ #~ return tm # To improve speed, this routine was moved to cmisc.pyx #~ def rot_mat_i(r): #~ '''Returns the inverse transformation matrix for a rotation around the Z,Y,X axes #~ #~ Parameters #~ #~ r= (rx,ry,rz) #~ ''' #~ #~ c=cos(r) #~ s=sin(r) #~ #~ rx=array([[ 1., 0., 0.], #~ [ 0., c[0], s[0]], #~ [ 0.,-s[0], c[0]]]) #~ #~ ry=array([[ c[1], 0.,-s[1]], #~ [ 0., 1., 0.], #~ [ s[1], 0., c[1]]]) #~ #~ #~ rz=array([[ c[2], s[2], 0.], #~ [-s[2], c[2], 0.], #~ [ 0., 0., 1.]]) #~ #~ # Nota: se hizo una prueba para optimizar escribirndo la expresión del producto #~ # escalar, y el resultado fue considerablemente mas lento, toca revisar #~ #~ #~ return dot(rx,dot(ry,rz)) def cross(a,b): '''3D Vector product producto vectorial ''' x1,y1,z1=a x2,y2,z2=b return array((y1*z2-y2*z1,x2*z1-x1*z2,x1*y2-x2*y1)) def wavelength2RGB(wl): '''Function to aproximate and RGB tuple from the wavelength value Parameter: wavelength wavelength in um if the wavelength is outside the visible spectrum returns (0,0,0) Original code fount at: http://www.physics.sfasu.edu/astro/color/spectra.html ''' R,G,B=0.,0.,0. if (wl>=.380) & (wl<.440): R = -1.*(wl-.440)/(.440-.380) G = 0. B = 1. if (wl>=.440) & (wl<.490): R = 0. G = (wl-.440)/(.490-.440) B = 1. if (wl>=.490) & (wl<.510): R = 0. G = 1. B = -1.*(wl-.510)/(.510-.490) if (wl>=.510) & (wl<.580): R = (wl-.510)/(.580-.510) G = 1. B = 0. if (wl>=.580) & (wl<.645): R = 1. 
G = -1.*(wl-.645)/(.645-.580) B = 0. if (wl>=.645) & (wl < .780): R = 1. G = 0. B = 0. # LET THE INTENSITY FALL OFF NEAR THE VISION LIMITS if (wl>=.700): sss =.3+.7* (.780-wl)/(.780-.700) elif (wl < .420) : sss=.3+.7*(wl-.380)/(.420-.380) else : sss=1 R=R*sss G=G*sss B=B*sss return (R,G,B) def matrix_interpolation(M, i, j, type="bilinear"): """Returns the interpolated value of a matrix, when the indices i,j are floating point numbers. M Matrix to interpolate i,j Indices to interpolate type Interpolation type. supported types: nearest,bilinear """ mi, mj=M.shape if i<0 or i>mi-2 or j<0 or j>mj-2: raise IndexError("matrix Indexes out of range") # Allowed interpolation types inter_types=["nearest","bilinear", ] if not type in inter_types: raise ValueError("Interpolation type not allowed. The allowed types"\ " are: {0}".format(inter_types)) if type=="nearest": iri=int(round(i)) irj=int(round(j)) return M[iri, irj] elif type=="bilinear": i_s, j_s=floor((i, j)) #calc 1 m=M[i_s:i_s+2, j_s:j_s+2] iv=array([1-(i-i_s), i-i_s]) jv=array([[1-(j-j_s),], [j-j_s, ]]) return dot(iv, dot(m, jv))[0] #dx=i-i_s #dy=j-j_s ##print i, j, i_s, j_s, dx, dy #p1=dx*dy*M[i_s, j_s] #p2=(1.-dx)*dy*M[i_s+1, j_s] #p3=dx*(1.-dy)*M[i_s, j_s+1] #p4=(1.-dx)*(1.-dy)*M[i_s+1, j_s+1] #return p1+ p2+ p3+ p4 print "error" return 1. def hitlist2int(x, y, z, xi, yi): """Function that estimates an intensity distribution on a plane from a ray hitlist """ import matplotlib.delaunay as delaunay from pylab import griddata, meshgrid from scipy import interpolate #if xi.ndim != yi.ndim: # raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)") #if xi.ndim != 1 and xi.ndim != 2: # raise TypeError("inputs xi and yi must be 1D or 2D.") #if not len(x)==len(y)==len(z): # raise TypeError("inputs x,y,z must all be 1D arrays of the same length") # remove masked points. 
#if hasattr(z,'mask'): # x = x.compress(z.mask == False) # y = y.compress(z.mask == False) # z = z.compressed() #if xi.ndim == 1: # xi,yi = meshgrid(xi,yi) #triangulate data tri=delaunay.Triangulation(x, y) #calculate triangles area ntriangles=tri.circumcenters.shape[0] coord=array(zip(tri.x, tri.y)) #I=zeros((ntriangles, )) #xc=zeros((ntriangles, )) #yc=zeros((ntriangles, )) # for i in range(ntriangles): # i1, i2, i3=tri.triangle_nodes[i] # p1=coord[i1] # p2=coord[i2] # p3=coord[i3] # v1=p1-p2 # v2=p3-p2 # I[i]=1./(abs(v1[0]*v2[1]-v1[1]*v2[0])) # # the circumcenter data from the triangulation, has some problems so we # # recalculate it # xc[i], yc[i]=(p1+p2+p3)/3. # The previous code was replaced by the following code ### i1=tri.triangle_nodes[:, 0] i2=tri.triangle_nodes[:, 1] i3=tri.triangle_nodes[:, 2] p1=coord[i1] p2=coord[i2] p3=coord[i3] v1=p1-p2 v2=p3-p2 I=abs(1./(v1[:, 0]*v2[:, 1]-v1[:, 1]*v2[:, 0])) c=(p1+p2+p3)/3. xc=c[:, 0] yc=c[:, 1] ### # Because of the triangulation algorithm, there are some really high values # in the intensity data. To filter these values, remove the 5% points of the # higher intensity. ni=int(0.1*len(I)) j=I.argsort()[:-ni] xc=xc[j] yc=yc[j] I=I[j] I=I/I.max() # #print tri.circumcenters[:, 0] # #print tri.circumcenters.shape # print ntriangles, tri.circumcenters[:, 0].shape, tri.circumcenters[:, 0].flatten().shape #itri=delaunay.Triangulation(xc,yc) #inti=itri.linear_interpolator(I) #xi,yi = meshgrid(xi,yi) #d1=itri(xi, yi) #Interpolacion con Splines #di=interpolate.SmoothBivariateSpline(xc, yc, I) #d1=di(xi,yi) #Interpolacion nn, y generación de pupila xi,yi = meshgrid(xi,yi) d1=griddata(xc, yc, I,xi, yi ) return d1 def hitlist2int_list(x, y): """Function that estimates an intensity distribution on a plane from a ray hitlist. 
Returns the intensity samples as an x,y,I list """ import matplotlib.delaunay as delaunay from pylab import griddata, meshgrid from scipy import interpolate #if xi.ndim != yi.ndim: # raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)") #if xi.ndim != 1 and xi.ndim != 2: # raise TypeError("inputs xi and yi must be 1D or 2D.") #if not len(x)==len(y)==len(z): # raise TypeError("inputs x,y,z must all be 1D arrays of the same length") # remove masked points. #if hasattr(z,'mask'): # x = x.compress(z.mask == False) # y = y.compress(z.mask == False) # z = z.compressed() #if xi.ndim == 1: # xi,yi = meshgrid(xi,yi) #triangulate data tri=delaunay.Triangulation(x, y) #calculate triangles area ntriangles=tri.circumcenters.shape[0] coord=array(zip(tri.x, tri.y)) #I=zeros((ntriangles, )) #xc=zeros((ntriangles, )) #yc=zeros((ntriangles, )) # for i in range(ntriangles): # i1, i2, i3=tri.triangle_nodes[i] # p1=coord[i1] # p2=coord[i2] # p3=coord[i3] # v1=p1-p2 # v2=p3-p2 # I[i]=1./(abs(v1[0]*v2[1]-v1[1]*v2[0])) # # the circumcenter data from the triangulation, has some problems so we # # recalculate it # xc[i], yc[i]=(p1+p2+p3)/3. # The previous code was replaced by the following code ### i1=tri.triangle_nodes[:, 0] i2=tri.triangle_nodes[:, 1] i3=tri.triangle_nodes[:, 2] p1=coord[i1] p2=coord[i2] p3=coord[i3] v1=p1-p2 v2=p3-p2 I=abs(1./(v1[:, 0]*v2[:, 1]-v1[:, 1]*v2[:, 0])) c=(p1+p2+p3)/3. xc=c[:, 0] yc=c[:, 1] ### # Because of the triangulation algorithm, there are some really high values # in the intensity data. To filter these values, remove the 5% points of the # higher intensity. 
ni=int(0.1*len(I)) j=I.argsort()[:-ni] xc=xc[j] yc=yc[j] I=I[j] I=I/I.max() # #print tri.circumcenters[:, 0] # #print tri.circumcenters.shape # print ntriangles, tri.circumcenters[:, 0].shape, tri.circumcenters[:, 0].flatten().shape #itri=delaunay.Triangulation(xc,yc) #inti=itri.linear_interpolator(I) #xi,yi = meshgrid(xi,yi) #d1=itri(xi, yi) #Interpolacion con Splines #di=interpolate.SmoothBivariateSpline(xc, yc, I) #d1=di(xi,yi) return xc,yc,I def unwrapv(inph,in_p=(), uv=2*pi): """Return the input matrix unwraped the value given in uv This is a vectorized routine, but is not as fast as it should """ if not is_masked(inph): fasei=MaskedArray(inph, isnan(inph)) else: fasei=inph.copy() size=fasei.shape nx, ny=size # If the initial unwraping point is not given, take the center of the image # as initial coordinate if in_p==(): in_p=(int(size[0]/2),int(size[1]/2)) # Create a temporal space to mark if the points are already unwrapped # 0 the point has not been unwrapped # 1 the point has not been unwrapped, but it is in the unwrapping list # 2 the point was already unwrapped fl=N.zeros(size) # List containing the points to unwrap l_un=[in_p] fl[in_p]=1 # unwrapped values faseo=fasei.copy() XI_, YI_= meshgrid(range(-1, 2), range(-1, 2)) XI_=XI_.flatten() YI_=YI_.flatten() while len(l_un)>0: # remove the first value from the list unp=l_un.pop(0) #l_un[0:1]=[] XI=XI_+unp[0] YI=YI_+unp[1] #Remove from the list the values where XI is negative nxi=XI>-1 nyi=YI>-1 nxf=XI<nx nyf=YI<ny n=nonzero(nxi& nyi & nxf & nyf) lco=zip(XI[n], YI[n]) # Put the coordinates of unwrapped the neigbors in the list # And check for wrapping nv=0 wv=0 for co in lco: if (fl[co]==0) & (faseo.mask[co]==False): fl[co]=1 l_un.append(co) elif fl[co]==2: wv=wv+rint((faseo[co]-faseo[unp])/uv) nv=nv+1 if nv!=0: wv=wv/nv #if wv>=0: wv=int(wv+0.5) #else: wv=int(wv-0.5) fl[unp]=2 faseo[unp]=faseo[unp]+wv*uv return faseo def unwrap_py(inph,in_p=(), uv=2*pi): """Return the input matrix unwraped the valu given 
in uv The same as unwrapv, but using for-s, written in python """ if not is_masked(inph): fasei=MaskedArray(inph, isnan(inph)) else: fasei=inph nx, ny=(fasei.shape[0],fasei.shape[1]) # If the initial unwraping point is not given, take the center of the image # as initial coordinate if in_p==(): in_p=(int(nx/2),int(ny/2)) # Create a temporal space to mark if the points are already unwrapped # 0 the point has not been unwrapped # 1 the point has not been unwrapped, but it is in the unwrapping list # 2 the point was already unwrapped fl=zeros((nx, ny)) # List containing the points to unwrap l_un=[in_p] fl[in_p]=1 # unwrapped values faseo=fasei.copy() while len(l_un)>0: # remove the first value from the list cx, cy=l_un.pop(0) # Put the coordinates of unwrapped the neigbors in the list # And check for wrapping nv=0 wv=0 for i in range(cx-1, cx+2): for j in range(cy-1, cy+2): if (i>-1) and (i<nx) and (j>-1) and (j<ny): if (fl[i, j]==0)&(faseo.mask[i, j]==False): fl[i, j]=1 l_un.append((i, j)) elif fl[i, j]==2: wv=wv+rint((faseo[i, j]-faseo[cx, cy])/uv) nv=nv+1 if nv!=0: wv=wv/nv fl[cx, cy]=2 faseo[cx, cy]=faseo[cx, cy]+wv*uv return faseo def interpolate_g(xi,yi,zi,xx,yy,knots=10, error=False,mask=None): """Create a grid of zi values interpolating the values from xi,yi,zi xi,yi,zi 1D Lists or arrays containing the values to use as base for the interpolation xx,yy 1D vectors or lists containing the output coordinates samples tuple containing the shape of the output array. 
knots number of knots to be used in each direction error if set to true, half of the points (x, y, z) are used to create the interpolation, and half are used to evaluate the interpolation error """ xi=array(xi) yi=array(yi) zi=array(zi) #print xi #print yi #print zi assert xi.ndim==1 ,"xi must ba a 1D array or list" assert yi.ndim==1 ,"yi must ba a 1D array or list" assert zi.ndim==1 ,"zi must ba a 1D array or list" assert xx.ndim==1 ,"xx must ba a 1D array or list" assert yy.ndim==1 ,"yy must ba a 1D array or list" assert len(xi)==len(yi) and len(xi)==len(zi), "xi, yi, zi must have the same number of items" if error==True: # Create a list of indexes to be able to select the points that are going # to be used as spline generators, and as control points idx=where(arange(len(xi)) %2 ==0, False, True) # Use only half of the samples to create the Spline, if error == True: isp=argwhere(idx==True) ich=argwhere(idx==False) xsp=xi[isp] ysp=yi[isp] zsp=zi[isp] xch=xi[ich] ych=yi[ich] zch=zi[ich] else: xsp=xi ysp=yi zsp=zi #Distribute homogeneously the knots xk=linspace(xsp.min(), xsp.max(),knots) yk=linspace(ysp.min(), ysp.max(),knots) # LSQBivariateSpline using some knots gives smaller error than # SmoothBivariateSpline di=interpolate.LSQBivariateSpline(xsp, ysp, zsp, xk[1:-1], yk[1:-1]) #print xsp,ysp,zsp #di=interpolate.SmoothBivariateSpline(xsp, ysp, zsp) # Evaluate error if error==True: zch1=di.ev(xch, ych) er=(zch.flatten()-zch1).std() if mask==None: #d=griddata(xi, yi, zi, xx, yy) # d=di(xx,yy).transpose() else: d=ma_array(di(xx,yy).transpose(), mask=mask) if error==True: return d, er else: return d ####### Fin Funciones auxiliares
bsd-3-clause
dcroc16/skunk_works
google_appengine/lib/django-1.2/django/views/decorators/http.py
63
5999
""" Decorators for views based on HTTP headers. """ try: from functools import wraps except ImportError: from django.utils.functional import wraps # Python 2.4 fallback. from calendar import timegm from datetime import timedelta from email.Utils import formatdate from django.utils.decorators import decorator_from_middleware, available_attrs from django.utils.http import parse_etags, quote_etag from django.middleware.http import ConditionalGetMiddleware from django.http import HttpResponseNotAllowed, HttpResponseNotModified, HttpResponse conditional_page = decorator_from_middleware(ConditionalGetMiddleware) def require_http_methods(request_method_list): """ Decorator to make a view only accept particular request methods. Usage:: @require_http_methods(["GET", "POST"]) def my_view(request): # I can assume now that only GET or POST requests make it this far # ... Note that request methods should be in uppercase. """ def decorator(func): def inner(request, *args, **kwargs): if request.method not in request_method_list: return HttpResponseNotAllowed(request_method_list) return func(request, *args, **kwargs) return wraps(func, assigned=available_attrs(func))(inner) return decorator require_GET = require_http_methods(["GET"]) require_GET.__doc__ = "Decorator to require that a view only accept the GET method." require_POST = require_http_methods(["POST"]) require_POST.__doc__ = "Decorator to require that a view only accept the POST method." def condition(etag_func=None, last_modified_func=None): """ Decorator to support conditional retrieval (or change) for a view function. The parameters are callables to compute the ETag and last modified time for the requested resource, respectively. The callables are passed the same parameters as the view itself. The Etag function should return a string (or None if the resource doesn't exist), whilst the last_modified function should return a datetime object (or None if the resource doesn't exist). 
If both parameters are provided, all the preconditions must be met before the view is processed. This decorator will either pass control to the wrapped view function or return an HTTP 304 response (unmodified) or 412 response (preconditions failed), depending upon the request method. Any behavior marked as "undefined" in the HTTP spec (e.g. If-none-match plus If-modified-since headers) will result in the view function being called. """ def decorator(func): def inner(request, *args, **kwargs): # Get HTTP request headers if_modified_since = request.META.get("HTTP_IF_MODIFIED_SINCE") if_none_match = request.META.get("HTTP_IF_NONE_MATCH") if_match = request.META.get("HTTP_IF_MATCH") if if_none_match or if_match: # There can be more than one ETag in the request, so we # consider the list of values. try: etags = parse_etags(if_none_match or if_match) except ValueError: # In case of invalid etag ignore all ETag headers. # Apparently Opera sends invalidly quoted headers at times # (we should be returning a 400 response, but that's a # little extreme) -- this is Django bug #10681. if_none_match = None if_match = None # Compute values (if any) for the requested resource. if etag_func: res_etag = etag_func(request, *args, **kwargs) else: res_etag = None if last_modified_func: dt = last_modified_func(request, *args, **kwargs) if dt: res_last_modified = formatdate(timegm(dt.utctimetuple()))[:26] + 'GMT' else: res_last_modified = None else: res_last_modified = None response = None if not ((if_match and (if_modified_since or if_none_match)) or (if_match and if_none_match)): # We only get here if no undefined combinations of headers are # specified. 
if ((if_none_match and (res_etag in etags or "*" in etags and res_etag)) and (not if_modified_since or res_last_modified == if_modified_since)): if request.method in ("GET", "HEAD"): response = HttpResponseNotModified() else: response = HttpResponse(status=412) elif if_match and ((not res_etag and "*" in etags) or (res_etag and res_etag not in etags)): response = HttpResponse(status=412) elif (not if_none_match and if_modified_since and request.method == "GET" and res_last_modified == if_modified_since): response = HttpResponseNotModified() if response is None: response = func(request, *args, **kwargs) # Set relevant headers on the response if they don't already exist. if res_last_modified and not response.has_header('Last-Modified'): response['Last-Modified'] = res_last_modified if res_etag and not response.has_header('ETag'): response['ETag'] = quote_etag(res_etag) return response return inner return decorator # Shortcut decorators for common cases based on ETag or Last-Modified only def etag(etag_func): return condition(etag_func=etag_func) def last_modified(last_modified_func): return condition(last_modified_func=last_modified_func)
mit
ptcrypto/pycoin
pycoin/block.py
13
6490
# -*- coding: utf-8 -*-
"""
Parse and stream Bitcoin blocks as either Block or BlockHeader structures.


The MIT License (MIT)

Copyright (c) 2013 by Richard Kiss

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""

import struct
import io

from .encoding import double_sha256
from .merkle import merkle
from .serialize.bitcoin_streamer import parse_struct, stream_struct
from .serialize import b2h, b2h_rev
from .tx import Tx


class BadMerkleRootError(Exception):
    """Raised when a block's transactions don't hash to its merkle_root."""
    pass


def difficulty_max_mask_for_bits(bits):
    """Expand a compact "nBits" difficulty field into the full target value.

    The top byte of *bits* is a base-256 exponent and the low three bytes
    are the mantissa: target = mantissa * 256 ** (exponent - 3).
    """
    prefix = bits >> 24
    # BUGFIX: the mantissa occupies the low 23 bits (0x7fffff), not 19 bits
    # (0x7ffff); the old mask silently dropped the mantissa's top nibble.
    mask = (bits & 0x7fffff) << (8 * (prefix - 3))
    return mask


class BlockHeader(object):
    """A BlockHeader is a block with the transaction data removed.
    With a complete Merkle tree database, it can be reconstructed from the
    merkle_root."""

    @classmethod
    def parse(cls, f):
        """Parse the BlockHeader from the file-like object in the standard way
        that blocks are sent in the network (well, except we ignore the
        transaction information)."""
        (version, previous_block_hash, merkle_root, timestamp,
            difficulty, nonce) = struct.unpack("<L32s32sLLL", f.read(4+32+32+4*3))
        return cls(version, previous_block_hash, merkle_root, timestamp, difficulty, nonce)

    def __init__(self, version, previous_block_hash, merkle_root, timestamp, difficulty, nonce):
        self.version = version
        self.previous_block_hash = previous_block_hash
        self.merkle_root = merkle_root
        self.timestamp = timestamp
        self.difficulty = difficulty
        self.nonce = nonce

    def hash(self):
        """Calculate the hash for the block header. Note that this has the bytes
        in the opposite order from how the header is usually displayed (so the
        long string of 00 bytes is at the end, not the beginning)."""
        # BUGFIX: the cache attribute used to be written as self.__hash,
        # which name-mangles to _BlockHeader__hash, while the hasattr probe
        # checked the literal string "__hash" -- so the cache never hit and
        # the header was re-hashed on every call.
        if not hasattr(self, "_hash"):
            s = io.BytesIO()
            self.stream_header(s)
            self._hash = double_sha256(s.getvalue())
        return self._hash

    def stream_header(self, f):
        """Stream the block header in the standard way to the file-like object f."""
        stream_struct("L##LLL", f, self.version, self.previous_block_hash,
                      self.merkle_root, self.timestamp, self.difficulty, self.nonce)

    def stream(self, f):
        """Stream the block header in the standard way to the file-like object f.
        The Block subclass also includes the transactions."""
        self.stream_header(f)

    def id(self):
        """Returns the hash of the block displayed with the bytes in the order
        they are usually displayed in."""
        return b2h_rev(self.hash())

    def previous_block_id(self):
        """Returns the hash of the previous block, with the bytes in the order
        they are usually displayed in."""
        return b2h_rev(self.previous_block_hash)

    def __str__(self):
        return "BlockHeader [%s] (previous %s)" % (self.id(), self.previous_block_id())

    def __repr__(self):
        return "BlockHeader [%s] (previous %s)" % (self.id(), self.previous_block_id())


class Block(BlockHeader):
    """A Block is an element of the Bitcoin chain. Generating a block yields a reward!"""

    @classmethod
    def parse(cls, f):
        """Parse the Block from the file-like object in the standard way that
        blocks are sent in the network."""
        (version, previous_block_hash, merkle_root, timestamp,
            difficulty, nonce, count) = parse_struct("L##LLLI", f)
        txs = []
        for i in range(count):
            # Remember where each transaction starts, for later re-parsing.
            offset_in_block = f.tell()
            tx = Tx.parse(f)
            txs.append(tx)
            tx.offset_in_block = offset_in_block
        block = cls(version, previous_block_hash, merkle_root, timestamp, difficulty, nonce, txs)
        # Give each transaction a back-reference to its containing block.
        for tx in txs:
            tx.block = block
        return block

    def __init__(self, version, previous_block_hash, merkle_root, timestamp, difficulty, nonce, txs):
        self.version = version
        self.previous_block_hash = previous_block_hash
        self.merkle_root = merkle_root
        self.timestamp = timestamp
        self.difficulty = difficulty
        self.nonce = nonce
        self.txs = txs

    def as_blockheader(self):
        """Return a copy of this block with the transaction data stripped."""
        return BlockHeader(self.version, self.previous_block_hash, self.merkle_root,
                           self.timestamp, self.difficulty, self.nonce)

    def stream(self, f):
        """Stream the block in the standard way to the file-like object f."""
        stream_struct("L##LLLI", f, self.version, self.previous_block_hash,
                      self.merkle_root, self.timestamp, self.difficulty, self.nonce, len(self.txs))
        for t in self.txs:
            t.stream(f)

    def check_merkle_hash(self):
        """Raise a BadMerkleRootError if the Merkle hash of the
        transactions does not match the Merkle hash included in the block."""
        calculated_hash = merkle([tx.hash() for tx in self.txs], double_sha256)
        if calculated_hash != self.merkle_root:
            raise BadMerkleRootError(
                "calculated %s but block contains %s" % (b2h(calculated_hash), b2h(self.merkle_root)))

    def __str__(self):
        return "Block [%s] (previous %s) [tx count: %d]" % (
            self.id(), self.previous_block_id(), len(self.txs))

    def __repr__(self):
        return "Block [%s] (previous %s) [tx count: %d] %s" % (
            self.id(), self.previous_block_id(), len(self.txs), self.txs)
mit
hojel/calibre
src/html5lib/treewalkers/pulldom.py
1729
2302
from __future__ import absolute_import, division, unicode_literals from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \ COMMENT, IGNORABLE_WHITESPACE, CHARACTERS from . import _base from ..constants import voidElements class TreeWalker(_base.TreeWalker): def __iter__(self): ignore_until = None previous = None for event in self.tree: if previous is not None and \ (ignore_until is None or previous[1] is ignore_until): if previous[1] is ignore_until: ignore_until = None for token in self.tokens(previous, event): yield token if token["type"] == "EmptyTag": ignore_until = previous[1] previous = event if ignore_until is None or previous[1] is ignore_until: for token in self.tokens(previous, None): yield token elif ignore_until is not None: raise ValueError("Illformed DOM event stream: void element without END_ELEMENT") def tokens(self, event, next): type, node = event if type == START_ELEMENT: name = node.nodeName namespace = node.namespaceURI attrs = {} for attr in list(node.attributes.keys()): attr = node.getAttributeNode(attr) attrs[(attr.namespaceURI, attr.localName)] = attr.value if name in voidElements: for token in self.emptyTag(namespace, name, attrs, not next or next[1] is not node): yield token else: yield self.startTag(namespace, name, attrs) elif type == END_ELEMENT: name = node.nodeName namespace = node.namespaceURI if name not in voidElements: yield self.endTag(namespace, name) elif type == COMMENT: yield self.comment(node.nodeValue) elif type in (IGNORABLE_WHITESPACE, CHARACTERS): for token in self.text(node.nodeValue): yield token else: yield self.unknown(type)
gpl-3.0
alfredodeza/pytest
src/_pytest/_io/__init__.py
2
1585
from typing import List from typing import Sequence from py.io import TerminalWriter as BaseTerminalWriter # noqa: F401 class TerminalWriter(BaseTerminalWriter): def _write_source(self, lines: List[str], indents: Sequence[str] = ()) -> None: """Write lines of source code possibly highlighted. Keeping this private for now because the API is clunky. We should discuss how to evolve the terminal writer so we can have more precise color support, for example being able to write part of a line in one color and the rest in another, and so on. """ if indents and len(indents) != len(lines): raise ValueError( "indents size ({}) should have same size as lines ({})".format( len(indents), len(lines) ) ) if not indents: indents = [""] * len(lines) source = "\n".join(lines) new_lines = self._highlight(source).splitlines() for indent, new_line in zip(indents, new_lines): self.line(indent + new_line) def _highlight(self, source): """Highlight the given source code if we have markup support""" if not self.hasmarkup: return source try: from pygments.formatters.terminal import TerminalFormatter from pygments.lexers.python import PythonLexer from pygments import highlight except ImportError: return source else: return highlight(source, PythonLexer(), TerminalFormatter(bg="dark"))
mit
paul99/clank
tools/grit/grit/node/message.py
1
9466
#!/usr/bin/python2.4 # Copyright (c) 2011 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. '''Handling of the <message> element. ''' import re import types from grit.node import base import grit.format.rc_header import grit.format.rc from grit import clique from grit import exception from grit import tclib from grit import util BINARY, UTF8, UTF16 = range(3) # Finds whitespace at the start and end of a string which can be multiline. _WHITESPACE = re.compile('(?P<start>\s*)(?P<body>.+?)(?P<end>\s*)\Z', re.DOTALL | re.MULTILINE) class MessageNode(base.ContentNode): '''A <message> element.''' # For splitting a list of things that can be separated by commas or # whitespace _SPLIT_RE = re.compile('\s*,\s*|\s+') def __init__(self): super(type(self), self).__init__() # Valid after EndParsing, this is the MessageClique that contains the # source message and any translations of it that have been loaded. self.clique = None # We don't send leading and trailing whitespace into the translation # console, but rather tack it onto the source message and any # translations when formatting them into RC files or what have you. self.ws_at_start = '' # Any whitespace characters at the start of the text self.ws_at_end = '' # --"-- at the end of the text # A list of "shortcut groups" this message is in. We check to make sure # that shortcut keys (e.g. &J) within each shortcut group are unique. 
self.shortcut_groups_ = [] def _IsValidChild(self, child): return isinstance(child, (PhNode)) def _IsValidAttribute(self, name, value): if name not in ['name', 'offset', 'translateable', 'desc', 'meaning', 'internal_comment', 'shortcut_groups', 'custom_type', 'validation_expr', 'use_name_for_id']: return False if name == 'translateable' and value not in ['true', 'false']: return False return True def MandatoryAttributes(self): return ['name|offset'] def DefaultAttributes(self): return { 'translateable' : 'true', 'desc' : '', 'meaning' : '', 'internal_comment' : '', 'shortcut_groups' : '', 'custom_type' : '', 'validation_expr' : '', 'use_name_for_id' : 'false', } def GetTextualIds(self): ''' Returns the concatenation of the parent's node first_id and this node's offset if it has one, otherwise just call the superclass' implementation ''' if 'offset' in self.attrs: # we search for the first grouping node in the parents' list # to take care of the case where the first parent is an <if> node grouping_parent = self.parent import grit.node.empty while grouping_parent and not isinstance(grouping_parent, grit.node.empty.GroupingNode): grouping_parent = grouping_parent.parent assert 'first_id' in grouping_parent.attrs return [grouping_parent.attrs['first_id'] + '_' + self.attrs['offset']] else: return super(type(self), self).GetTextualIds() def IsTranslateable(self): return self.attrs['translateable'] == 'true' def ItemFormatter(self, t): # Only generate an output if the if condition is satisfied. 
if not self.SatisfiesOutputCondition(): return super(type(self), self).ItemFormatter(t) if t == 'rc_header': return grit.format.rc_header.Item() elif t in ('rc_all', 'rc_translateable', 'rc_nontranslateable'): return grit.format.rc.Message() elif t == 'js_map_format': return grit.format.js_map_format.Message() else: return super(type(self), self).ItemFormatter(t) def EndParsing(self): super(type(self), self).EndParsing() # Make the text (including placeholder references) and list of placeholders, # then strip and store leading and trailing whitespace and create the # tclib.Message() and a clique to contain it. text = '' placeholders = [] for item in self.mixed_content: if isinstance(item, types.StringTypes): text += item else: presentation = item.attrs['name'].upper() text += presentation ex = ' ' if len(item.children): ex = item.children[0].GetCdata() original = item.GetCdata() placeholders.append(tclib.Placeholder(presentation, original, ex)) m = _WHITESPACE.match(text) if m: self.ws_at_start = m.group('start') self.ws_at_end = m.group('end') text = m.group('body') self.shortcut_groups_ = self._SPLIT_RE.split(self.attrs['shortcut_groups']) self.shortcut_groups_ = [i for i in self.shortcut_groups_ if i != ''] description_or_id = self.attrs['desc'] if description_or_id == '' and 'name' in self.attrs: description_or_id = 'ID: %s' % self.attrs['name'] assigned_id = None if (self.attrs['use_name_for_id'] == 'true' and self.SatisfiesOutputCondition()): assigned_id = self.attrs['name'] message = tclib.Message(text=text, placeholders=placeholders, description=description_or_id, meaning=self.attrs['meaning'], assigned_id=assigned_id) self.clique = self.UberClique().MakeClique(message, self.IsTranslateable()) for group in self.shortcut_groups_: self.clique.AddToShortcutGroup(group) if self.attrs['custom_type'] != '': self.clique.SetCustomType(util.NewClassInstance(self.attrs['custom_type'], clique.CustomType)) elif self.attrs['validation_expr'] != '': 
self.clique.SetCustomType( clique.OneOffCustomType(self.attrs['validation_expr'])) def GetCliques(self): if self.clique: return [self.clique] else: return [] def Translate(self, lang): '''Returns a translated version of this message. ''' assert self.clique msg = self.clique.MessageForLanguage(lang, self.PseudoIsAllowed(), self.ShouldFallbackToEnglish() ).GetRealContent() return msg.replace('[GRITLANGCODE]', lang) def NameOrOffset(self): if 'name' in self.attrs: return self.attrs['name'] else: return self.attrs['offset'] def GetDataPackPair(self, lang, encoding): '''Returns a (id, string) pair that represents the string id and the string in utf8. This is used to generate the data pack data file. ''' from grit.format import rc_header id_map = rc_header.Item.tids_ id = id_map[self.GetTextualIds()[0]] message = self.ws_at_start + self.Translate(lang) + self.ws_at_end # |message| is a python unicode string, so convert to a byte stream that # has the correct encoding requested for the datapacks. We skip the first # 2 bytes of text resources because it is the BOM. 
if encoding == UTF8: return id, message.encode('utf8') if encoding == UTF16: return id, message.encode('utf16')[2:] # Default is BINARY return id, message # static method def Construct(parent, message, name, desc='', meaning='', translateable=True): '''Constructs a new message node that is a child of 'parent', with the name, desc, meaning and translateable attributes set using the same-named parameters and the text of the message and any placeholders taken from 'message', which must be a tclib.Message() object.''' # Convert type to appropriate string if translateable: translateable = 'true' else: translateable = 'false' node = MessageNode() node.StartParsing('message', parent) node.HandleAttribute('name', name) node.HandleAttribute('desc', desc) node.HandleAttribute('meaning', meaning) node.HandleAttribute('translateable', translateable) items = message.GetContent() for ix in range(len(items)): if isinstance(items[ix], types.StringTypes): text = items[ix] # Ensure whitespace at front and back of message is correctly handled. 
if ix == 0: text = "'''" + text if ix == len(items) - 1: text = text + "'''" node.AppendContent(text) else: phnode = PhNode() phnode.StartParsing('ph', node) phnode.HandleAttribute('name', items[ix].GetPresentation()) phnode.AppendContent(items[ix].GetOriginal()) if len(items[ix].GetExample()) and items[ix].GetExample() != ' ': exnode = ExNode() exnode.StartParsing('ex', phnode) exnode.AppendContent(items[ix].GetExample()) exnode.EndParsing() phnode.AddChild(exnode) phnode.EndParsing() node.AddChild(phnode) node.EndParsing() return node Construct = staticmethod(Construct) class PhNode(base.ContentNode): '''A <ph> element.''' def _IsValidChild(self, child): return isinstance(child, ExNode) def MandatoryAttributes(self): return ['name'] def EndParsing(self): super(type(self), self).EndParsing() # We only allow a single example for each placeholder if len(self.children) > 1: raise exception.TooManyExamples() class ExNode(base.ContentNode): '''An <ex> element.''' pass
bsd-3-clause
ict-felix/stack
vt_manager_kvm/src/python/scripts/setup_ch.py
3
3204
''' Created on Jul 19, 2010 @author: jnaous ''' from django.core.urlresolvers import reverse from django.test import Client from common.tests.client import test_get_and_post_form from django.contrib.auth.models import User from pyquery import PyQuery as pq from openflow.plugin.models import OpenFlowInterface, NonOpenFlowConnection from geni.planetlab.models import PlanetLabNode try: from setup_expedient_params import \ SUPERUSER_USERNAME, SUPERUSER_PASSWORD,\ USER_INFO,\ PL_AGGREGATE_INFO,\ OF_AGGREGATE_INFO,\ OF_PL_CONNECTIONS except ImportError: print """ Could not import setup_om_params module. Make sure this module exists and that it contains the following variables: SUPERUSER_USERNAME, SUPERUSER_PASSWORD, CH_PASSWORD, CH_USERNAME """ raise def run(): client = Client() client.login(username=SUPERUSER_USERNAME, password=SUPERUSER_PASSWORD) # Add all planetlab aggregates for pl_agg in PL_AGGREGATE_INFO: print "adding pl agg %s" % pl_agg["url"] response = test_get_and_post_form( client, reverse("planetlab_aggregate_create"), pl_agg, ) print "got response %s" % response assert response.status_code == 302 for of_agg in OF_AGGREGATE_INFO: print "adding of agg %s" % of_agg["url"] response = test_get_and_post_form( client, reverse("openflow_aggregate_create"), of_agg, del_params=["verify_certs"], ) assert response.status_code == 302 for cnxn_tuple in OF_PL_CONNECTIONS: print "adding cnxn %s" % (cnxn_tuple,) NonOpenFlowConnection.objects.get_or_create( of_iface=OpenFlowInterface.objects.get( switch__datapath_id=cnxn_tuple[0], port_num=cnxn_tuple[1], ), resource=PlanetLabNode.objects.get(name=cnxn_tuple[2]), ) client.logout() for username, info in USER_INFO.items(): # create user User.objects.create_user( username=username, email=info["email"], password=info["password"]) client.login(username=username, password=info["password"]) # create project and slice for project in info["projects"]: response = test_get_and_post_form( client, reverse("project_create"), params=dict( 
name=project["name"], description=project["description"], ), ) assert response.status_code == 302 # This code is missing the project id. Need to get somehow to use reverse. # for slice in project["slices"]: # response = test_get_and_post_form( # client, reverse("slice_create"), # params=dict( # name=slice["name"], # description=slice["description"], # ), # ) # assert response.status_code == 302 client.logout()
apache-2.0
proxysh/Safejumper-for-Desktop
buildmac/Resources/env/lib/python2.7/site-packages/Crypto/PublicKey/ElGamal.py
124
13212
# # ElGamal.py : ElGamal encryption/decryption and signatures # # Part of the Python Cryptography Toolkit # # Originally written by: A.M. Kuchling # # =================================================================== # The contents of this file are dedicated to the public domain. To # the extent that dedication to the public domain is not available, # everyone is granted a worldwide, perpetual, royalty-free, # non-exclusive license to exercise all rights associated with the # contents of this file for any purpose whatsoever. # No rights are reserved. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # =================================================================== """ElGamal public-key algorithm (randomized encryption and signature). Signature algorithm ------------------- The security of the ElGamal signature scheme is based (like DSA) on the discrete logarithm problem (DLP_). Given a cyclic group, a generator *g*, and an element *h*, it is hard to find an integer *x* such that *g^x = h*. The group is the largest multiplicative sub-group of the integers modulo *p*, with *p* prime. The signer holds a value *x* (*0<x<p-1*) as private key, and its public key (*y* where *y=g^x mod p*) is distributed. The ElGamal signature is twice as big as *p*. Encryption algorithm -------------------- The security of the ElGamal encryption scheme is based on the computational Diffie-Hellman problem (CDH_). 
Given a cyclic group, a generator *g*, and two integers *a* and *b*, it is difficult to find the element *g^{ab}* when only *g^a* and *g^b* are known, and not *a* and *b*. As before, the group is the largest multiplicative sub-group of the integers modulo *p*, with *p* prime. The receiver holds a value *a* (*0<a<p-1*) as private key, and its public key (*b* where *b*=g^a*) is given to the sender. The ElGamal ciphertext is twice as big as *p*. Domain parameters ----------------- For both signature and encryption schemes, the values *(p,g)* are called *domain parameters*. They are not sensitive but must be distributed to all parties (senders and receivers). Different signers can share the same domain parameters, as can different recipients of encrypted messages. Security -------- Both DLP and CDH problem are believed to be difficult, and they have been proved such (and therefore secure) for more than 30 years. The cryptographic strength is linked to the magnitude of *p*. In 2012, a sufficient size for *p* is deemed to be 2048 bits. For more information, see the most recent ECRYPT_ report. Even though ElGamal algorithms are in theory reasonably secure for new designs, in practice there are no real good reasons for using them. The signature is four times larger than the equivalent DSA, and the ciphertext is two times larger than the equivalent RSA. Functionality ------------- This module provides facilities for generating new ElGamal keys and for constructing them from known components. ElGamal keys allows you to perform basic signing, verification, encryption, and decryption. 
>>> from Crypto import Random >>> from Crypto.Random import random >>> from Crypto.PublicKey import ElGamal >>> from Crypto.Util.number import GCD >>> from Crypto.Hash import SHA >>> >>> message = "Hello" >>> key = ElGamal.generate(1024, Random.new().read) >>> h = SHA.new(message).digest() >>> while 1: >>> k = random.StrongRandom().randint(1,key.p-1) >>> if GCD(k,key.p-1)==1: break >>> sig = key.sign(h,k) >>> ... >>> if key.verify(h,sig): >>> print "OK" >>> else: >>> print "Incorrect signature" .. _DLP: http://www.cosic.esat.kuleuven.be/publications/talk-78.pdf .. _CDH: http://en.wikipedia.org/wiki/Computational_Diffie%E2%80%93Hellman_assumption .. _ECRYPT: http://www.ecrypt.eu.org/documents/D.SPA.17.pdf """ __revision__ = "$Id$" __all__ = ['generate', 'construct', 'error', 'ElGamalobj'] from Crypto.PublicKey.pubkey import * from Crypto.Util import number class error (Exception): pass # Generate an ElGamal key with N bits def generate(bits, randfunc, progress_func=None): """Randomly generate a fresh, new ElGamal key. The key will be safe for use for both encryption and signature (although it should be used for **only one** purpose). :Parameters: bits : int Key length, or size (in bits) of the modulus *p*. Recommended value is 2048. randfunc : callable Random number generation function; it should accept a single integer N and return a string of random data N bytes long. progress_func : callable Optional function that will be called with a short string containing the key parameter currently being generated; it's useful for interactive applications where a user is waiting for a key to be generated. :attention: You should always use a cryptographically secure random number generator, such as the one defined in the ``Crypto.Random`` module; **don't** just use the current time and the ``random`` module. :Return: An ElGamal key object (`ElGamalobj`). 
""" obj=ElGamalobj() # Generate a safe prime p # See Algorithm 4.86 in Handbook of Applied Cryptography if progress_func: progress_func('p\n') while 1: q = bignum(getPrime(bits-1, randfunc)) obj.p = 2*q+1 if number.isPrime(obj.p, randfunc=randfunc): break # Generate generator g # See Algorithm 4.80 in Handbook of Applied Cryptography # Note that the order of the group is n=p-1=2q, where q is prime if progress_func: progress_func('g\n') while 1: # We must avoid g=2 because of Bleichenbacher's attack described # in "Generating ElGamal signatures without knowning the secret key", # 1996 # obj.g = number.getRandomRange(3, obj.p, randfunc) safe = 1 if pow(obj.g, 2, obj.p)==1: safe=0 if safe and pow(obj.g, q, obj.p)==1: safe=0 # Discard g if it divides p-1 because of the attack described # in Note 11.67 (iii) in HAC if safe and divmod(obj.p-1, obj.g)[1]==0: safe=0 # g^{-1} must not divide p-1 because of Khadir's attack # described in "Conditions of the generator for forging ElGamal # signature", 2011 ginv = number.inverse(obj.g, obj.p) if safe and divmod(obj.p-1, ginv)[1]==0: safe=0 if safe: break # Generate private key x if progress_func: progress_func('x\n') obj.x=number.getRandomRange(2, obj.p-1, randfunc) # Generate public key y if progress_func: progress_func('y\n') obj.y = pow(obj.g, obj.x, obj.p) return obj def construct(tup): """Construct an ElGamal key from a tuple of valid ElGamal components. The modulus *p* must be a prime. The following conditions must apply: - 1 < g < p-1 - g^{p-1} = 1 mod p - 1 < x < p-1 - g^x = y mod p :Parameters: tup : tuple A tuple of long integers, with 3 or 4 items in the following order: 1. Modulus (*p*). 2. Generator (*g*). 3. Public key (*y*). 4. Private key (*x*). Optional. :Return: An ElGamal key object (`ElGamalobj`). 
""" obj=ElGamalobj() if len(tup) not in [3,4]: raise ValueError('argument for construct() wrong length') for i in range(len(tup)): field = obj.keydata[i] setattr(obj, field, tup[i]) return obj class ElGamalobj(pubkey): """Class defining an ElGamal key. :undocumented: __getstate__, __setstate__, __repr__, __getattr__ """ #: Dictionary of ElGamal parameters. #: #: A public key will only have the following entries: #: #: - **y**, the public key. #: - **g**, the generator. #: - **p**, the modulus. #: #: A private key will also have: #: #: - **x**, the private key. keydata=['p', 'g', 'y', 'x'] def encrypt(self, plaintext, K): """Encrypt a piece of data with ElGamal. :Parameter plaintext: The piece of data to encrypt with ElGamal. It must be numerically smaller than the module (*p*). :Type plaintext: byte string or long :Parameter K: A secret number, chosen randomly in the closed range *[1,p-2]*. :Type K: long (recommended) or byte string (not recommended) :Return: A tuple with two items. Each item is of the same type as the plaintext (string or long). :attention: selection of *K* is crucial for security. Generating a random number larger than *p-1* and taking the modulus by *p-1* is **not** secure, since smaller values will occur more frequently. Generating a random number systematically smaller than *p-1* (e.g. *floor((p-1)/8)* random bytes) is also **not** secure. In general, it shall not be possible for an attacker to know the value of any bit of K. :attention: The number *K* shall not be reused for any other operation and shall be discarded immediately. """ return pubkey.encrypt(self, plaintext, K) def decrypt(self, ciphertext): """Decrypt a piece of data with ElGamal. :Parameter ciphertext: The piece of data to decrypt with ElGamal. :Type ciphertext: byte string, long or a 2-item tuple as returned by `encrypt` :Return: A byte string if ciphertext was a byte string or a tuple of byte strings. A long otherwise. 
""" return pubkey.decrypt(self, ciphertext) def sign(self, M, K): """Sign a piece of data with ElGamal. :Parameter M: The piece of data to sign with ElGamal. It may not be longer in bit size than *p-1*. :Type M: byte string or long :Parameter K: A secret number, chosen randomly in the closed range *[1,p-2]* and such that *gcd(k,p-1)=1*. :Type K: long (recommended) or byte string (not recommended) :attention: selection of *K* is crucial for security. Generating a random number larger than *p-1* and taking the modulus by *p-1* is **not** secure, since smaller values will occur more frequently. Generating a random number systematically smaller than *p-1* (e.g. *floor((p-1)/8)* random bytes) is also **not** secure. In general, it shall not be possible for an attacker to know the value of any bit of K. :attention: The number *K* shall not be reused for any other operation and shall be discarded immediately. :attention: M must be be a cryptographic hash, otherwise an attacker may mount an existential forgery attack. :Return: A tuple with 2 longs. """ return pubkey.sign(self, M, K) def verify(self, M, signature): """Verify the validity of an ElGamal signature. :Parameter M: The expected message. :Type M: byte string or long :Parameter signature: The ElGamal signature to verify. :Type signature: A tuple with 2 longs as return by `sign` :Return: True if the signature is correct, False otherwise. 
""" return pubkey.verify(self, M, signature) def _encrypt(self, M, K): a=pow(self.g, K, self.p) b=( M*pow(self.y, K, self.p) ) % self.p return ( a,b ) def _decrypt(self, M): if (not hasattr(self, 'x')): raise TypeError('Private key not available in this object') ax=pow(M[0], self.x, self.p) plaintext=(M[1] * inverse(ax, self.p ) ) % self.p return plaintext def _sign(self, M, K): if (not hasattr(self, 'x')): raise TypeError('Private key not available in this object') p1=self.p-1 if (GCD(K, p1)!=1): raise ValueError('Bad K value: GCD(K,p-1)!=1') a=pow(self.g, K, self.p) t=(M-self.x*a) % p1 while t<0: t=t+p1 b=(t*inverse(K, p1)) % p1 return (a, b) def _verify(self, M, sig): if sig[0]<1 or sig[0]>self.p-1: return 0 v1=pow(self.y, sig[0], self.p) v1=(v1*pow(sig[0], sig[1], self.p)) % self.p v2=pow(self.g, M, self.p) if v1==v2: return 1 return 0 def size(self): return number.size(self.p) - 1 def has_private(self): if hasattr(self, 'x'): return 1 else: return 0 def publickey(self): return construct((self.p, self.g, self.y)) object=ElGamalobj
gpl-2.0
gorlemik/selenium
py/test/selenium/test_default_server.py
65
2265
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from selenium import selenium import unittest import sys, time class TestDefaultServer(unittest.TestCase): seleniumHost = 'localhost' seleniumPort = str(4444) #browserStartCommand = "c:\\program files\\internet explorer\\iexplore.exe" browserStartCommand = "*firefox" browserURL = "http://localhost:4444" def setUp(self): print("Using selenium server at " + self.seleniumHost + ":" + self.seleniumPort) self.selenium = selenium(self.seleniumHost, self.seleniumPort, self.browserStartCommand, self.browserURL) self.selenium.start() def testLinks(self): selenium = self.selenium selenium.open("/selenium-server/tests/html/test_click_page1.html") self.failUnless(selenium.get_text("link").find("Click here for next page") != -1, "link 'link' doesn't contain expected text") links = selenium.get_all_links() self.failUnless(len(links) > 3) self.assertEqual("linkToAnchorOnThisPage", links[3]) selenium.click("link") selenium.wait_for_page_to_load(5000) self.failUnless(selenium.get_location().endswith("/selenium-server/tests/html/test_click_page2.html")) selenium.click("previousPage") selenium.wait_for_page_to_load(5000) 
self.failUnless(selenium.get_location().endswith("/selenium-server/tests/html/test_click_page1.html")) def tearDown(self): self.selenium.stop() if __name__ == "__main__": unittest.main()
apache-2.0
nerith/servo
components/script/dom/bindings/codegen/parser/tests/test_const.py
134
3000
import WebIDL def WebIDLTest(parser, harness): parser.parse(""" interface TestConsts { const byte zero = 0; const byte b = -1; const octet o = 2; const short s = -3; const unsigned short us = 0x4; const long l = -0X5; const unsigned long ul = 6; const unsigned long long ull = 7; const long long ll = -010; const boolean t = true; const boolean f = false; const boolean? n = null; const boolean? nt = true; const boolean? nf = false; }; """) results = parser.finish() harness.ok(True, "TestConsts interface parsed without error.") harness.check(len(results), 1, "Should be one production.") iface = results[0] harness.ok(isinstance(iface, WebIDL.IDLInterface), "Should be an IDLInterface") harness.check(iface.identifier.QName(), "::TestConsts", "Interface has the right QName") harness.check(iface.identifier.name, "TestConsts", "Interface has the right name") harness.check(len(iface.members), 14, "Expect 14 members") consts = iface.members def checkConst(const, QName, name, type, value): harness.ok(isinstance(const, WebIDL.IDLConst), "Should be an IDLConst") harness.ok(const.isConst(), "Const is a const") harness.ok(not const.isAttr(), "Const is not an attr") harness.ok(not const.isMethod(), "Const is not a method") harness.check(const.identifier.QName(), QName, "Const has the right QName") harness.check(const.identifier.name, name, "Const has the right name") harness.check(str(const.type), type, "Const has the right type") harness.ok(const.type.isPrimitive(), "All consts should be primitive") harness.check(str(const.value.type), str(const.type), "Const's value has the same type as the type") harness.check(const.value.value, value, "Const value has the right value.") checkConst(consts[0], "::TestConsts::zero", "zero", "Byte", 0) checkConst(consts[1], "::TestConsts::b", "b", "Byte", -1) checkConst(consts[2], "::TestConsts::o", "o", "Octet", 2) checkConst(consts[3], "::TestConsts::s", "s", "Short", -3) checkConst(consts[4], "::TestConsts::us", "us", "UnsignedShort", 4) 
checkConst(consts[5], "::TestConsts::l", "l", "Long", -5) checkConst(consts[6], "::TestConsts::ul", "ul", "UnsignedLong", 6) checkConst(consts[7], "::TestConsts::ull", "ull", "UnsignedLongLong", 7) checkConst(consts[8], "::TestConsts::ll", "ll", "LongLong", -8) checkConst(consts[9], "::TestConsts::t", "t", "Boolean", True) checkConst(consts[10], "::TestConsts::f", "f", "Boolean", False) checkConst(consts[11], "::TestConsts::n", "n", "BooleanOrNull", None) checkConst(consts[12], "::TestConsts::nt", "nt", "BooleanOrNull", True) checkConst(consts[13], "::TestConsts::nf", "nf", "BooleanOrNull", False)
mpl-2.0
puppetlabs/jenkins-job-builder
jenkins_jobs/cli/subcommand/get_plugins_info.py
5
1929
#!/usr/bin/env python # Copyright (C) 2017 Thanh Ha # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import yaml from jenkins_jobs.builder import JenkinsManager import jenkins_jobs.cli.subcommand.base as base logger = logging.getLogger(__name__) class GetPluginsInfoSubCommand(base.BaseSubCommand): def parse_args(self, subparser): plugins_info = subparser.add_parser( 'get-plugins-info', help='get plugins info yaml by querying Jenkins server.') plugins_info.add_argument( '-o', '--output-file', default='plugins_info.yaml', dest='plugins_info_file', help='file to save output to.') def execute(self, options, jjb_config): builder = JenkinsManager(jjb_config) plugin_data = builder.jenkins.get_plugins_info() plugins_info = [] for plugin in plugin_data: info = { 'longName': str(plugin['longName']), 'shortName': str(plugin['shortName']), 'version': str(plugin['version']), } plugins_info.append(info) if options.plugins_info_file: with open(options.plugins_info_file, 'w') as outfile: outfile.write(yaml.dump(plugins_info)) logger.info("Generated {} file".format(options.plugins_info_file)) else: print(yaml.dump(plugins_info))
apache-2.0
openpolis/scrapeit
scrapers.py
1
8144
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Scrapers that download zipped CSV archives of Italian office holders,
parse them, and enrich each row with a computed ``codice_fiscale`` (tax
code) and a slugified ``unique_id``.

NOTE(review): archives appear to come from a Ministry of Interior data
source (``minint`` naming) — confirm against the URLs passed in by
callers.
"""
import csv
from datetime import datetime
import io
import json
import logging
import logging.config
import re
import requests
import zipfile

from slugify import slugify

from utils import DictReaderInsensitive, DictInsensitive
from utils.codice_fiscale import db
from utils.codice_fiscale.codicefiscale import codice_fiscale, codice_cognome, codice_nome

__author__ = 'guglielmo'

# define connection to sqlite DB with places->catasto codes
con = db.Connessione()

# pre-compile some regular expressions used within the loops:
# prov_com_re matches "CITY (PROV)", state_re matches a bare state name
prov_com_re = re.compile(r'(?P<city>[\w \']+)\((?P<prov>[\w \']+)\)')
state_re = re.compile(r'(?P<state>[\w \']+)')

# NOTE(review): module import has side effects (opens logging.conf.json,
# whose file handle is never explicitly closed, and configures logging)
logging.config.dictConfig(json.load(open('logging.conf.json')))


class DataScraperException(Exception):
    """Raised when a row cannot be parsed or its tax code computed."""
    pass


class DataScraper(object):
    """
    Base DataScraper class from which each class extends.
    Default logger and arguments parser defined
    """
    def __init__(self, **kwargs):
        # shared logger used by all concrete scrapers
        self.logger = logging.getLogger('import_script')

    def scrape(self, **kwargs):
        """
        Need to be implemented by extending class.

        :param kwargs:
        :return:
        """
        raise Exception("not implemented")

    def get_iterator(self):
        # Subclasses return an iterator over enriched CSV rows.
        raise Exception("not implemented")


class MinintCSVDictReader(DictReaderInsensitive):
    """Case-insensitive CSV reader that, per row, computes the person's
    codice fiscale and a unique identifier (see ``__next__``)."""

    def __init__(self, f, institution=None, **kwargs):
        DictReaderInsensitive.__init__(self, f, **kwargs)
        # one of 'regione' / 'provincia' / 'comune' (set by the scraper)
        self.institution = institution

    def get_codice_fiscale(self, nome, cognome, data_nascita, luogo_nascita, sesso, **kwargs):
        """Compute the tax code from name, birth date/place and sex.

        ``data_nascita`` is expected as DD/MM/YYYY; ``luogo_nascita`` is
        either "CITY (PROV)" or a bare state name (foreign births).
        Raises DataScraperException on any parse or lookup failure.
        """
        first_name = nome
        last_name = cognome
        try:
            birth_date = datetime.strptime(data_nascita, "%d/%m/%Y")
        except ValueError as e:
            raise DataScraperException("Impossibile parsare data nascita:{0}:.Skipping.".format(data_nascita))
        birth_place = {
            'state': 'ITALIA',
            'prov': None,
            'city': None
        }
        # Try "CITY (PROV)" first; fall back to a bare state name.
        m = prov_com_re.match(luogo_nascita)
        if m is None:
            m = state_re.match(luogo_nascita)
            if m is None:
                raise DataScraperException("Impossibile parsare luogo nascita:{0}:.Skipping.".format(luogo_nascita))
            birth_place['state'] = m.groupdict()['state']
        else:
            birth_place.update({
                'prov': m.groupdict()['prov'].strip().upper(),
                'city': m.groupdict()['city'].strip().upper()
            })
        try:
            # lookup of catasto codes goes through the module-level DB
            # connection ``con``
            return codice_fiscale(
                last_name, first_name, birth_date, sesso,
                birth_place['state'], birth_place['prov'], birth_place['city'],
                con.codici_geografici
            )
        except db.DBNoDataError:
            raise DataScraperException("Impossibile determinare CF:{0}:.Skipping.".format(luogo_nascita))
        except Exception:
            raise DataScraperException("Impossibile determinare CF.Skipping.")

    def get_unique_id(self, row):
        """Build a slug identifying this charge: CF, charge description,
        institution, locality, start date, and the literal 'in carica'."""
        istituzione = self.institution
        localita = 'nd'
        # locality column is e.g. "denominazione_comune" for institution
        # 'comune'
        key = "denominazione_{0}".format(self.institution)
        localita = row[key]
        start_date = datetime.strptime(row['data_entrata_in_carica'], "%d/%m/%Y").strftime("%Y%m%d")
        unique_id = slugify(
            "-".join([
                row['codice_fiscale'],
                row['descrizione_carica'],
                istituzione,
                localita,
                start_date,
                'in carica'
            ])
        )
        return unique_id

    def __next__(self):
        """Return the next row enriched with codice_fiscale, istituzione
        and unique_id.

        NOTE(review): on a DataScraperException this returns the tuple
        ``(exception, row)`` instead of a dict — consumers must check
        the shape of each yielded item.
        """
        row = DictReaderInsensitive.__next__(self)
        carica = row['descrizione_carica'].lower()
        if 'commissario' in carica or 'commissione' in carica:
            # commissioners get a synthetic code: surname+name consonant
            # codes padded with dashes and a trailing 'C'
            row['codice_fiscale'] = "{0}{1}---------C".format(
                codice_cognome(row['cognome']),
                codice_nome(row['nome'])
            )
        else:
            try:
                row['codice_fiscale'] = self.get_codice_fiscale(**row)
            except DataScraperException as e:
                return (e, row)
        row['istituzione'] = self.institution
        row['unique_id'] = self.get_unique_id(row)
        return row


class MinintStoriciCSVDictReader(MinintCSVDictReader):
    """Variant for the historical archives: different column names and a
    unique id that includes the end date of the charge."""

    def get_unique_id(self, row):
        istituzione = self.institution
        localita = 'nd'
        # historical files name the locality column "desc_<institution>"
        key = "desc_{0}".format(self.institution)
        localita = row[key]
        start_date = datetime.strptime(row['data_nomina'], "%d/%m/%Y").strftime("%Y%m%d")
        end_date = datetime.strptime(row['data_cessazione'], "%d/%m/%Y").strftime("%Y%m%d")
        unique_id = slugify(
            "-".join([
                row['codice_fiscale'],
                row['descrizione_carica'],
                istituzione,
                localita,
                start_date,
                end_date
            ])
        )
        return unique_id

    def __next__(self):
        # Historical rows do not get a real tax code: 'codice_fiscale'
        # holds a readable composite of name/birth data instead.
        row = DictReaderInsensitive.__next__(self)
        if 'commissario' in row['descrizione_carica'].lower():
            row['codice_fiscale'] = "{cognome} {nome}".format(**row)
        else:
            try:
                row['codice_fiscale'] = "{cognome} {nome} {data_nascita} {desc_sede_nascita} {sesso}".format(**row)
            except DataScraperException as e:
                return (e, row)
        row['istituzione'] = self.institution
        row['unique_id'] = self.get_unique_id(row)
        return row


class MinintDataScraper(DataScraper):
    """Downloads the current-administrators zip archive and exposes its
    CSV content through a MinintCSVDictReader."""

    def __init__(self, url, log_level):
        DataScraper.__init__(self)
        self.url = url
        self.log_level = log_level

    def scrape(self):
        # unknown log_level strings silently fall back to WARNING
        self.logger.setLevel(getattr(logging, self.log_level.upper(), logging.WARNING))
        self.logger.info("Start")
        self.logger.debug("minint_url: {0}".format(self.url))
        # retrieve bulk_data from minint_url
        return self.get_iterator()

    def get_iterator(self):
        """
        Read the zip file from the site, extract its content
        and return a csv.DictReader to it

        :return: csv.DictReader
        """
        # request zip file from url
        r = requests.get(self.url)

        # read content from url and create a ZipFile out of it's content,
        archive = zipfile.ZipFile(io.BytesIO(r.content), 'r')

        # extract filename (only the first archive member is used)
        file = archive.infolist()[0].filename

        # uncompress zipped file and remove the first 2 lines (they are notes)
        archive_txt = "\n".join(archive.read(file).decode('latin1').split("\r\n")[2:])

        # create an extended csv.DictReader
        # injecting codice fiscale and unique_id computation;
        # the member filename selects the institution level
        dummy, filename = file.split("/")
        institution = {
            'ammreg.txt': 'regione',
            'ammprov.txt': 'provincia',
            'ammcom.txt': 'comune'
        }[filename]
        archive_reader = MinintCSVDictReader(io.StringIO(archive_txt), delimiter=";", institution=institution)

        return archive_reader


class MinintStoriciDataScraper(MinintDataScraper):
    """Same as MinintDataScraper but for the historical archives (no
    note lines to strip, different filename convention)."""

    def get_iterator(self):
        """
        Read the zip file from the site, extract its content
        and return a csv.DictReader to it

        :return: csv.DictReader
        """
        # request zip file from url
        r = requests.get(self.url)

        # read content from url and create a ZipFile out of it's content,
        archive = zipfile.ZipFile(io.BytesIO(r.content), 'r')

        # extract filename
        file = archive.infolist()[0].filename

        # uncompress zipped file
        archive_txt = "\n".join(archive.read(file).decode('latin1').split("\r\n"))

        # create an extended csv.DictReader
        # injecting codice fiscale and unique_id computation;
        # NOTE(review): file[:-12] assumes a fixed-length suffix after
        # the institution name in the member filename — verify against
        # the actual archives
        institution_context = file[:-12]
        institution = {
            'regioni': 'regione',
            'province': 'provincia',
            'comuni': 'comune'
        }[institution_context]
        archive_reader = MinintStoriciCSVDictReader(
            io.StringIO(archive_txt),
            delimiter=";",
            institution=institution
        )

        return archive_reader
mit
skwbc/numpy
numpy/fft/tests/test_fftpack.py
134
6052
"""Tests for numpy.fft: round-trip and normalization checks for the
1-D/2-D/N-D complex, real and Hermitian transforms, plus a thread-safety
smoke test.

Fix: renamed ``test_ihttf`` to ``test_ihfft`` — the method exercises
``np.fft.ihfft`` and the old name was a typo.
"""
from __future__ import division, absolute_import, print_function

import numpy as np
from numpy.random import random
from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal
from numpy.testing import assert_array_equal
import threading
import sys
if sys.version_info[0] >= 3:
    import queue
else:
    import Queue as queue


def fft1(x):
    """Reference O(n^2) DFT used to validate np.fft.fft."""
    L = len(x)
    phase = -2j*np.pi*(np.arange(L)/float(L))
    # outer product of indices builds the full DFT phase matrix
    phase = np.arange(L).reshape(-1, 1) * phase
    return np.sum(x*np.exp(phase), axis=1)


class TestFFTShift(TestCase):

    def test_fft_n(self):
        # n=0 is an invalid transform length
        self.assertRaises(ValueError, np.fft.fft, [1, 2, 3], 0)


class TestFFT1D(TestCase):
    """Each test checks the transform against a reference (or its
    inverse round-trip) and the "ortho" normalization scaling."""

    def test_fft(self):
        x = random(30) + 1j*random(30)
        assert_array_almost_equal(fft1(x), np.fft.fft(x))
        assert_array_almost_equal(fft1(x) / np.sqrt(30),
                                  np.fft.fft(x, norm="ortho"))

    def test_ifft(self):
        x = random(30) + 1j*random(30)
        assert_array_almost_equal(x, np.fft.ifft(np.fft.fft(x)))
        assert_array_almost_equal(
            x, np.fft.ifft(np.fft.fft(x, norm="ortho"), norm="ortho"))

    def test_fft2(self):
        x = random((30, 20)) + 1j*random((30, 20))
        assert_array_almost_equal(np.fft.fft(np.fft.fft(x, axis=1), axis=0),
                                  np.fft.fft2(x))
        assert_array_almost_equal(np.fft.fft2(x) / np.sqrt(30 * 20),
                                  np.fft.fft2(x, norm="ortho"))

    def test_ifft2(self):
        x = random((30, 20)) + 1j*random((30, 20))
        assert_array_almost_equal(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0),
                                  np.fft.ifft2(x))
        assert_array_almost_equal(np.fft.ifft2(x) * np.sqrt(30 * 20),
                                  np.fft.ifft2(x, norm="ortho"))

    def test_fftn(self):
        x = random((30, 20, 10)) + 1j*random((30, 20, 10))
        assert_array_almost_equal(
            np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0),
            np.fft.fftn(x))
        assert_array_almost_equal(np.fft.fftn(x) / np.sqrt(30 * 20 * 10),
                                  np.fft.fftn(x, norm="ortho"))

    def test_ifftn(self):
        x = random((30, 20, 10)) + 1j*random((30, 20, 10))
        assert_array_almost_equal(
            np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0),
            np.fft.ifftn(x))
        assert_array_almost_equal(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10),
                                  np.fft.ifftn(x, norm="ortho"))

    def test_rfft(self):
        x = random(30)
        # rfft of length-30 input keeps the 30//2 + 1 = 16 leading bins
        assert_array_almost_equal(np.fft.fft(x)[:16], np.fft.rfft(x))
        assert_array_almost_equal(np.fft.rfft(x) / np.sqrt(30),
                                  np.fft.rfft(x, norm="ortho"))

    def test_irfft(self):
        x = random(30)
        assert_array_almost_equal(x, np.fft.irfft(np.fft.rfft(x)))
        assert_array_almost_equal(
            x, np.fft.irfft(np.fft.rfft(x, norm="ortho"), norm="ortho"))

    def test_rfft2(self):
        x = random((30, 20))
        assert_array_almost_equal(np.fft.fft2(x)[:, :11], np.fft.rfft2(x))
        assert_array_almost_equal(np.fft.rfft2(x) / np.sqrt(30 * 20),
                                  np.fft.rfft2(x, norm="ortho"))

    def test_irfft2(self):
        x = random((30, 20))
        assert_array_almost_equal(x, np.fft.irfft2(np.fft.rfft2(x)))
        assert_array_almost_equal(
            x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"), norm="ortho"))

    def test_rfftn(self):
        x = random((30, 20, 10))
        assert_array_almost_equal(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x))
        assert_array_almost_equal(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10),
                                  np.fft.rfftn(x, norm="ortho"))

    def test_irfftn(self):
        x = random((30, 20, 10))
        assert_array_almost_equal(x, np.fft.irfftn(np.fft.rfftn(x)))
        assert_array_almost_equal(
            x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"), norm="ortho"))

    def test_hfft(self):
        # Build a length-30 signal with Hermitian symmetry from its
        # 16-sample half-spectrum x_herm.
        x = random(14) + 1j*random(14)
        x_herm = np.concatenate((random(1), x, random(1)))
        x = np.concatenate((x_herm, x[::-1].conj()))
        assert_array_almost_equal(np.fft.fft(x), np.fft.hfft(x_herm))
        assert_array_almost_equal(np.fft.hfft(x_herm) / np.sqrt(30),
                                  np.fft.hfft(x_herm, norm="ortho"))

    def test_ihfft(self):
        # Renamed from test_ihttf: the method tests np.fft.ihfft.
        x = random(14) + 1j*random(14)
        x_herm = np.concatenate((random(1), x, random(1)))
        x = np.concatenate((x_herm, x[::-1].conj()))
        assert_array_almost_equal(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)))
        assert_array_almost_equal(
            x_herm, np.fft.ihfft(np.fft.hfft(x_herm, norm="ortho"),
                                 norm="ortho"))


class TestFFTThreadSafe(TestCase):
    threads = 16
    input_shape = (800, 200)

    def _test_mtsame(self, func, *args):
        """Run func(*args) in many threads and check every thread got
        the same result as a single-threaded call."""
        def worker(args, q):
            q.put(func(*args))

        q = queue.Queue()
        expected = func(*args)

        # Spin off a bunch of threads to call the same function simultaneously
        t = [threading.Thread(target=worker, args=(args, q))
             for i in range(self.threads)]
        [x.start() for x in t]

        [x.join() for x in t]
        # Make sure all threads returned the correct value
        for i in range(self.threads):
            assert_array_equal(q.get(timeout=5), expected,
                'Function returned wrong value in multithreaded context')

    def test_fft(self):
        a = np.ones(self.input_shape) * 1+0j
        self._test_mtsame(np.fft.fft, a)

    def test_ifft(self):
        a = np.ones(self.input_shape) * 1+0j
        self._test_mtsame(np.fft.ifft, a)

    def test_rfft(self):
        a = np.ones(self.input_shape)
        self._test_mtsame(np.fft.rfft, a)

    def test_irfft(self):
        a = np.ones(self.input_shape) * 1+0j
        self._test_mtsame(np.fft.irfft, a)


if __name__ == "__main__":
    run_module_suite()
bsd-3-clause
sunny94/temp
sympy/physics/mechanics/tests/test_rigidbody.py
42
2393
"""Tests for sympy.physics.mechanics.RigidBody: construction, attribute
mutation, momenta and energies.

Fix: removed an exact duplicated pair of assertions in test_rigidbody
(``B.masscenter == P2`` / ``B.inertia == (I2, B.masscenter)`` were
asserted twice in a row).
"""
from sympy import symbols
from sympy.physics.mechanics import Point, ReferenceFrame, Dyadic, RigidBody
from sympy.physics.mechanics import dynamicsymbols, outer
from sympy.physics.mechanics import inertia_of_point_mass


def test_rigidbody():
    """Constructor, attribute reassignment, and linear momentum."""
    m, m2, v1, v2, v3, omega = symbols('m m2 v1 v2 v3 omega')
    A = ReferenceFrame('A')
    A2 = ReferenceFrame('A2')
    P = Point('P')
    P2 = Point('P2')
    I = Dyadic(0)
    I2 = Dyadic(0)
    B = RigidBody('B', P, A, m, (I, P))
    assert B.mass == m
    assert B.frame == A
    assert B.masscenter == P
    assert B.inertia == (I, B.masscenter)
    # All four attributes are writable; check the new values stick.
    B.mass = m2
    B.frame = A2
    B.masscenter = P2
    B.inertia = (I2, B.masscenter)
    assert B.mass == m2
    assert B.frame == A2
    assert B.masscenter == P2
    assert B.inertia == (I2, B.masscenter)
    # Testing linear momentum function assuming A2 is the inertial frame
    N = ReferenceFrame('N')
    P2.set_vel(N, v1 * N.x + v2 * N.y + v3 * N.z)
    assert B.linear_momentum(N) == m2 * (v1 * N.x + v2 * N.y + v3 * N.z)


def test_rigidbody2():
    """Angular momentum about two points, potential and kinetic energy
    for a body with unit inertia about b.x spinning at rate omega."""
    M, v, r, omega, g, h = dynamicsymbols('M v r omega g h')
    N = ReferenceFrame('N')
    b = ReferenceFrame('b')
    b.set_ang_vel(N, omega * b.x)
    P = Point('P')
    I = outer(b.x, b.x)
    Inertia_tuple = (I, P)
    B = RigidBody('B', P, b, M, Inertia_tuple)
    P.set_vel(N, v * b.x)
    assert B.angular_momentum(P, N) == omega * b.x
    O = Point('O')
    O.set_vel(N, v * b.x)
    P.set_pos(O, r * b.y)
    # about O the translational term M*v*r contributes along -b.z
    assert B.angular_momentum(O, N) == omega * b.x - M*v*r*b.z
    B.set_potential_energy(M * g * h)
    assert B.potential_energy == M * g * h
    assert B.kinetic_energy(N) == (omega**2 + M * v**2) / 2


def test_rigidbody3():
    """Parallel-axis check: inertia given about the mass center must
    agree with inertia given about another point O."""
    q1, q2, q3, q4 = dynamicsymbols('q1:5')
    p1, p2, p3 = symbols('p1:4')
    m = symbols('m')

    A = ReferenceFrame('A')
    B = A.orientnew('B', 'axis', [q1, A.x])
    O = Point('O')
    O.set_vel(A, q2*A.x + q3*A.y + q4*A.z)
    P = O.locatenew('P', p1*B.x + p2*B.y + p3*B.z)
    I = outer(B.x, B.x)

    rb1 = RigidBody('rb1', P, B, m, (I, P))
    # I_S/O = I_S/S* + I_S*/O
    rb2 = RigidBody('rb2', P, B, m,
                    (I + inertia_of_point_mass(m, P.pos_from(O), B), O))

    assert rb1.central_inertia == rb2.central_inertia
    assert rb1.angular_momentum(O, A) == rb2.angular_momentum(O, A)
bsd-3-clause
shishaochen/TensorFlow-0.8-Win
tensorflow/contrib/bayesflow/__init__.py
3
1307
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Ops for representing statistical distributions. ## This package provides classes for statistical distributions. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import,wildcard-import, line-too-long from tensorflow.contrib.distributions.python.ops import gaussian_conjugate_posteriors from tensorflow.contrib.distributions.python.ops.dirichlet_multinomial import * from tensorflow.contrib.distributions.python.ops.gaussian import * # from tensorflow.contrib.distributions.python.ops.dirichlet import * # pylint: disable=line-too-long
apache-2.0
AlericInglewood/3p-google-breakpad
src/third_party/protobuf/protobuf/python/google/protobuf/internal/reflection_test.py
253
99091
#! /usr/bin/python # -*- coding: utf-8 -*- # # Protocol Buffers - Google's data interchange format # Copyright 2008 Google Inc. All rights reserved. # http://code.google.com/p/protobuf/ # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unittest for reflection.py, which also indirectly tests the output of the pure-Python protocol compiler. 
""" __author__ = 'robinson@google.com (Will Robinson)' import operator import struct import unittest from google.protobuf import unittest_import_pb2 from google.protobuf import unittest_mset_pb2 from google.protobuf import unittest_pb2 from google.protobuf import descriptor_pb2 from google.protobuf import descriptor from google.protobuf import message from google.protobuf import reflection from google.protobuf.internal import api_implementation from google.protobuf.internal import more_extensions_pb2 from google.protobuf.internal import more_messages_pb2 from google.protobuf.internal import wire_format from google.protobuf.internal import test_util from google.protobuf.internal import decoder class _MiniDecoder(object): """Decodes a stream of values from a string. Once upon a time we actually had a class called decoder.Decoder. Then we got rid of it during a redesign that made decoding much, much faster overall. But a couple tests in this file used it to check that the serialized form of a message was correct. So, this class implements just the methods that were used by said tests, so that we don't have to rewrite the tests. 
""" def __init__(self, bytes): self._bytes = bytes self._pos = 0 def ReadVarint(self): result, self._pos = decoder._DecodeVarint(self._bytes, self._pos) return result ReadInt32 = ReadVarint ReadInt64 = ReadVarint ReadUInt32 = ReadVarint ReadUInt64 = ReadVarint def ReadSInt64(self): return wire_format.ZigZagDecode(self.ReadVarint()) ReadSInt32 = ReadSInt64 def ReadFieldNumberAndWireType(self): return wire_format.UnpackTag(self.ReadVarint()) def ReadFloat(self): result = struct.unpack("<f", self._bytes[self._pos:self._pos+4])[0] self._pos += 4 return result def ReadDouble(self): result = struct.unpack("<d", self._bytes[self._pos:self._pos+8])[0] self._pos += 8 return result def EndOfStream(self): return self._pos == len(self._bytes) class ReflectionTest(unittest.TestCase): def assertListsEqual(self, values, others): self.assertEqual(len(values), len(others)) for i in range(len(values)): self.assertEqual(values[i], others[i]) def testScalarConstructor(self): # Constructor with only scalar types should succeed. proto = unittest_pb2.TestAllTypes( optional_int32=24, optional_double=54.321, optional_string='optional_string') self.assertEqual(24, proto.optional_int32) self.assertEqual(54.321, proto.optional_double) self.assertEqual('optional_string', proto.optional_string) def testRepeatedScalarConstructor(self): # Constructor with only repeated scalar types should succeed. proto = unittest_pb2.TestAllTypes( repeated_int32=[1, 2, 3, 4], repeated_double=[1.23, 54.321], repeated_bool=[True, False, False], repeated_string=["optional_string"]) self.assertEquals([1, 2, 3, 4], list(proto.repeated_int32)) self.assertEquals([1.23, 54.321], list(proto.repeated_double)) self.assertEquals([True, False, False], list(proto.repeated_bool)) self.assertEquals(["optional_string"], list(proto.repeated_string)) def testRepeatedCompositeConstructor(self): # Constructor with only repeated composite types should succeed. 
proto = unittest_pb2.TestAllTypes( repeated_nested_message=[ unittest_pb2.TestAllTypes.NestedMessage( bb=unittest_pb2.TestAllTypes.FOO), unittest_pb2.TestAllTypes.NestedMessage( bb=unittest_pb2.TestAllTypes.BAR)], repeated_foreign_message=[ unittest_pb2.ForeignMessage(c=-43), unittest_pb2.ForeignMessage(c=45324), unittest_pb2.ForeignMessage(c=12)], repeatedgroup=[ unittest_pb2.TestAllTypes.RepeatedGroup(), unittest_pb2.TestAllTypes.RepeatedGroup(a=1), unittest_pb2.TestAllTypes.RepeatedGroup(a=2)]) self.assertEquals( [unittest_pb2.TestAllTypes.NestedMessage( bb=unittest_pb2.TestAllTypes.FOO), unittest_pb2.TestAllTypes.NestedMessage( bb=unittest_pb2.TestAllTypes.BAR)], list(proto.repeated_nested_message)) self.assertEquals( [unittest_pb2.ForeignMessage(c=-43), unittest_pb2.ForeignMessage(c=45324), unittest_pb2.ForeignMessage(c=12)], list(proto.repeated_foreign_message)) self.assertEquals( [unittest_pb2.TestAllTypes.RepeatedGroup(), unittest_pb2.TestAllTypes.RepeatedGroup(a=1), unittest_pb2.TestAllTypes.RepeatedGroup(a=2)], list(proto.repeatedgroup)) def testMixedConstructor(self): # Constructor with only mixed types should succeed. 
proto = unittest_pb2.TestAllTypes( optional_int32=24, optional_string='optional_string', repeated_double=[1.23, 54.321], repeated_bool=[True, False, False], repeated_nested_message=[ unittest_pb2.TestAllTypes.NestedMessage( bb=unittest_pb2.TestAllTypes.FOO), unittest_pb2.TestAllTypes.NestedMessage( bb=unittest_pb2.TestAllTypes.BAR)], repeated_foreign_message=[ unittest_pb2.ForeignMessage(c=-43), unittest_pb2.ForeignMessage(c=45324), unittest_pb2.ForeignMessage(c=12)]) self.assertEqual(24, proto.optional_int32) self.assertEqual('optional_string', proto.optional_string) self.assertEquals([1.23, 54.321], list(proto.repeated_double)) self.assertEquals([True, False, False], list(proto.repeated_bool)) self.assertEquals( [unittest_pb2.TestAllTypes.NestedMessage( bb=unittest_pb2.TestAllTypes.FOO), unittest_pb2.TestAllTypes.NestedMessage( bb=unittest_pb2.TestAllTypes.BAR)], list(proto.repeated_nested_message)) self.assertEquals( [unittest_pb2.ForeignMessage(c=-43), unittest_pb2.ForeignMessage(c=45324), unittest_pb2.ForeignMessage(c=12)], list(proto.repeated_foreign_message)) def testConstructorTypeError(self): self.assertRaises( TypeError, unittest_pb2.TestAllTypes, optional_int32="foo") self.assertRaises( TypeError, unittest_pb2.TestAllTypes, optional_string=1234) self.assertRaises( TypeError, unittest_pb2.TestAllTypes, optional_nested_message=1234) self.assertRaises( TypeError, unittest_pb2.TestAllTypes, repeated_int32=1234) self.assertRaises( TypeError, unittest_pb2.TestAllTypes, repeated_int32=["foo"]) self.assertRaises( TypeError, unittest_pb2.TestAllTypes, repeated_string=1234) self.assertRaises( TypeError, unittest_pb2.TestAllTypes, repeated_string=[1234]) self.assertRaises( TypeError, unittest_pb2.TestAllTypes, repeated_nested_message=1234) self.assertRaises( TypeError, unittest_pb2.TestAllTypes, repeated_nested_message=[1234]) def testConstructorInvalidatesCachedByteSize(self): message = unittest_pb2.TestAllTypes(optional_int32 = 12) self.assertEquals(2, 
message.ByteSize()) message = unittest_pb2.TestAllTypes( optional_nested_message = unittest_pb2.TestAllTypes.NestedMessage()) self.assertEquals(3, message.ByteSize()) message = unittest_pb2.TestAllTypes(repeated_int32 = [12]) self.assertEquals(3, message.ByteSize()) message = unittest_pb2.TestAllTypes( repeated_nested_message = [unittest_pb2.TestAllTypes.NestedMessage()]) self.assertEquals(3, message.ByteSize()) def testSimpleHasBits(self): # Test a scalar. proto = unittest_pb2.TestAllTypes() self.assertTrue(not proto.HasField('optional_int32')) self.assertEqual(0, proto.optional_int32) # HasField() shouldn't be true if all we've done is # read the default value. self.assertTrue(not proto.HasField('optional_int32')) proto.optional_int32 = 1 # Setting a value however *should* set the "has" bit. self.assertTrue(proto.HasField('optional_int32')) proto.ClearField('optional_int32') # And clearing that value should unset the "has" bit. self.assertTrue(not proto.HasField('optional_int32')) def testHasBitsWithSinglyNestedScalar(self): # Helper used to test foreign messages and groups. # # composite_field_name should be the name of a non-repeated # composite (i.e., foreign or group) field in TestAllTypes, # and scalar_field_name should be the name of an integer-valued # scalar field within that composite. # # I never thought I'd miss C++ macros and templates so much. 
:( # This helper is semantically just: # # assert proto.composite_field.scalar_field == 0 # assert not proto.composite_field.HasField('scalar_field') # assert not proto.HasField('composite_field') # # proto.composite_field.scalar_field = 10 # old_composite_field = proto.composite_field # # assert proto.composite_field.scalar_field == 10 # assert proto.composite_field.HasField('scalar_field') # assert proto.HasField('composite_field') # # proto.ClearField('composite_field') # # assert not proto.composite_field.HasField('scalar_field') # assert not proto.HasField('composite_field') # assert proto.composite_field.scalar_field == 0 # # # Now ensure that ClearField('composite_field') disconnected # # the old field object from the object tree... # assert old_composite_field is not proto.composite_field # old_composite_field.scalar_field = 20 # assert not proto.composite_field.HasField('scalar_field') # assert not proto.HasField('composite_field') def TestCompositeHasBits(composite_field_name, scalar_field_name): proto = unittest_pb2.TestAllTypes() # First, check that we can get the scalar value, and see that it's the # default (0), but that proto.HasField('omposite') and # proto.composite.HasField('scalar') will still return False. composite_field = getattr(proto, composite_field_name) original_scalar_value = getattr(composite_field, scalar_field_name) self.assertEqual(0, original_scalar_value) # Assert that the composite object does not "have" the scalar. self.assertTrue(not composite_field.HasField(scalar_field_name)) # Assert that proto does not "have" the composite field. self.assertTrue(not proto.HasField(composite_field_name)) # Now set the scalar within the composite field. Ensure that the setting # is reflected, and that proto.HasField('composite') and # proto.composite.HasField('scalar') now both return True. 
new_val = 20 setattr(composite_field, scalar_field_name, new_val) self.assertEqual(new_val, getattr(composite_field, scalar_field_name)) # Hold on to a reference to the current composite_field object. old_composite_field = composite_field # Assert that the has methods now return true. self.assertTrue(composite_field.HasField(scalar_field_name)) self.assertTrue(proto.HasField(composite_field_name)) # Now call the clear method... proto.ClearField(composite_field_name) # ...and ensure that the "has" bits are all back to False... composite_field = getattr(proto, composite_field_name) self.assertTrue(not composite_field.HasField(scalar_field_name)) self.assertTrue(not proto.HasField(composite_field_name)) # ...and ensure that the scalar field has returned to its default. self.assertEqual(0, getattr(composite_field, scalar_field_name)) # Finally, ensure that modifications to the old composite field object # don't have any effect on the parent. Possible only with the pure-python # implementation of the API. # # (NOTE that when we clear the composite field in the parent, we actually # don't recursively clear down the tree. Instead, we just disconnect the # cleared composite from the tree.) if api_implementation.Type() != 'python': return self.assertTrue(old_composite_field is not composite_field) setattr(old_composite_field, scalar_field_name, new_val) self.assertTrue(not composite_field.HasField(scalar_field_name)) self.assertTrue(not proto.HasField(composite_field_name)) self.assertEqual(0, getattr(composite_field, scalar_field_name)) # Test simple, single-level nesting when we set a scalar. 
TestCompositeHasBits('optionalgroup', 'a') TestCompositeHasBits('optional_nested_message', 'bb') TestCompositeHasBits('optional_foreign_message', 'c') TestCompositeHasBits('optional_import_message', 'd') def testReferencesToNestedMessage(self): proto = unittest_pb2.TestAllTypes() nested = proto.optional_nested_message del proto # A previous version had a bug where this would raise an exception when # hitting a now-dead weak reference. nested.bb = 23 def testDisconnectingNestedMessageBeforeSettingField(self): if api_implementation.Type() != 'python': return proto = unittest_pb2.TestAllTypes() nested = proto.optional_nested_message proto.ClearField('optional_nested_message') # Should disconnect from parent self.assertTrue(nested is not proto.optional_nested_message) nested.bb = 23 self.assertTrue(not proto.HasField('optional_nested_message')) self.assertEqual(0, proto.optional_nested_message.bb) def testHasBitsWhenModifyingRepeatedFields(self): # Test nesting when we add an element to a repeated field in a submessage. proto = unittest_pb2.TestNestedMessageHasBits() proto.optional_nested_message.nestedmessage_repeated_int32.append(5) self.assertEqual( [5], proto.optional_nested_message.nestedmessage_repeated_int32) self.assertTrue(proto.HasField('optional_nested_message')) # Do the same test, but with a repeated composite field within the # submessage. proto.ClearField('optional_nested_message') self.assertTrue(not proto.HasField('optional_nested_message')) proto.optional_nested_message.nestedmessage_repeated_foreignmessage.add() self.assertTrue(proto.HasField('optional_nested_message')) def testHasBitsForManyLevelsOfNesting(self): # Test nesting many levels deep. 
recursive_proto = unittest_pb2.TestMutualRecursionA() self.assertTrue(not recursive_proto.HasField('bb')) self.assertEqual(0, recursive_proto.bb.a.bb.a.bb.optional_int32) self.assertTrue(not recursive_proto.HasField('bb')) recursive_proto.bb.a.bb.a.bb.optional_int32 = 5 self.assertEqual(5, recursive_proto.bb.a.bb.a.bb.optional_int32) self.assertTrue(recursive_proto.HasField('bb')) self.assertTrue(recursive_proto.bb.HasField('a')) self.assertTrue(recursive_proto.bb.a.HasField('bb')) self.assertTrue(recursive_proto.bb.a.bb.HasField('a')) self.assertTrue(recursive_proto.bb.a.bb.a.HasField('bb')) self.assertTrue(not recursive_proto.bb.a.bb.a.bb.HasField('a')) self.assertTrue(recursive_proto.bb.a.bb.a.bb.HasField('optional_int32')) def testSingularListFields(self): proto = unittest_pb2.TestAllTypes() proto.optional_fixed32 = 1 proto.optional_int32 = 5 proto.optional_string = 'foo' # Access sub-message but don't set it yet. nested_message = proto.optional_nested_message self.assertEqual( [ (proto.DESCRIPTOR.fields_by_name['optional_int32' ], 5), (proto.DESCRIPTOR.fields_by_name['optional_fixed32'], 1), (proto.DESCRIPTOR.fields_by_name['optional_string' ], 'foo') ], proto.ListFields()) proto.optional_nested_message.bb = 123 self.assertEqual( [ (proto.DESCRIPTOR.fields_by_name['optional_int32' ], 5), (proto.DESCRIPTOR.fields_by_name['optional_fixed32'], 1), (proto.DESCRIPTOR.fields_by_name['optional_string' ], 'foo'), (proto.DESCRIPTOR.fields_by_name['optional_nested_message' ], nested_message) ], proto.ListFields()) def testRepeatedListFields(self): proto = unittest_pb2.TestAllTypes() proto.repeated_fixed32.append(1) proto.repeated_int32.append(5) proto.repeated_int32.append(11) proto.repeated_string.extend(['foo', 'bar']) proto.repeated_string.extend([]) proto.repeated_string.append('baz') proto.repeated_string.extend(str(x) for x in xrange(2)) proto.optional_int32 = 21 proto.repeated_bool # Access but don't set anything; should not be listed. 
self.assertEqual( [ (proto.DESCRIPTOR.fields_by_name['optional_int32' ], 21), (proto.DESCRIPTOR.fields_by_name['repeated_int32' ], [5, 11]), (proto.DESCRIPTOR.fields_by_name['repeated_fixed32'], [1]), (proto.DESCRIPTOR.fields_by_name['repeated_string' ], ['foo', 'bar', 'baz', '0', '1']) ], proto.ListFields()) def testSingularListExtensions(self): proto = unittest_pb2.TestAllExtensions() proto.Extensions[unittest_pb2.optional_fixed32_extension] = 1 proto.Extensions[unittest_pb2.optional_int32_extension ] = 5 proto.Extensions[unittest_pb2.optional_string_extension ] = 'foo' self.assertEqual( [ (unittest_pb2.optional_int32_extension , 5), (unittest_pb2.optional_fixed32_extension, 1), (unittest_pb2.optional_string_extension , 'foo') ], proto.ListFields()) def testRepeatedListExtensions(self): proto = unittest_pb2.TestAllExtensions() proto.Extensions[unittest_pb2.repeated_fixed32_extension].append(1) proto.Extensions[unittest_pb2.repeated_int32_extension ].append(5) proto.Extensions[unittest_pb2.repeated_int32_extension ].append(11) proto.Extensions[unittest_pb2.repeated_string_extension ].append('foo') proto.Extensions[unittest_pb2.repeated_string_extension ].append('bar') proto.Extensions[unittest_pb2.repeated_string_extension ].append('baz') proto.Extensions[unittest_pb2.optional_int32_extension ] = 21 self.assertEqual( [ (unittest_pb2.optional_int32_extension , 21), (unittest_pb2.repeated_int32_extension , [5, 11]), (unittest_pb2.repeated_fixed32_extension, [1]), (unittest_pb2.repeated_string_extension , ['foo', 'bar', 'baz']) ], proto.ListFields()) def testListFieldsAndExtensions(self): proto = unittest_pb2.TestFieldOrderings() test_util.SetAllFieldsAndExtensions(proto) unittest_pb2.my_extension_int self.assertEqual( [ (proto.DESCRIPTOR.fields_by_name['my_int' ], 1), (unittest_pb2.my_extension_int , 23), (proto.DESCRIPTOR.fields_by_name['my_string'], 'foo'), (unittest_pb2.my_extension_string , 'bar'), (proto.DESCRIPTOR.fields_by_name['my_float' ], 1.0) ], 
proto.ListFields()) def testDefaultValues(self): proto = unittest_pb2.TestAllTypes() self.assertEqual(0, proto.optional_int32) self.assertEqual(0, proto.optional_int64) self.assertEqual(0, proto.optional_uint32) self.assertEqual(0, proto.optional_uint64) self.assertEqual(0, proto.optional_sint32) self.assertEqual(0, proto.optional_sint64) self.assertEqual(0, proto.optional_fixed32) self.assertEqual(0, proto.optional_fixed64) self.assertEqual(0, proto.optional_sfixed32) self.assertEqual(0, proto.optional_sfixed64) self.assertEqual(0.0, proto.optional_float) self.assertEqual(0.0, proto.optional_double) self.assertEqual(False, proto.optional_bool) self.assertEqual('', proto.optional_string) self.assertEqual('', proto.optional_bytes) self.assertEqual(41, proto.default_int32) self.assertEqual(42, proto.default_int64) self.assertEqual(43, proto.default_uint32) self.assertEqual(44, proto.default_uint64) self.assertEqual(-45, proto.default_sint32) self.assertEqual(46, proto.default_sint64) self.assertEqual(47, proto.default_fixed32) self.assertEqual(48, proto.default_fixed64) self.assertEqual(49, proto.default_sfixed32) self.assertEqual(-50, proto.default_sfixed64) self.assertEqual(51.5, proto.default_float) self.assertEqual(52e3, proto.default_double) self.assertEqual(True, proto.default_bool) self.assertEqual('hello', proto.default_string) self.assertEqual('world', proto.default_bytes) self.assertEqual(unittest_pb2.TestAllTypes.BAR, proto.default_nested_enum) self.assertEqual(unittest_pb2.FOREIGN_BAR, proto.default_foreign_enum) self.assertEqual(unittest_import_pb2.IMPORT_BAR, proto.default_import_enum) proto = unittest_pb2.TestExtremeDefaultValues() self.assertEqual(u'\u1234', proto.utf8_string) def testHasFieldWithUnknownFieldName(self): proto = unittest_pb2.TestAllTypes() self.assertRaises(ValueError, proto.HasField, 'nonexistent_field') def testClearFieldWithUnknownFieldName(self): proto = unittest_pb2.TestAllTypes() self.assertRaises(ValueError, proto.ClearField, 
'nonexistent_field') def testDisallowedAssignments(self): # It's illegal to assign values directly to repeated fields # or to nonrepeated composite fields. Ensure that this fails. proto = unittest_pb2.TestAllTypes() # Repeated fields. self.assertRaises(AttributeError, setattr, proto, 'repeated_int32', 10) # Lists shouldn't work, either. self.assertRaises(AttributeError, setattr, proto, 'repeated_int32', [10]) # Composite fields. self.assertRaises(AttributeError, setattr, proto, 'optional_nested_message', 23) # Assignment to a repeated nested message field without specifying # the index in the array of nested messages. self.assertRaises(AttributeError, setattr, proto.repeated_nested_message, 'bb', 34) # Assignment to an attribute of a repeated field. self.assertRaises(AttributeError, setattr, proto.repeated_float, 'some_attribute', 34) # proto.nonexistent_field = 23 should fail as well. self.assertRaises(AttributeError, setattr, proto, 'nonexistent_field', 23) def testSingleScalarTypeSafety(self): proto = unittest_pb2.TestAllTypes() self.assertRaises(TypeError, setattr, proto, 'optional_int32', 1.1) self.assertRaises(TypeError, setattr, proto, 'optional_int32', 'foo') self.assertRaises(TypeError, setattr, proto, 'optional_string', 10) self.assertRaises(TypeError, setattr, proto, 'optional_bytes', 10) def testSingleScalarBoundsChecking(self): def TestMinAndMaxIntegers(field_name, expected_min, expected_max): pb = unittest_pb2.TestAllTypes() setattr(pb, field_name, expected_min) self.assertEqual(expected_min, getattr(pb, field_name)) setattr(pb, field_name, expected_max) self.assertEqual(expected_max, getattr(pb, field_name)) self.assertRaises(ValueError, setattr, pb, field_name, expected_min - 1) self.assertRaises(ValueError, setattr, pb, field_name, expected_max + 1) TestMinAndMaxIntegers('optional_int32', -(1 << 31), (1 << 31) - 1) TestMinAndMaxIntegers('optional_uint32', 0, 0xffffffff) TestMinAndMaxIntegers('optional_int64', -(1 << 63), (1 << 63) - 1) 
TestMinAndMaxIntegers('optional_uint64', 0, 0xffffffffffffffff) pb = unittest_pb2.TestAllTypes() pb.optional_nested_enum = 1 self.assertEqual(1, pb.optional_nested_enum) # Invalid enum values. pb.optional_nested_enum = 0 self.assertEqual(0, pb.optional_nested_enum) bytes_size_before = pb.ByteSize() pb.optional_nested_enum = 4 self.assertEqual(4, pb.optional_nested_enum) pb.optional_nested_enum = 0 self.assertEqual(0, pb.optional_nested_enum) # Make sure that setting the same enum field doesn't just add unknown # fields (but overwrites them). self.assertEqual(bytes_size_before, pb.ByteSize()) # Is the invalid value preserved after serialization? serialized = pb.SerializeToString() pb2 = unittest_pb2.TestAllTypes() pb2.ParseFromString(serialized) self.assertEqual(0, pb2.optional_nested_enum) self.assertEqual(pb, pb2) def testRepeatedScalarTypeSafety(self): proto = unittest_pb2.TestAllTypes() self.assertRaises(TypeError, proto.repeated_int32.append, 1.1) self.assertRaises(TypeError, proto.repeated_int32.append, 'foo') self.assertRaises(TypeError, proto.repeated_string, 10) self.assertRaises(TypeError, proto.repeated_bytes, 10) proto.repeated_int32.append(10) proto.repeated_int32[0] = 23 self.assertRaises(IndexError, proto.repeated_int32.__setitem__, 500, 23) self.assertRaises(TypeError, proto.repeated_int32.__setitem__, 0, 'abc') # Repeated enums tests. #proto.repeated_nested_enum.append(0) def testSingleScalarGettersAndSetters(self): proto = unittest_pb2.TestAllTypes() self.assertEqual(0, proto.optional_int32) proto.optional_int32 = 1 self.assertEqual(1, proto.optional_int32) proto.optional_uint64 = 0xffffffffffff self.assertEqual(0xffffffffffff, proto.optional_uint64) proto.optional_uint64 = 0xffffffffffffffff self.assertEqual(0xffffffffffffffff, proto.optional_uint64) # TODO(robinson): Test all other scalar field types. def testSingleScalarClearField(self): proto = unittest_pb2.TestAllTypes() # Should be allowed to clear something that's not there (a no-op). 
proto.ClearField('optional_int32') proto.optional_int32 = 1 self.assertTrue(proto.HasField('optional_int32')) proto.ClearField('optional_int32') self.assertEqual(0, proto.optional_int32) self.assertTrue(not proto.HasField('optional_int32')) # TODO(robinson): Test all other scalar field types. def testEnums(self): proto = unittest_pb2.TestAllTypes() self.assertEqual(1, proto.FOO) self.assertEqual(1, unittest_pb2.TestAllTypes.FOO) self.assertEqual(2, proto.BAR) self.assertEqual(2, unittest_pb2.TestAllTypes.BAR) self.assertEqual(3, proto.BAZ) self.assertEqual(3, unittest_pb2.TestAllTypes.BAZ) def testRepeatedScalars(self): proto = unittest_pb2.TestAllTypes() self.assertTrue(not proto.repeated_int32) self.assertEqual(0, len(proto.repeated_int32)) proto.repeated_int32.append(5) proto.repeated_int32.append(10) proto.repeated_int32.append(15) self.assertTrue(proto.repeated_int32) self.assertEqual(3, len(proto.repeated_int32)) self.assertEqual([5, 10, 15], proto.repeated_int32) # Test single retrieval. self.assertEqual(5, proto.repeated_int32[0]) self.assertEqual(15, proto.repeated_int32[-1]) # Test out-of-bounds indices. self.assertRaises(IndexError, proto.repeated_int32.__getitem__, 1234) self.assertRaises(IndexError, proto.repeated_int32.__getitem__, -1234) # Test incorrect types passed to __getitem__. self.assertRaises(TypeError, proto.repeated_int32.__getitem__, 'foo') self.assertRaises(TypeError, proto.repeated_int32.__getitem__, None) # Test single assignment. proto.repeated_int32[1] = 20 self.assertEqual([5, 20, 15], proto.repeated_int32) # Test insertion. proto.repeated_int32.insert(1, 25) self.assertEqual([5, 25, 20, 15], proto.repeated_int32) # Test slice retrieval. 
proto.repeated_int32.append(30) self.assertEqual([25, 20, 15], proto.repeated_int32[1:4]) self.assertEqual([5, 25, 20, 15, 30], proto.repeated_int32[:]) # Test slice assignment with an iterator proto.repeated_int32[1:4] = (i for i in xrange(3)) self.assertEqual([5, 0, 1, 2, 30], proto.repeated_int32) # Test slice assignment. proto.repeated_int32[1:4] = [35, 40, 45] self.assertEqual([5, 35, 40, 45, 30], proto.repeated_int32) # Test that we can use the field as an iterator. result = [] for i in proto.repeated_int32: result.append(i) self.assertEqual([5, 35, 40, 45, 30], result) # Test single deletion. del proto.repeated_int32[2] self.assertEqual([5, 35, 45, 30], proto.repeated_int32) # Test slice deletion. del proto.repeated_int32[2:] self.assertEqual([5, 35], proto.repeated_int32) # Test extending. proto.repeated_int32.extend([3, 13]) self.assertEqual([5, 35, 3, 13], proto.repeated_int32) # Test clearing. proto.ClearField('repeated_int32') self.assertTrue(not proto.repeated_int32) self.assertEqual(0, len(proto.repeated_int32)) proto.repeated_int32.append(1) self.assertEqual(1, proto.repeated_int32[-1]) # Test assignment to a negative index. proto.repeated_int32[-1] = 2 self.assertEqual(2, proto.repeated_int32[-1]) # Test deletion at negative indices. 
proto.repeated_int32[:] = [0, 1, 2, 3] del proto.repeated_int32[-1] self.assertEqual([0, 1, 2], proto.repeated_int32) del proto.repeated_int32[-2] self.assertEqual([0, 2], proto.repeated_int32) self.assertRaises(IndexError, proto.repeated_int32.__delitem__, -3) self.assertRaises(IndexError, proto.repeated_int32.__delitem__, 300) del proto.repeated_int32[-2:-1] self.assertEqual([2], proto.repeated_int32) del proto.repeated_int32[100:10000] self.assertEqual([2], proto.repeated_int32) def testRepeatedScalarsRemove(self): proto = unittest_pb2.TestAllTypes() self.assertTrue(not proto.repeated_int32) self.assertEqual(0, len(proto.repeated_int32)) proto.repeated_int32.append(5) proto.repeated_int32.append(10) proto.repeated_int32.append(5) proto.repeated_int32.append(5) self.assertEqual(4, len(proto.repeated_int32)) proto.repeated_int32.remove(5) self.assertEqual(3, len(proto.repeated_int32)) self.assertEqual(10, proto.repeated_int32[0]) self.assertEqual(5, proto.repeated_int32[1]) self.assertEqual(5, proto.repeated_int32[2]) proto.repeated_int32.remove(5) self.assertEqual(2, len(proto.repeated_int32)) self.assertEqual(10, proto.repeated_int32[0]) self.assertEqual(5, proto.repeated_int32[1]) proto.repeated_int32.remove(10) self.assertEqual(1, len(proto.repeated_int32)) self.assertEqual(5, proto.repeated_int32[0]) # Remove a non-existent element. self.assertRaises(ValueError, proto.repeated_int32.remove, 123) def testRepeatedComposites(self): proto = unittest_pb2.TestAllTypes() self.assertTrue(not proto.repeated_nested_message) self.assertEqual(0, len(proto.repeated_nested_message)) m0 = proto.repeated_nested_message.add() m1 = proto.repeated_nested_message.add() self.assertTrue(proto.repeated_nested_message) self.assertEqual(2, len(proto.repeated_nested_message)) self.assertListsEqual([m0, m1], proto.repeated_nested_message) self.assertTrue(isinstance(m0, unittest_pb2.TestAllTypes.NestedMessage)) # Test out-of-bounds indices. 
self.assertRaises(IndexError, proto.repeated_nested_message.__getitem__, 1234) self.assertRaises(IndexError, proto.repeated_nested_message.__getitem__, -1234) # Test incorrect types passed to __getitem__. self.assertRaises(TypeError, proto.repeated_nested_message.__getitem__, 'foo') self.assertRaises(TypeError, proto.repeated_nested_message.__getitem__, None) # Test slice retrieval. m2 = proto.repeated_nested_message.add() m3 = proto.repeated_nested_message.add() m4 = proto.repeated_nested_message.add() self.assertListsEqual( [m1, m2, m3], proto.repeated_nested_message[1:4]) self.assertListsEqual( [m0, m1, m2, m3, m4], proto.repeated_nested_message[:]) self.assertListsEqual( [m0, m1], proto.repeated_nested_message[:2]) self.assertListsEqual( [m2, m3, m4], proto.repeated_nested_message[2:]) self.assertEqual( m0, proto.repeated_nested_message[0]) self.assertListsEqual( [m0], proto.repeated_nested_message[:1]) # Test that we can use the field as an iterator. result = [] for i in proto.repeated_nested_message: result.append(i) self.assertListsEqual([m0, m1, m2, m3, m4], result) # Test single deletion. del proto.repeated_nested_message[2] self.assertListsEqual([m0, m1, m3, m4], proto.repeated_nested_message) # Test slice deletion. del proto.repeated_nested_message[2:] self.assertListsEqual([m0, m1], proto.repeated_nested_message) # Test extending. n1 = unittest_pb2.TestAllTypes.NestedMessage(bb=1) n2 = unittest_pb2.TestAllTypes.NestedMessage(bb=2) proto.repeated_nested_message.extend([n1,n2]) self.assertEqual(4, len(proto.repeated_nested_message)) self.assertEqual(n1, proto.repeated_nested_message[2]) self.assertEqual(n2, proto.repeated_nested_message[3]) # Test clearing. proto.ClearField('repeated_nested_message') self.assertTrue(not proto.repeated_nested_message) self.assertEqual(0, len(proto.repeated_nested_message)) # Test constructing an element while adding it. 
proto.repeated_nested_message.add(bb=23) self.assertEqual(1, len(proto.repeated_nested_message)) self.assertEqual(23, proto.repeated_nested_message[0].bb) def testHandWrittenReflection(self): # Hand written extensions are only supported by the pure-Python # implementation of the API. if api_implementation.Type() != 'python': return FieldDescriptor = descriptor.FieldDescriptor foo_field_descriptor = FieldDescriptor( name='foo_field', full_name='MyProto.foo_field', index=0, number=1, type=FieldDescriptor.TYPE_INT64, cpp_type=FieldDescriptor.CPPTYPE_INT64, label=FieldDescriptor.LABEL_OPTIONAL, default_value=0, containing_type=None, message_type=None, enum_type=None, is_extension=False, extension_scope=None, options=descriptor_pb2.FieldOptions()) mydescriptor = descriptor.Descriptor( name='MyProto', full_name='MyProto', filename='ignored', containing_type=None, nested_types=[], enum_types=[], fields=[foo_field_descriptor], extensions=[], options=descriptor_pb2.MessageOptions()) class MyProtoClass(message.Message): DESCRIPTOR = mydescriptor __metaclass__ = reflection.GeneratedProtocolMessageType myproto_instance = MyProtoClass() self.assertEqual(0, myproto_instance.foo_field) self.assertTrue(not myproto_instance.HasField('foo_field')) myproto_instance.foo_field = 23 self.assertEqual(23, myproto_instance.foo_field) self.assertTrue(myproto_instance.HasField('foo_field')) def testTopLevelExtensionsForOptionalScalar(self): extendee_proto = unittest_pb2.TestAllExtensions() extension = unittest_pb2.optional_int32_extension self.assertTrue(not extendee_proto.HasExtension(extension)) self.assertEqual(0, extendee_proto.Extensions[extension]) # As with normal scalar fields, just doing a read doesn't actually set the # "has" bit. self.assertTrue(not extendee_proto.HasExtension(extension)) # Actually set the thing. 
extendee_proto.Extensions[extension] = 23 self.assertEqual(23, extendee_proto.Extensions[extension]) self.assertTrue(extendee_proto.HasExtension(extension)) # Ensure that clearing works as well. extendee_proto.ClearExtension(extension) self.assertEqual(0, extendee_proto.Extensions[extension]) self.assertTrue(not extendee_proto.HasExtension(extension)) def testTopLevelExtensionsForRepeatedScalar(self): extendee_proto = unittest_pb2.TestAllExtensions() extension = unittest_pb2.repeated_string_extension self.assertEqual(0, len(extendee_proto.Extensions[extension])) extendee_proto.Extensions[extension].append('foo') self.assertEqual(['foo'], extendee_proto.Extensions[extension]) string_list = extendee_proto.Extensions[extension] extendee_proto.ClearExtension(extension) self.assertEqual(0, len(extendee_proto.Extensions[extension])) self.assertTrue(string_list is not extendee_proto.Extensions[extension]) # Shouldn't be allowed to do Extensions[extension] = 'a' self.assertRaises(TypeError, operator.setitem, extendee_proto.Extensions, extension, 'a') def testTopLevelExtensionsForOptionalMessage(self): extendee_proto = unittest_pb2.TestAllExtensions() extension = unittest_pb2.optional_foreign_message_extension self.assertTrue(not extendee_proto.HasExtension(extension)) self.assertEqual(0, extendee_proto.Extensions[extension].c) # As with normal (non-extension) fields, merely reading from the # thing shouldn't set the "has" bit. self.assertTrue(not extendee_proto.HasExtension(extension)) extendee_proto.Extensions[extension].c = 23 self.assertEqual(23, extendee_proto.Extensions[extension].c) self.assertTrue(extendee_proto.HasExtension(extension)) # Save a reference here. foreign_message = extendee_proto.Extensions[extension] extendee_proto.ClearExtension(extension) self.assertTrue(foreign_message is not extendee_proto.Extensions[extension]) # Setting a field on foreign_message now shouldn't set # any "has" bits on extendee_proto. 
foreign_message.c = 42 self.assertEqual(42, foreign_message.c) self.assertTrue(foreign_message.HasField('c')) self.assertTrue(not extendee_proto.HasExtension(extension)) # Shouldn't be allowed to do Extensions[extension] = 'a' self.assertRaises(TypeError, operator.setitem, extendee_proto.Extensions, extension, 'a') def testTopLevelExtensionsForRepeatedMessage(self): extendee_proto = unittest_pb2.TestAllExtensions() extension = unittest_pb2.repeatedgroup_extension self.assertEqual(0, len(extendee_proto.Extensions[extension])) group = extendee_proto.Extensions[extension].add() group.a = 23 self.assertEqual(23, extendee_proto.Extensions[extension][0].a) group.a = 42 self.assertEqual(42, extendee_proto.Extensions[extension][0].a) group_list = extendee_proto.Extensions[extension] extendee_proto.ClearExtension(extension) self.assertEqual(0, len(extendee_proto.Extensions[extension])) self.assertTrue(group_list is not extendee_proto.Extensions[extension]) # Shouldn't be allowed to do Extensions[extension] = 'a' self.assertRaises(TypeError, operator.setitem, extendee_proto.Extensions, extension, 'a') def testNestedExtensions(self): extendee_proto = unittest_pb2.TestAllExtensions() extension = unittest_pb2.TestRequired.single # We just test the non-repeated case. self.assertTrue(not extendee_proto.HasExtension(extension)) required = extendee_proto.Extensions[extension] self.assertEqual(0, required.a) self.assertTrue(not extendee_proto.HasExtension(extension)) required.a = 23 self.assertEqual(23, extendee_proto.Extensions[extension].a) self.assertTrue(extendee_proto.HasExtension(extension)) extendee_proto.ClearExtension(extension) self.assertTrue(required is not extendee_proto.Extensions[extension]) self.assertTrue(not extendee_proto.HasExtension(extension)) # If message A directly contains message B, and # a.HasField('b') is currently False, then mutating any # extension in B should change a.HasField('b') to True # (and so on up the object tree). 
  def testHasBitsForAncestorsOfExtendedMessage(self):
    """Mutating an extension inside a submessage must flip the parent's
    "has" bit, while merely reading the extension must not.
    """
    # Optional scalar extension.
    toplevel = more_extensions_pb2.TopLevelMessage()
    self.assertTrue(not toplevel.HasField('submessage'))
    # Reading an unset extension returns the default without setting has-bits.
    self.assertEqual(0, toplevel.submessage.Extensions[
        more_extensions_pb2.optional_int_extension])
    self.assertTrue(not toplevel.HasField('submessage'))
    # Writing the extension marks the enclosing submessage as present.
    toplevel.submessage.Extensions[
        more_extensions_pb2.optional_int_extension] = 23
    self.assertEqual(23, toplevel.submessage.Extensions[
        more_extensions_pb2.optional_int_extension])
    self.assertTrue(toplevel.HasField('submessage'))

    # Repeated scalar extension.
    toplevel = more_extensions_pb2.TopLevelMessage()
    self.assertTrue(not toplevel.HasField('submessage'))
    self.assertEqual([], toplevel.submessage.Extensions[
        more_extensions_pb2.repeated_int_extension])
    self.assertTrue(not toplevel.HasField('submessage'))
    # Appending to the repeated extension counts as a mutation.
    toplevel.submessage.Extensions[
        more_extensions_pb2.repeated_int_extension].append(23)
    self.assertEqual([23], toplevel.submessage.Extensions[
        more_extensions_pb2.repeated_int_extension])
    self.assertTrue(toplevel.HasField('submessage'))

    # Optional message extension.
    toplevel = more_extensions_pb2.TopLevelMessage()
    self.assertTrue(not toplevel.HasField('submessage'))
    self.assertEqual(0, toplevel.submessage.Extensions[
        more_extensions_pb2.optional_message_extension].foreign_message_int)
    self.assertTrue(not toplevel.HasField('submessage'))
    # Setting a field on the extension message propagates has-bits upward.
    toplevel.submessage.Extensions[
        more_extensions_pb2.optional_message_extension].foreign_message_int = 23
    self.assertEqual(23, toplevel.submessage.Extensions[
        more_extensions_pb2.optional_message_extension].foreign_message_int)
    self.assertTrue(toplevel.HasField('submessage'))

    # Repeated message extension.
toplevel = more_extensions_pb2.TopLevelMessage() self.assertTrue(not toplevel.HasField('submessage')) self.assertEqual(0, len(toplevel.submessage.Extensions[ more_extensions_pb2.repeated_message_extension])) self.assertTrue(not toplevel.HasField('submessage')) foreign = toplevel.submessage.Extensions[ more_extensions_pb2.repeated_message_extension].add() self.assertEqual(foreign, toplevel.submessage.Extensions[ more_extensions_pb2.repeated_message_extension][0]) self.assertTrue(toplevel.HasField('submessage')) def testDisconnectionAfterClearingEmptyMessage(self): toplevel = more_extensions_pb2.TopLevelMessage() extendee_proto = toplevel.submessage extension = more_extensions_pb2.optional_message_extension extension_proto = extendee_proto.Extensions[extension] extendee_proto.ClearExtension(extension) extension_proto.foreign_message_int = 23 self.assertTrue(extension_proto is not extendee_proto.Extensions[extension]) def testExtensionFailureModes(self): extendee_proto = unittest_pb2.TestAllExtensions() # Try non-extension-handle arguments to HasExtension, # ClearExtension(), and Extensions[]... self.assertRaises(KeyError, extendee_proto.HasExtension, 1234) self.assertRaises(KeyError, extendee_proto.ClearExtension, 1234) self.assertRaises(KeyError, extendee_proto.Extensions.__getitem__, 1234) self.assertRaises(KeyError, extendee_proto.Extensions.__setitem__, 1234, 5) # Try something that *is* an extension handle, just not for # this message... unknown_handle = more_extensions_pb2.optional_int_extension self.assertRaises(KeyError, extendee_proto.HasExtension, unknown_handle) self.assertRaises(KeyError, extendee_proto.ClearExtension, unknown_handle) self.assertRaises(KeyError, extendee_proto.Extensions.__getitem__, unknown_handle) self.assertRaises(KeyError, extendee_proto.Extensions.__setitem__, unknown_handle, 5) # Try call HasExtension() with a valid handle, but for a # *repeated* field. 
(Just as with non-extension repeated # fields, Has*() isn't supported for extension repeated fields). self.assertRaises(KeyError, extendee_proto.HasExtension, unittest_pb2.repeated_string_extension) def testStaticParseFrom(self): proto1 = unittest_pb2.TestAllTypes() test_util.SetAllFields(proto1) string1 = proto1.SerializeToString() proto2 = unittest_pb2.TestAllTypes.FromString(string1) # Messages should be equal. self.assertEqual(proto2, proto1) def testMergeFromSingularField(self): # Test merge with just a singular field. proto1 = unittest_pb2.TestAllTypes() proto1.optional_int32 = 1 proto2 = unittest_pb2.TestAllTypes() # This shouldn't get overwritten. proto2.optional_string = 'value' proto2.MergeFrom(proto1) self.assertEqual(1, proto2.optional_int32) self.assertEqual('value', proto2.optional_string) def testMergeFromRepeatedField(self): # Test merge with just a repeated field. proto1 = unittest_pb2.TestAllTypes() proto1.repeated_int32.append(1) proto1.repeated_int32.append(2) proto2 = unittest_pb2.TestAllTypes() proto2.repeated_int32.append(0) proto2.MergeFrom(proto1) self.assertEqual(0, proto2.repeated_int32[0]) self.assertEqual(1, proto2.repeated_int32[1]) self.assertEqual(2, proto2.repeated_int32[2]) def testMergeFromOptionalGroup(self): # Test merge with an optional group. proto1 = unittest_pb2.TestAllTypes() proto1.optionalgroup.a = 12 proto2 = unittest_pb2.TestAllTypes() proto2.MergeFrom(proto1) self.assertEqual(12, proto2.optionalgroup.a) def testMergeFromRepeatedNestedMessage(self): # Test merge with a repeated nested message. 
    proto1 = unittest_pb2.TestAllTypes()
    m = proto1.repeated_nested_message.add()
    m.bb = 123
    m = proto1.repeated_nested_message.add()
    m.bb = 321

    proto2 = unittest_pb2.TestAllTypes()
    m = proto2.repeated_nested_message.add()
    m.bb = 999
    # MergeFrom appends to repeated fields: proto2's own element (999) stays
    # first, followed by proto1's elements in order.
    proto2.MergeFrom(proto1)
    self.assertEqual(999, proto2.repeated_nested_message[0].bb)
    self.assertEqual(123, proto2.repeated_nested_message[1].bb)
    self.assertEqual(321, proto2.repeated_nested_message[2].bb)

    # MergeFrom is also supported directly on the repeated-field container.
    proto3 = unittest_pb2.TestAllTypes()
    proto3.repeated_nested_message.MergeFrom(proto2.repeated_nested_message)
    self.assertEqual(999, proto3.repeated_nested_message[0].bb)
    self.assertEqual(123, proto3.repeated_nested_message[1].bb)
    self.assertEqual(321, proto3.repeated_nested_message[2].bb)

  def testMergeFromAllFields(self):
    # With all fields set.
    proto1 = unittest_pb2.TestAllTypes()
    test_util.SetAllFields(proto1)
    proto2 = unittest_pb2.TestAllTypes()
    proto2.MergeFrom(proto1)

    # Messages should be equal.
    self.assertEqual(proto2, proto1)

    # Serialized string should be equal too.
string1 = proto1.SerializeToString() string2 = proto2.SerializeToString() self.assertEqual(string1, string2) def testMergeFromExtensionsSingular(self): proto1 = unittest_pb2.TestAllExtensions() proto1.Extensions[unittest_pb2.optional_int32_extension] = 1 proto2 = unittest_pb2.TestAllExtensions() proto2.MergeFrom(proto1) self.assertEqual( 1, proto2.Extensions[unittest_pb2.optional_int32_extension]) def testMergeFromExtensionsRepeated(self): proto1 = unittest_pb2.TestAllExtensions() proto1.Extensions[unittest_pb2.repeated_int32_extension].append(1) proto1.Extensions[unittest_pb2.repeated_int32_extension].append(2) proto2 = unittest_pb2.TestAllExtensions() proto2.Extensions[unittest_pb2.repeated_int32_extension].append(0) proto2.MergeFrom(proto1) self.assertEqual( 3, len(proto2.Extensions[unittest_pb2.repeated_int32_extension])) self.assertEqual( 0, proto2.Extensions[unittest_pb2.repeated_int32_extension][0]) self.assertEqual( 1, proto2.Extensions[unittest_pb2.repeated_int32_extension][1]) self.assertEqual( 2, proto2.Extensions[unittest_pb2.repeated_int32_extension][2]) def testMergeFromExtensionsNestedMessage(self): proto1 = unittest_pb2.TestAllExtensions() ext1 = proto1.Extensions[ unittest_pb2.repeated_nested_message_extension] m = ext1.add() m.bb = 222 m = ext1.add() m.bb = 333 proto2 = unittest_pb2.TestAllExtensions() ext2 = proto2.Extensions[ unittest_pb2.repeated_nested_message_extension] m = ext2.add() m.bb = 111 proto2.MergeFrom(proto1) ext2 = proto2.Extensions[ unittest_pb2.repeated_nested_message_extension] self.assertEqual(3, len(ext2)) self.assertEqual(111, ext2[0].bb) self.assertEqual(222, ext2[1].bb) self.assertEqual(333, ext2[2].bb) def testMergeFromBug(self): message1 = unittest_pb2.TestAllTypes() message2 = unittest_pb2.TestAllTypes() # Cause optional_nested_message to be instantiated within message1, even # though it is not considered to be "present". 
message1.optional_nested_message self.assertFalse(message1.HasField('optional_nested_message')) # Merge into message2. This should not instantiate the field is message2. message2.MergeFrom(message1) self.assertFalse(message2.HasField('optional_nested_message')) def testCopyFromSingularField(self): # Test copy with just a singular field. proto1 = unittest_pb2.TestAllTypes() proto1.optional_int32 = 1 proto1.optional_string = 'important-text' proto2 = unittest_pb2.TestAllTypes() proto2.optional_string = 'value' proto2.CopyFrom(proto1) self.assertEqual(1, proto2.optional_int32) self.assertEqual('important-text', proto2.optional_string) def testCopyFromRepeatedField(self): # Test copy with a repeated field. proto1 = unittest_pb2.TestAllTypes() proto1.repeated_int32.append(1) proto1.repeated_int32.append(2) proto2 = unittest_pb2.TestAllTypes() proto2.repeated_int32.append(0) proto2.CopyFrom(proto1) self.assertEqual(1, proto2.repeated_int32[0]) self.assertEqual(2, proto2.repeated_int32[1]) def testCopyFromAllFields(self): # With all fields set. proto1 = unittest_pb2.TestAllTypes() test_util.SetAllFields(proto1) proto2 = unittest_pb2.TestAllTypes() proto2.CopyFrom(proto1) # Messages should be equal. self.assertEqual(proto2, proto1) # Serialized string should be equal too. string1 = proto1.SerializeToString() string2 = proto2.SerializeToString() self.assertEqual(string1, string2) def testCopyFromSelf(self): proto1 = unittest_pb2.TestAllTypes() proto1.repeated_int32.append(1) proto1.optional_int32 = 2 proto1.optional_string = 'important-text' proto1.CopyFrom(proto1) self.assertEqual(1, proto1.repeated_int32[0]) self.assertEqual(2, proto1.optional_int32) self.assertEqual('important-text', proto1.optional_string) def testCopyFromBadType(self): # The python implementation doesn't raise an exception in this # case. In theory it should. 
if api_implementation.Type() == 'python': return proto1 = unittest_pb2.TestAllTypes() proto2 = unittest_pb2.TestAllExtensions() self.assertRaises(TypeError, proto1.CopyFrom, proto2) def testClear(self): proto = unittest_pb2.TestAllTypes() test_util.SetAllFields(proto) # Clear the message. proto.Clear() self.assertEquals(proto.ByteSize(), 0) empty_proto = unittest_pb2.TestAllTypes() self.assertEquals(proto, empty_proto) # Test if extensions which were set are cleared. proto = unittest_pb2.TestAllExtensions() test_util.SetAllExtensions(proto) # Clear the message. proto.Clear() self.assertEquals(proto.ByteSize(), 0) empty_proto = unittest_pb2.TestAllExtensions() self.assertEquals(proto, empty_proto) def assertInitialized(self, proto): self.assertTrue(proto.IsInitialized()) # Neither method should raise an exception. proto.SerializeToString() proto.SerializePartialToString() def assertNotInitialized(self, proto): self.assertFalse(proto.IsInitialized()) self.assertRaises(message.EncodeError, proto.SerializeToString) # "Partial" serialization doesn't care if message is uninitialized. proto.SerializePartialToString() def testIsInitialized(self): # Trivial cases - all optional fields and extensions. proto = unittest_pb2.TestAllTypes() self.assertInitialized(proto) proto = unittest_pb2.TestAllExtensions() self.assertInitialized(proto) # The case of uninitialized required fields. proto = unittest_pb2.TestRequired() self.assertNotInitialized(proto) proto.a = proto.b = proto.c = 2 self.assertInitialized(proto) # The case of uninitialized submessage. proto = unittest_pb2.TestRequiredForeign() self.assertInitialized(proto) proto.optional_message.a = 1 self.assertNotInitialized(proto) proto.optional_message.b = 0 proto.optional_message.c = 0 self.assertInitialized(proto) # Uninitialized repeated submessage. 
message1 = proto.repeated_message.add() self.assertNotInitialized(proto) message1.a = message1.b = message1.c = 0 self.assertInitialized(proto) # Uninitialized repeated group in an extension. proto = unittest_pb2.TestAllExtensions() extension = unittest_pb2.TestRequired.multi message1 = proto.Extensions[extension].add() message2 = proto.Extensions[extension].add() self.assertNotInitialized(proto) message1.a = 1 message1.b = 1 message1.c = 1 self.assertNotInitialized(proto) message2.a = 2 message2.b = 2 message2.c = 2 self.assertInitialized(proto) # Uninitialized nonrepeated message in an extension. proto = unittest_pb2.TestAllExtensions() extension = unittest_pb2.TestRequired.single proto.Extensions[extension].a = 1 self.assertNotInitialized(proto) proto.Extensions[extension].b = 2 proto.Extensions[extension].c = 3 self.assertInitialized(proto) # Try passing an errors list. errors = [] proto = unittest_pb2.TestRequired() self.assertFalse(proto.IsInitialized(errors)) self.assertEqual(errors, ['a', 'b', 'c']) def testStringUTF8Encoding(self): proto = unittest_pb2.TestAllTypes() # Assignment of a unicode object to a field of type 'bytes' is not allowed. self.assertRaises(TypeError, setattr, proto, 'optional_bytes', u'unicode object') # Check that the default value is of python's 'unicode' type. self.assertEqual(type(proto.optional_string), unicode) proto.optional_string = unicode('Testing') self.assertEqual(proto.optional_string, str('Testing')) # Assign a value of type 'str' which can be encoded in UTF-8. proto.optional_string = str('Testing') self.assertEqual(proto.optional_string, unicode('Testing')) if api_implementation.Type() == 'python': # Values of type 'str' are also accepted as long as they can be # encoded in UTF-8. self.assertEqual(type(proto.optional_string), str) # Try to assign a 'str' value which contains bytes that aren't 7-bit ASCII. 
self.assertRaises(ValueError, setattr, proto, 'optional_string', str('a\x80a')) # Assign a 'str' object which contains a UTF-8 encoded string. self.assertRaises(ValueError, setattr, proto, 'optional_string', 'Тест') # No exception thrown. proto.optional_string = 'abc' def testStringUTF8Serialization(self): proto = unittest_mset_pb2.TestMessageSet() extension_message = unittest_mset_pb2.TestMessageSetExtension2 extension = extension_message.message_set_extension test_utf8 = u'Тест' test_utf8_bytes = test_utf8.encode('utf-8') # 'Test' in another language, using UTF-8 charset. proto.Extensions[extension].str = test_utf8 # Serialize using the MessageSet wire format (this is specified in the # .proto file). serialized = proto.SerializeToString() # Check byte size. self.assertEqual(proto.ByteSize(), len(serialized)) raw = unittest_mset_pb2.RawMessageSet() raw.MergeFromString(serialized) message2 = unittest_mset_pb2.TestMessageSetExtension2() self.assertEqual(1, len(raw.item)) # Check that the type_id is the same as the tag ID in the .proto file. self.assertEqual(raw.item[0].type_id, 1547769) # Check the actual bytes on the wire. self.assertTrue( raw.item[0].message.endswith(test_utf8_bytes)) message2.MergeFromString(raw.item[0].message) self.assertEqual(type(message2.str), unicode) self.assertEqual(message2.str, test_utf8) # The pure Python API throws an exception on MergeFromString(), # if any of the string fields of the message can't be UTF-8 decoded. # The C++ implementation of the API has no way to check that on # MergeFromString and thus has no way to throw the exception. # # The pure Python API always returns objects of type 'unicode' (UTF-8 # encoded), or 'str' (in 7 bit ASCII). 
bytes = raw.item[0].message.replace( test_utf8_bytes, len(test_utf8_bytes) * '\xff') unicode_decode_failed = False try: message2.MergeFromString(bytes) except UnicodeDecodeError, e: unicode_decode_failed = True string_field = message2.str self.assertTrue(unicode_decode_failed or type(string_field) == str) def testEmptyNestedMessage(self): proto = unittest_pb2.TestAllTypes() proto.optional_nested_message.MergeFrom( unittest_pb2.TestAllTypes.NestedMessage()) self.assertTrue(proto.HasField('optional_nested_message')) proto = unittest_pb2.TestAllTypes() proto.optional_nested_message.CopyFrom( unittest_pb2.TestAllTypes.NestedMessage()) self.assertTrue(proto.HasField('optional_nested_message')) proto = unittest_pb2.TestAllTypes() proto.optional_nested_message.MergeFromString('') self.assertTrue(proto.HasField('optional_nested_message')) proto = unittest_pb2.TestAllTypes() proto.optional_nested_message.ParseFromString('') self.assertTrue(proto.HasField('optional_nested_message')) serialized = proto.SerializeToString() proto2 = unittest_pb2.TestAllTypes() proto2.MergeFromString(serialized) self.assertTrue(proto2.HasField('optional_nested_message')) def testSetInParent(self): proto = unittest_pb2.TestAllTypes() self.assertFalse(proto.HasField('optionalgroup')) proto.optionalgroup.SetInParent() self.assertTrue(proto.HasField('optionalgroup')) # Since we had so many tests for protocol buffer equality, we broke these out # into separate TestCase classes. 
class TestAllTypesEqualityTest(unittest.TestCase):

  """Equality tests starting from two default (empty) TestAllTypes protos."""

  def setUp(self):
    self.first_proto = unittest_pb2.TestAllTypes()
    self.second_proto = unittest_pb2.TestAllTypes()

  def testNotHashable(self):
    # Messages are mutable and therefore must not be hashable.
    self.assertRaises(TypeError, hash, self.first_proto)

  def testSelfEquality(self):
    self.assertEqual(self.first_proto, self.first_proto)

  def testEmptyProtosEqual(self):
    self.assertEqual(self.first_proto, self.second_proto)


class FullProtosEqualityTest(unittest.TestCase):

  """Equality tests using completely-full protos as a starting point."""

  def setUp(self):
    self.first_proto = unittest_pb2.TestAllTypes()
    self.second_proto = unittest_pb2.TestAllTypes()
    test_util.SetAllFields(self.first_proto)
    test_util.SetAllFields(self.second_proto)

  def testNotHashable(self):
    # Messages are mutable and therefore must not be hashable.
    self.assertRaises(TypeError, hash, self.first_proto)

  def testNoneNotEqual(self):
    self.assertNotEqual(self.first_proto, None)
    self.assertNotEqual(None, self.second_proto)

  def testNotEqualToOtherMessage(self):
    # Messages of different types never compare equal.
    third_proto = unittest_pb2.TestRequired()
    self.assertNotEqual(self.first_proto, third_proto)
    self.assertNotEqual(third_proto, self.second_proto)

  def testAllFieldsFilledEquality(self):
    self.assertEqual(self.first_proto, self.second_proto)

  def testNonRepeatedScalar(self):
    # Nonrepeated scalar field change should cause inequality.
    self.first_proto.optional_int32 += 1
    self.assertNotEqual(self.first_proto, self.second_proto)
    # ...as should clearing a field.
    self.first_proto.ClearField('optional_int32')
    self.assertNotEqual(self.first_proto, self.second_proto)

  def testNonRepeatedComposite(self):
    # Change a nonrepeated composite field.
    self.first_proto.optional_nested_message.bb += 1
    self.assertNotEqual(self.first_proto, self.second_proto)
    self.first_proto.optional_nested_message.bb -= 1
    self.assertEqual(self.first_proto, self.second_proto)
    # Clear a field in the nested message.
    self.first_proto.optional_nested_message.ClearField('bb')
    self.assertNotEqual(self.first_proto, self.second_proto)
    self.first_proto.optional_nested_message.bb = (
        self.second_proto.optional_nested_message.bb)
    self.assertEqual(self.first_proto, self.second_proto)
    # Remove the nested message entirely.
    self.first_proto.ClearField('optional_nested_message')
    self.assertNotEqual(self.first_proto, self.second_proto)

  def testRepeatedScalar(self):
    # Change a repeated scalar field.
    self.first_proto.repeated_int32.append(5)
    self.assertNotEqual(self.first_proto, self.second_proto)
    self.first_proto.ClearField('repeated_int32')
    self.assertNotEqual(self.first_proto, self.second_proto)

  def testRepeatedComposite(self):
    # Change value within a repeated composite field.
    self.first_proto.repeated_nested_message[0].bb += 1
    self.assertNotEqual(self.first_proto, self.second_proto)
    self.first_proto.repeated_nested_message[0].bb -= 1
    self.assertEqual(self.first_proto, self.second_proto)
    # Add a value to a repeated composite field.
    self.first_proto.repeated_nested_message.add()
    self.assertNotEqual(self.first_proto, self.second_proto)
    self.second_proto.repeated_nested_message.add()
    self.assertEqual(self.first_proto, self.second_proto)

  def testNonRepeatedScalarHasBits(self):
    # Ensure that we test "has" bits as well as value for
    # nonrepeated scalar field.
    self.first_proto.ClearField('optional_int32')
    self.second_proto.optional_int32 = 0
    self.assertNotEqual(self.first_proto, self.second_proto)

  def testNonRepeatedCompositeHasBits(self):
    # Ensure that we test "has" bits as well as value for
    # nonrepeated composite field.
    self.first_proto.ClearField('optional_nested_message')
    self.second_proto.optional_nested_message.ClearField('bb')
    self.assertNotEqual(self.first_proto, self.second_proto)
    self.first_proto.optional_nested_message.bb = 0
    self.first_proto.optional_nested_message.ClearField('bb')
    self.assertEqual(self.first_proto, self.second_proto)


class ExtensionEqualityTest(unittest.TestCase):

  """Equality semantics for messages carrying extensions."""

  def testExtensionEquality(self):
    first_proto = unittest_pb2.TestAllExtensions()
    second_proto = unittest_pb2.TestAllExtensions()
    self.assertEqual(first_proto, second_proto)
    test_util.SetAllExtensions(first_proto)
    self.assertNotEqual(first_proto, second_proto)
    test_util.SetAllExtensions(second_proto)
    self.assertEqual(first_proto, second_proto)

    # Ensure that we check value equality.
    first_proto.Extensions[unittest_pb2.optional_int32_extension] += 1
    self.assertNotEqual(first_proto, second_proto)
    first_proto.Extensions[unittest_pb2.optional_int32_extension] -= 1
    self.assertEqual(first_proto, second_proto)

    # Ensure that we also look at "has" bits.
    first_proto.ClearExtension(unittest_pb2.optional_int32_extension)
    second_proto.Extensions[unittest_pb2.optional_int32_extension] = 0
    self.assertNotEqual(first_proto, second_proto)
    first_proto.Extensions[unittest_pb2.optional_int32_extension] = 0
    self.assertEqual(first_proto, second_proto)

    # Ensure that differences in cached values
    # don't matter if "has" bits are both false.
    first_proto = unittest_pb2.TestAllExtensions()
    second_proto = unittest_pb2.TestAllExtensions()
    self.assertEqual(
        0, first_proto.Extensions[unittest_pb2.optional_int32_extension])
    self.assertEqual(first_proto, second_proto)


class MutualRecursionEqualityTest(unittest.TestCase):

  """Equality must recurse correctly through mutually-recursive messages."""

  def testEqualityWithMutualRecursion(self):
    first_proto = unittest_pb2.TestMutualRecursionA()
    second_proto = unittest_pb2.TestMutualRecursionA()
    self.assertEqual(first_proto, second_proto)
    first_proto.bb.a.bb.optional_int32 = 23
    self.assertNotEqual(first_proto, second_proto)
    second_proto.bb.a.bb.optional_int32 = 23
    self.assertEqual(first_proto, second_proto)


class ByteSizeTest(unittest.TestCase):

  """Checks ByteSize() against hand-computed wire-format sizes."""

  def setUp(self):
    self.proto = unittest_pb2.TestAllTypes()
    self.extended_proto = more_extensions_pb2.ExtendedMessage()
    self.packed_proto = unittest_pb2.TestPackedTypes()
    self.packed_extended_proto = unittest_pb2.TestPackedExtensions()

  def Size(self):
    # Convenience shorthand used throughout this TestCase.
    return self.proto.ByteSize()

  def testEmptyMessage(self):
    self.assertEqual(0, self.proto.ByteSize())

  def testSizedOnKwargs(self):
    # Use a separate message to ensure testing right after creation.
    proto = unittest_pb2.TestAllTypes()
    self.assertEqual(0, proto.ByteSize())
    proto_kwargs = unittest_pb2.TestAllTypes(optional_int64 = 1)
    # One byte for the tag, one to encode varint 1.
    self.assertEqual(2, proto_kwargs.ByteSize())

  def testVarints(self):
    def Test(i, expected_varint_size):
      self.proto.Clear()
      self.proto.optional_int64 = i
      # Add one to the varint size for the tag info
      # for tag 1.
      self.assertEqual(expected_varint_size + 1, self.Size())
    Test(0, 1)
    Test(1, 1)
    # Each additional 7 bits of payload costs one more varint byte.
    for i, num_bytes in zip(range(7, 63, 7), range(1, 10000)):
      Test((1 << i) - 1, num_bytes)
    # Negative int64s always encode as the full 10 varint bytes.
    Test(-1, 10)
    Test(-2, 10)
    Test(-(1 << 63), 10)

  def testStrings(self):
    self.proto.optional_string = ''
    # Need one byte for tag info (tag #14), and one byte for length.
    self.assertEqual(2, self.Size())

    self.proto.optional_string = 'abc'
    # Need one byte for tag info (tag #14), and one byte for length.
    self.assertEqual(2 + len(self.proto.optional_string), self.Size())

    self.proto.optional_string = 'x' * 128
    # Need one byte for tag info (tag #14), and TWO bytes for length.
    self.assertEqual(3 + len(self.proto.optional_string), self.Size())

  def testOtherNumerics(self):
    self.proto.optional_fixed32 = 1234
    # One byte for tag and 4 bytes for fixed32.
    self.assertEqual(5, self.Size())
    self.proto = unittest_pb2.TestAllTypes()

    self.proto.optional_fixed64 = 1234
    # One byte for tag and 8 bytes for fixed64.
    self.assertEqual(9, self.Size())
    self.proto = unittest_pb2.TestAllTypes()

    self.proto.optional_float = 1.234
    # One byte for tag and 4 bytes for float.
    self.assertEqual(5, self.Size())
    self.proto = unittest_pb2.TestAllTypes()

    self.proto.optional_double = 1.234
    # One byte for tag and 8 bytes for float.
    self.assertEqual(9, self.Size())
    self.proto = unittest_pb2.TestAllTypes()

    self.proto.optional_sint32 = 64
    # One byte for tag and 2 bytes for zig-zag-encoded 64.
    self.assertEqual(3, self.Size())
    self.proto = unittest_pb2.TestAllTypes()

  def testComposites(self):
    # 3 bytes.
    self.proto.optional_nested_message.bb = (1 << 14)
    # Plus one byte for bb tag.
    # Plus 1 byte for optional_nested_message serialized size.
    # Plus two bytes for optional_nested_message tag.
    self.assertEqual(3 + 1 + 1 + 2, self.Size())

  def testGroups(self):
    # 4 bytes.
    self.proto.optionalgroup.a = (1 << 21)
    # Plus two bytes for |a| tag.
    # Plus 2 * two bytes for START_GROUP and END_GROUP tags.
    self.assertEqual(4 + 2 + 2*2, self.Size())

  def testRepeatedScalars(self):
    self.proto.repeated_int32.append(10)  # 1 byte.
    self.proto.repeated_int32.append(128)  # 2 bytes.
    # Also need 2 bytes for each entry for tag.
    self.assertEqual(1 + 2 + 2*2, self.Size())

  def testRepeatedScalarsExtend(self):
    self.proto.repeated_int32.extend([10, 128])  # 3 bytes.
    # Also need 2 bytes for each entry for tag.
    self.assertEqual(1 + 2 + 2*2, self.Size())

  def testRepeatedScalarsRemove(self):
    self.proto.repeated_int32.append(10)  # 1 byte.
    self.proto.repeated_int32.append(128)  # 2 bytes.
    # Also need 2 bytes for each entry for tag.
    self.assertEqual(1 + 2 + 2*2, self.Size())
    self.proto.repeated_int32.remove(128)
    self.assertEqual(1 + 2, self.Size())

  def testRepeatedComposites(self):
    # Empty message.  2 bytes tag plus 1 byte length.
    foreign_message_0 = self.proto.repeated_nested_message.add()
    # 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
    foreign_message_1 = self.proto.repeated_nested_message.add()
    foreign_message_1.bb = 7
    self.assertEqual(2 + 1 + 2 + 1 + 1 + 1, self.Size())

  def testRepeatedCompositesDelete(self):
    # Empty message.  2 bytes tag plus 1 byte length.
    foreign_message_0 = self.proto.repeated_nested_message.add()
    # 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
    foreign_message_1 = self.proto.repeated_nested_message.add()
    foreign_message_1.bb = 9
    self.assertEqual(2 + 1 + 2 + 1 + 1 + 1, self.Size())

    # 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
    del self.proto.repeated_nested_message[0]
    self.assertEqual(2 + 1 + 1 + 1, self.Size())

    # Now add a new message.
    foreign_message_2 = self.proto.repeated_nested_message.add()
    foreign_message_2.bb = 12

    # 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
    # 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
    self.assertEqual(2 + 1 + 1 + 1 + 2 + 1 + 1 + 1, self.Size())

    # 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
    del self.proto.repeated_nested_message[1]
    self.assertEqual(2 + 1 + 1 + 1, self.Size())

    del self.proto.repeated_nested_message[0]
    self.assertEqual(0, self.Size())

  def testRepeatedGroups(self):
    # 2-byte START_GROUP plus 2-byte END_GROUP.
    group_0 = self.proto.repeatedgroup.add()
    # 2-byte START_GROUP plus 2-byte |a| tag + 1-byte |a|
    # plus 2-byte END_GROUP.
    group_1 = self.proto.repeatedgroup.add()
    group_1.a = 7
    self.assertEqual(2 + 2 + 2 + 2 + 1 + 2, self.Size())

  def testExtensions(self):
    proto = unittest_pb2.TestAllExtensions()
    self.assertEqual(0, proto.ByteSize())
    extension = unittest_pb2.optional_int32_extension  # Field #1, 1 byte.
    proto.Extensions[extension] = 23
    # 1 byte for tag, 1 byte for value.
    self.assertEqual(2, proto.ByteSize())

  def testCacheInvalidationForNonrepeatedScalar(self):
    # ByteSize() is cached; each mutation below must invalidate the cache.
    # Test non-extension.
    self.proto.optional_int32 = 1
    self.assertEqual(2, self.proto.ByteSize())
    self.proto.optional_int32 = 128
    self.assertEqual(3, self.proto.ByteSize())
    self.proto.ClearField('optional_int32')
    self.assertEqual(0, self.proto.ByteSize())

    # Test within extension.
    extension = more_extensions_pb2.optional_int_extension
    self.extended_proto.Extensions[extension] = 1
    self.assertEqual(2, self.extended_proto.ByteSize())
    self.extended_proto.Extensions[extension] = 128
    self.assertEqual(3, self.extended_proto.ByteSize())
    self.extended_proto.ClearExtension(extension)
    self.assertEqual(0, self.extended_proto.ByteSize())

  def testCacheInvalidationForRepeatedScalar(self):
    # Test non-extension.
    self.proto.repeated_int32.append(1)
    self.assertEqual(3, self.proto.ByteSize())
    self.proto.repeated_int32.append(1)
    self.assertEqual(6, self.proto.ByteSize())
    self.proto.repeated_int32[1] = 128
    self.assertEqual(7, self.proto.ByteSize())
    self.proto.ClearField('repeated_int32')
    self.assertEqual(0, self.proto.ByteSize())

    # Test within extension.
    extension = more_extensions_pb2.repeated_int_extension
    repeated = self.extended_proto.Extensions[extension]
    repeated.append(1)
    self.assertEqual(2, self.extended_proto.ByteSize())
    repeated.append(1)
    self.assertEqual(4, self.extended_proto.ByteSize())
    repeated[1] = 128
    self.assertEqual(5, self.extended_proto.ByteSize())
    self.extended_proto.ClearExtension(extension)
    self.assertEqual(0, self.extended_proto.ByteSize())

  def testCacheInvalidationForNonrepeatedMessage(self):
    # Test non-extension.
    self.proto.optional_foreign_message.c = 1
    self.assertEqual(5, self.proto.ByteSize())
    self.proto.optional_foreign_message.c = 128
    self.assertEqual(6, self.proto.ByteSize())
    self.proto.optional_foreign_message.ClearField('c')
    self.assertEqual(3, self.proto.ByteSize())
    self.proto.ClearField('optional_foreign_message')
    self.assertEqual(0, self.proto.ByteSize())

    if api_implementation.Type() == 'python':
      # This is only possible in pure-Python implementation of the API.
      # A detached child must no longer contribute to the parent's size.
      child = self.proto.optional_foreign_message
      self.proto.ClearField('optional_foreign_message')
      child.c = 128
      self.assertEqual(0, self.proto.ByteSize())

    # Test within extension.
    extension = more_extensions_pb2.optional_message_extension
    child = self.extended_proto.Extensions[extension]
    self.assertEqual(0, self.extended_proto.ByteSize())
    child.foreign_message_int = 1
    self.assertEqual(4, self.extended_proto.ByteSize())
    child.foreign_message_int = 128
    self.assertEqual(5, self.extended_proto.ByteSize())
    self.extended_proto.ClearExtension(extension)
    self.assertEqual(0, self.extended_proto.ByteSize())

  def testCacheInvalidationForRepeatedMessage(self):
    # Test non-extension.
    child0 = self.proto.repeated_foreign_message.add()
    self.assertEqual(3, self.proto.ByteSize())
    self.proto.repeated_foreign_message.add()
    self.assertEqual(6, self.proto.ByteSize())
    child0.c = 1
    self.assertEqual(8, self.proto.ByteSize())
    self.proto.ClearField('repeated_foreign_message')
    self.assertEqual(0, self.proto.ByteSize())

    # Test within extension.
    extension = more_extensions_pb2.repeated_message_extension
    child_list = self.extended_proto.Extensions[extension]
    child0 = child_list.add()
    self.assertEqual(2, self.extended_proto.ByteSize())
    child_list.add()
    self.assertEqual(4, self.extended_proto.ByteSize())
    child0.foreign_message_int = 1
    self.assertEqual(6, self.extended_proto.ByteSize())
    child0.ClearField('foreign_message_int')
    self.assertEqual(4, self.extended_proto.ByteSize())
    self.extended_proto.ClearExtension(extension)
    self.assertEqual(0, self.extended_proto.ByteSize())

  def testPackedRepeatedScalars(self):
    self.assertEqual(0, self.packed_proto.ByteSize())

    self.packed_proto.packed_int32.append(10)   # 1 byte.
    self.packed_proto.packed_int32.append(128)  # 2 bytes.
    # The tag is 2 bytes (the field number is 90), and the varint
    # storing the length is 1 byte.
    int_size = 1 + 2 + 3
    self.assertEqual(int_size, self.packed_proto.ByteSize())

    self.packed_proto.packed_double.append(4.2)   # 8 bytes
    self.packed_proto.packed_double.append(3.25)  # 8 bytes
    # 2 more tag bytes, 1 more length byte.
    double_size = 8 + 8 + 3
    self.assertEqual(int_size+double_size, self.packed_proto.ByteSize())

    self.packed_proto.ClearField('packed_int32')
    self.assertEqual(double_size, self.packed_proto.ByteSize())

  def testPackedExtensions(self):
    self.assertEqual(0, self.packed_extended_proto.ByteSize())
    extension = self.packed_extended_proto.Extensions[
        unittest_pb2.packed_fixed32_extension]
    extension.extend([1, 2, 3, 4])   # 16 bytes
    # Tag is 3 bytes.
    self.assertEqual(19, self.packed_extended_proto.ByteSize())


# Issues to be sure to cover include:
# * Handling of unrecognized tags ("uninterpreted_bytes").
# * Handling of MessageSets.
# * Consistent ordering of tags in the wire format,
#   including ordering between extensions and non-extension
#   fields.
# * Consistent serialization of negative numbers, especially
#   negative int32s.
# * Handling of empty submessages (with and without "has"
#   bits set).
class SerializationTest(unittest.TestCase):

  """Serialization/parsing round trips, wire-format ordering, and
  required-field enforcement."""

  def testSerializeEmtpyMessage(self):
    first_proto = unittest_pb2.TestAllTypes()
    second_proto = unittest_pb2.TestAllTypes()
    serialized = first_proto.SerializeToString()
    self.assertEqual(first_proto.ByteSize(), len(serialized))
    second_proto.MergeFromString(serialized)
    self.assertEqual(first_proto, second_proto)

  def testSerializeAllFields(self):
    first_proto = unittest_pb2.TestAllTypes()
    second_proto = unittest_pb2.TestAllTypes()
    test_util.SetAllFields(first_proto)
    serialized = first_proto.SerializeToString()
    self.assertEqual(first_proto.ByteSize(), len(serialized))
    second_proto.MergeFromString(serialized)
    self.assertEqual(first_proto, second_proto)

  def testSerializeAllExtensions(self):
    first_proto = unittest_pb2.TestAllExtensions()
    second_proto = unittest_pb2.TestAllExtensions()
    test_util.SetAllExtensions(first_proto)
    serialized = first_proto.SerializeToString()
    second_proto.MergeFromString(serialized)
    self.assertEqual(first_proto, second_proto)

  def testSerializeNegativeValues(self):
    first_proto = unittest_pb2.TestAllTypes()

    first_proto.optional_int32 = -1
    first_proto.optional_int64 = -(2 << 40)
    first_proto.optional_sint32 = -3
    first_proto.optional_sint64 = -(4 << 40)
    first_proto.optional_sfixed32 = -5
    first_proto.optional_sfixed64 = -(6 << 40)

    second_proto = unittest_pb2.TestAllTypes.FromString(
        first_proto.SerializeToString())

    self.assertEqual(first_proto, second_proto)

  def testParseTruncated(self):
    # This test is only applicable for the Python implementation of the API.
    if api_implementation.Type() != 'python':
      return

    first_proto = unittest_pb2.TestAllTypes()
    test_util.SetAllFields(first_proto)
    serialized = first_proto.SerializeToString()

    # Try every possible truncation point; known-field and unknown-field
    # parsers must agree on success or failure at each one.
    for truncation_point in xrange(len(serialized) + 1):
      try:
        second_proto = unittest_pb2.TestAllTypes()
        unknown_fields = unittest_pb2.TestEmptyMessage()
        pos = second_proto._InternalParse(serialized, 0, truncation_point)
        # If we didn't raise an error then we read exactly the amount expected.
        self.assertEqual(truncation_point, pos)

        # Parsing to unknown fields should not throw if parsing to known fields
        # did not.
        try:
          pos2 = unknown_fields._InternalParse(serialized, 0, truncation_point)
          self.assertEqual(truncation_point, pos2)
        except message.DecodeError:
          self.fail('Parsing unknown fields failed when parsing known fields '
                    'did not.')
      except message.DecodeError:
        # Parsing unknown fields should also fail.
        self.assertRaises(message.DecodeError, unknown_fields._InternalParse,
                          serialized, 0, truncation_point)

  def testCanonicalSerializationOrder(self):
    proto = more_messages_pb2.OutOfOrderFields()
    # These are also their tag numbers.  Even though we're setting these in
    # reverse-tag order AND they're listed in reverse tag-order in the .proto
    # file, they should nonetheless be serialized in tag order.
    proto.optional_sint32 = 5
    proto.Extensions[more_messages_pb2.optional_uint64] = 4
    proto.optional_uint32 = 3
    proto.Extensions[more_messages_pb2.optional_int64] = 2
    proto.optional_int32 = 1
    serialized = proto.SerializeToString()
    self.assertEqual(proto.ByteSize(), len(serialized))
    d = _MiniDecoder(serialized)
    ReadTag = d.ReadFieldNumberAndWireType
    self.assertEqual((1, wire_format.WIRETYPE_VARINT), ReadTag())
    self.assertEqual(1, d.ReadInt32())
    self.assertEqual((2, wire_format.WIRETYPE_VARINT), ReadTag())
    self.assertEqual(2, d.ReadInt64())
    self.assertEqual((3, wire_format.WIRETYPE_VARINT), ReadTag())
    self.assertEqual(3, d.ReadUInt32())
    self.assertEqual((4, wire_format.WIRETYPE_VARINT), ReadTag())
    self.assertEqual(4, d.ReadUInt64())
    self.assertEqual((5, wire_format.WIRETYPE_VARINT), ReadTag())
    self.assertEqual(5, d.ReadSInt32())

  def testCanonicalSerializationOrderSameAsCpp(self):
    # Copy of the same test we use for C++.
    proto = unittest_pb2.TestFieldOrderings()
    test_util.SetAllFieldsAndExtensions(proto)
    serialized = proto.SerializeToString()
    test_util.ExpectAllFieldsAndExtensionsInOrder(serialized)

  def testMergeFromStringWhenFieldsAlreadySet(self):
    first_proto = unittest_pb2.TestAllTypes()
    first_proto.repeated_string.append('foobar')
    first_proto.optional_int32 = 23
    first_proto.optional_nested_message.bb = 42
    serialized = first_proto.SerializeToString()

    second_proto = unittest_pb2.TestAllTypes()
    second_proto.repeated_string.append('baz')
    second_proto.optional_int32 = 100
    second_proto.optional_nested_message.bb = 999

    second_proto.MergeFromString(serialized)
    # Ensure that we append to repeated fields.
    self.assertEqual(['baz', 'foobar'], list(second_proto.repeated_string))
    # Ensure that we overwrite nonrepeatd scalars.
    self.assertEqual(23, second_proto.optional_int32)
    # Ensure that we recursively call MergeFromString() on
    # submessages.
    self.assertEqual(42, second_proto.optional_nested_message.bb)

  def testMessageSetWireFormat(self):
    proto = unittest_mset_pb2.TestMessageSet()
    extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
    extension_message2 = unittest_mset_pb2.TestMessageSetExtension2
    extension1 = extension_message1.message_set_extension
    extension2 = extension_message2.message_set_extension
    proto.Extensions[extension1].i = 123
    proto.Extensions[extension2].str = 'foo'

    # Serialize using the MessageSet wire format (this is specified in the
    # .proto file).
    serialized = proto.SerializeToString()

    raw = unittest_mset_pb2.RawMessageSet()
    self.assertEqual(False,
                     raw.DESCRIPTOR.GetOptions().message_set_wire_format)
    raw.MergeFromString(serialized)
    self.assertEqual(2, len(raw.item))

    message1 = unittest_mset_pb2.TestMessageSetExtension1()
    message1.MergeFromString(raw.item[0].message)
    self.assertEqual(123, message1.i)

    message2 = unittest_mset_pb2.TestMessageSetExtension2()
    message2.MergeFromString(raw.item[1].message)
    self.assertEqual('foo', message2.str)

    # Deserialize using the MessageSet wire format.
    proto2 = unittest_mset_pb2.TestMessageSet()
    proto2.MergeFromString(serialized)
    self.assertEqual(123, proto2.Extensions[extension1].i)
    self.assertEqual('foo', proto2.Extensions[extension2].str)

    # Check byte size.
    self.assertEqual(proto2.ByteSize(), len(serialized))
    self.assertEqual(proto.ByteSize(), len(serialized))

  def testMessageSetWireFormatUnknownExtension(self):
    # Create a message using the message set wire format with an unknown
    # message.
    raw = unittest_mset_pb2.RawMessageSet()
    # Add an item.
    item = raw.item.add()
    item.type_id = 1545008
    extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
    message1 = unittest_mset_pb2.TestMessageSetExtension1()
    message1.i = 12345
    item.message = message1.SerializeToString()
    # Add a second, unknown extension.
    item = raw.item.add()
    item.type_id = 1545009
    extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
    message1 = unittest_mset_pb2.TestMessageSetExtension1()
    message1.i = 12346
    item.message = message1.SerializeToString()
    # Add another unknown extension.
    item = raw.item.add()
    item.type_id = 1545010
    message1 = unittest_mset_pb2.TestMessageSetExtension2()
    message1.str = 'foo'
    item.message = message1.SerializeToString()
    serialized = raw.SerializeToString()

    # Parse message using the message set wire format.
    proto = unittest_mset_pb2.TestMessageSet()
    proto.MergeFromString(serialized)

    # Check that the message parsed well.
    extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
    extension1 = extension_message1.message_set_extension
    self.assertEquals(12345, proto.Extensions[extension1].i)

  def testUnknownFields(self):
    proto = unittest_pb2.TestAllTypes()
    test_util.SetAllFields(proto)
    serialized = proto.SerializeToString()

    # The empty message should be parsable with all of the fields
    # unknown.
    proto2 = unittest_pb2.TestEmptyMessage()

    # Parsing this message should succeed.
    proto2.MergeFromString(serialized)

    # Now test with a int64 field set.
    proto = unittest_pb2.TestAllTypes()
    proto.optional_int64 = 0x0fffffffffffffff
    serialized = proto.SerializeToString()
    # The empty message should be parsable with all of the fields
    # unknown.
    proto2 = unittest_pb2.TestEmptyMessage()
    # Parsing this message should succeed.
    proto2.MergeFromString(serialized)

  def _CheckRaises(self, exc_class, callable_obj, exception):
    """This method checks if the exception type and message are as expected."""
    try:
      callable_obj()
    except exc_class, ex:
      # Check if the exception message is the right one.
      self.assertEqual(exception, str(ex))
      return
    else:
      raise self.failureException('%s not raised' % str(exc_class))

  def testSerializeUninitialized(self):
    proto = unittest_pb2.TestRequired()
    self._CheckRaises(
        message.EncodeError,
        proto.SerializeToString,
        'Message is missing required fields: a,b,c')

    # Shouldn't raise exceptions.
    partial = proto.SerializePartialToString()

    proto.a = 1
    self._CheckRaises(
        message.EncodeError,
        proto.SerializeToString,
        'Message is missing required fields: b,c')

    # Shouldn't raise exceptions.
    partial = proto.SerializePartialToString()

    proto.b = 2
    self._CheckRaises(
        message.EncodeError,
        proto.SerializeToString,
        'Message is missing required fields: c')

    # Shouldn't raise exceptions.
    partial = proto.SerializePartialToString()

    proto.c = 3
    serialized = proto.SerializeToString()
    # Shouldn't raise exceptions.
    partial = proto.SerializePartialToString()

    proto2 = unittest_pb2.TestRequired()
    proto2.MergeFromString(serialized)
    self.assertEqual(1, proto2.a)
    self.assertEqual(2, proto2.b)
    self.assertEqual(3, proto2.c)
    proto2.ParseFromString(partial)
    self.assertEqual(1, proto2.a)
    self.assertEqual(2, proto2.b)
    self.assertEqual(3, proto2.c)

  def testSerializeUninitializedSubMessage(self):
    proto = unittest_pb2.TestRequiredForeign()

    # Sub-message doesn't exist yet, so this succeeds.
    proto.SerializeToString()

    proto.optional_message.a = 1
    self._CheckRaises(
        message.EncodeError,
        proto.SerializeToString,
        'Message is missing required fields: '
        'optional_message.b,optional_message.c')

    proto.optional_message.b = 2
    proto.optional_message.c = 3
    proto.SerializeToString()

    proto.repeated_message.add().a = 1
    proto.repeated_message.add().b = 2
    self._CheckRaises(
        message.EncodeError,
        proto.SerializeToString,
        'Message is missing required fields: '
        'repeated_message[0].b,repeated_message[0].c,'
        'repeated_message[1].a,repeated_message[1].c')

    proto.repeated_message[0].b = 2
    proto.repeated_message[0].c = 3
    proto.repeated_message[1].a = 1
    proto.repeated_message[1].c = 3
    proto.SerializeToString()

  def testSerializeAllPackedFields(self):
    first_proto = unittest_pb2.TestPackedTypes()
    second_proto = unittest_pb2.TestPackedTypes()
    test_util.SetAllPackedFields(first_proto)
    serialized = first_proto.SerializeToString()
    self.assertEqual(first_proto.ByteSize(), len(serialized))
    bytes_read = second_proto.MergeFromString(serialized)
    self.assertEqual(second_proto.ByteSize(), bytes_read)
    self.assertEqual(first_proto, second_proto)

  def testSerializeAllPackedExtensions(self):
    first_proto = unittest_pb2.TestPackedExtensions()
    second_proto = unittest_pb2.TestPackedExtensions()
    test_util.SetAllPackedExtensions(first_proto)
    serialized = first_proto.SerializeToString()
    bytes_read = second_proto.MergeFromString(serialized)
    self.assertEqual(second_proto.ByteSize(), bytes_read)
    self.assertEqual(first_proto, second_proto)

  def testMergePackedFromStringWhenSomeFieldsAlreadySet(self):
    first_proto = unittest_pb2.TestPackedTypes()
    first_proto.packed_int32.extend([1, 2])
    first_proto.packed_double.append(3.0)
    serialized = first_proto.SerializeToString()

    second_proto = unittest_pb2.TestPackedTypes()
    second_proto.packed_int32.append(3)
    second_proto.packed_double.extend([1.0, 2.0])
    second_proto.packed_sint32.append(4)

    # Merging must append to packed repeated fields, not replace them.
    second_proto.MergeFromString(serialized)
    self.assertEqual([3, 1, 2], second_proto.packed_int32)
    self.assertEqual([1.0, 2.0, 3.0], second_proto.packed_double)
    self.assertEqual([4], second_proto.packed_sint32)

  def testPackedFieldsWireFormat(self):
    proto = unittest_pb2.TestPackedTypes()
    proto.packed_int32.extend([1, 2, 150, 3])  # 1 + 1 + 2 + 1 bytes
    proto.packed_double.extend([1.0, 1000.0])  # 8 + 8 bytes
    proto.packed_float.append(2.0)             # 4 bytes, will be before double
    serialized = proto.SerializeToString()
    self.assertEqual(proto.ByteSize(), len(serialized))
    d = _MiniDecoder(serialized)
    ReadTag = d.ReadFieldNumberAndWireType
    self.assertEqual((90, wire_format.WIRETYPE_LENGTH_DELIMITED), ReadTag())
    self.assertEqual(1+1+1+2, d.ReadInt32())
    self.assertEqual(1, d.ReadInt32())
    self.assertEqual(2, d.ReadInt32())
    self.assertEqual(150, d.ReadInt32())
    self.assertEqual(3, d.ReadInt32())
    self.assertEqual((100, wire_format.WIRETYPE_LENGTH_DELIMITED), ReadTag())
    self.assertEqual(4, d.ReadInt32())
    self.assertEqual(2.0, d.ReadFloat())
    self.assertEqual((101, wire_format.WIRETYPE_LENGTH_DELIMITED), ReadTag())
    self.assertEqual(8+8, d.ReadInt32())
    self.assertEqual(1.0, d.ReadDouble())
    self.assertEqual(1000.0, d.ReadDouble())
    self.assertTrue(d.EndOfStream())

  def testParsePackedFromUnpacked(self):
    # Unpacked wire data must still parse into a packed-declared field.
    unpacked = unittest_pb2.TestUnpackedTypes()
    test_util.SetAllUnpackedFields(unpacked)
    packed = unittest_pb2.TestPackedTypes()
    packed.MergeFromString(unpacked.SerializeToString())
    expected = unittest_pb2.TestPackedTypes()
    test_util.SetAllPackedFields(expected)
    self.assertEqual(expected, packed)

  def testParseUnpackedFromPacked(self):
    # Packed wire data must still parse into an unpacked-declared field.
    packed = unittest_pb2.TestPackedTypes()
    test_util.SetAllPackedFields(packed)
    unpacked = unittest_pb2.TestUnpackedTypes()
    unpacked.MergeFromString(packed.SerializeToString())
    expected = unittest_pb2.TestUnpackedTypes()
    test_util.SetAllUnpackedFields(expected)
    self.assertEqual(expected, unpacked)

  def testFieldNumbers(self):
    proto = unittest_pb2.TestAllTypes()
    self.assertEqual(unittest_pb2.TestAllTypes.NestedMessage.BB_FIELD_NUMBER, 1)
    self.assertEqual(unittest_pb2.TestAllTypes.OPTIONAL_INT32_FIELD_NUMBER, 1)
    self.assertEqual(unittest_pb2.TestAllTypes.OPTIONALGROUP_FIELD_NUMBER, 16)
    self.assertEqual(
      unittest_pb2.TestAllTypes.OPTIONAL_NESTED_MESSAGE_FIELD_NUMBER, 18)
    self.assertEqual(
      unittest_pb2.TestAllTypes.OPTIONAL_NESTED_ENUM_FIELD_NUMBER, 21)
    self.assertEqual(unittest_pb2.TestAllTypes.REPEATED_INT32_FIELD_NUMBER, 31)
    self.assertEqual(unittest_pb2.TestAllTypes.REPEATEDGROUP_FIELD_NUMBER, 46)
    self.assertEqual(
      unittest_pb2.TestAllTypes.REPEATED_NESTED_MESSAGE_FIELD_NUMBER, 48)
    self.assertEqual(
      unittest_pb2.TestAllTypes.REPEATED_NESTED_ENUM_FIELD_NUMBER, 51)

  def testExtensionFieldNumbers(self):
    self.assertEqual(unittest_pb2.TestRequired.single.number, 1000)
    self.assertEqual(unittest_pb2.TestRequired.SINGLE_FIELD_NUMBER, 1000)
    self.assertEqual(unittest_pb2.TestRequired.multi.number, 1001)
    self.assertEqual(unittest_pb2.TestRequired.MULTI_FIELD_NUMBER, 1001)
    self.assertEqual(unittest_pb2.optional_int32_extension.number, 1)
    self.assertEqual(unittest_pb2.OPTIONAL_INT32_EXTENSION_FIELD_NUMBER, 1)
    self.assertEqual(unittest_pb2.optionalgroup_extension.number, 16)
    self.assertEqual(unittest_pb2.OPTIONALGROUP_EXTENSION_FIELD_NUMBER, 16)
    self.assertEqual(unittest_pb2.optional_nested_message_extension.number, 18)
    self.assertEqual(
      unittest_pb2.OPTIONAL_NESTED_MESSAGE_EXTENSION_FIELD_NUMBER, 18)
    self.assertEqual(unittest_pb2.optional_nested_enum_extension.number, 21)
    self.assertEqual(unittest_pb2.OPTIONAL_NESTED_ENUM_EXTENSION_FIELD_NUMBER,
                     21)
    self.assertEqual(unittest_pb2.repeated_int32_extension.number, 31)
    self.assertEqual(unittest_pb2.REPEATED_INT32_EXTENSION_FIELD_NUMBER, 31)
    self.assertEqual(unittest_pb2.repeatedgroup_extension.number, 46)
    self.assertEqual(unittest_pb2.REPEATEDGROUP_EXTENSION_FIELD_NUMBER, 46)
    self.assertEqual(unittest_pb2.repeated_nested_message_extension.number, 48)
    self.assertEqual(
      unittest_pb2.REPEATED_NESTED_MESSAGE_EXTENSION_FIELD_NUMBER, 48)
    self.assertEqual(unittest_pb2.repeated_nested_enum_extension.number, 51)
    self.assertEqual(unittest_pb2.REPEATED_NESTED_ENUM_EXTENSION_FIELD_NUMBER,
                     51)

  def testInitKwargs(self):
    proto = unittest_pb2.TestAllTypes(
        optional_int32=1,
        optional_string='foo',
        optional_bool=True,
        optional_bytes='bar',
        optional_nested_message=unittest_pb2.TestAllTypes.NestedMessage(bb=1),
        optional_foreign_message=unittest_pb2.ForeignMessage(c=1),
        optional_nested_enum=unittest_pb2.TestAllTypes.FOO,
        optional_foreign_enum=unittest_pb2.FOREIGN_FOO,
        repeated_int32=[1, 2, 3])
    self.assertTrue(proto.IsInitialized())
    self.assertTrue(proto.HasField('optional_int32'))
    self.assertTrue(proto.HasField('optional_string'))
    self.assertTrue(proto.HasField('optional_bool'))
    self.assertTrue(proto.HasField('optional_bytes'))
    self.assertTrue(proto.HasField('optional_nested_message'))
    self.assertTrue(proto.HasField('optional_foreign_message'))
    self.assertTrue(proto.HasField('optional_nested_enum'))
    self.assertTrue(proto.HasField('optional_foreign_enum'))
    self.assertEqual(1, proto.optional_int32)
    self.assertEqual('foo', proto.optional_string)
    self.assertEqual(True, proto.optional_bool)
    self.assertEqual('bar', proto.optional_bytes)
    self.assertEqual(1, proto.optional_nested_message.bb)
    self.assertEqual(1, proto.optional_foreign_message.c)
    self.assertEqual(unittest_pb2.TestAllTypes.FOO,
                     proto.optional_nested_enum)
    self.assertEqual(unittest_pb2.FOREIGN_FOO, proto.optional_foreign_enum)
    self.assertEqual([1, 2, 3], proto.repeated_int32)

  def testInitArgsUnknownFieldName(self):
    def InitalizeEmptyMessageWithExtraKeywordArg():
      unused_proto = unittest_pb2.TestEmptyMessage(unknown='unknown')
    self._CheckRaises(ValueError,
                      InitalizeEmptyMessageWithExtraKeywordArg,
                      'Protocol message has no "unknown" field.')

  def testInitRequiredKwargs(self):
    proto = unittest_pb2.TestRequired(a=1, b=1, c=1)
    self.assertTrue(proto.IsInitialized())
    self.assertTrue(proto.HasField('a'))
    self.assertTrue(proto.HasField('b'))
    self.assertTrue(proto.HasField('c'))
    self.assertTrue(not proto.HasField('dummy2'))
    self.assertEqual(1, proto.a)
    self.assertEqual(1, proto.b)
    self.assertEqual(1, proto.c)

  def testInitRequiredForeignKwargs(self):
    proto = unittest_pb2.TestRequiredForeign(
        optional_message=unittest_pb2.TestRequired(a=1, b=1, c=1))
    self.assertTrue(proto.IsInitialized())
    self.assertTrue(proto.HasField('optional_message'))
    self.assertTrue(proto.optional_message.IsInitialized())
    self.assertTrue(proto.optional_message.HasField('a'))
    self.assertTrue(proto.optional_message.HasField('b'))
    self.assertTrue(proto.optional_message.HasField('c'))
    self.assertTrue(not proto.optional_message.HasField('dummy2'))
    self.assertEqual(unittest_pb2.TestRequired(a=1, b=1, c=1),
                     proto.optional_message)
    self.assertEqual(1, proto.optional_message.a)
    self.assertEqual(1, proto.optional_message.b)
    self.assertEqual(1, proto.optional_message.c)

  def testInitRepeatedKwargs(self):
    proto = unittest_pb2.TestAllTypes(repeated_int32=[1, 2, 3])
    self.assertTrue(proto.IsInitialized())
    self.assertEqual(1, proto.repeated_int32[0])
    self.assertEqual(2, proto.repeated_int32[1])
    self.assertEqual(3, proto.repeated_int32[2])


class OptionsTest(unittest.TestCase):

  """Access to message-level and field-level options via descriptors."""

  def testMessageOptions(self):
    proto = unittest_mset_pb2.TestMessageSet()
    self.assertEqual(True,
                     proto.DESCRIPTOR.GetOptions().message_set_wire_format)
    proto = unittest_pb2.TestAllTypes()
    self.assertEqual(False,
                     proto.DESCRIPTOR.GetOptions().message_set_wire_format)

  def testPackedOptions(self):
    proto = unittest_pb2.TestAllTypes()
    proto.optional_int32 = 1
    proto.optional_double = 3.0
    for field_descriptor, _ in proto.ListFields():
      self.assertEqual(False, field_descriptor.GetOptions().packed)

    proto = unittest_pb2.TestPackedTypes()
    proto.packed_int32.append(1)
    proto.packed_double.append(3.0)
    for field_descriptor, _ in proto.ListFields():
      self.assertEqual(True, field_descriptor.GetOptions().packed)
      self.assertEqual(reflection._FieldDescriptor.LABEL_REPEATED,
                       field_descriptor.label)


if __name__ == '__main__':
  unittest.main()
bsd-3-clause
srikantbmandal/ansible
lib/ansible/modules/packaging/os/redhat_subscription.py
16
27067
#!/usr/bin/python # James Laska (jlaska@redhat.com) # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: redhat_subscription short_description: Manage registration and subscriptions to RHSM using the C(subscription-manager) command description: - Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command version_added: "1.2" author: "Barnaby Court (@barnabycourt)" notes: - In order to register a system, subscription-manager requires either a username and password, or an activationkey and an Organization ID. 
requirements: - subscription-manager options: state: description: - whether to register and subscribe (C(present)), or unregister (C(absent)) a system required: false choices: [ "present", "absent" ] default: "present" username: description: - access.redhat.com or Sat6 username required: False default: null password: description: - access.redhat.com or Sat6 password required: False default: null server_hostname: description: - Specify an alternative Red Hat Subscription Management or Sat6 server required: False default: Current value from C(/etc/rhsm/rhsm.conf) is the default server_insecure: description: - Enable or disable https server certificate verification when connecting to C(server_hostname) required: False default: Current value from C(/etc/rhsm/rhsm.conf) is the default rhsm_baseurl: description: - Specify CDN baseurl required: False default: Current value from C(/etc/rhsm/rhsm.conf) is the default autosubscribe: description: - Upon successful registration, auto-consume available subscriptions required: False default: False activationkey: description: - supply an activation key for use with registration required: False default: null org_id: description: - Organization ID to use in conjunction with activationkey required: False default: null version_added: "2.0" environment: description: - Register with a specific environment in the destination org. Used with Red Hat Satellite 6.x or Katello required: False default: null version_added: "2.2" pool: description: - | Specify a subscription pool name to consume. Regular expressions accepted. Use I(pool_ids) instead if possible, as it is much faster. Mutually exclusive with I(pool_ids). required: False default: '^$' pool_ids: description: - | Specify subscription pool IDs to consume. Prefer over I(pool) when possible as it is much faster. A pool ID may be specified as a C(string) - just the pool ID (ex. 
C(0123456789abcdef0123456789abcdef)), or as a C(dict) with the pool ID as the key, and a quantity as the value (ex. C(0123456789abcdef0123456789abcdef: 2). If the quantity is provided, it is used to consume multiple entitlements from a pool (the pool must support this). Mutually exclusive with I(pool). default: [] version_added: "2.4" consumer_type: description: - The type of unit to register, defaults to system required: False default: null version_added: "2.1" consumer_name: description: - Name of the system to register, defaults to the hostname required: False default: null version_added: "2.1" consumer_id: description: - | References an existing consumer ID to resume using a previous registration for this system. If the system's identity certificate is lost or corrupted, this option allows it to resume using its previous identity and subscriptions. The default is to not specify a consumer ID so a new ID is created. required: False default: null version_added: "2.1" force_register: description: - Register the system even if it is already registered required: False default: False version_added: "2.2" ''' EXAMPLES = ''' - name: Register as user (joe_user) with password (somepass) and auto-subscribe to available content. redhat_subscription: state: present username: joe_user password: somepass autosubscribe: true - name: Same as above but subscribe to a specific pool by ID. redhat_subscription: state: present username: joe_user password: somepass pool_ids: 0123456789abcdef0123456789abcdef - name: Register and subscribe to multiple pools. redhat_subscription: state: present username: joe_user password: somepass pool_ids: - 0123456789abcdef0123456789abcdef - 1123456789abcdef0123456789abcdef - name: Same as above but consume multiple entitlements. redhat_subscription: state: present username: joe_user password: somepass pool_ids: - 0123456789abcdef0123456789abcdef: 2 - 1123456789abcdef0123456789abcdef: 4 - name: Register and pull existing system data. 
redhat_subscription: state: present username: joe_user password: somepass consumer_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - name: Register with activationkey and consume subscriptions matching Red Hat Enterprise Server or Red Hat Virtualization redhat_subscription: state: present activationkey: 1-222333444 org_id: 222333444 pool: '^(Red Hat Enterprise Server|Red Hat Virtualization)$' - name: Update the consumed subscriptions from the previous example (remove Red Hat Virtualization subscription) redhat_subscription: state: present activationkey: 1-222333444 org_id: 222333444 pool: '^Red Hat Enterprise Server$' - name: Register as user credentials into given environment (against Red Hat Satellite 6.x), and auto-subscribe. redhat_subscription: state: present username: joe_user password: somepass environment: Library autosubscribe: yes ''' import os import re import types from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.pycompat24 import get_exception from ansible.module_utils.six.moves import configparser SUBMAN_CMD = None class RegistrationBase(object): def __init__(self, module, username=None, password=None): self.module = module self.username = username self.password = password def configure(self): raise NotImplementedError("Must be implemented by a sub-class") def enable(self): # Remove any existing redhat.repo redhat_repo = '/etc/yum.repos.d/redhat.repo' if os.path.isfile(redhat_repo): os.unlink(redhat_repo) def register(self): raise NotImplementedError("Must be implemented by a sub-class") def unregister(self): raise NotImplementedError("Must be implemented by a sub-class") def unsubscribe(self): raise NotImplementedError("Must be implemented by a sub-class") def update_plugin_conf(self, plugin, enabled=True): plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin if os.path.isfile(plugin_conf): cfg = configparser.ConfigParser() cfg.read([plugin_conf]) if enabled: cfg.set('main', 'enabled', 1) else: cfg.set('main', 'enabled', 0) 
fd = open(plugin_conf, 'rwa+') cfg.write(fd) fd.close() def subscribe(self, **kwargs): raise NotImplementedError("Must be implemented by a sub-class") class Rhsm(RegistrationBase): def __init__(self, module, username=None, password=None): RegistrationBase.__init__(self, module, username, password) self.config = self._read_config() self.module = module def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'): ''' Load RHSM configuration from /etc/rhsm/rhsm.conf. Returns: * ConfigParser object ''' # Read RHSM defaults ... cp = configparser.ConfigParser() cp.read(rhsm_conf) # Add support for specifying a default value w/o having to standup some configuration # Yeah, I know this should be subclassed ... but, oh well def get_option_default(self, key, default=''): sect, opt = key.split('.', 1) if self.has_section(sect) and self.has_option(sect, opt): return self.get(sect, opt) else: return default cp.get_option = types.MethodType(get_option_default, cp, configparser.ConfigParser) return cp def enable(self): ''' Enable the system to receive updates from subscription-manager. This involves updating affected yum plugins and removing any conflicting yum repositories. ''' RegistrationBase.enable(self) self.update_plugin_conf('rhnplugin', False) self.update_plugin_conf('subscription-manager', True) def configure(self, **kwargs): ''' Configure the system as directed for registration with RHSM Raises: * Exception - if error occurs while running command ''' args = [SUBMAN_CMD, 'config'] # Pass supplied **kwargs as parameters to subscription-manager. Ignore # non-configuration parameters and replace '_' with '.'. For example, # 'server_hostname' becomes '--server.hostname'. 
for k, v in kwargs.items(): if re.search(r'^(server|rhsm)_', k): args.append('--%s=%s' % (k.replace('_', '.'), v)) self.module.run_command(args, check_rc=True) @property def is_registered(self): ''' Determine whether the current system Returns: * Boolean - whether the current system is currently registered to RHSM. ''' args = [SUBMAN_CMD, 'identity'] rc, stdout, stderr = self.module.run_command(args, check_rc=False) if rc == 0: return True else: return False def register(self, username, password, autosubscribe, activationkey, org_id, consumer_type, consumer_name, consumer_id, force_register, environment, rhsm_baseurl, server_insecure): ''' Register the current system to the provided RHSM or Sat6 server Raises: * Exception - if error occurs while running command ''' args = [SUBMAN_CMD, 'register'] # Generate command arguments if force_register: args.extend(['--force']) if rhsm_baseurl: args.extend(['--baseurl', rhsm_baseurl]) if server_insecure: args.extend(['--insecure']) if activationkey: args.extend(['--activationkey', activationkey]) args.extend(['--org', org_id]) else: if autosubscribe: args.append('--autosubscribe') if username: args.extend(['--username', username]) if password: args.extend(['--password', password]) if consumer_type: args.extend(['--type', consumer_type]) if consumer_name: args.extend(['--name', consumer_name]) if consumer_id: args.extend(['--consumerid', consumer_id]) if environment: args.extend(['--environment', environment]) rc, stderr, stdout = self.module.run_command(args, check_rc=True) def unsubscribe(self, serials=None): ''' Unsubscribe a system from subscribed channels Args: serials(list or None): list of serials to unsubscribe. If serials is none or an empty list, then all subscribed channels will be removed. 
Raises: * Exception - if error occurs while running command ''' items = [] if serials is not None and serials: items = ["--serial=%s" % s for s in serials] if serials is None: items = ["--all"] if items: args = [SUBMAN_CMD, 'unsubscribe'] + items rc, stderr, stdout = self.module.run_command(args, check_rc=True) return serials def unregister(self): ''' Unregister a currently registered system Raises: * Exception - if error occurs while running command ''' args = [SUBMAN_CMD, 'unregister'] rc, stderr, stdout = self.module.run_command(args, check_rc=True) def subscribe(self, regexp): ''' Subscribe current system to available pools matching the specified regular expression. It matches regexp against available pool ids first. If any pool ids match, subscribe to those pools and return. If no pool ids match, then match regexp against available pool product names. Note this can still easily match many many pools. Then subscribe to those pools. Since a pool id is a more specific match, we only fallback to matching against names if we didnt match pool ids. Raises: * Exception - if error occurs while running command ''' # See https://github.com/ansible/ansible/issues/19466 # subscribe to pools whose pool id matches regexp (and only the pool id) subscribed_pool_ids = self.subscribe_pool(regexp) # If we found any matches, we are done # Don't attempt to match pools by product name if subscribed_pool_ids: return subscribed_pool_ids # We didn't match any pool ids. # Now try subscribing to pools based on product name match # Note: This can match lots of product names. 
subscribed_by_product_pool_ids = self.subscribe_product(regexp) if subscribed_by_product_pool_ids: return subscribed_by_product_pool_ids # no matches return [] def subscribe_by_pool_ids(self, pool_ids): for pool_id, quantity in pool_ids.items(): args = [SUBMAN_CMD, 'attach', '--pool', pool_id, '--quantity', quantity] rc, stderr, stdout = self.module.run_command(args, check_rc=True) return pool_ids def subscribe_pool(self, regexp): ''' Subscribe current system to available pools matching the specified regular expression Raises: * Exception - if error occurs while running command ''' # Available pools ready for subscription available_pools = RhsmPools(self.module) subscribed_pool_ids = [] for pool in available_pools.filter_pools(regexp): pool.subscribe() subscribed_pool_ids.append(pool.get_pool_id()) return subscribed_pool_ids def subscribe_product(self, regexp): ''' Subscribe current system to available pools matching the specified regular expression Raises: * Exception - if error occurs while running command ''' # Available pools ready for subscription available_pools = RhsmPools(self.module) subscribed_pool_ids = [] for pool in available_pools.filter_products(regexp): pool.subscribe() subscribed_pool_ids.append(pool.get_pool_id()) return subscribed_pool_ids def update_subscriptions(self, regexp): changed = False consumed_pools = RhsmPools(self.module, consumed=True) pool_ids_to_keep = [p.get_pool_id() for p in consumed_pools.filter_pools(regexp)] pool_ids_to_keep.extend([p.get_pool_id() for p in consumed_pools.filter_products(regexp)]) serials_to_remove = [p.Serial for p in consumed_pools if p.get_pool_id() not in pool_ids_to_keep] serials = self.unsubscribe(serials=serials_to_remove) subscribed_pool_ids = self.subscribe(regexp) if subscribed_pool_ids or serials: changed = True return {'changed': changed, 'subscribed_pool_ids': subscribed_pool_ids, 'unsubscribed_serials': serials} def update_subscriptions_by_pool_ids(self, pool_ids): changed = False consumed_pools 
= RhsmPools(self.module, consumed=True) existing_pools = {} for p in consumed_pools: existing_pools[p.get_pool_id()] = p.QuantityUsed serials_to_remove = [p.Serial for p in consumed_pools if pool_ids.get(p.get_pool_id(), 0) != p.QuantityUsed] serials = self.unsubscribe(serials=serials_to_remove) missing_pools = {} for pool_id, quantity in pool_ids.items(): if existing_pools.get(pool_id, 0) != quantity: missing_pools[pool_id] = quantity self.subscribe_by_pool_ids(missing_pools) if missing_pools or serials: changed = True return {'changed': changed, 'subscribed_pool_ids': missing_pools.keys(), 'unsubscribed_serials': serials} class RhsmPool(object): ''' Convenience class for housing subscription information ''' def __init__(self, module, **kwargs): self.module = module for k, v in kwargs.items(): setattr(self, k, v) def __str__(self): return str(self.__getattribute__('_name')) def get_pool_id(self): return getattr(self, 'PoolId', getattr(self, 'PoolID')) def subscribe(self): args = "subscription-manager subscribe --pool %s" % self.get_pool_id() rc, stdout, stderr = self.module.run_command(args, check_rc=True) if rc == 0: return True else: return False class RhsmPools(object): """ This class is used for manipulating pools subscriptions with RHSM """ def __init__(self, module, consumed=False): self.module = module self.products = self._load_product_list(consumed) def __iter__(self): return self.products.__iter__() def _load_product_list(self, consumed=False): """ Loads list of all available or consumed pools for system in data structure Args: consumed(bool): if True list consumed pools, else list available pools (default False) """ args = "subscription-manager list" if consumed: args += " --consumed" else: args += " --available" rc, stdout, stderr = self.module.run_command(args, check_rc=True) products = [] for line in stdout.split('\n'): # Remove leading+trailing whitespace line = line.strip() # An empty line implies the end of a output group if len(line) == 0: 
continue # If a colon ':' is found, parse elif ':' in line: (key, value) = line.split(':', 1) key = key.strip().replace(" ", "") # To unify value = value.strip() if key in ['ProductName', 'SubscriptionName']: # Remember the name for later processing products.append(RhsmPool(self.module, _name=value, key=value)) elif products: # Associate value with most recently recorded product products[-1].__setattr__(key, value) # FIXME - log some warning? # else: # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value)) return products def filter_pools(self, regexp='^$'): ''' Return a list of RhsmPools whose pool id matches the provided regular expression ''' r = re.compile(regexp) for product in self.products: if r.search(product.get_pool_id()): yield product def filter_products(self, regexp='^$'): ''' Return a list of RhsmPools whose product name matches the provided regular expression ''' r = re.compile(regexp) for product in self.products: if r.search(product._name): yield product def main(): # Load RHSM configuration from file rhsm = Rhsm(None) module = AnsibleModule( argument_spec=dict( state=dict(default='present', choices=['present', 'absent']), username=dict(default=None, required=False), password=dict(default=None, required=False, no_log=True), server_hostname=dict(default=rhsm.config.get_option('server.hostname'), required=False), server_insecure=dict(default=rhsm.config.get_option('server.insecure'), required=False), rhsm_baseurl=dict(default=rhsm.config.get_option('rhsm.baseurl'), required=False), autosubscribe=dict(default=False, type='bool'), activationkey=dict(default=None, required=False), org_id=dict(default=None, required=False), environment=dict(default=None, required=False, type='str'), pool=dict(default='^$', required=False, type='str'), pool_ids=dict(default=[], required=False, type='list'), consumer_type=dict(default=None, required=False), consumer_name=dict(default=None, required=False), consumer_id=dict(default=None, required=False), 
force_register=dict(default=False, type='bool'), ), required_together=[['username', 'password'], ['activationkey', 'org_id']], mutually_exclusive=[['username', 'activationkey'], ['pool', 'pool_ids']], required_if=[['state', 'present', ['username', 'activationkey'], True]], ) rhsm.module = module state = module.params['state'] username = module.params['username'] password = module.params['password'] server_hostname = module.params['server_hostname'] server_insecure = module.params['server_insecure'] rhsm_baseurl = module.params['rhsm_baseurl'] autosubscribe = module.params['autosubscribe'] activationkey = module.params['activationkey'] org_id = module.params['org_id'] environment = module.params['environment'] pool = module.params['pool'] pool_ids = {} for value in module.params['pool_ids']: if isinstance(value, dict): if len(value) != 1: module.fail_json(msg='Unable to parse pool_ids option.') pool_id, quantity = value.items()[0] else: pool_id, quantity = value, 1 pool_ids[pool_id] = str(quantity) consumer_type = module.params["consumer_type"] consumer_name = module.params["consumer_name"] consumer_id = module.params["consumer_id"] force_register = module.params["force_register"] global SUBMAN_CMD SUBMAN_CMD = module.get_bin_path('subscription-manager', True) # Ensure system is registered if state == 'present': # Register system if rhsm.is_registered and not force_register: if pool != '^$' or pool_ids: try: if pool_ids: result = rhsm.update_subscriptions_by_pool_ids(pool_ids) else: result = rhsm.update_subscriptions(pool) except Exception: e = get_exception() module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, e)) else: module.exit_json(**result) else: module.exit_json(changed=False, msg="System already registered.") else: try: rhsm.enable() rhsm.configure(**module.params) rhsm.register(username, password, autosubscribe, activationkey, org_id, consumer_type, consumer_name, consumer_id, force_register, environment, rhsm_baseurl, 
server_insecure) if pool_ids: subscribed_pool_ids = rhsm.subscribe_by_pool_ids(pool_ids) else: subscribed_pool_ids = rhsm.subscribe(pool) except Exception: e = get_exception() module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, e)) else: module.exit_json(changed=True, msg="System successfully registered to '%s'." % server_hostname, subscribed_pool_ids=subscribed_pool_ids) # Ensure system is *not* registered if state == 'absent': if not rhsm.is_registered: module.exit_json(changed=False, msg="System already unregistered.") else: try: rhsm.unsubscribe() rhsm.unregister() except Exception: e = get_exception() module.fail_json(msg="Failed to unregister: %s" % e) else: module.exit_json(changed=True, msg="System successfully unregistered from %s." % server_hostname) if __name__ == '__main__': main()
gpl-3.0
pfmooney/dd-agent
checks.d/apache.py
27
3695
# stdlib
import urlparse

# 3rd party
import requests

# project
from checks import AgentCheck
from util import headers


class Apache(AgentCheck):
    """Tracks basic connection/requests/workers metrics

    See http://httpd.apache.org/docs/2.2/mod/mod_status.html for more details
    """
    # mod_status field name -> agent metric name, submitted as gauges
    # (instantaneous values read straight off the status page).
    GAUGES = {
        'IdleWorkers': 'apache.performance.idle_workers',
        'BusyWorkers': 'apache.performance.busy_workers',
        'CPULoad': 'apache.performance.cpu_load',
        'Uptime': 'apache.performance.uptime',
        'Total kBytes': 'apache.net.bytes',
        'Total Accesses': 'apache.net.hits',
    }

    # mod_status field name -> agent metric name, submitted as rates
    # (the agent derives a per-second rate from successive raw values).
    RATES = {
        'Total kBytes': 'apache.net.bytes_per_s',
        'Total Accesses': 'apache.net.request_per_s'
    }

    def __init__(self, name, init_config, agentConfig, instances=None):
        AgentCheck.__init__(self, name, init_config, agentConfig, instances)
        # Maps a configured status URL to the '?auto' variant that was found
        # to actually return metrics (see the fallback at the end of check()).
        self.assumed_url = {}

    def check(self, instance):
        """Fetch the mod_status page for one configured instance and submit
        its gauges/rates plus an 'apache.can_connect' service check.

        Raises if 'apache_status_url' is missing from the instance config,
        if the HTTP request fails, or if the page yields no recognizable
        metrics even after retrying with the '?auto' suffix.
        """
        if 'apache_status_url' not in instance:
            raise Exception("Missing 'apache_status_url' in Apache config")

        # Prefer a previously discovered working ('?auto') URL, if any.
        url = self.assumed_url.get(instance['apache_status_url'], instance['apache_status_url'])

        tags = instance.get('tags', [])

        # Optional HTTP basic auth.
        auth = None
        if 'apache_user' in instance and 'apache_password' in instance:
            auth = (instance['apache_user'], instance['apache_password'])

        # Submit a service check for status page availability.
        parsed_url = urlparse.urlparse(url)
        apache_host = parsed_url.hostname
        apache_port = parsed_url.port or 80
        service_check_name = 'apache.can_connect'
        service_check_tags = ['host:%s' % apache_host, 'port:%s' % apache_port]
        try:
            r = requests.get(url, auth=auth, headers=headers(self.agentConfig))
            r.raise_for_status()
        except Exception:
            self.service_check(service_check_name, AgentCheck.CRITICAL,
                               tags=service_check_tags)
            raise
        else:
            self.service_check(service_check_name, AgentCheck.OK,
                               tags=service_check_tags)

        response = r.content

        metric_count = 0
        # Loop through and extract the numerical values
        # (the '?auto' format is one 'Key: value' pair per line).
        for line in response.splitlines():
            values = line.split(': ')
            if len(values) == 2:  # match
                metric, value = values
                try:
                    value = float(value)
                except ValueError:
                    continue

                # Special case: kBytes => bytes
                if metric == 'Total kBytes':
                    value = value * 1024

                # Send metric as a gauge, if applicable
                if metric in self.GAUGES:
                    metric_count += 1
                    metric_name = self.GAUGES[metric]
                    self.gauge(metric_name, value, tags=tags)

                # Send metric as a rate, if applicable
                if metric in self.RATES:
                    metric_count += 1
                    metric_name = self.RATES[metric]
                    self.rate(metric_name, value, tags=tags)

        if metric_count == 0:
            # Nothing parsed: the URL probably served the human-readable HTML
            # status page. Retry once with the machine-readable '?auto' form,
            # remembering the rewritten URL for subsequent runs; if that was
            # already tried, give up.
            if self.assumed_url.get(instance['apache_status_url'], None) is None and url[-5:] != '?auto':
                self.assumed_url[instance['apache_status_url']] = '%s?auto' % url
                self.warning("Assuming url was not correct. Trying to add ?auto suffix to the url")
                self.check(instance)
            else:
                raise Exception("No metrics were fetched for this instance. Make sure that %s is the proper url." % instance['apache_status_url'])
bsd-3-clause
rschwiebert/galgebra
examples/LaTeX/simple_check_latex.py
1
1067
from printer import xpdf,Get_Program,Print_Function,Format from ga import Ga def basic_multivector_operations_3D(): #Print_Function() (g3d,ex,ey,ez) = Ga.build('e*x|y|z') print 'g_{ij} =',g3d.g A = g3d.mv('A','mv') A.Fmt(1,'A') A.Fmt(2,'A') A.Fmt(3,'A') A.even().Fmt(1,'%A_{+}') A.odd().Fmt(1,'%A_{-}') X = g3d.mv('X','vector') Y = g3d.mv('Y','vector') X.Fmt(1,'X') Y.Fmt(1,'Y') (X*Y).Fmt(2,'X*Y') (X^Y).Fmt(2,'X^Y') (X|Y).Fmt(2,'X|Y') return def basic_multivector_operations_2D(): #Print_Function() (g2d,ex,ey) = Ga.build('e*x|y') print 'g_{ij} =',g2d.g X = g2d.mv('X','vector') A = g2d.mv('A','spinor') X.Fmt(1,'X') A.Fmt(1,'A') (X|A).Fmt(2,'X|A') (X<A).Fmt(2,'X<A') (A>X).Fmt(2,'A>X') return def dummy(): return def main(): #Get_Program(True) Format(ipy=True) basic_multivector_operations_3D() basic_multivector_operations_2D() #xpdf('simple_test_latex.tex') return if __name__ == "__main__": main()
bsd-3-clause
boriel/zxbasic
src/symbols/boundlist.py
1
1333
#!/usr/bin/python # -*- coding: utf-8 -*- # vim: ts=4:et:sw=4: # ---------------------------------------------------------------------- # Copyleft (K), Jose M. Rodriguez-Rosa (a.k.a. Boriel) # # This program is Free Software and is released under the terms of # the GNU General License # ---------------------------------------------------------------------- from .symbol_ import Symbol from .bound import SymbolBOUND class SymbolBOUNDLIST(Symbol): """ Defines a bound list for an array """ def __init__(self, *bounds): for bound in bounds: assert isinstance(bound, SymbolBOUND) super(SymbolBOUNDLIST, self).__init__(*bounds) def __len__(self): # Number of bounds: return len(self.children) def __getitem__(self, key): return self.children[key] def __str__(self): return '(%s)' % ', '.join(str(x) for x in self) @classmethod def make_node(cls, node, *args): """ Creates an array BOUND LIST. """ if node is None: return cls.make_node(SymbolBOUNDLIST(), *args) if node.token != 'BOUNDLIST': return cls.make_node(None, node, *args) for arg in args: if arg is None: continue node.appendChild(arg) return node
gpl-3.0
zhakui/oryol
fips-generators/util/metalcompiler.py
7
5521
'''
Python wrapper for metal shader compiler.
'''
import subprocess
import tempfile
import platform
import os
import sys
import binascii

import genutil as util

# FIXME: different platform-root for OSX and iOS!
platform_roots = [
    '/Applications/Xcode-beta.app/Contents/Developer/Platforms/MacOSX.platform/',
    '/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/'
]
rel_sys_root = 'Developer/SDKs/MacOSX10.11.sdk'

#-------------------------------------------------------------------------------
def get_sys_root() :
    '''Return the first existing OSX SDK sysroot path, or None if no
    known Xcode installation is present.'''
    for platform_root in platform_roots :
        sys_root_path = platform_root + rel_sys_root
        if os.path.isdir(sys_root_path) :
            return sys_root_path
    return None

#-------------------------------------------------------------------------------
def get_tool(tool_name) :
    '''Return the absolute path of an Xcode toolchain binary (e.g. 'metal',
    'metal-ar', 'metallib'), or None if it cannot be found.'''
    for platform_root in platform_roots :
        tool_path = platform_root + 'usr/bin/' + tool_name
        if os.path.isfile(tool_path) :
            return tool_path
    return None

#-------------------------------------------------------------------------------
def writeFile(f, lines) :
    '''
    Write an array of lines to a file.

    Each element of *lines* is expected to have a .content attribute
    (see the line objects produced by the shader generator).
    '''
    for line in lines :
        f.write(line.content + '\n')

#-------------------------------------------------------------------------------
def run(cmd) :
    '''Run a generic command (list of args) and capture its stderr output,
    which is where the metal toolchain writes its diagnostics.'''
    print(cmd)
    child = subprocess.Popen(cmd, stderr=subprocess.PIPE)
    out = ''
    while True :
        out += child.stderr.read()
        if child.poll() != None :
            break
    return out

#-------------------------------------------------------------------------------
def cc(in_src, out_dia, out_air) :
    '''Run the metal compiler on a .metal source file, producing a serialized
    diagnostics file (out_dia) and an LLVM .air object (out_air). Returns
    the captured compiler output.'''
    cmd = [get_tool('metal'), '-arch', 'air64', '-emit-llvm', '-ffast-math', '-c',
           '-isysroot', get_sys_root(),
           '-serialize-diagnostics', out_dia,
           '-o', out_air,
           '-mmacosx-version-min=10.11', '-std=osx-metal1.1', in_src]
    return run(cmd)

#-------------------------------------------------------------------------------
def ar(in_air, out_lib) :
    '''Run the metal librarian to archive an .air object into a library.'''
    cmd = [get_tool('metal-ar'), 'r', out_lib, in_air]
    return run(cmd)

#-------------------------------------------------------------------------------
def link(in_lib, out_bin) :
    '''Run the metal linker to produce the final .metallib binary.'''
    cmd = [get_tool('metallib'), '-o', out_bin, in_lib]
    return run(cmd)

#-------------------------------------------------------------------------------
def parseOutput(output, lines) :
    '''Parse metal compiler diagnostics, map each message back to its
    original source location (via the generated *lines* array) and report
    it through genutil. Exits the process on compile errors.'''
    hasError = False
    hasWarnings = False
    outLines = output.splitlines()
    for outLine in outLines :
        if 'error:' in outLine or 'warning:' in outLine or 'note:' in outLine:
            # diagnostic format: path:line:col: type: message
            tokens = outLine.split(':')
            metalSrcPath = tokens[0]
            lineNr = int(tokens[1])
            colNr = int(tokens[2])
            msgType = tokens[3]
            msg = tokens[4]
            if msgType == ' error':
                hasError = True
            if msgType == ' warning':
                # BUGFIX: was 'hasWarning = True' (typo), which assigned a
                # new unused name and left hasWarnings permanently False.
                hasWarnings = True
            # map to original location
            lineIndex = lineNr - 1
            if lineIndex >= len(lines) :
                lineIndex = len(lines) - 1
            srcPath = lines[lineIndex].path
            srcLineNr = lines[lineIndex].lineNumber
            # and output...
            util.setErrorLocation(srcPath, srcLineNr)
            util.fmtError(msg, False)
    if hasError :
        for outLine in outLines :
            print(outLine)
        for line in lines :
            print(line.content)
        sys.exit(10)

#-------------------------------------------------------------------------------
def writeBinHeader(in_bin, out_hdr, c_name) :
    '''
    Write the metallib binary data into a C header file.
    '''
    # NOTE(review): this indexes the result of binascii.hexlify() character
    # by character, which assumes Python 2 str semantics (on Python 3,
    # indexing bytes yields ints) -- confirm before porting.
    with open(in_bin, 'rb') as in_file :
        data = in_file.read()
    hexdata = binascii.hexlify(data)
    with open(out_hdr, 'w') as out_file :
        out_file.write('#pragma once\n')
        out_file.write('static const unsigned char {}[] = {{\n'.format(c_name))
        for i in range(0, len(data)) :
            out_file.write('0x{}{},'.format(hexdata[i*2], hexdata[i*2+1]))
            if (i % 16) == 15 :
                out_file.write('\n')
        out_file.write('\n};\n')

#-------------------------------------------------------------------------------
def validate(lines, outPath, c_name) :
    '''Compile the generated metal shader source through the full toolchain
    (metal -> metal-ar -> metallib) and embed the resulting binary as a C
    header next to *outPath*. Emits warnings and returns early if the
    toolchain is not installed.'''
    # test if tools exists
    if not get_tool('metal') :
        util.fmtWarning('metal compiler not found\n')
        return
    if not get_tool('metal-ar') :
        util.fmtWarning('metal librarian not found\n')
        return
    if not get_tool('metallib') :
        util.fmtWarning('metal linker not found\n')
        return

    # filenames derived from the output path
    rootPath = os.path.splitext(outPath)[0]
    metal_src_path = rootPath + '.metal'
    metal_dia_path = rootPath + '.dia'
    metal_air_path = rootPath + '.air'
    metal_lib_path = rootPath + '.metal-ar'
    metal_bin_path = rootPath + '.metallib'
    c_header_path = rootPath + '.metallib.h'

    # write metal source file
    with open(metal_src_path, 'w') as f :
        writeFile(f, lines)

    # compile .metal source file, then archive, link and embed
    output = cc(metal_src_path, metal_dia_path, metal_air_path)
    parseOutput(output, lines)
    output += ar(metal_air_path, metal_lib_path)
    output += link(metal_lib_path, metal_bin_path)
    writeBinHeader(metal_bin_path, c_header_path, c_name)
mit
tinkhaven-organization/odoo
addons/crm_profiling/__init__.py
438
1089
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import crm_profiling import wizard # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
Gussy/bigcouch
couchjs/scons/scons-local-2.0.1/SCons/Tool/install.py
61
8071
"""SCons.Tool.install Tool-specific initialization for the install tool. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/install.py 5134 2010/08/16 23:02:40 bdeegan" import os import shutil import stat import SCons.Action from SCons.Util import make_path_relative # # We keep track of *all* installed files. _INSTALLED_FILES = [] _UNIQUE_INSTALLED_FILES = None # # Functions doing the actual work of the Install Builder. 
# def copyFunc(dest, source, env): """Install a source file or directory into a destination by copying, (including copying permission/mode bits).""" if os.path.isdir(source): if os.path.exists(dest): if not os.path.isdir(dest): raise SCons.Errors.UserError("cannot overwrite non-directory `%s' with a directory `%s'" % (str(dest), str(source))) else: parent = os.path.split(dest)[0] if not os.path.exists(parent): os.makedirs(parent) shutil.copytree(source, dest) else: shutil.copy2(source, dest) st = os.stat(source) os.chmod(dest, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE) return 0 def installFunc(target, source, env): """Install a source file into a target using the function specified as the INSTALL construction variable.""" try: install = env['INSTALL'] except KeyError: raise SCons.Errors.UserError('Missing INSTALL construction variable.') assert len(target)==len(source), \ "Installing source %s into target %s: target and source lists must have same length."%(list(map(str, source)), list(map(str, target))) for t,s in zip(target,source): if install(t.get_path(),s.get_path(),env): return 1 return 0 def stringFunc(target, source, env): installstr = env.get('INSTALLSTR') if installstr: return env.subst_target_source(installstr, 0, target, source) target = str(target[0]) source = str(source[0]) if os.path.isdir(source): type = 'directory' else: type = 'file' return 'Install %s: "%s" as "%s"' % (type, source, target) # # Emitter functions # def add_targets_to_INSTALLED_FILES(target, source, env): """ an emitter that adds all target files to the list stored in the _INSTALLED_FILES global variable. This way all installed files of one scons call will be collected. """ global _INSTALLED_FILES, _UNIQUE_INSTALLED_FILES _INSTALLED_FILES.extend(target) _UNIQUE_INSTALLED_FILES = None return (target, source) class DESTDIR_factory(object): """ a node factory, where all files will be relative to the dir supplied in the constructor. 
""" def __init__(self, env, dir): self.env = env self.dir = env.arg2nodes( dir, env.fs.Dir )[0] def Entry(self, name): name = make_path_relative(name) return self.dir.Entry(name) def Dir(self, name): name = make_path_relative(name) return self.dir.Dir(name) # # The Builder Definition # install_action = SCons.Action.Action(installFunc, stringFunc) installas_action = SCons.Action.Action(installFunc, stringFunc) BaseInstallBuilder = None def InstallBuilderWrapper(env, target=None, source=None, dir=None, **kw): if target and dir: import SCons.Errors raise SCons.Errors.UserError("Both target and dir defined for Install(), only one may be defined.") if not dir: dir=target import SCons.Script install_sandbox = SCons.Script.GetOption('install_sandbox') if install_sandbox: target_factory = DESTDIR_factory(env, install_sandbox) else: target_factory = env.fs try: dnodes = env.arg2nodes(dir, target_factory.Dir) except TypeError: raise SCons.Errors.UserError("Target `%s' of Install() is a file, but should be a directory. Perhaps you have the Install() arguments backwards?" % str(dir)) sources = env.arg2nodes(source, env.fs.Entry) tgt = [] for dnode in dnodes: for src in sources: # Prepend './' so the lookup doesn't interpret an initial # '#' on the file name portion as meaning the Node should # be relative to the top-level SConstruct directory. 
target = env.fs.Entry('.'+os.sep+src.name, dnode) #tgt.extend(BaseInstallBuilder(env, target, src, **kw)) tgt.extend(BaseInstallBuilder(env, target, src, **kw)) return tgt def InstallAsBuilderWrapper(env, target=None, source=None, **kw): result = [] for src, tgt in map(lambda x, y: (x, y), source, target): #result.extend(BaseInstallBuilder(env, tgt, src, **kw)) result.extend(BaseInstallBuilder(env, tgt, src, **kw)) return result added = None def generate(env): from SCons.Script import AddOption, GetOption global added if not added: added = 1 AddOption('--install-sandbox', dest='install_sandbox', type="string", action="store", help='A directory under which all installed files will be placed.') global BaseInstallBuilder if BaseInstallBuilder is None: install_sandbox = GetOption('install_sandbox') if install_sandbox: target_factory = DESTDIR_factory(env, install_sandbox) else: target_factory = env.fs BaseInstallBuilder = SCons.Builder.Builder( action = install_action, target_factory = target_factory.Entry, source_factory = env.fs.Entry, multi = 1, emitter = [ add_targets_to_INSTALLED_FILES, ], name = 'InstallBuilder') env['BUILDERS']['_InternalInstall'] = InstallBuilderWrapper env['BUILDERS']['_InternalInstallAs'] = InstallAsBuilderWrapper # We'd like to initialize this doing something like the following, # but there isn't yet support for a ${SOURCE.type} expansion that # will print "file" or "directory" depending on what's being # installed. For now we punt by not initializing it, and letting # the stringFunc() that we put in the action fall back to the # hand-crafted default string if it's not set. # #try: # env['INSTALLSTR'] #except KeyError: # env['INSTALLSTR'] = 'Install ${SOURCE.type}: "$SOURCES" as "$TARGETS"' try: env['INSTALL'] except KeyError: env['INSTALL'] = copyFunc def exists(env): return 1 # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
apache-2.0
AnishShah/tensorflow
tensorflow/contrib/resampler/python/ops/resampler_ops_test.py
52
10476
# pylint: disable=g-bad-file-header # Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for contrib.resampler.python.ops.resampler_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.contrib import resampler from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors_impl from tensorflow.python.ops import array_ops from tensorflow.python.platform import test def _bilinearly_interpolate(data, x, y): """Performs bilinenar interpolation of grid data at user defined coordinates. This interpolation function: a) implicitly pads the input data with 0s. b) returns 0 when sampling outside the (padded) image. The effect is that the sampled signal smoothly goes to 0 outside the original input domain, rather than producing a jump discontinuity at the image boundaries. Args: data: numpy array of shape `[data_height, data_width]` containing data samples assumed to be defined at the corresponding pixel coordinates. x: numpy array of shape `[warp_height, warp_width]` containing x coordinates at which interpolation will be performed. 
y: numpy array of shape `[warp_height, warp_width]` containing y coordinates at which interpolation will be performed. Returns: Numpy array of shape `[warp_height, warp_width]` containing interpolated values. """ shape = x.shape x = np.asarray(x) + 1 y = np.asarray(y) + 1 data = np.lib.pad(data, 1, "constant", constant_values=0) x_0 = np.floor(x).astype(int) x_1 = x_0 + 1 y_0 = np.floor(y).astype(int) y_1 = y_0 + 1 x_0 = np.clip(x_0, 0, data.shape[1] - 1) x_1 = np.clip(x_1, 0, data.shape[1] - 1) y_0 = np.clip(y_0, 0, data.shape[0] - 1) y_1 = np.clip(y_1, 0, data.shape[0] - 1) i_a = data[y_0, x_0] i_b = data[y_1, x_0] i_c = data[y_0, x_1] i_d = data[y_1, x_1] w_a = (x_1 - x) * (y_1 - y) w_b = (x_1 - x) * (y - y_0) w_c = (x - x_0) * (y_1 - y) w_d = (x - x_0) * (y - y_0) samples = (w_a * i_a + w_b * i_b + w_c * i_c + w_d * i_d) samples.reshape(shape) return samples def _make_warp(batch_size, warp_height, warp_width, dtype): """Creates batch of warping coordinates.""" x, y = np.meshgrid(np.linspace(0, warp_width - 1, warp_width), np.linspace(0, warp_height - 1, warp_height)) warp = np.concatenate((x.reshape([warp_height, warp_width, 1]), y.reshape([warp_height, warp_width, 1])), 2) warp = np.tile(warp.reshape([1, warp_height, warp_width, 2]), [batch_size, 1, 1, 1]) warp += np.random.randn(*warp.shape) return warp.astype(dtype) class ResamplerTest(test.TestCase): def test_op_forward_pass_gpu_float32(self): self._test_op_forward_pass(True, dtypes.float32, 1e-4) def test_op_forward_pass_gpu_float64(self): self._test_op_forward_pass(True, dtypes.float64, 1e-5) def test_op_forward_pass_cpu_float16(self): self._test_op_forward_pass(False, dtypes.float16, 1e-2) def test_op_forward_pass_cpu_float32(self): self._test_op_forward_pass(False, dtypes.float32, 1e-4) def test_op_forward_pass_cpu_float64(self): self._test_op_forward_pass(False, dtypes.float64, 1e-5) def test_op_backward_pass_gpu_float32(self): self._test_op_backward_pass(True, dtypes.float32, 1e-3) def 
test_op_backward_pass_cpu_float16(self): self._test_op_backward_pass(False, dtypes.float16, 1e-3) def test_op_backward_pass_cpu_float32(self): self._test_op_backward_pass(False, dtypes.float32, 1e-4) def test_op_backward_pass_cpu_float64(self): self._test_op_backward_pass(False, dtypes.float64, 1e-6) def _test_op_forward_pass(self, on_gpu, dtype, tol): np.random.seed(0) data_width = 7 data_height = 9 data_channels = 5 warp_width = 4 warp_height = 8 batch_size = 10 warp = _make_warp(batch_size, warp_height, warp_width, dtype.as_numpy_dtype) data_shape = (batch_size, data_height, data_width, data_channels) data = np.random.rand(*data_shape).astype(dtype.as_numpy_dtype) with self.test_session(use_gpu=on_gpu, force_gpu=False) as sess: data_ph = array_ops.placeholder(dtype, shape=(None,) + data.shape[1:]) warp_ph = array_ops.placeholder(dtype, shape=(None,) + warp.shape[1:]) outputs = resampler.resampler(data=data_ph, warp=warp_ph) self.assertEqual(outputs.get_shape().as_list(), [None, warp_height, warp_width, data_channels]) out = sess.run(outputs, feed_dict={data_ph: data, warp_ph: warp}) # Generate reference output via bilinear interpolation in numpy reference_output = np.zeros_like(out) for batch in xrange(batch_size): for c in xrange(data_channels): reference_output[batch, :, :, c] = _bilinearly_interpolate( data[batch, :, :, c], warp[batch, :, :, 0], warp[batch, :, :, 1]) self.assertAllClose(out, reference_output, rtol=tol, atol=tol) def _test_op_backward_pass(self, on_gpu, dtype, tol): np.random.seed(13) data_width = 5 data_height = 4 data_channels = 3 warp_width = 2 warp_height = 6 batch_size = 3 warp = _make_warp(batch_size, warp_height, warp_width, dtype.as_numpy_dtype) data_shape = (batch_size, data_height, data_width, data_channels) data = np.random.rand(*data_shape).astype(dtype.as_numpy_dtype) with self.test_session(use_gpu=on_gpu, force_gpu=False): data_tensor = constant_op.constant(data) warp_tensor = constant_op.constant(warp) output_tensor = 
resampler.resampler(data=data_tensor, warp=warp_tensor) grads = test.compute_gradient([data_tensor, warp_tensor], [ data_tensor.get_shape().as_list(), warp_tensor.get_shape().as_list() ], output_tensor, output_tensor.get_shape().as_list(), [data, warp]) if not on_gpu: # On CPU we perform numerical differentiation at the best available # precision, and compare against that. This is necessary for test to # pass for float16. data_tensor_64 = constant_op.constant(data, dtype=dtypes.float64) warp_tensor_64 = constant_op.constant(warp, dtype=dtypes.float64) output_tensor_64 = resampler.resampler(data=data_tensor_64, warp=warp_tensor_64) grads_64 = test.compute_gradient([data_tensor_64, warp_tensor_64], [ data_tensor.get_shape().as_list(), warp_tensor.get_shape().as_list() ], output_tensor_64, output_tensor.get_shape().as_list(), [data, warp]) for g, g_64 in zip(grads, grads_64): self.assertLess(np.fabs(g[0] - g_64[1]).max(), tol) else: for g in grads: self.assertLess(np.fabs(g[0] - g[1]).max(), tol) def test_op_errors(self): data_width = 7 data_height = 9 data_depth = 3 data_channels = 5 warp_width = 4 warp_height = 8 batch_size = 10 # Input data shape is not defined over a 2D grid, i.e. its shape is not like # (batch_size, data_height, data_width, data_channels). with self.test_session() as sess: data_shape = (batch_size, data_height, data_width, data_depth, data_channels) data = np.zeros(data_shape) warp_shape = (batch_size, warp_height, warp_width, 2) warp = np.zeros(warp_shape) outputs = resampler.resampler(constant_op.constant(data), constant_op.constant(warp)) with self.assertRaisesRegexp(errors_impl.UnimplementedError, "Only bilinear interpolation is currently " "supported."): sess.run(outputs) # Warp tensor must be at least a matrix, with shape [batch_size, 2]. 
with self.test_session() as sess: data_shape = (batch_size, data_height, data_width, data_channels) data = np.zeros(data_shape) warp_shape = (batch_size,) warp = np.zeros(warp_shape) outputs = resampler.resampler(constant_op.constant(data), constant_op.constant(warp)) with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, "warp should be at least a matrix"): sess.run(outputs) # The batch size of the data and warp tensors must be the same. with self.test_session() as sess: data_shape = (batch_size, data_height, data_width, data_channels) data = np.zeros(data_shape) warp_shape = (batch_size+1, warp_height, warp_width, 2) warp = np.zeros(warp_shape) outputs = resampler.resampler(constant_op.constant(data), constant_op.constant(warp)) with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, "Batch size of data and warp tensor"): sess.run(outputs) # The warp tensor must contain 2D coordinates, i.e. its shape last dimension # must be 2. with self.test_session() as sess: data_shape = (batch_size, data_height, data_width, data_channels) data = np.zeros(data_shape) warp_shape = (batch_size, warp_height, warp_width, 3) warp = np.zeros(warp_shape) outputs = resampler.resampler(constant_op.constant(data), constant_op.constant(warp)) with self.assertRaisesRegexp(errors_impl.UnimplementedError, "Only bilinear interpolation is supported, " "warping"): sess.run(outputs) if __name__ == "__main__": test.main()
apache-2.0
kikusu/chainer
tests/cupy_tests/math_tests/test_trigonometric.py
24
1474
import unittest from cupy import testing @testing.gpu class TestTrigonometric(unittest.TestCase): _multiprocess_can_split_ = True @testing.for_all_dtypes() @testing.numpy_cupy_allclose(atol=1e-5) def check_unary(self, name, xp, dtype): a = testing.shaped_arange((2, 3), xp, dtype) return getattr(xp, name)(a) @testing.for_all_dtypes() @testing.numpy_cupy_allclose(atol=1e-5) def check_binary(self, name, xp, dtype): a = testing.shaped_arange((2, 3), xp, dtype) b = testing.shaped_reverse_arange((2, 3), xp, dtype) return getattr(xp, name)(a, b) @testing.for_dtypes(['e', 'f', 'd']) @testing.numpy_cupy_allclose(atol=1e-5) def check_unary_unit(self, name, xp, dtype): a = xp.array([0.2, 0.4, 0.6, 0.8], dtype=dtype) return getattr(xp, name)(a) def test_sin(self): self.check_unary('sin') def test_cos(self): self.check_unary('cos') def test_tan(self): self.check_unary('tan') def test_arcsin(self): self.check_unary_unit('arcsin') def test_arccos(self): self.check_unary_unit('arccos') def test_arctan(self): self.check_unary('arctan') def test_arctan2(self): self.check_binary('arctan2') def test_hypot(self): self.check_binary('hypot') def test_deg2rad(self): self.check_unary('deg2rad') def test_rad2deg(self): self.check_unary('rad2deg')
mit
procangroup/edx-platform
openedx/core/djangoapps/theming/helpers_dirs.py
17
4754
""" Code which dynamically discovers comprehensive themes. Deliberately uses no Django settings, as the discovery happens during the initial setup of Django settings. """ import os from path import Path def get_theme_base_dirs_from_settings(theme_dirs=None): """ Return base directories that contains all the themes. Example: >> get_theme_base_dirs_from_settings('/edx/app/ecommerce/ecommerce/themes') ['/edx/app/ecommerce/ecommerce/themes'] Returns: (List of Paths): Base theme directory paths """ theme_base_dirs = [] if theme_dirs: theme_base_dirs.extend([Path(theme_dir) for theme_dir in theme_dirs]) return theme_base_dirs def get_themes_unchecked(themes_dirs, project_root=None): """ Returns a list of all themes known to the system. Args: themes_dirs (list): Paths to themes base directory project_root (str): (optional) Path to project root Returns: List of themes known to the system. """ themes_base_dirs = [Path(themes_dir) for themes_dir in themes_dirs] # pick only directories and discard files in themes directory themes = [] for themes_dir in themes_base_dirs: themes.extend([Theme(name, name, themes_dir, project_root) for name in get_theme_dirs(themes_dir)]) return themes def get_theme_dirs(themes_dir=None): """ Returns theme dirs in given dirs Args: themes_dir (Path): base dir that contains themes. """ return [_dir for _dir in os.listdir(themes_dir) if is_theme_dir(themes_dir / _dir)] def is_theme_dir(_dir): """ Returns true if given dir contains theme overrides. A theme dir must have subdirectory 'lms' or 'cms' or both. Args: _dir: directory path to check for a theme Returns: Returns true if given dir is a theme directory. 
""" theme_sub_directories = {'lms', 'cms'} return bool(os.path.isdir(_dir) and theme_sub_directories.intersection(os.listdir(_dir))) def get_project_root_name_from_settings(project_root): """ Return root name for the current project Example: >> get_project_root_name() 'lms' # from studio >> get_project_root_name() 'cms' Args: project_root (str): Root directory of the project. Returns: (str): component name of platform e.g lms, cms """ root = Path(project_root) if root.name == "": root = root.parent return root.name class Theme(object): """ class to encapsulate theme related information. """ name = '' theme_dir_name = '' themes_base_dir = None project_root = None def __init__(self, name='', theme_dir_name='', themes_base_dir=None, project_root=None): """ init method for Theme Args: name: name if the theme theme_dir_name: directory name of the theme themes_base_dir: directory path of the folder that contains the theme """ self.name = name self.theme_dir_name = theme_dir_name self.themes_base_dir = themes_base_dir self.project_root = project_root def __eq__(self, other): """ Returns True if given theme is same as the self Args: other: Theme object to compare with self Returns: (bool) True if two themes are the same else False """ return (self.theme_dir_name, self.path) == (other.theme_dir_name, other.path) def __hash__(self): return hash((self.theme_dir_name, self.path)) def __unicode__(self): return u"<Theme: {name} at '{path}'>".format(name=self.name, path=self.path) def __repr__(self): return self.__unicode__() @property def path(self): """ Get absolute path of the directory that contains current theme's templates, static assets etc. Returns: Path: absolute path to current theme's contents """ return Path(self.themes_base_dir) / self.theme_dir_name / get_project_root_name_from_settings(self.project_root) @property def template_path(self): """ Get absolute path of current theme's template directory. 
Returns: Path: absolute path to current theme's template directory """ return Path(self.theme_dir_name) / get_project_root_name_from_settings(self.project_root) / 'templates' @property def template_dirs(self): """ Get a list of all template directories for current theme. Returns: list: list of all template directories for current theme. """ return [ self.path / 'templates', ]
agpl-3.0
ryszard/doorman
simulation/utils.py
2
1693
# Copyright 2016 Google, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import time kLevel = logging.INFO # A local clock which drives the simulation. Always starts at zero and # returns times relative to the starting time. class _Clock(object): def __init__(self): self.time = 0 def get_time(self): return self.time def advance(self, n): assert n >= 0 self.time += n def set_time(self, t): # The clock can only move forward. assert t >= self.time self.time = t # A custom formatter to add the timestamp from the simulated clock. class _Formatter(logging.Formatter): def format(self, record): record.simulated_clock = "t=%04d" % clock.get_time() return super(_Formatter, self).format(record) # Creates a logger object. def _create_logger(): logger = logging.getLogger("simulation") logger.setLevel(kLevel) ch = logging.StreamHandler() ch.setLevel(kLevel) formatter = _Formatter("%(simulated_clock)s - %(levelname)s - %(message)s") ch.setFormatter(formatter) logger.addHandler(ch) return logger # Exports singleton objects for the clock and the logger. clock = _Clock() logger = _create_logger()
apache-2.0
tchernomax/ansible
lib/ansible/modules/cloud/google/gce_snapshot.py
122
6781
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: gce_snapshot version_added: "2.3" short_description: Create or destroy snapshots for GCE storage volumes description: - Manages snapshots for GCE instances. This module manages snapshots for the storage volumes of a GCE compute instance. If there are multiple volumes, each snapshot will be prepended with the disk name options: instance_name: description: - The GCE instance to snapshot required: True snapshot_name: description: - The name of the snapshot to manage disks: description: - A list of disks to create snapshots for. If none is provided, all of the volumes will be snapshotted default: all required: False state: description: - Whether a snapshot should be C(present) or C(absent) required: false default: present choices: [present, absent] service_account_email: description: - GCP service account email for the project where the instance resides required: true credentials_file: description: - The path to the credentials file associated with the service account required: true project_id: description: - The GCP project ID to use required: true requirements: - "python >= 2.6" - "apache-libcloud >= 0.19.0" author: Rob Wagner (@robwagner33) ''' EXAMPLES = ''' - name: Create gce snapshot gce_snapshot: instance_name: example-instance snapshot_name: example-snapshot state: present service_account_email: project_name@appspot.gserviceaccount.com credentials_file: /path/to/credentials project_id: project_name delegate_to: localhost - name: Delete gce snapshot gce_snapshot: instance_name: example-instance snapshot_name: example-snapshot state: absent service_account_email: 
project_name@appspot.gserviceaccount.com credentials_file: /path/to/credentials project_id: project_name delegate_to: localhost # This example creates snapshots for only two of the available disks as # disk0-example-snapshot and disk1-example-snapshot - name: Create snapshots of specific disks gce_snapshot: instance_name: example-instance snapshot_name: example-snapshot state: present disks: - disk0 - disk1 service_account_email: project_name@appspot.gserviceaccount.com credentials_file: /path/to/credentials project_id: project_name delegate_to: localhost ''' RETURN = ''' snapshots_created: description: List of newly created snapshots returned: When snapshots are created type: list sample: "[disk0-example-snapshot, disk1-example-snapshot]" snapshots_deleted: description: List of destroyed snapshots returned: When snapshots are deleted type: list sample: "[disk0-example-snapshot, disk1-example-snapshot]" snapshots_existing: description: List of snapshots that already existed (no-op) returned: When snapshots were already present type: list sample: "[disk0-example-snapshot, disk1-example-snapshot]" snapshots_absent: description: List of snapshots that were already absent (no-op) returned: When snapshots were already absent type: list sample: "[disk0-example-snapshot, disk1-example-snapshot]" ''' try: from libcloud.compute.types import Provider _ = Provider.GCE HAS_LIBCLOUD = True except ImportError: HAS_LIBCLOUD = False from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.gce import gce_connect def find_snapshot(volume, name): ''' Check if there is a snapshot already created with the given name for the passed in volume. 
Args: volume: A gce StorageVolume object to manage name: The name of the snapshot to look for Returns: The VolumeSnapshot object if one is found ''' found_snapshot = None snapshots = volume.list_snapshots() for snapshot in snapshots: if name == snapshot.name: found_snapshot = snapshot return found_snapshot def main(): module = AnsibleModule( argument_spec=dict( instance_name=dict(required=True), snapshot_name=dict(required=True), state=dict(choices=['present', 'absent'], default='present'), disks=dict(default=None, type='list'), service_account_email=dict(type='str'), credentials_file=dict(type='path'), project_id=dict(type='str') ) ) if not HAS_LIBCLOUD: module.fail_json(msg='libcloud with GCE support (0.19.0+) is required for this module') gce = gce_connect(module) instance_name = module.params.get('instance_name') snapshot_name = module.params.get('snapshot_name') disks = module.params.get('disks') state = module.params.get('state') json_output = dict( changed=False, snapshots_created=[], snapshots_deleted=[], snapshots_existing=[], snapshots_absent=[] ) snapshot = None instance = gce.ex_get_node(instance_name, 'all') instance_disks = instance.extra['disks'] for instance_disk in instance_disks: disk_snapshot_name = snapshot_name device_name = instance_disk['deviceName'] if disks is None or device_name in disks: volume_obj = gce.ex_get_volume(device_name) # If we have more than one disk to snapshot, prepend the disk name if len(instance_disks) > 1: disk_snapshot_name = device_name + "-" + disk_snapshot_name snapshot = find_snapshot(volume_obj, disk_snapshot_name) if snapshot and state == 'present': json_output['snapshots_existing'].append(disk_snapshot_name) elif snapshot and state == 'absent': snapshot.destroy() json_output['changed'] = True json_output['snapshots_deleted'].append(disk_snapshot_name) elif not snapshot and state == 'present': volume_obj.snapshot(disk_snapshot_name) json_output['changed'] = True 
json_output['snapshots_created'].append(disk_snapshot_name) elif not snapshot and state == 'absent': json_output['snapshots_absent'].append(disk_snapshot_name) module.exit_json(**json_output) if __name__ == '__main__': main()
gpl-3.0
pombredanne/invenio
modules/webcomment/lib/webcommentadminlib.py
1
28817
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

"""WebComment admin library: moderation views and DB queries for
comments and reviews attached to bibliographic records."""

__revision__ = "$Id$"

from invenio.config import CFG_SITE_LANG, CFG_SITE_URL, \
     CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN
from invenio.webcomment import query_get_comment, \
     get_reply_order_cache_data
from invenio.urlutils import wash_url_argument
from invenio.dbquery import run_sql
from invenio.messages import gettext_set_language, wash_language
from invenio.webuser import get_user_info, collect_user_info, \
     isUserAdmin
from invenio.access_control_engine import acc_authorize_action, \
     acc_get_authorized_emails
from invenio.search_engine import perform_request_search
import invenio.template
webcomment_templates = invenio.template.load('webcomment')


def getnavtrail(previous='', ln=CFG_SITE_LANG):
    """Return the admin navigation trail HTML with `previous` appended.

    @param previous: extra trail HTML to append after the Admin Area link
    @param ln: interface language
    """
    previous = wash_url_argument(previous, 'str')
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    navtrail = """<a class="navtrail" href="%s/help/admin">%s</a> """ % \
               (CFG_SITE_URL, _("Admin Area"))
    return navtrail + previous


def get_nb_reviews(recID):
    """Return the number of reviews (rows with star_score > 0) for recID."""
    res = run_sql("""SELECT count(*)
                     FROM cmtRECORDCOMMENT c
                     WHERE c.id_bibrec = %s and c.star_score > 0
                  """, (recID,))
    return res[0][0]


def get_nb_comments(recID):
    """Return the number of comments (rows with star_score = 0) for recID."""
    res = run_sql("""SELECT count(*)
                     FROM cmtRECORDCOMMENT c
                     WHERE c.id_bibrec = %s and c.star_score = 0
                  """, (recID,))
    return res[0][0]


def get_user_collections(req):
    """Return the list of collection names the requesting user moderates.

    Site admins (isUserAdmin) get every collection.
    """
    user_info = collect_user_info(req)
    res = []
    collections = run_sql('SELECT name FROM collection')
    for collection in collections:
        collection_emails = acc_get_authorized_emails('moderatecomments',
                                                      collection=collection[0])
        if user_info['email'] in collection_emails or isUserAdmin(user_info):
            res.append(collection[0])
    return res


def perform_request_index(ln=CFG_SITE_LANG):
    """Render the WebComment admin index page."""
    return webcomment_templates.tmpl_admin_index(ln=ln)


def perform_request_delete(comID=-1, recID=-1, uid=-1, reviews="",
                           ln=CFG_SITE_LANG):
    """Entry point of the comment/review deletion form.

    Depending on which of comID/recID is given, show the single comment,
    the comments of a record, or the empty selection form.

    @return: (body, errors, warnings) triple expected by the admin UI
    """
    # BUG FIX: the original did a bare 'from search_engine import
    # record_exists', inconsistent with every other import in this
    # module; import it from the invenio package explicitly.
    from invenio.search_engine import record_exists
    warnings = []
    ln = wash_language(ln)
    comID = wash_url_argument(comID, 'int')
    recID = wash_url_argument(recID, 'int')
    uid = wash_url_argument(uid, 'int')
    # parameter `reviews` is deduced from comID when needed
    if comID is None or recID is None or uid is None:
        return (webcomment_templates.tmpl_admin_delete_form(ln, warnings),
                None, warnings)
    if comID <= 0 and recID <= 0 and uid <= 0:
        # Nothing selected yet: show the form (warn on explicit bad comID).
        if comID != -1:
            warnings.append(("WRN_WEBCOMMENT_ADMIN_INVALID_COMID",))
        return (webcomment_templates.tmpl_admin_delete_form(ln, warnings),
                None, warnings)
    if comID > 0 and not recID > 0:
        comment = query_get_comment(comID)
        if comment:
            # Deduce from the row itself whether this is a review
            # (star_score column, index 5, is > 0) or a plain comment.
            c_star_score = 5
            reviews = 1 if comment[c_star_score] > 0 else 0
            return (perform_request_comments(ln=ln, comID=comID, recID=recID,
                                             reviews=reviews),
                    None, warnings)
        warnings.append(('WRN_WEBCOMMENT_ADMIN_COMID_INEXISTANT', comID))
        return (webcomment_templates.tmpl_admin_delete_form(ln, warnings),
                None, warnings)
    elif recID > 0:
        if record_exists(recID):
            comID = ''
            reviews = wash_url_argument(reviews, 'int')
            return (perform_request_comments(ln=ln, comID=comID, recID=recID,
                                             reviews=reviews),
                    None, warnings)
        warnings.append(('WRN_WEBCOMMENT_ADMIN_RECID_INEXISTANT', comID))
        return (webcomment_templates.tmpl_admin_delete_form(ln, warnings),
                None, warnings)
    return (webcomment_templates.tmpl_admin_delete_form(ln, warnings),
            None, warnings)


def perform_request_users(ln=CFG_SITE_LANG):
    """Render the page listing users who have been abuse-reported."""
    ln = wash_language(ln)
    users_data = query_get_users_reported()
    return webcomment_templates.tmpl_admin_users(ln=ln, users_data=users_data)


def query_get_users_reported():
    """Get the users who have been reported at least once.

    @return: tuple of ct, i.e. (ct, ct, ...)
        where ct is a tuple (total_number_reported, total_comments_reported,
        total_reviews_reported, total_nb_votes_yes_of_reported,
        total_nb_votes_total_of_reported, user_id, user_email, user_nickname)
        sorted by order of ct having highest total_number_reported
    """
    query1 = "SELECT c.nb_abuse_reports, c.nb_votes_yes, c.nb_votes_total, u.id, u.email, u.nickname, c.star_score " \
             "FROM user AS u, cmtRECORDCOMMENT AS c " \
             "WHERE c.id_user=u.id AND c.nb_abuse_reports > 0 " \
             "ORDER BY u.id "
    res1 = run_sql(query1)
    # BUG FIX: the original tested 'type(res1) is None', which can never
    # be true (type() always returns a type object); test the value itself.
    if res1 is None:
        return ()
    users = {}
    for cmt in res1:
        uid = int(cmt[3])
        if uid in users:
            # Accumulate per-user totals; cmt[6] (star_score) > 0 marks a
            # review, otherwise a comment.
            users[uid] = (users[uid][0] + int(cmt[0]),
                          int(cmt[6]) > 0 and users[uid][1] or users[uid][1] + 1,
                          int(cmt[6]) > 0 and users[uid][2] + 1 or users[uid][2],
                          users[uid][3] + int(cmt[1]),
                          users[uid][4] + int(cmt[2]),
                          int(cmt[3]), cmt[4], cmt[5])
        else:
            users[uid] = (int(cmt[0]),
                          int(cmt[6]) == 0 and 1 or 0,
                          int(cmt[6]) > 0 and 1 or 0,
                          int(cmt[1]), int(cmt[2]), int(cmt[3]),
                          cmt[4], cmt[5])
    users = users.values()
    users.sort()
    users.reverse()
    return tuple(users)


def perform_request_comments(req=None, ln=CFG_SITE_LANG, uid="", comID="",
                             recID="", reviews=0, abuse=False, collection=""):
    """Display the list of comments/reviews along with information about
    each comment.

    Display the comment given by its ID, or the list of comments for
    the given record ID.  If abuse == True, only list items reported
    as abuse.  If comID and recID are not provided, list all comments
    (or all abused comments).
    """
    ln = wash_language(ln)
    uid = wash_url_argument(uid, 'int')
    comID = wash_url_argument(comID, 'int')
    recID = wash_url_argument(recID, 'int')
    reviews = wash_url_argument(reviews, 'int')
    collection = wash_url_argument(collection, 'str')
    user_collections = ['Show all']
    user_collections.extend(get_user_collections(req))
    if collection and collection != 'Show all':
        # A specific collection was requested: check moderation rights.
        (auth_code, auth_msg) = acc_authorize_action(req, 'moderatecomments',
                                                     collection=collection)
        if auth_code:
            return webcomment_templates.tmpl_admin_comments(
                ln=ln, uid=uid, comID=comID, recID=recID, comment_data=None,
                reviews=reviews, error=1, user_collections=user_collections,
                collection=collection)
    # Fall back to the 'Show all' pseudo-collection when none was given.
    active_collection = collection or user_collections[0]
    if recID or uid:
        comments = query_get_comments(uid, comID, recID, reviews, ln,
                                      abuse=abuse,
                                      user_collections=user_collections,
                                      collection=active_collection)
    else:
        comments = query_get_comments('', comID, '', reviews, ln,
                                      abuse=abuse,
                                      user_collections=user_collections,
                                      collection=active_collection)
    # error=2 tells the template "nothing found" (original 4-way branch
    # collapsed; comment_data and all other arguments were identical).
    error = 0 if comments else 2
    return webcomment_templates.tmpl_admin_comments(
        ln=ln, uid=uid, comID=comID, recID=recID, comment_data=comments,
        reviews=reviews, error=error, user_collections=user_collections,
        collection=collection)
def perform_request_hot(req=None, ln=CFG_SITE_LANG, comments=1, top=10,
                        collection="Show all"):
    """Display the list of hottest comments/reviews.

    @param req: request object for obtaining user information
    @param ln: language
    @param comments: 1 for comments, 0 for reviews
    @param top: number of results to be shown
    @param collection: filter by collection
    """
    ln = wash_language(ln)
    comments = wash_url_argument(comments, 'int')
    top = wash_url_argument(top, 'int')
    collection = wash_url_argument(collection, 'str')
    user_collections = ['Show all']
    user_collections.extend(get_user_collections(req))
    if collection and collection != 'Show all':
        (auth_code, auth_msg) = acc_authorize_action(req, 'moderatecomments',
                                                     collection=collection)
        if auth_code:
            return webcomment_templates.tmpl_admin_hot(
                ln=ln, comment_data=None, comments=comments, error=1,
                user_collections=user_collections, collection=collection)
    active_collection = collection or user_collections[0]
    comments_retrieved = query_get_hot(comments, ln, top, user_collections,
                                       active_collection)
    error = 0 if comments_retrieved else 2
    return webcomment_templates.tmpl_admin_hot(
        ln=ln, comment_data=comments_retrieved, comments=comments,
        error=error, user_collections=user_collections, collection=collection)


def perform_request_latest(req=None, ln=CFG_SITE_LANG, comments=1, top=10,
                           collection=""):
    """Display the list of latest comments/reviews.

    @param req: request object for obtaining user information
    @param ln: language
    @param comments: 1 for comments, 0 for reviews
    @param top: number of results to be shown
    @param collection: filter by collection
    """
    ln = wash_language(ln)
    comments = wash_url_argument(comments, 'int')
    top = wash_url_argument(top, 'int')
    collection = wash_url_argument(collection, 'str')
    user_collections = ['Show all']
    user_collections.extend(get_user_collections(req))
    if collection and collection != 'Show all':
        (auth_code, auth_msg) = acc_authorize_action(req, 'moderatecomments',
                                                     collection=collection)
        if auth_code:
            return webcomment_templates.tmpl_admin_latest(
                ln=ln, comment_data=None, comments=comments, error=1,
                user_collections=user_collections, collection=collection)
    active_collection = collection or user_collections[0]
    comments_retrieved = query_get_latest(comments, ln, top, user_collections,
                                          active_collection)
    error = 0 if comments_retrieved else 2
    return webcomment_templates.tmpl_admin_latest(
        ln=ln, comment_data=comments_retrieved, comments=comments,
        error=error, user_collections=user_collections, collection=collection)


def perform_request_undel_single_com(ln=CFG_SITE_LANG, id=""):
    """Mark the comment referenced by `id` as active again.

    NOTE: the original default was 'id=id', i.e. the *builtin* id()
    function, which would never wash to a valid comment id; use the
    same empty-string default as perform_request_unreport_single_com.
    """
    ln = wash_language(ln)
    id = wash_url_argument(id, 'int')
    return query_undel_single_comment(id)


def query_get_comments(uid, cmtID, recID, reviews, ln, abuse=False,
                       user_collections='', collection=''):
    """private function

    @param user_collections: allowed collections for the user
    @param collection: collection to display
    @return: tuple of comment where comment is
        tuple (nickname, uid, date_creation, body, id, status) if
        ranking disabled, or
        tuple (nickname, uid, date_creation, body, nb_votes_yes,
        nb_votes_total, star_score, title, id, status)
    """
    qdict = {'id': 0, 'id_bibrec': 1, 'uid': 2, 'date_creation': 3,
             'body': 4, 'status': 5, 'nb_abuse_reports': 6,
             'nb_votes_yes': 7, 'nb_votes_total': 8,
             'star_score': 9, 'title': 10, 'email': -2, 'nickname': -1}
    query = """SELECT c.id, c.id_bibrec, c.id_user,
                      DATE_FORMAT(c.date_creation, '%%Y-%%m-%%d %%H:%%i:%%S'),
                      c.body, c.status, c.nb_abuse_reports,
                      %s
                      u.email, u.nickname
               FROM cmtRECORDCOMMENT c LEFT JOIN user u
                                       ON c.id_user = u.id
               %s
               ORDER BY c.nb_abuse_reports DESC, c.nb_votes_yes DESC, c.date_creation
            """
    select_fields = reviews and 'c.nb_votes_yes, c.nb_votes_total, c.star_score, c.title,' or ''
    where_clause = "WHERE " + (reviews and 'c.star_score>0' or 'c.star_score=0')
    # All ids below were washed to ints by the callers, so %i is safe here.
    if uid:
        where_clause += ' AND c.id_user=%i' % uid
    if recID:
        where_clause += ' AND c.id_bibrec=%i' % recID
    if cmtID:
        where_clause += ' AND c.id=%i' % cmtID
    if abuse:
        where_clause += ' AND c.nb_abuse_reports>0'
    res = run_sql(query % (select_fields, where_clause))
    collection_records = []
    if collection == 'Show all':
        for collection_name in user_collections:
            collection_records.extend(perform_request_search(cc=collection_name))
    else:
        collection_records.extend(perform_request_search(cc=collection))
    # Use a set for the per-row membership test (was O(n) per comment).
    collection_records = set(collection_records)
    output = []
    for qtuple in res:
        if qtuple[qdict['id_bibrec']] in collection_records:
            nickname = qtuple[qdict['nickname']] or \
                       get_user_info(qtuple[qdict['uid']], ln)[2]
            if reviews:
                comment_tuple = (nickname,
                                 qtuple[qdict['uid']],
                                 qtuple[qdict['date_creation']],
                                 qtuple[qdict['body']],
                                 qtuple[qdict['nb_votes_yes']],
                                 qtuple[qdict['nb_votes_total']],
                                 qtuple[qdict['star_score']],
                                 qtuple[qdict['title']],
                                 qtuple[qdict['id']],
                                 qtuple[qdict['status']])
            else:
                comment_tuple = (nickname,
                                 qtuple[qdict['uid']],
                                 qtuple[qdict['date_creation']],
                                 qtuple[qdict['body']],
                                 qtuple[qdict['id']],
                                 qtuple[qdict['status']])
            general_infos_tuple = (nickname,
                                   qtuple[qdict['uid']],
                                   qtuple[qdict['email']],
                                   qtuple[qdict['id']],
                                   qtuple[qdict['id_bibrec']],
                                   qtuple[qdict['nb_abuse_reports']])
            output.append((comment_tuple, general_infos_tuple))
    return tuple(output)


def query_get_hot(comments, ln, top, user_collections, collection):
    """private function

    @param comments: boolean indicating if we want to retrieve comments
        or reviews
    @param ln: language
    @param top: number of results to display
    @param user_collections: allowed collections for the user
    @param collection: collection to display
    @return: tuple (id_bibrec, date_last_comment, users, count)
    """
    qdict = {'id_bibrec': 0, 'date_last_comment': 1, 'users': 2,
             'total_count': 3}
    query = """SELECT c.id_bibrec,
               DATE_FORMAT(max(c.date_creation), '%%Y-%%m-%%d %%H:%%i:%%S') as date_last_comment,
               count(distinct c.id_user) as users,
               count(*) as count
               FROM cmtRECORDCOMMENT c
               %s
               GROUP BY c.id_bibrec
               ORDER BY count(*) DESC
               LIMIT %s
            """
    where_clause = "WHERE " + (comments and 'c.star_score=0' or 'c.star_score>0') + \
                   ' AND c.status="ok" AND c.nb_abuse_reports < %s' % \
                   CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN
    res = run_sql(query % (where_clause, top))
    collection_records = []
    if collection == 'Show all':
        for collection_name in user_collections:
            collection_records.extend(perform_request_search(cc=collection_name))
    else:
        collection_records.extend(perform_request_search(cc=collection))
    collection_records = set(collection_records)
    output = []
    for qtuple in res:
        if qtuple[qdict['id_bibrec']] in collection_records:
            general_infos_tuple = (qtuple[qdict['id_bibrec']],
                                   qtuple[qdict['date_last_comment']],
                                   qtuple[qdict['users']],
                                   qtuple[qdict['total_count']])
            output.append(general_infos_tuple)
    return tuple(output)


def query_get_latest(comments, ln, top, user_collections, collection):
    """private function

    @param comments: boolean indicating if we want to retrieve comments
        or reviews
    @param ln: language
    @param top: number of results to display
    @param user_collections: allowed collections for the user
    @param collection: collection to display
    @return: tuple of comment where comment is
        tuple (nickname, uid, date_creation, body, id) for comments or
        tuple (nickname, uid, date_creation, body, star_score, id)
        for reviews
    """
    qdict = {'id': 0, 'id_bibrec': 1, 'uid': 2, 'date_creation': 3,
             'body': 4, 'nb_abuse_reports': 5, 'star_score': 6,
             'nickname': -1}
    query = """SELECT c.id, c.id_bibrec, c.id_user,
                      DATE_FORMAT(c.date_creation, '%%Y-%%m-%%d %%H:%%i:%%S'),
                      c.body, c.nb_abuse_reports,
                      %s
                      u.nickname
               FROM cmtRECORDCOMMENT c LEFT JOIN user u
                                       ON c.id_user = u.id
               %s
               ORDER BY c.date_creation DESC
               LIMIT %s
            """
    select_fields = not comments and 'c.star_score, ' or ''
    where_clause = "WHERE " + (comments and 'c.star_score=0' or 'c.star_score>0') + \
                   ' AND c.status="ok" AND c.nb_abuse_reports < %s' % \
                   CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN
    res = run_sql(query % (select_fields, where_clause, top))
    collection_records = []
    if collection == 'Show all':
        for collection_name in user_collections:
            collection_records.extend(perform_request_search(cc=collection_name))
    else:
        collection_records.extend(perform_request_search(cc=collection))
    collection_records = set(collection_records)
    output = []
    for qtuple in res:
        if qtuple[qdict['id_bibrec']] in collection_records:
            nickname = qtuple[qdict['nickname']] or \
                       get_user_info(qtuple[qdict['uid']], ln)[2]
            if not comments:
                comment_tuple = (nickname,
                                 qtuple[qdict['uid']],
                                 qtuple[qdict['date_creation']],
                                 qtuple[qdict['body']],
                                 qtuple[qdict['star_score']],
                                 qtuple[qdict['id']])
            else:
                comment_tuple = (nickname,
                                 qtuple[qdict['uid']],
                                 qtuple[qdict['date_creation']],
                                 qtuple[qdict['body']],
                                 qtuple[qdict['id']])
            general_infos_tuple = (nickname,
                                   qtuple[qdict['uid']],
                                   qtuple[qdict['id']],
                                   qtuple[qdict['id_bibrec']],
                                   qtuple[qdict['nb_abuse_reports']])
            output.append((comment_tuple, general_infos_tuple))
    return tuple(output)


def perform_request_del_com(ln=CFG_SITE_LANG, comIDs=[]):
    """private function
    Delete the comments and say whether successful or not
    @param ln: language
    @param comIDs: list of comment ids
    """
    # NOTE: the mutable default is safe here, comIDs is only read.
    ln = wash_language(ln)
    comIDs = wash_url_argument(comIDs, 'list')
    comIDs = [wash_url_argument(comID, 'int') for comID in comIDs]
    if not comIDs:
        # Nothing selected: render the result page with the empty list
        # (the original's map(coerce, [], []) also produced []).
        return webcomment_templates.tmpl_admin_del_com(del_res=comIDs, ln=ln)
    del_res = [(comID, query_delete_comment_mod(comID)) for comID in comIDs]
    return webcomment_templates.tmpl_admin_del_com(del_res=del_res, ln=ln)


def perform_request_undel_com(ln=CFG_SITE_LANG, comIDs=[]):
    """private function
    Undelete the comments and say whether successful or not
    @param ln: language
    @param comIDs: list of comment ids
    """
    ln = wash_language(ln)
    comIDs = wash_url_argument(comIDs, 'list')
    comIDs = [wash_url_argument(comID, 'int') for comID in comIDs]
    if not comIDs:
        return webcomment_templates.tmpl_admin_undel_com(del_res=comIDs, ln=ln)
    del_res = [(comID, query_undel_single_comment(comID)) for comID in comIDs]
    return webcomment_templates.tmpl_admin_undel_com(del_res=del_res, ln=ln)


def perform_request_del_single_com_mod(ln=CFG_SITE_LANG, id=""):
    """private function
    Delete a single comment requested by a moderator
    @param ln: language
    @param id: comment id to be deleted
    """
    ln = wash_language(ln)
    id = wash_url_argument(id, 'int')
    return query_delete_comment_mod(id)


def perform_request_del_single_com_auth(ln=CFG_SITE_LANG, id=""):
    """private function
    Delete a single comment requested by the author
    @param ln: language
    @param id: comment id to be deleted
    """
    ln = wash_language(ln)
    id = wash_url_argument(id, 'int')
    return query_delete_comment_auth(id)


def perform_request_unreport_single_com(ln=CFG_SITE_LANG, id=""):
    """private function
    Unreport a single comment
    @param ln: language
    @param id: comment id to be unreported
    """
    ln = wash_language(ln)
    id = wash_url_argument(id, 'int')
    return query_suppress_abuse_report(id)


def suppress_abuse_report(ln=CFG_SITE_LANG, comIDs=[]):
    """private function
    Suppress the abuse reports for the given comIDs.
    @param ln: language
    @param comIDs: list of ids to suppress attached reports.
    """
    ln = wash_language(ln)
    comIDs = wash_url_argument(comIDs, 'list')
    comIDs = [wash_url_argument(comID, 'int') for comID in comIDs]
    if not comIDs:
        # BUG FIX: the original rendered tmpl_admin_del_com here but
        # tmpl_admin_suppress_abuse_report on the success path; use the
        # suppress-report template consistently.
        return webcomment_templates.tmpl_admin_suppress_abuse_report(
            del_res=comIDs, ln=ln)
    del_res = [(comID, query_suppress_abuse_report(comID))
               for comID in comIDs]
    return webcomment_templates.tmpl_admin_suppress_abuse_report(
        del_res=del_res, ln=ln)


def query_suppress_abuse_report(comID):
    """Suppress the abuse reports of a given comment and approve it.
    @return: integer 1 if successful, integer 0 if not
    """
    query = "UPDATE cmtRECORDCOMMENT SET nb_abuse_reports=0, status='ap' WHERE id=%s"
    res = run_sql(query, (comID,))
    return int(res)


def query_delete_comment_mod(comID):
    """Mark comment comID as deleted-by-moderator ('dm').
    @return: integer 1 if successful, integer 0 if not
    """
    query1 = "UPDATE cmtRECORDCOMMENT SET status='dm' WHERE id=%s"
    res1 = run_sql(query1, (comID,))
    return int(res1)


def query_delete_comment_auth(comID):
    """Mark comment comID as deleted-by-author ('da').
    @return: integer 1 if successful, integer 0 if not
    """
    query1 = "UPDATE cmtRECORDCOMMENT SET status='da' WHERE id=%s"
    res1 = run_sql(query1, (comID,))
    return int(res1)


def query_undel_single_comment(comID):
    """Undelete comment comID (status back to 'ok').
    @return: integer 1 if successful, integer 0 if not
    """
    query = "UPDATE cmtRECORDCOMMENT SET status='ok' WHERE id=%s"
    res = run_sql(query, (comID,))
    return int(res)


def check_user_is_author(user_id, com_id):
    """Return 1 if user_id is the author of comment com_id, else 0."""
    res = run_sql("SELECT id, id_user FROM cmtRECORDCOMMENT WHERE id=%s and id_user=%s",
                  (str(com_id), str(user_id)))
    return 1 if res else 0


def migrate_comments_populate_threads_index():
    """
    Fill in the `reply_order_cached_data' columns in cmtRECORDCOMMENT and
    bskRECORDCOMMENT tables with adequate values so that threads are
    displayed correctly.
    """
    # Update WebComment comments
    res = run_sql("SELECT id FROM cmtRECORDCOMMENT WHERE reply_order_cached_data is NULL")
    for row in res:
        reply_order_cached_data = get_reply_order_cache_data(row[0])
        run_sql("UPDATE cmtRECORDCOMMENT set reply_order_cached_data=%s WHERE id=%s",
                (reply_order_cached_data, row[0]))

    # Update WebBasket comments
    res = run_sql("SELECT id FROM bskRECORDCOMMENT WHERE reply_order_cached_data is NULL")
    for row in res:
        reply_order_cached_data = get_reply_order_cache_data(row[0])
        # BUG FIX: the original updated cmtRECORDCOMMENT here (copy-paste),
        # clobbering webcomment rows instead of the basket comments the
        # SELECT above targeted.
        run_sql("UPDATE bskRECORDCOMMENT set reply_order_cached_data=%s WHERE id=%s",
                (reply_order_cached_data, row[0]))
gpl-2.0
muahah/elpy
docs/conf.py
4
8145
# -*- coding: utf-8 -*-
#
# Sphinx build configuration for the Elpy documentation.
#
# Only settings that differ from Sphinx's defaults are kept; consult the
# Sphinx configuration reference for the full list of available options.

import os
import sys

# Make the local `elispdomain` extension importable from this directory.
sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -------------------------------------------------

extensions = ["elispdomain"]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'

project = u'Elpy'
copyright = u'2014, Jorgen Schäfer'
version = '1.35.0'   # short X.Y version
release = '1.35.0'   # full version, including alpha/beta/rc tags

exclude_patterns = ['_build']
pygments_style = 'sphinx'

# -- HTML output -----------------------------------------------------------

html_theme = 'default'
html_static_path = []  # ['_static']
htmlhelp_basename = 'Elpydoc'

# -- LaTeX output ----------------------------------------------------------

latex_elements = {
}

# (source start file, target name, title, author, documentclass)
latex_documents = [
    ('index', 'Elpy.tex', u'Elpy Documentation',
     u'Jorgen Schäfer', 'manual'),
]

# -- Manual page output ----------------------------------------------------

# (source start file, name, description, authors, manual section)
man_pages = [
    ('index', 'elpy', u'Elpy Documentation',
     [u'Jorgen Schäfer'], 1)
]

# -- Texinfo output --------------------------------------------------------

# (source start file, target name, title, author, dir menu entry,
#  description, category)
texinfo_documents = [
    ('index', 'Elpy', u'Elpy Documentation',
     u'Jorgen Schäfer', 'Elpy', 'One line description of project.',
     'Miscellaneous'),
]
gpl-3.0
bop/foundation
lib/python2.7/site-packages/compressor/filters/css_default.py
1
3577
import os
import re
import posixpath

from compressor.cache import get_hexdigest, get_hashed_mtime
from compressor.conf import settings
# BUG FIX: FilterError is raised below but was never imported, so the
# unknown-hashing-method branch died with a NameError instead.
from compressor.filters import FilterBase, FilterError
from compressor.utils import staticfiles

# Matches the argument of a CSS url(...) reference (group 1).
URL_PATTERN = re.compile(r'url\(([^\)]+)\)')


class CssAbsoluteFilter(FilterBase):
    """Rewrite relative url(...) references in CSS to absolute URLs under
    COMPRESS_URL, appending a cache-busting suffix (file mtime or content
    hash, per COMPRESS_CSS_HASHING_METHOD)."""

    def __init__(self, *args, **kwargs):
        super(CssAbsoluteFilter, self).__init__(*args, **kwargs)
        self.root = settings.COMPRESS_ROOT
        self.url = settings.COMPRESS_URL.rstrip('/')
        self.url_path = self.url
        self.has_scheme = False

    def input(self, filename=None, basename=None, **kwargs):
        """Return self.content with every url(...) rewritten, or unchanged
        if the file is neither under COMPRESS_ROOT nor findable via
        staticfiles."""
        if filename is not None:
            filename = os.path.normcase(os.path.abspath(filename))
        if (not (filename and filename.startswith(self.root)) and
                not self.find(basename)):
            return self.content
        self.path = basename.replace(os.sep, '/')
        self.path = self.path.lstrip('/')
        if self.url.startswith(('http://', 'https://')):
            # COMPRESS_URL is absolute: split scheme/host from the path so
            # guess_filename() can map URLs back to local files.
            self.has_scheme = True
            parts = self.url.split('/')
            self.url = '/'.join(parts[2:])
            self.url_path = '/%s' % '/'.join(parts[3:])
            self.protocol = '%s/' % '/'.join(parts[:2])
            self.host = parts[2]
        self.directory_name = '/'.join((self.url, os.path.dirname(self.path)))
        return URL_PATTERN.sub(self.url_converter, self.content)

    def find(self, basename):
        # Only consult staticfiles finders in DEBUG; returns a path or None.
        if settings.DEBUG and basename and staticfiles.finders:
            return staticfiles.finders.find(basename)

    def guess_filename(self, url):
        """Map an already-absolute URL back to a local file path under
        COMPRESS_ROOT; return the path if the file exists, else False."""
        local_path = url
        if self.has_scheme:
            # COMPRESS_URL had a protocol; strip protocol and hostname.
            local_path = local_path.replace(self.protocol + self.host, "", 1)
        # Check if we can find the COMPRESS_URL path prefix in our url.
        if local_path.startswith(self.url_path):
            local_path = local_path.replace(self.url_path, "", 1)
        # Re-build the local full path by adding root.
        filename = os.path.join(self.root, local_path.lstrip(os.sep))
        return os.path.exists(filename) and filename

    def add_suffix(self, url):
        """Append '?<suffix>' (or '&<suffix>') to `url` when the local file
        is found; the suffix is its mtime hash or content digest."""
        filename = self.guess_filename(url)
        suffix = None
        if filename:
            if settings.COMPRESS_CSS_HASHING_METHOD == "mtime":
                suffix = get_hashed_mtime(filename)
            elif settings.COMPRESS_CSS_HASHING_METHOD == "hash":
                # Context manager replaces the manual try/finally close.
                with open(filename) as hash_file:
                    suffix = get_hexdigest(hash_file.read(), 12)
            else:
                # BUG FIX: the original left the %s placeholder unfilled.
                raise FilterError('COMPRESS_CSS_HASHING_METHOD is configured '
                                  'with an unknown method (%s).' %
                                  settings.COMPRESS_CSS_HASHING_METHOD)
        if suffix is None:
            return url
        if url.startswith(('http://', 'https://', '/')):
            if "?" in url:
                url = "%s&%s" % (url, suffix)
            else:
                url = "%s?%s" % (url, suffix)
        return url

    def url_converter(self, matchobj):
        """re.sub callback: absolutize one url(...) match and add suffix."""
        url = matchobj.group(1)
        url = url.strip(' \'"')
        # Already-absolute and data: URIs are kept as-is (suffix only).
        if url.startswith(('http://', 'https://', '/', 'data:')):
            return "url('%s')" % self.add_suffix(url)
        full_url = posixpath.normpath('/'.join([str(self.directory_name), url]))
        if self.has_scheme:
            full_url = "%s%s" % (self.protocol, full_url)
        return "url('%s')" % self.add_suffix(full_url)
gpl-2.0
anaran/kuma
vendor/packages/translate/lang/test_data.py
33
1449
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the language-code helpers in translate.lang.data."""

from translate.lang import data


def test_languagematch():
    """test language comparison"""
    # Identical codes match, different ones do not.
    assert data.languagematch("af", "af")
    assert not data.languagematch("af", "en")
    # A bare language code matches its regional variants.
    assert data.languagematch("pt", "pt_PT")
    # FIXME don't think this one is correct
    #assert not data.languagematch("sr", "sr@Latn")
    # Without a first code, only the validity of the second code counts.
    for code in ("en", "en_GB", "en_GB@Latn"):
        assert data.languagematch(None, code)
    assert not data.languagematch(None, "not-a-lang-code")


def test_normalise_code():
    """test the normalisation of language codes"""
    # Underscores become dashes and everything is lowercased.
    assert data.normalize_code("af_ZA") == "af-za"
    assert data.normalize_code("xx@Latin") == "xx-latin"


def test_simplify_to_common():
    """test language code simplification"""
    # Codes whose variant is the common form collapse to the bare code...
    assert data.simplify_to_common("af_ZA") == "af"
    assert data.simplify_to_common("pt_PT") == "pt"
    # ...while distinct variants are preserved.
    assert data.simplify_to_common("pt_BR") == "pt_BR"


def test_language_names():
    translate = data.tr_lang('en_US')
    expected = {
        u"Bokmål, Norwegian; Norwegian Bokmål": u"Norwegian Bokmål",
        u"Spanish; Castillian": u"Spanish",
        u"Mapudungun; Mapuche": u"Mapudungun",
        u"Interlingua (International Auxiliary Language Association)":
            u"Interlingua",
    }
    for source, name in expected.items():
        assert translate(source) == name
mpl-2.0
alilotfi/django
tests/auth_tests/urls.py
80
4661
from django.conf.urls import url from django.contrib import admin from django.contrib.auth import views from django.contrib.auth.decorators import login_required from django.contrib.auth.forms import AuthenticationForm from django.contrib.auth.urls import urlpatterns as auth_urlpatterns from django.contrib.messages.api import info from django.http import HttpRequest, HttpResponse from django.shortcuts import render from django.template import RequestContext, Template from django.views.decorators.cache import never_cache class CustomRequestAuthenticationForm(AuthenticationForm): def __init__(self, request, *args, **kwargs): assert isinstance(request, HttpRequest) super(CustomRequestAuthenticationForm, self).__init__(request, *args, **kwargs) @never_cache def remote_user_auth_view(request): "Dummy view for remote user tests" t = Template("Username is {{ user }}.") c = RequestContext(request, {}) return HttpResponse(t.render(c)) def auth_processor_no_attr_access(request): render(request, 'context_processors/auth_attrs_no_access.html') # *After* rendering, we check whether the session was accessed return render(request, 'context_processors/auth_attrs_test_access.html', {'session_accessed': request.session.accessed}) def auth_processor_attr_access(request): render(request, 'context_processors/auth_attrs_access.html') return render(request, 'context_processors/auth_attrs_test_access.html', {'session_accessed': request.session.accessed}) def auth_processor_user(request): return render(request, 'context_processors/auth_attrs_user.html') def auth_processor_perms(request): return render(request, 'context_processors/auth_attrs_perms.html') def auth_processor_perm_in_perms(request): return render(request, 'context_processors/auth_attrs_perm_in_perms.html') def auth_processor_messages(request): info(request, "Message 1") return render(request, 'context_processors/auth_attrs_messages.html') def userpage(request): pass def custom_request_auth_login(request): return 
views.login(request, authentication_form=CustomRequestAuthenticationForm) # special urls for auth test cases urlpatterns = auth_urlpatterns + [ url(r'^logout/custom_query/$', views.logout, dict(redirect_field_name='follow')), url(r'^logout/next_page/$', views.logout, dict(next_page='/somewhere/')), url(r'^logout/next_page/named/$', views.logout, dict(next_page='password_reset')), url(r'^remote_user/$', remote_user_auth_view), url(r'^password_reset_from_email/$', views.password_reset, dict(from_email='staffmember@example.com')), url(r'^password_reset_extra_email_context/$', views.password_reset, dict(extra_email_context=dict(greeting='Hello!'))), url(r'^password_reset/custom_redirect/$', views.password_reset, dict(post_reset_redirect='/custom/')), url(r'^password_reset/custom_redirect/named/$', views.password_reset, dict(post_reset_redirect='password_reset')), url(r'^password_reset/html_email_template/$', views.password_reset, dict(html_email_template_name='registration/html_password_reset_email.html')), url(r'^reset/custom/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', views.password_reset_confirm, dict(post_reset_redirect='/custom/')), url(r'^reset/custom/named/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', views.password_reset_confirm, dict(post_reset_redirect='password_reset')), url(r'^password_change/custom/$', views.password_change, dict(post_change_redirect='/custom/')), url(r'^password_change/custom/named/$', views.password_change, dict(post_change_redirect='password_reset')), url(r'^login_required/$', login_required(views.password_reset)), url(r'^login_required_login_url/$', login_required(views.password_reset, login_url='/somewhere/')), url(r'^auth_processor_no_attr_access/$', auth_processor_no_attr_access), url(r'^auth_processor_attr_access/$', auth_processor_attr_access), url(r'^auth_processor_user/$', auth_processor_user), url(r'^auth_processor_perms/$', auth_processor_perms), 
url(r'^auth_processor_perm_in_perms/$', auth_processor_perm_in_perms), url(r'^auth_processor_messages/$', auth_processor_messages), url(r'^custom_request_auth_login/$', custom_request_auth_login), url(r'^userpage/(.+)/$', userpage, name="userpage"), # This line is only required to render the password reset with is_admin=True url(r'^admin/', admin.site.urls), ]
bsd-3-clause
bunchesofdonald/django-hermes
hermes/tests/test_views.py
2
3513
from . import HermesTestCase from .. import models class PostListViewTestCase(HermesTestCase): def url(self): return super(PostListViewTestCase, self).url('hermes_post_list') def test_context_contains_posts(self): """The PostListView Context should contain a QuerySet of all Posts""" response = self.get(self.url()) expected = list(models.Post.objects.published()) self.assertEqual(expected, list(response.context['posts'])) class CategoryPostListViewTestCase(HermesTestCase): def url(self, category): return category.get_absolute_url() def test_context_contains_posts(self): """The CategoryPostListView Context should contain a QuerySet of all Posts in the given Category """ response = self.get(self.url(self.root_category)) expected = list(models.Post.objects.filter(category=self.root_category)) self.assertEqual(expected, list(response.context['posts'])) class ArchivePostListViewTestCase(HermesTestCase): def url(self, year=None, month=None, day=None): if year and month and day: url_name = 'hermes_archive_year_month_day' kwargs = {'year': year, 'month': month, 'day': day, } elif year and month: url_name = 'hermes_archive_year_month' kwargs = {'year': year, 'month': month, } else: url_name = 'hermes_archive_year' kwargs = {'year': year, } return super(ArchivePostListViewTestCase, self).url(url_name, **kwargs) def test_context_contains_posts_by_month_year_day(self): """The ArchivePostListView Context should contain a QuerySet of all Posts on the given month/day/year """ response = self.get(self.url(year=2010, month=6, day=10)) expected = list(models.Post.objects.created_on(year=2010, month=6, day=10)) self.assertEqual(expected, list(response.context['posts'])) def test_context_contains_posts_by_month_year(self): """The ArchivePostListView Context should contain a QuerySet of all Posts on the given month/day """ response = self.get(self.url(year=2011, month=7)) expected = list(models.Post.objects.created_on(year=2011, month=7)) self.assertEqual(expected, 
list(response.context['posts'])) def test_context_contains_posts_by_year(self): """The ArchivePostListView Context should contain a QuerySet of all Posts in the given year """ response = self.get(self.url(year=2012)) expected = list(models.Post.objects.created_on(year=2012)) self.assertEqual(expected, list(response.context['posts'])) class AuthorPostListViewTestCase(HermesTestCase): def url(self, author): return super(AuthorPostListViewTestCase, self).url('hermes_author_post_list', author) def test_context_contains_posts(self): """The AuthorPoustListView Context should cotain a QuerySet af all Posts by the given Author. """ expected = list(models.Post.objects.by('author1')) response = self.get(self.url('author1')) self.assertEqual(expected, list(response.context['posts'])) class PostDetailViewTestCase(HermesTestCase): def url(self, post): return post.get_absolute_url() def test_context_contains_post(self): response = self.get(self.url(self.post1)) expected = self.post1 self.assertEqual(expected, response.context['post'])
mit
Thireus/hashfast-tools
hf/load/__init__.py
14
1572
# Copyright (c) 2014, HashFast Technologies LLC # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of HashFast Technologies LLC nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL HASHFAST TECHNOLOGIES LLC BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
bsd-3-clause