code
stringlengths
1
199k
"""Deprecated import support. Auto-generated by import_shims/generate_shims.sh.""" from import_shims.warn import warn_deprecated_import warn_deprecated_import('contentstore.rest_api.v1.serializers', 'cms.djangoapps.contentstore.rest_api.v1.serializers') from cms.djangoapps.contentstore.rest_api.v1.serializers import *
from spack import *


class XorgServer(AutotoolsPackage, XorgPackage):
    """X.Org Server is the free and open source implementation of the
    display server for the X Window System stewarded by the X.Org
    Foundation."""

    homepage = "http://cgit.freedesktop.org/xorg/xserver"
    xorg_mirror_path = "xserver/xorg-server-1.18.99.901.tar.gz"

    version('1.18.99.901', sha256='c8425163b588de2ee7e5c8e65b0749f2710f55a7e02a8d1dc83b3630868ceb21')

    # Run/link dependencies.
    # FIX: 'pixman@0.27.2:' was declared twice identically; the duplicate
    # declaration has been removed.
    depends_on('pixman@0.27.2:')
    depends_on('font-util')
    depends_on('libxshmfence@1.1:')
    depends_on('libdrm@2.3.0:')
    depends_on('libx11')

    # Build-only tools and protocol headers.
    depends_on('dri2proto@2.8:', type='build')
    depends_on('dri3proto@1.0:', type='build')
    depends_on('glproto@1.4.17:', type='build')
    depends_on('flex', type='build')
    depends_on('bison', type='build')
    depends_on('pkgconfig', type='build')
    depends_on('util-macros', type='build')

    # X protocol extension headers.
    depends_on('fixesproto@5.0:')
    depends_on('damageproto@1.1:')
    depends_on('xcmiscproto@1.2.0:')
    depends_on('xtrans@1.3.5:')
    depends_on('bigreqsproto@1.1.0:')
    depends_on('xproto@7.0.28:')
    depends_on('randrproto@1.5.0:')
    depends_on('renderproto@0.11:')
    depends_on('xextproto@7.2.99.901:')
    depends_on('inputproto@2.3:')
    depends_on('kbproto@1.0.3:')
    depends_on('fontsproto@2.1.3:')
    depends_on('videoproto')
    depends_on('compositeproto@0.4:')
    depends_on('recordproto@1.13.99.1:')
    depends_on('scrnsaverproto@1.1:')
    depends_on('resourceproto@1.2.0:')
    depends_on('xf86driproto@2.1.0:')
    # NOTE: glproto is declared above with type='build' as well; Spack merges
    # the dependency types, so both declarations are kept intentionally.
    depends_on('glproto@1.4.17:')
    depends_on('presentproto@1.0:')
    depends_on('xineramaproto')

    # X client libraries.
    depends_on('libxkbfile')
    depends_on('libxfont2')
    depends_on('libxext')
    depends_on('libxdamage')
    depends_on('libxfixes')
    depends_on('libepoxy')
import spack.cmd.location
import spack.modules

# Command metadata consumed by spack's CLI machinery.
description = "cd to spack directories in the shell"
section = "environment"
level = "long"


def setup_parser(subparser):
    """This is for decoration -- spack cd is used through spack's shell
    support.  This allows spack cd to print a descriptive help message
    when called with -h."""
    # Reuse the option definitions of `spack location`; `spack cd` accepts
    # exactly the same arguments.
    spack.cmd.location.setup_parser(subparser)


def cd(parser, args):
    # A subprocess cannot change its parent shell's directory, so the real
    # work happens in the shell integration; here we only print help.
    spack.modules.print_help()
"""Internal support module for sre""" MAGIC = 20031017 MAXREPEAT = 65535 class error(Exception): pass FAILURE = "failure" SUCCESS = "success" ANY = "any" ANY_ALL = "any_all" ASSERT = "assert" ASSERT_NOT = "assert_not" AT = "at" BIGCHARSET = "bigcharset" BRANCH = "branch" CALL = "call" CATEGORY = "category" CHARSET = "charset" GROUPREF = "groupref" GROUPREF_IGNORE = "groupref_ignore" GROUPREF_EXISTS = "groupref_exists" IN = "in" IN_IGNORE = "in_ignore" INFO = "info" JUMP = "jump" LITERAL = "literal" LITERAL_IGNORE = "literal_ignore" MARK = "mark" MAX_REPEAT = "max_repeat" MAX_UNTIL = "max_until" MIN_REPEAT = "min_repeat" MIN_UNTIL = "min_until" NEGATE = "negate" NOT_LITERAL = "not_literal" NOT_LITERAL_IGNORE = "not_literal_ignore" RANGE = "range" REPEAT = "repeat" REPEAT_ONE = "repeat_one" SUBPATTERN = "subpattern" MIN_REPEAT_ONE = "min_repeat_one" AT_BEGINNING = "at_beginning" AT_BEGINNING_LINE = "at_beginning_line" AT_BEGINNING_STRING = "at_beginning_string" AT_BOUNDARY = "at_boundary" AT_NON_BOUNDARY = "at_non_boundary" AT_END = "at_end" AT_END_LINE = "at_end_line" AT_END_STRING = "at_end_string" AT_LOC_BOUNDARY = "at_loc_boundary" AT_LOC_NON_BOUNDARY = "at_loc_non_boundary" AT_UNI_BOUNDARY = "at_uni_boundary" AT_UNI_NON_BOUNDARY = "at_uni_non_boundary" CATEGORY_DIGIT = "category_digit" CATEGORY_NOT_DIGIT = "category_not_digit" CATEGORY_SPACE = "category_space" CATEGORY_NOT_SPACE = "category_not_space" CATEGORY_WORD = "category_word" CATEGORY_NOT_WORD = "category_not_word" CATEGORY_LINEBREAK = "category_linebreak" CATEGORY_NOT_LINEBREAK = "category_not_linebreak" CATEGORY_LOC_WORD = "category_loc_word" CATEGORY_LOC_NOT_WORD = "category_loc_not_word" CATEGORY_UNI_DIGIT = "category_uni_digit" CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit" CATEGORY_UNI_SPACE = "category_uni_space" CATEGORY_UNI_NOT_SPACE = "category_uni_not_space" CATEGORY_UNI_WORD = "category_uni_word" CATEGORY_UNI_NOT_WORD = "category_uni_not_word" CATEGORY_UNI_LINEBREAK = 
"category_uni_linebreak" CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak" OPCODES = [ # failure=0 success=1 (just because it looks better that way :-) FAILURE, SUCCESS, ANY, ANY_ALL, ASSERT, ASSERT_NOT, AT, BRANCH, CALL, CATEGORY, CHARSET, BIGCHARSET, GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE, IN, IN_IGNORE, INFO, JUMP, LITERAL, LITERAL_IGNORE, MARK, MAX_UNTIL, MIN_UNTIL, NOT_LITERAL, NOT_LITERAL_IGNORE, NEGATE, RANGE, REPEAT, REPEAT_ONE, SUBPATTERN, MIN_REPEAT_ONE ] ATCODES = [ AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY, AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING, AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY, AT_UNI_NON_BOUNDARY ] CHCODES = [ CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE, CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD, CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD, CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT, CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD, CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK, CATEGORY_UNI_NOT_LINEBREAK ] def makedict(list): d = {} i = 0 for item in list: d[item] = i i = i + 1 return d OPCODES = makedict(OPCODES) ATCODES = makedict(ATCODES) CHCODES = makedict(CHCODES) OP_IGNORE = { GROUPREF: GROUPREF_IGNORE, IN: IN_IGNORE, LITERAL: LITERAL_IGNORE, NOT_LITERAL: NOT_LITERAL_IGNORE } AT_MULTILINE = { AT_BEGINNING: AT_BEGINNING_LINE, AT_END: AT_END_LINE } AT_LOCALE = { AT_BOUNDARY: AT_LOC_BOUNDARY, AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY } AT_UNICODE = { AT_BOUNDARY: AT_UNI_BOUNDARY, AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY } CH_LOCALE = { CATEGORY_DIGIT: CATEGORY_DIGIT, CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT, CATEGORY_SPACE: CATEGORY_SPACE, CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE, CATEGORY_WORD: CATEGORY_LOC_WORD, CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD, CATEGORY_LINEBREAK: CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK } CH_UNICODE = { CATEGORY_DIGIT: CATEGORY_UNI_DIGIT, CATEGORY_NOT_DIGIT: 
CATEGORY_UNI_NOT_DIGIT, CATEGORY_SPACE: CATEGORY_UNI_SPACE, CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE, CATEGORY_WORD: CATEGORY_UNI_WORD, CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD, CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK, CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK } SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking) SRE_FLAG_IGNORECASE = 2 # case insensitive SRE_FLAG_LOCALE = 4 # honour system locale SRE_FLAG_MULTILINE = 8 # treat target as multiline string SRE_FLAG_DOTALL = 16 # treat target as a single string SRE_FLAG_UNICODE = 32 # use unicode "locale" SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments SRE_FLAG_DEBUG = 128 # debugging SRE_FLAG_ASCII = 256 # use ascii "locale" SRE_INFO_PREFIX = 1 # has prefix SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix) SRE_INFO_CHARSET = 4 # pattern starts with character from given set if __name__ == "__main__": def dump(f, d, prefix): items = d.items() items.sort(key=lambda a: a[1]) for k, v in items: f.write("#define %s_%s %s\n" % (prefix, k.upper(), v)) f = open("sre_constants.h", "w") f.write("""\ /* * Secret Labs' Regular Expression Engine * * regular expression matching engine * * NOTE: This file is generated by sre_constants.py. If you need * to change anything in here, edit sre_constants.py and run it. * * Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved. * * See the _sre.c file for information on usage and redistribution. 
*/ """) f.write("#define SRE_MAGIC %d\n" % MAGIC) dump(f, OPCODES, "SRE_OP") dump(f, ATCODES, "SRE") dump(f, CHCODES, "SRE") f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE) f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE) f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE) f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE) f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL) f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE) f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE) f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX) f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL) f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET) f.close() print("done")
""" Multiple dictation constructs =============================================================================== This file is a showcase investigating the use and functionality of multiple dictation elements within Dragonfly speech recognition grammars. The first part of this file (i.e. the module's doc string) contains a description of the functionality being investigated along with test code and actual output in doctest format. This allows the reader to see what really would happen, without needing to load the file into a speech recognition engine and put effort into speaking all the showcased commands. The test code below makes use of Dragonfly's built-in element testing tool. When run, it will connect to the speech recognition engine, load the element being tested, mimic recognitions, and process the recognized value. Multiple consecutive dictation elements ------------------------------------------------------------------------------- >>> tester = ElementTester(RuleRef(ConsecutiveDictationRule())) >>> print(tester.recognize("consecutive Alice Bob Charlie")) Recognition: "consecutive Alice Bob Charlie" Word and rule pairs: ("1000000" is "dgndictation") - consecutive (1) - Alice (1000000) - Bob (1000000) - Charlie (1000000) Extras: - dictation1: Alice - dictation2: Bob - dictation3: Charlie >>> print(tester.recognize("consecutive Alice Bob")) RecognitionFailure Mixed literal and dictation elements ------------------------------------------------------------------------------- Here we will investigate mixed, i.e. interspersed, fixed literal command words and dynamic dictation elements. We will use the "MixedDictationRule" class which has a spec of "mixed [<dictation1>] <dictation2> command <dictation3>". Note that "<dictation1>" was made optional instead of "<dictation2>" because otherwise the first dictation elements would always gobble up all dictated words. 
There would (by definition) be no way to distinguish which words correspond with which dictation elements. Such consecutive dictation elements should for that reason be avoided in real command grammars. The way the spec is defined now, adds some interesting dynamics, because of the order in which they dictation elements parse the recognized words. However, do note that that order is well defined but arbitrarily chosen. >>> tester = ElementTester(RuleRef(MixedDictationRule())) >>> print(tester.recognize("mixed Alice Bob command Charlie")) Recognition: "mixed Alice Bob command Charlie" Word and rule pairs: ("1000000" is "dgndictation") - mixed (1) - Alice (1000000) - Bob (1000000) - command (1) - Charlie (1000000) Extras: - dictation1: Alice - dictation2: Bob - dictation3: Charlie >>> print(tester.recognize("mixed Alice command Charlie")) Recognition: "mixed Alice command Charlie" Word and rule pairs: ("1000000" is "dgndictation") - mixed (1) - Alice (1000000) - command (1) - Charlie (1000000) Extras: - dictation2: Alice - dictation3: Charlie >>> print(tester.recognize("mixed Alice Bob command")) RecognitionFailure >>> print(tester.recognize("mixed command Charlie")) RecognitionFailure Repetition of dictation elements ------------------------------------------------------------------------------- Now let's take a look at repetition of dictation elements. For this we will use the "RepeatedDictationRule" class, which defines its spec as a repetition of "command <dictation>". I.e. "command Alice" will match, and "command Alice command Bob" will also match. Note that this rule is inherently ambiguous, given the lack of a clear definition of grouping or precedence rules for fixed literal words in commands, and dynamic dictation elements. As an example, "command Alice command Bob" could either match 2 repetitions with "Alice" and "Bob" as dictation values, or a single repetition with "Alice command Bob" as its only dictation value. 
The tests below the show which of these actually occurs. >>> tester = ElementTester(RuleRef(RepeatedDictationRule())) >>> print(tester.recognize("command Alice")) Recognition: "command Alice" Word and rule pairs: ("1000000" is "dgndictation") - command (1) - Alice (1000000) Extras: - repetition: [[u'command', NatlinkDictationContainer(Alice)]] >>> print(tester.recognize("command Alice command Bob")) Recognition: "command Alice command Bob" Word and rule pairs: ("1000000" is "dgndictation") - command (1) - Alice (1000000) - command (1000000) - Bob (1000000) Extras: - repetition: [[u'command', NatlinkDictationContainer(Alice, command, Bob)]] """ import doctest from dragonfly import * from dragonfly.test.infrastructure import RecognitionFailure from dragonfly.test.element_testcase import ElementTestCase from dragonfly.test.element_tester import ElementTester class RecognitionAnalysisRule(CompoundRule): """ Base class that implements reporting in human-readable format details about the recognized phrase. It is used by the actual testing rules below, and allows the doctests above to be easily readable and informative. 
""" def _process_recognition(self, node, extras): Paste(text).execute() def value(self, node): return self.get_recognition_info(node) def get_recognition_info(self, node): output = [] output.append('Recognition: "{0}"'.format(" ".join(node.words()))) output.append('Word and rule pairs: ("1000000" is "dgndictation")') for word, rule in node.full_results(): output.append(" - {0} ({1})".format(word, rule)) output.append("Extras:") for key in sorted(extra.name for extra in self.extras): extra_node = node.get_child_by_name(key) if extra_node: output.append(" - {0}: {1}".format(key, extra_node.value())) return "\n".join(output) class ConsecutiveDictationRule(RecognitionAnalysisRule): spec = "consecutive <dictation1> <dictation2> <dictation3>" extras = [Dictation("dictation1"), Dictation("dictation2"), Dictation("dictation3")] class MixedDictationRule(RecognitionAnalysisRule): spec = "mixed [<dictation1>] <dictation2> command <dictation3>" extras = [Dictation("dictation1"), Dictation("dictation2"), Dictation("dictation3")] class RepeatedDictationRule(RecognitionAnalysisRule): spec = "<repetition>" extras = [Repetition(name="repetition", child=Sequence([Literal("command"), Dictation()]))] def main(): engine = get_engine() engine.connect() try: doctest.testmod(verbose=True) finally: engine.disconnect() if __name__ == "__main__": main()
import os
import unittest

from mock import patch, Mock

from tests.utils import (
    FakedCache,
    ObjectWithSignals,
    setup_test_env,
)

setup_test_env()

from softwarecenter.db.database import StoreDatabase
from softwarecenter.ui.gtk3.views import lobbyview
from softwarecenter.ui.gtk3.widgets.exhibits import (
    _HtmlRenderer,
)


class ExhibitsTestCase(unittest.TestCase):
    """The test suite for the exhibits carousel."""

    def setUp(self):
        self.cache = FakedCache()
        self.db = StoreDatabase(cache=self.cache)
        self.lobby = lobbyview.LobbyView(cache=self.cache, db=self.db,
                                         icons=None, apps_filter=None)
        self.addCleanup(self.lobby.destroy)

    def _get_banner_from_lobby(self):
        # The banner is the child of the last widget _append_banner_ads()
        # adds to the lobby's vbox.
        return self.lobby.vbox.get_children()[-1].get_child()

    def test_featured_exhibit_by_default(self):
        """Show the featured exhibit before querying the remote service."""
        self.lobby._append_banner_ads()
        banner = self._get_banner_from_lobby()
        self.assertEqual(1, len(banner.exhibits))
        self.assertIsInstance(banner.exhibits[0], lobbyview.FeaturedExhibit)

    def test_no_exhibit_if_not_available(self):
        """The exhibit should not be shown if the package is not available."""
        exhibit = Mock()
        exhibit.package_names = u'foobarbaz'

        sca = ObjectWithSignals()
        sca.query_exhibits = lambda: sca.emit('exhibits', sca, [exhibit])

        with patch.object(lobbyview, 'SoftwareCenterAgent', lambda: sca):
            self.lobby._append_banner_ads()

        # The unavailable exhibit is filtered out; the fallback featured
        # exhibit is shown instead.
        banner = self._get_banner_from_lobby()
        self.assertEqual(1, len(banner.exhibits))
        self.assertIsInstance(banner.exhibits[0], lobbyview.FeaturedExhibit)

    def test_exhibit_if_available(self):
        """The exhibit should be shown if the package is available."""
        exhibit = Mock()
        exhibit.package_names = u'foobarbaz'
        exhibit.banner_urls = ['banner']
        exhibit.title_translated = ''
        # Make the package "available" in the faked cache.
        self.cache[u'foobarbaz'] = Mock()

        sca = ObjectWithSignals()
        sca.query_exhibits = lambda: sca.emit('exhibits', sca, [exhibit])

        with patch.object(lobbyview, 'SoftwareCenterAgent', lambda: sca):
            self.lobby._append_banner_ads()

        banner = self._get_banner_from_lobby()
        self.assertEqual(1, len(banner.exhibits))
        self.assertIs(banner.exhibits[0], exhibit)

    def test_exhibit_if_mixed_availability(self):
        """The exhibit should be shown even if some are not available."""
        # available exhibit
        exhibit = Mock()
        exhibit.package_names = u'foobarbaz'
        exhibit.banner_urls = ['banner']
        exhibit.title_translated = ''
        self.cache[u'foobarbaz'] = Mock()
        # not available exhibit
        other = Mock()
        other.package_names = u'not-there'

        sca = ObjectWithSignals()
        sca.query_exhibits = lambda: sca.emit('exhibits', sca,
                                              [exhibit, other])

        with patch.object(lobbyview, 'SoftwareCenterAgent', lambda: sca):
            self.lobby._append_banner_ads()

        banner = self._get_banner_from_lobby()
        self.assertEqual(1, len(banner.exhibits))
        self.assertIs(banner.exhibits[0], exhibit)

    def test_exhibit_with_url(self):
        # available exhibit
        exhibit = Mock()
        exhibit.package_names = ''
        exhibit.click_url = 'http://example.com'
        exhibit.banner_urls = ['banner']
        exhibit.title_translated = ''

        sca = ObjectWithSignals()
        sca.query_exhibits = lambda: sca.emit('exhibits', sca, [exhibit])

        with patch.object(lobbyview, 'SoftwareCenterAgent', lambda: sca):
            # add the banners
            self.lobby._append_banner_ads()
            # fake click
            alloc = self.lobby.exhibit_banner.get_allocation()
            mock_event = Mock()
            mock_event.x = alloc.x
            mock_event.y = alloc.y
            with patch.object(self.lobby.exhibit_banner, 'emit') as mock_emit:
                self.lobby.exhibit_banner.on_button_press(None, mock_event)
                self.lobby.exhibit_banner.on_button_release(None, mock_event)
                mock_emit.assert_called()
                signal_name = mock_emit.call_args[0][0]
                call_exhibit = mock_emit.call_args[0][1]
                self.assertEqual(signal_name, "show-exhibits-clicked")
                self.assertEqual(call_exhibit.click_url,
                                 "http://example.com")

    def test_exhibit_with_featured_exhibit(self):
        """ regression test for bug #1023777 """
        sca = ObjectWithSignals()
        sca.query_exhibits = lambda: sca.emit('exhibits', sca,
                                              [lobbyview.FeaturedExhibit()])

        with patch.object(lobbyview, 'SoftwareCenterAgent', lambda: sca):
            # add the banners
            self.lobby._append_banner_ads()
            # fake click
            alloc = self.lobby.exhibit_banner.get_allocation()
            mock_event = Mock()
            mock_event.x = alloc.x
            mock_event.y = alloc.y
            with patch.object(self.lobby, 'emit') as mock_emit:
                self.lobby.exhibit_banner.on_button_press(None, mock_event)
                self.lobby.exhibit_banner.on_button_release(None, mock_event)
                mock_emit.assert_called()
                signal_name = mock_emit.call_args[0][0]
                call_category = mock_emit.call_args[0][1]
                self.assertEqual(signal_name, "category-selected")
                self.assertEqual(call_category.name, "Our star apps")


class HtmlRendererTestCase(unittest.TestCase):

    def test_multiple_images(self):
        # Fake downloader: "completes" immediately with the basename of
        # whatever URL was requested.
        downloader = ObjectWithSignals()
        downloader.download_file = lambda *args, **kwargs: downloader.emit(
            "file-download-complete", downloader, os.path.basename(args[0]))

        with patch("softwarecenter.ui.gtk3.widgets.exhibits."
                   "SimpleFileDownloader", lambda: downloader):
            renderer = _HtmlRenderer()
            mock_exhibit = Mock()
            mock_exhibit.banner_urls = [
                "http://example.com/path1/banner1.png",
                "http://example.com/path2/banner2.png",
            ]
            mock_exhibit.html = "url('/path1/banner1.png')#"\
                                "url('/path2/banner2.png')"
            renderer.set_exhibit(mock_exhibit)
            # assert the stuff we expected to get downloaded got downloaded
            self.assertEqual(
                renderer._downloaded_banner_images,
                ["banner1.png", "banner2.png"])
            # test that the path mangling worked
            self.assertEqual(
                mock_exhibit.html,
                "url('banner1.png')#url('banner2.png')")


if __name__ == "__main__":
    unittest.main()
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
    """South schema migration creating the taxonomy app's initial tables:
    Result, Tag, Category and Vote (plus a uniqueness constraint on Vote)."""

    def forwards(self, orm):
        # Adding model 'Result'
        db.create_table('taxonomy_result', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
            ('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
        ))
        db.send_create_signal('taxonomy', ['Result'])

        # Adding model 'Tag'
        db.create_table('taxonomy_tag', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100)),
            ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50, db_index=True)),
        ))
        db.send_create_signal('taxonomy', ['Tag'])

        # Adding model 'Category'
        db.create_table('taxonomy_category', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='children', null=True, to=orm['taxonomy.Category'])),
            ('title', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100)),
            ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50, db_index=True)),
        ))
        db.send_create_signal('taxonomy', ['Category'])

        # Adding model 'Vote'
        db.create_table('taxonomy_vote', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
            ('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('owner', self.gf('django.db.models.fields.related.ForeignKey')(related_name='poll_votes', to=orm['auth.User'])),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal('taxonomy', ['Vote'])

        # Adding unique constraint on 'Vote', fields ['owner', 'content_type', 'object_id']
        db.create_unique('taxonomy_vote', ['owner_id', 'content_type_id', 'object_id'])

    def backwards(self, orm):
        # Removing unique constraint on 'Vote', fields ['owner', 'content_type', 'object_id']
        db.delete_unique('taxonomy_vote', ['owner_id', 'content_type_id', 'object_id'])

        # Deleting model 'Result'
        db.delete_table('taxonomy_result')

        # Deleting model 'Tag'
        db.delete_table('taxonomy_tag')

        # Deleting model 'Category'
        db.delete_table('taxonomy_category')

        # Deleting model 'Vote'
        db.delete_table('taxonomy_vote')

    # Frozen ORM snapshot used by South; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'taxonomy.category': {
            'Meta': {'ordering': "('title',)", 'object_name': 'Category'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['taxonomy.Category']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        },
        'taxonomy.result': {
            'Meta': {'object_name': 'Result'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'taxonomy.tag': {
            'Meta': {'ordering': "('title',)", 'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        },
        'taxonomy.vote': {
            'Meta': {'unique_together': "(('owner', 'content_type', 'object_id'),)", 'object_name': 'Vote'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'poll_votes'", 'to': "orm['auth.User']"})
        }
    }

    complete_apps = ['taxonomy']
"""System tests for Google Cloud Memorystore operators""" import os from urllib.parse import urlparse import pytest from tests.providers.google.cloud.utils.gcp_authenticator import GCP_MEMORYSTORE from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project") GCP_ARCHIVE_URL = os.environ.get("GCP_MEMORYSTORE_EXPORT_GCS_URL", "gs://test-memorystore/my-export.rdb") GCP_ARCHIVE_URL_PARTS = urlparse(GCP_ARCHIVE_URL) GCP_BUCKET_NAME = GCP_ARCHIVE_URL_PARTS.netloc @pytest.mark.backend("mysql", "postgres") @pytest.mark.credential_file(GCP_MEMORYSTORE) class CloudMemorystoreSystemTest(GoogleSystemTest): """ System tests for Google Cloud Memorystore operators It use a real service. """ @provide_gcp_context(GCP_MEMORYSTORE) def setUp(self): super().setUp() self.create_gcs_bucket(GCP_BUCKET_NAME, location="europe-north1") @provide_gcp_context(GCP_MEMORYSTORE) def test_run_example_dag_memorystore_redis(self): self.run_dag('gcp_cloud_memorystore_redis', CLOUD_DAG_FOLDER) @provide_gcp_context(GCP_MEMORYSTORE) def test_run_example_dag_memorystore_memcached(self): self.run_dag('gcp_cloud_memorystore_memcached', CLOUD_DAG_FOLDER) @provide_gcp_context(GCP_MEMORYSTORE) def tearDown(self): self.delete_gcs_bucket(GCP_BUCKET_NAME) super().tearDown()
"""API tests for the /nodes/{id}/children/ endpoint: listing, filtering,
single and bulk creation of child nodes."""
import pytest

from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from osf.models import AbstractNode, NodeLog
from osf.utils import permissions
from osf.utils.sanitize import strip_html
from osf_tests.factories import (
    NodeFactory,
    ProjectFactory,
    OSFGroupFactory,
    RegistrationFactory,
    AuthUserFactory,
    PrivateLinkFactory,
)
from tests.base import fake


@pytest.fixture()
def user():
    return AuthUserFactory()


@pytest.mark.django_db
class TestNodeChildrenList:
    """Read access to a node's children list under various auth states."""

    @pytest.fixture()
    def private_project(self, user):
        # `user` is a WRITE contributor, not the creator.
        private_project = ProjectFactory()
        private_project.add_contributor(
            user,
            permissions=permissions.WRITE
        )
        private_project.save()
        return private_project

    @pytest.fixture()
    def component(self, user, private_project):
        return NodeFactory(parent=private_project, creator=user)

    @pytest.fixture()
    def pointer(self):
        return ProjectFactory()

    @pytest.fixture()
    def private_project_url(self, private_project):
        return '/{}nodes/{}/children/'.format(API_BASE, private_project._id)

    @pytest.fixture()
    def public_project(self, user):
        return ProjectFactory(is_public=True, creator=user)

    @pytest.fixture()
    def public_component(self, user, public_project):
        return NodeFactory(parent=public_project, creator=user, is_public=True)

    @pytest.fixture()
    def public_project_url(self, user, public_project):
        return '/{}nodes/{}/children/'.format(API_BASE, public_project._id)

    @pytest.fixture()
    def view_only_link(self, private_project):
        # VOL initially attached only to the project, not its components.
        view_only_link = PrivateLinkFactory(name='node_view_only_link')
        view_only_link.nodes.add(private_project)
        view_only_link.save()
        return view_only_link

    def test_return_public_node_children_list(
            self, app, public_component, public_project_url):

        # test_return_public_node_children_list_logged_out
        res = app.get(public_project_url)
        assert res.status_code == 200
        assert res.content_type == 'application/vnd.api+json'
        assert len(res.json['data']) == 1
        assert res.json['data'][0]['id'] == public_component._id

        # test_return_public_node_children_list_logged_in
        non_contrib = AuthUserFactory()
        res = app.get(public_project_url, auth=non_contrib.auth)
        assert res.status_code == 200
        assert res.content_type == 'application/vnd.api+json'
        assert len(res.json['data']) == 1
        assert res.json['data'][0]['id'] == public_component._id

    def test_return_private_node_children_list(
            self, app, user, component, private_project, private_project_url):

        # test_return_private_node_children_list_logged_out
        res = app.get(private_project_url, expect_errors=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]

        # test_return_private_node_children_list_logged_in_non_contributor
        non_contrib = AuthUserFactory()
        res = app.get(
            private_project_url,
            auth=non_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403
        assert 'detail' in res.json['errors'][0]

        # test_return_private_node_children_list_logged_in_contributor
        res = app.get(private_project_url, auth=user.auth)
        assert res.status_code == 200
        assert res.content_type == 'application/vnd.api+json'
        assert len(res.json['data']) == 1
        assert res.json['data'][0]['id'] == component._id

        # test_return_private_node_children_osf_group_member_admin
        group_mem = AuthUserFactory()
        group = OSFGroupFactory(creator=group_mem)
        private_project.add_osf_group(group, permissions.ADMIN)
        res = app.get(private_project_url, auth=group_mem.auth)
        assert res.status_code == 200
        # Can view node children that you have implict admin permissions
        assert len(res.json['data']) == 1
        assert res.json['data'][0]['id'] == component._id

    def test_node_children_list_does_not_include_pointers(
            self, app, user, component, private_project_url):
        res = app.get(private_project_url, auth=user.auth)
        assert len(res.json['data']) == 1

    def test_node_children_list_does_not_include_unauthorized_projects(
            self, app, user, component, private_project, private_project_url):
        # A child `user` cannot see must not appear in their listing.
        NodeFactory(parent=private_project)
        res = app.get(private_project_url, auth=user.auth)
        assert len(res.json['data']) == 1

    def test_node_children_list_does_not_include_deleted(
            self, app, user, public_project, public_component,
            component, public_project_url):
        child_project = NodeFactory(parent=public_project, creator=user)
        child_project.save()

        res = app.get(public_project_url, auth=user.auth)
        assert res.status_code == 200
        ids = [node['id'] for node in res.json['data']]
        assert child_project._id in ids
        assert 2 == len(ids)

        child_project.is_deleted = True
        child_project.save()

        res = app.get(public_project_url, auth=user.auth)
        assert res.status_code == 200
        ids = [node['id'] for node in res.json['data']]
        assert child_project._id not in ids
        assert 1 == len(ids)

    def test_node_children_list_does_not_include_node_links(
            self, app, user, public_project, public_component,
            public_project_url):
        pointed_to = ProjectFactory(is_public=True)
        public_project.add_pointer(
            pointed_to,
            auth=Auth(public_project.creator)
        )

        res = app.get(public_project_url, auth=user.auth)
        ids = [node['id'] for node in res.json['data']]
        assert public_component._id in ids  # sanity check
        assert pointed_to._id not in ids

    # Regression test for https://openscience.atlassian.net/browse/EMB-593
    # Duplicates returned in child count
    def test_node_children_related_counts_duplicate_query_results(self, app, user, public_project,
                                                                  private_project, public_project_url):
        user_2 = AuthUserFactory()

        # Adding a child component
        child = NodeFactory(parent=public_project, creator=user, is_public=True, category='software')
        child.add_contributor(user_2, permissions.WRITE, save=True)
        # Adding a grandchild
        NodeFactory(parent=child, creator=user, is_public=True)
        # Adding a node link
        public_project.add_pointer(
            private_project,
            auth=Auth(public_project.creator)
        )
        # Assert NodeChildrenList returns one result
        res = app.get(public_project_url, auth=user.auth)
        assert len(res.json['data']) == 1
        assert res.json['data'][0]['id'] == child._id

        project_url = '/{}nodes/{}/?related_counts=children'.format(API_BASE, public_project._id)
        res = app.get(project_url, auth=user.auth)
        assert res.status_code == 200
        # Verifying related_counts match direct children count (grandchildren not included, pointers not included)
        assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1

    def test_node_children_related_counts(self, app, user, public_project):
        parent = ProjectFactory(creator=user, is_public=False)
        user_2 = AuthUserFactory()
        parent.add_contributor(user_2, permissions.ADMIN)
        child = NodeFactory(parent=parent, creator=user_2, is_public=False, category='software')
        NodeFactory(parent=child, creator=user_2, is_public=False)

        # child has one component. `user` can view due to implict admin perms
        # NOTE(review): the `auth=user.auth` kwarg below is passed to
        # str.format(), which silently ignores it — auth is actually
        # supplied on the app.get() call. Harmless, but worth cleaning up.
        component_url = '/{}nodes/{}/children/'.format(API_BASE, child._id, auth=user.auth)
        res = app.get(component_url, auth=user.auth)
        assert len(res.json['data']) == 1

        project_url = '/{}nodes/{}/?related_counts=children'.format(API_BASE, child._id)
        res = app.get(project_url, auth=user.auth)
        assert res.status_code == 200
        # Nodes with implicit admin perms are also included in the count
        assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1

    def test_child_counts_permissions(self, app, user, public_project):
        NodeFactory(parent=public_project, creator=user)

        url = '/{}nodes/{}/?related_counts=children'.format(API_BASE, public_project._id)
        user_two = AuthUserFactory()

        # Unauthorized
        res = app.get(url)
        assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 0

        # Logged in noncontrib
        res = app.get(url, auth=user_two.auth)
        assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 0

        # Logged in contrib
        res = app.get(url, auth=user.auth)
        assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1

    def test_private_node_children_with_view_only_link(self, user, app, private_project,
                                                       component, view_only_link, private_project_url):
        # get node related_counts with vol before vol is attached to components
        node_url = '/{}nodes/{}/?related_counts=children&view_only={}'.format(API_BASE, private_project._id, view_only_link.key)
        res = app.get(node_url)
        assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 0

        # view only link is not attached to components
        view_only_link_url = '{}?view_only={}'.format(private_project_url, view_only_link.key)
        res = app.get(view_only_link_url)
        ids = [node['id'] for node in res.json['data']]
        assert res.status_code == 200
        assert len(ids) == 0
        assert component._id not in ids

        # view only link is attached to components
        view_only_link.nodes.add(component)
        res = app.get(view_only_link_url)
        ids = [node['id'] for node in res.json['data']]
        assert res.status_code == 200
        assert component._id in ids
        assert 'contributors' in res.json['data'][0]['relationships']
        assert 'implicit_contributors' in res.json['data'][0]['relationships']
        assert 'bibliographic_contributors' in res.json['data'][0]['relationships']

        # get node related_counts with vol once vol is attached to components
        res = app.get(node_url)
        assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1

        # make private vol anonymous
        view_only_link.anonymous = True
        view_only_link.save()
        res = app.get(view_only_link_url)
        # Anonymous VOLs must hide all contributor relationships.
        assert 'contributors' not in res.json['data'][0]['relationships']
        assert 'implicit_contributors' not in res.json['data'][0]['relationships']
        assert 'bibliographic_contributors' not in res.json['data'][0]['relationships']

        # delete vol
        view_only_link.is_deleted = True
        view_only_link.save()
        res = app.get(view_only_link_url, expect_errors=True)
        assert res.status_code == 401


@pytest.mark.django_db
class TestNodeChildrenListFiltering:
    """filter[title] query-parameter behavior on the children list."""

    def test_node_child_filtering(self, app, user):
        project = ProjectFactory(creator=user)

        title_one, title_two = fake.bs(), fake.bs()
        component = NodeFactory(title=title_one, parent=project)
        component_two = NodeFactory(title=title_two, parent=project)

        url = '/{}nodes/{}/children/?filter[title]={}'.format(
            API_BASE,
            project._id,
            title_one
        )
        res = app.get(url, auth=user.auth)
        ids = [node['id'] for node in res.json['data']]

        assert component._id in ids
        assert component_two._id not in ids


@pytest.mark.django_db
class TestNodeChildCreate:
    """POSTing a single child node: auth matrix and payload validation."""

    @pytest.fixture()
    def project(self, user):
        return ProjectFactory(creator=user, is_public=True)

    @pytest.fixture()
    def url(self, project):
        return '/{}nodes/{}/children/'.format(API_BASE, project._id)

    @pytest.fixture()
    def child(self):
        # Minimal valid JSON-API payload for a child node.
        return {
            'data': {
                'type': 'nodes',
                'attributes': {
                    'title': 'child',
                    'description': 'this is a child project',
                    'category': 'project'
                }
            }
        }

    def test_creates_child(self, app, user, project, child, url):
        # test_creates_child_logged_out_user
        res = app.post_json_api(url, child, expect_errors=True)
        assert res.status_code == 401
        project.reload()
        assert len(project.nodes) == 0

        # test_creates_child_logged_in_read_contributor
        read_contrib = AuthUserFactory()
        project.add_contributor(
            read_contrib,
            permissions=permissions.READ,
            auth=Auth(user), save=True
        )
        res = app.post_json_api(
            url, child,
            auth=read_contrib.auth,
            expect_errors=True
        )
        assert res.status_code == 403
        project.reload()
        assert len(project.nodes) == 0

        # test_creates_child_logged_in_non_contributor
        non_contrib = AuthUserFactory()
        res = app.post_json_api(
            url, child,
            auth=non_contrib.auth,
            expect_errors=True
        )
        assert res.status_code == 403
        project.reload()
        assert len(project.nodes) == 0

        # test_creates_child_group_member_read
        group_mem = AuthUserFactory()
        group = OSFGroupFactory(creator=group_mem)
        project.add_osf_group(group, permissions.READ)
        res = app.post_json_api(
            url, child,
            auth=group_mem.auth,
            expect_errors=True
        )
        assert res.status_code == 403
        project.update_osf_group(group, permissions.WRITE)
        # NOTE(review): expect_errors=True here just prevents webtest from
        # raising on non-2xx; the request is expected to succeed (201).
        res = app.post_json_api(
            url, child,
            auth=group_mem.auth,
            expect_errors=True
        )
        assert res.status_code == 201

        # test_creates_child_no_type
        child = {
            'data': {
                'attributes': {
                    'title': 'child',
                    'description': 'this is a child project',
                    'category': 'project',
                }
            }
        }
        res = app.post_json_api(url, child, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field may not be null.'
        assert res.json['errors'][0]['source']['pointer'] == '/data/type'

        # test_creates_child_incorrect_type
        child = {
            'data': {
                'type': 'Wrong type.',
                'attributes': {
                    'title': 'child',
                    'description': 'this is a child project',
                    'category': 'project',
                }
            }
        }
        res = app.post_json_api(url, child, auth=user.auth, expect_errors=True)
        assert res.status_code == 409
        assert res.json['errors'][0]['detail'] == 'This resource has a type of "nodes", but you set the json body\'s type field to "Wrong type.". You probably need to change the type field to match the resource\'s type.'

        # test_creates_child_properties_not_nested
        child = {
            'data': {
                'attributes': {
                    'title': 'child',
                    'description': 'this is a child project'
                },
                'category': 'project'
            }
        }
        res = app.post_json_api(url, child, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field is required.'
        assert res.json['errors'][0]['source']['pointer'] == '/data/attributes/category'

    def test_creates_child_logged_in_write_contributor(
            self, app, user, project, child, url):
        write_contrib = AuthUserFactory()
        project.add_contributor(
            write_contrib,
            permissions=permissions.WRITE,
            auth=Auth(user),
            save=True)
        res = app.post_json_api(url, child, auth=write_contrib.auth)
        assert res.status_code == 201
        assert res.json['data']['attributes']['title'] == child['data']['attributes']['title']
        assert res.json['data']['attributes']['description'] == child['data']['attributes']['description']
        assert res.json['data']['attributes']['category'] == child['data']['attributes']['category']

        project.reload()
        child_id = res.json['data']['id']
        assert child_id == project.nodes[0]._id
        assert AbstractNode.load(child_id).logs.latest(
        ).action == NodeLog.PROJECT_CREATED

    def test_creates_child_logged_in_owner(
            self, app, user, project, child, url):
        res = app.post_json_api(url, child, auth=user.auth)
        assert res.status_code == 201
        assert res.json['data']['attributes']['title'] == child['data']['attributes']['title']
        assert res.json['data']['attributes']['description'] == child['data']['attributes']['description']
        assert res.json['data']['attributes']['category'] == child['data']['attributes']['category']

        project.reload()
        assert res.json['data']['id'] == project.nodes[0]._id
        assert project.nodes[0].logs.latest().action == NodeLog.PROJECT_CREATED

    def test_creates_child_creates_child_and_sanitizes_html_logged_in_owner(
            self, app, user, project, url):
        # HTML in title/description must be stripped server-side.
        title = '<em>Reasonable</em> <strong>Project</strong>'
        description = 'An <script>alert("even reasonabler")</script> child'

        res = app.post_json_api(url, {
            'data': {
                'type': 'nodes',
                'attributes': {
                    'title': title,
                    'description': description,
                    'category': 'project',
                    'public': True
                }
            }
        }, auth=user.auth)
        child_id = res.json['data']['id']
        assert res.status_code == 201
        url = '/{}nodes/{}/'.format(API_BASE, child_id)
        res = app.get(url, auth=user.auth)
        assert res.json['data']['attributes']['title'] == strip_html(title)
        assert res.json['data']['attributes']['description'] == strip_html(
            description)
        assert res.json['data']['attributes']['category'] == 'project'

        project.reload()
        child_id = res.json['data']['id']
        assert child_id == project.nodes[0]._id
        assert AbstractNode.load(child_id).logs.latest(
        ).action == NodeLog.PROJECT_CREATED

    def test_cannot_create_child_on_a_registration(self, app, user, project):
        registration = RegistrationFactory(project=project, creator=user)
        url = '/{}nodes/{}/children/'.format(API_BASE, registration._id)
        res = app.post_json_api(url, {
            'data': {
                'type': 'nodes',
                'attributes': {
                    'title': fake.catch_phrase(),
                    'description': fake.bs(),
                    'category': 'project',
                    'public': True,
                }
            }
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 404


@pytest.mark.django_db
class TestNodeChildrenBulkCreate:
    """Bulk POST of children: limits, auth matrix, payload validation."""

    @pytest.fixture()
    def project(self, user):
        return ProjectFactory(creator=user, is_public=True)

    @pytest.fixture()
    def url(self, project):
        return '/{}nodes/{}/children/'.format(API_BASE, project._id)

    @pytest.fixture()
    def child_one(self):
        return {
            'type': 'nodes',
            'attributes': {
                'title': 'child',
                'description': 'this is a child project',
                'category': 'project'
            }
        }

    @pytest.fixture()
    def child_two(self):
        return {
            'type': 'nodes',
            'attributes': {
                'title': 'second child',
                'description': 'this is my hypothesis',
                'category': 'hypothesis'
            }
        }

    def test_bulk_children_create_blank_request(self, app, user, url):
        res = app.post_json_api(
            url, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400

    def test_bulk_creates_children_limits(self, app, user, child_one, url):
        # Bulk operations are capped at 100 items.
        res = app.post_json_api(
            url, {'data': [child_one] * 101},
            auth=user.auth, expect_errors=True, bulk=True
        )
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Bulk operation limit is 100, got 101.'
        assert res.json['errors'][0]['source']['pointer'] == '/data'

    def test_bulk_creates_children_auth_errors(
            self, app, user, project, child_one, child_two, url):
        # test_bulk_creates_children_logged_out_user
        res = app.post_json_api(
            url, {'data': [child_one, child_two]},
            expect_errors=True, bulk=True
        )
        assert res.status_code == 401
        project.reload()
        assert len(project.nodes) == 0

        # test_bulk_creates_children_logged_in_read_contributor
        read_contrib = AuthUserFactory()
        project.add_contributor(
            read_contrib,
            permissions=permissions.READ,
            auth=Auth(user),
            save=True)
        res = app.post_json_api(
            url, {'data': [child_one, child_two]},
            auth=read_contrib.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 403
        project.reload()
        assert len(project.nodes) == 0

        # test_bulk_creates_children_logged_in_non_contributor
        non_contrib = AuthUserFactory()
        res = app.post_json_api(
            url, {'data': [child_one, child_two]},
            auth=non_contrib.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 403
        project.reload()
        assert len(project.nodes) == 0

    def test_bulk_creates_children_logged_in_owner(
            self, app, user, project, child_one, child_two, url):
        res = app.post_json_api(
            url, {'data': [child_one, child_two]},
            auth=user.auth, bulk=True)
        assert res.status_code == 201
        assert res.json['data'][0]['attributes']['title'] == child_one['attributes']['title']
        assert res.json['data'][0]['attributes']['description'] == child_one['attributes']['description']
        assert res.json['data'][0]['attributes']['category'] == child_one['attributes']['category']
        assert res.json['data'][1]['attributes']['title'] == child_two['attributes']['title']
        assert res.json['data'][1]['attributes']['description'] == child_two['attributes']['description']
        assert res.json['data'][1]['attributes']['category'] == child_two['attributes']['category']

        project.reload()
        nodes = project.nodes
        assert res.json['data'][0]['id'] == nodes[0]._id
        assert res.json['data'][1]['id'] == nodes[1]._id

        assert nodes[0].logs.latest().action == NodeLog.PROJECT_CREATED
        assert nodes[1].logs.latest().action == NodeLog.PROJECT_CREATED

    def test_bulk_creates_children_child_logged_in_write_contributor(
            self, app, user, project, child_one, child_two, url):
        write_contrib = AuthUserFactory()
        project.add_contributor(
            write_contrib,
            permissions=permissions.WRITE,
            auth=Auth(user),
            save=True)

        res = app.post_json_api(
            url, {'data': [child_one, child_two]},
            auth=write_contrib.auth, bulk=True)
        assert res.status_code == 201
        assert res.json['data'][0]['attributes']['title'] == child_one['attributes']['title']
        assert res.json['data'][0]['attributes']['description'] == child_one['attributes']['description']
        assert res.json['data'][0]['attributes']['category'] == child_one['attributes']['category']
        assert res.json['data'][1]['attributes']['title'] == child_two['attributes']['title']
        assert res.json['data'][1]['attributes']['description'] == child_two['attributes']['description']
        assert res.json['data'][1]['attributes']['category'] == child_two['attributes']['category']

        project.reload()
        child_id = res.json['data'][0]['id']
        child_two_id = res.json['data'][1]['id']
        nodes = project.nodes
        assert child_id == nodes[0]._id
        assert child_two_id == nodes[1]._id

        assert AbstractNode.load(child_id).logs.latest(
        ).action == NodeLog.PROJECT_CREATED
        assert nodes[1].logs.latest().action == NodeLog.PROJECT_CREATED

    def test_bulk_creates_children_and_sanitizes_html_logged_in_owner(
            self, app, user, project, url):
        title = '<em>Reasoning</em> <strong>Aboot Projects</strong>'
        description = 'A <script>alert("super reasonable")</script> child'

        res = app.post_json_api(url, {
            'data': [{
                'type': 'nodes',
                'attributes': {
                    'title': title,
                    'description': description,
                    'category': 'project',
                    'public': True
                }
            }]
        }, auth=user.auth, bulk=True)
        child_id = res.json['data'][0]['id']
        assert res.status_code == 201
        url = '/{}nodes/{}/'.format(API_BASE, child_id)
        res = app.get(url, auth=user.auth)
        assert res.json['data']['attributes']['title'] == strip_html(title)
        assert res.json['data']['attributes']['description'] == strip_html(
            description)
        assert res.json['data']['attributes']['category'] == 'project'

        project.reload()
        child_id = res.json['data']['id']
        assert child_id == project.nodes[0]._id
        assert AbstractNode.load(child_id).logs.latest(
        ).action == NodeLog.PROJECT_CREATED

    def test_cannot_bulk_create_children_on_a_registration(
            self, app, user, project, child_two):
        registration = RegistrationFactory(project=project, creator=user)
        url = '/{}nodes/{}/children/'.format(API_BASE, registration._id)
        res = app.post_json_api(url, {
            'data': [child_two, {
                'type': 'nodes',
                'attributes': {
                    'title': fake.catch_phrase(),
                    'description': fake.bs(),
                    'category': 'project',
                    'public': True,
                }
            }]
        }, auth=user.auth, expect_errors=True, bulk=True)
        assert res.status_code == 404

        project.reload()
        assert len(project.nodes) == 0

    def test_bulk_creates_children_payload_errors(
            self, app, user, project, child_two, url):
        # def test_bulk_creates_children_no_type(self, app, user, project,
        # child_two, url):
        child = {
            'data': [child_two, {
                'attributes': {
                    'title': 'child',
                    'description': 'this is a child project',
                    'category': 'project',
                }
            }]
        }
        res = app.post_json_api(
            url, child, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field may not be null.'
        assert res.json['errors'][0]['source']['pointer'] == '/data/1/type'
        project.reload()
        assert len(project.nodes) == 0

        # def test_bulk_creates_children_incorrect_type(self, app, user, project,
        # child_two, url):
        child = {
            'data': [child_two, {
                'type': 'Wrong type.',
                'attributes': {
                    'title': 'child',
                    'description': 'this is a child project',
                    'category': 'project',
                }
            }]
        }
        res = app.post_json_api(
            url, child, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 409
        assert res.json['errors'][0]['detail'] == 'This resource has a type of "nodes", but you set the json body\'s type field to "Wrong type.". You probably need to change the type field to match the resource\'s type.'
        project.reload()
        assert len(project.nodes) == 0

        # def test_bulk_creates_children_properties_not_nested(self, app, user,
        # project, child_two, url):
        child = {
            'data': [child_two, {
                'title': 'child',
                'description': 'this is a child project',
                'category': 'project',
            }]
        }
        res = app.post_json_api(
            url, child, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field is required.'
        assert res.json['errors'][0]['source']['pointer'] == '/data/1/attributes/category'
        project.reload()
        assert len(project.nodes) == 0
"""Fastfood Chef Cookbook manager.""" from __future__ import print_function import os from fastfood import utils class CookBook(object): """Chef Cookbook object. Understands metadata.rb, Berksfile and how to parse them. """ def __init__(self, path): """Initialize CookBook wrapper at 'path'.""" self.path = utils.normalize_path(path) self._metadata = None if not os.path.isdir(path): raise ValueError("Cookbook dir %s does not exist." % self.path) self._berksfile = None @property def name(self): """Cookbook name property.""" try: return self.metadata.to_dict()['name'] except KeyError: raise LookupError("%s is missing 'name' attribute'." % self.metadata) @property def metadata(self): """Return dict representation of this cookbook's metadata.rb .""" self.metadata_path = os.path.join(self.path, 'metadata.rb') if not os.path.isfile(self.metadata_path): raise ValueError("Cookbook needs metadata.rb, %s" % self.metadata_path) if not self._metadata: self._metadata = MetadataRb(open(self.metadata_path, 'r+')) return self._metadata @property def berksfile(self): """Return this cookbook's Berksfile instance.""" self.berks_path = os.path.join(self.path, 'Berksfile') if not self._berksfile: if not os.path.isfile(self.berks_path): raise ValueError("No Berksfile found at %s" % self.berks_path) self._berksfile = Berksfile(open(self.berks_path, 'r+')) return self._berksfile class MetadataRb(utils.FileWrapper): """Wrapper for a metadata.rb file.""" @classmethod def from_dict(cls, dictionary): """Create a MetadataRb instance from a dict.""" cookbooks = set() # put these in order groups = [cookbooks] for key, val in dictionary.items(): if key == 'depends': cookbooks.update({cls.depends_statement(cbn, meta) for cbn, meta in val.items()}) body = '' for group in groups: if group: body += '\n' body += '\n'.join(group) return cls.from_string(body) @staticmethod def depends_statement(cookbook_name, metadata=None): """Return a valid Ruby 'depends' statement for the metadata.rb file.""" line = 
"depends '%s'" % cookbook_name if metadata: if not isinstance(metadata, dict): raise TypeError("Stencil dependency options for %s " "should be a dict of options, not %s." % (cookbook_name, metadata)) if metadata: line = "%s '%s'" % (line, "', '".join(metadata)) return line def to_dict(self): """Return a dictionary representation of this metadata.rb file.""" return self.parse() def parse(self): """Parse the metadata.rb into a dict.""" data = utils.ruby_lines(self.readlines()) data = [tuple(j.strip() for j in line.split(None, 1)) for line in data] depends = {} for line in data: if not len(line) == 2: continue key, value = line if key == 'depends': value = value.split(',') lib = utils.ruby_strip(value[0]) detail = [utils.ruby_strip(j) for j in value[1:]] depends[lib] = detail datamap = {key: utils.ruby_strip(val) for key, val in data} if depends: datamap['depends'] = depends self.seek(0) return datamap def merge(self, other): """Add requirements from 'other' metadata.rb into this one.""" if not isinstance(other, MetadataRb): raise TypeError("MetadataRb to merge should be a 'MetadataRb' " "instance, not %s.", type(other)) current = self.to_dict() new = other.to_dict() # compare and gather cookbook dependencies meta_writelines = ['%s\n' % self.depends_statement(cbn, meta) for cbn, meta in new.get('depends', {}).items() if cbn not in current.get('depends', {})] self.write_statements(meta_writelines) return self.to_dict() class Berksfile(utils.FileWrapper): """Wrapper for a Berksfile.""" berks_options = [ 'branch', 'git', 'path', 'ref', 'revision', 'tag', ] def to_dict(self): """Return a dictionary representation of this Berksfile.""" return self.parse() def parse(self): """Parse this Berksfile into a dict.""" self.flush() self.seek(0) data = utils.ruby_lines(self.readlines()) data = [tuple(j.strip() for j in line.split(None, 1)) for line in data] datamap = {} for line in data: if len(line) == 1: datamap[line[0]] = True elif len(line) == 2: key, value = line if key == 
'cookbook': datamap.setdefault('cookbook', {}) value = [utils.ruby_strip(v) for v in value.split(',')] lib, detail = value[0], value[1:] datamap['cookbook'].setdefault(lib, {}) # if there is additional dependency data but its # not the ruby hash, its the version constraint if detail and not any("".join(detail).startswith(o) for o in self.berks_options): constraint, detail = detail[0], detail[1:] datamap['cookbook'][lib]['constraint'] = constraint if detail: for deet in detail: opt, val = [ utils.ruby_strip(i) for i in deet.split(':', 1) ] if not any(opt == o for o in self.berks_options): raise ValueError( "Cookbook detail '%s' does not specify " "one of '%s'" % (opt, self.berks_options)) else: datamap['cookbook'][lib][opt.strip(':')] = ( utils.ruby_strip(val)) elif key == 'source': datamap.setdefault(key, []) datamap[key].append(utils.ruby_strip(value)) elif key: datamap[key] = utils.ruby_strip(value) self.seek(0) return datamap @classmethod def from_dict(cls, dictionary): """Create a Berksfile instance from a dict.""" cookbooks = set() sources = set() other = set() # put these in order groups = [sources, cookbooks, other] for key, val in dictionary.items(): if key == 'cookbook': cookbooks.update({cls.cookbook_statement(cbn, meta) for cbn, meta in val.items()}) elif key == 'source': sources.update({"source '%s'" % src for src in val}) elif key == 'metadata': other.add('metadata') body = '' for group in groups: if group: body += '\n' body += '\n'.join(group) return cls.from_string(body) @staticmethod def cookbook_statement(cookbook_name, metadata=None): """Return a valid Ruby 'cookbook' statement for the Berksfile.""" line = "cookbook '%s'" % cookbook_name if metadata: if not isinstance(metadata, dict): raise TypeError("Berksfile dependency hash for %s " "should be a dict of options, not %s." % (cookbook_name, metadata)) # not like the others... 
if 'constraint' in metadata: line += ", '%s'" % metadata.pop('constraint') for opt, spec in metadata.items(): line += ", %s: '%s'" % (opt, spec) return line def merge(self, other): """Add requirements from 'other' Berksfile into this one.""" if not isinstance(other, Berksfile): raise TypeError("Berksfile to merge should be a 'Berksfile' " "instance, not %s.", type(other)) current = self.to_dict() new = other.to_dict() # compare and gather cookbook dependencies berks_writelines = ['%s\n' % self.cookbook_statement(cbn, meta) for cbn, meta in new.get('cookbook', {}).items() if cbn not in current.get('cookbook', {})] # compare and gather 'source' requirements berks_writelines.extend(["source '%s'\n" % src for src in new.get('source', []) if src not in current.get('source', [])]) self.write_statements(berks_writelines) return self.to_dict()
import webob

from nova.api.openstack.compute import flavors as flavors_api
from nova.api.openstack.compute.views import flavors as flavors_view
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.compute import flavors
from nova import exception
from nova.openstack.common.gettextutils import _


authorize = extensions.extension_authorizer('compute', 'flavormanage')


class FlavorManageController(wsgi.Controller):
    """The Flavor Lifecycle API controller for the OpenStack API.

    Exposes admin-style create/delete actions on the 'flavors' resource.
    """

    _view_builder_class = flavors_view.ViewBuilder

    def __init__(self):
        super(FlavorManageController, self).__init__()

    @wsgi.action("delete")
    def _delete(self, req, id):
        """Delete the flavor identified by ``id``.

        Responds 404 when no live (not soft-deleted) flavor matches, and
        202 once the flavor has been destroyed.
        """
        ctx = req.environ['nova.context']
        authorize(ctx)

        try:
            flavor = flavors.get_flavor_by_flavor_id(
                id, ctxt=ctx, read_deleted="no")
        except exception.NotFound as err:
            raise webob.exc.HTTPNotFound(explanation=err.format_message())

        flavors.destroy(flavor['name'])
        return webob.Response(status_int=202)

    @wsgi.action("create")
    @wsgi.serializers(xml=flavors_api.FlavorTemplate)
    def _create(self, req, body):
        """Create a new flavor from the request body.

        Grants the caller's project access when the flavor is non-public.
        Responds 400 on a malformed body or invalid input, 409 when the
        name or id already exists.
        """
        ctx = req.environ['nova.context']
        authorize(ctx)

        if not self.is_valid_body(body, 'flavor'):
            msg = _("Invalid request body")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        spec = body['flavor']
        name = spec.get('name')
        flavorid = spec.get('id')
        memory = spec.get('ram')
        vcpus = spec.get('vcpus')
        root_gb = spec.get('disk')
        # Extension attributes carry defaults when omitted.
        ephemeral_gb = spec.get('OS-FLV-EXT-DATA:ephemeral', 0)
        swap = spec.get('swap', 0)
        rxtx_factor = spec.get('rxtx_factor', 1.0)
        is_public = spec.get('os-flavor-access:is_public', True)

        try:
            flavor = flavors.create(name, memory, vcpus, root_gb,
                                    ephemeral_gb=ephemeral_gb,
                                    flavorid=flavorid, swap=swap,
                                    rxtx_factor=rxtx_factor,
                                    is_public=is_public)
            if not flavor['is_public']:
                # Private flavors are only visible to projects granted
                # access; grant the creating project immediately.
                flavors.add_flavor_access(flavor['flavorid'],
                                          ctx.project_id, ctx)
            req.cache_db_flavor(flavor)
        except (exception.InstanceTypeExists,
                exception.InstanceTypeIdExists) as err:
            raise webob.exc.HTTPConflict(explanation=err.format_message())
        except exception.InvalidInput as exc:
            raise webob.exc.HTTPBadRequest(explanation=exc.format_message())

        return self._view_builder.show(req, flavor)


class Flavormanage(extensions.ExtensionDescriptor):
    """Flavor create/delete API support."""

    name = "FlavorManage"
    alias = "os-flavor-manage"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "flavor_manage/api/v1.1")
    updated = "2012-01-19T00:00:00+00:00"

    def get_controller_extensions(self):
        """Attach the manage controller to the 'flavors' resource."""
        return [extensions.ControllerExtension(
            self, 'flavors', FlavorManageController())]
# Integration tests for the Concourse Python client driver, run against a
# local "Mockcourse" process that fakes the Concourse server API.
from nose.tools import *
import os
import time
from subprocess import *
import signal
from . import test_data
from concourse import Concourse, Tag, Link, Diff, Operator, constants
from concourse.thriftapi.shared.ttypes import Type
from concourse.utils import python_to_thrift
import ujson
from tests import ignore
import socket


class IntegrationBaseTest(object):
    """
    Base class for unit tests that use Mockcourse.

    Starts a single Mockcourse process and one shared client connection per
    test class, and clears the datastore between individual tests.
    """

    # Shared, class-level fixture state (populated in setup_class).
    port = None
    process = None
    client = None
    # Seconds to sleep so that a locally-taken time anchor is safely in the
    # past from the server's perspective.
    expected_network_latency = 0.05

    @classmethod
    def setup_class(cls):
        """
        Fixture method to start Mockcourse and connect before the tests
        start to run.
        """
        port = IntegrationBaseTest.get_open_port()
        dir = os.path.dirname(os.path.realpath(__file__)) + '/../../mockcourse'
        script = dir + '/mockcourse '+str(port)
        # shell=True plus os.setsid puts the launcher and everything it spawns
        # in a fresh process group, so teardown_class can killpg them all.
        cls.process = Popen(script, shell=True, preexec_fn=os.setsid)
        cls.client = None
        # Mockcourse needs time to boot: retry the connection up to 5 times,
        # re-raising the last failure only when retries are exhausted.
        tries = 5
        while tries > 0 and cls.client is None:
            tries -= 1
            time.sleep(1)  # Wait for Mockcourse to start
            try:
                cls.client = Concourse.connect(port=port)
            except RuntimeError as e:
                if tries == 0:
                    raise e
                else:
                    continue

    @classmethod
    def teardown_class(cls):
        """
        Fixture method to kill Mockcourse after all the tests have run.
        """
        # Kill the entire process group created in setup_class.
        os.killpg(cls.process.pid, signal.SIGTERM)

    def tearDown(self):
        """
        Logout and clear all the data that the client stored in Mockcourse
        after each test. This ensures that the environment for each test is
        clean and predictable.
        """
        self.client.logout()  # Mockcourse logout simply clears the content of the datastore

    def get_time_anchor(self):
        """
        Return a time anchor and sleep for long enough to account for
        network latency.
        """
        anchor = test_data.current_time_millis()
        time.sleep(self.expected_network_latency)
        return anchor

    @staticmethod
    def get_open_port():
        """Return an open port that is chosen by the OS.

        NOTE(review): the port is released before Mockcourse binds it, so
        another process could grab it in between (TOCTOU race); acceptable
        for a test fixture.
        """
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind(("localhost", 0))
        port = sock.getsockname()[1]
        sock.close()
        return port


class TestPythonClientDriver(IntegrationBaseTest):
    """
    Implementations for standard unit tests that verify the Python client
    driver conforms to the Concourse standard.
    """

    def __do_test_value_round_trip(self, value, ttype):
        """
        Do the round_trip test logic for the specified value of the
        specified type: store the value, read it back, and assert that both
        the value and its wire (thrift) type survived the trip.

        :param value: the value to round-trip through the server
        :param ttype: the expected thrift ``Type`` of the stored value
        """
        key = test_data.random_string()
        record = self.client.add(key=key, value=value)
        stored = self.client.get(key=key, record=record)
        assert_equal(value, stored)
        assert_equal(python_to_thrift(stored).type, ttype)

    def test_string_round_trip(self):
        self.__do_test_value_round_trip(test_data.random_string(), Type.STRING)

    def test_bool_round_trip(self):
        self.__do_test_value_round_trip(test_data.random_bool(), Type.BOOLEAN)

    def test_tag_round_trip(self):
        self.__do_test_value_round_trip(Tag.create(test_data.random_string()), Type.TAG)

    def test_link_round_trip(self):
        self.__do_test_value_round_trip(Link.to(test_data.random_int()), Type.LINK)

    def test_int_round_trip(self):
        # 32-bit boundary values must remain INTEGER.
        self.__do_test_value_round_trip(test_data.random_int(), Type.INTEGER)
        self.__do_test_value_round_trip(2147483647, Type.INTEGER)
        self.__do_test_value_round_trip(-2147483648, Type.INTEGER)

    def test_long_round_trip(self):
        # Values just outside the 32-bit range must be promoted to LONG.
        self.__do_test_value_round_trip(2147483648, Type.LONG)
        self.__do_test_value_round_trip(-2147483649, Type.LONG)
        self.__do_test_value_round_trip(test_data.random_long(), Type.LONG)

    def test_float_round_trip(self):
        self.__do_test_value_round_trip(3.4028235E38,
Type.DOUBLE) self.__do_test_value_round_trip(-1.4E-45, Type.DOUBLE) def test_abort(self): self.client.stage() key = test_data.random_string() value = "some value" record = 1 self.client.add(key=key, value=value, record=record) self.client.abort() assert_is_none(self.client.get(key=key, record=record)) def test_add_key_value(self): key = test_data.random_string() value = "static value" record = self.client.add(key=key, value=value) assert_is_not_none(record) stored = self.client.get(key=key, record=record) assert_equal(stored, value) def test_add_key_value_record(self): key = test_data.random_string() value = "static value" record = 17 assert_true(self.client.add(key=key, value=value, record=record)) stored = self.client.get(key=key, record=record) assert_equal(stored, value) def test_add_key_value_records(self): key = test_data.random_string() value = "static value" records = [1, 2, 3] result = self.client.add(key=key, value=value, records=records) assert_true(isinstance(result, dict)) assert_true(result.get(1)) assert_true(result.get(2)) assert_true(result.get(3)) def test_audit_key_record(self): key = test_data.random_string() values = ["one", "two", "three"] record = 1000 for value in values: self.client.set(key, value, record) audit = self.client.audit(key, record) assert_equal(5, len(audit)) expected = 'ADD' for k, v in audit.items(): assert_true(v.startswith(expected)) expected = 'REMOVE' if expected == 'ADD' else 'ADD' def test_audit_key_record_start(self): key = test_data.random_string() values = ["one", "two", "three"] record = 1001 for value in values: self.client.set(key, value, record) start = self.client.time() values = [4, 5, 6] for value in values: self.client.set(key, value, record) audit = self.client.audit(key, record, start=start) assert_equal(6, len(audit)) def test_audit_key_record_start_end(self): key = test_data.random_string() values = ["one", "two", "three"] record = 1002 for value in values: self.client.set(key, value, record) start = 
self.client.time() values = [4, 5, 6] for value in values: self.client.set(key, value, record) end = self.client.time() values = [True, False] for value in values: self.client.set(key, value, record) audit = self.client.audit(key, record, start=start, end=end) assert_equal(6, len(audit)) def test_audit_key_record_startstr(self): key = test_data.random_string() values = ["one", "two", "three"] record = 1001 for value in values: self.client.set(key, value, record) anchor = self.get_time_anchor() values = [4, 5, 6] for value in values: self.client.set(key, value, record) start = test_data.get_elapsed_millis_string(anchor) audit = self.client.audit(key, record, start=start) assert_equal(6, len(audit)) def test_audit_key_record_startstr_endstr(self): key = test_data.random_string() values = ["one", "two", "three"] record = 1002 for value in values: self.client.set(key, value, record) start_anchor = self.get_time_anchor() values = [4, 5, 6] for value in values: self.client.set(key, value, record) end_anchor = self.get_time_anchor() values = [True, False] for value in values: self.client.set(key, value, record) start = test_data.get_elapsed_millis_string(start_anchor) end = test_data.get_elapsed_millis_string(end_anchor) audit = self.client.audit(key, record, start=start, end=end) assert_equal(6, len(audit)) def test_audit_record(self): key1 = test_data.random_string() key2 = test_data.random_string() key3 = test_data.random_string() value = "foo" record = 1002 self.client.add(key1, value, record) self.client.add(key2, value, record) self.client.add(key3, value, record) audit = self.client.audit(record) assert_equal(3, len(audit)) def test_audit_record_start(self): key1 = test_data.random_string() key2 = test_data.random_string() key3 = test_data.random_string() value = "bar" record = 344 self.client.add(key1, value, record) self.client.add(key2, value, record) self.client.add(key3, value, record) start = self.client.time() self.client.remove(key1, value, record) 
self.client.remove(key2, value, record) self.client.remove(key3, value, record) audit = self.client.audit(record, start=start) assert_equal(3, len(audit)) def test_audit_record_start_end(self): key1 = test_data.random_string() key2 = test_data.random_string() key3 = test_data.random_string() value = "bar" record = 344 self.client.add(key1, value, record) self.client.add(key2, value, record) self.client.add(key3, value, record) start = self.client.time() self.client.remove(key1, value, record) self.client.remove(key2, value, record) self.client.remove(key3, value, record) end = self.client.time() self.client.add(key1, value, record) self.client.add(key2, value, record) self.client.add(key3, value, record) audit = self.client.audit(record, start=start, end=end) assert_equal(3, len(audit)) def test_audit_record_startstr(self): key1 = test_data.random_string() key2 = test_data.random_string() key3 = test_data.random_string() value = "bar" record = 344 self.client.add(key1, value, record) self.client.add(key2, value, record) self.client.add(key3, value, record) anchor = self.get_time_anchor() self.client.remove(key1, value, record) self.client.remove(key2, value, record) self.client.remove(key3, value, record) start = test_data.get_elapsed_millis_string(anchor) audit = self.client.audit(record, start=start) assert_equal(3, len(audit)) def test_audit_record_startstr_endstr(self): key1 = test_data.random_string() key2 = test_data.random_string() key3 = test_data.random_string() value = "bar" record = 344 self.client.add(key1, value, record) self.client.add(key2, value, record) self.client.add(key3, value, record) start_anchor = self.get_time_anchor() self.client.remove(key1, value, record) self.client.remove(key2, value, record) self.client.remove(key3, value, record) end_anchor = self.get_time_anchor() self.client.add(key1, value, record) self.client.add(key2, value, record) self.client.add(key3, value, record) start = test_data.get_elapsed_millis_string(start_anchor) 
end = test_data.get_elapsed_millis_string(end_anchor) audit = self.client.audit(record, start=start, end=end) assert_equal(3, len(audit)) def test_browse_key(self): key = test_data.random_string() value = 10 self.client.add(key, value, [1, 2, 3]) value = test_data.random_string() self.client.add(key, value, [10, 20, 30]) data = self.client.browse(key) assert_equal([1, 2, 3], data.get(10)) assert_equal([20, 10, 30], data.get(value)) def test_browse_keys(self): key1 = test_data.random_string() key2 = test_data.random_string() key3 = test_data.random_string() value1 = "A" value2 = "B" value3 = "C" record1 = 1 record2 = 2 record3 = 3 self.client.add(key1, value1, record1) self.client.add(key2, value2, record2) self.client.add(key3, value3, record3) data = self.client.browse([key1, key2, key3]) assert_equal({value1: [record1]}, data.get(key1)) assert_equal({value2: [record2]}, data.get(key2)) assert_equal({value3: [record3]}, data.get(key3)) def test_browse_keys_time(self): key1 = test_data.random_string() key2 = test_data.random_string() key3 = test_data.random_string() value1 = "A" value2 = "B" value3 = "C" record1 = 1 record2 = 2 record3 = 3 self.client.add(key1, value1, record1) self.client.add(key2, value2, record2) self.client.add(key3, value3, record3) time = self.client.time() self.client.add(key1, "Foo") self.client.add(key2, "Foo") self.client.add(key3, "Foo") data = self.client.browse([key1, key2, key3], time=time) assert_equal({value1: [record1]}, data.get(key1)) assert_equal({value2: [record2]}, data.get(key2)) assert_equal({value3: [record3]}, data.get(key3)) def test_browse_key_timestr(self): key1 = test_data.random_string() key2 = test_data.random_string() key3 = test_data.random_string() value1 = "A" value2 = "B" value3 = "C" record1 = 1 record2 = 2 record3 = 3 self.client.add(key1, value1, record1) self.client.add(key2, value2, record2) self.client.add(key3, value3, record3) ts = test_data.get_elapsed_millis_string(self.get_time_anchor()) data = 
self.client.browse([key1, key2, key3], time=ts) assert_equal({value1: [record1]}, data.get(key1)) assert_equal({value2: [record2]}, data.get(key2)) assert_equal({value3: [record3]}, data.get(key3)) @ignore def test_browse_keys_timestr(self): key1 = test_data.random_string() key2 = test_data.random_string() key3 = test_data.random_string() value1 = "A" value2 = "B" value3 = "C" record1 = 1 record2 = 2 record3 = 3 self.client.add(key1, value1, record1) self.client.add(key2, value2, record2) self.client.add(key3, value3, record3) anchor = self.get_time_anchor() self.client.add(key1, "D", record1) ts = test_data.get_elapsed_millis_string(anchor) data = self.client.browse([key1, key2, key3], time=ts) assert_equal({value1: [record1]}, data.get(key1)) assert_equal({value2: [record2]}, data.get(key2)) assert_equal({value3: [record3]}, data.get(key3)) def test_browse_key_time(self): key = test_data.random_string() value = 10 self.client.add(key, value, [1, 2, 3]) value = test_data.random_string() self.client.add(key, value, [10, 20, 30]) timestamp = self.client.time() self.client.add(key=key, value=True) data = self.client.browse(key, timestamp) assert_equal([1, 2, 3], data.get(10)) assert_equal([20, 10, 30], data.get(value)) def test_chronologize_key_record(self): key = test_data.random_string() record = test_data.random_long() self.client.add(key, 1, record) self.client.add(key, 2, record) self.client.add(key, 3, record) self.client.remove(key, 1, record) self.client.remove(key, 2, record) self.client.remove(key, 3, record) data = self.client.chronologize(key, record) assert_equal([[1], [1, 2], [1, 2, 3], [2, 3], [3]], list(data.values())) def test_chronologize_key_record_start(self): key = test_data.random_string() record = test_data.random_long() self.client.add(key, 1, record) self.client.add(key, 2, record) self.client.add(key, 3, record) start = self.client.time() self.client.remove(key, 1, record) self.client.remove(key, 2, record) self.client.remove(key, 3, record) 
data = self.client.chronologize(key, record, time=start) assert_equal([[2, 3], [3]], list(data.values())) def test_chronologize_key_record_start_end(self): key = test_data.random_string() record = test_data.random_long() self.client.add(key, 1, record) self.client.add(key, 2, record) self.client.add(key, 3, record) start = self.client.time() self.client.remove(key, 1, record) end = self.client.time() self.client.remove(key, 2, record) self.client.remove(key, 3, record) data = self.client.chronologize(key, record, timestamp=start, end=end) assert_equal([[2, 3]], list(data.values())) def test_chronologize_key_record_startstr(self): key = test_data.random_string() record = test_data.random_long() self.client.add(key, 1, record) self.client.add(key, 2, record) self.client.add(key, 3, record) anchor = self.get_time_anchor() self.client.remove(key, 1, record) self.client.remove(key, 2, record) self.client.remove(key, 3, record) start = test_data.get_elapsed_millis_string(anchor) data = self.client.chronologize(key, record, time=start) assert_equal([[2, 3], [3]], list(data.values())) def test_chronologize_key_record_startstr_endstr(self): key = test_data.random_string() record = test_data.random_long() self.client.add(key, 1, record) self.client.add(key, 2, record) self.client.add(key, 3, record) start_anchor = self.get_time_anchor() self.client.remove(key, 1, record) end_anchor = self.get_time_anchor() self.client.remove(key, 2, record) self.client.remove(key, 3, record) start = test_data.get_elapsed_millis_string(start_anchor) end = test_data.get_elapsed_millis_string(end_anchor) data = self.client.chronologize(key, record, timestamp=start, end=end) assert_equal([[2, 3]], list(data.values())) def test_clear_key_record(self): key = test_data.random_string() record = test_data.random_long() self.client.add(key, 1, record) self.client.add(key, 2, record) self.client.add(key, 3, record) self.client.clear(key=key, record=record) data = self.client.select(key=key, 
record=record) assert_equal([], data) def test_clear_key_records(self): key = test_data.random_string() records = [1, 2, 3] self.client.add(key, 1, records) self.client.add(key, 2, records) self.client.add(key, 3, records) self.client.clear(key=key, records=records) data = self.client.select(key=key, records=records) assert_equal({}, data) def test_clear_keys_record(self): key1 = test_data.random_string(6) key2 = test_data.random_string(7) key3 = test_data.random_string(8) record = test_data.random_long() self.client.add(key1, 1, record) self.client.add(key2, 2, record) self.client.add(key3, 3, record) self.client.clear(keys=[key1, key2, key3], record=record) data = self.client.select(keys=[key1, key2, key3], record=record) assert_equal({}, data) def test_clear_keys_records(self): data = { 'a': 'A', 'b': 'B', 'c': ['C', True], 'd': 'D' } records = [1, 2, 3] self.client.insert(data=data, records=records) self.client.clear(keys=['a', 'b', 'c'], records=records) data = self.client.get(key='d', records=records) assert_equal({ 1: 'D', 2: 'D', 3: 'D' }, data) def test_clear_record(self): data = { 'a': 'A', 'b': 'B', 'c': ['C', True] } record = next(iter(self.client.insert(data))) self.client.clear(record=record) data = self.client.select(record=record) assert_equal({}, data) def test_clear_records(self): data = { 'a': 'A', 'b': 'B', 'c': ['C', True], 'd': 'D' } records = [1, 2, 3] self.client.insert(data=data, records=records) self.client.clear(records=records) data = self.client.select(records=records) assert_equal({1: {}, 2: {}, 3: {}}, data) def test_commit(self): self.client.stage() record = self.client.add("name", "jeff nelson") self.client.commit() assert_equal(['name'], list(self.client.describe(record))) def test_describe_record(self): self.client.set('name', 'tom brady', 1) self.client.set('age', 100, 1) self.client.set('team', 'new england patriots', 1) keys = self.client.describe(1) assert_equals(['age', 'name', 'team'], keys) def 
test_describe_record_time(self): self.client.set('name', 'tom brady', 1) self.client.set('age', 100, 1) self.client.set('team', 'new england patriots', 1) timestamp = self.client.time() self.client.clear('name', 1) keys = self.client.describe(1, time=timestamp) assert_equals(['age', 'name', 'team'], keys) def test_describe_record_timestr(self): self.client.set('name', 'tom brady', 1) self.client.set('age', 100, 1) self.client.set('team', 'new england patriots', 1) anchor = self.get_time_anchor() self.client.clear('name', 1) timestamp = test_data.get_elapsed_millis_string(anchor) keys = self.client.describe(1, time=timestamp) assert_equals(['age', 'name', 'team'], keys) def test_describe_records(self): records = [1, 2, 3] self.client.set('name', 'tom brady', records) self.client.set('age', 100, records) self.client.set('team', 'new england patriots', records) keys = self.client.describe(records) assert_equals(['age', 'name', 'team'], keys[1]) assert_equals(['age', 'name', 'team'], keys[2]) assert_equals(['age', 'name', 'team'], keys[3]) def test_describe_records_time(self): records = [1, 2, 3] self.client.set('name', 'tom brady', records) self.client.set('age', 100, records) self.client.set('team', 'new england patriots', records) timestamp = self.client.time() self.client.clear(records=records) keys = self.client.describe(records, timestamp=timestamp) assert_equals(['age', 'name', 'team'], keys[1]) assert_equals(['age', 'name', 'team'], keys[2]) assert_equals(['age', 'name', 'team'], keys[3]) def test_describe_records_timestr(self): records = [1, 2, 3] self.client.set('name', 'tom brady', records) self.client.set('age', 100, records) self.client.set('team', 'new england patriots', records) anchor = self.get_time_anchor() self.client.clear(records=records) timestamp = test_data.get_elapsed_millis_string(anchor) keys = self.client.describe(records, timestamp=timestamp) assert_equals(['age', 'name', 'team'], keys[1]) assert_equals(['age', 'name', 'team'], keys[2]) 
assert_equals(['age', 'name', 'team'], keys[3]) def test_diff_key_record_start(self): key = test_data.random_string() record = test_data.random_long() self.client.add(key, 1, record) start = self.client.time() self.client.add(key, 2, record) self.client.remove(key, 1, record) diff = self.client.diff(key, record, start) assert_equal([2], diff.get(Diff.ADDED)) assert_equal([1], diff.get(Diff.REMOVED)) def test_diff_key_record_startstr(self): key = test_data.random_string() record = test_data.random_long() self.client.add(key, 1, record) anchor = self.get_time_anchor() self.client.add(key, 2, record) self.client.remove(key, 1, record) start = test_data.get_elapsed_millis_string(anchor) diff = self.client.diff(key, record, start) assert_equal([2], diff.get(Diff.ADDED)) assert_equal([1], diff.get(Diff.REMOVED)) def test_diff_key_record_start_end(self): key = test_data.random_string() record = test_data.random_long() self.client.add(key, 1, record) start = self.client.time() self.client.add(key, 2, record) self.client.remove(key, 1, record) end = self.client.time() self.client.set(key, 3, record) diff = self.client.diff(key, record, start, end) assert_equal([2], diff.get(Diff.ADDED)) assert_equal([1], diff.get(Diff.REMOVED)) def test_diff_key_record_startstr_endstr(self): key = test_data.random_string() record = test_data.random_long() self.client.add(key, 1, record) start_anchor = self.get_time_anchor() self.client.add(key, 2, record) self.client.remove(key, 1, record) end_anchor = self.get_time_anchor() self.client.set(key, 3, record) start = test_data.get_elapsed_millis_string(start_anchor) end = test_data.get_elapsed_millis_string(end_anchor) diff = self.client.diff(key, record, start, end) assert_equal([2], diff.get(Diff.ADDED)) assert_equal([1], diff.get(Diff.REMOVED)) def test_diff_key_start(self): key = test_data.random_string() self.client.add(key=key, value=1, record=1) start = self.client.time() self.client.add(key=key, value=2, record=1) 
self.client.add(key=key, value=1, record=2) self.client.add(key=key, value=3, record=3) self.client.remove(key=key, value=1, record=2) diff = self.client.diff(key=key, start=start) assert_equal(2, len(diff.keys())) diff2 = diff.get(2) diff3 = diff.get(3) assert_equal([1], diff2.get(Diff.ADDED)) assert_equal([3], diff3.get(Diff.ADDED)) assert_is_none(diff2.get(Diff.REMOVED)) assert_is_none(diff3.get(Diff.REMOVED)) def test_diff_key_startstr(self): key = test_data.random_string() self.client.add(key=key, value=1, record=1) anchor = self.get_time_anchor() self.client.add(key=key, value=2, record=1) self.client.add(key=key, value=1, record=2) self.client.add(key=key, value=3, record=3) self.client.remove(key=key, value=1, record=2) start = test_data.get_elapsed_millis_string(anchor) diff = self.client.diff(key=key, start=start) assert_equal(2, len(diff.keys())) diff2 = diff.get(2) diff3 = diff.get(3) assert_equal([1], diff2.get(Diff.ADDED)) assert_equal([3], diff3.get(Diff.ADDED)) assert_is_none(diff2.get(Diff.REMOVED)) assert_is_none(diff3.get(Diff.REMOVED)) def test_diff_key_start_end(self): key = test_data.random_string() self.client.add(key=key, value=1, record=1) start = self.client.time() self.client.add(key=key, value=2, record=1) self.client.add(key=key, value=1, record=2) self.client.add(key=key, value=3, record=3) self.client.remove(key=key, value=1, record=2) end = self.client.time() self.client.add(key=key, value=4, record=1) diff = self.client.diff(key=key, start=start, end=end) assert_equal(2, len(diff.keys())) diff2 = diff.get(2) diff3 = diff.get(3) assert_equal([1], diff2.get(Diff.ADDED)) assert_equal([3], diff3.get(Diff.ADDED)) assert_is_none(diff2.get(Diff.REMOVED)) assert_is_none(diff3.get(Diff.REMOVED)) def test_diff_key_startstr_endstr(self): key = test_data.random_string() self.client.add(key=key, value=1, record=1) start_anchor = self.get_time_anchor() self.client.add(key=key, value=2, record=1) self.client.add(key=key, value=1, record=2) 
self.client.add(key=key, value=3, record=3) self.client.remove(key=key, value=1, record=2) end_anchor = self.get_time_anchor() self.client.add(key=key, value=4, record=1) start = test_data.get_elapsed_millis_string(start_anchor) end = test_data.get_elapsed_millis_string(end_anchor) diff = self.client.diff(key=key, start=start, end=end) assert_equal(2, len(diff.keys())) diff2 = diff.get(2) diff3 = diff.get(3) assert_equal([1], diff2.get(Diff.ADDED)) assert_equal([3], diff3.get(Diff.ADDED)) assert_is_none(diff2.get(Diff.REMOVED)) assert_is_none(diff3.get(Diff.REMOVED)) def test_diff_record_start(self): self.client.add(key="foo", value=1, record=1) start = self.client.time() self.client.set(key="foo", value=2, record=1) self.client.add(key="bar", value=True, record=1) diff = self.client.diff(record=1, time=start) assert_equal([1], diff.get('foo').get(Diff.REMOVED)) assert_equal([2], diff.get('foo').get(Diff.ADDED)) assert_equal([True], diff.get('bar').get(Diff.ADDED)) def test_diff_record_startstr(self): self.client.add(key="foo", value=1, record=1) anchor = self.get_time_anchor() self.client.set(key="foo", value=2, record=1) self.client.add(key="bar", value=True, record=1) start = test_data.get_elapsed_millis_string(anchor) diff = self.client.diff(record=1, time=start) assert_equal([1], diff.get('foo').get(Diff.REMOVED)) assert_equal([2], diff.get('foo').get(Diff.ADDED)) assert_equal([True], diff.get('bar').get(Diff.ADDED)) def test_diff_record_start_end(self): self.client.add(key="foo", value=1, record=1) start = self.client.time() self.client.set(key="foo", value=2, record=1) self.client.add(key="bar", value=True, record=1) end = self.client.time() self.client.set(key="car", value=100, record=1) diff = self.client.diff(record=1, time=start, end=end) assert_equal([1], diff.get('foo').get(Diff.REMOVED)) assert_equal([2], diff.get('foo').get(Diff.ADDED)) assert_equal([True], diff.get('bar').get(Diff.ADDED)) def test_diff_record_startstr_endstr(self): 
self.client.add(key="foo", value=1, record=1) start_anchor = self.get_time_anchor() self.client.set(key="foo", value=2, record=1) self.client.add(key="bar", value=True, record=1) end_anchor = self.get_time_anchor() self.client.set(key="car", value=100, record=1) start = test_data.get_elapsed_millis_string(start_anchor) end = test_data.get_elapsed_millis_string(end_anchor) diff = self.client.diff(record=1, time=start, end=end) assert_equal([1], diff.get('foo').get(Diff.REMOVED)) assert_equal([2], diff.get('foo').get(Diff.ADDED)) assert_equal([True], diff.get('bar').get(Diff.ADDED)) def test_find_ccl(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) records = list(self.client.find(key+' > 3')) assert_equal(list(range(4, 10)), records) @raises(Exception) def test_find_ccl_handle_parse_exception(self): self.client.find(ccl="throw parse exception") def test_find_key_operator_value(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) records = list(self.client.find(key=key, operator=Operator.GREATER_THAN, value=3)) assert_equal(list(range(4, 10)), records) def test_find_key_operator_values(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) records = list(self.client.find(key=key, operator=Operator.BETWEEN, values=[3, 6])) assert_equal([3, 4, 5], records) def test_find_key_operator_value_time(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) ts = self.client.time() for n in range(0, 10): self.client.add(key=key, value=n, record=n+1) records = list(self.client.find(key=key, operator=Operator.GREATER_THAN, value=3, time=ts)) assert_equal(list(range(4, 10)), records) def test_find_key_operator_value_timestr(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) anchor = self.get_time_anchor() for n in 
range(0, 10): self.client.add(key=key, value=n, record=n+1) ts = test_data.get_elapsed_millis_string(anchor) records = list(self.client.find(key=key, operator=Operator.GREATER_THAN, value=3, time=ts)) assert_equal(list(range(4, 10)), records) def test_find_key_operator_values_time(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) ts = self.client.time() for n in range(0, 10): self.client.add(key=key, value=n, record=n+1) records = list(self.client.find(key=key, operator=Operator.BETWEEN, values=[3, 6], time=ts)) assert_equal([3, 4, 5], records) def test_find_key_operator_values_timestr(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) anchor = self.get_time_anchor() for n in range(0, 10): self.client.add(key=key, value=n, record=n+1) ts = test_data.get_elapsed_millis_string(anchor) records = list(self.client.find(key=key, operator=Operator.BETWEEN, values=[3, 6], time=ts)) assert_equal([3, 4, 5], records) def test_find_key_operatorstr_values_time(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) ts = self.client.time() for n in range(0, 10): self.client.add(key=key, value=n, record=n+1) records = list(self.client.find(key=key, operator="bw", values=[3, 6], time=ts)) assert_equal([3, 4, 5], records) def test_find_key_operatorstr_values(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) records = list(self.client.find(key=key, operator="bw", values=[3, 6])) assert_equal([3, 4, 5], records) def test_find_key_operatorstr_values_timestr(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) anchor = self.get_time_anchor() for n in range(0, 10): self.client.add(key=key, value=n, record=n+1) ts = test_data.get_elapsed_millis_string(anchor) records = list(self.client.find(key=key, operator="bw", 
values=[3, 6], time=ts)) assert_equal([3, 4, 5], records) def test_find_key_operatorstr_value(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) records = list(self.client.find(key=key, operator="gt", value=3)) assert_equal(list(range(4, 10)), records) def test_find_key_operatorstr_value_time(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) ts = self.client.time() for n in range(0, 10): self.client.add(key=key, value=n, record=n+1) records = list(self.client.find(key=key, operator="gt", value=3, time=ts)) assert_equal(list(range(4, 10)), records) def test_find_key_operatorstr_value_timestr(self): key = test_data.random_string() for n in range(0, 10): self.client.add(key=key, value=n, record=n) anchor = self.get_time_anchor() for n in range(0, 10): self.client.add(key=key, value=n, record=n+1) ts = test_data.get_elapsed_millis_string(anchor) records = list(self.client.find(key=key, operator="gt", value=3, time=ts)) assert_equal(list(range(4, 10)), records) def test_get_ccl(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) ccl = key2 + ' = 10' data = self.client.get(ccl=ccl) expected = { key1: 3, key2: 10 } assert_equal(data.get(record1), expected) assert_equal(data.get(record2), expected) def test_get_ccl_time(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, 
records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) ts = self.client.time() self.client.set(key=key2, value=11, records=[record1, record2]) ccl = key2 + ' > 10' data = self.client.get(ccl=ccl, time=ts) expected = { key1: 3, key2: 10 } assert_equal(data.get(record1), expected) assert_equal(data.get(record2), expected) def test_get_ccl_timestr(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) anchor = self.get_time_anchor() self.client.set(key=key2, value=11, records=[record1, record2]) ccl = key2 + ' > 10' ts = test_data.get_elapsed_millis_string(anchor) data = self.client.get(ccl=ccl, time=ts) expected = { key1: 3, key2: 10 } assert_equal(data.get(record1), expected) assert_equal(data.get(record2), expected) def test_get_key_ccl(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) self.client.add(key=key1, value=4, record=record2) ccl = key2 + ' = 10' data = self.client.get(key=key1, ccl=ccl) expected = { record1: 3, record2: 4 } assert_equal(expected, data) def test_get_keys_ccl(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) 
self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) self.client.add(key=key1, value=4, record=record2) ccl = key2 + ' = 10' data = self.client.get(keys=[key1, key2], ccl=ccl) expected = { record1: {key1: 3, key2: 10}, record2: {key1: 4, key2: 10}, } assert_equal(expected, data) def test_get_key_ccl_time(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) self.client.add(key=key1, value=4, record=record2) ts = self.client.time() ccl = key2 + ' = 10' self.client.set(key=key1, value=100, record=[record2, record1]) data = self.client.get(key=key1, ccl=ccl, time=ts) expected = { record1: 3, record2: 4 } assert_equal(expected, data) def test_get_keys_ccl_time(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) self.client.add(key=key1, value=4, record=record2) ts = self.client.time() ccl = key2 + ' = 10' self.client.set(key=key1, value=100, record=[record2, record1]) data = self.client.get(key=[key1, key2], ccl=ccl, time=ts) expected = { record1: {key1: 3, key2: 10}, record2: {key1: 4, key2: 10}, } assert_equal(expected, data) def test_get_key_ccl_timestr(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, 
value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) self.client.add(key=key1, value=4, record=record2) anchor = self.get_time_anchor() ccl = key2 + ' = 10' self.client.set(key=key1, value=100, record=[record2, record1]) ts = test_data.get_elapsed_millis_string(anchor) data = self.client.get(key=key1, ccl=ccl, time=ts) expected = { record1: 3, record2: 4 } assert_equal(expected, data) def test_get_keys_ccl_timestr(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) self.client.add(key=key1, value=4, record=record2) anchor = self.get_time_anchor() ccl = key2 + ' = 10' self.client.set(key=key1, value=100, record=[record2, record1]) ts = test_data.get_elapsed_millis_string(anchor) data = self.client.get(key=[key1, key2], ccl=ccl, time=ts) expected = { record1: {key1: 3, key2: 10}, record2: {key1: 4, key2: 10}, } assert_equal(expected, data) def test_get_key_record(self): self.client.add('foo', 1, 1) self.client.add('foo', 2, 1) self.client.add('foo', 3, 1) assert_equal(3, self.client.get(key='foo', record=1)) def test_get_key_record_time(self): self.client.add('foo', 1, 1) self.client.add('foo', 2, 1) self.client.add('foo', 3, 1) ts = self.client.time() self.client.add('foo', 4, 1) assert_equal(3, self.client.get(key='foo', record=1, time=ts)) def test_get_key_record_timestr(self): self.client.add('foo', 1, 1) self.client.add('foo', 2, 1) self.client.add('foo', 3, 1) anchor = self.get_time_anchor() self.client.add('foo', 4, 1) ts = 
test_data.get_elapsed_millis_string(anchor) assert_equal(3, self.client.get(key='foo', record=1, time=ts)) def test_get_key_records(self): self.client.add('foo', 1, [1, 2, 3]) self.client.add('foo', 2, [1, 2, 3]) self.client.add('foo', 3, [1, 2, 3]) assert_equal({ 1: 3, 2: 3, 3: 3 }, self.client.get(key='foo', record=[1, 2, 3])) def test_get_key_records_time(self): self.client.add('foo', 1, [1, 2, 3]) self.client.add('foo', 2, [1, 2, 3]) self.client.add('foo', 3, [1, 2, 3]) ts = self.client.time() self.client.add('foo', 4, [1, 2, 3]) assert_equal({ 1: 3, 2: 3, 3: 3 }, self.client.get(key='foo', record=[1, 2, 3], time=ts)) def test_get_key_records_timestr(self): self.client.add('foo', 1, [1, 2, 3]) self.client.add('foo', 2, [1, 2, 3]) self.client.add('foo', 3, [1, 2, 3]) anchor = self.get_time_anchor() self.client.add('foo', 4, [1, 2, 3]) ts = test_data.get_elapsed_millis_string(anchor) assert_equal({ 1: 3, 2: 3, 3: 3 }, self.client.get(key='foo', record=[1, 2, 3], time=ts)) def test_get_keys_record(self): self.client.add('foo', 1, 1) self.client.add('foo', 2, 1) self.client.add('bar', 1, 1) self.client.add('bar', 2, 1) data = self.client.get(keys=['foo', 'bar'], record=1) expected = { 'foo': 2, 'bar': 2 } assert_equal(expected, data) def test_get_keys_record_time(self): self.client.add('foo', 1, 1) self.client.add('foo', 2, 1) self.client.add('bar', 1, 1) self.client.add('bar', 2, 1) ts = self.client.time() self.client.add('foo', 3, 1) self.client.add('bar', 3, 1) data = self.client.get(keys=['foo', 'bar'], record=1, time=ts) expected = { 'foo': 2, 'bar': 2 } assert_equal(expected, data) def test_get_keys_record_timestr(self): self.client.add('foo', 1, 1) self.client.add('foo', 2, 1) self.client.add('bar', 1, 1) self.client.add('bar', 2, 1) anchor = self.get_time_anchor() self.client.add('foo', 3, 1) self.client.add('bar', 3, 1) ts = test_data.get_elapsed_millis_string(anchor) data = self.client.get(keys=['foo', 'bar'], record=1, time=ts) expected = { 'foo': 2, 
'bar': 2 } assert_equal(expected, data) def test_get_keys_records_time(self): self.client.add('foo', 1, [1, 2]) self.client.add('foo', 2, [1, 2]) self.client.add('bar', 1, [1, 2]) self.client.add('bar', 2, [1, 2]) ts = self.client.time() self.client.add('foo', 3, [1, 2]) self.client.add('bar', 3, [1, 2]) data = self.client.get(keys=['foo', 'bar'], records=[1, 2], time=ts) expected = { 'foo': 2, 'bar': 2 } assert_equal({ 1: expected, 2: expected }, data) def test_get_keys_records_timestr(self): self.client.add('foo', 1, [1, 2]) self.client.add('foo', 2, [1, 2]) self.client.add('bar', 1, [1, 2]) self.client.add('bar', 2, [1, 2]) anchor = self.get_time_anchor() self.client.add('foo', 3, [1, 2]) self.client.add('bar', 3, [1, 2]) ts = test_data.get_elapsed_millis_string(anchor) data = self.client.get(keys=['foo', 'bar'], records=[1, 2], time=ts) expected = { 'foo': 2, 'bar': 2 } assert_equal({ 1: expected, 2: expected }, data) def test_get_keys_records(self): self.client.add('foo', 1, [1, 2]) self.client.add('foo', 2, [1, 2]) self.client.add('bar', 1, [1, 2]) self.client.add('bar', 2, [1, 2]) data = self.client.get(keys=['foo', 'bar'], records=[1, 2]) expected = { 'foo': 2, 'bar': 2 } assert_equal({ 1: expected, 2: expected }, data) def test_insert_dict(self): data = { 'string': 'a', 'int': 1, 'double': 3.14, 'bool': True, 'multi': ['a', 1, 3.14, True] } record = self.client.insert(data=data)[0] assert_equal('a', self.client.get(key='string', record=record)) assert_equal(1, self.client.get(key='int', record=record)) assert_equal(3.14, self.client.get(key='double', record=record)) assert_equal(True, self.client.get(key='bool', record=record)) assert_equal(['a', 1, 3.14, True], self.client.select(key='multi', record=record)) def test_insert_dicts(self): data = [ { 'foo': 1 }, { 'foo': 2 }, { 'foo': 3 } ] records = self.client.insert(data=data) assert_equal(len(data), len(records)) def test_insert_json(self): data = { 'string': 'a', 'int': 1, 'double': 3.14, 'bool': True, 
'multi': ['a', 1, 3.14, True] } data = ujson.dumps(data) record = self.client.insert(data=data)[0] assert_equal('a', self.client.get(key='string', record=record)) assert_equal(1, self.client.get(key='int', record=record)) assert_equal(3.14, self.client.get(key='double', record=record)) assert_equal(True, self.client.get(key='bool', record=record)) assert_equal(['a', 1, 3.14, True], self.client.select(key='multi', record=record)) def test_insert_json_list(self): data = [ { 'foo': 1 }, { 'foo': 2 }, { 'foo': 3 } ] count = len(data) data = ujson.dumps(data) records = self.client.insert(data=data) assert_equal(count, len(records)) def test_insert_dict_record(self): record = test_data.random_long() data = { 'string': 'a', 'int': 1, 'double': 3.14, 'bool': True, 'multi': ['a', 1, 3.14, True] } result = self.client.insert(data=data, record=record) assert_true(result) assert_equal('a', self.client.get(key='string', record=record)) assert_equal(1, self.client.get(key='int', record=record)) assert_equal(3.14, self.client.get(key='double', record=record)) assert_equal(True, self.client.get(key='bool', record=record)) assert_equal(['a', 1, 3.14, True], self.client.select(key='multi', record=record)) def test_insert_json_record(self): record = test_data.random_long() data = { 'string': 'a', 'int': 1, 'double': 3.14, 'bool': True, 'multi': ['a', 1, 3.14, True] } data = ujson.dumps(data) result = self.client.insert(data=data, record=record) assert_true(result) assert_equal('a', self.client.get(key='string', record=record)) assert_equal(1, self.client.get(key='int', record=record)) assert_equal(3.14, self.client.get(key='double', record=record)) assert_equal(True, self.client.get(key='bool', record=record)) assert_equal(['a', 1, 3.14, True], self.client.select(key='multi', record=record)) def test_insert_dict_records(self): record1 = test_data.random_long() record2 = test_data.random_long() record3 = test_data.random_long() data = { 'string': 'a', 'int': 1, 'double': 3.14, 'bool': 
True, 'multi': ['a', 1, 3.14, True] } result = self.client.insert(data=data, records=[record1, record2, record3]) assert_true({ record1: True, record2: True, record3: True }, result) def test_insert_json_records(self): record1 = test_data.random_long() record2 = test_data.random_long() record3 = test_data.random_long() data = { 'string': 'a', 'int': 1, 'double': 3.14, 'bool': True, 'multi': ['a', 1, 3.14, True] } data = ujson.dumps(data) result = self.client.insert(data=data, records=[record1, record2, record3]) assert_true({ record1: True, record2: True, record3: True }, result) def test_inventory(self): records = [1, 2, 3, 4, 5, 6, 7] self.client.add(key='foo', value=17, records=records) assert_equal(records, self.client.inventory()) def test_jsonify_records(self): record1 = 1 record2 = 2 data = { 'int': 1, 'multi': [1, 2, 3, 4] } self.client.insert(data=data, records=[record1, record2]) dump = self.client.jsonify(records=[record1, record2]) data = { 'int': [1], 'multi': [1, 2, 3, 4] } assert_equal([data, data], ujson.loads(dump)) def test_jsonify_records_identifier(self): record1 = 1 record2 = 2 data = { 'int': 1, 'multi': [1, 2, 3, 4] } self.client.insert(data=data, records=[record1, record2]) dump = self.client.jsonify(records=[record1, record2], id=True) data1 = { 'int': [1], 'multi': [1, 2, 3, 4], constants.JSON_RESERVED_IDENTIFIER_NAME: 1 } data2 = { 'int': [1], 'multi': [1, 2, 3, 4], constants.JSON_RESERVED_IDENTIFIER_NAME: 2 } assert_equal([data1, data2], ujson.loads(dump)) def test_jsonify_records_time(self): record1 = 1 record2 = 2 data = { 'int': 1, 'multi': [1, 2, 3, 4] } self.client.insert(data=data, records=[record1, record2]) ts = self.client.time() self.client.add('foo', 10, [record1, record2]) dump = self.client.jsonify(records=[record1, record2], time=ts) data = { 'int': [1], 'multi': [1, 2, 3, 4] } assert_equal([data, data], ujson.loads(dump)) @ignore def test_jsonify_records_timestr(self): record1 = 1 record2 = 2 data = { 'int': 1, 'multi': 
[1, 2, 3, 4] } self.client.insert(data=data, records=[record1, record2]) anchor = self.get_time_anchor() self.client.add('foo', 10, [record1, record2]) ts = test_data.get_elapsed_millis_string(anchor) dump = self.client.jsonify(records=[record1, record2], time=ts) data = { 'int': [1], 'multi': [1, 2, 3, 4] } assert_equal([data, data], ujson.loads(dump)) def test_jsonify_records_identifier_time(self): record1 = 1 record2 = 2 data = { 'int': 1, 'multi': [1, 2, 3, 4] } self.client.insert(data=data, records=[record1, record2]) ts = self.client.time() self.client.add(key='foo', value=True, records=[record1, record2]) dump = self.client.jsonify(records=[record1, record2], id=True, time=ts) data1 = { 'int': [1], 'multi': [1, 2, 3, 4], constants.JSON_RESERVED_IDENTIFIER_NAME: 1 } data2 = { 'int': [1], 'multi': [1, 2, 3, 4], constants.JSON_RESERVED_IDENTIFIER_NAME: 2 } assert_equal([data1, data2], ujson.loads(dump)) def test_jsonify_records_identifier_timestr(self): record1 = 1 record2 = 2 data = { 'int': 1, 'multi': [1, 2, 3, 4] } self.client.insert(data=data, records=[record1, record2]) anchor = self.get_time_anchor() self.client.add(key='foo', value=True, records=[record1, record2]) ts = test_data.get_elapsed_millis_string(anchor) dump = self.client.jsonify(records=[record1, record2], id=True, time=ts) data1 = { 'int': [1], 'multi': [1, 2, 3, 4], constants.JSON_RESERVED_IDENTIFIER_NAME: 1 } data2 = { 'int': [1], 'multi': [1, 2, 3, 4], constants.JSON_RESERVED_IDENTIFIER_NAME: 2 } assert_equal([data1, data2], ujson.loads(dump)) def test_ping_record(self): record = 1 assert_false(self.client.ping(record)) self.client.add(key='foo', value=1, record=record) assert_true(self.client.ping(record)) self.client.clear(key='foo', record=record) assert_false(self.client.ping(record)) def test_ping_records(self): self.client.add(key='foo', value=1, records=[1, 2]) data = self.client.ping([1, 2, 3]) assert_equal({ 1: True, 2: True, 3: False }, data) def 
test_remove_key_value_record(self): key = 'foo' value = 1 record = 1 assert_false(self.client.remove(key, value, record)) self.client.add(key, value, record) assert_true(self.client.remove(key=key, record=record, value=value)) def test_remove_key_value_records(self): key = 'foo' value = 1 self.client.add(key, value, records=[1, 2]) data = self.client.remove(key, value, records=[1, 2, 3]) assert_equal({ 1: True, 2: True, 3: False }, data) def test_revert_key_records_time(self): data1 = { 'one': 1, 'two': 2, 'three': 3 } data2 = { 'one': True, 'two': True, 'three': True } self.client.insert(data=data1, records=[1, 2, 3]) ts = self.client.time() self.client.insert(data=data2, records=[1, 2, 3]) self.client.revert(key='one', records=[1, 2, 3], time=ts) data = self.client.select(key='one', record=[1, 2, 3]) assert_equal({ 1: [1], 2: [1], 3: [1] }, data) def test_revert_key_records_timestr(self): data1 = { 'one': 1, 'two': 2, 'three': 3 } data2 = { 'one': True, 'two': True, 'three': True } self.client.insert(data=data1, records=[1, 2, 3]) anchor = self.get_time_anchor() self.client.insert(data=data2, records=[1, 2, 3]) ts = test_data.get_elapsed_millis_string(anchor) self.client.revert(key='one', records=[1, 2, 3], time=ts) data = self.client.select(key='one', record=[1, 2, 3]) assert_equal({ 1: [1], 2: [1], 3: [1] }, data) def test_revert_keys_records_time(self): data1 = { 'one': 1, 'two': 2, 'three': 3 } data2 = { 'one': True, 'two': True, 'three': True } self.client.insert(data=data1, records=[1, 2, 3]) ts = self.client.time() self.client.insert(data=data2, records=[1, 2, 3]) self.client.revert(keys=['one', 'two', 'three'], records=[1, 2, 3], time=ts) data = self.client.select(key=['one', 'two', 'three'], record=[1, 2, 3]) data3 = { 'one': [1], 'two': [2], 'three': [3] } assert_equal({ 1: data3, 2: data3, 3: data3 }, data) def test_revert_keys_records_timestr(self): data1 = { 'one': 1, 'two': 2, 'three': 3 } data2 = { 'one': True, 'two': True, 'three': True } 
self.client.insert(data=data1, records=[1, 2, 3]) anchor = self.get_time_anchor() self.client.insert(data=data2, records=[1, 2, 3]) ts = test_data.get_elapsed_millis_string(anchor) self.client.revert(keys=['one', 'two', 'three'], records=[1, 2, 3], time=ts) data = self.client.select(key=['one', 'two', 'three'], record=[1, 2, 3]) data3 = { 'one': [1], 'two': [2], 'three': [3] } assert_equal({ 1: data3, 2: data3, 3: data3 }, data) def test_revert_keys_record_time(self): data1 = { 'one': 1, 'two': 2, 'three': 3 } data2 = { 'one': True, 'two': True, 'three': True } self.client.insert(data=data1, records=[1, 2, 3]) ts = self.client.time() self.client.insert(data=data2, records=[1, 2, 3]) self.client.revert(key=['one', 'two', 'three'], records=1, time=ts) data = self.client.select(key=['one', 'two', 'three'], record=1) assert_equal({ 'one': [1], 'two': [2], 'three': [3] }, data) def test_revert_keys_record_timestr(self): data1 = { 'one': 1, 'two': 2, 'three': 3 } data2 = { 'one': True, 'two': True, 'three': True } self.client.insert(data=data1, records=[1, 2, 3]) anchor = self.get_time_anchor() self.client.insert(data=data2, records=[1, 2, 3]) ts = test_data.get_elapsed_millis_string(anchor) self.client.revert(key=['one', 'two', 'three'], records=1, time=ts) data = self.client.select(key=['one', 'two', 'three'], record=1) assert_equal({ 'one': [1], 'two': [2], 'three': [3] }, data) def test_revert_key_record_time(self): data1 = { 'one': 1, 'two': 2, 'three': 3 } data2 = { 'one': True, 'two': True, 'three': True } self.client.insert(data=data1, records=[1, 2, 3]) ts = self.client.time() self.client.insert(data=data2, records=[1, 2, 3]) self.client.revert(key='one', records=1, time=ts) data = self.client.select(key='one', record=1) assert_equal([1], data) def test_revert_key_record_timestr(self): data1 = { 'one': 1, 'two': 2, 'three': 3 } data2 = { 'one': True, 'two': True, 'three': True } self.client.insert(data=data1, records=[1, 2, 3]) anchor = self.get_time_anchor() 
self.client.insert(data=data2, records=[1, 2, 3]) ts = test_data.get_elapsed_millis_string(anchor) self.client.revert(key='one', records=1, time=ts) data = self.client.select(key='one', record=1) assert_equal([1], data) def test_search(self): self.client.add(key="name", value="jeff", record=1) self.client.add(key="name", value="jeffery", record=2) self.client.add(key="name", value="jeremy", record=3) self.client.add(key="name", value="ben jefferson", record=4) records = self.client.search(key="name", query="jef") assert_equal([1, 2, 4], records) def test_select_ccl(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) ccl = key2 + ' = 10' data = self.client.select(ccl=ccl) expected = { key1: [1, 2, 3], key2: [10] } assert_equal(data.get(record1), expected) assert_equal(data.get(record2), expected) def test_select_ccl_time(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) ts = self.client.time() self.client.set(key=key2, value=11, records=[record1, record2]) ccl = key2 + ' > 10' data = self.client.select(ccl=ccl, time=ts) expected = { key1: [1, 2, 3], key2: [10] } assert_equal(data.get(record1), expected) assert_equal(data.get(record2), expected) def test_select_ccl_timestr(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = 
test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) anchor = self.get_time_anchor() self.client.set(key=key2, value=11, records=[record1, record2]) ccl = key2 + ' > 10' ts = test_data.get_elapsed_millis_string(anchor) data = self.client.select(ccl=ccl, time=ts) expected = { key1: [1, 2, 3], key2: [10] } assert_equal(data.get(record1), expected) assert_equal(data.get(record2), expected) def test_select_key_ccl(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) self.client.add(key=key1, value=4, record=record2) ccl = key2 + ' = 10' data = self.client.select(key=key1, ccl=ccl) expected = { record1: [1, 2, 3], record2: [1, 2, 3, 4] } assert_equal(expected, data) def test_select_keys_ccl(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) self.client.add(key=key1, value=4, record=record2) ccl = key2 + ' = 10' data = self.client.select(keys=[key1, key2], ccl=ccl) expected = { record1: {key1: [1, 2, 3], key2: [10]}, record2: {key1: [1, 2, 3, 4], key2: [10]}, } assert_equal(expected, data) def test_select_key_ccl_time(self): key1 = test_data.random_string() key2 = 
test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) self.client.add(key=key1, value=4, record=record2) ts = self.client.time() ccl = key2 + ' = 10' self.client.set(key=key1, value=100, record=[record2, record1]) data = self.client.select(key=key1, ccl=ccl, time=ts) expected = { record1: [1, 2, 3], record2: [1, 2, 3, 4] } assert_equal(expected, data) def test_select_keys_ccl_time(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) self.client.add(key=key1, value=4, record=record2) ts = self.client.time() ccl = key2 + ' = 10' self.client.set(key=key1, value=100, record=[record2, record1]) data = self.client.select(key=[key1, key2], ccl=ccl, time=ts) expected = { record1: {key1: [1, 2, 3], key2: [10]}, record2: {key1: [1, 2, 3, 4], key2: [10]}, } assert_equal(expected, data) def test_select_key_ccl_timestr(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) self.client.add(key=key1, value=4, record=record2) anchor = self.get_time_anchor() ccl = key2 + ' = 10' self.client.set(key=key1, value=100, 
record=[record2, record1]) ts = test_data.get_elapsed_millis_string(anchor) data = self.client.select(key=key1, ccl=ccl, time=ts) expected = { record1: [1, 2, 3], record2: [1, 2, 3, 4] } assert_equal(expected, data) def test_select_keys_ccl_timestr(self): key1 = test_data.random_string() key2 = test_data.random_string() record1 = test_data.random_long() record2 = test_data.random_long() self.client.add(key=key1, value=1, records=[record1, record2]) self.client.add(key=key1, value=2, records=[record1, record2]) self.client.add(key=key1, value=3, records=[record1, record2]) self.client.add(key=key2, value=10, records=[record1, record2]) self.client.add(key=key1, value=4, record=record2) anchor = self.get_time_anchor() ccl = key2 + ' = 10' self.client.set(key=key1, value=100, record=[record2, record1]) ts = test_data.get_elapsed_millis_string(anchor) data = self.client.select(key=[key1, key2], ccl=ccl, time=ts) expected = { record1: {key1: [1, 2, 3], key2: [10]}, record2: {key1: [1, 2, 3, 4], key2: [10]}, } assert_equal(expected, data) def test_select_key_record(self): self.client.add('foo', 1, 1) self.client.add('foo', 2, 1) self.client.add('foo', 3, 1) assert_equal([1, 2, 3], self.client.select(key='foo', record=1)) def test_select_key_record_time(self): self.client.add('foo', 1, 1) self.client.add('foo', 2, 1) self.client.add('foo', 3, 1) ts = self.client.time() self.client.add('foo', 4, 1) assert_equal([1, 2, 3], self.client.select(key='foo', record=1, time=ts)) def test_select_key_record_timestr(self): self.client.add('foo', 1, 1) self.client.add('foo', 2, 1) self.client.add('foo', 3, 1) anchor = self.get_time_anchor() self.client.add('foo', 4, 1) ts = test_data.get_elapsed_millis_string(anchor) assert_equal([1, 2, 3], self.client.select(key='foo', record=1, time=ts)) def test_select_key_records(self): self.client.add('foo', 1, [1, 2, 3]) self.client.add('foo', 2, [1, 2, 3]) self.client.add('foo', 3, [1, 2, 3]) assert_equal({ 1: [1, 2, 3], 2: [1, 2, 3], 3: [1, 2, 
3] }, self.client.select(key='foo', record=[1, 2, 3])) def test_select_key_records_time(self): self.client.add('foo', 1, [1, 2, 3]) self.client.add('foo', 2, [1, 2, 3]) self.client.add('foo', 3, [1, 2, 3]) ts = self.client.time() self.client.add('foo', 4, [1, 2, 3]) assert_equal({ 1: [1, 2, 3], 2: [1, 2, 3], 3: [1, 2, 3] }, self.client.select(key='foo', record=[1, 2, 3], time=ts)) def test_select_key_records_timestr(self): self.client.add('foo', 1, [1, 2, 3]) self.client.add('foo', 2, [1, 2, 3]) self.client.add('foo', 3, [1, 2, 3]) anchor = self.get_time_anchor() self.client.add('foo', 4, [1, 2, 3]) ts = test_data.get_elapsed_millis_string(anchor) assert_equal({ 1: [1, 2, 3], 2: [1, 2, 3], 3: [1, 2, 3] }, self.client.select(key='foo', record=[1, 2, 3], time=ts)) def test_select_keys_record(self): self.client.add('foo', 1, 1) self.client.add('foo', 2, 1) self.client.add('bar', 1, 1) self.client.add('bar', 2, 1) data = self.client.select(keys=['foo', 'bar'], record=1) expected = { 'foo': [1, 2], 'bar': [1, 2] } assert_equal(expected, data) def test_select_keys_record_time(self): self.client.add('foo', 1, 1) self.client.add('foo', 2, 1) self.client.add('bar', 1, 1) self.client.add('bar', 2, 1) ts = self.client.time() self.client.add('foo', 3, 1) self.client.add('bar', 3, 1) data = self.client.select(keys=['foo', 'bar'], record=1, time=ts) expected = { 'foo': [1, 2], 'bar': [1, 2] } assert_equal(expected, data) def test_select_keys_record_timestr(self): self.client.add('foo', 1, 1) self.client.add('foo', 2, 1) self.client.add('bar', 1, 1) self.client.add('bar', 2, 1) anchor = self.get_time_anchor() self.client.add('foo', 3, 1) self.client.add('bar', 3, 1) ts = test_data.get_elapsed_millis_string(anchor) data = self.client.select(keys=['foo', 'bar'], record=1, time=ts) expected = { 'foo': [1, 2], 'bar': [1, 2] } assert_equal(expected, data) def test_select_keys_records_time(self): self.client.add('foo', 1, [1, 2]) self.client.add('foo', 2, [1, 2]) self.client.add('bar', 
    # NOTE(review): this chunk is the interior of an integration-test class for
    # a Concourse client; the enclosing class header and the "def" line of the
    # first (truncated) test method fall outside this chunk.  The trailing
    # statements of that truncated method are reproduced untouched below.
    1, [1, 2])
    self.client.add('bar', 2, [1, 2])
    ts = self.client.time()
    self.client.add('foo', 3, [1, 2])
    self.client.add('bar', 3, [1, 2])
    data = self.client.select(keys=['foo', 'bar'], records=[1, 2], time=ts)
    expected = {
        'foo': [1, 2],
        'bar': [1, 2]
    }
    assert_equal({
        1: expected,
        2: expected
    }, data)

    def test_select_keys_records_timestr(self):
        """select(keys, records) honors a natural-language timestamp string."""
        self.client.add('foo', 1, [1, 2])
        self.client.add('foo', 2, [1, 2])
        self.client.add('bar', 1, [1, 2])
        self.client.add('bar', 2, [1, 2])
        anchor = self.get_time_anchor()
        # Writes after the anchor must be invisible at time=ts.
        self.client.add('foo', 3, [1, 2])
        self.client.add('bar', 3, [1, 2])
        ts = test_data.get_elapsed_millis_string(anchor)
        data = self.client.select(keys=['foo', 'bar'], records=[1, 2], time=ts)
        expected = {
            'foo': [1, 2],
            'bar': [1, 2]
        }
        assert_equal({
            1: expected,
            2: expected
        }, data)

    def test_select_keys_records(self):
        """select(keys, records) returns a record -> key -> values mapping."""
        self.client.add('foo', 1, [1, 2])
        self.client.add('foo', 2, [1, 2])
        self.client.add('bar', 1, [1, 2])
        self.client.add('bar', 2, [1, 2])
        data = self.client.select(keys=['foo', 'bar'], records=[1, 2])
        expected = {
            'foo': [1, 2],
            'bar': [1, 2]
        }
        assert_equal({
            1: expected,
            2: expected
        }, data)

    def test_select_record(self):
        """select(record) returns every key/value stored in one record."""
        self.client.add('foo', 1, [1, 2])
        self.client.add('foo', 2, [1, 2])
        self.client.add('bar', 1, [1, 2])
        self.client.add('bar', 2, [1, 2])
        data = self.client.select(record=1)
        expected = {
            'foo': [1, 2],
            'bar': [1, 2]
        }
        assert_equal(expected, data)

    def test_select_record_time(self):
        """select(record, time) ignores writes made after the timestamp."""
        self.client.add('foo', 1, [1, 2])
        self.client.add('foo', 2, [1, 2])
        self.client.add('bar', 1, [1, 2])
        self.client.add('bar', 2, [1, 2])
        ts = self.client.time()
        self.client.add('foo', 3, [1, 2])
        self.client.add('bar', 3, [1, 2])
        data = self.client.select(record=2, time=ts)
        expected = {
            'foo': [1, 2],
            'bar': [1, 2]
        }
        assert_equal(expected, data)

    def test_select_record_timestr(self):
        """select(record) honors a natural-language timestamp string."""
        self.client.add('foo', 1, [1, 2])
        self.client.add('foo', 2, [1, 2])
        self.client.add('bar', 1, [1, 2])
        self.client.add('bar', 2, [1, 2])
        anchor = self.get_time_anchor()
        self.client.add('foo', 3, [1, 2])
        self.client.add('bar', 3, [1, 2])
        ts = test_data.get_elapsed_millis_string(anchor)
        data = self.client.select(record=2, time=ts)
        expected = {
            'foo': [1, 2],
            'bar': [1, 2]
        }
        assert_equal(expected, data)

    def test_select_records(self):
        """select(records) maps every record id to its full key/value data."""
        self.client.add('foo', 1, [1, 2])
        self.client.add('foo', 2, [1, 2])
        self.client.add('bar', 1, [1, 2])
        self.client.add('bar', 2, [1, 2])
        data = self.client.select(records=[1, 2])
        expected = {
            'foo': [1, 2],
            'bar': [1, 2]
        }
        assert_equal({
            1: expected,
            2: expected
        }, data)

    def test_select_records_time(self):
        """select(records, time) ignores writes made after the timestamp."""
        self.client.add('foo', 1, [1, 2])
        self.client.add('foo', 2, [1, 2])
        self.client.add('bar', 1, [1, 2])
        self.client.add('bar', 2, [1, 2])
        ts = self.client.time()
        self.client.add('foo', 3, [1, 2])
        self.client.add('bar', 3, [1, 2])
        data = self.client.select(
            records=[1, 2],
            time=ts)
        expected = {
            'foo': [1, 2],
            'bar': [1, 2]
        }
        assert_equal({
            1: expected,
            2: expected
        }, data)

    def test_select_records_timestr(self):
        """select(records) honors a natural-language timestamp string."""
        self.client.add('foo', 1, [1, 2])
        self.client.add('foo', 2, [1, 2])
        self.client.add('bar', 1, [1, 2])
        self.client.add('bar', 2, [1, 2])
        anchor = self.get_time_anchor()
        self.client.add('foo', 3, [1, 2])
        self.client.add('bar', 3, [1, 2])
        ts = test_data.get_elapsed_millis_string(anchor)
        data = self.client.select(
            records=[1, 2],
            time=ts)
        expected = {
            'foo': [1, 2],
            'bar': [1, 2]
        }
        assert_equal({
            1: expected,
            2: expected
        }, data)

    def test_set_key_value(self):
        """set(key, value) with no record creates a new record and returns it."""
        key = "foo"
        value = 1
        record = self.client.set(key=key, value=value)
        data = self.client.select(record=record)
        assert_equal({
            'foo': [1]
        }, data)

    def test_set_key_value_record(self):
        """set(key, value, record) replaces all existing values for the key."""
        key = "foo"
        value = 1
        record = 1
        self.client.add(key=key, value=2, record=record)
        self.client.add(key=key, value=2, record=record)
        self.client.set(key=key, value=value, record=record)
        data = self.client.select(record=record)
        assert_equal({
            'foo': [1]
        }, data)

    def test_set_key_value_records(self):
        """set(key, value, records) replaces the key's values in every record."""
        key = "foo"
        value = 1
        records = [1, 2, 3]
        self.client.add(key=key, value=2, record=records)
        self.client.add(key=key, value=2, record=records)
        self.client.set(key=key, value=value, record=records)
        data = self.client.select(record=records)
        expected = {
            'foo': [1]
        }
        assert_equal({
            1: expected,
            2: expected,
            3: expected
        }, data)

    def test_stage(self):
        """stage() opens a transaction; abort() discards it."""
        assert_is_none(self.client.transaction)
        self.client.stage()
        assert_is_not_none(self.client.transaction)
        self.client.abort()

    def test_time(self):
        """time() returns the server timestamp as an int."""
        assert_true(isinstance(self.client.time(), int))

    def test_time_phrase(self):
        """time(phrase) resolves a natural-language phrase to an int."""
        assert_true(isinstance(self.client.time("3 seconds ago"), int))

    def test_verify_and_swap(self):
        """verify_and_swap only swaps when the expected value matches."""
        self.client.add("foo", 2, 2)
        assert_false(self.client.verify_and_swap(key='foo', expected=1, record=2, replacement=3))
        assert_true(self.client.verify_and_swap(key='foo', expected=2, record=2, replacement=3))
        assert_equal(3, self.client.get(key='foo', record=2))

    def test_verify_or_set(self):
        """verify_or_set forces the key to hold exactly the given value."""
        self.client.add("foo", 2, 2)
        self.client.verify_or_set(key='foo', value=3, record=2)
        assert_equal(3, self.client.get(key='foo', record=2))

    def test_verify_key_value_record(self):
        """verify() reflects current membership of a value in a key/record."""
        self.client.add('name', 'jeff', 1)
        self.client.add('name', 'jeffery', 1)
        self.client.add('name', 'bob', 1)
        assert_true(self.client.verify('name', 'jeff', 1))
        self.client.remove('name', 'jeff', 1)
        assert_false(self.client.verify('name', 'jeff', 1))

    def test_verify_key_value_record_time(self):
        """verify(time=ts) checks membership as of a historical timestamp."""
        self.client.add('name', 'jeff', 1)
        self.client.add('name', 'jeffery', 1)
        self.client.add('name', 'bob', 1)
        ts = self.client.time()
        self.client.remove('name', 'jeff', 1)
        assert_true(self.client.verify('name', 'jeff', 1, time=ts))

    def test_verify_key_value_record_timestr(self):
        """verify() honors a natural-language timestamp string."""
        self.client.add('name', 'jeff', 1)
        self.client.add('name', 'jeffery', 1)
        self.client.add('name', 'bob', 1)
        anchor = self.get_time_anchor()
        self.client.remove('name', 'jeff', 1)
        ts = test_data.get_elapsed_millis_string(anchor)
        assert_true(self.client.verify('name', 'jeff', 1, time=ts))

    def test_link_key_source_destination(self):
        """link() stores a Link value pointing at the destination record."""
        assert_true(self.client.link(key='friends', source=1, destination=2))
        assert_equal(Link.to(2), self.client.get('friends', record=1))

    def test_link_key_source_destinations(self):
        """link() to multiple destinations returns a per-destination bool map."""
        assert_equal({
            2: True,
            3: True,
            4: True
        }, self.client.link(key='friends', source=1, destination=[2, 3, 4]))

    def test_unlink_key_source_destination(self):
        """unlink() removes a previously created link."""
        assert_true(self.client.link(key='friends', source=1, destination=2))
        assert_true(self.client.unlink(key='friends', source=1, destination=2))

    def test_unlink_key_source_destinations(self):
        """unlink() reports False for destinations that were never linked."""
        assert_true(self.client.link(key='friends', source=1, destination=2))
        assert_equal({
            2: True,
            3: False
        }, self.client.unlink(key='friends', source=1, destination=[2, 3]))

    def test_find_or_add_key_value(self):
        """find_or_add returns a record holding the key/value pair."""
        record = self.client.find_or_add("age", 23)
        assert_equal(23, self.client.get("age", record))

    def test_find_or_insert_ccl_json(self):
        """find_or_insert accepts the insert payload as a JSON string."""
        data = {
            'name': 'jeff nelson'
        }
        data = ujson.dumps(data)
        record = self.client.find_or_insert(criteria="age > 10", data=data)
        assert_equal('jeff nelson', self.client.get("name", record))

    def test_find_or_insert_ccl_dict(self):
        """find_or_insert accepts the insert payload as a plain dict."""
        data = {
            'name': 'jeff nelson'
        }
        record = self.client.find_or_insert(criteria="age > 10", data=data)
        assert_equal('jeff nelson', self.client.get("name", record))

    def test_insert_dict_with_link(self):
        """insert() round-trips Link values."""
        data = {
            'foo': Link.to(1)
        }
        record = self.client.insert(data=data)[0]
        assert_equal(Link.to(1), self.client.get(key='foo', record=record))

    def test_insert_dict_with_resolvable_link(self):
        """insert() resolves Link.to_where criteria to a concrete record."""
        record1 = self.client.add('foo', 1)
        record2 = self.client.insert(data={
            'foo': Link.to_where('foo = 1')
        })[0]
        assert_equal(Link.to(record1), self.client.get(key='foo', record=record2))
"""Support for OpenWRT (ubus) routers.""" import logging import re from openwrt.ubus import Ubus import voluptuous as vol from homeassistant.components.device_tracker import ( DOMAIN, PLATFORM_SCHEMA as PARENT_PLATFORM_SCHEMA, DeviceScanner, ) from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) CONF_DHCP_SOFTWARE = "dhcp_software" DEFAULT_DHCP_SOFTWARE = "dnsmasq" DHCP_SOFTWARES = ["dnsmasq", "odhcpd", "none"] PLATFORM_SCHEMA = PARENT_PLATFORM_SCHEMA.extend( { vol.Required(CONF_HOST): cv.string, vol.Required(CONF_PASSWORD): cv.string, vol.Required(CONF_USERNAME): cv.string, vol.Optional(CONF_DHCP_SOFTWARE, default=DEFAULT_DHCP_SOFTWARE): vol.In( DHCP_SOFTWARES ), } ) def get_scanner(hass, config): """Validate the configuration and return an ubus scanner.""" dhcp_sw = config[DOMAIN][CONF_DHCP_SOFTWARE] if dhcp_sw == "dnsmasq": scanner = DnsmasqUbusDeviceScanner(config[DOMAIN]) elif dhcp_sw == "odhcpd": scanner = OdhcpdUbusDeviceScanner(config[DOMAIN]) else: scanner = UbusDeviceScanner(config[DOMAIN]) return scanner if scanner.success_init else None def _refresh_on_access_denied(func): """If remove rebooted, it lost our session so rebuild one and try again.""" def decorator(self, *args, **kwargs): """Wrap the function to refresh session_id on PermissionError.""" try: return func(self, *args, **kwargs) except PermissionError: _LOGGER.warning( "Invalid session detected." " Trying to refresh session_id and re-run RPC" ) self.ubus.connect() return func(self, *args, **kwargs) return decorator class UbusDeviceScanner(DeviceScanner): """ This class queries a wireless router running OpenWrt firmware. Adapted from Tomato scanner. 
""" def __init__(self, config): """Initialize the scanner.""" host = config[CONF_HOST] self.username = config[CONF_USERNAME] self.password = config[CONF_PASSWORD] self.parse_api_pattern = re.compile(r"(?P<param>\w*) = (?P<value>.*);") self.last_results = {} self.url = f"http://{host}/ubus" self.ubus = Ubus(self.url, self.username, self.password) self.hostapd = [] self.mac2name = None self.success_init = self.ubus.connect() is not None def scan_devices(self): """Scan for new devices and return a list with found device IDs.""" self._update_info() return self.last_results def _generate_mac2name(self): """Return empty MAC to name dict. Overridden if DHCP server is set.""" self.mac2name = {} @_refresh_on_access_denied def get_device_name(self, device): """Return the name of the given device or None if we don't know.""" if self.mac2name is None: self._generate_mac2name() if self.mac2name is None: # Generation of mac2name dictionary failed return None name = self.mac2name.get(device.upper(), None) return name @_refresh_on_access_denied def _update_info(self): """Ensure the information from the router is up to date. Returns boolean if scanning successful. 
""" if not self.success_init: return False _LOGGER.info("Checking hostapd") if not self.hostapd: hostapd = self.ubus.get_hostapd() self.hostapd.extend(hostapd.keys()) self.last_results = [] results = 0 # for each access point for hostapd in self.hostapd: if result := self.ubus.get_hostapd_clients(hostapd): results = results + 1 # Check for each device is authorized (valid wpa key) for key in result["clients"].keys(): device = result["clients"][key] if device["authorized"]: self.last_results.append(key) return bool(results) class DnsmasqUbusDeviceScanner(UbusDeviceScanner): """Implement the Ubus device scanning for the dnsmasq DHCP server.""" def __init__(self, config): """Initialize the scanner.""" super().__init__(config) self.leasefile = None def _generate_mac2name(self): if self.leasefile is None: if result := self.ubus.get_uci_config("dhcp", "dnsmasq"): values = result["values"].values() self.leasefile = next(iter(values))["leasefile"] else: return result = self.ubus.file_read(self.leasefile) if result: self.mac2name = {} for line in result["data"].splitlines(): hosts = line.split(" ") self.mac2name[hosts[1].upper()] = hosts[3] else: # Error, handled in the ubus.file_read() return class OdhcpdUbusDeviceScanner(UbusDeviceScanner): """Implement the Ubus device scanning for the odhcp DHCP server.""" def _generate_mac2name(self): if result := self.ubus.get_dhcp_method("ipv4leases"): self.mac2name = {} for device in result["device"].values(): for lease in device["leases"]: mac = lease["mac"] # mac = aabbccddeeff # Convert it to expected format with colon mac = ":".join(mac[i : i + 2] for i in range(0, len(mac), 2)) self.mac2name[mac.upper()] = lease["hostname"] else: # Error, handled in the ubus.get_dhcp_method() return
"""Support for Switchbot devices.""" from asyncio import Lock import switchbot # pylint: disable=import-error from homeassistant.config_entries import ConfigEntry from homeassistant.const import CONF_SENSOR_TYPE, Platform from homeassistant.core import HomeAssistant from homeassistant.exceptions import ConfigEntryNotReady from .const import ( ATTR_BOT, ATTR_CURTAIN, BTLE_LOCK, COMMON_OPTIONS, CONF_RETRY_COUNT, CONF_RETRY_TIMEOUT, CONF_SCAN_TIMEOUT, CONF_TIME_BETWEEN_UPDATE_COMMAND, DATA_COORDINATOR, DEFAULT_RETRY_COUNT, DEFAULT_RETRY_TIMEOUT, DEFAULT_SCAN_TIMEOUT, DEFAULT_TIME_BETWEEN_UPDATE_COMMAND, DOMAIN, ) from .coordinator import SwitchbotDataUpdateCoordinator PLATFORMS_BY_TYPE = { ATTR_BOT: [Platform.SWITCH, Platform.SENSOR], ATTR_CURTAIN: [Platform.COVER, Platform.BINARY_SENSOR, Platform.SENSOR], } async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Set up Switchbot from a config entry.""" hass.data.setdefault(DOMAIN, {}) if not entry.options: options = { CONF_TIME_BETWEEN_UPDATE_COMMAND: DEFAULT_TIME_BETWEEN_UPDATE_COMMAND, CONF_RETRY_COUNT: DEFAULT_RETRY_COUNT, CONF_RETRY_TIMEOUT: DEFAULT_RETRY_TIMEOUT, CONF_SCAN_TIMEOUT: DEFAULT_SCAN_TIMEOUT, } hass.config_entries.async_update_entry(entry, options=options) # Use same coordinator instance for all entities. # Uses BTLE advertisement data, all Switchbot devices in range is stored here. if DATA_COORDINATOR not in hass.data[DOMAIN]: # Check if asyncio.lock is stored in hass data. # BTLE has issues with multiple connections, # so we use a lock to ensure that only one API request is reaching it at a time: if BTLE_LOCK not in hass.data[DOMAIN]: hass.data[DOMAIN][BTLE_LOCK] = Lock() if COMMON_OPTIONS not in hass.data[DOMAIN]: hass.data[DOMAIN][COMMON_OPTIONS] = {**entry.options} switchbot.DEFAULT_RETRY_TIMEOUT = hass.data[DOMAIN][COMMON_OPTIONS][ CONF_RETRY_TIMEOUT ] # Store api in coordinator. 
coordinator = SwitchbotDataUpdateCoordinator( hass, update_interval=hass.data[DOMAIN][COMMON_OPTIONS][ CONF_TIME_BETWEEN_UPDATE_COMMAND ], api=switchbot, retry_count=hass.data[DOMAIN][COMMON_OPTIONS][CONF_RETRY_COUNT], scan_timeout=hass.data[DOMAIN][COMMON_OPTIONS][CONF_SCAN_TIMEOUT], api_lock=hass.data[DOMAIN][BTLE_LOCK], ) hass.data[DOMAIN][DATA_COORDINATOR] = coordinator else: coordinator = hass.data[DOMAIN][DATA_COORDINATOR] await coordinator.async_config_entry_first_refresh() if not coordinator.last_update_success: raise ConfigEntryNotReady entry.async_on_unload(entry.add_update_listener(_async_update_listener)) hass.data[DOMAIN][entry.entry_id] = {DATA_COORDINATOR: coordinator} sensor_type = entry.data[CONF_SENSOR_TYPE] hass.config_entries.async_setup_platforms(entry, PLATFORMS_BY_TYPE[sensor_type]) return True async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Unload a config entry.""" sensor_type = entry.data[CONF_SENSOR_TYPE] unload_ok = await hass.config_entries.async_unload_platforms( entry, PLATFORMS_BY_TYPE[sensor_type] ) if unload_ok: hass.data[DOMAIN].pop(entry.entry_id) if len(hass.config_entries.async_entries(DOMAIN)) == 0: hass.data.pop(DOMAIN) return unload_ok async def _async_update_listener(hass: HomeAssistant, entry: ConfigEntry) -> None: """Handle options update.""" # Update entity options stored in hass. if {**entry.options} != hass.data[DOMAIN][COMMON_OPTIONS]: hass.data[DOMAIN][COMMON_OPTIONS] = {**entry.options} hass.data[DOMAIN].pop(DATA_COORDINATOR) await hass.config_entries.async_reload(entry.entry_id)
# NOTE(review): auto-generated Citrix NetScaler NITRO SDK binding.  Uses
# Python 2 syntax (the `ur` string prefix is invalid in Python 3).  Code is
# left byte-identical; only comments were added.
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception

from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util

class systemglobal_authenticationpolicy_binding(base_resource) :
    """ Binding class showing the authenticationpolicy that can be bound to systemglobal.
    """
    def __init__(self) :
        # Bound policy name, priority, builtin flags and the count field used
        # by the count()/count_filtered() classmethods.
        self._policyname = ""
        self._priority = 0
        self._builtin = []
        self.___count = 0

    @property
    def priority(self) :
        ur"""The priority of the command policy.
        """
        try :
            return self._priority
        except Exception as e:
            raise e

    @priority.setter
    def priority(self, priority) :
        ur"""The priority of the command policy.
        """
        try :
            self._priority = priority
        except Exception as e:
            raise e

    @property
    def builtin(self) :
        ur"""Indicates that a variable is a built-in (SYSTEM INTERNAL) type.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE.
        """
        try :
            return self._builtin
        except Exception as e:
            raise e

    @builtin.setter
    def builtin(self, builtin) :
        ur"""Indicates that a variable is a built-in (SYSTEM INTERNAL) type.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE
        """
        try :
            self._builtin = builtin
        except Exception as e:
            raise e

    @property
    def policyname(self) :
        ur"""The name of the command policy.
        """
        try :
            return self._policyname
        except Exception as e:
            raise e

    @policyname.setter
    def policyname(self, policyname) :
        ur"""The name of the command policy.
        """
        try :
            self._policyname = policyname
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response) :
        ur""" converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(systemglobal_authenticationpolicy_binding_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # errorcode 444 means the session expired; clear it so the
                # caller can re-authenticate.
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.systemglobal_authenticationpolicy_binding
        except Exception as e :
            raise e

    def _get_object_name(self) :
        ur""" Returns the value of object identifier argument
        """
        try :
            return 0
        except Exception as e :
            raise e

    @classmethod
    def add(cls, client, resource) :
        # Bind one resource (or a list of resources) to systemglobal.
        try :
            if resource and type(resource) is not list :
                updateresource = systemglobal_authenticationpolicy_binding()
                updateresource.policyname = resource.policyname
                updateresource.priority = resource.priority
                return updateresource.update_resource(client)
            else :
                if resource and len(resource) > 0 :
                    updateresources = [systemglobal_authenticationpolicy_binding() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        updateresources[i].policyname = resource[i].policyname
                        updateresources[i].priority = resource[i].priority
                return cls.update_bulk_request(client, updateresources)
        except Exception as e :
            raise e

    @classmethod
    def delete(cls, client, resource) :
        # Unbind one resource (or a list of resources); only policyname is
        # needed to identify the binding.
        try :
            if resource and type(resource) is not list :
                deleteresource = systemglobal_authenticationpolicy_binding()
                deleteresource.policyname = resource.policyname
                return deleteresource.delete_resource(client)
            else :
                if resource and len(resource) > 0 :
                    deleteresources = [systemglobal_authenticationpolicy_binding() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        deleteresources[i].policyname = resource[i].policyname
                return cls.delete_bulk_request(client, deleteresources)
        except Exception as e :
            raise e

    @classmethod
    def get(cls, service) :
        ur""" Use this API to fetch a systemglobal_authenticationpolicy_binding resources.
        """
        try :
            obj = systemglobal_authenticationpolicy_binding()
            response = obj.get_resources(service)
            return response
        except Exception as e:
            raise e

    @classmethod
    def get_filtered(cls, service, filter_) :
        ur""" Use this API to fetch filtered set of systemglobal_authenticationpolicy_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = systemglobal_authenticationpolicy_binding()
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            return response
        except Exception as e:
            raise e

    @classmethod
    def count(cls, service) :
        ur""" Use this API to count systemglobal_authenticationpolicy_binding resources configued on NetScaler.
        """
        try :
            obj = systemglobal_authenticationpolicy_binding()
            option_ = options()
            option_.count = True
            response = obj.get_resources(service, option_)
            # The count is set dynamically by the payload formatter, so it is
            # read through __dict__ rather than as an attribute.
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    @classmethod
    def count_filtered(cls, service, filter_) :
        ur""" Use this API to count the filtered set of systemglobal_authenticationpolicy_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = systemglobal_authenticationpolicy_binding()
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    class Builtin:
        # Allowed values for the `builtin` property.
        MODIFIABLE = "MODIFIABLE"
        DELETABLE = "DELETABLE"
        IMMUTABLE = "IMMUTABLE"

class systemglobal_authenticationpolicy_binding_response(base_response) :
    # Response envelope used by _get_nitro_response() when deserializing.
    def __init__(self, length=1) :
        self.systemglobal_authenticationpolicy_binding = []
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        self.systemglobal_authenticationpolicy_binding = [systemglobal_authenticationpolicy_binding() for _ in range(length)]
""" Drone.io badge generator. Currently set up to work on Mac. Requires Pillow. """ import os from PIL import Image, ImageDraw, ImageFont SIZE = (95, 18) def hex_colour(hex): if hex[0] == '#': hex = hex[1:] return ( int(hex[:2], 16), int(hex[2:4], 16), int(hex[4:6], 16), ) BACKGROUND = hex_colour('#4A4A4A') SUCCESS = hex_colour('#94B944') WARNING = hex_colour('#E4A83C') ERROR = hex_colour('#B10610') SUCCESS_CUTOFF = 85 WARNING_CUTOFF = 45 FONT = ImageFont.truetype(size=10, filename="/Library/Fonts/Arial.ttf") FONT_SHADOW = hex_colour('#525252') PADDING_TOP = 3 def build_image(percentage, colour): image = Image.new('RGB', SIZE, color=BACKGROUND) drawing = ImageDraw.Draw(image) drawing.rectangle([(55, 0), SIZE], colour, colour) drawing.text((8, PADDING_TOP+1), 'coverage', font=FONT, fill=FONT_SHADOW) drawing.text((7, PADDING_TOP), 'coverage', font=FONT) drawing.text((63, PADDING_TOP+1), '%s%%' % percentage, font=FONT, fill=FONT_SHADOW) drawing.text((62, PADDING_TOP), '%s%%' % percentage, font=FONT) return image os.chdir('_build') for i in range(101): filename = '%i.png' % i file = open(filename, 'wb') if i < WARNING_CUTOFF: build_image(i, ERROR).save(file) elif i < SUCCESS_CUTOFF: build_image(i, WARNING).save(file) else: build_image(i, SUCCESS).save(file)
from ThreadedComponent import threadedcomponent, threadedadaptivecommscomponent
import heapq
import time

class SchedulingComponentMixin(object):
    """
    SchedulingComponent() -> new SchedulingComponent

    Base class for a threadedcomponent with an inbuilt scheduler, allowing a
    component to block until a scheduled event is ready or a message is received
    on an inbox.
    """

    Inboxes = {"inbox"   : "Standard inbox for receiving data from other components",
               "control" : "Standard inbox for receiving control messages from other components",
               "event"   : "Scheduled events which are ready to be processed"}

    def __init__(self, **argd):
        super(SchedulingComponentMixin, self).__init__(**argd)
        # Min-heap of (eventTime, priority, message) tuples, earliest first.
        self.eventQueue = []

    def scheduleRel(self, message, delay, priority=1):
        """
        Schedule an event to wake the component and send a message to the
        "event" inbox after a delay.
        """
        return self.scheduleAbs(message, time.time() + delay, priority)

    def scheduleAbs(self, message, eventTime, priority=1):
        """
        Schedule an event to wake the component and send a message to the
        "event" inbox at a specified (absolute) time.

        Returns the event tuple, which can be passed to cancelEvent().
        """
        # NOTE(review): events are plain (time, priority, message) tuples; if
        # two events share time AND priority, heapq falls back to comparing
        # the messages, which can raise TypeError for uncomparable types.
        event = eventTime, priority, message
        heapq.heappush(self.eventQueue, event)
        return event

    def cancelEvent(self, event):
        """ Remove a scheduled event from the scheduler """
        # list.remove raises ValueError if the event was already delivered.
        self.eventQueue.remove(event)
        heapq.heapify(self.eventQueue)

    def eventReady(self):
        """ Returns true if there is an event ready to be processed """
        if self.eventQueue:
            eventTime = self.eventQueue[0][0]
            if time.time() >= eventTime:
                return True
        return False

    def pause(self):
        """
        Sleep until there is either an event ready or a message is received on
        an inbox
        """
        if self.eventReady():
            self.signalEvent()
        else:
            if self.eventQueue:
                # Sleep at most until the earliest scheduled event is due.
                eventTime = self.eventQueue[0][0]
                super(SchedulingComponentMixin, self).pause(eventTime - time.time())
                if self.eventReady():
                    self.signalEvent()
            else:
                # No pending events: block indefinitely on inbox activity.
                super(SchedulingComponentMixin, self).pause()

    def signalEvent(self):
        """
        Put the event message of the earliest scheduled event onto the
        component's "event" inbox and remove it from the scheduler.
        """
        eventTime, priority, message = heapq.heappop(self.eventQueue)
        #print "Signalling, late by:", (time.time() - eventTime)
        # NOTE(review): if the "event" inqueue is full the popped message is
        # silently discarded -- the event has already left the scheduler.
        if not self.inqueues["event"].full():
            self.inqueues["event"].put(message)

class SchedulingComponent(SchedulingComponentMixin, threadedcomponent):
    # Concrete scheduling component built on the plain threadedcomponent.
    def __init__(self, **argd):
        super(SchedulingComponent, self).__init__(**argd)

class SchedulingAdaptiveCommsComponent(SchedulingComponentMixin,
                                       threadedadaptivecommscomponent):
    # Scheduling variant with adaptive comms (dynamic inbox/outbox) support.
    def __init__(self, **argd):
        super(SchedulingAdaptiveCommsComponent, self).__init__(**argd)
"""Tools to work with checkpoints.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import time import six from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.framework import ops from tensorflow.python.ops import io_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variable_scope as vs from tensorflow.python.ops import variables from tensorflow.python.platform import gfile from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import checkpoint_management from tensorflow.python.training import py_checkpoint_reader from tensorflow.python.training.saving import saveable_object_util from tensorflow.python.util.tf_export import tf_export __all__ = [ "load_checkpoint", "load_variable", "list_variables", "checkpoints_iterator", "init_from_checkpoint" ] @tf_export("train.load_checkpoint") def load_checkpoint(ckpt_dir_or_file): """Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`. If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints, reader for the latest checkpoint is returned. Args: ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint file. Returns: `CheckpointReader` object. Raises: ValueError: If `ckpt_dir_or_file` resolves to a directory with no checkpoints. """ filename = _get_checkpoint_filename(ckpt_dir_or_file) if filename is None: raise ValueError("Couldn't find 'checkpoint' file or checkpoints in " "given directory %s" % ckpt_dir_or_file) return py_checkpoint_reader.NewCheckpointReader(filename) @tf_export("train.load_variable") def load_variable(ckpt_dir_or_file, name): """Returns the tensor value of the given variable in the checkpoint. Args: ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint. name: Name of the variable to return. 
Returns: A numpy `ndarray` with a copy of the value of this variable. """ # TODO(b/29227106): Fix this in the right place and remove this. if name.endswith(":0"): name = name[:-2] reader = load_checkpoint(ckpt_dir_or_file) return reader.get_tensor(name) @tf_export("train.list_variables") def list_variables(ckpt_dir_or_file): """Returns list of all variables in the checkpoint. Args: ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint. Returns: List of tuples `(name, shape)`. """ reader = load_checkpoint(ckpt_dir_or_file) variable_map = reader.get_variable_to_shape_map() names = sorted(variable_map.keys()) result = [] for name in names: result.append((name, variable_map[name])) return result def wait_for_new_checkpoint(checkpoint_dir, last_checkpoint=None, seconds_to_sleep=1, timeout=None): """Waits until a new checkpoint file is found. Args: checkpoint_dir: The directory in which checkpoints are saved. last_checkpoint: The last checkpoint path used or `None` if we're expecting a checkpoint for the first time. seconds_to_sleep: The number of seconds to sleep for before looking for a new checkpoint. timeout: The maximum number of seconds to wait. If left as `None`, then the process will wait indefinitely. Returns: a new checkpoint path, or None if the timeout was reached. """ logging.info("Waiting for new checkpoint at %s", checkpoint_dir) stop_time = time.time() + timeout if timeout is not None else None while True: checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir) if checkpoint_path is None or checkpoint_path == last_checkpoint: if stop_time is not None and time.time() + seconds_to_sleep > stop_time: return None time.sleep(seconds_to_sleep) else: logging.info("Found new checkpoint at %s", checkpoint_path) return checkpoint_path @tf_export("train.checkpoints_iterator") def checkpoints_iterator(checkpoint_dir, min_interval_secs=0, timeout=None, timeout_fn=None): """Continuously yield new checkpoint files as they appear. 
The iterator only checks for new checkpoints when control flow has been reverted to it. This means it can miss checkpoints if your code takes longer to run between iterations than `min_interval_secs` or the interval at which new checkpoints are written. The `timeout` argument is the maximum number of seconds to block waiting for a new checkpoint. It is used in combination with the `timeout_fn` as follows: * If the timeout expires and no `timeout_fn` was specified, the iterator stops yielding. * If a `timeout_fn` was specified, that function is called and if it returns a true boolean value the iterator stops yielding. * If the function returns a false boolean value then the iterator resumes the wait for new checkpoints. At this point the timeout logic applies again. This behavior gives control to callers on what to do if checkpoints do not come fast enough or stop being generated. For example, if callers have a way to detect that the training has stopped and know that no new checkpoints will be generated, they can provide a `timeout_fn` that returns `True` when the training has stopped. If they know that the training is still going on they return `False` instead. Args: checkpoint_dir: The directory in which checkpoints are saved. min_interval_secs: The minimum number of seconds between yielding checkpoints. timeout: The maximum number of seconds to wait between checkpoints. If left as `None`, then the process will wait indefinitely. timeout_fn: Optional function to call after a timeout. If the function returns True, then it means that no new checkpoints will be generated and the iterator will exit. The function is called with no arguments. Yields: String paths to latest checkpoint files as they arrive. 
""" checkpoint_path = None while True: new_checkpoint_path = wait_for_new_checkpoint( checkpoint_dir, checkpoint_path, timeout=timeout) if new_checkpoint_path is None: if not timeout_fn: # timed out logging.info("Timed-out waiting for a checkpoint.") return if timeout_fn(): # The timeout_fn indicated that we are truly done. return else: # The timeout_fn indicated that more checkpoints may come. continue start = time.time() checkpoint_path = new_checkpoint_path yield checkpoint_path time_to_next_eval = start + min_interval_secs - time.time() if time_to_next_eval > 0: time.sleep(time_to_next_eval) @tf_export(v1=["train.init_from_checkpoint"]) def init_from_checkpoint(ckpt_dir_or_file, assignment_map): """Replaces `tf.Variable` initializers so they load from a checkpoint file. Values are not loaded immediately, but when the initializer is run (typically by running a `tf.compat.v1.global_variables_initializer` op). Note: This overrides default initialization ops of specified variables and redefines dtype. Assignment map supports following syntax: * `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in current `scope_name` from `checkpoint_scope_name` with matching tensor names. * `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` - will initialize `scope_name/variable_name` variable from `checkpoint_scope_name/some_other_variable`. * `'scope_variable_name': variable` - will initialize given `tf.Variable` object with tensor 'scope_variable_name' from the checkpoint. * `'scope_variable_name': list(variable)` - will initialize list of partitioned variables with tensor 'scope_variable_name' from the checkpoint. * `'/': 'scope_name/'` - will load all variables in current `scope_name` from checkpoint's root (e.g. no scope). Supports loading into partitioned variables, which are represented as `'<variable>/part_<part #>'`. 
  Example:

  ```python
  # Say, '/tmp/model.ckpt' has the following tensors:
  #  -- name='old_scope_1/var1', shape=[20, 2]
  #  -- name='old_scope_1/var2', shape=[50, 4]
  #  -- name='old_scope_2/var3', shape=[100, 100]
  # Create new model's variables
  with tf.compat.v1.variable_scope('new_scope_1'):
    var1 = tf.compat.v1.get_variable('var1', shape=[20, 2],
                                     initializer=tf.compat.v1.zeros_initializer())
  with tf.compat.v1.variable_scope('new_scope_2'):
    var2 = tf.compat.v1.get_variable('var2', shape=[50, 4],
                                     initializer=tf.compat.v1.zeros_initializer())
    # Partition into 5 variables along the first axis.
    var3 = tf.compat.v1.get_variable(name='var3', shape=[100, 100],
                                     initializer=tf.compat.v1.zeros_initializer(),
                                     partitioner=lambda shape, dtype: [5, 1])

  # Initialize all variables in `new_scope_1` from `old_scope_1`.
  init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/': 'new_scope_1'})

  # Use names to specify which variables to initialize from checkpoint.
  init_from_checkpoint('/tmp/model.ckpt',
                       {'old_scope_1/var1': 'new_scope_1/var1',
                        'old_scope_1/var2': 'new_scope_2/var2'})

  # Or use tf.Variable objects to identify what to initialize.
  init_from_checkpoint('/tmp/model.ckpt',
                       {'old_scope_1/var1': var1,
                        'old_scope_1/var2': var2})

  # Initialize partitioned variables using variable's name
  init_from_checkpoint('/tmp/model.ckpt',
                       {'old_scope_2/var3': 'new_scope_2/var3'})

  # Or specify the list of tf.Variable objects.
  init_from_checkpoint('/tmp/model.ckpt',
                       {'old_scope_2/var3': var3._get_variable_list()})
  ```

  Args:
    ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
    assignment_map: Dict, where keys are names of the variables in the
      checkpoint and values are current variables or names of current variables
      (in default graph).

  Raises:
    ValueError: If missing variables in current graph, or if missing
      checkpoints or tensors in checkpoints.
  """
  # Defer the real work into a callable so it can run either immediately (when
  # already in a cross-replica context) or once for the group via merge_call.
  init_from_checkpoint_fn = lambda _: _init_from_checkpoint(
      ckpt_dir_or_file, assignment_map)
  if distribution_strategy_context.get_cross_replica_context():
    init_from_checkpoint_fn(None)
  else:
    distribution_strategy_context.get_replica_context().merge_call(
        init_from_checkpoint_fn)


def _init_from_checkpoint(ckpt_dir_or_file, assignment_map):
  """See `init_from_checkpoint` for documentation.

  Walks `assignment_map` in sorted order and, for each entry, overrides the
  initializer of the matching in-graph variable(s) so they restore from the
  checkpoint instead of from their original initializers.
  """
  ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
  reader = load_checkpoint(ckpt_dir_or_file)
  # Maps checkpoint tensor name -> shape; used both for existence checks and
  # shape-compatibility validation below.
  variable_map = reader.get_variable_to_shape_map()
  for tensor_name_in_ckpt, current_var_or_name in sorted(
      six.iteritems(assignment_map)):
    var = None
    # Check if this is Variable object or list of Variable objects (in case of
    # partitioned variables).
    if _is_variable(current_var_or_name) or (
        isinstance(current_var_or_name, list)
        and all(_is_variable(v) for v in current_var_or_name)):
      var = current_var_or_name
    else:
      store_vars = vs._get_default_variable_store()._vars  # pylint:disable=protected-access
      # Check if this variable is in var_store.
      var = store_vars.get(current_var_or_name, None)
      # Also check if variable is partitioned as list.
      if var is None:
        var = _collect_partitioned_variable(current_var_or_name, store_vars)
    if var is not None:
      # If 1 to 1 mapping was provided, find variable in the checkpoint.
      if tensor_name_in_ckpt not in variable_map:
        raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
            tensor_name_in_ckpt, ckpt_dir_or_file, variable_map
        ))
      if _is_variable(var):
        # Additional at-call-time checks.
        if not var.get_shape().is_compatible_with(
            variable_map[tensor_name_in_ckpt]):
          raise ValueError(
              "Shape of variable %s (%s) doesn't match with shape of "
              "tensor %s (%s) from checkpoint reader." % (
                  var.name, str(var.get_shape()),
                  tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
              ))
        var_name = var.name
      else:
        # Partitioned variable: log all slice names together.
        var_name = ",".join([v.name for v in var])
      _set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt)
      logging.debug("Initialize variable %s from checkpoint %s with %s",
                    var_name, ckpt_dir_or_file, tensor_name_in_ckpt)
    else:
      # Scope-to-scope mapping: `current_var_or_name` is a name (or scope
      # prefix) that was not found as a single variable above.
      scopes = ""
      # TODO(vihanjain): Support list of 'current_var_or_name' here.
      if "/" in current_var_or_name:
        scopes = current_var_or_name[:current_var_or_name.rindex("/")]
      if not tensor_name_in_ckpt.endswith("/"):
        raise ValueError(
            "Assignment map with scope only name {} should map to scope only "
            "{}. Should be 'scope/': 'other_scope/'.".format(
            scopes, tensor_name_in_ckpt))
      # If scope to scope mapping was provided, find all variables in the scope
      # and create variable to variable mapping.
      scope_variables = set()
      for var_name in store_vars:
        if not scopes or var_name.startswith(scopes + "/"):
          # Consume /part_ if partitioned variable.
          if "/part_" in var_name:
            var_name = var_name[:var_name.index("/part_")]
          scope_variables.add(var_name)
      for var_name in sorted(scope_variables):
        # Lookup name with specified prefix and suffix from current variable.
        # If tensor_name given is '/' (root), don't use it for full name.
        full_tensor_name = var_name[len(scopes):]
        if current_var_or_name != "/":
          full_tensor_name = full_tensor_name[1:]
        if tensor_name_in_ckpt != "/":
          full_tensor_name = tensor_name_in_ckpt + full_tensor_name
        # Remove trailing '/', if any, in the full_tensor_name
        if full_tensor_name.endswith("/"):
          full_tensor_name = full_tensor_name[:-1]
        if full_tensor_name not in variable_map:
          raise ValueError(
              "Tensor %s (%s in %s) is not found in %s checkpoint" % (
                  full_tensor_name, var_name[len(scopes) + 1:],
                  tensor_name_in_ckpt, ckpt_dir_or_file
              ))
        var = store_vars.get(var_name, None)
        if var is None:
          # Not a plain variable; collect the slices of a partitioned one.
          var = _collect_partitioned_variable(var_name, store_vars)
        _set_variable_or_list_initializer(var, ckpt_file, full_tensor_name)
        logging.debug("Initialize variable %s from checkpoint %s with %s",
                      var_name, ckpt_dir_or_file, full_tensor_name)


def _get_checkpoint_filename(ckpt_dir_or_file):
  """Returns checkpoint filename given directory or specific checkpoint file."""
  if gfile.IsDirectory(ckpt_dir_or_file):
    return checkpoint_management.latest_checkpoint(ckpt_dir_or_file)
  return ckpt_dir_or_file


def _set_checkpoint_initializer(variable,
                                ckpt_file,
                                tensor_name,
                                slice_spec,
                                name="checkpoint_initializer"):
  """Overrides given variable's initialization op.

  Sets variable initializer to assign op that initializes variable from tensor's
  value in the checkpoint.

  Args:
    variable: `tf.Variable` object.
    ckpt_file: string, full path of the checkpoint.
    tensor_name: Name of the tensor to load from the checkpoint.
    slice_spec: Slice specification for loading partitioned tensors.
    name: Name of the operation.
  """
  base_type = variable.dtype.base_dtype
  # Do not colocate with variable since RestoreV2 op only runs on CPU and
  # colocation will force variable (and other ops that colocate with variable)
  # to be on CPU as well. It is okay to place the variable's initializer op on
  # CPU since it will only be run once at the start.
  with ops.device(variable.device), ops.device("/cpu:0"):
    restore_op = io_ops.restore_v2(
        ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]

    names_to_saveables = saveable_object_util.op_list_to_dict([variable])
    saveable_objects = []
    for name, op in names_to_saveables.items():
      for s in saveable_object_util.saveable_objects_for_op(op, name):
        saveable_objects.append(s)

    assert len(saveable_objects) == 1  # Should be only one variable.
    init_op = saveable_objects[0].restore([restore_op], restored_shapes=None)

    # pylint:disable=protected-access
    variable._initializer_op = init_op
    restore_op.set_shape(variable.shape)
    variable._initial_value = restore_op
    # pylint:enable=protected-access


def _set_variable_or_list_initializer(variable_or_list, ckpt_file,
                                      tensor_name):
  """Overrides initialization op of given variable or list of variables.

  Calls `_set_checkpoint_initializer` for each variable in the given list of
  variables.

  Args:
    variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.
    ckpt_file: string, full path of the checkpoint.
    tensor_name: Name of the tensor to load from the checkpoint.

  Raises:
    ValueError: if all objects in `variable_or_list` are not partitions of the
      same large variable.
  """
  if isinstance(variable_or_list, (list, tuple)):
    # A set of slices.
    slice_name = None
    for v in variable_or_list:
      slice_info = v._save_slice_info  # pylint:disable=protected-access
      # All slices must come from the same logical (full) variable.
      if slice_name is None:
        slice_name = slice_info.full_name
      elif slice_name != slice_info.full_name:
        raise ValueError("Slices must all be from the same tensor: %s != %s" %
                         (slice_name, slice_info.full_name))
      _set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec)
  else:
    # Single variable: empty slice spec restores the whole tensor.
    _set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, "")


def _is_variable(x):
  # True for both ref variables and resource variables.
  return (isinstance(x, variables.Variable) or
          resource_variable_ops.is_resource_variable(x))


def _collect_partitioned_variable(name, all_vars):
  """Returns list of `tf.Variable` that comprise the partitioned variable."""
  if name + "/part_0" in all_vars:
    var = []
    i = 0
    # Gather consecutive partitions name/part_0, name/part_1, ...
    while name + "/part_%d" % i in all_vars:
      var.append(all_vars[name + "/part_%d" % i])
      i += 1
    return var
  return None
"""Creates a VM with the provided name, metadata, and auth scopes."""

COMPUTE_URL_BASE = 'https://www.googleapis.com/compute/v1/'


def GlobalComputeUrl(project, collection, name):
  """Returns the full URL of a global compute resource."""
  return ''.join([COMPUTE_URL_BASE, 'projects/', project,
                  '/global/', collection, '/', name])


def ZonalComputeUrl(project, zone, collection, name):
  """Returns the full URL of a zonal compute resource."""
  return ''.join([COMPUTE_URL_BASE, 'projects/', project,
                  '/zones/', zone, '/', collection, '/', name])


def GenerateConfig(context):
  """Generate configuration.

  Args:
    context: Deployment Manager context; reads context.properties
      ('instanceName', 'zone', 'scopes', 'metadata') and
      context.env['project'].

  Returns:
    Dict with a single 'resources' entry describing the VM instance.
  """
  base_name = context.properties['instanceName']

  # Flatten the metadata mapping into the API's list-of-items form.
  # NOTE: .items() instead of the Python-2-only .iteritems(), which raises
  # AttributeError under Python 3.
  items = []
  for key, value in context.properties['metadata'].items():
    items.append({
        'key': key,
        'value': value
    })
  metadata = {'items': items}

  # Properties for the container-based instance.
  instance = {
      'zone': context.properties['zone'],
      'machineType': ZonalComputeUrl(
          context.env['project'], context.properties['zone'], 'machineTypes',
          'f1-micro'),
      'metadata': metadata,
      'serviceAccounts': [{
          'email': 'default',
          'scopes': context.properties['scopes']
      }],
      'disks': [{
          'deviceName': 'boot',
          'type': 'PERSISTENT',
          'autoDelete': True,
          'boot': True,
          'initializeParams': {
              'diskName': base_name + '-disk',
              'sourceImage': GlobalComputeUrl(
                  'debian-cloud', 'images',
                  ''.join(['backports-debian', '-7-wheezy-v20151104']))
          },
      }],
      'networkInterfaces': [{
          'accessConfigs': [{
              'name': 'external-nat',
              'type': 'ONE_TO_ONE_NAT'
          }],
          'network': GlobalComputeUrl(
              context.env['project'], 'networks', 'default')
      }]
  }

  # Resources and output to return.
  return {
      'resources': [{
          'name': base_name,
          'type': 'compute.v1.instance',
          'properties': instance
      }]
  }
import re

from getting_started import main


def test_main(cloud_config, capsys):
    """Run the sample against the configured project and verify that the
    printed output contains the Hamlet query results."""
    main(cloud_config.project)
    captured, _ = capsys.readouterr()

    results_re = re.compile(r'Query Results:.hamlet', re.DOTALL)
    assert results_re.search(captured)
"Example code to perform int8 GEMM"

import logging
import sys

import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm.topi.cuda.tensor_intrin import dp4a

# When True, run a fresh AutoTVM search; otherwise replay PRETUNED_INDEX.
DO_TUNING = True
# Index into the config space used when DO_TUNING is False.
PRETUNED_INDEX = 75333

# dp4a tensor intrinsic operating entirely on "local" (register) buffers.
intrin_dp4a = dp4a("local", "local", "local")


@autotvm.template
def gemm_int8(n, m, l):
    """AutoTVM template: C[n, m] = A[n, l] . B[m, l]^T with int8 inputs
    accumulated in int32, tensorized with the CUDA dp4a instruction.

    Returns (schedule, [A, B, C]).
    """
    A = te.placeholder((n, l), name="A", dtype="int8")
    B = te.placeholder((m, l), name="B", dtype="int8")

    # Reduce over the shared inner dimension; accumulate in int32.
    k = te.reduce_axis((0, l), name="k")
    C = te.compute(
        (n, m),
        lambda i, j: te.sum(A[i, k].astype("int32") * B[j, k].astype("int32"), axis=k),
        name="C",
    )

    cfg = autotvm.get_config()
    s = te.create_schedule(C.op)
    y, x = C.op.axis

    # Stage inputs through shared memory, then registers.
    AA = s.cache_read(A, "shared", [C])
    BB = s.cache_read(B, "shared", [C])
    AL = s.cache_read(AA, "local", [C])
    BL = s.cache_read(BB, "local", [C])
    CC = s.cache_write(C, "local")

    k = CC.op.reduce_axis[0]

    # Innermost k factor must be exactly 4 so it matches the dp4a intrinsic.
    cfg.define_split(
        "tile_k",
        cfg.axis(k),
        num_outputs=3,
        filter=lambda entity: entity.size[2] == 4 and entity.size[0] * 2 >= entity.size[1],
    )

    ko, kt, ki = cfg["tile_k"].apply(s, CC, k)

    s[CC].tensorize(ki, intrin_dp4a)

    block_x = te.thread_axis("blockIdx.x")
    block_y = te.thread_axis("blockIdx.y")
    thread_x = te.thread_axis("threadIdx.x")
    thread_y = te.thread_axis("threadIdx.y")

    def block_size_filter(entity):
        # Constrain the tuning space to reasonable block/vthread shapes.
        return (
            entity.size[0] * 2 >= entity.size[1] * 2
            and entity.size[1] <= 16
            and entity.size[3] <= 4
        )

    cfg.define_split("tile_y", cfg.axis(y), num_outputs=4, filter=block_size_filter)
    cfg.define_split("tile_x", cfg.axis(x), num_outputs=4, filter=block_size_filter)
    by, tyz, ty, yi = cfg["tile_y"].apply(s, C, y)
    bx, txz, tx, xi = cfg["tile_x"].apply(s, C, x)

    # Map the output tiles onto the CUDA block/vthread/thread hierarchy.
    s[C].bind(by, block_y)
    s[C].bind(bx, block_x)
    s[C].bind(tyz, te.thread_axis("vthread"))
    s[C].bind(txz, te.thread_axis("vthread"))
    s[C].bind(ty, thread_y)
    s[C].bind(tx, thread_x)
    s[C].reorder(by, bx, tyz, txz, ty, tx, yi, xi)

    s[CC].compute_at(s[C], tx)

    yo, xo = CC.op.axis
    s[CC].reorder(ko, kt, yo, xo, ki)
    s[CC].unroll(kt)

    # Register-level input stages: vectorized 4-wide loads, double buffered.
    for stage in [AL, BL]:
        s[stage].compute_at(s[CC], kt)
        _, xi = s[stage].split(stage.op.axis[1], factor=4)
        s[stage].vectorize(xi)
        s[stage].double_buffer()

    # Shared-memory stages: cooperative copy by the whole thread block.
    cfg.define_knob("storage_align", [16, 48])
    for stage in [AA, BB]:
        s[stage].storage_align(s[stage].op.axis[0], cfg["storage_align"].val, 0)
        s[stage].compute_at(s[CC], ko)

        fused = s[stage].fuse(*s[stage].op.axis)
        ty, tx = s[stage].split(fused, nparts=cfg["tile_y"].size[2])
        tx, xi = s[stage].split(tx, nparts=cfg["tile_x"].size[2])
        _, xi = s[stage].split(xi, factor=16)

        s[stage].bind(ty, thread_y)
        s[stage].bind(tx, thread_x)
        s[stage].vectorize(xi)

    cfg.define_knob("auto_unroll_max_step", [512, 1500])
    s[C].pragma(by, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[C].pragma(by, "unroll_explicit", False)

    # 2 ops (multiply + add) per element of the reduction.
    cfg.add_flop(n * m * l * 2)
    return s, [A, B, C]


if __name__ == "__main__":
    N = 2048
    n = m = l = N

    logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
    task = autotvm.task.create(gemm_int8, args=(n, m, l), target="cuda")
    print(task.config_space)

    measure_option = autotvm.measure_option(
        builder=autotvm.LocalBuilder(),
        runner=autotvm.LocalRunner(repeat=3, min_repeat_ms=100, timeout=4),
    )

    log_name = "gemm_int8.log"
    if DO_TUNING:
        # Search the schedule space, logging every measured config.
        tuner = autotvm.tuner.XGBTuner(task)
        tuner.tune(
            n_trial=1000,
            measure_option=measure_option,
            callbacks=[autotvm.callback.log_to_file(log_name)],
        )

        dispatch_context = autotvm.apply_history_best(log_name)
        best_config = dispatch_context.query(task.target, task.workload)
        print("\nBest config:")
        print(best_config)
    else:
        # Replay a previously-found configuration by index.
        config = task.config_space.get(PRETUNED_INDEX)
        dispatch_context = autotvm.task.ApplyConfig(config)
        print("Using pretuned config:")
        print(config)

    # Build the kernel under the chosen config.
    with dispatch_context:
        with tvm.target.Target("cuda"):
            s, arg_bufs = gemm_int8(n, m, l)
            f = tvm.build(s, arg_bufs, "cuda", name="gemm_int8")

    dev = tvm.device("cuda", 0)

    a_np = np.random.randint(size=(n, l), low=-128, high=127, dtype="int8")
    b_np = np.random.randint(size=(m, l), low=-128, high=127, dtype="int8")

    a = tvm.nd.array(a_np, dev)
    b = tvm.nd.array(b_np, dev)
    c = tvm.nd.array(np.zeros((n, m), dtype="int32"), dev)

    f(a, b, c)

    # Validate against a NumPy reference GEMM.
    tvm.testing.assert_allclose(
        c.numpy(), np.dot(a_np.astype("int32"), b_np.T.astype("int32")), rtol=1e-5
    )

    num_ops = 2 * l * m * n
    num_runs = 1000
    timer_f = f.time_evaluator(f.entry_name, dev, number=num_runs)
    t = timer_f(a, b, c).mean
    GOPS = num_ops / (t * 1e3) / 1e6
    print("average time cost of %d runs = %g ms, %g GOPS." % (num_runs, t * 1e3, GOPS))
import json
import time
import urllib

from tempest.common import rest_client
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging

CONF = config.CONF

LOG = logging.getLogger(__name__)


class SnapshotsClientJSON(rest_client.RestClient):
    """Client class to send CRUD Volume Snapshot API requests."""

    def __init__(self, auth_provider):
        super(SnapshotsClientJSON, self).__init__(auth_provider)

        self.service = CONF.volume.catalog_type
        self.build_interval = CONF.volume.build_interval
        self.build_timeout = CONF.volume.build_timeout

    def list_snapshots(self, params=None):
        """List all the snapshot."""
        url = 'snapshots'
        if params:
            url += '?%s' % urllib.urlencode(params)

        resp, body = self.get(url)
        body = json.loads(body)
        return resp, body['snapshots']

    def list_snapshots_with_detail(self, params=None):
        """List the details of all snapshots."""
        url = 'snapshots/detail'
        if params:
            url += '?%s' % urllib.urlencode(params)

        resp, body = self.get(url)
        body = json.loads(body)
        return resp, body['snapshots']

    def get_snapshot(self, snapshot_id):
        """Returns the details of a single snapshot."""
        url = "snapshots/%s" % str(snapshot_id)
        resp, body = self.get(url)
        body = json.loads(body)
        return resp, body['snapshot']

    def create_snapshot(self, volume_id, **kwargs):
        """
        Creates a new snapshot.
        volume_id(Required): id of the volume.
        force: Create a snapshot even if the volume attached (Default=False)
        display_name: Optional snapshot Name.
        display_description: User friendly snapshot description.
        """
        post_body = {'volume_id': volume_id}
        post_body.update(kwargs)
        post_body = json.dumps({'snapshot': post_body})
        resp, body = self.post('snapshots', post_body)
        body = json.loads(body)
        return resp, body['snapshot']

    def update_snapshot(self, snapshot_id, **kwargs):
        """Updates a snapshot."""
        put_body = json.dumps({'snapshot': kwargs})
        resp, body = self.put('snapshots/%s' % snapshot_id, put_body)
        body = json.loads(body)
        return resp, body['snapshot']

    # NOTE(afazekas): just for the wait function
    def _get_snapshot_status(self, snapshot_id):
        resp, body = self.get_snapshot(snapshot_id)
        status = body['status']
        # NOTE(afazekas): snapshot can reach an "error"
        # state in a "normal" lifecycle
        if (status == 'error'):
            raise exceptions.SnapshotBuildErrorException(
                snapshot_id=snapshot_id)

        return status

    # NOTE(afazkas): Wait reinvented again. It is not in the correct layer
    def wait_for_snapshot_status(self, snapshot_id, status):
        """Waits for a Snapshot to reach a given status.

        Polls every `build_interval` seconds and raises TimeoutException
        once `build_timeout` is exceeded.
        """
        start_time = time.time()
        old_value = value = self._get_snapshot_status(snapshot_id)
        while True:
            dtime = time.time() - start_time
            if value != old_value:
                # Fixed: the two concatenated literals were missing a
                # separating space ('..."%s"in %d second(s).').
                LOG.info('Value transition from "%s" to "%s" '
                         'in %d second(s).', old_value, value, dtime)
            if value == status:
                return value

            if dtime > self.build_timeout:
                # Fixed: missing space between '(%ds)' and 'while waiting'.
                message = ('Time Limit Exceeded! (%ds) '
                           'while waiting for %s, '
                           'but we got %s.'
                           % (self.build_timeout, status, value))
                raise exceptions.TimeoutException(message)
            # Single sleep per iteration. The original slept both at the top
            # and the bottom of the loop, doubling the effective poll
            # interval and delaying even an immediate success by one sleep.
            time.sleep(self.build_interval)
            old_value = value
            value = self._get_snapshot_status(snapshot_id)

    def delete_snapshot(self, snapshot_id):
        """Delete Snapshot."""
        return self.delete("snapshots/%s" % str(snapshot_id))

    def is_resource_deleted(self, id):
        # Deleted when GET raises NotFound.
        try:
            self.get_snapshot(id)
        except exceptions.NotFound:
            return True
        return False

    def reset_snapshot_status(self, snapshot_id, status):
        """Reset the specified snapshot's status."""
        post_body = json.dumps({'os-reset_status': {"status": status}})
        resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
        return resp, body

    def update_snapshot_status(self, snapshot_id, status, progress):
        """Update the specified snapshot's status."""
        post_body = {
            'status': status,
            'progress': progress
        }
        post_body = json.dumps({'os-update_snapshot_status': post_body})
        url = 'snapshots/%s/action' % str(snapshot_id)
        resp, body = self.post(url, post_body)
        return resp, body

    def create_snapshot_metadata(self, snapshot_id, metadata):
        """Create metadata for the snapshot."""
        post_body = json.dumps({'metadata': metadata})
        url = "snapshots/%s/metadata" % str(snapshot_id)
        resp, body = self.post(url, post_body)
        body = json.loads(body)
        return resp, body['metadata']

    def get_snapshot_metadata(self, snapshot_id):
        """Get metadata of the snapshot."""
        url = "snapshots/%s/metadata" % str(snapshot_id)
        resp, body = self.get(url)
        body = json.loads(body)
        return resp, body['metadata']

    def update_snapshot_metadata(self, snapshot_id, metadata):
        """Update metadata for the snapshot."""
        put_body = json.dumps({'metadata': metadata})
        url = "snapshots/%s/metadata" % str(snapshot_id)
        resp, body = self.put(url, put_body)
        body = json.loads(body)
        return resp, body['metadata']

    def update_snapshot_metadata_item(self, snapshot_id, id, meta_item):
        """Update metadata item for the snapshot."""
        put_body = json.dumps({'meta': meta_item})
        url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id))
        resp, body = self.put(url, put_body)
        body = json.loads(body)
        return resp, body['meta']

    def delete_snapshot_metadata_item(self, snapshot_id, id):
        """Delete metadata item for the snapshot."""
        url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id))
        resp, body = self.delete(url)
        return resp, body

    def force_delete_snapshot(self, snapshot_id):
        """Force Delete Snapshot."""
        post_body = json.dumps({'os-force_delete': {}})
        resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
        return resp, body
import os

from segments import Segment, theme
from utils import colors, glyphs


class CurrentDir(Segment):
    """Shows the current working directory, abbreviating $HOME to '~'."""

    bg = colors.background(theme.CURRENTDIR_BG)
    fg = colors.foreground(theme.CURRENTDIR_FG)

    def init(self, cwd):
        self.text = cwd.replace(os.path.expanduser('~'), '~')


class ReadOnly(Segment):
    """Shows a write-protected glyph; hidden when the directory is writable."""

    bg = colors.background(theme.READONLY_BG)
    fg = colors.foreground(theme.READONLY_FG)

    def init(self, cwd):
        self.text = ' ' + glyphs.WRITE_ONLY + ' '
        writable = os.access(cwd, os.W_OK)
        if writable:
            self.active = False


class Venv(Segment):
    """Shows the name of the active Python virtualenv, if any."""

    bg = colors.background(theme.VENV_BG)
    fg = colors.foreground(theme.VENV_FG)

    def init(self):
        venv_path = os.getenv('VIRTUAL_ENV')
        if venv_path is None:
            self.active = False
            return
        self.text = glyphs.VIRTUAL_ENV + ' ' + os.path.basename(venv_path)
from PyQt4 import QtGui, QtCore, QtSvg
from PyQt4.QtCore import QMimeData
from PyQt4.QtGui import QGraphicsScene, QGraphicsView, QWidget, QApplication

from Orange.data.io import FileFormat


class ImgFormat(FileFormat):
    """Base class for saving a Qt scene/widget to an image file.

    Subclasses provide the paint device (`_get_buffer`), the render target
    rectangle (`_get_target`), the save step (`_save_buffer`) and, for the
    pyqtgraph path, an exporter (`_get_exporter` / `_export`).
    """

    @staticmethod
    def _get_buffer(size, filename):
        # Return the QPaintDevice to render into.
        raise NotImplementedError

    @staticmethod
    def _get_target(scene, painter, buffer):
        # Return the QRectF to render into.
        # NOTE(review): declared with 3 parameters here, but write_image and
        # all subclasses use 4 (scene, painter, buffer, source) — confirm and
        # align the base signature.
        raise NotImplementedError

    @staticmethod
    def _save_buffer(buffer, filename):
        # Persist the rendered buffer to `filename`.
        raise NotImplementedError

    @staticmethod
    def _get_exporter():
        # Return a pyqtgraph exporter class.
        raise NotImplementedError

    @staticmethod
    def _export(self, exporter, filename):
        # NOTE(review): marked @staticmethod yet takes `self` as the first
        # parameter; subclasses override it without `self` — confirm intent.
        raise NotImplementedError

    @classmethod
    def write_image(cls, filename, scene):
        # Try the pyqtgraph-exporter path first; on any failure fall back to
        # rendering the scene/widget directly with a QPainter.
        try:
            scene = scene.scene()
            scenerect = scene.sceneRect()  # preserve scene bounding rectangle
            viewrect = scene.views()[0].sceneRect()
            scene.setSceneRect(viewrect)
            backgroundbrush = scene.backgroundBrush()  # preserve scene background brush
            scene.setBackgroundBrush(QtCore.Qt.white)
            exporter = cls._get_exporter()
            cls._export(exporter(scene), filename)
            scene.setBackgroundBrush(backgroundbrush)  # reset scene background brush
            scene.setSceneRect(scenerect)  # reset scene bounding rectangle
        except Exception:
            # NOTE(review): if `scene` is neither a QGraphicsScene/View nor a
            # QWidget, `rect` is never bound and the adjusted() call below
            # raises UnboundLocalError — confirm callers only pass these types.
            if isinstance(scene, (QGraphicsScene, QGraphicsView)):
                rect = scene.sceneRect()
            elif isinstance(scene, QWidget):
                rect = scene.rect()
            # Add a 15px margin on every side.
            rect = rect.adjusted(-15, -15, 15, 15)
            buffer = cls._get_buffer(rect.size(), filename)

            painter = QtGui.QPainter()
            painter.begin(buffer)
            painter.setRenderHint(QtGui.QPainter.Antialiasing)

            target = cls._get_target(scene, painter, buffer, rect)
            try:
                scene.render(painter, target, rect)
            except TypeError:
                scene.render(painter)  # PyQt4 QWidget.render() takes different params
            cls._save_buffer(buffer, filename)
            painter.end()

    @classmethod
    def write(cls, filename, scene):
        # Accept either the scene object itself or a {'scene': ...} mapping.
        if type(scene) == dict:
            scene = scene['scene']
        cls.write_image(filename, scene)


class PngFormat(ImgFormat):
    EXTENSIONS = ('.png',)
    DESCRIPTION = 'Portable Network Graphics'
    PRIORITY = 50

    @staticmethod
    def _get_buffer(size, filename):
        return QtGui.QPixmap(int(size.width()), int(size.height()))

    @staticmethod
    def _get_target(scene, painter, buffer, source):
        try:
            brush = scene.backgroundBrush()
            if brush.style() == QtCore.Qt.NoBrush:
                # No explicit background: use the palette's base colour.
                brush = QtGui.QBrush(scene.palette().color(QtGui.QPalette.Base))
        except AttributeError:  # not a QGraphicsView/Scene
            brush = QtGui.QBrush(QtCore.Qt.white)
        painter.fillRect(buffer.rect(), brush)
        return QtCore.QRectF(0, 0, source.width(), source.height())

    @staticmethod
    def _save_buffer(buffer, filename):
        buffer.save(filename, "png")

    @staticmethod
    def _get_exporter():
        from pyqtgraph.exporters.ImageExporter import ImageExporter
        return ImageExporter

    @staticmethod
    def _export(exporter, filename):
        buffer = exporter.export(toBytes=True)
        buffer.save(filename, "png")


class ClipboardFormat(PngFormat):
    # No file extensions: this "format" copies to the system clipboard.
    EXTENSIONS = ()
    DESCRIPTION = 'System Clipboard'
    PRIORITY = 50

    @staticmethod
    def _save_buffer(buffer, _):
        QApplication.clipboard().setPixmap(buffer)

    @staticmethod
    def _export(exporter, _):
        buffer = exporter.export(toBytes=True)
        mimedata = QMimeData()
        mimedata.setData("image/png", buffer)
        QApplication.clipboard().setMimeData(mimedata)


class SvgFormat(ImgFormat):
    EXTENSIONS = ('.svg',)
    DESCRIPTION = 'Scalable Vector Graphics'
    PRIORITY = 100

    @staticmethod
    def _get_buffer(size, filename):
        buffer = QtSvg.QSvgGenerator()
        buffer.setFileName(filename)
        buffer.setSize(QtCore.QSize(int(size.width()), int(size.height())))
        return buffer

    @staticmethod
    def _get_target(scene, painter, buffer, source):
        return QtCore.QRectF(0, 0, source.width(), source.height())

    @staticmethod
    def _save_buffer(buffer, filename):
        # QSvgGenerator writes straight to `filename` during rendering;
        # nothing left to do here.
        pass

    @staticmethod
    def _get_exporter():
        from pyqtgraph.exporters.SVGExporter import SVGExporter
        return SVGExporter

    @staticmethod
    def _export(exporter, filename):
        exporter.export(filename)
"""versioneer.py (like a rocketeer, but for versions) * https://github.com/warner/python-versioneer * Brian Warner * License: Public Domain * Version: 0.7+ This file helps distutils-based projects manage their version number by just creating version-control tags. For developers who work from a VCS-generated tree (e.g. 'git clone' etc), each 'setup.py version', 'setup.py build', 'setup.py sdist' will compute a version number by asking your version-control tool about the current checkout. The version number will be written into a generated _version.py file of your choosing, where it can be included by your __init__.py For users who work from a VCS-generated tarball (e.g. 'git archive'), it will compute a version number by looking at the name of the directory created when te tarball is unpacked. This conventionally includes both the name of the project and a version number. For users who work from a tarball built by 'setup.py sdist', it will get a version number from a previously-generated _version.py file. As a result, loading code directly from the source tree will not result in a real version. If you want real versions from VCS trees (where you frequently update from the upstream repository, or do new development), you will need to do a 'setup.py version' after each update, and load code from the build/ directory. You need to provide this code with a few configuration values: versionfile_source: A project-relative pathname into which the generated version strings should be written. This is usually a _version.py next to your project's main __init__.py file. If your project uses src/myproject/__init__.py, this should be 'src/myproject/_version.py'. This file should be checked in to your VCS as usual: the copy created below by 'setup.py update_files' will include code that parses expanded VCS keywords in generated tarballs. The 'build' and 'sdist' commands will replace it with a copy that has just the calculated version string. 
versionfile_build: Like versionfile_source, but relative to the build directory instead of the source directory. These will differ when your setup.py uses 'package_dir='. If you have package_dir={'myproject': 'src/myproject'}, then you will probably have versionfile_build='myproject/_version.py' and versionfile_source='src/myproject/_version.py'. tag_prefix: a string, like 'PROJECTNAME-', which appears at the start of all VCS tags. If your tags look like 'myproject-1.2.0', then you should use tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this should be an empty string. parentdir_prefix: a string, frequently the same as tag_prefix, which appears at the start of all unpacked tarball filenames. If your tarball unpacks into 'myproject-1.2.0', this should be 'myproject-'. To use it: 1: include this file in the top level of your project 2: make the following changes to the top of your setup.py: import versioneer versioneer.versionfile_source = 'src/myproject/_version.py' versioneer.versionfile_build = 'myproject/_version.py' versioneer.tag_prefix = '' # tags are like 1.2.0 versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0' 3: add the following arguments to the setup() call in your setup.py: version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), 4: run 'setup.py update_files', which will create _version.py, and will append the following to your __init__.py: from _version import __version__ 5: modify your MANIFEST.in to include versioneer.py 6: add both versioneer.py and the generated _version.py to your VCS """ import os import sys import re import subprocess from distutils.core import Command from distutils.command.sdist import sdist as _sdist from distutils.command.build import build as _build versionfile_source = None versionfile_build = None tag_prefix = None parentdir_prefix = None VCS = "git" IN_LONG_VERSION_PY = False GIT = "git" LONG_VERSION_PY = ''' IN_LONG_VERSION_PY = True git_refnames = 
"%(DOLLAR)sFormat:%%d%(DOLLAR)s" git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" GIT = "git" import subprocess import sys def run_command(args, cwd=None, verbose=False): try: # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd) except EnvironmentError: e = sys.exc_info()[1] if verbose: print("unable to run %%s" %% args[0]) print(e) return None stdout = p.communicate()[0].strip() if sys.version >= '3': stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %%s (error)" %% args[0]) return None return stdout import sys import re import os.path def get_expanded_variables(versionfile_source): # the code embedded in _version.py can just fetch the value of these # variables. When used from setup.py, we don't want to import # _version.py, so we do it with a regexp instead. This function is not # used from _version.py. variables = {} try: for line in open(versionfile_source,"r").readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: variables["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: variables["full"] = mo.group(1) except EnvironmentError: pass return variables def versions_from_expanded_variables(variables, tag_prefix, verbose=False): refnames = variables["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("variables are unexpanded, not using") return {} # unexpanded, so not in an unpacked git-archive tarball refs = set([r.strip() for r in refnames.strip("()").split(",")]) for ref in list(refs): if not re.search(r'\d', ref): if verbose: print("discarding '%%s', no digits" %% ref) refs.discard(ref) # Assume all version tags have a digit. git's %%d expansion # behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us # distinguish between branches and tags. 
By ignoring refnames # without digits, we filter out many common branch names like # "release" and "stabilization", as well as "HEAD" and "master". if verbose: print("remaining refs: %%s" %% ",".join(sorted(refs))) for ref in sorted(refs): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %%s" %% r) return { "version": r, "full": variables["full"].strip() } # no suitable tags, so we use the full revision id if verbose: print("no suitable tags, using full revision id") return { "version": variables["full"].strip(), "full": variables["full"].strip() } def versions_from_vcs(tag_prefix, versionfile_source, verbose=False): # this runs 'git' from the root of the source tree. That either means # someone ran a setup.py command (and this code is in versioneer.py, so # IN_LONG_VERSION_PY=False, thus the containing directory is the root of # the source tree), or someone ran a project-specific entry point (and # this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the # containing directory is somewhere deeper in the source tree). This only # gets called if the git-archive 'subst' variables were *not* expanded, # and _version.py hasn't already been rewritten with a short version # string, meaning we're inside a checked out source tree. try: here = os.path.realpath(__file__) except NameError: # some py2exe/bbfreeze/non-CPython implementations don't do __file__ return {} # not always correct # versionfile_source is the relative path from the top of the source tree # (where the .git directory might live) to this file. Invert this to find # the root from __file__. 
root = here if IN_LONG_VERSION_PY: for i in range(len(versionfile_source.split("/"))): root = os.path.dirname(root) else: root = os.path.dirname(here) if not os.path.exists(os.path.join(root, ".git")): if verbose: print("no .git in %%s" %% root) return {} stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"], cwd=root) if stdout is None: return {} if not stdout.startswith(tag_prefix): if verbose: print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix)) return {} tag = stdout[len(tag_prefix):] stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root) if stdout is None: return {} full = stdout.strip() if tag.endswith("-dirty"): full += "-dirty" return {"version": tag, "full": full} def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False): if IN_LONG_VERSION_PY: # We're running from _version.py. If it's from a source tree # (execute-in-place), we can work upwards to find the root of the # tree, and then check the parent directory for a version string. If # it's in an installed application, there's no hope. try: here = os.path.realpath(__file__) except NameError: # py2exe/bbfreeze/non-CPython don't have __file__ return {} # without __file__, we have no hope # versionfile_source is the relative path from the top of the source # tree to _version.py. Invert this to find the root from __file__. root = here for i in range(len(versionfile_source.split("/"))): root = os.path.dirname(root) else: # we're running from versioneer.py, which means we're running from # the setup.py in a source tree. sys.argv[0] is setup.py in the root. here = os.path.realpath(sys.argv[0]) root = os.path.dirname(here) # Source tarballs conventionally unpack into a directory that includes # both the project name and a version string. 
dirname = os.path.basename(root) if not dirname.startswith(parentdir_prefix): if verbose: print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %% (root, dirname, parentdir_prefix)) return None return {"version": dirname[len(parentdir_prefix):], "full": ""} tag_prefix = "%(TAG_PREFIX)s" parentdir_prefix = "%(PARENTDIR_PREFIX)s" versionfile_source = "%(VERSIONFILE_SOURCE)s" def get_versions(default={"version": "unknown", "full": ""}, verbose=False): variables = { "refnames": git_refnames, "full": git_full } ver = versions_from_expanded_variables(variables, tag_prefix, verbose) if not ver: ver = versions_from_vcs(tag_prefix, versionfile_source, verbose) if not ver: ver = versions_from_parentdir(parentdir_prefix, versionfile_source, verbose) if not ver: ver = default return ver '''


def run_command(args, cwd=None, verbose=False):
    """Run *args* as a subprocess and return its stripped stdout.

    Returns None (instead of raising) when the executable is missing or
    exits non-zero, so callers can fall through to other version sources.
    """
    try:
        # remember shell=False, so use git.cmd on windows, not just git
        p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
    except EnvironmentError:
        e = sys.exc_info()[1]
        if verbose:
            print("unable to run %s" % args[0])
            print(e)
        return None
    stdout = p.communicate()[0].strip()
    # On py3 communicate() returns bytes; decode so callers get str either way.
    if sys.version >= '3':
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % args[0])
        return None
    return stdout


def get_expanded_variables(versionfile_source):
    # the code embedded in _version.py can just fetch the value of these
    # variables. When used from setup.py, we don't want to import
    # _version.py, so we do it with a regexp instead. This function is not
    # used from _version.py.
    variables = {}
    try:
        for line in open(versionfile_source,"r").readlines():
            if line.strip().startswith("git_refnames ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    variables["refnames"] = mo.group(1)
            if line.strip().startswith("git_full ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    variables["full"] = mo.group(1)
    except EnvironmentError:
        # Missing _version.py is fine; caller falls back to other sources.
        pass
    return variables


def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
    """Derive a version dict from git-archive keyword-expanded variables."""
    refnames = variables["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("variables are unexpanded, not using")
        return {}  # unexpanded, so not in an unpacked git-archive tarball
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    for ref in list(refs):
        if not re.search(r'\d', ref):
            if verbose:
                print("discarding '%s', no digits" % ref)
            refs.discard(ref)
            # Assume all version tags have a digit. git's %d expansion
            # behaves like git log --decorate=short and strips out the
            # refs/heads/ and refs/tags/ prefixes that would let us
            # distinguish between branches and tags. By ignoring refnames
            # without digits, we filter out many common branch names like
            # "release" and "stabilization", as well as "HEAD" and "master".
    if verbose:
        print("remaining refs: %s" % ",".join(sorted(refs)))
    for ref in sorted(refs):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full": variables["full"].strip()}
    # no suitable tags, so we use the full revision id
    if verbose:
        print("no suitable tags, using full revision id")
    return {"version": variables["full"].strip(),
            "full": variables["full"].strip()}


def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
    # this runs 'git' from the root of the source tree. That either means
    # someone ran a setup.py command (and this code is in versioneer.py, so
    # IN_LONG_VERSION_PY=False, thus the containing directory is the root of
    # the source tree), or someone ran a project-specific entry point (and
    # this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
    # containing directory is somewhere deeper in the source tree). This only
    # gets called if the git-archive 'subst' variables were *not* expanded,
    # and _version.py hasn't already been rewritten with a short version
    # string, meaning we're inside a checked out source tree.

    try:
        here = os.path.realpath(__file__)
    except NameError:
        # some py2exe/bbfreeze/non-CPython implementations don't do __file__
        return {}  # not always correct

    # versionfile_source is the relative path from the top of the source tree
    # (where the .git directory might live) to this file. Invert this to find
    # the root from __file__.
    root = here
    if IN_LONG_VERSION_PY:
        for i in range(len(versionfile_source.split("/"))):
            root = os.path.dirname(root)
    else:
        root = os.path.dirname(here)
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        return {}

    stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
                         cwd=root)
    if stdout is None:
        return {}
    if not stdout.startswith(tag_prefix):
        if verbose:
            print("tag '%s' doesn't start with prefix '%s'" %
                  (stdout, tag_prefix))
        return {}
    tag = stdout[len(tag_prefix):]
    stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
    if stdout is None:
        return {}
    full = stdout.strip()
    # Propagate 'describe --dirty' state onto the full revision id too.
    if tag.endswith("-dirty"):
        full += "-dirty"
    return {"version": tag, "full": full}


def versions_from_parentdir(parentdir_prefix, versionfile_source,
                            verbose=False):
    if IN_LONG_VERSION_PY:
        # We're running from _version.py. If it's from a source tree
        # (execute-in-place), we can work upwards to find the root of the
        # tree, and then check the parent directory for a version string. If
        # it's in an installed application, there's no hope.
        try:
            here = os.path.realpath(__file__)
        except NameError:
            # py2exe/bbfreeze/non-CPython don't have __file__
            return {}  # without __file__, we have no hope
        # versionfile_source is the relative path from the top of the source
        # tree to _version.py. Invert this to find the root from __file__.
        root = here
        for i in range(len(versionfile_source.split("/"))):
            root = os.path.dirname(root)
    else:
        # we're running from versioneer.py, which means we're running from
        # the setup.py in a source tree. sys.argv[0] is setup.py in the root.
        here = os.path.realpath(sys.argv[0])
        root = os.path.dirname(here)

    # Source tarballs conventionally unpack into a directory that includes
    # both the project name and a version string.
    dirname = os.path.basename(root)
    if not dirname.startswith(parentdir_prefix):
        if verbose:
            print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
                  (root, dirname, parentdir_prefix))
        return None
    return {"version": dirname[len(parentdir_prefix):], "full": ""}


def do_vcs_install(versionfile_source, ipy):
    """git-add versioneer's files and ensure .gitattributes marks
    versionfile_source with export-subst (enables git-archive expansion)."""
    run_command([GIT, "add", "versioneer.py"])
    run_command([GIT, "add", versionfile_source])
    run_command([GIT, "add", ipy])
    present = False
    try:
        f = open(".gitattributes", "r")
        for line in f.readlines():
            if line.strip().startswith(versionfile_source):
                if "export-subst" in line.strip().split()[1:]:
                    present = True
        f.close()
    except EnvironmentError:
        pass
    if not present:
        f = open(".gitattributes", "a+")
        f.write("%s export-subst\n" % versionfile_source)
        f.close()
        run_command([GIT, "add", ".gitattributes"])


# Template for the short, fully-rewritten _version.py (used once a real
# version has been computed and baked in).
SHORT_VERSION_PY = """
version_version = '%(version)s'
version_full = '%(full)s'
def get_versions(default={}, verbose=False):
    return {'version': version_version, 'full': version_full}

"""

DEFAULT = {"version": "unknown", "full": "unknown"}


def versions_from_file(filename):
    """Extract the version dict from a SHORT_VERSION_PY-style file."""
    versions = {}
    try:
        f = open(filename)
    except EnvironmentError:
        return versions
    for line in f.readlines():
        mo = re.match("version_version = '([^']+)'", line)
        if mo:
            versions["version"] = mo.group(1)
        mo = re.match("version_full = '([^']+)'", line)
        if mo:
            versions["full"] = mo.group(1)
    return versions


def write_to_version_file(filename, versions):
    """Overwrite *filename* with SHORT_VERSION_PY rendered from *versions*."""
    f = open(filename, "w")
    f.write(SHORT_VERSION_PY % versions)
    f.close()
    print("set %s to '%s'" % (filename, versions["version"]))


def get_best_versions(versionfile, tag_prefix, parentdir_prefix,
                      default=DEFAULT, verbose=False):
    # returns dict with two keys: 'version' and 'full'
    #
    # extract version from first of _version.py, 'git describe', parentdir.
    # This is meant to work for developers using a source checkout, for users
    # of a tarball created by 'setup.py sdist', and for users of a
    # tarball/zipball created by 'git archive' or github's download-from-tag
    # feature.

    # NOTE(review): this reads the module-level versionfile_source global
    # rather than the 'versionfile' parameter -- looks intentional in
    # upstream versioneer, but confirm they always refer to the same file.
    variables = get_expanded_variables(versionfile_source)
    if variables:
        ver = versions_from_expanded_variables(variables, tag_prefix)
        if ver:
            if verbose:
                print("got version from expanded variable %s" % ver)
            return ver

    ver = versions_from_file(versionfile)
    if ver:
        if verbose:
            print("got version from file %s %s" % (versionfile, ver))
        return ver

    ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
    if ver:
        if verbose:
            print("got version from git %s" % ver)
        return ver

    ver = versions_from_parentdir(parentdir_prefix, versionfile_source,
                                  verbose)
    if ver:
        if verbose:
            print("got version from parentdir %s" % ver)
        return ver

    # NOTE(review): 'ver' is falsy here, so this prints the empty result
    # rather than 'default' -- presumably meant to print default; verify.
    if verbose:
        print("got version from default %s" % ver)
    return default


def get_versions(default=DEFAULT, verbose=False):
    # Project setup.py must configure these module-level knobs before use.
    assert versionfile_source is not None, \
        "please set versioneer.versionfile_source"
    assert tag_prefix is not None, "please set versioneer.tag_prefix"
    assert parentdir_prefix is not None, \
        "please set versioneer.parentdir_prefix"
    return get_best_versions(versionfile_source, tag_prefix,
                             parentdir_prefix, default=default,
                             verbose=verbose)


def get_version(verbose=False):
    """Return just the version string (see get_versions for the full dict)."""
    return get_versions(verbose=verbose)["version"]


class cmd_version(Command):
    """setup.py subcommand: print the computed version string."""
    description = "report generated version string"
    user_options = []
    boolean_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        ver = get_version(verbose=True)
        print("Version is currently: %s" % ver)


class cmd_build(_build):
    """'build' wrapper that rewrites the built _version.py with the
    frozen (short) version file."""

    def run(self):
        versions = get_versions(verbose=True)
        _build.run(self)
        # now locate _version.py in the new build/ directory and replace it
        # with an updated value
        target_versionfile = os.path.join(self.build_lib, versionfile_build)
        print("UPDATING %s" % target_versionfile)
        os.unlink(target_versionfile)
        f = open(target_versionfile, "w")
        f.write(SHORT_VERSION_PY % versions)
        f.close()


class cmd_sdist(_sdist):
    """'sdist' wrapper that freezes the version into the release tree."""

    def run(self):
        versions = get_versions(verbose=True)
        self._versioneer_generated_versions = versions
        # unless we update this, the command will keep using the old version
        self.distribution.metadata.version = versions["version"]
        return _sdist.run(self)

    def make_release_tree(self, base_dir, files):
        _sdist.make_release_tree(self, base_dir, files)
        # now locate _version.py in the new base_dir directory (remembering
        # that it may be a hardlink) and replace it with an updated value
        target_versionfile = os.path.join(base_dir, versionfile_source)
        print("UPDATING %s" % target_versionfile)
        os.unlink(target_versionfile)
        f = open(target_versionfile, "w")
        f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
        f.close()


# Snippet appended to the package __init__.py so importers see __version__.
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""


class cmd_update_files(Command):
    """setup.py subcommand: install _version.py, patch __init__.py, and
    register everything with git."""
    description = "modify __init__.py and create _version.py"
    user_options = []
    boolean_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
        print(" creating %s" % versionfile_source)
        f = open(versionfile_source, "w")
        f.write(LONG_VERSION_PY % {"DOLLAR": "$",
                                   "TAG_PREFIX": tag_prefix,
                                   "PARENTDIR_PREFIX": parentdir_prefix,
                                   "VERSIONFILE_SOURCE": versionfile_source,
                                   })
        f.close()
        try:
            old = open(ipy, "r").read()
        except EnvironmentError:
            old = ""
        # Append the snippet only once; re-running the command is idempotent.
        if INIT_PY_SNIPPET not in old:
            print(" appending to %s" % ipy)
            f = open(ipy, "a")
            f.write(INIT_PY_SNIPPET)
            f.close()
        else:
            print(" %s unmodified" % ipy)
        do_vcs_install(versionfile_source, ipy)


def get_cmdclass():
    """Return the distutils cmdclass dict wiring in versioneer's commands."""
    return {'version': cmd_version,
            'update_files': cmd_update_files,
            'build': cmd_build,
            'sdist': cmd_sdist,
            }
"""View classes exercising Django's class-based generic views in tests."""
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.utils.decorators import method_decorator
from django.views import generic

from regressiontests.generic_views.models import Artist, Author, Book, Page
from regressiontests.generic_views.forms import AuthorForm


class CustomTemplateView(generic.TemplateView):
    """TemplateView with a custom context."""
    template_name = 'generic_views/about.html'

    def get_context_data(self, **kwargs):
        return {
            'params': kwargs,
            'key': 'value'
        }


class ObjectDetail(generic.DetailView):
    """DetailView whose 'object' is a plain dict, not a model instance."""
    template_name = 'generic_views/detail.html'

    def get_object(self):
        return {'foo': 'bar'}


class ArtistDetail(generic.DetailView):
    queryset = Artist.objects.all()


class AuthorDetail(generic.DetailView):
    queryset = Author.objects.all()


class PageDetail(generic.DetailView):
    queryset = Page.objects.all()
    # Template name is read from the object's 'template' field.
    template_name_field = 'template'


class DictList(generic.ListView):
    """A ListView that doesn't use a model."""
    # BUGFIX: the second entry previously read {'last': 'Yoko', 'last': 'Ono'}
    # (a duplicate key, which silently collapsed to {'last': 'Ono'}).
    queryset = [
        {'first': 'John', 'last': 'Lennon'},
        {'first': 'Yoko', 'last': 'Ono'}
    ]
    template_name = 'generic_views/list.html'


class AuthorList(generic.ListView):
    queryset = Author.objects.all()


class ArtistCreate(generic.CreateView):
    model = Artist


class NaiveAuthorCreate(generic.CreateView):
    # "Naive": no success_url configured.
    queryset = Author.objects.all()


class AuthorCreate(generic.CreateView):
    model = Author
    success_url = '/list/authors/'


class SpecializedAuthorCreate(generic.CreateView):
    model = Author
    form_class = AuthorForm
    template_name = 'generic_views/form.html'
    context_object_name = 'thingy'

    def get_success_url(self):
        return reverse('author_detail', args=[self.object.id,])


class AuthorCreateRestricted(AuthorCreate):
    # Guard only the POST handler behind authentication.
    post = method_decorator(login_required)(AuthorCreate.post)


class ArtistUpdate(generic.UpdateView):
    model = Artist


class NaiveAuthorUpdate(generic.UpdateView):
    queryset = Author.objects.all()


class AuthorUpdate(generic.UpdateView):
    model = Author
    success_url = '/list/authors/'


class SpecializedAuthorUpdate(generic.UpdateView):
    model = Author
    form_class = AuthorForm
    template_name = 'generic_views/form.html'
    context_object_name = 'thingy'

    def get_success_url(self):
        return reverse('author_detail', args=[self.object.id,])


class NaiveAuthorDelete(generic.DeleteView):
    queryset = Author.objects.all()


class AuthorDelete(generic.DeleteView):
    model = Author
    success_url = '/list/authors/'


class SpecializedAuthorDelete(generic.DeleteView):
    queryset = Author.objects.all()
    template_name = 'generic_views/confirm_delete.html'
    context_object_name = 'thingy'

    def get_success_url(self):
        return reverse('authors_list')


class BookConfig(object):
    """Shared configuration mixin for the date-based Book views below."""
    queryset = Book.objects.all()
    date_field = 'pubdate'


class BookArchive(BookConfig, generic.ArchiveIndexView):
    pass


class BookYearArchive(BookConfig, generic.YearArchiveView):
    pass


class BookMonthArchive(BookConfig, generic.MonthArchiveView):
    pass


class BookWeekArchive(BookConfig, generic.WeekArchiveView):
    pass


class BookDayArchive(BookConfig, generic.DayArchiveView):
    pass


class BookTodayArchive(BookConfig, generic.TodayArchiveView):
    pass


class BookDetail(BookConfig, generic.DateDetailView):
    pass
"""A connection adapter that tries to use the best polling method for the platform pika is running on. """ import os import logging import socket import select import errno import time from operator import itemgetter from collections import defaultdict import threading import pika.compat from pika.compat import dictkeys from pika.adapters.base_connection import BaseConnection LOGGER = logging.getLogger(__name__) SELECT_TYPE = None READ = 0x0001 WRITE = 0x0004 ERROR = 0x0008 if pika.compat.PY2: _SELECT_ERROR = select.error else: # select.error was deprecated and replaced by OSError in python 3.3 _SELECT_ERROR = OSError def _get_select_errno(error): if pika.compat.PY2: assert isinstance(error, select.error), repr(error) return error.args[0] else: assert isinstance(error, OSError), repr(error) return error.errno class SelectConnection(BaseConnection): """An asynchronous connection adapter that attempts to use the fastest event loop adapter for the given platform. """ def __init__(self, parameters=None, on_open_callback=None, on_open_error_callback=None, on_close_callback=None, stop_ioloop_on_close=True, custom_ioloop=None): """Create a new instance of the Connection object. :param pika.connection.Parameters parameters: Connection parameters :param method on_open_callback: Method to call on connection open :param on_open_error_callback: Method to call if the connection cant be opened :type on_open_error_callback: method :param method on_close_callback: Method to call on connection close :param bool stop_ioloop_on_close: Call ioloop.stop() if disconnected :param custom_ioloop: Override using the global IOLoop in Tornado :raises: RuntimeError """ ioloop = custom_ioloop or IOLoop() super(SelectConnection, self).__init__(parameters, on_open_callback, on_open_error_callback, on_close_callback, ioloop, stop_ioloop_on_close) def _adapter_connect(self): """Connect to the RabbitMQ broker, returning True on success, False on failure. 
:rtype: bool """ error = super(SelectConnection, self)._adapter_connect() if not error: self.ioloop.add_handler(self.socket.fileno(), self._handle_events, self.event_state) return error def _adapter_disconnect(self): """Disconnect from the RabbitMQ broker""" if self.socket: self.ioloop.remove_handler(self.socket.fileno()) super(SelectConnection, self)._adapter_disconnect() class IOLoop(object): """Singlton wrapper that decides which type of poller to use, creates an instance of it in start_poller and keeps the invoking application in a blocking state by calling the pollers start method. Poller should keep looping until IOLoop.instance().stop() is called or there is a socket error. Passes through all operations to the loaded poller object. """ def __init__(self): self._poller = self._get_poller() def __getattr__(self, attr): return getattr(self._poller, attr) def _get_poller(self): """Determine the best poller to use for this enviroment.""" poller = None if hasattr(select, 'epoll'): if not SELECT_TYPE or SELECT_TYPE == 'epoll': LOGGER.debug('Using EPollPoller') poller = EPollPoller() if not poller and hasattr(select, 'kqueue'): if not SELECT_TYPE or SELECT_TYPE == 'kqueue': LOGGER.debug('Using KQueuePoller') poller = KQueuePoller() if (not poller and hasattr(select, 'poll') and hasattr(select.poll(), 'modify')): # pylint: disable=E1101 if not SELECT_TYPE or SELECT_TYPE == 'poll': LOGGER.debug('Using PollPoller') poller = PollPoller() if not poller: LOGGER.debug('Using SelectPoller') poller = SelectPoller() return poller class SelectPoller(object): """Default behavior is to use Select since it's the widest supported and has all of the methods we need for child classes as well. One should only need to override the update_handler and start methods for additional types. """ # Drop out of the poll loop every POLL_TIMEOUT secs as a worst case, this # is only a backstop value. We will run timeouts when they are scheduled. 
POLL_TIMEOUT = 5 # if the poller uses MS specify 1000 POLL_TIMEOUT_MULT = 1 def __init__(self): """Create an instance of the SelectPoller """ # fd-to-handler function mappings self._fd_handlers = dict() # event-to-fdset mappings self._fd_events = {READ: set(), WRITE: set(), ERROR: set()} self._stopping = False self._timeouts = {} self._next_timeout = None self._processing_fd_event_map = {} # Mutex for controlling critical sections where ioloop-interrupt sockets # are created, used, and destroyed. Needed in case `stop()` is called # from a thread. self._mutex = threading.Lock() # ioloop-interrupt socket pair; initialized in start() self._r_interrupt = None self._w_interrupt = None def get_interrupt_pair(self): """ Use a socketpair to be able to interrupt the ioloop if called from another thread. Socketpair() is not supported on some OS (Win) so use a pair of simple UDP sockets instead. The sockets will be closed and garbage collected by python when the ioloop itself is. """ try: read_sock, write_sock = socket.socketpair() except AttributeError: LOGGER.debug("Using custom socketpair for interrupt") read_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) read_sock.bind(('localhost', 0)) write_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) write_sock.connect(read_sock.getsockname()) read_sock.setblocking(0) write_sock.setblocking(0) return read_sock, write_sock def read_interrupt(self, interrupt_sock, events, write_only): # pylint: disable=W0613 """ Read the interrupt byte(s). We ignore the event mask and write_only flag as we can ony get here if there's data to be read on our fd. 
:param int interrupt_sock: The file descriptor to read from :param int events: (unused) The events generated for this fd :param bool write_only: (unused) True if poll was called to trigger a write """ try: os.read(interrupt_sock, 512) except OSError as err: if err.errno != errno.EAGAIN: raise def add_timeout(self, deadline, callback_method): """Add the callback_method to the IOLoop timer to fire after deadline seconds. Returns a handle to the timeout. Do not confuse with Tornado's timeout where you pass in the time you want to have your callback called. Only pass in the seconds until it's to be called. :param int deadline: The number of seconds to wait to call callback :param method callback_method: The callback method :rtype: str """ timeout_at = time.time() + deadline value = {'deadline': timeout_at, 'callback': callback_method} timeout_id = hash(frozenset(value.items())) self._timeouts[timeout_id] = value if not self._next_timeout or timeout_at < self._next_timeout: self._next_timeout = timeout_at return timeout_id def remove_timeout(self, timeout_id): """Remove a timeout if it's still in the timeout stack :param str timeout_id: The timeout id to remove """ try: timeout = self._timeouts.pop(timeout_id) if timeout['deadline'] == self._next_timeout: self._next_timeout = None except KeyError: pass def get_next_deadline(self): """Get the interval to the next timeout event, or a default interval """ if self._next_timeout: timeout = max((self._next_timeout - time.time(), 0)) elif self._timeouts: deadlines = [t['deadline'] for t in self._timeouts.values()] self._next_timeout = min(deadlines) timeout = max((self._next_timeout - time.time(), 0)) else: timeout = SelectPoller.POLL_TIMEOUT timeout = min((timeout, SelectPoller.POLL_TIMEOUT)) return timeout * SelectPoller.POLL_TIMEOUT_MULT def process_timeouts(self): """Process the self._timeouts event stack""" now = time.time() to_run = [timer for timer in self._timeouts.values() if timer['deadline'] <= now] # Run the 
timeouts in order of deadlines. Although this shouldn't # be strictly necessary it preserves old behaviour when timeouts # were only run periodically. for t in sorted(to_run, key=itemgetter('deadline')): t['callback']() del self._timeouts[hash(frozenset(t.items()))] self._next_timeout = None def add_handler(self, fileno, handler, events): """Add a new fileno to the set to be monitored :param int fileno: The file descriptor :param method handler: What is called when an event happens :param int events: The event mask """ self._fd_handlers[fileno] = handler self.update_handler(fileno, events) def update_handler(self, fileno, events): """Set the events to the current events :param int fileno: The file descriptor :param int events: The event mask """ for ev in (READ, WRITE, ERROR): if events & ev: self._fd_events[ev].add(fileno) else: self._fd_events[ev].discard(fileno) def remove_handler(self, fileno): """Remove a file descriptor from the set :param int fileno: The file descriptor """ try: del self._processing_fd_event_map[fileno] except KeyError: pass self.update_handler(fileno, 0) del self._fd_handlers[fileno] def start(self): """Start the main poller loop. It will loop here until self._stopping""" LOGGER.debug('Starting IOLoop') self._stopping = False with self._mutex: # Watch out for reentry if self._r_interrupt is None: # Create ioloop-interrupt socket pair and register read handler. 
# NOTE: we defer their creation because some users (e.g., # BlockingConnection adapter) don't use the event loop and these # sockets would get reported as leaks self._r_interrupt, self._w_interrupt = self.get_interrupt_pair() self.add_handler(self._r_interrupt.fileno(), self.read_interrupt, READ) interrupt_sockets_created = True else: interrupt_sockets_created = False try: # Run event loop while not self._stopping: self.poll() self.process_timeouts() finally: # Unregister and close ioloop-interrupt socket pair if interrupt_sockets_created: with self._mutex: self.remove_handler(self._r_interrupt.fileno()) self._r_interrupt.close() self._r_interrupt = None self._w_interrupt.close() self._w_interrupt = None def stop(self): """Request exit from the ioloop.""" LOGGER.debug('Stopping IOLoop') self._stopping = True with self._mutex: if self._w_interrupt is None: return try: # Send byte to interrupt the poll loop, use write() for # consitency. os.write(self._w_interrupt.fileno(), b'X') except OSError as err: if err.errno != errno.EWOULDBLOCK: raise except Exception as err: # There's nothing sensible to do here, we'll exit the interrupt # loop after POLL_TIMEOUT secs in worst case anyway. LOGGER.warning("Failed to send ioloop interrupt: %s", err) raise def poll(self, write_only=False): """Wait for events on interested filedescriptors. :param bool write_only: Passed through to the hadnlers to indicate that they should only process write events. 
""" while True: try: read, write, error = select.select(self._fd_events[READ], self._fd_events[WRITE], self._fd_events[ERROR], self.get_next_deadline()) break except _SELECT_ERROR as error: if _get_select_errno(error) == errno.EINTR: continue else: raise # Build an event bit mask for each fileno we've recieved an event for fd_event_map = defaultdict(int) for fd_set, ev in zip((read, write, error), (READ, WRITE, ERROR)): for fileno in fd_set: fd_event_map[fileno] |= ev self._process_fd_events(fd_event_map, write_only) def _process_fd_events(self, fd_event_map, write_only): """ Processes the callbacks for each fileno we've recieved events. Before doing so we re-calculate the event mask based on what is currently set in case it has been changed under our feet by a previous callback. We also take a store a refernce to the fd_event_map in the class so that we can detect removal of an fileno during processing of another callback and not generate spurious callbacks on it. :param dict fd_event_map: Map of fds to events recieved on them. """ self._processing_fd_event_map = fd_event_map for fileno in dictkeys(fd_event_map): if fileno not in fd_event_map: # the fileno has been removed from the map under our feet. 
continue events = fd_event_map[fileno] for ev in [READ, WRITE, ERROR]: if fileno not in self._fd_events[ev]: events &= ~ev if events: handler = self._fd_handlers[fileno] handler(fileno, events, write_only=write_only) class KQueuePoller(SelectPoller): """KQueuePoller works on BSD based systems and is faster than select""" def __init__(self): """Create an instance of the KQueuePoller :param int fileno: The file descriptor to check events for :param method handler: What is called when an event happens :param int events: The events to look for """ self._kqueue = select.kqueue() super(KQueuePoller, self).__init__() def update_handler(self, fileno, events): """Set the events to the current events :param int fileno: The file descriptor :param int events: The event mask """ kevents = list() if not events & READ: if fileno in self._fd_events[READ]: kevents.append(select.kevent(fileno, filter=select.KQ_FILTER_READ, flags=select.KQ_EV_DELETE)) else: if fileno not in self._fd_events[READ]: kevents.append(select.kevent(fileno, filter=select.KQ_FILTER_READ, flags=select.KQ_EV_ADD)) if not events & WRITE: if fileno in self._fd_events[WRITE]: kevents.append(select.kevent(fileno, filter=select.KQ_FILTER_WRITE, flags=select.KQ_EV_DELETE)) else: if fileno not in self._fd_events[WRITE]: kevents.append(select.kevent(fileno, filter=select.KQ_FILTER_WRITE, flags=select.KQ_EV_ADD)) for event in kevents: self._kqueue.control([event], 0) super(KQueuePoller, self).update_handler(fileno, events) def _map_event(self, kevent): """return the event type associated with a kevent object :param kevent kevent: a kevent object as returned by kqueue.control() """ if kevent.filter == select.KQ_FILTER_READ: return READ elif kevent.filter == select.KQ_FILTER_WRITE: return WRITE elif kevent.flags & select.KQ_EV_ERROR: return ERROR def poll(self, write_only=False): """Check to see if the events that are cared about have fired. 
:param bool write_only: Don't look at self.events, just look to see if the adapter can write. """ while True: try: kevents = self._kqueue.control(None, 1000, self.get_next_deadline()) break except _SELECT_ERROR as error: if _get_select_errno(error) == errno.EINTR: continue else: raise fd_event_map = defaultdict(int) for event in kevents: fileno = event.ident fd_event_map[fileno] |= self._map_event(event) self._process_fd_events(fd_event_map, write_only) class PollPoller(SelectPoller): """Poll works on Linux and can have better performance than EPoll in certain scenarios. Both are faster than select. """ POLL_TIMEOUT_MULT = 1000 def __init__(self): """Create an instance of the KQueuePoller :param int fileno: The file descriptor to check events for :param method handler: What is called when an event happens :param int events: The events to look for """ self._poll = self.create_poller() super(PollPoller, self).__init__() def create_poller(self): return select.poll() # pylint: disable=E1101 def add_handler(self, fileno, handler, events): """Add a file descriptor to the poll set :param int fileno: The file descriptor to check events for :param method handler: What is called when an event happens :param int events: The events to look for """ self._poll.register(fileno, events) super(PollPoller, self).add_handler(fileno, handler, events) def update_handler(self, fileno, events): """Set the events to the current events :param int fileno: The file descriptor :param int events: The event mask """ super(PollPoller, self).update_handler(fileno, events) self._poll.modify(fileno, events) def remove_handler(self, fileno): """Remove a fileno to the set :param int fileno: The file descriptor """ super(PollPoller, self).remove_handler(fileno) self._poll.unregister(fileno) def poll(self, write_only=False): """Poll until the next timeout waiting for an event :param bool write_only: Only process write events """ while True: try: events = self._poll.poll(self.get_next_deadline()) break 
except _SELECT_ERROR as error: if _get_select_errno(error) == errno.EINTR: continue else: raise fd_event_map = defaultdict(int) for fileno, event in events: fd_event_map[fileno] |= event self._process_fd_events(fd_event_map, write_only) class EPollPoller(PollPoller): """EPoll works on Linux and can have better performance than Poll in certain scenarios. Both are faster than select. """ POLL_TIMEOUT_MULT = 1 def create_poller(self): return select.epoll() # pylint: disable=E1101
""" Extract comment blocks from Python source via tokenize and associate
them with the class-trait assignments they document. """
from __future__ import division, absolute_import, print_function

import sys

# FIX: the original guarded this import with `if sys.version_info[0] >= 3:`
# but both branches imported the identical name, so the conditional was dead
# code; io.StringIO exists on every supported Python.
from io import StringIO

# NOTE(review): `compiler` is a Python 2-only stdlib module (removed in 3.0),
# so get_class_traits() below cannot run on Python 3 as written.
import compiler
import inspect
import textwrap
import tokenize

from .compiler_unparse import unparse


class Comment(object):
    """ A comment block.
    """
    is_comment = True

    def __init__(self, start_lineno, end_lineno, text):
        # int : The first line number in the block. 1-indexed.
        self.start_lineno = start_lineno
        # int : The last line number. Inclusive!
        self.end_lineno = end_lineno
        # str : The text block including '#' character but not any leading
        # spaces.
        self.text = text

    def add(self, string, start, end, line):
        """ Add a new comment line.
        """
        self.start_lineno = min(self.start_lineno, start[0])
        self.end_lineno = max(self.end_lineno, end[0])
        self.text += string

    def __repr__(self):
        return '%s(%r, %r, %r)' % (self.__class__.__name__,
                                   self.start_lineno, self.end_lineno,
                                   self.text)


class NonComment(object):
    """ A non-comment block of code.
    """
    is_comment = False

    def __init__(self, start_lineno, end_lineno):
        self.start_lineno = start_lineno
        self.end_lineno = end_lineno

    def add(self, string, start, end, line):
        """ Add lines to the block.
        """
        if string.strip():
            # Only add if not entirely whitespace.
            self.start_lineno = min(self.start_lineno, start[0])
            self.end_lineno = max(self.end_lineno, end[0])

    def __repr__(self):
        return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno,
                               self.end_lineno)


class CommentBlocker(object):
    """ Pull out contiguous comment blocks.
    """

    def __init__(self):
        # Start with a dummy.
        self.current_block = NonComment(0, 0)

        # All of the blocks seen so far.
        self.blocks = []

        # The index mapping lines of code to their associated comment blocks.
        self.index = {}

    def process_file(self, file):
        """ Process a file object.
        """
        # tokenize wants a readline-style callable; spell it per-version.
        if sys.version_info[0] >= 3:
            nxt = file.__next__
        else:
            nxt = file.next
        for token in tokenize.generate_tokens(nxt):
            self.process_token(*token)
        self.make_index()

    def process_token(self, kind, string, start, end, line):
        """ Process a single token.
        """
        if self.current_block.is_comment:
            if kind == tokenize.COMMENT:
                self.current_block.add(string, start, end, line)
            else:
                self.new_noncomment(start[0], end[0])
        else:
            if kind == tokenize.COMMENT:
                self.new_comment(string, start, end, line)
            else:
                self.current_block.add(string, start, end, line)

    def new_noncomment(self, start_lineno, end_lineno):
        """ We are transitioning from a noncomment to a comment.
        """
        block = NonComment(start_lineno, end_lineno)
        self.blocks.append(block)
        self.current_block = block

    def new_comment(self, string, start, end, line):
        """ Possibly add a new comment.

        Only adds a new comment if this comment is the only thing on the
        line. Otherwise, it extends the noncomment block.
        """
        prefix = line[:start[1]]
        if prefix.strip():
            # Oops! Trailing comment, not a comment block.
            self.current_block.add(string, start, end, line)
        else:
            # A comment block.
            block = Comment(start[0], end[0], string)
            self.blocks.append(block)
            self.current_block = block

    def make_index(self):
        """ Make the index mapping lines of actual code to their associated
        prefix comments.
        """
        for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
            if not block.is_comment:
                self.index[block.start_lineno] = prev

    def search_for_comment(self, lineno, default=None):
        """ Find the comment block just before the given line number.

        Returns None (or the specified default) if there is no such block.
        """
        if not self.index:
            self.make_index()
        block = self.index.get(lineno, None)
        text = getattr(block, 'text', default)
        return text


def strip_comment_marker(text):
    """ Strip # markers at the front of a block of comment text.
    """
    lines = []
    for line in text.splitlines():
        lines.append(line.lstrip('#'))
    text = textwrap.dedent('\n'.join(lines))
    return text


def get_class_traits(klass):
    """ Yield all of the documentation for trait definitions on a class
    object, as (name, rhs, doc) triples.
    """
    # FIXME: gracefully handle errors here or in the caller?
    source = inspect.getsource(klass)
    cb = CommentBlocker()
    cb.process_file(StringIO(source))
    mod_ast = compiler.parse(source)
    class_ast = mod_ast.node.nodes[0]
    for node in class_ast.code.nodes:
        # FIXME: handle other kinds of assignments?
        if isinstance(node, compiler.ast.Assign):
            name = node.nodes[0].name
            rhs = unparse(node.expr).strip()
            doc = strip_comment_marker(
                cb.search_for_comment(node.lineno, default=''))
            yield name, rhs, doc
from django.conf.urls.defaults import patterns, url urlpatterns = patterns( 'popcorn_gallery.users.views', url(r'^edit/$', 'edit', name='users_edit'), url(r'^delete/$', 'delete_profile', name='users_delete'), url(r'^(?P<username>[\w-]+)/$', 'profile', name='users_profile'), )
import copy from django import forms from django.db import models from django.core.exceptions import ValidationError, ImproperlyConfigured from django.db.models.fields.subclassing import Creator from djangae.forms.fields import ListFormField from django.utils.text import capfirst class _FakeModel(object): """ An object of this class can pass itself off as a model instance when used as an arguments to Field.pre_save method (item_fields of iterable fields are not actually fields of any model). """ def __init__(self, field, value): setattr(self, field.attname, value) class IterableField(models.Field): __metaclass__ = models.SubfieldBase @property def _iterable_type(self): raise NotImplementedError() def db_type(self, connection): return 'list' def get_prep_lookup(self, lookup_type, value): if hasattr(value, 'prepare'): return value.prepare() if hasattr(value, '_prepare'): return value._prepare() if value is None: raise ValueError("You can't query an iterable field with None") if lookup_type == 'isnull' and value in (True, False): return value if lookup_type != 'exact' and lookup_type != 'in': raise ValueError("You can only query using exact and in lookups on iterable fields") if isinstance(value, (list, set)): return [ self.item_field_type.to_python(x) for x in value ] return self.item_field_type.to_python(value) def get_prep_value(self, value): if value is None: raise ValueError("You can't set a {} to None (did you mean {}?)".format( self.__class__.__name__, str(self._iterable_type()) )) if isinstance(value, basestring): # Catch accidentally assigning a string to a ListField raise ValueError("Tried to assign a string to a {}".format(self.__class__.__name__)) return super(IterableField, self).get_prep_value(value) def __init__(self, item_field_type, *args, **kwargs): # This seems bonkers, we shout at people for specifying null=True, but then do it ourselves. 
But this is because # *we* abuse None values for our own purposes (to represent an empty iterable) if someone else tries to then # all hell breaks loose if kwargs.get("null", False): raise RuntimeError("IterableFields cannot be set as nullable (as the datastore doesn't differentiate None vs []") kwargs["null"] = True default = kwargs.get("default", []) self._original_item_field_type = copy.deepcopy(item_field_type) # For deconstruction purposes if default is not None and not callable(default): kwargs["default"] = lambda: self._iterable_type(default) if hasattr(item_field_type, 'attname'): item_field_type = item_field_type.__class__ if callable(item_field_type): item_field_type = item_field_type() if isinstance(item_field_type, models.ForeignKey): raise ImproperlyConfigured("Lists of ForeignKeys aren't supported, use RelatedSetField instead") self.item_field_type = item_field_type # We'll be pretending that item_field is a field of a model # with just one "value" field. assert not hasattr(self.item_field_type, 'attname') self.item_field_type.set_attributes_from_name('value') super(IterableField, self).__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super(IterableField, self).deconstruct() args = (self._original_item_field_type,) del kwargs["null"] return name, path, args, kwargs def contribute_to_class(self, cls, name): self.item_field_type.model = cls self.item_field_type.name = name super(IterableField, self).contribute_to_class(cls, name) # If items' field uses SubfieldBase we also need to. 
item_metaclass = getattr(self.item_field_type, '__metaclass__', None) if item_metaclass and issubclass(item_metaclass, models.SubfieldBase): setattr(cls, self.name, Creator(self)) def _map(self, function, iterable, *args, **kwargs): return self._iterable_type(function(element, *args, **kwargs) for element in iterable) def to_python(self, value): if value is None: return self._iterable_type([]) # Because a set cannot be defined in JSON, we must allow a list to be passed as the value # of a SetField, as otherwise SetField data can't be loaded from fixtures if not hasattr(value, "__iter__"): # Allows list/set, not string raise ValueError("Tried to assign a {} to a {}".format(value.__class__.__name__, self.__class__.__name__)) return self._map(self.item_field_type.to_python, value) def pre_save(self, model_instance, add): """ Gets our value from the model_instance and passes its items through item_field's pre_save (using a fake model instance). """ value = getattr(model_instance, self.attname) if value is None: return None return self._map(lambda item: self.item_field_type.pre_save(_FakeModel(self.item_field_type, item), add), value) def get_db_prep_value(self, value, connection, prepared=False): if not prepared: value = self.get_prep_value(value) if value is None: return None # If the value is an empty iterable, store None if value == self._iterable_type([]): return None return self._map(self.item_field_type.get_db_prep_save, value, connection=connection) def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False): """ Passes the value through get_db_prep_lookup of item_field. """ return self.item_field_type.get_db_prep_lookup( lookup_type, value, connection=connection, prepared=prepared) def validate(self, value_list, model_instance): """ We want to override the default validate method from django.db.fields.Field, because it is only designed to deal with a single choice from the user. 
""" if not self.editable: # Skip validation for non-editable fields return # Validate choices if self.choices: valid_values = [] for choice in self.choices: if isinstance(choice[0], (list, tuple)): # this is an optgroup, so look inside it for the options for optgroup_choice in choice[0]: valid_values.append(optgroup_choice[0]) else: valid_values.append(choice[0]) for value in value_list: if value not in valid_values: # TODO: if there is more than 1 invalid value then this should show all of the invalid values raise ValidationError(self.error_messages['invalid_choice'] % value) # Validate null-ness if value_list is None and not self.null: raise ValidationError(self.error_messages['null']) if not self.blank and not value_list: raise ValidationError(self.error_messages['blank']) # apply the default items validation rules for value in value_list: self.item_field_type.clean(value, model_instance) def formfield(self, **kwargs): """ If this field has choices, then we can use a multiple choice field. NB: The choices must be set on *this* field, e.g. 
this_field = ListField(CharField(), choices=x) as opposed to: this_field = ListField(CharField(choices=x)) """ #Largely lifted straight from Field.formfield() in django.models.__init__.py defaults = {'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text} if self.has_default(): #No idea what this does if callable(self.default): defaults['initial'] = self.default defaults['show_hidden_initial'] = True else: defaults['initial'] = self.get_default() if self.choices: form_field_class = forms.MultipleChoiceField defaults['choices'] = self.get_choices(include_blank=False) #no empty value on a multi-select else: form_field_class = ListFormField defaults.update(**kwargs) return form_field_class(**defaults) class ListField(IterableField): def __init__(self, *args, **kwargs): self.ordering = kwargs.pop('ordering', None) if self.ordering is not None and not callable(self.ordering): raise TypeError("'ordering' has to be a callable or None, " "not of type %r." % type(self.ordering)) super(ListField, self).__init__(*args, **kwargs) def pre_save(self, model_instance, add): value = super(ListField, self).pre_save(model_instance, add) if value and self.ordering: value.sort(key=self.ordering) return value @property def _iterable_type(self): return list def deconstruct(self): name, path, args, kwargs = super(ListField, self).deconstruct() kwargs['ordering'] = self.ordering return name, path, args, kwargs class SetField(IterableField): @property def _iterable_type(self): return set def db_type(self, connection): return 'set' def get_db_prep_save(self, *args, **kwargs): ret = super(SetField, self).get_db_prep_save(*args, **kwargs) if ret: ret = list(ret) return ret def get_db_prep_lookup(self, *args, **kwargs): ret = super(SetField, self).get_db_prep_lookup(*args, **kwargs) if ret: ret = list(ret) return ret def value_to_string(self, obj): """ Custom method for serialization, as JSON doesn't support serializing sets. 
""" return str(list(self._get_val_from_obj(obj)))
from __future__ import absolute_import from datetime import datetime from django.utils import timezone from django.core.urlresolvers import reverse from sentry.models import ( ProcessingIssue, EventError, RawEvent, EventProcessingIssue ) from sentry.testutils import APITestCase class ProjectProjectProcessingIssuesTest(APITestCase): def test_simple(self): self.login_as(user=self.user) team = self.create_team() project1 = self.create_project(team=team, name='foo') raw_event = RawEvent.objects.create( project_id=project1.id, event_id='abc' ) issue, _ = ProcessingIssue.objects.get_or_create( project_id=project1.id, checksum='abc', type=EventError.NATIVE_MISSING_DSYM ) EventProcessingIssue.objects.get_or_create( raw_event=raw_event, processing_issue=issue, ) url = reverse('sentry-api-0-project-processing-issues', kwargs={ 'organization_slug': project1.organization.slug, 'project_slug': project1.slug, }) response = self.client.get(url, format='json') assert response.status_code == 200, response.content assert response.data['hasIssues'] is True assert response.data['hasMoreResolveableIssues'] is False assert response.data['numIssues'] == 1 assert response.data['issuesProcessing'] == 0 assert response.data['resolveableIssues'] == 0 def test_issues(self): self.login_as(user=self.user) team = self.create_team() project1 = self.create_project(team=team, name='foo') raw_event = RawEvent.objects.create( project_id=project1.id, event_id='abc' ) issue, _ = ProcessingIssue.objects.get_or_create( project_id=project1.id, checksum='abc', type=EventError.NATIVE_MISSING_DSYM, datetime=datetime(2013, 8, 13, 3, 8, 25, tzinfo=timezone.utc), ) issue2, _ = ProcessingIssue.objects.get_or_create( project_id=project1.id, checksum='abcd', type=EventError.NATIVE_MISSING_DSYM, datetime=datetime(2014, 8, 13, 3, 8, 25, tzinfo=timezone.utc), ) EventProcessingIssue.objects.get_or_create( raw_event=raw_event, processing_issue=issue, ) url = reverse('sentry-api-0-project-processing-issues', kwargs={ 
'organization_slug': project1.organization.slug, 'project_slug': project1.slug, }) response = self.client.get(url + '?detailed=1', format='json') assert response.status_code == 200, response.content assert len(response.data['issues']) == 2 assert response.data['numIssues'] == 2 assert response.data['lastSeen'] == issue2.datetime assert response.data['hasIssues'] is True assert response.data['hasMoreResolveableIssues'] is False assert response.data['issuesProcessing'] == 0 assert response.data['resolveableIssues'] == 0 assert response.data['issues'][0]['checksum'] == issue.checksum assert response.data['issues'][0]['numEvents'] == 1 assert response.data['issues'][0]['type'] == EventError.NATIVE_MISSING_DSYM assert response.data['issues'][1]['checksum'] == issue2.checksum def test_resolvable_issues(self): self.login_as(user=self.user) team = self.create_team() project1 = self.create_project(team=team, name='foo') RawEvent.objects.create( project_id=project1.id, event_id='abc' ) url = reverse('sentry-api-0-project-processing-issues', kwargs={ 'organization_slug': project1.organization.slug, 'project_slug': project1.slug, }) response = self.client.get(url + '?detailed=1', format='json') assert response.status_code == 200, response.content assert response.data['numIssues'] == 0 assert response.data['resolveableIssues'] == 1 assert response.data['lastSeen'] is None assert response.data['hasIssues'] is False assert response.data['hasMoreResolveableIssues'] is False assert response.data['numIssues'] == 0 assert response.data['issuesProcessing'] == 0
"""Tools for manipulating of large commutative expressions. """ from __future__ import print_function, division from sympy.core.add import Add from sympy.core.compatibility import iterable, is_sequence, SYMPY_INTS from sympy.core.mul import Mul, _keep_coeff from sympy.core.power import Pow from sympy.core.basic import Basic, preorder_traversal from sympy.core.expr import Expr from sympy.core.sympify import sympify from sympy.core.numbers import Rational, Integer, Number, I from sympy.core.singleton import S from sympy.core.symbol import Dummy from sympy.core.coreerrors import NonCommutativeExpression from sympy.core.containers import Tuple, Dict from sympy.utilities import default_sort_key from sympy.utilities.iterables import (common_prefix, common_suffix, variations, ordered) from collections import defaultdict def _isnumber(i): return isinstance(i, (SYMPY_INTS, float)) or i.is_Number def decompose_power(expr): """ Decompose power into symbolic base and integer exponent. This is strictly only valid if the exponent from which the integer is extracted is itself an integer or the base is positive. These conditions are assumed and not checked here. 
Examples ======== >>> from sympy.core.exprtools import decompose_power >>> from sympy.abc import x, y >>> decompose_power(x) (x, 1) >>> decompose_power(x**2) (x, 2) >>> decompose_power(x**(2*y)) (x**y, 2) >>> decompose_power(x**(2*y/3)) (x**(y/3), 2) """ base, exp = expr.as_base_exp() if exp.is_Number: if exp.is_Rational: if not exp.is_Integer: base = Pow(base, Rational(1, exp.q)) exp = exp.p else: base, exp = expr, 1 else: exp, tail = exp.as_coeff_Mul(rational=True) if exp is S.NegativeOne: base, exp = Pow(base, tail), -1 elif exp is not S.One: tail = _keep_coeff(Rational(1, exp.q), tail) base, exp = Pow(base, tail), exp.p else: base, exp = expr, 1 return base, exp class Factors(object): """Efficient representation of ``f_1*f_2*...*f_n``.""" __slots__ = ['factors', 'gens'] def __init__(self, factors=None): # Factors """Initialize Factors from dict or expr. Examples ======== >>> from sympy.core.exprtools import Factors >>> from sympy.abc import x >>> from sympy import I >>> e = 2*x**3 >>> Factors(e) Factors({2: 1, x: 3}) >>> Factors(e.as_powers_dict()) Factors({2: 1, x: 3}) >>> f = _ >>> f.factors # underlying dictionary {2: 1, x: 3} >>> f.gens # base of each factor frozenset([2, x]) >>> Factors(0) Factors({0: 1}) >>> Factors(I) Factors({I: 1}) Notes ===== Although a dictionary can be passed, only minimal checking is performed: powers of -1 and I are made canonical. """ if isinstance(factors, (SYMPY_INTS, float)): factors = S(factors) if isinstance(factors, Factors): factors = factors.factors.copy() elif factors is None or factors is S.One: factors = {} elif factors is S.Zero or factors == 0: factors = {S.Zero: S.One} elif isinstance(factors, Number): n = factors factors = {} if n < 0: factors[S.NegativeOne] = S.One n = -n if n is not S.One: if n.is_Float or n.is_Integer or n is S.Infinity: factors[n] = S.One elif n.is_Rational: # since we're processing Numbers, the denominator is # stored with a negative exponent; all other factors # are left . 
if n.p != 1: factors[Integer(n.p)] = S.One factors[Integer(n.q)] = S.NegativeOne else: raise ValueError('Expected Float|Rational|Integer, not %s' % n) elif isinstance(factors, Basic) and not factors.args: factors = {factors: S.One} elif isinstance(factors, Expr): c, nc = factors.args_cnc() i = c.count(I) for _ in range(i): c.remove(I) factors = dict(Mul._from_args(c).as_powers_dict()) if i: factors[I] = S.One*i if nc: factors[Mul(*nc, evaluate=False)] = S.One else: factors = factors.copy() # /!\ should be dict-like # tidy up -/+1 and I exponents if Rational handle = [] for k in factors: if k is I or k in (-1, 1): handle.append(k) if handle: i1 = S.One for k in handle: if not _isnumber(factors[k]): continue i1 *= k**factors.pop(k) if i1 is not S.One: for a in i1.args if i1.is_Mul else [i1]: # at worst, -1.0*I*(-1)**e if a is S.NegativeOne: factors[a] = S.One elif a is I: factors[I] = S.One elif a.is_Pow: if S.NegativeOne not in factors: factors[S.NegativeOne] = S.Zero factors[S.NegativeOne] += a.exp elif a == 1: factors[a] = S.One elif a == -1: factors[-a] = S.One factors[S.NegativeOne] = S.One else: raise ValueError('unexpected factor in i1: %s' % a) self.factors = factors try: self.gens = frozenset(factors.keys()) except AttributeError: raise TypeError('expecting Expr or dictionary') def __hash__(self): # Factors keys = tuple(ordered(self.factors.keys())) values = [self.factors[k] for k in keys] return hash((keys, values)) def __repr__(self): # Factors return "Factors({%s})" % ', '.join( ['%s: %s' % (k, v) for k, v in ordered(self.factors.items())]) @property def is_zero(self): # Factors """ >>> from sympy.core.exprtools import Factors >>> Factors(0).is_zero True """ f = self.factors return len(f) == 1 and S.Zero in f @property def is_one(self): # Factors """ >>> from sympy.core.exprtools import Factors >>> Factors(1).is_one True """ return not self.factors def as_expr(self): # Factors """Return the underlying expression. 
Examples ======== >>> from sympy.core.exprtools import Factors >>> from sympy.abc import x, y >>> Factors((x*y**2).as_powers_dict()).as_expr() x*y**2 """ args = [] for factor, exp in self.factors.items(): if exp != 1: b, e = factor.as_base_exp() if isinstance(exp, int): e = _keep_coeff(Integer(exp), e) elif isinstance(exp, Rational): e = _keep_coeff(exp, e) else: e *= exp args.append(b**e) else: args.append(factor) return Mul(*args) def mul(self, other): # Factors """Return Factors of ``self * other``. Examples ======== >>> from sympy.core.exprtools import Factors >>> from sympy.abc import x, y, z >>> a = Factors((x*y**2).as_powers_dict()) >>> b = Factors((x*y/z).as_powers_dict()) >>> a.mul(b) Factors({x: 2, y: 3, z: -1}) >>> a*b Factors({x: 2, y: 3, z: -1}) """ if not isinstance(other, Factors): other = Factors(other) if any(f.is_zero for f in (self, other)): return Factors(S.Zero) factors = dict(self.factors) for factor, exp in other.factors.items(): if factor in factors: exp = factors[factor] + exp if not exp: del factors[factor] continue factors[factor] = exp return Factors(factors) def normal(self, other): """Return ``self`` and ``other`` with ``gcd`` removed from each. The only differences between this and method ``div`` is that this is 1) optimized for the case when there are few factors in common and 2) this does not raise an error if ``other`` is zero. 
See Also ======== div """ if not isinstance(other, Factors): other = Factors(other) if other.is_zero: return (Factors(), Factors(S.Zero)) if self.is_zero: return (Factors(S.Zero), Factors()) self_factors = dict(self.factors) other_factors = dict(other.factors) for factor, self_exp in self.factors.items(): try: other_exp = other.factors[factor] except KeyError: continue exp = self_exp - other_exp if not exp: del self_factors[factor] del other_factors[factor] elif _isnumber(exp): if exp > 0: self_factors[factor] = exp del other_factors[factor] else: del self_factors[factor] other_factors[factor] = -exp else: r = self_exp.extract_additively(other_exp) if r is not None: if r: self_factors[factor] = r del other_factors[factor] else: # should be handled already del self_factors[factor] del other_factors[factor] else: sc, sa = self_exp.as_coeff_Add() if sc: oc, oa = other_exp.as_coeff_Add() diff = sc - oc if diff > 0: self_factors[factor] -= oc other_exp = oa elif diff < 0: self_factors[factor] -= sc other_factors[factor] -= sc other_exp = oa - diff else: self_factors[factor] = sa other_exp = oa if other_exp: other_factors[factor] = other_exp else: del other_factors[factor] return Factors(self_factors), Factors(other_factors) def div(self, other): # Factors """Return ``self`` and ``other`` with ``gcd`` removed from each. This is optimized for the case when there are many factors in common. 
Examples ======== >>> from sympy.core.exprtools import Factors >>> from sympy.abc import x, y, z >>> from sympy import S >>> a = Factors((x*y**2).as_powers_dict()) >>> a.div(a) (Factors({}), Factors({})) >>> a.div(x*z) (Factors({y: 2}), Factors({z: 1})) The ``/`` operator only gives ``quo``: >>> a/x Factors({y: 2}) Factors treats its factors as though they are all in the numerator, so if you violate this assumption the results will be correct but will not strictly correspond to the numerator and denominator of the ratio: >>> a.div(x/z) (Factors({y: 2}), Factors({z: -1})) Factors is also naive about bases: it does not attempt any denesting of Rational-base terms, for example the following does not become 2**(2*x)/2. >>> Factors(2**(2*x + 2)).div(S(8)) (Factors({2: 2*x + 2}), Factors({8: 1})) factor_terms can clean up such Rational-bases powers: >>> from sympy.core.exprtools import factor_terms >>> n, d = Factors(2**(2*x + 2)).div(S(8)) >>> n.as_expr()/d.as_expr() 2**(2*x + 2)/8 >>> factor_terms(_) 2**(2*x)/2 """ quo, rem = dict(self.factors), {} if not isinstance(other, Factors): other = Factors(other) if other.is_zero: raise ZeroDivisionError if self.is_zero: return (Factors(S.Zero), Factors()) for factor, exp in other.factors.items(): if factor in quo: d = quo[factor] - exp if _isnumber(d): if d <= 0: del quo[factor] if d >= 0: if d: quo[factor] = d continue exp = -d else: r = quo[factor].extract_additively(exp) if r is not None: if r: quo[factor] = r else: # should be handled already del quo[factor] else: other_exp = exp sc, sa = quo[factor].as_coeff_Add() if sc: oc, oa = other_exp.as_coeff_Add() diff = sc - oc if diff > 0: quo[factor] -= oc other_exp = oa elif diff < 0: quo[factor] -= sc other_exp = oa - diff else: quo[factor] = sa other_exp = oa if other_exp: rem[factor] = other_exp else: assert factor not in rem continue rem[factor] = exp return Factors(quo), Factors(rem) def quo(self, other): # Factors """Return numerator Factor of ``self / other``. 
Examples ======== >>> from sympy.core.exprtools import Factors >>> from sympy.abc import x, y, z >>> a = Factors((x*y**2).as_powers_dict()) >>> b = Factors((x*y/z).as_powers_dict()) >>> a.quo(b) # same as a/b Factors({y: 1}) """ return self.div(other)[0] def rem(self, other): # Factors """Return denominator Factors of ``self / other``. Examples ======== >>> from sympy.core.exprtools import Factors >>> from sympy.abc import x, y, z >>> a = Factors((x*y**2).as_powers_dict()) >>> b = Factors((x*y/z).as_powers_dict()) >>> a.rem(b) Factors({z: -1}) >>> a.rem(a) Factors({}) """ return self.div(other)[1] def pow(self, other): # Factors """Return self raised to a non-negative integer power. Examples ======== >>> from sympy.core.exprtools import Factors >>> from sympy.abc import x, y >>> a = Factors((x*y**2).as_powers_dict()) >>> a**2 Factors({x: 2, y: 4}) """ if isinstance(other, Factors): other = other.as_expr() if other.is_Integer: other = int(other) if isinstance(other, SYMPY_INTS) and other >= 0: factors = {} if other: for factor, exp in self.factors.items(): factors[factor] = exp*other return Factors(factors) else: raise ValueError("expected non-negative integer, got %s" % other) def gcd(self, other): # Factors """Return Factors of ``gcd(self, other)``. The keys are the intersection of factors with the minimum exponent for each factor. Examples ======== >>> from sympy.core.exprtools import Factors >>> from sympy.abc import x, y, z >>> a = Factors((x*y**2).as_powers_dict()) >>> b = Factors((x*y/z).as_powers_dict()) >>> a.gcd(b) Factors({x: 1, y: 1}) """ if not isinstance(other, Factors): other = Factors(other) if other.is_zero: return Factors(self.factors) factors = {} for factor, exp in self.factors.items(): if factor in other.factors: exp = min(exp, other.factors[factor]) factors[factor] = exp return Factors(factors) def lcm(self, other): # Factors """Return Factors of ``lcm(self, other)`` which are the union of factors with the maximum exponent for each factor. 
Examples ======== >>> from sympy.core.exprtools import Factors >>> from sympy.abc import x, y, z >>> a = Factors((x*y**2).as_powers_dict()) >>> b = Factors((x*y/z).as_powers_dict()) >>> a.lcm(b) Factors({x: 1, y: 2, z: -1}) """ if not isinstance(other, Factors): other = Factors(other) if any(f.is_zero for f in (self, other)): return Factors(S.Zero) factors = dict(self.factors) for factor, exp in other.factors.items(): if factor in factors: exp = max(exp, factors[factor]) factors[factor] = exp return Factors(factors) def __mul__(self, other): # Factors return self.mul(other) def __divmod__(self, other): # Factors return self.div(other) def __div__(self, other): # Factors return self.quo(other) __truediv__ = __div__ def __mod__(self, other): # Factors return self.rem(other) def __pow__(self, other): # Factors return self.pow(other) def __eq__(self, other): # Factors if not isinstance(other, Factors): other = Factors(other) return self.factors == other.factors def __ne__(self, other): # Factors return not self.__eq__(other) class Term(object): """Efficient representation of ``coeff*(numer/denom)``. 
""" __slots__ = ['coeff', 'numer', 'denom'] def __init__(self, term, numer=None, denom=None): # Term if numer is None and denom is None: if not term.is_commutative: raise NonCommutativeExpression( 'commutative expression expected') coeff, factors = term.as_coeff_mul() numer, denom = defaultdict(int), defaultdict(int) for factor in factors: base, exp = decompose_power(factor) if base.is_Add: cont, base = base.primitive() coeff *= cont**exp if exp > 0: numer[base] += exp else: denom[base] += -exp numer = Factors(numer) denom = Factors(denom) else: coeff = term if numer is None: numer = Factors() if denom is None: denom = Factors() self.coeff = coeff self.numer = numer self.denom = denom def __hash__(self): # Term return hash((self.coeff, self.numer, self.denom)) def __repr__(self): # Term return "Term(%s, %s, %s)" % (self.coeff, self.numer, self.denom) def as_expr(self): # Term return self.coeff*(self.numer.as_expr()/self.denom.as_expr()) def mul(self, other): # Term coeff = self.coeff*other.coeff numer = self.numer.mul(other.numer) denom = self.denom.mul(other.denom) numer, denom = numer.normal(denom) return Term(coeff, numer, denom) def inv(self): # Term return Term(1/self.coeff, self.denom, self.numer) def quo(self, other): # Term return self.mul(other.inv()) def pow(self, other): # Term if other < 0: return self.inv().pow(-other) else: return Term(self.coeff ** other, self.numer.pow(other), self.denom.pow(other)) def gcd(self, other): # Term return Term(self.coeff.gcd(other.coeff), self.numer.gcd(other.numer), self.denom.gcd(other.denom)) def lcm(self, other): # Term return Term(self.coeff.lcm(other.coeff), self.numer.lcm(other.numer), self.denom.lcm(other.denom)) def __mul__(self, other): # Term if isinstance(other, Term): return self.mul(other) else: return NotImplemented def __div__(self, other): # Term if isinstance(other, Term): return self.quo(other) else: return NotImplemented __truediv__ = __div__ def __pow__(self, other): # Term if isinstance(other, 
SYMPY_INTS): return self.pow(other) else: return NotImplemented def __eq__(self, other): # Term return (self.coeff == other.coeff and self.numer == other.numer and self.denom == other.denom) def __ne__(self, other): # Term return not self.__eq__(other) def _gcd_terms(terms, isprimitive=False, fraction=True): """Helper function for :func:`gcd_terms`. If ``isprimitive`` is True then the call to primitive for an Add will be skipped. This is useful when the content has already been extrated. If ``fraction`` is True then the expression will appear over a common denominator, the lcm of all term denominators. """ if isinstance(terms, Basic) and not isinstance(terms, Tuple): terms = Add.make_args(terms) terms = list(map(Term, [t for t in terms if t])) # there is some simplification that may happen if we leave this # here rather than duplicate it before the mapping of Term onto # the terms if len(terms) == 0: return S.Zero, S.Zero, S.One if len(terms) == 1: cont = terms[0].coeff numer = terms[0].numer.as_expr() denom = terms[0].denom.as_expr() else: cont = terms[0] for term in terms[1:]: cont = cont.gcd(term) for i, term in enumerate(terms): terms[i] = term.quo(cont) if fraction: denom = terms[0].denom for term in terms[1:]: denom = denom.lcm(term.denom) numers = [] for term in terms: numer = term.numer.mul(denom.quo(term.denom)) numers.append(term.coeff*numer.as_expr()) else: numers = [t.as_expr() for t in terms] denom = Term(S(1)).numer cont = cont.as_expr() numer = Add(*numers) denom = denom.as_expr() if not isprimitive and numer.is_Add: _cont, numer = numer.primitive() cont *= _cont return cont, numer, denom def gcd_terms(terms, isprimitive=False, clear=True, fraction=True): """Compute the GCD of ``terms`` and put them together. ``terms`` can be an expression or a non-Basic sequence of expressions which will be handled as though they are terms from a sum. If ``isprimitive`` is True the _gcd_terms will not run the primitive method on the terms. 
``clear`` controls the removal of integers from the denominator of an Add expression. When True (default), all numerical denominator will be cleared; when False the denominators will be cleared only if all terms had numerical denominators other than 1. ``fraction``, when True (default), will put the expression over a common denominator. Examples ======== >>> from sympy.core import gcd_terms >>> from sympy.abc import x, y >>> gcd_terms((x + 1)**2*y + (x + 1)*y**2) y*(x + 1)*(x + y + 1) >>> gcd_terms(x/2 + 1) (x + 2)/2 >>> gcd_terms(x/2 + 1, clear=False) x/2 + 1 >>> gcd_terms(x/2 + y/2, clear=False) (x + y)/2 >>> gcd_terms(x/2 + 1/x) (x**2 + 2)/(2*x) >>> gcd_terms(x/2 + 1/x, fraction=False) (x + 2/x)/2 >>> gcd_terms(x/2 + 1/x, fraction=False, clear=False) x/2 + 1/x >>> gcd_terms(x/2/y + 1/x/y) (x**2 + 2)/(2*x*y) >>> gcd_terms(x/2/y + 1/x/y, fraction=False, clear=False) (x + 2/x)/(2*y) The ``clear`` flag was ignored in this case because the returned expression was a rational expression, not a simple sum. See Also ======== factor_terms, sympy.polys.polytools.terms_gcd """ def mask(terms): """replace nc portions of each term with a unique Dummy symbols and return the replacements to restore them""" args = [(a, []) if a.is_commutative else a.args_cnc() for a in terms] reps = [] for i, (c, nc) in enumerate(args): if nc: nc = Mul._from_args(nc) d = Dummy() reps.append((d, nc)) c.append(d) args[i] = Mul._from_args(c) else: args[i] = c return args, dict(reps) isadd = isinstance(terms, Add) addlike = isadd or not isinstance(terms, Basic) and \ is_sequence(terms, include=set) and \ not isinstance(terms, Dict) if addlike: if isadd: # i.e. 
an Add terms = list(terms.args) else: terms = sympify(terms) terms, reps = mask(terms) cont, numer, denom = _gcd_terms(terms, isprimitive, fraction) numer = numer.xreplace(reps) coeff, factors = cont.as_coeff_Mul() return _keep_coeff(coeff, factors*numer/denom, clear=clear) if not isinstance(terms, Basic): return terms if terms.is_Atom: return terms if terms.is_Mul: c, args = terms.as_coeff_mul() return _keep_coeff(c, Mul(*[gcd_terms(i, isprimitive, clear, fraction) for i in args]), clear=clear) def handle(a): # don't treat internal args like terms of an Add if not isinstance(a, Expr): if isinstance(a, Basic): return a.func(*[handle(i) for i in a.args]) return type(a)([handle(i) for i in a]) return gcd_terms(a, isprimitive, clear, fraction) if isinstance(terms, Dict): return Dict(*[(k, handle(v)) for k, v in terms.args]) return terms.func(*[handle(i) for i in terms.args]) def factor_terms(expr, radical=False, clear=False, fraction=False, sign=True): """Remove common factors from terms in all arguments without changing the underlying structure of the expr. No expansion or simplification (and no processing of non-commutatives) is performed. If radical=True then a radical common to all terms will be factored out of any Add sub-expressions of the expr. If clear=False (default) then coefficients will not be separated from a single Add if they can be distributed to leave one or more terms with integer coefficients. If fraction=True (default is False) then a common denominator will be constructed for the expression. If sign=True (default) then even if the only factor in common is a -1, it will be factored out of the expression. 
Examples ======== >>> from sympy import factor_terms, Symbol >>> from sympy.abc import x, y >>> factor_terms(x + x*(2 + 4*y)**3) x*(8*(2*y + 1)**3 + 1) >>> A = Symbol('A', commutative=False) >>> factor_terms(x*A + x*A + x*y*A) x*(y*A + 2*A) When ``clear`` is False, a rational will only be factored out of an Add expression if all terms of the Add have coefficients that are fractions: >>> factor_terms(x/2 + 1, clear=False) x/2 + 1 >>> factor_terms(x/2 + 1, clear=True) (x + 2)/2 This only applies when there is a single Add that the coefficient multiplies: >>> factor_terms(x*y/2 + y, clear=True) y*(x + 2)/2 >>> factor_terms(x*y/2 + y, clear=False) == _ True If a -1 is all that can be factored out, to *not* factor it out, the flag ``sign`` must be False: >>> factor_terms(-x - y) -(x + y) >>> factor_terms(-x - y, sign=False) -x - y >>> factor_terms(-2*x - 2*y, sign=False) -2*(x + y) See Also ======== gcd_terms, sympy.polys.polytools.terms_gcd """ from sympy.simplify.simplify import bottom_up def do(expr): is_iterable = iterable(expr) if not isinstance(expr, Basic) or expr.is_Atom: if is_iterable: return type(expr)([do(i) for i in expr]) return expr if expr.is_Pow or expr.is_Function or \ is_iterable or not hasattr(expr, 'args_cnc'): args = expr.args newargs = tuple([do(i) for i in args]) if newargs == args: return expr return expr.func(*newargs) cont, p = expr.as_content_primitive(radical=radical) if p.is_Add: list_args = [do(a) for a in Add.make_args(p)] # get a common negative (if there) which gcd_terms does not remove if all(a.as_coeff_Mul()[0] < 0 for a in list_args): cont = -cont list_args = [-a for a in list_args] # watch out for exp(-(x+2)) which gcd_terms will change to exp(-x-2) special = {} for i, a in enumerate(list_args): b, e = a.as_base_exp() if e.is_Mul and e != Mul(*e.args): list_args[i] = Dummy() special[list_args[i]] = a # rebuild p not worrying about the order which gcd_terms will fix p = Add._from_args(list_args) p = gcd_terms(p, isprimitive=True, 
clear=clear, fraction=fraction).xreplace(special) elif p.args: p = p.func( *[do(a) for a in p.args]) rv = _keep_coeff(cont, p, clear=clear, sign=sign) return rv expr = sympify(expr) return do(expr) def _mask_nc(eq, name=None): """ Return ``eq`` with non-commutative objects replaced with Dummy symbols. A dictionary that can be used to restore the original values is returned: if it is None, the expression is noncommutative and cannot be made commutative. The third value returned is a list of any non-commutative symbols that appear in the returned equation. ``name``, if given, is the name that will be used with numered Dummy variables that will replace the non-commutative objects and is mainly used for doctesting purposes. Notes ===== All non-commutative objects other than Symbols are replaced with a non-commutative Symbol. Identical objects will be identified by identical symbols. If there is only 1 non-commutative object in an expression it will be replaced with a commutative symbol. Otherwise, the non-commutative entities are retained and the calling routine should handle replacements in this case since some care must be taken to keep track of the ordering of symbols when they occur within Muls. 
Examples ======== >>> from sympy.physics.secondquant import Commutator, NO, F, Fd >>> from sympy import symbols, Mul >>> from sympy.core.exprtools import _mask_nc >>> from sympy.abc import x, y >>> A, B, C = symbols('A,B,C', commutative=False) One nc-symbol: >>> _mask_nc(A**2 - x**2, 'd') (_d0**2 - x**2, {_d0: A}, []) Multiple nc-symbols: >>> _mask_nc(A**2 - B**2, 'd') (A**2 - B**2, None, [A, B]) An nc-object with nc-symbols but no others outside of it: >>> _mask_nc(1 + x*Commutator(A, B), 'd') (_d0*x + 1, {_d0: Commutator(A, B)}, []) >>> _mask_nc(NO(Fd(x)*F(y)), 'd') (_d0, {_d0: NO(CreateFermion(x)*AnnihilateFermion(y))}, []) Multiple nc-objects: >>> eq = x*Commutator(A, B) + x*Commutator(A, C)*Commutator(A, B) >>> _mask_nc(eq, 'd') (x*_d0 + x*_d1*_d0, {_d0: Commutator(A, B), _d1: Commutator(A, C)}, [_d0, _d1]) Multiple nc-objects and nc-symbols: >>> eq = A*Commutator(A, B) + B*Commutator(A, C) >>> _mask_nc(eq, 'd') (A*_d0 + B*_d1, {_d0: Commutator(A, B), _d1: Commutator(A, C)}, [_d0, _d1, A, B]) If there is an object that: - doesn't contain nc-symbols - but has arguments which derive from Basic, not Expr - and doesn't define an _eval_is_commutative routine then it will give False (or None?) for the is_commutative test. 
Such objects are also removed by this routine: >>> from sympy import Basic >>> eq = (1 + Mul(Basic(), Basic(), evaluate=False)) >>> eq.is_commutative False >>> _mask_nc(eq, 'd') (_d0**2 + 1, {_d0: Basic()}, []) """ name = name or 'mask' # Make Dummy() append sequential numbers to the name def numbered_names(): i = 0 while True: yield name + str(i) i += 1 names = numbered_names() def Dummy(*args, **kwargs): from sympy import Dummy return Dummy(next(names), *args, **kwargs) expr = eq if expr.is_commutative: return eq, {}, [] # identify nc-objects; symbols and other rep = [] nc_obj = set() nc_syms = set() pot = preorder_traversal(expr, keys=default_sort_key) for i, a in enumerate(pot): if any(a == r[0] for r in rep): pot.skip() elif not a.is_commutative: if a.is_Symbol: nc_syms.add(a) elif not (a.is_Add or a.is_Mul or a.is_Pow): if all(s.is_commutative for s in a.free_symbols): rep.append((a, Dummy())) else: nc_obj.add(a) pot.skip() # If there is only one nc symbol or object, it can be factored regularly # but polys is going to complain, so replace it with a Dummy. if len(nc_obj) == 1 and not nc_syms: rep.append((nc_obj.pop(), Dummy())) elif len(nc_syms) == 1 and not nc_obj: rep.append((nc_syms.pop(), Dummy())) # Any remaining nc-objects will be replaced with an nc-Dummy and # identified as an nc-Symbol to watch out for nc_obj = sorted(nc_obj, key=default_sort_key) for n in nc_obj: nc = Dummy(commutative=False) rep.append((n, nc)) nc_syms.add(nc) expr = expr.subs(rep) nc_syms = list(nc_syms) nc_syms.sort(key=default_sort_key) return expr, dict([(v, k) for k, v in rep]) or None, nc_syms def factor_nc(expr): """Return the factored form of ``expr`` while handling non-commutative expressions. 
**examples** >>> from sympy.core.exprtools import factor_nc >>> from sympy import Symbol >>> from sympy.abc import x >>> A = Symbol('A', commutative=False) >>> B = Symbol('B', commutative=False) >>> factor_nc((x**2 + 2*A*x + A**2).expand()) (x + A)**2 >>> factor_nc(((x + A)*(x + B)).expand()) (x + A)*(x + B) """ from sympy.simplify.simplify import powsimp from sympy.polys import gcd, factor def _pemexpand(expr): "Expand with the minimal set of hints necessary to check the result." return expr.expand(deep=True, mul=True, power_exp=True, power_base=False, basic=False, multinomial=True, log=False) expr = sympify(expr) if not isinstance(expr, Expr) or not expr.args: return expr if not expr.is_Add: return expr.func(*[factor_nc(a) for a in expr.args]) expr, rep, nc_symbols = _mask_nc(expr) if rep: return factor(expr).subs(rep) else: args = [a.args_cnc() for a in Add.make_args(expr)] c = g = l = r = S.One hit = False # find any commutative gcd term for i, a in enumerate(args): if i == 0: c = Mul._from_args(a[0]) elif a[0]: c = gcd(c, Mul._from_args(a[0])) else: c = S.One if c is not S.One: hit = True c, g = c.as_coeff_Mul() if g is not S.One: for i, (cc, _) in enumerate(args): cc = list(Mul.make_args(Mul._from_args(list(cc))/g)) args[i][0] = cc for i, (cc, _) in enumerate(args): cc[0] = cc[0]/c args[i][0] = cc # find any noncommutative common prefix for i, a in enumerate(args): if i == 0: n = a[1][:] else: n = common_prefix(n, a[1]) if not n: # is there a power that can be extracted? 
if not args[0][1]: break b, e = args[0][1][0].as_base_exp() ok = False if e.is_Integer: for t in args: if not t[1]: break bt, et = t[1][0].as_base_exp() if et.is_Integer and bt == b: e = min(e, et) else: break else: ok = hit = True l = b**e il = b**-e for i, a in enumerate(args): args[i][1][0] = il*args[i][1][0] break if not ok: break else: hit = True lenn = len(n) l = Mul(*n) for i, a in enumerate(args): args[i][1] = args[i][1][lenn:] # find any noncommutative common suffix for i, a in enumerate(args): if i == 0: n = a[1][:] else: n = common_suffix(n, a[1]) if not n: # is there a power that can be extracted? if not args[0][1]: break b, e = args[0][1][-1].as_base_exp() ok = False if e.is_Integer: for t in args: if not t[1]: break bt, et = t[1][-1].as_base_exp() if et.is_Integer and bt == b: e = min(e, et) else: break else: ok = hit = True r = b**e il = b**-e for i, a in enumerate(args): args[i][1][-1] = args[i][1][-1]*il break if not ok: break else: hit = True lenn = len(n) r = Mul(*n) for i, a in enumerate(args): args[i][1] = a[1][:len(a[1]) - lenn] if hit: mid = Add(*[Mul(*cc)*Mul(*nc) for cc, nc in args]) else: mid = expr # sort the symbols so the Dummys would appear in the same # order as the original symbols, otherwise you may introduce # a factor of -1, e.g. 
A**2 - B**2) -- {A:y, B:x} --> y**2 - x**2 # and the former factors into two terms, (A - B)*(A + B) while the # latter factors into 3 terms, (-1)*(x - y)*(x + y) rep1 = [(n, Dummy()) for n in sorted(nc_symbols, key=default_sort_key)] unrep1 = [(v, k) for k, v in rep1] unrep1.reverse() new_mid, r2, _ = _mask_nc(mid.subs(rep1)) new_mid = powsimp(factor(new_mid)) new_mid = new_mid.subs(r2).subs(unrep1) if new_mid.is_Pow: return _keep_coeff(c, g*l*new_mid*r) if new_mid.is_Mul: # XXX TODO there should be a way to inspect what order the terms # must be in and just select the plausible ordering without # checking permutations cfac = [] ncfac = [] for f in new_mid.args: if f.is_commutative: cfac.append(f) else: b, e = f.as_base_exp() if e.is_Integer: ncfac.extend([b]*e) else: ncfac.append(f) pre_mid = g*Mul(*cfac)*l target = _pemexpand(expr/c) for s in variations(ncfac, len(ncfac)): ok = pre_mid*Mul(*s)*r if _pemexpand(ok) == target: return _keep_coeff(c, ok) # mid was an Add that didn't factor successfully return _keep_coeff(c, g*l*mid*r)
from tvtk.tools.tvtk_doc import TVTKFilterChooser, TVTK_FILTERS
from mayavi.filters.filter_base import FilterBase
from mayavi.core.common import handle_children_state, error
from mayavi.core.pipeline_info import PipelineInfo


class UserDefined(FilterBase):
    """This filter lets the user define their own filter
    dynamically/interactively.  It is like `FilterBase` but allows a
    user to specify the class without writing any code.
    """

    # The version of this class.  Used for persistence.
    __version__ = 0

    input_info = PipelineInfo(datasets=['any'],
                              attribute_types=['any'],
                              attributes=['any'])

    output_info = PipelineInfo(datasets=['any'],
                               attribute_types=['any'],
                               attributes=['any'])

    ######################################################################
    # `object` interface.
    ######################################################################
    def __set_pure_state__(self, state):
        # Recreate and wire up the wrapped TVTK filter from the persisted
        # state before restoring anything else.
        existing = [flt for flt in [self.filter] if flt is not None]
        handle_children_state(existing, [state.filter])
        self.filter = existing[0]
        self.update_pipeline()
        # Restore the rest of our state.
        super(UserDefined, self).__set_pure_state__(state)

    ######################################################################
    # `UserDefined` interface.
    ######################################################################
    def setup_filter(self):
        """Setup the filter if none has been set or check it if it
        already has been."""
        current = self.filter
        if self._check_object(current):
            return
        if current is not None:
            error('Invalid filter %s chosen! Try again!'
                  % current.__class__.__name__)
        self.filter = self._choose_filter()

    ######################################################################
    # Non-public interface.
    ######################################################################
    def _choose_filter(self):
        # Pop up a modal chooser dialog and hand back whatever the user
        # picked (None when nothing valid was chosen).
        dialog = TVTKFilterChooser()
        dialog.edit_traits(kind='livemodal')
        chosen = dialog.object
        if chosen is None:
            error('Invalid filter chosen! Try again!')
        return chosen

    def _check_object(self, obj):
        # Valid objects are non-None instances of a known TVTK filter.
        return obj is not None and obj.__class__.__name__ in TVTK_FILTERS

    def _filter_changed(self, old, new):
        # Keep the display name in sync with the chosen filter class.
        self.name = 'UserDefined:%s' % new.__class__.__name__
        super(UserDefined, self)._filter_changed(old, new)
"""Univariate features selection.""" import numpy as np import warnings from scipy import special, stats from scipy.sparse import issparse from ..base import BaseEstimator from ..preprocessing import LabelBinarizer from ..utils import (as_float_array, check_array, check_X_y, safe_sqr, safe_mask) from ..utils.extmath import norm, safe_sparse_dot from ..utils.validation import check_is_fitted from .base import SelectorMixin def _clean_nans(scores): """ Fixes Issue #1240: NaNs can't be properly compared, so change them to the smallest value of scores's dtype. -inf seems to be unreliable. """ # XXX where should this function be called? fit? scoring functions # themselves? scores = as_float_array(scores, copy=True) scores[np.isnan(scores)] = np.finfo(scores.dtype).min return scores def f_oneway(*args): """Performs a 1-way ANOVA. The one-way ANOVA tests the null hypothesis that 2 or more groups have the same population mean. The test is applied to samples from two or more groups, possibly with differing sizes. Parameters ---------- sample1, sample2, ... : array_like, sparse matrices The sample measurements should be given as arguments. Returns ------- F-value : float The computed F-value of the test. p-value : float The associated p-value from the F-distribution. Notes ----- The ANOVA test has important assumptions that must be satisfied in order for the associated p-value to be valid. 1. The samples are independent 2. Each sample is from a normally distributed population 3. The population standard deviations of the groups are all equal. This property is known as homoscedasticity. If these assumptions are not true for a given set of data, it may still be possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although with some loss of power. The algorithm is from Heiman[2], pp.394-7. See ``scipy.stats.f_oneway`` that should give the same results while being less efficient. References ---------- .. [1] Lowry, Richard. 
"Concepts and Applications of Inferential Statistics". Chapter 14. http://faculty.vassar.edu/lowry/ch14pt1.html .. [2] Heiman, G.W. Research Methods in Statistics. 2002. """ n_classes = len(args) args = [as_float_array(a) for a in args] n_samples_per_class = np.array([a.shape[0] for a in args]) n_samples = np.sum(n_samples_per_class) ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args) sums_args = [np.asarray(a.sum(axis=0)) for a in args] square_of_sums_alldata = sum(sums_args) ** 2 square_of_sums_args = [s ** 2 for s in sums_args] sstot = ss_alldata - square_of_sums_alldata / float(n_samples) ssbn = 0. for k, _ in enumerate(args): ssbn += square_of_sums_args[k] / n_samples_per_class[k] ssbn -= square_of_sums_alldata / float(n_samples) sswn = sstot - ssbn dfbn = n_classes - 1 dfwn = n_samples - n_classes msb = ssbn / float(dfbn) msw = sswn / float(dfwn) constant_features_idx = np.where(msw == 0.)[0] if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size): warnings.warn("Features %s are constant." % constant_features_idx, UserWarning) f = msb / msw # flatten matrix to vector in sparse case f = np.asarray(f).ravel() prob = stats.fprob(dfbn, dfwn, f) return f, prob def f_classif(X, y): """Compute the Anova F-value for the provided sample Parameters ---------- X : {array-like, sparse matrix} shape = [n_samples, n_features] The set of regressors that will tested sequentially. y : array of shape(n_samples) The data matrix. Returns ------- F : array, shape = [n_features,] The set of F values. pval : array, shape = [n_features,] The set of p-values. """ X, y = check_X_y(X, y, ['csr', 'csc', 'coo']) args = [X[safe_mask(X, y == k)] for k in np.unique(y)] return f_oneway(*args) def _chisquare(f_obs, f_exp): """Fast replacement for scipy.stats.chisquare. Version from https://github.com/scipy/scipy/pull/2525 with additional optimizations. 
""" f_obs = np.asarray(f_obs, dtype=np.float64) k = len(f_obs) # Reuse f_obs for chi-squared statistics chisq = f_obs chisq -= f_exp chisq **= 2 chisq /= f_exp chisq = chisq.sum(axis=0) return chisq, special.chdtrc(k - 1, chisq) def chi2(X, y): """Compute chi-squared statistic for each class/feature combination. This score can be used to select the n_features features with the highest values for the test chi-squared statistic from X, which must contain booleans or frequencies (e.g., term counts in document classification), relative to the classes. Recall that the chi-square test measures dependence between stochastic variables, so using this function "weeds out" the features that are the most likely to be independent of class and therefore irrelevant for classification. Parameters ---------- X : {array-like, sparse matrix}, shape = (n_samples, n_features_in) Sample vectors. y : array-like, shape = (n_samples,) Target vector (class labels). Returns ------- chi2 : array, shape = (n_features,) chi2 statistics of each feature. pval : array, shape = (n_features,) p-values of each feature. Notes ----- Complexity of this algorithm is O(n_classes * n_features). """ # XXX: we might want to do some of the following in logspace instead for # numerical stability. X = check_array(X, accept_sparse='csr') if np.any((X.data if issparse(X) else X) < 0): raise ValueError("Input X must be non-negative.") Y = LabelBinarizer().fit_transform(y) if Y.shape[1] == 1: Y = np.append(1 - Y, Y, axis=1) observed = safe_sparse_dot(Y.T, X) # n_classes * n_features feature_count = check_array(X.sum(axis=0)) class_prob = check_array(Y.mean(axis=0)) expected = np.dot(class_prob.T, feature_count) return _chisquare(observed, expected) def f_regression(X, y, center=True): """Univariate linear regression tests Quick linear model for testing the effect of a single regressor, sequentially for many regressors. This is done in 3 steps: 1. 
the regressor of interest and the data are orthogonalized wrt constant regressors 2. the cross correlation between data and regressors is computed 3. it is converted to an F score then to a p-value Parameters ---------- X : {array-like, sparse matrix} shape = (n_samples, n_features) The set of regressors that will tested sequentially. y : array of shape(n_samples). The data matrix center : True, bool, If true, X and y will be centered. Returns ------- F : array, shape=(n_features,) F values of features. pval : array, shape=(n_features,) p-values of F-scores. """ if issparse(X) and center: raise ValueError("center=True only allowed for dense data") X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float) if center: y = y - np.mean(y) X = X.copy('F') # faster in fortran X -= X.mean(axis=0) # compute the correlation corr = safe_sparse_dot(y, X) # XXX could use corr /= row_norms(X.T) here, but the test doesn't pass corr /= np.asarray(np.sqrt(safe_sqr(X).sum(axis=0))).ravel() corr /= norm(y) # convert to p-value degrees_of_freedom = y.size - (2 if center else 1) F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom pv = stats.f.sf(F, 1, degrees_of_freedom) return F, pv class _BaseFilter(BaseEstimator, SelectorMixin): """Initialize the univariate feature selection. Parameters ---------- score_func : callable Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues). """ def __init__(self, score_func): self.score_func = score_func def fit(self, X, y): """Run score function on (X, y) and get the appropriate features. Parameters ---------- X : array-like, shape = [n_samples, n_features] The training input samples. y : array-like, shape = [n_samples] The target values (class labels in classification, real numbers in regression). Returns ------- self : object Returns self. """ X, y = check_X_y(X, y, ['csr', 'csc', 'coo']) if not callable(self.score_func): raise TypeError("The score function should be a callable, %s (%s) " "was passed." 
% (self.score_func, type(self.score_func))) self._check_params(X, y) self.scores_, self.pvalues_ = self.score_func(X, y) self.scores_ = np.asarray(self.scores_) self.pvalues_ = np.asarray(self.pvalues_) return self def _check_params(self, X, y): pass class SelectPercentile(_BaseFilter): """Select features according to a percentile of the highest scores. Parameters ---------- score_func : callable Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues). percentile : int, optional, default=10 Percent of features to keep. Attributes ---------- scores_ : array-like, shape=(n_features,) Scores of features. pvalues_ : array-like, shape=(n_features,) p-values of feature scores. Notes ----- Ties between features with equal scores will be broken in an unspecified way. """ def __init__(self, score_func=f_classif, percentile=10): super(SelectPercentile, self).__init__(score_func) self.percentile = percentile def _check_params(self, X, y): if not 0 <= self.percentile <= 100: raise ValueError("percentile should be >=0, <=100; got %r" % self.percentile) def _get_support_mask(self): check_is_fitted(self, 'scores_') # Cater for NaNs if self.percentile == 100: return np.ones(len(self.scores_), dtype=np.bool) elif self.percentile == 0: return np.zeros(len(self.scores_), dtype=np.bool) scores = _clean_nans(self.scores_) treshold = stats.scoreatpercentile(scores, 100 - self.percentile) mask = scores > treshold ties = np.where(scores == treshold)[0] if len(ties): max_feats = len(scores) * self.percentile // 100 kept_ties = ties[:max_feats - mask.sum()] mask[kept_ties] = True return mask class SelectKBest(_BaseFilter): """Select features according to the k highest scores. Parameters ---------- score_func : callable Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues). k : int or "all", optional, default=10 Number of top features to select. The "all" option bypasses selection, for use in a parameter search. 
Attributes ---------- scores_ : array-like, shape=(n_features,) Scores of features. pvalues_ : array-like, shape=(n_features,) p-values of feature scores. Notes ----- Ties between features with equal scores will be broken in an unspecified way. """ def __init__(self, score_func=f_classif, k=10): super(SelectKBest, self).__init__(score_func) self.k = k def _check_params(self, X, y): if not (self.k == "all" or 0 <= self.k <= X.shape[1]): raise ValueError("k should be >=0, <= n_features; got %r." "Use k='all' to return all features." % self.k) def _get_support_mask(self): check_is_fitted(self, 'scores_') if self.k == 'all': return np.ones(self.scores_.shape, dtype=bool) elif self.k == 0: return np.zeros(self.scores_.shape, dtype=bool) else: scores = _clean_nans(self.scores_) mask = np.zeros(scores.shape, dtype=bool) # Request a stable sort. Mergesort takes more memory (~40MB per # megafeature on x86-64). mask[np.argsort(scores, kind="mergesort")[-self.k:]] = 1 return mask class SelectFpr(_BaseFilter): """Filter: Select the pvalues below alpha based on a FPR test. FPR test stands for False Positive Rate test. It controls the total amount of false detections. Parameters ---------- score_func : callable Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues). alpha : float, optional The highest p-value for features to be kept. Attributes ---------- scores_ : array-like, shape=(n_features,) Scores of features. pvalues_ : array-like, shape=(n_features,) p-values of feature scores. """ def __init__(self, score_func=f_classif, alpha=5e-2): super(SelectFpr, self).__init__(score_func) self.alpha = alpha def _get_support_mask(self): check_is_fitted(self, 'scores_') return self.pvalues_ < self.alpha class SelectFdr(_BaseFilter): """Filter: Select the p-values for an estimated false discovery rate This uses the Benjamini-Hochberg procedure. ``alpha`` is the target false discovery rate. 
Parameters ---------- score_func : callable Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues). alpha : float, optional The highest uncorrected p-value for features to keep. Attributes ---------- scores_ : array-like, shape=(n_features,) Scores of features. pvalues_ : array-like, shape=(n_features,) p-values of feature scores. """ def __init__(self, score_func=f_classif, alpha=5e-2): super(SelectFdr, self).__init__(score_func) self.alpha = alpha def _get_support_mask(self): check_is_fitted(self, 'scores_') alpha = self.alpha sv = np.sort(self.pvalues_) selected = sv[sv < alpha * np.arange(len(self.pvalues_))] if selected.size == 0: return np.zeros_like(self.pvalues_, dtype=bool) return self.pvalues_ <= selected.max() class SelectFwe(_BaseFilter): """Filter: Select the p-values corresponding to Family-wise error rate Parameters ---------- score_func : callable Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues). alpha : float, optional The highest uncorrected p-value for features to keep. Attributes ---------- scores_ : array-like, shape=(n_features,) Scores of features. pvalues_ : array-like, shape=(n_features,) p-values of feature scores. """ def __init__(self, score_func=f_classif, alpha=5e-2): super(SelectFwe, self).__init__(score_func) self.alpha = alpha def _get_support_mask(self): check_is_fitted(self, 'scores_') return (self.pvalues_ < self.alpha / len(self.pvalues_)) class GenericUnivariateSelect(_BaseFilter): """Univariate feature selector with configurable strategy. Parameters ---------- score_func : callable Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues). mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'} Feature selection mode. param : float or int depending on the feature selection mode Parameter of the corresponding mode. Attributes ---------- scores_ : array-like, shape=(n_features,) Scores of features. 
pvalues_ : array-like, shape=(n_features,) p-values of feature scores. """ _selection_modes = {'percentile': SelectPercentile, 'k_best': SelectKBest, 'fpr': SelectFpr, 'fdr': SelectFdr, 'fwe': SelectFwe} def __init__(self, score_func=f_classif, mode='percentile', param=1e-5): super(GenericUnivariateSelect, self).__init__(score_func) self.mode = mode self.param = param def _make_selector(self): selector = self._selection_modes[self.mode](score_func=self.score_func) # Now perform some acrobatics to set the right named parameter in # the selector possible_params = selector._get_param_names() possible_params.remove('score_func') selector.set_params(**{possible_params[0]: self.param}) return selector def _check_params(self, X, y): if self.mode not in self._selection_modes: raise ValueError("The mode passed should be one of %s, %r," " (type %s) was passed." % (self._selection_modes.keys(), self.mode, type(self.mode))) self._make_selector()._check_params(X, y) def _get_support_mask(self): check_is_fitted(self, 'scores_') selector = self._make_selector() selector.pvalues_ = self.pvalues_ selector.scores_ = self.scores_ return selector._get_support_mask()
# Settings overlay: pulls in every base setting, then swaps the database
# for an in-memory SQLite instance (fast, and leaves no files behind --
# presumably used for the test suite; confirm against the project docs).
from __future__ import absolute_import

from .base import *  # noqa: F401,F403 -- intentionally re-export base settings

DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": ":memory:",  # purely in-memory; recreated on every run
    },
}
# Version kept as a tuple for programmatic access, joined into the usual
# dotted string form for ``__version__``.
VERSION = (1, 0, 0,)
__version__ = '.'.join(map(str, VERSION))

# Standard Django hook: points at the AppConfig used when the app is listed
# in INSTALLED_APPS by its plain module path.
default_app_config = 'admin_sso.apps.AdminSSOConfig'

try:
    from django.utils.functional import LazyObject
except ImportError:
    # Django is not importable (e.g. while building/installing the package);
    # skip the settings machinery so a bare import of this package still works.
    pass
else:
    class LazySettings(LazyObject):
        # Resolved on first attribute access: wraps this package's default
        # settings module in a Settings object.
        def _setup(self):
            from admin_sso import default_settings
            self._wrapped = Settings(default_settings)

    class Settings(object):
        # Copies every attribute whose name is all-uppercase (i.e. equals its
        # own ``.upper()``) from the given module onto this object.
        def __init__(self, settings_module):
            for setting in dir(settings_module):
                if setting == setting.upper():
                    setattr(self, setting, getattr(settings_module, setting))

    settings = LazySettings()
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
from builtins import *

__author__ = 'Chia-Jung, Yang'
__email__ = 'jeroyang@gmail.com'

# The version comes from the versioneer-generated ``_version`` module that
# ships inside the package.
# BUGFIX: the previous code also did ``import versioneer`` at runtime and
# assigned ``__version__`` from it, but that value was immediately
# overwritten below, and ``versioneer`` is a build-time tool that is not
# installed alongside the package -- importing it here could raise
# ImportError in an installed environment.
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
import visvis as vv
import numpy as np
import os

# imageio is an optional dependency: volread() needs it only for files other
# than the bundled 'stent' test dataset.
imageio = None
try:
    import imageio
except ImportError:
    pass


def volread(filename):
    """ volread(filename)

    Read a volume from a file and return it as an array.

    If ``filename`` is the literal string ``'stent'``, a dedicated test
    dataset shipped with visvis is loaded from the resource directory.
    Reading any other kind of volume file requires the optional ``imageio``
    package and is delegated to ``imageio.volread``.

    Raises ``IOError`` if the bundled stent dataset file is missing, and
    ``RuntimeError`` if a non-'stent' file is requested while imageio is
    not installed.
    """

    if filename == 'stent':
        # Resolve the bundled dataset to a full path inside the visvis
        # resource directory.
        path = vv.misc.getResourceDir()
        filename2 = os.path.join(path, 'stent_vol.ssdf')
        if os.path.isfile(filename2):
            filename = filename2
        else:
            raise IOError("File '%s' does not exist." % filename)
        # Load the ssdf container and rescale: the volume is stored with a
        # separate ``colorscale`` factor applied here after the int16 cast.
        s = vv.ssdf.load(filename)
        return s.vol.astype('int16') * s.colorscale

    elif imageio is not None:
        # Arbitrary volume formats are handled by imageio.
        return imageio.volread(filename)
    else:
        raise RuntimeError("visvis.volread needs the imageio package to read arbitrary files.")


if __name__ == '__main__':
    # Demo: load the bundled stent volume and display it.
    vol = vv.volread('stent')
    t = vv.volshow(vol)
    t.renderStyle = 'mip'  # maximum intensity projection (is the default)
""" test positional based indexing with iloc """ from datetime import datetime import re from warnings import ( catch_warnings, simplefilter, ) import numpy as np import pytest import pandas.util._test_decorators as td from pandas import ( NA, Categorical, CategoricalDtype, DataFrame, Index, Interval, NaT, Series, array, concat, date_range, isna, ) import pandas._testing as tm from pandas.api.types import is_scalar from pandas.core.indexing import IndexingError from pandas.tests.indexing.common import Base _slice_iloc_msg = re.escape( "only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) " "and integer or boolean arrays are valid indices" ) class TestiLoc(Base): @pytest.mark.parametrize("key", [2, -1, [0, 1, 2]]) def test_iloc_getitem_int_and_list_int(self, key): self.check_result( "iloc", key, typs=["labels", "mixed", "ts", "floats", "empty"], fails=IndexError, ) # array of ints (GH5006), make sure that a single indexer is returning # the correct type class TestiLocBaseIndependent: """Tests Independent Of Base Class""" @pytest.mark.parametrize( "key", [ slice(None), slice(3), range(3), [0, 1, 2], Index(range(3)), np.asarray([0, 1, 2]), ], ) @pytest.mark.parametrize("indexer", [tm.loc, tm.iloc]) def test_iloc_setitem_fullcol_categorical(self, indexer, key, using_array_manager): frame = DataFrame({0: range(3)}, dtype=object) cat = Categorical(["alpha", "beta", "gamma"]) if not using_array_manager: assert frame._mgr.blocks[0]._can_hold_element(cat) df = frame.copy() orig_vals = df.values indexer(df)[key, 0] = cat overwrite = isinstance(key, slice) and key == slice(None) if overwrite or using_array_manager: # TODO(ArrayManager) we always overwrite because ArrayManager takes # the "split" path, which still overwrites # TODO: GH#39986 this probably shouldn't behave differently expected = DataFrame({0: cat}) assert not np.shares_memory(df.values, orig_vals) else: expected = DataFrame({0: cat}).astype(object) if not using_array_manager: assert 
np.shares_memory(df[0].values, orig_vals) tm.assert_frame_equal(df, expected) # check we dont have a view on cat (may be undesired GH#39986) df.iloc[0, 0] = "gamma" if overwrite: assert cat[0] != "gamma" else: assert cat[0] != "gamma" # TODO with mixed dataframe ("split" path), we always overwrite the column frame = DataFrame({0: np.array([0, 1, 2], dtype=object), 1: range(3)}) df = frame.copy() orig_vals = df.values indexer(df)[key, 0] = cat expected = DataFrame({0: cat, 1: range(3)}) tm.assert_frame_equal(df, expected) # TODO(ArrayManager) does not yet update parent @td.skip_array_manager_not_yet_implemented @pytest.mark.parametrize("box", [array, Series]) def test_iloc_setitem_ea_inplace(self, frame_or_series, box, using_array_manager): # GH#38952 Case with not setting a full column # IntegerArray without NAs arr = array([1, 2, 3, 4]) obj = frame_or_series(arr.to_numpy("i8")) if frame_or_series is Series or not using_array_manager: values = obj.values else: values = obj[0].values if frame_or_series is Series: obj.iloc[:2] = box(arr[2:]) else: obj.iloc[:2, 0] = box(arr[2:]) expected = frame_or_series(np.array([3, 4, 3, 4], dtype="i8")) tm.assert_equal(obj, expected) # Check that we are actually in-place if frame_or_series is Series: assert obj.values is values else: if using_array_manager: assert obj[0].values is values else: assert obj.values.base is values.base and values.base is not None def test_is_scalar_access(self): # GH#32085 index with duplicates doesn't matter for _is_scalar_access index = Index([1, 2, 1]) ser = Series(range(3), index=index) assert ser.iloc._is_scalar_access((1,)) df = ser.to_frame() assert df.iloc._is_scalar_access((1, 0)) def test_iloc_exceeds_bounds(self): # GH6296 # iloc should allow indexers that exceed the bounds df = DataFrame(np.random.random_sample((20, 5)), columns=list("ABCDE")) # lists of positions should raise IndexError! 
msg = "positional indexers are out-of-bounds" with pytest.raises(IndexError, match=msg): df.iloc[:, [0, 1, 2, 3, 4, 5]] with pytest.raises(IndexError, match=msg): df.iloc[[1, 30]] with pytest.raises(IndexError, match=msg): df.iloc[[1, -30]] with pytest.raises(IndexError, match=msg): df.iloc[[100]] s = df["A"] with pytest.raises(IndexError, match=msg): s.iloc[[100]] with pytest.raises(IndexError, match=msg): s.iloc[[-100]] # still raise on a single indexer msg = "single positional indexer is out-of-bounds" with pytest.raises(IndexError, match=msg): df.iloc[30] with pytest.raises(IndexError, match=msg): df.iloc[-30] # GH10779 # single positive/negative indexer exceeding Series bounds should raise # an IndexError with pytest.raises(IndexError, match=msg): s.iloc[30] with pytest.raises(IndexError, match=msg): s.iloc[-30] # slices are ok result = df.iloc[:, 4:10] # 0 < start < len < stop expected = df.iloc[:, 4:] tm.assert_frame_equal(result, expected) result = df.iloc[:, -4:-10] # stop < 0 < start < len expected = df.iloc[:, :0] tm.assert_frame_equal(result, expected) result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down) expected = df.iloc[:, :4:-1] tm.assert_frame_equal(result, expected) result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down) expected = df.iloc[:, 4::-1] tm.assert_frame_equal(result, expected) result = df.iloc[:, -10:4] # start < 0 < stop < len expected = df.iloc[:, :4] tm.assert_frame_equal(result, expected) result = df.iloc[:, 10:4] # 0 < stop < len < start expected = df.iloc[:, :0] tm.assert_frame_equal(result, expected) result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down) expected = df.iloc[:, :0] tm.assert_frame_equal(result, expected) result = df.iloc[:, 10:11] # 0 < len < start < stop expected = df.iloc[:, :0] tm.assert_frame_equal(result, expected) # slice bounds exceeding is ok result = s.iloc[18:30] expected = s.iloc[18:] tm.assert_series_equal(result, expected) result = s.iloc[30:] expected = s.iloc[:0] 
tm.assert_series_equal(result, expected) result = s.iloc[30::-1] expected = s.iloc[::-1] tm.assert_series_equal(result, expected) # doc example def check(result, expected): str(result) result.dtypes tm.assert_frame_equal(result, expected) dfl = DataFrame(np.random.randn(5, 2), columns=list("AB")) check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index)) check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]]) check(dfl.iloc[4:6], dfl.iloc[[4]]) msg = "positional indexers are out-of-bounds" with pytest.raises(IndexError, match=msg): dfl.iloc[[4, 5, 6]] msg = "single positional indexer is out-of-bounds" with pytest.raises(IndexError, match=msg): dfl.iloc[:, 4] @pytest.mark.parametrize("index,columns", [(np.arange(20), list("ABCDE"))]) @pytest.mark.parametrize( "index_vals,column_vals", [ ([slice(None), ["A", "D"]]), (["1", "2"], slice(None)), ([datetime(2019, 1, 1)], slice(None)), ], ) def test_iloc_non_integer_raises(self, index, columns, index_vals, column_vals): # GH 25753 df = DataFrame( np.random.randn(len(index), len(columns)), index=index, columns=columns ) msg = ".iloc requires numeric indexers, got" with pytest.raises(IndexError, match=msg): df.iloc[index_vals, column_vals] @pytest.mark.parametrize("dims", [1, 2]) def test_iloc_getitem_invalid_scalar(self, dims): # GH 21982 if dims == 1: s = Series(np.arange(10)) else: s = DataFrame(np.arange(100).reshape(10, 10)) with pytest.raises(TypeError, match="Cannot index by location index"): s.iloc["a"] def test_iloc_array_not_mutating_negative_indices(self): # GH 21867 array_with_neg_numbers = np.array([1, 2, -1]) array_copy = array_with_neg_numbers.copy() df = DataFrame( {"A": [100, 101, 102], "B": [103, 104, 105], "C": [106, 107, 108]}, index=[1, 2, 3], ) df.iloc[array_with_neg_numbers] tm.assert_numpy_array_equal(array_with_neg_numbers, array_copy) df.iloc[:, array_with_neg_numbers] tm.assert_numpy_array_equal(array_with_neg_numbers, array_copy) def test_iloc_getitem_neg_int_can_reach_first_index(self): # GH10547 and GH10779 # 
negative integers should be able to reach index 0 df = DataFrame({"A": [2, 3, 5], "B": [7, 11, 13]}) s = df["A"] expected = df.iloc[0] result = df.iloc[-3] tm.assert_series_equal(result, expected) expected = df.iloc[[0]] result = df.iloc[[-3]] tm.assert_frame_equal(result, expected) expected = s.iloc[0] result = s.iloc[-3] assert result == expected expected = s.iloc[[0]] result = s.iloc[[-3]] tm.assert_series_equal(result, expected) # check the length 1 Series case highlighted in GH10547 expected = Series(["a"], index=["A"]) result = expected.iloc[[-1]] tm.assert_series_equal(result, expected) def test_iloc_getitem_dups(self): # GH 6766 df1 = DataFrame([{"A": None, "B": 1}, {"A": 2, "B": 2}]) df2 = DataFrame([{"A": 3, "B": 3}, {"A": 4, "B": 4}]) df = concat([df1, df2], axis=1) # cross-sectional indexing result = df.iloc[0, 0] assert isna(result) result = df.iloc[0, :] expected = Series([np.nan, 1, 3, 3], index=["A", "B", "A", "B"], name=0) tm.assert_series_equal(result, expected) def test_iloc_getitem_array(self): df = DataFrame( [ {"A": 1, "B": 2, "C": 3}, {"A": 100, "B": 200, "C": 300}, {"A": 1000, "B": 2000, "C": 3000}, ] ) expected = DataFrame([{"A": 1, "B": 2, "C": 3}]) tm.assert_frame_equal(df.iloc[[0]], expected) expected = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 100, "B": 200, "C": 300}]) tm.assert_frame_equal(df.iloc[[0, 1]], expected) expected = DataFrame([{"B": 2, "C": 3}, {"B": 2000, "C": 3000}], index=[0, 2]) result = df.iloc[[0, 2], [1, 2]] tm.assert_frame_equal(result, expected) def test_iloc_getitem_bool(self): df = DataFrame( [ {"A": 1, "B": 2, "C": 3}, {"A": 100, "B": 200, "C": 300}, {"A": 1000, "B": 2000, "C": 3000}, ] ) expected = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 100, "B": 200, "C": 300}]) result = df.iloc[[True, True, False]] tm.assert_frame_equal(result, expected) expected = DataFrame( [{"A": 1, "B": 2, "C": 3}, {"A": 1000, "B": 2000, "C": 3000}], index=[0, 2] ) result = df.iloc[lambda x: x.index % 2 == 0] 
tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("index", [[True, False], [True, False, True, False]]) def test_iloc_getitem_bool_diff_len(self, index): # GH26658 s = Series([1, 2, 3]) msg = f"Boolean index has wrong length: {len(index)} instead of {len(s)}" with pytest.raises(IndexError, match=msg): s.iloc[index] def test_iloc_getitem_slice(self): df = DataFrame( [ {"A": 1, "B": 2, "C": 3}, {"A": 100, "B": 200, "C": 300}, {"A": 1000, "B": 2000, "C": 3000}, ] ) expected = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 100, "B": 200, "C": 300}]) result = df.iloc[:2] tm.assert_frame_equal(result, expected) expected = DataFrame([{"A": 100, "B": 200}], index=[1]) result = df.iloc[1:2, 0:2] tm.assert_frame_equal(result, expected) expected = DataFrame( [{"A": 1, "C": 3}, {"A": 100, "C": 300}, {"A": 1000, "C": 3000}] ) result = df.iloc[:, lambda df: [0, 2]] tm.assert_frame_equal(result, expected) def test_iloc_getitem_slice_dups(self): df1 = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]) df2 = DataFrame( np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"] ) # axis=1 df = concat([df1, df2], axis=1) tm.assert_frame_equal(df.iloc[:, :4], df1) tm.assert_frame_equal(df.iloc[:, 4:], df2) df = concat([df2, df1], axis=1) tm.assert_frame_equal(df.iloc[:, :2], df2) tm.assert_frame_equal(df.iloc[:, 2:], df1) exp = concat([df2, df1.iloc[:, [0]]], axis=1) tm.assert_frame_equal(df.iloc[:, 0:3], exp) # axis=0 df = concat([df, df], axis=0) tm.assert_frame_equal(df.iloc[0:10, :2], df2) tm.assert_frame_equal(df.iloc[0:10, 2:], df1) tm.assert_frame_equal(df.iloc[10:, :2], df2) tm.assert_frame_equal(df.iloc[10:, 2:], df1) def test_iloc_setitem(self): df = DataFrame( np.random.randn(4, 4), index=np.arange(0, 8, 2), columns=np.arange(0, 12, 3) ) df.iloc[1, 1] = 1 result = df.iloc[1, 1] assert result == 1 df.iloc[:, 2:3] = 0 expected = df.iloc[:, 2:3] result = df.iloc[:, 2:3] tm.assert_frame_equal(result, expected) # GH5771 s = Series(0, index=[4, 
5, 6]) s.iloc[1:2] += 1 expected = Series([0, 1, 0], index=[4, 5, 6]) tm.assert_series_equal(s, expected) def test_iloc_setitem_list(self): # setitem with an iloc list df = DataFrame( np.arange(9).reshape((3, 3)), index=["A", "B", "C"], columns=["A", "B", "C"] ) df.iloc[[0, 1], [1, 2]] df.iloc[[0, 1], [1, 2]] += 100 expected = DataFrame( np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)), index=["A", "B", "C"], columns=["A", "B", "C"], ) tm.assert_frame_equal(df, expected) def test_iloc_setitem_pandas_object(self): # GH 17193 s_orig = Series([0, 1, 2, 3]) expected = Series([0, -1, -2, 3]) s = s_orig.copy() s.iloc[Series([1, 2])] = [-1, -2] tm.assert_series_equal(s, expected) s = s_orig.copy() s.iloc[Index([1, 2])] = [-1, -2] tm.assert_series_equal(s, expected) def test_iloc_setitem_dups(self): # GH 6766 # iloc with a mask aligning from another iloc df1 = DataFrame([{"A": None, "B": 1}, {"A": 2, "B": 2}]) df2 = DataFrame([{"A": 3, "B": 3}, {"A": 4, "B": 4}]) df = concat([df1, df2], axis=1) expected = df.fillna(3) inds = np.isnan(df.iloc[:, 0]) mask = inds[inds].index df.iloc[mask, 0] = df.iloc[mask, 2] tm.assert_frame_equal(df, expected) # del a dup column across blocks expected = DataFrame({0: [1, 2], 1: [3, 4]}) expected.columns = ["B", "B"] del df["A"] tm.assert_frame_equal(df, expected) # assign back to self df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]] tm.assert_frame_equal(df, expected) # reversed x 2 df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(drop=True) df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(drop=True) tm.assert_frame_equal(df, expected) def test_iloc_setitem_frame_duplicate_columns_multiple_blocks( self, using_array_manager ): # Same as the "assign back to self" check in test_iloc_setitem_dups # but on a DataFrame with multiple blocks df = DataFrame([[0, 1], [2, 3]], columns=["B", "B"]) df.iloc[:, 0] = df.iloc[:, 0].astype("f8") if not using_array_manager: assert len(df._mgr.blocks) == 2 expected = 
df.copy() # assign back to self df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]] tm.assert_frame_equal(df, expected) # TODO: GH#27620 this test used to compare iloc against ix; check if this # is redundant with another test comparing iloc against loc def test_iloc_getitem_frame(self): df = DataFrame( np.random.randn(10, 4), index=range(0, 20, 2), columns=range(0, 8, 2) ) result = df.iloc[2] exp = df.loc[4] tm.assert_series_equal(result, exp) result = df.iloc[2, 2] exp = df.loc[4, 4] assert result == exp # slice result = df.iloc[4:8] expected = df.loc[8:14] tm.assert_frame_equal(result, expected) result = df.iloc[:, 2:3] expected = df.loc[:, 4:5] tm.assert_frame_equal(result, expected) # list of integers result = df.iloc[[0, 1, 3]] expected = df.loc[[0, 2, 6]] tm.assert_frame_equal(result, expected) result = df.iloc[[0, 1, 3], [0, 1]] expected = df.loc[[0, 2, 6], [0, 2]] tm.assert_frame_equal(result, expected) # neg indices result = df.iloc[[-1, 1, 3], [-1, 1]] expected = df.loc[[18, 2, 6], [6, 2]] tm.assert_frame_equal(result, expected) # dups indices result = df.iloc[[-1, -1, 1, 3], [-1, 1]] expected = df.loc[[18, 18, 2, 6], [6, 2]] tm.assert_frame_equal(result, expected) # with index-like s = Series(index=range(1, 5), dtype=object) result = df.iloc[s.index] expected = df.loc[[2, 4, 6, 8]] tm.assert_frame_equal(result, expected) def test_iloc_getitem_labelled_frame(self): # try with labelled frame df = DataFrame( np.random.randn(10, 4), index=list("abcdefghij"), columns=list("ABCD") ) result = df.iloc[1, 1] exp = df.loc["b", "B"] assert result == exp result = df.iloc[:, 2:3] expected = df.loc[:, ["C"]] tm.assert_frame_equal(result, expected) # negative indexing result = df.iloc[-1, -1] exp = df.loc["j", "D"] assert result == exp # out-of-bounds exception msg = "index 5 is out of bounds for axis 0 with size 4" with pytest.raises(IndexError, match=msg): df.iloc[10, 5] # trying to use a label msg = ( r"Location based indexing can only have \[integer, integer " 
r"slice \(START point is INCLUDED, END point is EXCLUDED\), " r"listlike of integers, boolean array\] types" ) with pytest.raises(ValueError, match=msg): df.iloc["j", "D"] def test_iloc_getitem_doc_issue(self, using_array_manager): # multi axis slicing issue with single block # surfaced in GH 6059 arr = np.random.randn(6, 4) index = date_range("20130101", periods=6) columns = list("ABCD") df = DataFrame(arr, index=index, columns=columns) # defines ref_locs df.describe() result = df.iloc[3:5, 0:2] str(result) result.dtypes expected = DataFrame(arr[3:5, 0:2], index=index[3:5], columns=columns[0:2]) tm.assert_frame_equal(result, expected) # for dups df.columns = list("aaaa") result = df.iloc[3:5, 0:2] str(result) result.dtypes expected = DataFrame(arr[3:5, 0:2], index=index[3:5], columns=list("aa")) tm.assert_frame_equal(result, expected) # related arr = np.random.randn(6, 4) index = list(range(0, 12, 2)) columns = list(range(0, 8, 2)) df = DataFrame(arr, index=index, columns=columns) if not using_array_manager: df._mgr.blocks[0].mgr_locs result = df.iloc[1:5, 2:4] str(result) result.dtypes expected = DataFrame(arr[1:5, 2:4], index=index[1:5], columns=columns[2:4]) tm.assert_frame_equal(result, expected) def test_iloc_setitem_series(self): df = DataFrame( np.random.randn(10, 4), index=list("abcdefghij"), columns=list("ABCD") ) df.iloc[1, 1] = 1 result = df.iloc[1, 1] assert result == 1 df.iloc[:, 2:3] = 0 expected = df.iloc[:, 2:3] result = df.iloc[:, 2:3] tm.assert_frame_equal(result, expected) s = Series(np.random.randn(10), index=range(0, 20, 2)) s.iloc[1] = 1 result = s.iloc[1] assert result == 1 s.iloc[:4] = 0 expected = s.iloc[:4] result = s.iloc[:4] tm.assert_series_equal(result, expected) s = Series([-1] * 6) s.iloc[0::2] = [0, 2, 4] s.iloc[1::2] = [1, 3, 5] result = s expected = Series([0, 1, 2, 3, 4, 5]) tm.assert_series_equal(result, expected) def test_iloc_setitem_list_of_lists(self): # GH 7551 # list-of-list is set incorrectly in mixed vs. 
single dtyped frames df = DataFrame( {"A": np.arange(5, dtype="int64"), "B": np.arange(5, 10, dtype="int64")} ) df.iloc[2:4] = [[10, 11], [12, 13]] expected = DataFrame({"A": [0, 1, 10, 12, 4], "B": [5, 6, 11, 13, 9]}) tm.assert_frame_equal(df, expected) df = DataFrame( {"A": ["a", "b", "c", "d", "e"], "B": np.arange(5, 10, dtype="int64")} ) df.iloc[2:4] = [["x", 11], ["y", 13]] expected = DataFrame({"A": ["a", "b", "x", "y", "e"], "B": [5, 6, 11, 13, 9]}) tm.assert_frame_equal(df, expected) @pytest.mark.parametrize("indexer", [[0], slice(None, 1, None), np.array([0])]) @pytest.mark.parametrize("value", [["Z"], np.array(["Z"])]) def test_iloc_setitem_with_scalar_index(self, indexer, value): # GH #19474 # assigning like "df.iloc[0, [0]] = ['Z']" should be evaluated # elementwisely, not using "setter('A', ['Z'])". df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"]) df.iloc[0, indexer] = value result = df.iloc[0, 0] assert is_scalar(result) and result == "Z" def test_iloc_mask(self): # GH 3631, iloc with a mask (of a series) should raise df = DataFrame(list(range(5)), index=list("ABCDE"), columns=["a"]) mask = df.a % 2 == 0 msg = "iLocation based boolean indexing cannot use an indexable as a mask" with pytest.raises(ValueError, match=msg): df.iloc[mask] mask.index = range(len(mask)) msg = "iLocation based boolean indexing on an integer type is not available" with pytest.raises(NotImplementedError, match=msg): df.iloc[mask] # ndarray ok result = df.iloc[np.array([True] * len(mask), dtype=bool)] tm.assert_frame_equal(result, df) # the possibilities locs = np.arange(4) nums = 2 ** locs reps = [bin(num) for num in nums] df = DataFrame({"locs": locs, "nums": nums}, reps) expected = { (None, ""): "0b1100", (None, ".loc"): "0b1100", (None, ".iloc"): "0b1100", ("index", ""): "0b11", ("index", ".loc"): "0b11", ("index", ".iloc"): ( "iLocation based boolean indexing cannot use an indexable as a mask" ), ("locs", ""): "Unalignable boolean Series provided as indexer " "(index of 
the boolean Series and of the indexed " "object do not match).", ("locs", ".loc"): "Unalignable boolean Series provided as indexer " "(index of the boolean Series and of the " "indexed object do not match).", ("locs", ".iloc"): ( "iLocation based boolean indexing on an " "integer type is not available" ), } # UserWarnings from reindex of a boolean mask with catch_warnings(record=True): simplefilter("ignore", UserWarning) for idx in [None, "index", "locs"]: mask = (df.nums > 2).values if idx: mask = Series(mask, list(reversed(getattr(df, idx)))) for method in ["", ".loc", ".iloc"]: try: if method: accessor = getattr(df, method[1:]) else: accessor = df answer = str(bin(accessor[mask]["nums"].sum())) except (ValueError, IndexingError, NotImplementedError) as e: answer = str(e) key = ( idx, method, ) r = expected.get(key) if r != answer: raise AssertionError( f"[{key}] does not match [{answer}], received [{r}]" ) def test_iloc_non_unique_indexing(self): # GH 4017, non-unique indexing (on the axis) df = DataFrame({"A": [0.1] * 3000, "B": [1] * 3000}) idx = np.arange(30) * 99 expected = df.iloc[idx] df3 = concat([df, 2 * df, 3 * df]) result = df3.iloc[idx] tm.assert_frame_equal(result, expected) df2 = DataFrame({"A": [0.1] * 1000, "B": [1] * 1000}) df2 = concat([df2, 2 * df2, 3 * df2]) with pytest.raises(KeyError, match="not in index"): df2.loc[idx] def test_iloc_empty_list_indexer_is_ok(self): df = tm.makeCustomDataframe(5, 2) # vertical empty tm.assert_frame_equal( df.iloc[:, []], df.iloc[:, :0], check_index_type=True, check_column_type=True, ) # horizontal empty tm.assert_frame_equal( df.iloc[[], :], df.iloc[:0, :], check_index_type=True, check_column_type=True, ) # horizontal empty tm.assert_frame_equal( df.iloc[[]], df.iloc[:0, :], check_index_type=True, check_column_type=True ) def test_identity_slice_returns_new_object(self, using_array_manager): # GH13873 original_df = DataFrame({"a": [1, 2, 3]}) sliced_df = original_df.iloc[:] assert sliced_df is not original_df 
# should be a shallow copy original_df["a"] = [4, 4, 4] if using_array_manager: # TODO(ArrayManager) verify it is expected that the original didn't change # setitem is replacing full column, so doesn't update "viewing" dataframe assert not (sliced_df["a"] == 4).all() else: assert (sliced_df["a"] == 4).all() original_series = Series([1, 2, 3, 4, 5, 6]) sliced_series = original_series.iloc[:] assert sliced_series is not original_series # should also be a shallow copy original_series[:3] = [7, 8, 9] assert all(sliced_series[:3] == [7, 8, 9]) def test_indexing_zerodim_np_array(self): # GH24919 df = DataFrame([[1, 2], [3, 4]]) result = df.iloc[np.array(0)] s = Series([1, 2], name=0) tm.assert_series_equal(result, s) def test_series_indexing_zerodim_np_array(self): # GH24919 s = Series([1, 2]) result = s.iloc[np.array(0)] assert result == 1 @pytest.mark.xfail(reason="https://github.com/pandas-dev/pandas/issues/33457") def test_iloc_setitem_categorical_updates_inplace(self): # Mixed dtype ensures we go through take_split_path in setitem_with_indexer cat = Categorical(["A", "B", "C"]) df = DataFrame({1: cat, 2: [1, 2, 3]}) # This should modify our original values in-place df.iloc[:, 0] = cat[::-1] expected = Categorical(["C", "B", "A"]) tm.assert_categorical_equal(cat, expected) def test_iloc_with_boolean_operation(self): # GH 20627 result = DataFrame([[0, 1], [2, 3], [4, 5], [6, np.nan]]) result.iloc[result.index <= 2] *= 2 expected = DataFrame([[0, 2], [4, 6], [8, 10], [6, np.nan]]) tm.assert_frame_equal(result, expected) result.iloc[result.index > 2] *= 2 expected = DataFrame([[0, 2], [4, 6], [8, 10], [12, np.nan]]) tm.assert_frame_equal(result, expected) result.iloc[[True, True, False, False]] *= 2 expected = DataFrame([[0, 4], [8, 12], [8, 10], [12, np.nan]]) tm.assert_frame_equal(result, expected) result.iloc[[False, False, True, True]] /= 2 expected = DataFrame([[0.0, 4.0], [8.0, 12.0], [4.0, 5.0], [6.0, np.nan]]) tm.assert_frame_equal(result, expected) def 
test_iloc_getitem_singlerow_slice_categoricaldtype_gives_series(self): # GH#29521 df = DataFrame({"x": Categorical("a b c d e".split())}) result = df.iloc[0] raw_cat = Categorical(["a"], categories=["a", "b", "c", "d", "e"]) expected = Series(raw_cat, index=["x"], name=0, dtype="category") tm.assert_series_equal(result, expected) def test_iloc_getitem_categorical_values(self): # GH#14580 # test iloc() on Series with Categorical data ser = Series([1, 2, 3]).astype("category") # get slice result = ser.iloc[0:2] expected = Series([1, 2]).astype(CategoricalDtype([1, 2, 3])) tm.assert_series_equal(result, expected) # get list of indexes result = ser.iloc[[0, 1]] expected = Series([1, 2]).astype(CategoricalDtype([1, 2, 3])) tm.assert_series_equal(result, expected) # get boolean array result = ser.iloc[[True, False, False]] expected = Series([1]).astype(CategoricalDtype([1, 2, 3])) tm.assert_series_equal(result, expected) @pytest.mark.parametrize("value", [None, NaT, np.nan]) def test_iloc_setitem_td64_values_cast_na(self, value): # GH#18586 series = Series([0, 1, 2], dtype="timedelta64[ns]") series.iloc[0] = value expected = Series([NaT, 1, 2], dtype="timedelta64[ns]") tm.assert_series_equal(series, expected) def test_iloc_setitem_empty_frame_raises_with_3d_ndarray(self): idx = Index([]) obj = DataFrame(np.random.randn(len(idx), len(idx)), index=idx, columns=idx) nd3 = np.random.randint(5, size=(2, 2, 2)) msg = f"Cannot set values with ndim > {obj.ndim}" with pytest.raises(ValueError, match=msg): obj.iloc[nd3] = 0 @pytest.mark.parametrize("indexer", [tm.loc, tm.iloc]) def test_iloc_getitem_read_only_values(self, indexer): # GH#10043 this is fundamentally a test for iloc, but test loc while # we're here rw_array = np.eye(10) rw_df = DataFrame(rw_array) ro_array = np.eye(10) ro_array.setflags(write=False) ro_df = DataFrame(ro_array) tm.assert_frame_equal(indexer(rw_df)[[1, 2, 3]], indexer(ro_df)[[1, 2, 3]]) tm.assert_frame_equal(indexer(rw_df)[[1]], indexer(ro_df)[[1]]) 
tm.assert_series_equal(indexer(rw_df)[1], indexer(ro_df)[1]) tm.assert_frame_equal(indexer(rw_df)[1:3], indexer(ro_df)[1:3]) def test_iloc_getitem_readonly_key(self): # GH#17192 iloc with read-only array raising TypeError df = DataFrame({"data": np.ones(100, dtype="float64")}) indices = np.array([1, 3, 6]) indices.flags.writeable = False result = df.iloc[indices] expected = df.loc[[1, 3, 6]] tm.assert_frame_equal(result, expected) result = df["data"].iloc[indices] expected = df["data"].loc[[1, 3, 6]] tm.assert_series_equal(result, expected) # TODO(ArrayManager) setting single item with an iterable doesn't work yet # in the "split" path @td.skip_array_manager_not_yet_implemented def test_iloc_assign_series_to_df_cell(self): # GH 37593 df = DataFrame(columns=["a"], index=[0]) df.iloc[0, 0] = Series([1, 2, 3]) expected = DataFrame({"a": [Series([1, 2, 3])]}, columns=["a"], index=[0]) tm.assert_frame_equal(df, expected) @pytest.mark.parametrize("klass", [list, np.array]) def test_iloc_setitem_bool_indexer(self, klass): # GH#36741 df = DataFrame({"flag": ["x", "y", "z"], "value": [1, 3, 4]}) indexer = klass([True, False, False]) df.iloc[indexer, 1] = df.iloc[indexer, 1] * 2 expected = DataFrame({"flag": ["x", "y", "z"], "value": [2, 3, 4]}) tm.assert_frame_equal(df, expected) @pytest.mark.parametrize("indexer", [[1], slice(1, 2)]) def test_iloc_setitem_pure_position_based(self, indexer): # GH#22046 df1 = DataFrame({"a2": [11, 12, 13], "b2": [14, 15, 16]}) df2 = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}) df2.iloc[:, indexer] = df1.iloc[:, [0]] expected = DataFrame({"a": [1, 2, 3], "b": [11, 12, 13], "c": [7, 8, 9]}) tm.assert_frame_equal(df2, expected) def test_iloc_setitem_dictionary_value(self): # GH#37728 df = DataFrame({"x": [1, 2], "y": [2, 2]}) rhs = {"x": 9, "y": 99} df.iloc[1] = rhs expected = DataFrame({"x": [1, 9], "y": [2, 99]}) tm.assert_frame_equal(df, expected) # GH#38335 same thing, mixed dtypes df = DataFrame({"x": [1, 2], "y": [2.0, 
2.0]}) df.iloc[1] = rhs expected = DataFrame({"x": [1, 9], "y": [2.0, 99.0]}) tm.assert_frame_equal(df, expected) def test_iloc_getitem_float_duplicates(self): df = DataFrame( np.random.randn(3, 3), index=[0.1, 0.2, 0.2], columns=list("abc") ) expect = df.iloc[1:] tm.assert_frame_equal(df.loc[0.2], expect) expect = df.iloc[1:, 0] tm.assert_series_equal(df.loc[0.2, "a"], expect) df.index = [1, 0.2, 0.2] expect = df.iloc[1:] tm.assert_frame_equal(df.loc[0.2], expect) expect = df.iloc[1:, 0] tm.assert_series_equal(df.loc[0.2, "a"], expect) df = DataFrame( np.random.randn(4, 3), index=[1, 0.2, 0.2, 1], columns=list("abc") ) expect = df.iloc[1:-1] tm.assert_frame_equal(df.loc[0.2], expect) expect = df.iloc[1:-1, 0] tm.assert_series_equal(df.loc[0.2, "a"], expect) df.index = [0.1, 0.2, 2, 0.2] expect = df.iloc[[1, -1]] tm.assert_frame_equal(df.loc[0.2], expect) expect = df.iloc[[1, -1], 0] tm.assert_series_equal(df.loc[0.2, "a"], expect) def test_iloc_setitem_custom_object(self): # iloc with an object class TO: def __init__(self, value): self.value = value def __str__(self) -> str: return f"[{self.value}]" __repr__ = __str__ def __eq__(self, other) -> bool: return self.value == other.value def view(self): return self df = DataFrame(index=[0, 1], columns=[0]) df.iloc[1, 0] = TO(1) df.iloc[1, 0] = TO(2) result = DataFrame(index=[0, 1], columns=[0]) result.iloc[1, 0] = TO(2) tm.assert_frame_equal(result, df) # remains object dtype even after setting it back df = DataFrame(index=[0, 1], columns=[0]) df.iloc[1, 0] = TO(1) df.iloc[1, 0] = np.nan result = DataFrame(index=[0, 1], columns=[0]) tm.assert_frame_equal(result, df) def test_iloc_getitem_with_duplicates(self): df = DataFrame(np.random.rand(3, 3), columns=list("ABC"), index=list("aab")) result = df.iloc[0] assert isinstance(result, Series) tm.assert_almost_equal(result.values, df.values[0]) result = df.T.iloc[:, 0] assert isinstance(result, Series) tm.assert_almost_equal(result.values, df.values[0]) def 
test_iloc_getitem_with_duplicates2(self): # GH#2259 df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1, 1, 2]) result = df.iloc[:, [0]] expected = df.take([0], axis=1) tm.assert_frame_equal(result, expected) def test_iloc_interval(self): # GH#17130 df = DataFrame({Interval(1, 2): [1, 2]}) result = df.iloc[0] expected = Series({Interval(1, 2): 1}, name=0) tm.assert_series_equal(result, expected) result = df.iloc[:, 0] expected = Series([1, 2], name=Interval(1, 2)) tm.assert_series_equal(result, expected) result = df.copy() result.iloc[:, 0] += 1 expected = DataFrame({Interval(1, 2): [2, 3]}) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("indexing_func", [list, np.array]) @pytest.mark.parametrize("rhs_func", [list, np.array]) def test_loc_setitem_boolean_list(self, rhs_func, indexing_func): # GH#20438 testing specifically list key, not arraylike ser = Series([0, 1, 2]) ser.iloc[indexing_func([True, False, True])] = rhs_func([5, 10]) expected = Series([5, 1, 10]) tm.assert_series_equal(ser, expected) df = DataFrame({"a": [0, 1, 2]}) df.iloc[indexing_func([True, False, True])] = rhs_func([[5], [10]]) expected = DataFrame({"a": [5, 1, 10]}) tm.assert_frame_equal(df, expected) class TestILocErrors: # NB: this test should work for _any_ Series we can pass as # series_with_simple_index def test_iloc_float_raises(self, series_with_simple_index, frame_or_series): # GH#4892 # float_indexers should raise exceptions # on appropriate Index types & accessors # this duplicates the code below # but is specifically testing for the error # message obj = series_with_simple_index if frame_or_series is DataFrame: obj = obj.to_frame() msg = "Cannot index by location index with a non-integer key" with pytest.raises(TypeError, match=msg): obj.iloc[3.0] with pytest.raises(IndexError, match=_slice_iloc_msg): obj.iloc[3.0] = 0 def test_iloc_getitem_setitem_fancy_exceptions(self, float_frame): with pytest.raises(IndexingError, match="Too many indexers"): float_frame.iloc[:, 
:, :] with pytest.raises(IndexError, match="too many indices for array"): # GH#32257 we let numpy do validation, get their exception float_frame.iloc[:, :, :] = 1 # TODO(ArrayManager) "split" path doesn't properly implement DataFrame indexer @td.skip_array_manager_not_yet_implemented def test_iloc_frame_indexer(self): # GH#39004 df = DataFrame({"a": [1, 2, 3]}) indexer = DataFrame({"a": [True, False, True]}) with tm.assert_produces_warning(FutureWarning): df.iloc[indexer] = 1 msg = ( "DataFrame indexer is not allowed for .iloc\n" "Consider using .loc for automatic alignment." ) with pytest.raises(IndexError, match=msg): df.iloc[indexer] class TestILocSetItemDuplicateColumns: def test_iloc_setitem_scalar_duplicate_columns(self): # GH#15686, duplicate columns and mixed dtype df1 = DataFrame([{"A": None, "B": 1}, {"A": 2, "B": 2}]) df2 = DataFrame([{"A": 3, "B": 3}, {"A": 4, "B": 4}]) df = concat([df1, df2], axis=1) df.iloc[0, 0] = -1 assert df.iloc[0, 0] == -1 assert df.iloc[0, 2] == 3 assert df.dtypes.iloc[2] == np.int64 def test_iloc_setitem_list_duplicate_columns(self): # GH#22036 setting with same-sized list df = DataFrame([[0, "str", "str2"]], columns=["a", "b", "b"]) df.iloc[:, 2] = ["str3"] expected = DataFrame([[0, "str", "str3"]], columns=["a", "b", "b"]) tm.assert_frame_equal(df, expected) def test_iloc_setitem_series_duplicate_columns(self): df = DataFrame( np.arange(8, dtype=np.int64).reshape(2, 4), columns=["A", "B", "A", "B"] ) df.iloc[:, 0] = df.iloc[:, 0].astype(np.float64) assert df.dtypes.iloc[2] == np.int64 @pytest.mark.parametrize( ["dtypes", "init_value", "expected_value"], [("int64", "0", 0), ("float", "1.2", 1.2)], ) def test_iloc_setitem_dtypes_duplicate_columns( self, dtypes, init_value, expected_value ): # GH#22035 df = DataFrame([[init_value, "str", "str2"]], columns=["a", "b", "b"]) df.iloc[:, 0] = df.iloc[:, 0].astype(dtypes) expected_df = DataFrame( [[expected_value, "str", "str2"]], columns=["a", "b", "b"] ) tm.assert_frame_equal(df, 
expected_df) class TestILocCallable: def test_frame_iloc_getitem_callable(self): # GH#11485 df = DataFrame({"X": [1, 2, 3, 4], "Y": list("aabb")}, index=list("ABCD")) # return location res = df.iloc[lambda x: [1, 3]] tm.assert_frame_equal(res, df.iloc[[1, 3]]) res = df.iloc[lambda x: [1, 3], :] tm.assert_frame_equal(res, df.iloc[[1, 3], :]) res = df.iloc[lambda x: [1, 3], lambda x: 0] tm.assert_series_equal(res, df.iloc[[1, 3], 0]) res = df.iloc[lambda x: [1, 3], lambda x: [0]] tm.assert_frame_equal(res, df.iloc[[1, 3], [0]]) # mixture res = df.iloc[[1, 3], lambda x: 0] tm.assert_series_equal(res, df.iloc[[1, 3], 0]) res = df.iloc[[1, 3], lambda x: [0]] tm.assert_frame_equal(res, df.iloc[[1, 3], [0]]) res = df.iloc[lambda x: [1, 3], 0] tm.assert_series_equal(res, df.iloc[[1, 3], 0]) res = df.iloc[lambda x: [1, 3], [0]] tm.assert_frame_equal(res, df.iloc[[1, 3], [0]]) def test_frame_iloc_setitem_callable(self): # GH#11485 df = DataFrame({"X": [1, 2, 3, 4], "Y": list("aabb")}, index=list("ABCD")) # return location res = df.copy() res.iloc[lambda x: [1, 3]] = 0 exp = df.copy() exp.iloc[[1, 3]] = 0 tm.assert_frame_equal(res, exp) res = df.copy() res.iloc[lambda x: [1, 3], :] = -1 exp = df.copy() exp.iloc[[1, 3], :] = -1 tm.assert_frame_equal(res, exp) res = df.copy() res.iloc[lambda x: [1, 3], lambda x: 0] = 5 exp = df.copy() exp.iloc[[1, 3], 0] = 5 tm.assert_frame_equal(res, exp) res = df.copy() res.iloc[lambda x: [1, 3], lambda x: [0]] = 25 exp = df.copy() exp.iloc[[1, 3], [0]] = 25 tm.assert_frame_equal(res, exp) # mixture res = df.copy() res.iloc[[1, 3], lambda x: 0] = -3 exp = df.copy() exp.iloc[[1, 3], 0] = -3 tm.assert_frame_equal(res, exp) res = df.copy() res.iloc[[1, 3], lambda x: [0]] = -5 exp = df.copy() exp.iloc[[1, 3], [0]] = -5 tm.assert_frame_equal(res, exp) res = df.copy() res.iloc[lambda x: [1, 3], 0] = 10 exp = df.copy() exp.iloc[[1, 3], 0] = 10 tm.assert_frame_equal(res, exp) res = df.copy() res.iloc[lambda x: [1, 3], [0]] = [-5, -5] exp = df.copy() 
exp.iloc[[1, 3], [0]] = [-5, -5] tm.assert_frame_equal(res, exp) class TestILocSeries: def test_iloc(self): ser = Series(np.random.randn(10), index=list(range(0, 20, 2))) for i in range(len(ser)): result = ser.iloc[i] exp = ser[ser.index[i]] tm.assert_almost_equal(result, exp) # pass a slice result = ser.iloc[slice(1, 3)] expected = ser.loc[2:4] tm.assert_series_equal(result, expected) # test slice is a view result[:] = 0 assert (ser[1:3] == 0).all() # list of integers result = ser.iloc[[0, 2, 3, 4, 5]] expected = ser.reindex(ser.index[[0, 2, 3, 4, 5]]) tm.assert_series_equal(result, expected) def test_iloc_getitem_nonunique(self): ser = Series([0, 1, 2], index=[0, 1, 0]) assert ser.iloc[2] == 2 def test_iloc_setitem_pure_position_based(self): # GH#22046 ser1 = Series([1, 2, 3]) ser2 = Series([4, 5, 6], index=[1, 0, 2]) ser1.iloc[1:3] = ser2.iloc[1:3] expected = Series([1, 5, 6]) tm.assert_series_equal(ser1, expected) def test_iloc_nullable_int64_size_1_nan(self): # GH 31861 result = DataFrame({"a": ["test"], "b": [np.nan]}) result.loc[:, "b"] = result.loc[:, "b"].astype("Int64") expected = DataFrame({"a": ["test"], "b": array([NA], dtype="Int64")}) tm.assert_frame_equal(result, expected)
""" Copyright (c) 2012-2016 Ben Croston Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from RPi._GPIO import * VERSION = '0.6.3'
from __future__ import print_function from .patchpipette import PatchPipette
from io import BytesIO
import tempfile
import os
import time
import shutil
from contextlib import contextmanager
import six
import sys

from netlib import utils, tcp, http


def treader(bytes):
    """
    Construct a tcp.Reader object from bytes.
    """
    fp = BytesIO(bytes)
    return tcp.Reader(fp)


@contextmanager
def tmpdir(*args, **kwargs):
    """
    Context manager: create a temporary directory and chdir into it for the
    duration of the with-block, then restore the previous working directory
    and remove the temporary directory.

    Any *args/**kwargs are forwarded to tempfile.mkdtemp().

    Bugfix: cleanup now runs in a ``finally`` clause, so an exception raised
    inside the managed block no longer leaves the process chdir'd into a
    leaked temporary directory.
    """
    orig_workdir = os.getcwd()
    temp_workdir = tempfile.mkdtemp(*args, **kwargs)
    os.chdir(temp_workdir)
    try:
        yield temp_workdir
    finally:
        os.chdir(orig_workdir)
        shutil.rmtree(temp_workdir)


def _check_exception(expected, actual, exc_tb):
    """
    Check that the caught exception *actual* matches *expected*.

    ``expected`` may be a string (case-insensitive substring match against
    ``str(actual)``) or an exception class (``isinstance`` check).  On a
    mismatch, an AssertionError is re-raised with the original traceback
    ``exc_tb`` so the failure points at the offending call site.
    """
    if isinstance(expected, six.string_types):
        if expected.lower() not in str(actual).lower():
            six.reraise(AssertionError, AssertionError(
                "Expected %s, but caught %s" % (
                    repr(expected), repr(actual)
                )
            ), exc_tb)
    else:
        if not isinstance(actual, expected):
            six.reraise(AssertionError, AssertionError(
                "Expected %s, but caught %s %s" % (
                    expected.__name__,
                    actual.__class__.__name__,
                    repr(actual)
                )
            ), exc_tb)


def raises(expected_exception, obj=None, *args, **kwargs):
    """
    Assert that a callable raises a specified exception.

    :exc An exception class or a string. If a class, assert that an
        exception of this type is raised. If a string, assert that the
        string occurs in the string representation of the exception,
        based on a case-insensitive match.

    :obj A callable object.  If None, a RaisesContext context manager is
        returned instead, enabling ``with raises(...):`` usage.

    :args Arguments to be passed to the callable.

    :kwargs Arguments to be passed to the callable.
    """
    if obj is None:
        return RaisesContext(expected_exception)
    else:
        try:
            ret = obj(*args, **kwargs)
        except Exception as actual:
            _check_exception(expected_exception, actual, sys.exc_info()[2])
        else:
            raise AssertionError("No exception raised. Return value: {}".format(ret))


class RaisesContext(object):
    """Context-manager form of :func:`raises`."""

    def __init__(self, expected_exception):
        self.expected_exception = expected_exception

    def __enter__(self):
        return

    def __exit__(self, exc_type, exc_val, exc_tb):
        if not exc_type:
            raise AssertionError("No exception raised.")
        else:
            _check_exception(self.expected_exception, exc_val, exc_tb)
        # Swallow the (matching) exception.
        return True


# Shared test-data helper, rooted at <package>/../test/netlib.
test_data = utils.Data(__name__)
test_data.dirname = os.path.join(test_data.dirname, "..", "test", "netlib")


def treq(**kwargs):
    """
    Build a netlib.http.Request with sensible defaults; keyword arguments
    override individual defaults.

    Returns:
        netlib.http.Request
    """
    default = dict(
        first_line_format="relative",
        method=b"GET",
        scheme=b"http",
        host=b"address",
        port=22,
        path=b"/path",
        http_version=b"HTTP/1.1",
        headers=http.Headers(((b"header", b"qvalue"), (b"content-length", b"7"))),
        content=b"content"
    )
    default.update(kwargs)
    return http.Request(**default)


def tresp(**kwargs):
    """
    Build a netlib.http.Response with sensible defaults; keyword arguments
    override individual defaults.

    Returns:
        netlib.http.Response
    """
    default = dict(
        http_version=b"HTTP/1.1",
        status_code=200,
        reason=b"OK",
        headers=http.Headers(((b"header-response", b"svalue"), (b"content-length", b"7"))),
        content=b"message",
        timestamp_start=time.time(),
        timestamp_end=time.time(),
    )
    default.update(kwargs)
    return http.Response(**default)
from _NetworKit import PageRankNibble, GCE
""" A directive for including a matplotlib plot in a Sphinx document. By default, in HTML output, `plot` will include a .png file with a link to a high-res .png and .pdf. In LaTeX output, it will include a .pdf. The source code for the plot may be included in one of three ways: 1. **A path to a source file** as the argument to the directive:: .. plot:: path/to/plot.py When a path to a source file is given, the content of the directive may optionally contain a caption for the plot:: .. plot:: path/to/plot.py This is the caption for the plot Additionally, one my specify the name of a function to call (with no arguments) immediately after importing the module:: .. plot:: path/to/plot.py plot_function1 2. Included as **inline content** to the directive:: .. plot:: import matplotlib.pyplot as plt import matplotlib.image as mpimg import numpy as np img = mpimg.imread('_static/stinkbug.png') imgplot = plt.imshow(img) 3. Using **doctest** syntax:: .. plot:: A plotting example: >>> import matplotlib.pyplot as plt >>> plt.plot([1,2,3], [4,5,6]) Options ------- The ``plot`` directive supports the following options: format : {'python', 'doctest'} Specify the format of the input include-source : bool Whether to display the source code. The default can be changed using the `plot_include_source` variable in conf.py encoding : str If this source file is in a non-UTF8 or non-ASCII encoding, the encoding must be specified using the `:encoding:` option. The encoding will not be inferred using the ``-*- coding -*-`` metacomment. context : bool If provided, the code will be run in the context of all previous plot directives for which the `:context:` option was specified. This only applies to inline code plot directives, not those run from files. nofigs : bool If specified, the code block will be run, but no figures will be inserted. This is usually useful with the ``:context:`` option. 
Additionally, this directive supports all of the options of the `image` directive, except for `target` (since plot will add its own target). These include `alt`, `height`, `width`, `scale`, `align` and `class`. Configuration options --------------------- The plot directive has the following configuration options: plot_include_source Default value for the include-source option plot_pre_code Code that should be executed before each plot. plot_basedir Base directory, to which ``plot::`` file names are relative to. (If None or empty, file names are relative to the directoly where the file containing the directive is.) plot_formats File formats to generate. List of tuples or strings:: [(suffix, dpi), suffix, ...] that determine the file format and the DPI. For entries whose DPI was omitted, sensible defaults are chosen. plot_html_show_formats Whether to show links to the files in HTML. plot_rcparams A dictionary containing any non-standard rcParams that should be applied before each plot. plot_apply_rcparams By default, rcParams are applied when `context` option is not used in a plot directive. This configuration option overrides this behaviour and applies rcParams before each plot. plot_working_directory By default, the working directory will be changed to the directory of the example, so the code can get at its data files, if any. Also its path will be added to `sys.path` so it can import any helper modules sitting beside it. This configuration option can be used to specify a central directory (also added to `sys.path`) where data files and helper modules for all code are located. plot_template Provide a customized template for preparing resturctured text. 
""" from __future__ import print_function import sys, os, glob, shutil, imp, warnings, cStringIO, re, textwrap import traceback from docutils.parsers.rst import directives from docutils import nodes from docutils.parsers.rst.directives.images import Image align = Image.align import sphinx sphinx_version = sphinx.__version__.split(".") sphinx_version = tuple([int(re.split('[a-z]', x)[0]) for x in sphinx_version[:2]]) try: # Sphinx depends on either Jinja or Jinja2 import jinja2 def format_template(template, **kw): return jinja2.Template(template).render(**kw) except ImportError: import jinja def format_template(template, **kw): return jinja.from_string(template, **kw) import matplotlib import matplotlib.cbook as cbook matplotlib.use('Agg') import matplotlib.pyplot as plt from matplotlib import _pylab_helpers __version__ = 2 try: from os.path import relpath except ImportError: # Copied from Python 2.7 if 'posix' in sys.builtin_module_names: def relpath(path, start=os.path.curdir): """Return a relative version of a path""" from os.path import sep, curdir, join, abspath, commonprefix, \ pardir if not path: raise ValueError("no path specified") start_list = abspath(start).split(sep) path_list = abspath(path).split(sep) # Work out how much of the filepath is shared by start and path. 
i = len(commonprefix([start_list, path_list])) rel_list = [pardir] * (len(start_list)-i) + path_list[i:] if not rel_list: return curdir return join(*rel_list) elif 'nt' in sys.builtin_module_names: def relpath(path, start=os.path.curdir): """Return a relative version of a path""" from os.path import sep, curdir, join, abspath, commonprefix, \ pardir, splitunc if not path: raise ValueError("no path specified") start_list = abspath(start).split(sep) path_list = abspath(path).split(sep) if start_list[0].lower() != path_list[0].lower(): unc_path, rest = splitunc(path) unc_start, rest = splitunc(start) if bool(unc_path) ^ bool(unc_start): raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)" % (path, start)) else: raise ValueError("path is on drive %s, start on drive %s" % (path_list[0], start_list[0])) # Work out how much of the filepath is shared by start and path. for i in range(min(len(start_list), len(path_list))): if start_list[i].lower() != path_list[i].lower(): break else: i += 1 rel_list = [pardir] * (len(start_list)-i) + path_list[i:] if not rel_list: return curdir return join(*rel_list) else: raise RuntimeError("Unsupported platform (no relpath available!)") def plot_directive(name, arguments, options, content, lineno, content_offset, block_text, state, state_machine): return run(arguments, content, options, state_machine, state, lineno) plot_directive.__doc__ = __doc__ def _option_boolean(arg): if not arg or not arg.strip(): # no argument given, assume used as a flag return True elif arg.strip().lower() in ('no', '0', 'false'): return False elif arg.strip().lower() in ('yes', '1', 'true'): return True else: raise ValueError('"%s" unknown boolean' % arg) def _option_format(arg): return directives.choice(arg, ('python', 'doctest')) def _option_align(arg): return directives.choice(arg, ("top", "middle", "bottom", "left", "center", "right")) def mark_plot_labels(app, document): """ To make plots referenceable, we need to move the reference from the 
"htmlonly" (or "latexonly") node to the actual figure node itself. """ for name, explicit in document.nametypes.iteritems(): if not explicit: continue labelid = document.nameids[name] if labelid is None: continue node = document.ids[labelid] if node.tagname in ('html_only', 'latex_only'): for n in node: if n.tagname == 'figure': sectname = name for c in n: if c.tagname == 'caption': sectname = c.astext() break node['ids'].remove(labelid) node['names'].remove(name) n['ids'].append(labelid) n['names'].append(name) document.settings.env.labels[name] = \ document.settings.env.docname, labelid, sectname break def setup(app): setup.app = app setup.config = app.config setup.confdir = app.confdir options = {'alt': directives.unchanged, 'height': directives.length_or_unitless, 'width': directives.length_or_percentage_or_unitless, 'scale': directives.nonnegative_int, 'align': _option_align, 'class': directives.class_option, 'include-source': _option_boolean, 'format': _option_format, 'context': directives.flag, 'nofigs': directives.flag, 'encoding': directives.encoding } app.add_directive('plot', plot_directive, True, (0, 2, False), **options) app.add_config_value('plot_pre_code', None, True) app.add_config_value('plot_include_source', False, True) app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True) app.add_config_value('plot_basedir', None, True) app.add_config_value('plot_html_show_formats', True, True) app.add_config_value('plot_rcparams', {}, True) app.add_config_value('plot_apply_rcparams', False, True) app.add_config_value('plot_working_directory', None, True) app.add_config_value('plot_template', None, True) app.connect('doctree-read', mark_plot_labels) def contains_doctest(text): try: # check if it's valid Python as-is compile(text, '<string>', 'exec') return False except SyntaxError: pass r = re.compile(r'^\s*>>>', re.M) m = r.search(text) return bool(m) def unescape_doctest(text): """ Extract code from a piece of text, which contains either 
Python code or doctests. """ if not contains_doctest(text): return text code = "" for line in text.split("\n"): m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line) if m: code += m.group(2) + "\n" elif line.strip(): code += "# " + line.strip() + "\n" else: code += "\n" return code def split_code_at_show(text): """ Split code at plt.show() """ parts = [] is_doctest = contains_doctest(text) part = [] for line in text.split("\n"): if (not is_doctest and line.strip() == 'plt.show()') or \ (is_doctest and line.strip() == '>>> plt.show()'): part.append(line) parts.append("\n".join(part)) part = [] else: part.append(line) if "\n".join(part).strip(): parts.append("\n".join(part)) return parts TEMPLATE = """ {{ source_code }} {{ only_html }} {% if source_link or (html_show_formats and not multi_image) %} ( {%- if source_link -%} `Source code <{{ source_link }}>`__ {%- endif -%} {%- if html_show_formats and not multi_image -%} {%- for img in images -%} {%- for fmt in img.formats -%} {%- if source_link or not loop.first -%}, {% endif -%} `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ {%- endfor -%} {%- endfor -%} {%- endif -%} ) {% endif %} {% for img in images %} .. figure:: {{ build_dir }}/{{ img.basename }}.png {%- for option in options %} {{ option }} {% endfor %} {% if html_show_formats and multi_image -%} ( {%- for fmt in img.formats -%} {%- if not loop.first -%}, {% endif -%} `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ {%- endfor -%} ) {%- endif -%} {{ caption }} {% endfor %} {{ only_latex }} {% for img in images %} .. image:: {{ build_dir }}/{{ img.basename }}.pdf {% endfor %} {{ only_texinfo }} {% for img in images %} .. image:: {{ build_dir }}/{{ img.basename }}.png {%- for option in options %} {{ option }} {% endfor %} {% endfor %} """ exception_template = """ .. htmlonly:: [`source code <%(linkdir)s/%(basename)s.py>`__] Exception occurred rendering plot. 
""" plot_context = dict() class ImageFile(object): def __init__(self, basename, dirname): self.basename = basename self.dirname = dirname self.formats = [] def filename(self, format): return os.path.join(self.dirname, "%s.%s" % (self.basename, format)) def filenames(self): return [self.filename(fmt) for fmt in self.formats] def out_of_date(original, derived): """ Returns True if derivative is out-of-date wrt original, both of which are full file paths. """ return (not os.path.exists(derived) or (os.path.exists(original) and os.stat(derived).st_mtime < os.stat(original).st_mtime)) class PlotError(RuntimeError): pass def run_code(code, code_path, ns=None, function_name=None): """ Import a Python module from a path, and run the function given by name, if function_name is not None. """ # Change the working directory to the directory of the example, so # it can get at its data files, if any. Add its path to sys.path # so it can import any helper modules sitting beside it. pwd = os.getcwd() old_sys_path = list(sys.path) if setup.config.plot_working_directory is not None: try: os.chdir(setup.config.plot_working_directory) except OSError as err: raise OSError(str(err) + '\n`plot_working_directory` option in' 'Sphinx configuration file must be a valid ' 'directory path') except TypeError as err: raise TypeError(str(err) + '\n`plot_working_directory` option in ' 'Sphinx configuration file must be a string or ' 'None') sys.path.insert(0, setup.config.plot_working_directory) elif code_path is not None: dirname = os.path.abspath(os.path.dirname(code_path)) os.chdir(dirname) sys.path.insert(0, dirname) # Redirect stdout stdout = sys.stdout sys.stdout = cStringIO.StringIO() # Reset sys.argv old_sys_argv = sys.argv sys.argv = [code_path] try: try: code = unescape_doctest(code) if ns is None: ns = {} if not ns: if setup.config.plot_pre_code is None: exec "import numpy as np\nfrom matplotlib import pyplot as plt\n" in ns else: exec setup.config.plot_pre_code in ns if "__main__" in 
code: exec "__name__ = '__main__'" in ns exec code in ns if function_name is not None: exec function_name + "()" in ns except (Exception, SystemExit), err: raise PlotError(traceback.format_exc()) finally: os.chdir(pwd) sys.argv = old_sys_argv sys.path[:] = old_sys_path sys.stdout = stdout return ns def clear_state(plot_rcparams): plt.close('all') matplotlib.rc_file_defaults() matplotlib.rcParams.update(plot_rcparams) def render_figures(code, code_path, output_dir, output_base, context, function_name, config): """ Run a pyplot script and save the low and high res PNGs and a PDF in outdir. Save the images under *output_dir* with file names derived from *output_base* """ # -- Parse format list default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 200} formats = [] plot_formats = config.plot_formats if isinstance(plot_formats, (str, unicode)): plot_formats = eval(plot_formats) for fmt in plot_formats: if isinstance(fmt, str): formats.append((fmt, default_dpi.get(fmt, 80))) elif type(fmt) in (tuple, list) and len(fmt)==2: formats.append((str(fmt[0]), int(fmt[1]))) else: raise PlotError('invalid image format "%r" in plot_formats' % fmt) # -- Try to determine if all images already exist code_pieces = split_code_at_show(code) # Look for single-figure output files first # Look for single-figure output files first all_exists = True img = ImageFile(output_base, output_dir) for format, dpi in formats: if out_of_date(code_path, img.filename(format)): all_exists = False break img.formats.append(format) if all_exists: return [(code, [img])] # Then look for multi-figure output files results = [] all_exists = True for i, code_piece in enumerate(code_pieces): images = [] for j in xrange(1000): if len(code_pieces) > 1: img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir) else: img = ImageFile('%s_%02d' % (output_base, j), output_dir) for format, dpi in formats: if out_of_date(code_path, img.filename(format)): all_exists = False break img.formats.append(format) # assume that 
if we have one, we have them all if not all_exists: all_exists = (j > 0) break images.append(img) if not all_exists: break results.append((code_piece, images)) if all_exists: return results # We didn't find the files, so build them results = [] if context: ns = plot_context else: ns = {} for i, code_piece in enumerate(code_pieces): if not context or config.plot_apply_rcparams: clear_state(config.plot_rcparams) run_code(code_piece, code_path, ns, function_name) images = [] fig_managers = _pylab_helpers.Gcf.get_all_fig_managers() for j, figman in enumerate(fig_managers): if len(fig_managers) == 1 and len(code_pieces) == 1: img = ImageFile(output_base, output_dir) elif len(code_pieces) == 1: img = ImageFile("%s_%02d" % (output_base, j), output_dir) else: img = ImageFile("%s_%02d_%02d" % (output_base, i, j), output_dir) images.append(img) for format, dpi in formats: try: figman.canvas.figure.savefig(img.filename(format), dpi=dpi) except Exception,err: raise PlotError(traceback.format_exc()) img.formats.append(format) results.append((code_piece, images)) if not context or config.plot_apply_rcparams: clear_state(config.plot_rcparams) return results def run(arguments, content, options, state_machine, state, lineno): # The user may provide a filename *or* Python code content, but not both if arguments and content: raise RuntimeError("plot:: directive can't have both args and content") document = state_machine.document config = document.settings.env.config nofigs = options.has_key('nofigs') options.setdefault('include-source', config.plot_include_source) context = options.has_key('context') rst_file = document.attributes['source'] rst_dir = os.path.dirname(rst_file) if len(arguments): if not config.plot_basedir: source_file_name = os.path.join(setup.app.builder.srcdir, directives.uri(arguments[0])) else: source_file_name = os.path.join(setup.confdir, config.plot_basedir, directives.uri(arguments[0])) # If there is content, it will be passed as a caption. 
caption = '\n'.join(content) # If the optional function name is provided, use it if len(arguments) == 2: function_name = arguments[1] else: function_name = None with open(source_file_name, 'r') as fd: code = fd.read() output_base = os.path.basename(source_file_name) else: source_file_name = rst_file code = textwrap.dedent("\n".join(map(str, content))) counter = document.attributes.get('_plot_counter', 0) + 1 document.attributes['_plot_counter'] = counter base, ext = os.path.splitext(os.path.basename(source_file_name)) output_base = '%s-%d.py' % (base, counter) function_name = None caption = '' base, source_ext = os.path.splitext(output_base) if source_ext in ('.py', '.rst', '.txt'): output_base = base else: source_ext = '' # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames output_base = output_base.replace('.', '-') # is it in doctest format? is_doctest = contains_doctest(code) if options.has_key('format'): if options['format'] == 'python': is_doctest = False else: is_doctest = True # determine output directory name fragment source_rel_name = relpath(source_file_name, setup.confdir) source_rel_dir = os.path.dirname(source_rel_name) while source_rel_dir.startswith(os.path.sep): source_rel_dir = source_rel_dir[1:] # build_dir: where to place output files (temporarily) build_dir = os.path.join(os.path.dirname(setup.app.doctreedir), 'plot_directive', source_rel_dir) # get rid of .. in paths, also changes pathsep # see note in Python docs for warning about symbolic links on Windows. 
# need to compare source and dest paths at end build_dir = os.path.normpath(build_dir) if not os.path.exists(build_dir): os.makedirs(build_dir) # output_dir: final location in the builder's directory dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir, source_rel_dir)) if not os.path.exists(dest_dir): os.makedirs(dest_dir) # no problem here for me, but just use built-ins # how to link to files from the RST file dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir), source_rel_dir).replace(os.path.sep, '/') build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/') source_link = dest_dir_link + '/' + output_base + source_ext # make figures try: results = render_figures(code, source_file_name, build_dir, output_base, context, function_name, config) errors = [] except PlotError, err: reporter = state.memo.reporter sm = reporter.system_message( 2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base, source_file_name, err), line=lineno) results = [(code, [])] errors = [sm] # Properly indent the caption caption = '\n'.join(' ' + line.strip() for line in caption.split('\n')) # generate output restructuredtext total_lines = [] for j, (code_piece, images) in enumerate(results): if options['include-source']: if is_doctest: lines = [''] lines += [row.rstrip() for row in code_piece.split('\n')] else: lines = ['.. code-block:: python', ''] lines += [' %s' % row.rstrip() for row in code_piece.split('\n')] source_code = "\n".join(lines) else: source_code = "" if nofigs: images = [] opts = [':%s: %s' % (key, val) for key, val in options.items() if key in ('alt', 'height', 'width', 'scale', 'align', 'class')] only_html = ".. only:: html" only_latex = ".. only:: latex" only_texinfo = ".. 
only:: texinfo" if j == 0: src_link = source_link else: src_link = None result = format_template( config.plot_template or TEMPLATE, dest_dir=dest_dir_link, build_dir=build_dir_link, source_link=src_link, multi_image=len(images) > 1, only_html=only_html, only_latex=only_latex, only_texinfo=only_texinfo, options=opts, images=images, source_code=source_code, html_show_formats=config.plot_html_show_formats, caption=caption) total_lines.extend(result.split("\n")) total_lines.extend("\n") if total_lines: state_machine.insert_input(total_lines, source=source_file_name) # copy image files to builder's output directory, if necessary if not os.path.exists(dest_dir): cbook.mkdirs(dest_dir) for code_piece, images in results: for img in images: for fn in img.filenames(): destimg = os.path.join(dest_dir, os.path.basename(fn)) if fn != destimg: shutil.copyfile(fn, destimg) # copy script (if necessary) target_name = os.path.join(dest_dir, output_base + source_ext) with open(target_name, 'w') as f: if source_file_name == rst_file: code_escaped = unescape_doctest(code) else: code_escaped = code f.write(code_escaped) return errors
""" accounts.test_views =================== Tests the REST API calls. Add more specific social registration tests """ import responses from django.core.urlresolvers import reverse from django.core import mail from django.contrib.sites.models import Site from django.contrib.auth import get_user_model from django.test.utils import override_settings from rest_framework import status from rest_framework.test import APIClient, APITestCase from allauth.account import app_settings from allauth.socialaccount.models import SocialApp from allauth.socialaccount.providers.facebook.provider import GRAPH_API_URL from .serializers import LoginSerializer class TestAccounts(APITestCase): """ Tests normal use - non social login. """ def setUp(self): self.login_url = reverse('accounts:rest_login') self.logout_url = reverse('accounts:rest_logout') self.register_url = reverse('accounts:rest_register') self.password_reset_url = reverse('accounts:rest_password_reset') self.rest_password_reset_confirm_url = reverse('accounts:rest_password_reset_confirm') self.password_change_url = reverse('accounts:rest_password_change') self.verify_url = reverse('accounts:rest_verify_email') self.user_url = reverse('accounts:rest_user_details') self.client = APIClient() self.reusable_user_data = {'username': 'admin', 'email': 'admin@email.com', 'password': 'password12'} self.reusable_user_data_change_password = {'username': 'admin', 'email': 'admin@email.com', 'password': 'password_same'} self.reusable_register_user_data = {'username': 'admin', 'email': 'admin@email.com', 'password1': 'password12', 'password2': 'password12'} self.reusable_register_user_data1 = {'username': 'admin1', 'email': 'admin1@email.com', 'password1': 'password12', 'password2': 'password12'} self.reusable_register_user_data_no_username = {'email': 'admin@email.com', 'password1': 'password12', 'password2': 'password12'} self.reusable_register_user_data_no_email = {'username': 'admin', 'password1': 'password12', 'password2': 
'password12'} self.change_password_data_incorrect = {"new_password1": "password_not_same", "new_password2": "password_same"} self.change_password_data = {"new_password1": "password_same", "new_password2": "password_same"} self.change_password_data_old_password_field_enabled = {"old_password": "password12", "new_password1": "password_same", "new_password2": "password_same"} def create_user_and_login(self): """ Helper function to create a basic user, login and assign token credentials. """ get_user_model().objects.create_user('admin', 'admin@email.com', 'password12') response = self.client.post(self.login_url, self.reusable_user_data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK, "Snap! Basic Login has failed with a helper function 'create_user_and_login'. Something is really wrong here.") self.client.credentials(HTTP_AUTHORIZATION='Token ' + response.data['key']) def _generate_uid_and_token(self, user): result = {} from django.utils.encoding import force_bytes from django.contrib.auth.tokens import default_token_generator from django import VERSION if VERSION[1] == 5: from django.utils.http import int_to_base36 result['uid'] = int_to_base36(user.pk) else: from django.utils.http import urlsafe_base64_encode result['uid'] = urlsafe_base64_encode(force_bytes(user.pk)) result['token'] = default_token_generator.make_token(user) return result def cleanUp(self): pass @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME) def test_login_basic_username_auth_method(self): """ Tests basic functionality of login with authentication method of username. 
""" # Assumes you provide username,password and returns a token get_user_model().objects.create_user('admin3', '', 'password12') data = {"username": 'admin3', "email": "", "password": 'password12'} response = self.client.post(self.login_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertIn('key', response.content) @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL, ACCOUNT_EMAIL_REQUIRED=True) def test_login_basic_email_auth_method(self): """ Tests basic functionality of login with authentication method of email. """ # Assumes you provide username,password and returns a token get_user_model().objects.create_user('admin', 'email.login@gmail.com', 'password12') data = {"username": '', "email": "email.login@gmail.com", "password": 'password12'} response = self.client.post(self.login_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertIn('key', response.content) @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL) def test_login_basic_username_email_auth_method(self): """ Tests basic functionality of login with authentication method of username or email. 
""" # Assumes you provide username,password and returns a token get_user_model().objects.create_user('admin', 'email.login@gmail.com', 'password12') # Check email data = {"username": '', "email": "email.login@gmail.com", "password": 'password12'} response = self.client.post(self.login_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) # Check username data = {"username": 'admin', "email": '', "password": 'password12'} response = self.client.post(self.login_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertIn('key', response.content) @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME) def test_login_auth_method_username_fail_no_users_in_db(self): """ Tests login fails with a 400 when no users in db for login auth method of 'username'. """ serializer = LoginSerializer({'username': 'admin', 'password': 'password12'}) response = self.client.post(self.login_url, serializer.data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL) def test_login_email_auth_method_fail_no_users_in_db(self): """ Tests login fails with a 400 when no users in db for login auth method of 'email'. """ serializer = LoginSerializer({'username': 'admin', 'password': 'password12'}) response = self.client.post(self.login_url, serializer.data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL) def test_login_username_email_auth_method_fail_no_users_in_db(self): """ Tests login fails with a 400 when no users in db for login auth method of 'username_email'. 
""" serializer = LoginSerializer({'username': 'admin', 'password': 'password12'}) response = self.client.post(self.login_url, serializer.data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) def common_test_login_fail_incorrect_change(self): # Create user, login and try and change password INCORRECTLY self.create_user_and_login() self.client.post(self.password_change_url, data=self.change_password_data_incorrect, format='json') # Remove credentials self.client.credentials() response = self.client.post(self.login_url, self.reusable_user_data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertIn('key', response.content) @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME) def test_login_username_auth_method_fail_incorrect_password_change(self): """ Tests login fails with an incorrect/invalid password change (login auth username). """ self.common_test_login_fail_incorrect_change() @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL) def test_login_email_auth_method_fail_incorrect_password_change(self): """ Tests login fails with an incorrect/invalid password change (login auth email). """ self.common_test_login_fail_incorrect_change() @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL) def test_login_username_email_auth_method_fail_incorrect_password_change(self): """ Tests login fails with an incorrect/invalid password change (login auth username_email). 
""" self.common_test_login_fail_incorrect_change() def common_test_login_correct_password_change(self): # Create user, login and try and change password successfully self.create_user_and_login() response = self.client.post(self.password_change_url, data=self.change_password_data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) # Remove credentials self.client.credentials() response = self.client.post(self.login_url, self.reusable_user_data_change_password, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertIn('key', response.content) @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME) def test_login_username_auth_method_correct_password_change(self): """ Tests login is succesful with a correct password change (login auth username). """ self.common_test_login_correct_password_change() @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL) def test_login_email_auth_method_correct_password_change(self): """ Tests login is succesful with a correct password change (login auth email). """ self.common_test_login_correct_password_change() @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL) def test_login_username_email_auth_method_correct_password_change(self): """ Tests login is succesful with a correct password change (login auth username_email). """ self.common_test_login_correct_password_change() def test_login_fail_no_input(self): """ Tests login fails when you provide no username and no email (login auth username_email). 
""" get_user_model().objects.create_user('admin', 'email.login@gmail.com', 'password12') data = {"username": '', "email": '', "password": ''} response = self.client.post(self.login_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME) def test_login_username_auth_method_fail_no_input(self): """ Tests login fails when you provide no username (login auth username). """ get_user_model().objects.create_user('admin', 'email.login@gmail.com', 'password12') data = {"username": '', "email": "email.login@gmail.com", "password": 'password12'} response = self.client.post(self.login_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL) def test_login_email_auth_method_fail_no_input(self): """ Tests login fails when you provide no username (login auth email). """ get_user_model().objects.create_user('admin', 'email.login@gmail.com', 'password12') data = {"username": "admin", "email": '', "password": 'password12'} response = self.client.post(self.login_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) @override_settings(ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL) def test_login_username_email_auth_method_fail_no_input(self): """ Tests login fails when you provide no username and no email (login auth username_email). 
""" get_user_model().objects.create_user('admin', 'email.login@gmail.com', 'password12') data = {"username": '', "email": '', "password": 'password12'} response = self.client.post(self.login_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) # need to check for token # test login with password change # test login with wrong password chaneg if fails def test_logout(self): """ Tests basic logout functionality. """ self.create_user_and_login() response = self.client.post(self.logout_url, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertEquals(response.content, '{"success":"Successfully logged out."}') def test_logout_but_already_logged_out(self): """ Tests logout when already logged out. """ self.create_user_and_login() response = self.client.post(self.logout_url, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertEquals(response.content, '{"success":"Successfully logged out."}') self.client.credentials() # remember to remove manual token credential response = self.client.post(self.logout_url, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK, response.content) self.assertEquals(response.content, '{"success":"Successfully logged out."}') def test_change_password_basic(self): """ Tests basic functionality of 'change of password'. """ self.create_user_and_login() response = self.client.post(self.password_change_url, data=self.change_password_data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertEquals(response.content, '{"success":"New password has been saved."}') def test_change_password_basic_fails_not_authorised(self): """ Tests basic functionality of 'change of password' fails if not authorised. 
""" get_user_model().objects.create_user('admin', 'admin@email.com', 'password12') response = self.client.post(self.password_change_url, data=self.change_password_data, format='json') self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED) self.assertEquals(response.content, '{"detail":"Authentication credentials were not provided."}') def common_change_password_login_fail_with_old_password(self, password_change_data): self.create_user_and_login() response = self.client.post(self.password_change_url, data=password_change_data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.client.credentials() # Remove credentials response = self.client.post(self.login_url, self.reusable_user_data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) def common_change_password_login_pass_with_new_password(self, password_change_data): self.create_user_and_login() response = self.client.post(self.password_change_url, password_change_data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.client.credentials() # Remove credentials response = self.client.post(self.login_url, self.reusable_user_data_change_password, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) def common_change_password_login_fail_with_old_password_pass_with_new_password(self, password_change_data): """ Tests change of password with old password fails but new password successes. 
""" self.create_user_and_login() response = self.client.post(self.password_change_url, password_change_data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK, response.content) self.client.credentials() # Remove credentials response = self.client.post(self.login_url, self.reusable_user_data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) response = self.client.post(self.login_url, self.reusable_user_data_change_password, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK, response.content) def test_change_password_login_fail_with_old_password(self): """ Tests change of password with old password. """ self.common_change_password_login_fail_with_old_password(self.change_password_data) def test_change_password_login_pass_with_new_password(self): """ Tests change of password with new password. """ self.common_change_password_login_pass_with_new_password(self.change_password_data) def test_change_password_login_fail_with_old_password_pass_with_new_password(self): """ Tests change of password with old password fails but new password successes. """ self.common_change_password_login_fail_with_old_password_pass_with_new_password(self.change_password_data) @override_settings(OLD_PASSWORD_FIELD_ENABLED=True) def test_change_password_old_password_field_required_old_password_field_enabled(self): """ Tests basic functionality of 'change of password' fails if old password not given as part of input (old password field enabled). 
""" self.create_user_and_login() response = self.client.post(self.password_change_url, data=self.change_password_data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEquals(response.content, '{"old_password":["This field is required."]}') @override_settings(OLD_PASSWORD_FIELD_ENABLED=True) def test_change_password_basic_old_password_field_enabled(self): """ Tests basic functionality of 'change of password' (old password enabled). """ self.create_user_and_login() response = self.client.post(self.password_change_url, data=self.change_password_data_old_password_field_enabled, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertEquals(response.content, '{"success":"New password has been saved."}') @override_settings(OLD_PASSWORD_FIELD_ENABLED=True) def test_change_password_basic_fails_not_authorised_old_password_field_enabled(self): """ Tests basic functionality of 'change of password' fails if not authorised (old password field enabled). """ get_user_model().objects.create_user('admin', 'admin@email.com', 'password12') response = self.client.post(self.password_change_url, data=self.change_password_data_old_password_field_enabled, format='json') self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED) self.assertEquals(response.content, '{"detail":"Authentication credentials were not provided."}') @override_settings(OLD_PASSWORD_FIELD_ENABLED=True) def test_change_password_login_fail_with_old_password_old_password_field_enabled(self): """ Tests change of password with old password (old password field enabled). """ self.common_change_password_login_fail_with_old_password(self.change_password_data_old_password_field_enabled) @override_settings(OLD_PASSWORD_FIELD_ENABLED=True) def test_change_password_login_pass_with_new_password_old_password_field_enabled(self): """ Tests change of password with new password (old password field enabled). 
""" self.common_change_password_login_pass_with_new_password(self.change_password_data_old_password_field_enabled) @override_settings(OLD_PASSWORD_FIELD_ENABLED=True) def test_change_password_login_fail_with_old_password_pass_with_new_password_old_password_field_enabled(self): """ Tests change of password with old password fails but new password successes (old password field enabled). """ self.common_change_password_login_fail_with_old_password_pass_with_new_password(self.change_password_data_old_password_field_enabled) """ Registrations Tests =================== """ def common_test_registration_basic(self, data): response = self.client.post(self.register_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_201_CREATED, response.content) return response @override_settings(ACCOUNT_EMAIL_REQUIRED=True, ACCOUNT_USERNAME_REQUIRED=True) def test_registration_basic(self): """ Tests basic functionality of registration. """ self.common_test_registration_basic(self.reusable_register_user_data) @override_settings(ACCOUNT_EMAIL_REQUIRED=True, ACCOUNT_USERNAME_REQUIRED=False) def test_registration_basic_no_username(self): """ Tests basic functionality of registration (no username required). """ self.common_test_registration_basic(self.reusable_register_user_data_no_username) @override_settings(ACCOUNT_EMAIL_REQUIRED=False, ACCOUNT_USERNAME_REQUIRED=True) def test_registration_basic_no_email(self): """ Tests basic functionality of registration (no username required). """ self.common_test_registration_basic(self.reusable_register_user_data_no_email) @override_settings(ACCOUNTS_REGISTRATION_OPEN=False) def test_registration_basic_registration_not_open(self): """ Tests basic registration fails if registration is closed. 
""" response = self.client.post(self.register_url, self.reusable_register_user_data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST, response.content) @override_settings(ACCOUNT_EMAIL_VERIFICATION="none") def test_registration_email_verification_not_necessary(self): """ Tests you can log in without email verification """ self.common_test_registration_basic(self.reusable_register_user_data) response = self.client.post(self.login_url, self.reusable_user_data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) @override_settings(ACCOUNT_EMAIL_VERIFICATION="optional") def test_registration_email_verification_neccessary(self): """ Tests you can log in without email verification """ self.common_test_registration_basic(self.reusable_register_user_data) response = self.client.post(self.login_url, self.reusable_user_data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) def common_test_registration(self): self.common_test_registration_basic(self.reusable_register_user_data1) response = self.client.post(self.login_url, {'email': 'admin1@email.com', 'password': 'password12'}, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) def common_test_registration_email_verification_not_necessary_email(self): self.common_test_registration_basic(self.reusable_register_user_data1) response = self.client.post(self.login_url, {'email': 'admin1@email.com', 'password': 'password12'}, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) def common_test_registration_email_verification_not_necessary_username(self): self.common_test_registration_basic(self.reusable_register_user_data1) response = self.client.post(self.login_url, {'username': 'admin1', 'password': 'password12'}, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) @override_settings(ACCOUNT_EMAIL_VERIFICATION="none", 
ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL) def test_registration_email_verification_neccessary_email(self): """ Tests you can log in without email verification """ self.common_test_registration_email_verification_not_necessary_email() @override_settings(ACCOUNT_EMAIL_VERIFICATION="optional", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL) def test_registration_email_verification_neccessary_optional_email(self): """ Tests you can log in without email verification """ self.common_test_registration_email_verification_not_necessary_email() @override_settings(ACCOUNT_EMAIL_VERIFICATION="none", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME) def test_registration_email_verification_neccessary_username(self): """ Tests you can log in without email verification """ self.common_test_registration_email_verification_not_necessary_username() @override_settings(ACCOUNT_EMAIL_VERIFICATION="optional", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME) def test_registration_email_verification_neccessary_optional_username(self): """ Tests you can log in without email verification """ self.common_test_registration_email_verification_not_necessary_username() @override_settings(ACCOUNT_EMAIL_VERIFICATION="none", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL) def test_registration_email_verification_neccessary_username_email(self): """ Tests you canT log in without email verification for username & email auth. 
""" self.common_test_registration_basic(self.reusable_register_user_data1) response = self.client.post(self.login_url, {'username': 'admin1', 'email': 'admin1@email.com', 'password': 'password12'}, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) @override_settings(ACCOUNT_EMAIL_VERIFICATION="optional", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL) def test_registration_email_verification_neccessary_optional_username_email(self): """ Tests you canT log in without email verification for username & email auth. """ self.common_test_registration_basic(self.reusable_register_user_data1) response = self.client.post(self.login_url, {'username': 'admin1', 'email': 'admin1@email.com', 'password': 'password12'}, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) @override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME) def test_registration_email_verification_necessary_login_fail_username(self): """ Tests you can log in without email verification """ self.common_test_registration_basic(self.reusable_register_user_data1) response = self.client.post(self.login_url, {'username': 'admin1', 'password': 'password12'}, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST, response.content) @override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL) def test_registration_email_verification_necessary_login_fail_email(self): """ Tests you can log in without email verification """ self.common_test_registration_basic(self.reusable_register_user_data1) response = self.client.post(self.login_url, {'email': 'admin1@email.com', 'password': 'password12'}, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST, response.content) @override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", 
ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL) def test_registration_email_verification_necessary_login_fail_username_email(self): """ Tests you can log in without email verification """ self.common_test_registration_basic({'username': 'admin_man', 'email': 'admin1@email.com', 'password1': 'password12', 'password2': 'password12'}) response = self.client.post(self.login_url, {'username': 'admin_man', 'password': 'password12'}, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) def common_registration_email_verification_neccessary_verified_login(self, login_data): mail_count = len(mail.outbox) reg_response = self.common_test_registration_basic(self.reusable_register_user_data1) self.assertEquals(len(mail.outbox), mail_count + 1) new_user = get_user_model().objects.latest('id') login_response = self.client.post(self.login_url, login_data, format='json') self.assertEquals(login_response.status_code, status.HTTP_400_BAD_REQUEST) # verify email email_confirmation = new_user.emailaddress_set.get(email=self.reusable_register_user_data1['email']).emailconfirmation_set.order_by('-created')[0] verify_response = self.client.post(self.verify_url, {'key': email_confirmation.key}, format='json') self.assertEquals(verify_response.status_code, status.HTTP_200_OK) login_response = self.client.post(self.login_url, login_data, format='json') self.assertEquals(login_response.status_code, status.HTTP_200_OK) @override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME) def test_registration_email_verification_neccessary_verified_login_username(self): """ Tests you can log in without email verification """ self.common_registration_email_verification_neccessary_verified_login({'username': 'admin1', 'password': 'password12'}) @override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL) 
def test_registration_email_verification_neccessary_verified_login_email(self): """ Tests you can log in without email verification """ self.common_registration_email_verification_neccessary_verified_login({'email': 'admin1@email.com', 'password': 'password12'}) @override_settings(ACCOUNT_EMAIL_VERIFICATION="mandatory", ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL) def test_registration_email_verification_neccessary_verified_login_username_email(self): """ Tests you can log in without email verification """ self.common_registration_email_verification_neccessary_verified_login({'username': 'admin1', 'password': 'password12'}) """ Password Reset Tests ==================== """ def test_password_reset(self): """ Test basic functionality of password reset. """ get_user_model().objects.create_user('admin', 'admin@email.com', 'password12') payload = {'email': 'admin@email.com'} response = self.client.post(self.password_reset_url, payload, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertEquals(response.content, '{"success":"Password reset e-mail has been sent."}') @override_settings(ACCOUNTS_PASSWORD_RESET_NOTIFY_EMAIL_NOT_IN_SYSTEM=True) def test_password_reset_fail_no_user_with_email_no_notify_not_in_system(self): """ Test basic functionality of password reset fails when there is no email on record (notify email not in system). """ payload = {'email': 'admin@email.com'} response = self.client.post(self.password_reset_url, payload, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEquals(response.content, '{"error":"User with email doesn\'t exist. Did not send reset email."}') @override_settings(ACCOUNTS_PASSWORD_RESET_NOTIFY_EMAIL_NOT_IN_SYSTEM=False) def test_password_reset_no_user_with_email_no_notify_not_in_system(self): """ Test basic functionality of password reset fails when there is no email on record. 
""" payload = {'email': 'admin@email.com'} response = self.client.post(self.password_reset_url, payload, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertEquals(response.content, '{"success":"Password reset e-mail has been sent."}') def test_password_reset_confirm_fail_invalid_token(self): """ Test password reset confirm fails if token is invalid. """ user = get_user_model().objects.create_user('admin', 'admin@email.com', 'password12') url_kwargs = self._generate_uid_and_token(user) data = { 'new_password1': 'new_password', 'new_password2': 'new_password', 'uid': url_kwargs['uid'], 'token': '-wrong-token-' } response = self.client.post(self.rest_password_reset_confirm_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEquals(response.content, '{"token":["Invalid value"]}') def test_password_reset_confirm_fail_invalid_uid(self): """ Test password reset confirm fails if uid is invalid. """ user = get_user_model().objects.create_user('admin', 'admin@email.com', 'password12') url_kwargs = self._generate_uid_and_token(user) data = { 'new_password1': 'new_password', 'new_password2': 'new_password', 'uid': 0, 'token': url_kwargs['token'] } response = self.client.post(self.rest_password_reset_confirm_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEquals(response.content, '{"uid":["Invalid value"]}') def test_password_reset_confirm_fail_passwords_not_the_same(self): """ Test password reset confirm fails if uid is invalid. 
""" user = get_user_model().objects.create_user('admin', 'admin@email.com', 'password12') url_kwargs = self._generate_uid_and_token(user) data = { 'new_password1': 'new_password', 'new_password2': 'new_not_the_same_password', 'uid': url_kwargs['uid'], 'token': url_kwargs['token'] } response = self.client.post(self.rest_password_reset_confirm_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEquals(response.content, '{"new_password2":["The two password fields didn\'t match."]}') def test_password_reset_confirm_login(self): """ Tests password reset confirm works -> can login afterwards. """ user = get_user_model().objects.create_user('admin', 'admin@email.com', 'password12') url_kwargs = self._generate_uid_and_token(user) data = { 'new_password1': 'new_password', 'new_password2': 'new_password', 'uid': url_kwargs['uid'], 'token': url_kwargs['token'] } response = self.client.post(self.rest_password_reset_confirm_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) response = self.client.post(self.login_url, {'username': 'admin', 'email': 'admin@email.com', 'password': 'new_password'}, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) def test_password_reset_confirm_login_fails_with_old_password(self): """ Tests password reset confirm fails with old password. 
""" user = get_user_model().objects.create_user('admin', 'admin@email.com', 'password12') url_kwargs = self._generate_uid_and_token(user) data = { 'new_password1': 'new_password', 'new_password2': 'new_password', 'uid': url_kwargs['uid'], 'token': url_kwargs['token'] } response = self.client.post(self.rest_password_reset_confirm_url, data, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) response = self.client.post(self.login_url, {'username': 'admin', 'email': 'admin@email.com', 'password': 'password12'}, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST) """ User Detail Tests ================= """ def test_user_details_get(self): """ Test to retrieve user details. """ self.create_user_and_login() response = self.client.get(self.user_url, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertEquals(response.content, '{"username":"admin","email":"admin@email.com","first_name":"","last_name":""}') def test_user_details_put(self): """ Test to put update user details. """ self.create_user_and_login() response = self.client.put(self.user_url, {"username":"changed","email":"changed@email.com","first_name":"changed","last_name":"name"}, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertEquals(response.content, '{"username":"changed","email":"changed@email.com","first_name":"changed","last_name":"name"}') def test_user_details_patch(self): """ Test to patch update user details. """ self.create_user_and_login() response = self.client.patch(self.user_url, {'username': 'changed_username', 'email': 'changed@email.com'}, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertEquals(response.content, '{"username":"changed_username","email":"changed@email.com","first_name":"","last_name":""}') def test_user_details_put_not_authenticated(self): """ Test to put update user details. 
""" get_user_model().objects.create_user('admin', 'admin@email.com', 'password12') response = self.client.put(self.user_url, {"username":"changed","email":"changed@email.com","first_name":"changed","last_name":"name"}, format='json') self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED) def test_user_details_patch_not_authenticated(self): """ Test to patch update user details. """ get_user_model().objects.create_user('admin', 'admin@email.com', 'password12') response = self.client.patch(self.user_url, {'username': 'changed_username', 'email': 'changed@email.com'}, format='json') self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED) def test_user_details_get_not_authenticated(self): """ Test to retrieve user details. """ get_user_model().objects.create_user('admin', 'admin@email.com', 'password12') response = self.client.get(self.user_url, format='json') self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED) class TestAccountsSocial(APITestCase): """ Tests normal for social login. """ urls = 'accounts.test_social_urls' def setUp(self): self.fb_login_url = reverse('fb_login') social_app = SocialApp.objects.create( provider='facebook', name='Facebook', client_id='123123123', secret='321321321', ) site = Site.objects.get_current() social_app.sites.add(site) self.graph_api_url = GRAPH_API_URL + '/me' @responses.activate def test_social_auth(self): """ Tests Social Login. 
""" resp_body = '{"id":"123123123123","first_name":"John","gender":"male","last_name":"Smith","link":"https:\\/\\/www.facebook.com\\/john.smith","locale":"en_US","name":"John Smith","timezone":2,"updated_time":"2014-08-13T10:14:38+0000","username":"john.smith","verified":true}' # noqa responses.add( responses.GET, self.graph_api_url, body=resp_body, status=200, content_type='application/json' ) users_count = get_user_model().objects.all().count() response = self.client.post(self.fb_login_url, {'access_token': 'abc123'}, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertIn('key', response.data) self.assertEqual(get_user_model().objects.all().count(), users_count + 1) @responses.activate def test_social_auth_only_one_user_created(self): """ Tests Social Login. """ resp_body = '{"id":"123123123123","first_name":"John","gender":"male","last_name":"Smith","link":"https:\\/\\/www.facebook.com\\/john.smith","locale":"en_US","name":"John Smith","timezone":2,"updated_time":"2014-08-13T10:14:38+0000","username":"john.smith","verified":true}' # noqa responses.add( responses.GET, self.graph_api_url, body=resp_body, status=200, content_type='application/json' ) users_count = get_user_model().objects.all().count() response = self.client.post(self.fb_login_url, {'access_token': 'abc123'}, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertIn('key', response.data) self.assertEqual(get_user_model().objects.all().count(), users_count + 1) # make sure that second request will not create a new user response = self.client.post(self.fb_login_url, {'access_token': 'abc123'}, format='json') self.assertEquals(response.status_code, status.HTTP_200_OK) self.assertIn('key', response.data) self.assertEqual(get_user_model().objects.all().count(), users_count + 1) @responses.activate def test_failed_social_auth(self): # fake response responses.add( responses.GET, self.graph_api_url, body='', status=400, 
content_type='application/json' ) response = self.client.post(self.fb_login_url, {'access_token': 'abc123'}, format='json') self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
"""
This page is in the table of contents.
Plugin to home the tool at beginning of each layer.

The home manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Home

==Operation==
The default 'Activate Home' checkbox is on.  When it is on, the functions described below will work, when it is off, nothing will be done.

==Settings==
===Name of Home File===
Default: home.gcode

At the beginning of each layer, home will add the commands of a gcode script with the name of the "Name of Home File" setting, if one exists.  Home does not care if the text file names are capitalized, but some file systems do not handle file name cases properly, so to be on the safe side you should give them lower case names.  Home looks for those files in the alterations folder in the .skeinforge folder in the home directory.  If it doesn't find the file it then looks in the alterations folder in the skeinforge_plugins folder.

==Examples==
The following examples home the file Screw Holder Bottom.stl.  The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and home.py.

> python home.py
This brings up the home dialog.

> python home.py Screw Holder Bottom.stl
The home tool is parsing the file:
Screw Holder Bottom.stl
..
The home tool has created the file:
..
Screw Holder Bottom_home.gcode """ from __future__ import absolute_import import __init__ from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret from fabmetheus_utilities.vector3 import Vector3 from fabmetheus_utilities import archive from fabmetheus_utilities import euclidean from fabmetheus_utilities import gcodec from fabmetheus_utilities import settings from skeinforge_application.skeinforge_utilities import skeinforge_craft from skeinforge_application.skeinforge_utilities import skeinforge_polyfile from skeinforge_application.skeinforge_utilities import skeinforge_profile import math import os import sys __author__ = 'Enrique Perez (perez_enrique@yahoo.com)' __date__ = '$Date: 2008/21/04 $' __license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html' def getCraftedText( fileName, text, repository = None ): "Home a gcode linear move file or text." return getCraftedTextFromText(archive.getTextIfEmpty(fileName, text), repository) def getCraftedTextFromText( gcodeText, repository = None ): "Home a gcode linear move text." if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'home'): return gcodeText if repository == None: repository = settings.getReadRepository( HomeRepository() ) if not repository.activateHome.value: return gcodeText return HomeSkein().getCraftedGcode(gcodeText, repository) def getNewRepository(): 'Get new repository.' return HomeRepository() def writeOutput(fileName, shouldAnalyze=True): "Home a gcode linear move file. Chain home the gcode if it is not already homed." skeinforge_craft.writeChainTextWithNounMessage(fileName, 'home', shouldAnalyze) class HomeRepository: "A class to handle the home settings." def __init__(self): "Set the default settings, execute title & settings fileName." 
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.home.html', self) self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Home', self, '') self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Home') self.activateHome = settings.BooleanSetting().getFromValue('Activate Home', self, True ) self.nameOfHomeFile = settings.StringSetting().getFromValue('Name of Home File:', self, 'home.gcode') self.executeTitle = 'Home' def execute(self): "Home button has been clicked." fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled) for fileName in fileNames: writeOutput(fileName) class HomeSkein: "A class to home a skein of extrusions." def __init__(self): self.distanceFeedRate = gcodec.DistanceFeedRate() self.extruderActive = False self.highestZ = None self.homeLines = [] self.layerCount = settings.LayerCount() self.lineIndex = 0 self.lines = None self.oldLocation = None self.shouldHome = False self.travelFeedRateMinute = 957.0 def addFloat( self, begin, end ): "Add dive to the original height." beginEndDistance = begin.distance(end) alongWay = self.absolutePerimeterWidth / beginEndDistance closeToEnd = euclidean.getIntermediateLocation( alongWay, end, begin ) closeToEnd.z = self.highestZ self.distanceFeedRate.addLine( self.distanceFeedRate.getLinearGcodeMovementWithFeedRate( self.travelFeedRateMinute, closeToEnd.dropAxis(), closeToEnd.z ) ) def addHomeTravel( self, splitLine ): "Add the home travel gcode." 
location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine) self.highestZ = max( self.highestZ, location.z ) if not self.shouldHome: return self.shouldHome = False if self.oldLocation == None: return if self.extruderActive: self.distanceFeedRate.addLine('M103') self.addHopUp( self.oldLocation ) self.distanceFeedRate.addLinesSetAbsoluteDistanceMode(self.homeLines) self.addHopUp( self.oldLocation ) self.addFloat( self.oldLocation, location ) if self.extruderActive: self.distanceFeedRate.addLine('M101') def addHopUp(self, location): "Add hop to highest point." locationUp = Vector3( location.x, location.y, self.highestZ ) self.distanceFeedRate.addLine( self.distanceFeedRate.getLinearGcodeMovementWithFeedRate( self.travelFeedRateMinute, locationUp.dropAxis(), locationUp.z ) ) def getCraftedGcode( self, gcodeText, repository ): "Parse gcode text and store the home gcode." self.repository = repository self.homeLines = settings.getAlterationFileLines(repository.nameOfHomeFile.value) if len(self.homeLines) < 1: return gcodeText self.lines = archive.getTextLines(gcodeText) self.parseInitialization( repository ) for self.lineIndex in xrange(self.lineIndex, len(self.lines)): line = self.lines[self.lineIndex] self.parseLine(line) return self.distanceFeedRate.output.getvalue() def parseInitialization( self, repository ): 'Parse gcode initialization and store the parameters.' 
for self.lineIndex in xrange(len(self.lines)): line = self.lines[self.lineIndex] splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line) firstWord = gcodec.getFirstWord(splitLine) self.distanceFeedRate.parseSplitLine(firstWord, splitLine) if firstWord == '(</extruderInitialization>)': self.distanceFeedRate.addTagBracketedProcedure('home') return elif firstWord == '(<perimeterWidth>': self.absolutePerimeterWidth = abs(float(splitLine[1])) elif firstWord == '(<travelFeedRatePerSecond>': self.travelFeedRateMinute = 60.0 * float(splitLine[1]) self.distanceFeedRate.addLine(line) def parseLine(self, line): "Parse a gcode line and add it to the bevel gcode." splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line) if len(splitLine) < 1: return firstWord = splitLine[0] if firstWord == 'G1': self.addHomeTravel(splitLine) self.oldLocation = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine) elif firstWord == '(<layer>': self.layerCount.printProgressIncrement('home') if len(self.homeLines) > 0: self.shouldHome = True elif firstWord == 'M101': self.extruderActive = True elif firstWord == 'M103': self.extruderActive = False self.distanceFeedRate.addLine(line) def main(): "Display the home dialog." if len(sys.argv) > 1: writeOutput(' '.join(sys.argv[1 :])) else: settings.startMainLoopFromConstructor(getNewRepository()) if __name__ == "__main__": main()
# Registers custom log levels and a `notice` logging API on the stdlib
# logging module at import time (module-level side effects are intentional).

__author__ = "Cyril Jaquier"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__license__ = "GPL"

import logging.handlers

# Custom levels: MSG sits just below INFO (18); TRACEDEBUG (7) and
# HEAVYDEBUG (5) sit below DEBUG (10) for increasingly verbose output.
logging.MSG = logging.INFO - 2
logging.TRACEDEBUG = 7
logging.HEAVYDEBUG = 5
logging.addLevelName(logging.MSG, 'MSG')
logging.addLevelName(logging.TRACEDEBUG, 'TRACE')
logging.addLevelName(logging.HEAVYDEBUG, 'HEAVY')

"""
Below derived from:
https://mail.python.org/pipermail/tutor/2007-August/056243.html
"""

# NOTICE sits between INFO (20) and WARNING (30).
logging.NOTICE = logging.INFO + 5
logging.addLevelName(logging.NOTICE, 'NOTICE')


# Logger.notice() and logging.notice() mirror the stdlib Logger.info()/
# logging.info() pair for the custom NOTICE level.
def _Logger_notice(self, msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'NOTICE'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

    logger.notice("Houston, we have a %s", "major disaster", exc_info=1)
    """
    if self.isEnabledFor(logging.NOTICE):
        self._log(logging.NOTICE, msg, args, **kwargs)

logging.Logger.notice = _Logger_notice


def _root_notice(msg, *args, **kwargs):
    """
    Log a message with severity 'NOTICE' on the root logger.
    """
    # mirror stdlib module-level helpers: lazily configure a default handler
    if len(logging.root.handlers) == 0:
        logging.basicConfig()
    logging.root.notice(msg, *args, **kwargs)

logging.notice = _root_notice

# Map the custom level name to the syslog 'notice' priority.
logging.handlers.SysLogHandler.priority_map['NOTICE'] = 'notice'

from time import strptime
# NOTE(review): presumably this throwaway call forces strptime's lazy
# internal import at module load, avoiding import-lock issues when it is
# first called from a worker thread — confirm before removing.
strptime("2012", "%Y")


def _init():
    # Give every unnamed level in 0..49 a compact placeholder name
    # ('#NN-Lev.') instead of the default 'Level NN'.
    for i in range(50):
        if logging.getLevelName(i).startswith('Level'):
            logging.addLevelName(i, '#%02d-Lev.' % i)

_init()
import sys, os sys.path.insert(0, os.path.abspath('..')) sys.path.insert(0, os.path.abspath('../problem/.libs')) # _pyabrt extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.viewcode'] templates_path = ['_templates'] source_suffix = '.rst' master_doc = 'index' project = u'abrt-python' copyright = u'2012, Richard Marko' version = '0.1' release = '0.1' exclude_patterns = ['_build'] pygments_style = 'sphinx' html_theme = 'default' htmlhelp_basename = 'abrt-pythondoc' latex_elements = { } latex_documents = [ ('index', 'abrt-python.tex', u'abrt-python Documentation', u'Richard Marko', 'manual'), ] man_pages = [ ('index', 'abrt-python', u'abrt-python Documentation', [u'Richard Marko'], 5) ] texinfo_documents = [ ('index', 'abrt-python', u'abrt-python Documentation', u'Richard Marko', 'abrt-python', 'One line description of project.', 'Miscellaneous'), ] def setup(app): app.connect('autodoc-process-signature', process_signature) def process_signature(app, what, name, obj, options, signature, return_annotation): if what not in ('function'): return new_params = list() for param in (x.strip() for x in signature[1:-1].split(',')): if '__' not in param: new_params.append(param) return ('(%s)' % ', '.join(new_params), return_annotation)
'''
Copyright (C) 2014 Travis DeWolf

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np


class Shell(object):
    """Task-level shell that steps a low-level controller through a list of
    targets, advancing to the next target whenever the arm gets within
    `threshold` of the current one and toggling the pen state at each switch.
    """

    def __init__(self, controller, target_list, threshold=.01, pen_down=False):
        """
        control Control instance: the controller to use
        pen_down boolean: True if the end-effector is drawing
        """
        self.controller = controller  # low-level controller being driven
        self.pen_down = pen_down
        self.target_list = target_list  # sequence of targets to visit in order
        self.threshold = threshold  # distance at which a target counts as reached
        self.not_at_start = True
        self.target_index = 0  # index of the current target in target_list

        self.set_target()

    def control(self, arm):
        """Move to a series of targets.
        """
        # when close enough to the current target, advance to the next one
        if self.controller.check_distance(arm) < self.threshold:
            if self.target_index < len(self.target_list)-1:
                self.target_index += 1
                self.set_target()
            self.controller.apply_noise = True
            # toggle drawing state on every target switch
            self.not_at_start = not self.not_at_start
            self.pen_down = not self.pen_down

        # delegate the actual control signal to the wrapped controller
        self.u = self.controller.control(arm)

        return self.u

    def set_target(self):
        """
        Set the current target for the controller.
        """
        # past the end of the list, fall back to a fixed resting target
        if self.target_index == len(self.target_list)-1:
            target = [1, 2]
        else:
            target = self.target_list[self.target_index]
        # NaN is the only value for which x != x, so this detects NaN
        # sentinel targets (pen-up markers) without needing isnan()
        if target[0] != target[0]:  # if it's NANs
            # skip the NaN sentinel and recurse onto the next real target
            self.target_index += 1
            self.set_target()
        else:
            self.controller.target = target
import sys
import datetime

from core.transmissionrpc.constants import PRIORITY, RATIO_LIMIT, IDLE_LIMIT
from core.transmissionrpc.utils import Field, format_timedelta

from six import integer_types, string_types, text_type, iteritems


def get_status_old(code):
    """Get the torrent status using old status codes"""
    mapping = {
        (1 << 0): 'check pending',
        (1 << 1): 'checking',
        (1 << 2): 'downloading',
        (1 << 3): 'seeding',
        (1 << 4): 'stopped',
    }
    return mapping[code]


def get_status_new(code):
    """Get the torrent status using new status codes"""
    mapping = {
        0: 'stopped',
        1: 'check pending',
        2: 'checking',
        3: 'download pending',
        4: 'downloading',
        5: 'seed pending',
        6: 'seeding',
    }
    return mapping[code]


class Torrent(object):
    """
    Torrent is a class holding the data received from Transmission regarding a bittorrent transfer.

    All fetched torrent fields are accessible through this class using attributes.
    This class has a few convenience properties using the torrent data.
    """

    def __init__(self, client, fields):
        if 'id' not in fields:
            raise ValueError('Torrent requires an id')
        self._fields = {}
        self._update_fields(fields)
        self._incoming_pending = False
        self._outgoing_pending = False
        self._client = client

    def _get_name_string(self, codec=None):
        """Get the name"""
        if codec is None:
            codec = sys.getdefaultencoding()
        name = None
        # try to find name
        if 'name' in self._fields:
            name = self._fields['name'].value
        # if name is unicode, try to decode
        if isinstance(name, text_type):
            try:
                name = name.encode(codec)
            except UnicodeError:
                name = None
        return name

    def __repr__(self):
        tid = self._fields['id'].value
        name = self._get_name_string()
        if isinstance(name, str):
            return '<Torrent {0:d} \"{1}\">'.format(tid, name)
        else:
            return '<Torrent {0:d}>'.format(tid)

    def __str__(self):
        name = self._get_name_string()
        if isinstance(name, str):
            return 'Torrent \"{0}\"'.format(name)
        else:
            return 'Torrent'

    def __copy__(self):
        return Torrent(self._client, self._fields)

    def __getattr__(self, name):
        try:
            return self._fields[name].value
        except KeyError:
            raise AttributeError('No attribute {0}'.format(name))

    def _rpc_version(self):
        """Get the Transmission RPC API version."""
        if self._client:
            return self._client.rpc_version
        return 2

    def _dirty_fields(self):
        """Enumerate changed fields"""
        outgoing_keys = ['bandwidthPriority', 'downloadLimit', 'downloadLimited', 'peer_limit', 'queuePosition',
                         'seedIdleLimit', 'seedIdleMode', 'seedRatioLimit', 'seedRatioMode', 'uploadLimit',
                         'uploadLimited']
        fields = []
        for key in outgoing_keys:
            if key in self._fields and self._fields[key].dirty:
                fields.append(key)
        return fields

    def _push(self):
        """Push changed fields to the server"""
        dirty = self._dirty_fields()
        args = {}
        for key in dirty:
            args[key] = self._fields[key].value
            self._fields[key] = self._fields[key]._replace(dirty=False)
        if len(args) > 0:
            self._client.change_torrent(self.id, **args)

    def _update_fields(self, other):
        """
        Update the torrent data from a Transmission JSON-RPC arguments dictionary
        """
        if isinstance(other, dict):
            for key, value in iteritems(other):
                self._fields[key.replace('-', '_')] = Field(value, False)
        elif isinstance(other, Torrent):
            for key in list(other._fields.keys()):
                self._fields[key] = Field(other._fields[key].value, False)
        else:
            raise ValueError('Cannot update with supplied data')
        self._incoming_pending = False

    def _status(self):
        """Get the torrent status"""
        code = self._fields['status'].value
        if self._rpc_version() >= 14:
            return get_status_new(code)
        else:
            return get_status_old(code)

    def files(self):
        """
        Get list of files for this torrent.

        This function returns a dictionary with file information for each file.
        The file information is has following fields:
        ::

            {
                <file id>: {
                    'name': <file name>,
                    'size': <file size in bytes>,
                    'completed': <bytes completed>,
                    'priority': <priority ('high'|'normal'|'low')>,
                    'selected': <selected for download>
                }
                ...
            }
        """
        result = {}
        if 'files' in self._fields:
            files = self._fields['files'].value
            indices = range(len(files))
            priorities = self._fields['priorities'].value
            wanted = self._fields['wanted'].value
            for item in zip(indices, files, priorities, wanted):
                selected = True if item[3] else False
                priority = PRIORITY[item[2]]
                result[item[0]] = {
                    'selected': selected,
                    'priority': priority,
                    'size': item[1]['length'],
                    'name': item[1]['name'],
                    'completed': item[1]['bytesCompleted']}
        return result

    @property
    def status(self):
        """
        Returns the torrent status. Is either one of 'check pending', 'checking',
        'downloading', 'seeding' or 'stopped'. The first two is related to verification.
        """
        return self._status()

    @property
    def progress(self):
        """Get the download progress in percent."""
        try:
            size = self._fields['sizeWhenDone'].value
            left = self._fields['leftUntilDone'].value
            return 100.0 * (size - left) / float(size)
        except ZeroDivisionError:
            return 0.0

    @property
    def ratio(self):
        """Get the upload/download ratio."""
        return float(self._fields['uploadRatio'].value)

    @property
    def eta(self):
        """Get the "eta" as datetime.timedelta."""
        eta = self._fields['eta'].value
        if eta >= 0:
            return datetime.timedelta(seconds=eta)
        else:
            raise ValueError('eta not valid')

    @property
    def date_active(self):
        """Get the attribute "activityDate" as datetime.datetime."""
        return datetime.datetime.fromtimestamp(self._fields['activityDate'].value)

    @property
    def date_added(self):
        """Get the attribute "addedDate" as datetime.datetime."""
        return datetime.datetime.fromtimestamp(self._fields['addedDate'].value)

    @property
    def date_started(self):
        """Get the attribute "startDate" as datetime.datetime."""
        return datetime.datetime.fromtimestamp(self._fields['startDate'].value)

    @property
    def date_done(self):
        """Get the attribute "doneDate" as datetime.datetime."""
        return datetime.datetime.fromtimestamp(self._fields['doneDate'].value)

    def format_eta(self):
        """
        Returns the attribute *eta* formatted as a string.

        * If eta is -1 the result is 'not available'
        * If eta is -2 the result is 'unknown'
        * Otherwise eta is formatted as <days> <hours>:<minutes>:<seconds>.
        """
        eta = self._fields['eta'].value
        if eta == -1:
            return 'not available'
        elif eta == -2:
            return 'unknown'
        else:
            return format_timedelta(self.eta)

    def _get_download_limit(self):
        """
        Get the download limit.
        Can be a number or None.
        """
        if self._fields['downloadLimited'].value:
            return self._fields['downloadLimit'].value
        else:
            return None

    def _set_download_limit(self, limit):
        """
        Get the download limit.
        Can be a number, 'session' or None.
        """
        if isinstance(limit, integer_types):
            self._fields['downloadLimited'] = Field(True, True)
            self._fields['downloadLimit'] = Field(limit, True)
            self._push()
        elif limit is None:
            self._fields['downloadLimited'] = Field(False, True)
            self._push()
        else:
            raise ValueError("Not a valid limit")

    download_limit = property(_get_download_limit, _set_download_limit, None,
                              "Download limit in Kbps or None. This is a mutator.")

    def _get_peer_limit(self):
        """
        Get the peer limit.
        """
        return self._fields['peer_limit'].value

    def _set_peer_limit(self, limit):
        """
        Set the peer limit.
        """
        if isinstance(limit, integer_types):
            self._fields['peer_limit'] = Field(limit, True)
            self._push()
        else:
            raise ValueError("Not a valid limit")

    peer_limit = property(_get_peer_limit, _set_peer_limit, None,
                          "Peer limit. This is a mutator.")

    def _get_priority(self):
        """
        Get the priority as string.
        Can be one of 'low', 'normal', 'high'.
        """
        return PRIORITY[self._fields['bandwidthPriority'].value]

    def _set_priority(self, priority):
        """
        Set the priority as string.
        Can be one of 'low', 'normal', 'high'.
        """
        if isinstance(priority, string_types):
            self._fields['bandwidthPriority'] = Field(PRIORITY[priority], True)
            self._push()

    priority = property(_get_priority, _set_priority, None,
                        "Bandwidth priority as string. Can be one of 'low', 'normal', 'high'. This is a mutator.")

    def _get_seed_idle_limit(self):
        """
        Get the seed idle limit in minutes.
        """
        return self._fields['seedIdleLimit'].value

    def _set_seed_idle_limit(self, limit):
        """
        Set the seed idle limit in minutes.
        """
        if isinstance(limit, integer_types):
            self._fields['seedIdleLimit'] = Field(limit, True)
            self._push()
        else:
            raise ValueError("Not a valid limit")

    seed_idle_limit = property(_get_seed_idle_limit, _set_seed_idle_limit, None,
                               "Torrent seed idle limit in minutes. Also see seed_idle_mode. This is a mutator.")

    def _get_seed_idle_mode(self):
        """
        Get the seed ratio mode as string. Can be one of 'global', 'single' or 'unlimited'.
        """
        return IDLE_LIMIT[self._fields['seedIdleMode'].value]

    def _set_seed_idle_mode(self, mode):
        """
        Set the seed ratio mode as string. Can be one of 'global', 'single' or 'unlimited'.
        """
        # CONSISTENCY FIX: use six.string_types (as _set_priority does) so
        # unicode strings are accepted on Python 2 as well as str on Python 3.
        if isinstance(mode, string_types):
            self._fields['seedIdleMode'] = Field(IDLE_LIMIT[mode], True)
            self._push()
        else:
            raise ValueError("Not a valid mode")

    seed_idle_mode = property(_get_seed_idle_mode, _set_seed_idle_mode, None,
                              """
        Seed idle mode as string. Can be one of 'global', 'single' or 'unlimited'.

         * global, use session seed idle limit.
         * single, use torrent seed idle limit. See seed_idle_limit.
         * unlimited, no seed idle limit.

        This is a mutator.
        """
    )

    def _get_seed_ratio_limit(self):
        """
        Get the seed ratio limit as float.
        """
        return float(self._fields['seedRatioLimit'].value)

    def _set_seed_ratio_limit(self, limit):
        """
        Set the seed ratio limit as float.
        """
        if isinstance(limit, (integer_types, float)) and limit >= 0.0:
            self._fields['seedRatioLimit'] = Field(float(limit), True)
            self._push()
        else:
            raise ValueError("Not a valid limit")

    seed_ratio_limit = property(_get_seed_ratio_limit, _set_seed_ratio_limit, None,
                                "Torrent seed ratio limit as float. Also see seed_ratio_mode. This is a mutator.")

    def _get_seed_ratio_mode(self):
        """
        Get the seed ratio mode as string. Can be one of 'global', 'single' or 'unlimited'.
        """
        return RATIO_LIMIT[self._fields['seedRatioMode'].value]

    def _set_seed_ratio_mode(self, mode):
        """
        Set the seed ratio mode as string. Can be one of 'global', 'single' or 'unlimited'.
        """
        # CONSISTENCY FIX: string_types instead of bare str (see seed_idle_mode).
        if isinstance(mode, string_types):
            self._fields['seedRatioMode'] = Field(RATIO_LIMIT[mode], True)
            self._push()
        else:
            raise ValueError("Not a valid mode")

    seed_ratio_mode = property(_get_seed_ratio_mode, _set_seed_ratio_mode, None,
                               """
        Seed ratio mode as string. Can be one of 'global', 'single' or 'unlimited'.

         * global, use session seed ratio limit.
         * single, use torrent seed ratio limit. See seed_ratio_limit.
         * unlimited, no seed ratio limit.

        This is a mutator.
        """
    )

    def _get_upload_limit(self):
        """
        Get the upload limit.
        Can be a number or None.
        """
        if self._fields['uploadLimited'].value:
            return self._fields['uploadLimit'].value
        else:
            return None

    def _set_upload_limit(self, limit):
        """
        Set the upload limit.
        Can be a number, 'session' or None.
        """
        if isinstance(limit, integer_types):
            self._fields['uploadLimited'] = Field(True, True)
            self._fields['uploadLimit'] = Field(limit, True)
            self._push()
        elif limit is None:
            self._fields['uploadLimited'] = Field(False, True)
            self._push()
        else:
            raise ValueError("Not a valid limit")

    upload_limit = property(_get_upload_limit, _set_upload_limit, None,
                            "Upload limit in Kbps or None. This is a mutator.")

    def _get_queue_position(self):
        """Get the queue position for this torrent."""
        if self._rpc_version() >= 14:
            return self._fields['queuePosition'].value
        else:
            return 0

    def _set_queue_position(self, position):
        """Set the queue position for this torrent."""
        if self._rpc_version() >= 14:
            if isinstance(position, integer_types):
                self._fields['queuePosition'] = Field(position, True)
                self._push()
            else:
                raise ValueError("Not a valid position")
        else:
            pass

    queue_position = property(_get_queue_position, _set_queue_position, None, "Queue position")

    def update(self, timeout=None):
        """Update the torrent information."""
        self._push()
        torrent = self._client.get_torrent(self.id, timeout=timeout)
        self._update_fields(torrent)

    def start(self, bypass_queue=False, timeout=None):
        """
        Start the torrent.
        """
        self._incoming_pending = True
        self._client.start_torrent(self.id, bypass_queue=bypass_queue, timeout=timeout)

    def stop(self, timeout=None):
        """Stop the torrent."""
        self._incoming_pending = True
        self._client.stop_torrent(self.id, timeout=timeout)

    def move_data(self, location, timeout=None):
        """Move torrent data to location."""
        self._incoming_pending = True
        self._client.move_torrent_data(self.id, location, timeout=timeout)

    def locate_data(self, location, timeout=None):
        """Locate torrent data at location."""
        self._incoming_pending = True
        self._client.locate_torrent_data(self.id, location, timeout=timeout)
import time

import numpy

from nupic.bindings.math import GetNTAReal
from nupic.research.monitor_mixin.monitor_mixin_base import MonitorMixinBase
from nupic.research.monitor_mixin.temporal_memory_monitor_mixin import (
  TemporalMemoryMonitorMixin)

from sensorimotor.fast_general_temporal_memory import (
  FastGeneralTemporalMemory as GeneralTemporalMemory)
from sensorimotor.temporal_pooler import TemporalPooler
from sensorimotor.temporal_pooler_monitor_mixin import (
  TemporalPoolerMonitorMixin)


class MonitoredGeneralTemporalMemory(TemporalMemoryMonitorMixin,
                                     GeneralTemporalMemory):
  """Layer 4 temporal memory with monitoring/tracing mixed in."""
  pass


class MonitoredTemporalPooler(TemporalPoolerMonitorMixin, TemporalPooler):
  """Layer 3 temporal pooler with monitoring/tracing mixed in."""
  pass


"""
Experiment runner class for running networks with layer 4 and layer 3. The
client is responsible for setting up universes, agents, and worlds.  This
class just sets up and runs the HTM learning algorithms.
"""

# Numpy dtype used by the NuPIC bindings for real-valued arrays.
realDType = GetNTAReal()


class SensorimotorExperimentRunner(object):
  # Sets up and drives a two-layer network: a layer 4 temporal memory (TM)
  # fed sensor + motor patterns, and a layer 3 temporal pooler (TP) fed the
  # TM's cell activity.

  DEFAULT_TM_PARAMS = {
    # These should be decent for most experiments, shouldn't need to override
    # these too often. Might want to increase cellsPerColumn for capacity
    # experiments.
    "cellsPerColumn": 8,
    "initialPermanence": 0.5,
    "connectedPermanence": 0.6,
    "permanenceIncrement": 0.1,
    "permanenceDecrement": 0.02,

    # We will force client to override these
    "columnDimensions": "Sorry",
    "minThreshold": "Sorry",
    "maxNewSynapseCount": "Sorry",
    "activationThreshold": "Sorry",
  }

  DEFAULT_TP_PARAMS = {
    # Need to check these parameters and find stable values that will be
    # consistent across most experiments.
    "synPermInactiveDec": 0,  # TODO: Check we can use class default here.
    "synPermActiveInc": 0.001,  # TODO: Check we can use class default here.
    "synPredictedInc": 0.5,  # TODO: Why so high??
    "potentialPct": 0.9,  # TODO: need to check impact of this for pooling
    "initConnectedPct": 0.5,  # TODO: need to check impact of this for pooling
    "poolingThreshUnpredicted": 0.0,

    # We will force client to override these
    "numActiveColumnsPerInhArea": "Sorry",
  }

  def __init__(self, tmOverrides=None, tpOverrides=None, seed=42):
    """
    Build the layer 4 TM and layer 3 TP.

    @param tmOverrides (dict) Overrides applied on top of DEFAULT_TM_PARAMS
    @param tpOverrides (dict) Overrides applied on top of DEFAULT_TP_PARAMS
    @param seed        (int)  RNG seed passed to both layers

    @raises RuntimeError if a required ("Sorry") param was not overridden.
    """
    # Initialize Layer 4 temporal memory
    params = dict(self.DEFAULT_TM_PARAMS)
    params.update(tmOverrides or {})
    params["seed"] = seed
    self._checkParams(params)
    self.tm = MonitoredGeneralTemporalMemory(mmName="TM", **params)

    # Initialize Layer 3 temporal pooler
    params = dict(self.DEFAULT_TP_PARAMS)
    # The TP's input space is the TM's full cell population.
    params["inputDimensions"] = [self.tm.numberOfCells()]
    params["potentialRadius"] = self.tm.numberOfCells()
    params["seed"] = seed
    params.update(tpOverrides or {})
    self._checkParams(params)
    self.tp = MonitoredTemporalPooler(mmName="TP", **params)

  def _checkParams(self, params):
    # Raise if the client failed to override a required ("Sorry") parameter.
    for k,v in params.iteritems():
      if v == "Sorry":
        raise RuntimeError("Param "+k+" must be specified")

  def feedTransition(self, sensorPattern, motorPattern, sensorimotorPattern,
                     tmLearn=True, tpLearn=None, sequenceLabel=None):
    """
    Feed a single (sensor, motor) transition through the network.

    A sensorPattern of None marks a sequence boundary and resets both layers.
    When tpLearn is None the TP is skipped entirely.
    """
    if sensorPattern is None:
      self.tm.reset()
      self.tp.reset()
    else:
      # Feed the TM
      self.tm.compute(sensorPattern,
                      activeExternalCells=motorPattern,
                      formInternalConnections=True,
                      learn=tmLearn,
                      sequenceLabel=sequenceLabel)

      # If requested, feed the TP
      if tpLearn is not None:
        tpInputVector, burstingColumns, correctlyPredictedCells = (
          self.formatInputForTP())
        activeArray = numpy.zeros(self.tp.getNumColumns())

        self.tp.compute(tpInputVector,
                        tpLearn,
                        activeArray,
                        burstingColumns,
                        correctlyPredictedCells,
                        sequenceLabel=sequenceLabel)

  def feedLayers(self, sequences, tmLearn=True, tpLearn=None, verbosity=0,
                 showProgressInterval=None):
    """
    Feed the given sequences to the HTM algorithms.

    @param tmLearn:   (bool) Either False, or True
    @param tpLearn:   (None,bool) Either None, False, or True. If None,
                      temporal pooler will be skipped.
    @param showProgressInterval: (int) Prints progress every N iterations,
                      where N is the value of this param
    """
    (sensorSequence,
     motorSequence,
     sensorimotorSequence,
     sequenceLabels) = sequences

    currentTime = time.time()

    for i in xrange(len(sensorSequence)):
      sensorPattern = sensorSequence[i]
      motorPattern = motorSequence[i]
      sensorimotorPattern = sensorimotorSequence[i]
      sequenceLabel = sequenceLabels[i]

      self.feedTransition(sensorPattern, motorPattern, sensorimotorPattern,
                          tmLearn=tmLearn, tpLearn=tpLearn,
                          sequenceLabel=sequenceLabel)

      if (showProgressInterval is not None and
          i > 0 and
          i % showProgressInterval == 0):
        print ("Fed {0} / {1} elements of the sequence "
               "in {2:0.2f} seconds.".format(
                 i, len(sensorSequence), time.time() - currentTime))
        currentTime = time.time()

    if verbosity >= 2:
      # Print default TM traces
      traces = self.tm.mmGetDefaultTraces(verbosity=verbosity)
      print MonitorMixinBase.mmPrettyPrintTraces(
        traces, breakOnResets=self.tm.mmGetTraceResets())

      if tpLearn is not None:
        # Print default TP traces
        traces = self.tp.mmGetDefaultTraces(verbosity=verbosity)
        print MonitorMixinBase.mmPrettyPrintTraces(
          traces, breakOnResets=self.tp.mmGetTraceResets())
      print

  @staticmethod
  def generateSequences(length, agents, numSequences=1, verbosity=0):
    """
    @param length (int)           Length of each sequence to generate, one for
                                  each agent
    @param agents (AbstractAgent) Agents acting in their worlds

    @return (tuple) (sensor sequence, motor sequence, sensorimotor sequence,
                     sequence labels)
    """
    sensorSequence = []
    motorSequence = []
    sensorimotorSequence = []
    sequenceLabels = []

    for _ in xrange(numSequences):
      for agent in agents:
        s,m,sm = agent.generateSensorimotorSequence(length,
                                                    verbosity=verbosity)
        sensorSequence += s
        motorSequence += m
        sensorimotorSequence += sm
        # Every element of this sub-sequence is labeled with the agent's world.
        sequenceLabels += [agent.world.toString()] * length
        # None marks the sequence boundary (triggers a reset downstream).
        # NOTE(review): placement of these appends relative to the agent loop
        # is reconstructed from collapsed formatting -- confirm against the
        # original file.
        sensorSequence.append(None)
        motorSequence.append(None)
        sensorimotorSequence.append(None)
        sequenceLabels.append(None)

    return sensorSequence, motorSequence, sensorimotorSequence, sequenceLabels

  def formatInputForTP(self):
    """
    Given an instance of the TM, format the information we need to send to the
    TP.
    """
    # all currently active cells in layer 4
    tpInputVector = numpy.zeros(
      self.tm.numberOfCells()).astype(realDType)
    tpInputVector[list(self.tm.activeCellsIndices())] = 1

    # bursting columns in layer 4
    burstingColumns = numpy.zeros(
      self.tm.numberOfColumns()).astype(realDType)
    burstingColumns[list(self.tm.unpredictedActiveColumns)] = 1

    # correctly predicted cells in layer 4
    correctlyPredictedCells = numpy.zeros(
      self.tm.numberOfCells()).astype(realDType)
    correctlyPredictedCells[list(self.tm.predictedActiveCellsIndices())] = 1

    return tpInputVector, burstingColumns, correctlyPredictedCells

  def formatRow(self, x, formatString = "%d", rowSize = 700):
    """
    Utility routine for pretty printing large vectors
    """
    s = ''
    for c,v in enumerate(x):
      if c > 0 and c % 7 == 0:
        s += ' '
      if c > 0 and c % rowSize == 0:
        s += '\n'
      s += formatString % v
    s += ' '
    return s
"""
Tests for Blocks api.py
"""

from django.test.client import RequestFactory

from course_blocks.tests.helpers import EnableTransformerRegistryMixin
from student.tests.factories import UserFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import SampleCourseFactory

from ..api import get_blocks


class TestGetBlocks(EnableTransformerRegistryMixin, SharedModuleStoreTestCase):
    """
    Tests for the get_blocks function
    """
    @classmethod
    def setUpClass(cls):
        super(TestGetBlocks, cls).setUpClass()
        cls.course = SampleCourseFactory.create()

        # hide the html block
        cls.html_block = cls.store.get_item(cls.course.id.make_usage_key('html', 'html_x1a_1'))
        cls.html_block.visible_to_staff_only = True
        cls.store.update_item(cls.html_block, ModuleStoreEnum.UserID.test)

    def setUp(self):
        super(TestGetBlocks, self).setUp()
        # A plain (non-staff) user issuing the request.
        self.user = UserFactory.create()
        self.request = RequestFactory().get("/dummy")
        self.request.user = self.user

    def test_basic(self):
        # With a non-staff user, the staff-only html block must be filtered out.
        blocks = get_blocks(self.request, self.course.location, self.user)
        self.assertEquals(blocks['root'], unicode(self.course.location))

        # subtract for (1) the orphaned course About block and (2) the hidden Html block
        self.assertEquals(len(blocks['blocks']), len(self.store.get_items(self.course.id)) - 2)
        self.assertNotIn(unicode(self.html_block.location), blocks['blocks'])

    def test_no_user(self):
        # With no user supplied, access filtering is skipped and the hidden
        # block is returned.
        blocks = get_blocks(self.request, self.course.location)
        self.assertIn(unicode(self.html_block.location), blocks['blocks'])

    def test_access_before_api_transformer_order(self):
        """
        Tests the order of transformers: access checks are made before the
        api transformer is applied.
        """
        blocks = get_blocks(self.request, self.course.location, self.user, nav_depth=5, requested_fields=['nav_depth'])
        vertical_block = self.store.get_item(self.course.id.make_usage_key('vertical', 'vertical_x1a'))
        problem_block = self.store.get_item(self.course.id.make_usage_key('problem', 'problem_x1a_1'))

        # The vertical's descendants must include the visible problem but not
        # the access-filtered html block.
        vertical_descendants = blocks['blocks'][unicode(vertical_block.location)]['descendants']

        self.assertIn(unicode(problem_block.location), vertical_descendants)
        self.assertNotIn(unicode(self.html_block.location), vertical_descendants)
"""This *was* the parser for the current HTML format on parl.gc.ca. But now we have XML. See parl_document.py. This module is organized like so: __init__.py - utility functions, simple parse interface common.py - infrastructure used in the parsers, i.e. regexes current.py - parser for the Hansard format used from 2006 to the present old.py - (fairly crufty) parser for the format used from 1994 to 2006 """ from parliament.imports.hans_old.common import * import logging logger = logging.getLogger(__name__) class HansardParser2009(HansardParser): def __init__(self, hansard, html): for regex in STARTUP_RE_2009: html = re.sub(regex[0], regex[1], html) super(HansardParser2009, self).__init__(hansard, html) for x in self.soup.findAll('a', 'deleteMe'): x.findParent('div').extract() def process_related_link(self, tag, string, current_politician=None): #print "PROCESSING RELATED for %s" % string resid = re.search(r'ResourceID=(\d+)', tag['href']) restype = re.search(r'ResourceType=(Document|Affiliation)', tag['href']) if not resid and restype: return string resid, restype = int(resid.group(1)), restype.group(1) if restype == 'Document': try: bill = Bill.objects.get_by_legisinfo_id(resid) except Bill.DoesNotExist: match = re.search(r'\b[CS]\-\d+[A-E]?\b', string) if not match: logger.error("Invalid bill link %s" % string) return string bill = Bill.objects.create_temporary_bill(legisinfo_id=resid, number=match.group(0), session=self.hansard.session) except Exception, e: print "Related bill search failed for callback %s" % resid print repr(e) return string return u'<bill id="%d" name="%s">%s</bill>' % (bill.id, escape(bill.name), string) elif restype == 'Affiliation': try: pol = Politician.objects.getByParlID(resid) except Politician.DoesNotExist: print "Related politician search failed for callback %s" % resid if getattr(settings, 'PARLIAMENT_LABEL_FAILED_CALLBACK', False): # FIXME migrate away from internalxref? 
InternalXref.objects.get_or_create(schema='pol_parlid', int_value=resid, target_id=-1) return string if pol == current_politician: return string # When someone mentions her riding, don't link back to her return u'<pol id="%d" name="%s">%s</pol>' % (pol.id, escape(pol.name), string) def get_text(self, cursor): text = u'' for string in cursor.findAll(text=parsetools.r_hasText): if string.parent.name == 'a' and string.parent['class'] == 'WebOption': text += self.process_related_link(string.parent, string, self.t['politician']) else: text += unicode(string) return text def parse(self): super(HansardParser2009, self).parse() # Initialize variables t = ParseTracker() self.t = t member_refs = {} # Get the date c = self.soup.find(text='OFFICIAL REPORT (HANSARD)').findNext('h2') self.date = datetime.datetime.strptime(c.string.strip(), "%A, %B %d, %Y").date() self.hansard.date = self.date self.hansard.save() c = c.findNext(text=r_housemet) match = re.search(r_housemet, c.string) t['timestamp'] = self.houseTime(match.group(1), match.group(2)) t.setNext('timestamp', t['timestamp']) # Move the pointer to the start c = c.next # And start the big loop while c is not None: # It's a string if not hasattr(c, 'name'): pass # Heading elif c.name == 'h2': c = c.next if not parsetools.isString(c): raise ParseException("Expecting string right after h2") t.setNext('heading', parsetools.titleIfNecessary(parsetools.tameWhitespace(c.string.strip()))) # Topic elif c.name == 'h3': top = c.find(text=r_letter) #if not parsetools.isString(c): # check if it's an empty header # if c.parent.find(text=r_letter): # raise ParseException("Expecting string right after h3") if top is not None: c = top t['topic_set'] = True t.setNext('topic', parsetools.titleIfNecessary(parsetools.tameWhitespace(c.string.strip()))) elif c.name == 'h4': if c.string == 'APPENDIX': self.saveStatement(t) print "Appendix reached -- we're done!" 
break # Timestamp elif c.name == 'a' and c.has_key('name') and c['name'].startswith('T'): match = re.search(r'^T(\d\d)(\d\d)$', c['name']) if match: t.setNext('timestamp', parsetools.time_to_datetime( hour=int(match.group(1)), minute=int(match.group(2)), date=self.date)) else: raise ParseException("Couldn't match time %s" % c.attrs['name']) elif c.name == 'b' and c.string: # Something to do with written answers match = r_honorific.search(c.string) if match: # It's a politician asking or answering a question # We don't get a proper link here, so this has to be a name match polname = re.sub(r'\(.+\)', '', match.group(2)).strip() self.saveStatement(t) t['member_title'] = c.string.strip() t['written_question'] = True try: pol = Politician.objects.get_by_name(polname, session=self.hansard.session) t['politician'] = pol t['member'] = ElectedMember.objects.get_by_pol(politician=pol, date=self.date) except Politician.DoesNotExist: print "WARNING: No name match for %s" % polname except Politician.MultipleObjectsReturned: print "WARNING: Multiple pols for %s" % polname else: if not c.string.startswith('Question'): print "WARNING: Unexplained boldness: %s" % c.string # div -- the biggie elif c.name == 'div': origdiv = c if c.find('b'): # We think it's a new speaker # Save the current buffer self.saveStatement(t) c = c.find('b') if c.find('a'): # There's a link... c = c.find('a') match = re.search(r'ResourceType=Affiliation&ResourceID=(\d+)', c['href']) if match and c.find(text=r_letter): parlwebid = int(match.group(1)) # We have the parl ID. First, see if we already know this ID. pol = Politician.objects.getByParlID(parlwebid, lookOnline=False) if pol is None: # We don't. Try to do a quick name match first (if flags say so) if not GET_PARLID_ONLINE: who = c.next.string match = re.search(r_honorific, who) if match: polname = re.sub(r'\(.+\)', '', match.group(2)).strip() try: #print "Looking for %s..." 
% polname, pol = Politician.objects.get_by_name(polname, session=self.hansard.session) #print "found." except Politician.DoesNotExist: pass except Politician.MultipleObjectsReturned: pass if pol is None: # Still no match. Go online... try: pol = Politician.objects.getByParlID(parlwebid, session=self.hansard.session) except Politician.DoesNotExist: print "WARNING: Couldn't find politician for ID %d" % parlwebid if pol is not None: t['member'] = ElectedMember.objects.get_by_pol(politician=pol, date=self.date) t['politician'] = pol c = c.next if not parsetools.isString(c): raise Exception("Expecting string in b for member name") t['member_title'] = c.strip() #print c if t['member_title'].endswith(':'): # Remove colon in e.g. Some hon. members: t['member_title'] = t['member_title'][:-1] # Sometimes we don't get a link for short statements -- see if we can identify by backreference if t['member']: member_refs[t['member_title']] = t['member'] # Also save a backref w/o position/riding member_refs[re.sub(r'\s*\(.+\)\s*', '', t['member_title'])] = t['member'] elif t['member_title'] in member_refs: t['member'] = member_refs[t['member_title']] t['politician'] = t['member'].politician c.findParent('b').extract() # We've got the title, now get the rest of the paragraph c = origdiv t.addText(self.get_text(c)) else: # There should be text in here if c.find('div'): if c.find('div', 'Footer'): # We're done! self.saveStatement(t) print "Footer div reached -- done!" break raise Exception("I wasn't expecting another div in here") txt = self.get_text(c).strip() if r_proceedings.search(txt): self.saveStatement(t) self.saveProceedingsStatement(txt, t) else: t.addText(txt, blockquote=bool(c.find('small'))) else: #print c.name if c.name == 'b': print "B: ", print c #if c.name == 'p': # print "P: ", # print c c = c.next return self.statements
import re
import uuid

from xmodule.assetstore.assetmgr import AssetManager

XASSET_LOCATION_TAG = 'c4x'
XASSET_SRCREF_PREFIX = 'xasset:'

XASSET_THUMBNAIL_TAIL_NAME = '.jpg'

# Chunk size (bytes) used when streaming asset data.
STREAM_DATA_CHUNK_SIZE = 1024

import os
import logging
import StringIO
from urlparse import urlparse, urlunparse, parse_qsl
from urllib import urlencode

from opaque_keys.edx.locator import AssetLocator
from opaque_keys.edx.keys import CourseKey, AssetKey
from opaque_keys import InvalidKeyError
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.exceptions import NotFoundError
from PIL import Image


class StaticContent(object):
    """In-memory representation of one static asset (file) in a course."""

    def __init__(self, loc, name, content_type, data, last_modified_at=None, thumbnail_location=None, import_path=None,
                 length=None, locked=False):
        self.location = loc
        self.name = name  # a display string which can be edited, and thus not part of the location which needs to be fixed
        self.content_type = content_type
        self._data = data
        self.length = length
        self.last_modified_at = last_modified_at
        self.thumbnail_location = thumbnail_location
        # optional information about where this file was imported from. This is needed to support import/export
        # cycles
        self.import_path = import_path
        self.locked = locked

    @property
    def is_thumbnail(self):
        # True when this asset is itself a generated thumbnail.
        return self.location.category == 'thumbnail'

    @staticmethod
    def generate_thumbnail_name(original_name, dimensions=None):
        """
        Return the conventional thumbnail filename for an asset.

        - original_name: Name of the asset (typically its location.name)
        - dimensions: `None` or a tuple of (width, height) in pixels
        """
        name_root, ext = os.path.splitext(original_name)
        if not ext == XASSET_THUMBNAIL_TAIL_NAME:
            # Fold the original extension into the name (dots become dashes).
            name_root = name_root + ext.replace(u'.', u'-')

        if dimensions:
            width, height = dimensions  # pylint: disable=unpacking-non-sequence
            name_root += "-{}x{}".format(width, height)

        return u"{name_root}{extension}".format(
            name_root=name_root,
            extension=XASSET_THUMBNAIL_TAIL_NAME,
        )

    @staticmethod
    def compute_location(course_key, path, revision=None, is_thumbnail=False):
        """
        Constructs a location object for static content.

        - course_key: the course that this asset belongs to
        - path: is the name of the static asset
        - revision: is the object's revision information
        - is_thumbnail: is whether or not we want the thumbnail version of this asset
        """
        # Slashes are not allowed in asset names; normalize to underscores.
        path = path.replace('/', '_')
        return course_key.make_asset_key(
            'asset' if not is_thumbnail else 'thumbnail',
            AssetLocator.clean_keeping_underscores(path)
        ).for_branch(None)

    def get_id(self):
        # The asset's location key doubles as its unique id.
        return self.location

    @property
    def data(self):
        # Raw payload of the asset.
        return self._data

    ASSET_URL_RE = re.compile(r"""
        /?c4x/
        (?P<org>[^/]+)/
        (?P<course>[^/]+)/
        (?P<category>[^/]+)/
        (?P<name>[^/]+)
    """, re.VERBOSE | re.IGNORECASE)

    @staticmethod
    def is_c4x_path(path_string):
        """
        Returns a boolean if a path is believed to be a c4x link based on the leading element
        """
        return StaticContent.ASSET_URL_RE.match(path_string) is not None

    @staticmethod
    def get_static_path_from_location(location):
        """
        This utility static method will take a location identifier and create a 'durable' /static/.. URL representation of it.
        This link is 'durable' as it can maintain integrity across cloning of courseware across course-ids, e.g. reruns of
        courses.
        In the LMS/CMS, we have runtime link-rewriting, so at render time, this /static/... format will get translated into
        the actual /c4x/... path which the client needs to reference static content
        """
        if location is not None:
            return u"/static/{name}".format(name=location.name)
        else:
            return None

    @staticmethod
    def get_base_url_path_for_course_assets(course_key):
        # Return the URL prefix under which all of a course's assets live.
        if course_key is None:
            return None

        assert isinstance(course_key, CourseKey)
        placeholder_id = uuid.uuid4().hex
        # create a dummy asset location with a fake but unique name. strip off the name, and return it
        url_path = StaticContent.serialize_asset_key_with_slash(
            course_key.make_asset_key('asset', placeholder_id).for_branch(None)
        )
        return url_path.replace(placeholder_id, '')

    @staticmethod
    def get_location_from_path(path):
        """
        Generate an AssetKey for the given path (old c4x/org/course/asset/name syntax)
        """
        try:
            return AssetKey.from_string(path)
        except InvalidKeyError:
            # TODO - re-address this once LMS-11198 is tackled.
            if path.startswith('/'):
                # try stripping off the leading slash and try again
                return AssetKey.from_string(path[1:])

    @staticmethod
    def get_asset_key_from_path(course_key, path):
        """
        Parses a path, extracting an asset key or creating one.

        Args:
            course_key: key to the course which owns this asset
            path: the path to said content

        Returns:
            AssetKey: the asset key that represents the path
        """
        # Clean up the path, removing any static prefix and any leading slash.
        if path.startswith('/static/'):
            path = path[len('/static/'):]

        path = path.lstrip('/')

        try:
            return AssetKey.from_string(path)
        except InvalidKeyError:
            # If we couldn't parse the path, just let compute_location figure it out.
            # It's most likely a path like /image.png or something.
            return StaticContent.compute_location(course_key, path)

    @staticmethod
    def get_canonicalized_asset_path(course_key, path, base_url):
        """
        Returns a fully-qualified path to a piece of static content.

        If a static asset CDN is configured, this path will include it.
        Otherwise, the path will simply be relative.

        Args:
            course_key: key to the course which owns this asset
            path: the path to said content

        Returns:
            string: fully-qualified path to asset
        """

        # Break down the input path.
        _, _, relative_path, params, query_string, fragment = urlparse(path)

        # Convert our path to an asset key if it isn't one already.
        asset_key = StaticContent.get_asset_key_from_path(course_key, relative_path)

        # Check the status of the asset to see if this can be served via CDN aka publicly.
        serve_from_cdn = False
        try:
            content = AssetManager.find(asset_key, as_stream=True)
            is_locked = getattr(content, "locked", True)
            serve_from_cdn = not is_locked
        except (ItemNotFoundError, NotFoundError):
            # If we can't find the item, just treat it as if it's locked.
            serve_from_cdn = False

        # Update any query parameter values that have asset paths in them. This is for assets that
        # require their own after-the-fact values, like a Flash file that needs the path of a config
        # file passed to it e.g. /static/visualization.swf?configFile=/static/visualization.xml
        query_params = parse_qsl(query_string)
        updated_query_params = []
        for query_name, query_value in query_params:
            if query_value.startswith("/static/"):
                # Recursively canonicalize asset paths embedded in the query.
                new_query_value = StaticContent.get_canonicalized_asset_path(course_key, query_value, base_url)
                updated_query_params.append((query_name, new_query_value))
            else:
                updated_query_params.append((query_name, query_value))

        serialized_asset_key = StaticContent.serialize_asset_key_with_slash(asset_key)
        base_url = base_url if serve_from_cdn else ''

        return urlunparse((None, base_url, serialized_asset_key, params, urlencode(updated_query_params), fragment))

    def stream_data(self):
        # Yields the whole payload in a single chunk; StaticContentStream
        # overrides this with true chunked streaming.
        yield self._data

    @staticmethod
    def serialize_asset_key_with_slash(asset_key):
        """
        Legacy code expects the serialized asset key to start w/ a slash; so, do that in one place
        :param asset_key:
        """
        url = unicode(asset_key)
        if not url.startswith('/'):
            url = '/' + url  # TODO - re-address this once LMS-11198 is tackled.
        return url


class StaticContentStream(StaticContent):
    """A StaticContent whose payload is read lazily from an open stream."""

    def __init__(self, loc, name, content_type, stream, last_modified_at=None, thumbnail_location=None, import_path=None,
                 length=None, locked=False):
        super(StaticContentStream, self).__init__(loc, name, content_type, None, last_modified_at=last_modified_at,
                                                  thumbnail_location=thumbnail_location, import_path=import_path,
                                                  length=length, locked=locked)
        self._stream = stream

    def stream_data(self):
        # Yield the stream in fixed-size chunks until exhausted.
        while True:
            chunk = self._stream.read(STREAM_DATA_CHUNK_SIZE)
            if len(chunk) == 0:
                break
            yield chunk

    def stream_data_in_range(self, first_byte, last_byte):
        """
        Stream the data between first_byte and last_byte (included)
        """
        self._stream.seek(first_byte)
        position = first_byte
        while True:
            if last_byte < position + STREAM_DATA_CHUNK_SIZE - 1:
                # Final (possibly partial) chunk.
                chunk = self._stream.read(last_byte - position + 1)
                yield chunk
                break
            chunk = self._stream.read(STREAM_DATA_CHUNK_SIZE)
            position += STREAM_DATA_CHUNK_SIZE
            yield chunk

    def close(self):
        # Release the underlying stream.
        self._stream.close()

    def copy_to_in_mem(self):
        # Materialize the stream into a plain in-memory StaticContent.
        self._stream.seek(0)
        content = StaticContent(self.location, self.name, self.content_type, self._stream.read(),
                                last_modified_at=self.last_modified_at, thumbnail_location=self.thumbnail_location,
                                import_path=self.import_path, length=self.length, locked=self.locked)
        return content


class ContentStore(object):
    '''
    Abstraction for all ContentStore providers (e.g. MongoDB)
    '''
    def save(self, content):
        # Persist the given StaticContent; implemented by concrete stores.
        raise NotImplementedError

    def find(self, filename):
        # Look up a stored asset; implemented by concrete stores.
        raise NotImplementedError

    def get_all_content_for_course(self, course_key, start=0, maxresults=-1, sort=None, filter_params=None):
        '''
        Returns a list of static assets for a course, followed by the total number of assets.
        By default all assets are returned, but start and maxresults can be provided to limit the query.

        The return format is a list of asset data dictionaries.
        The asset data dictionaries have the following keys:
            asset_key (:class:`opaque_keys.edx.AssetKey`): The key of the asset
            displayname: The human-readable name of the asset
            uploadDate (datetime.datetime): The date and time that the file was uploadDate
            contentType: The mimetype string of the asset
            md5: An md5 hash of the asset content
        '''
        raise NotImplementedError

    def delete_all_course_assets(self, course_key):
        """
        Delete all of the assets which use this course_key as an identifier
        :param course_key:
        """
        raise NotImplementedError

    def copy_all_course_assets(self, source_course_key, dest_course_key):
        """
        Copy all the course assets from source_course_key to dest_course_key
        """
        raise NotImplementedError

    def generate_thumbnail(self, content, tempfile_path=None, dimensions=None):
        """Create a thumbnail for a given image.

        Returns a tuple of (StaticContent, AssetKey)

        `content` is the StaticContent representing the image you want to make a
        thumbnail out of.

        `tempfile_path` is a string path to the location of a file to read from
        in order to grab the image data, instead of relying on `content.data`

        `dimensions` is an optional param that represents (width, height) in
        pixels. It defaults to None.
        """
        thumbnail_content = None
        # use a naming convention to associate originals with the thumbnail
        thumbnail_name = StaticContent.generate_thumbnail_name(
            content.location.name, dimensions=dimensions
        )

        thumbnail_file_location = StaticContent.compute_location(
            content.location.course_key, thumbnail_name, is_thumbnail=True
        )

        # if we're uploading an image, then let's generate a thumbnail so that we can
        # serve it up when needed without having to rescale on the fly
        if content.content_type is not None and content.content_type.split('/')[0] == 'image':
            try:
                # use PIL to do the thumbnail generation (http://www.pythonware.com/products/pil/)
                # My understanding is that PIL will maintain aspect ratios while restricting
                # the max-height/width to be whatever you pass in as 'size'
                # @todo: move the thumbnail size to a configuration setting?!?
                if tempfile_path is None:
                    im = Image.open(StringIO.StringIO(content.data))
                else:
                    im = Image.open(tempfile_path)

                # I've seen some exceptions from the PIL library when trying to save palletted
                # PNG files to JPEG. Per the google-universe, they suggest converting to RGB first.
                im = im.convert('RGB')

                if not dimensions:
                    dimensions = (128, 128)

                im.thumbnail(dimensions, Image.ANTIALIAS)
                thumbnail_file = StringIO.StringIO()
                im.save(thumbnail_file, 'JPEG')
                thumbnail_file.seek(0)

                # store this thumbnail as any other piece of content
                thumbnail_content = StaticContent(thumbnail_file_location, thumbnail_name,
                                                  'image/jpeg', thumbnail_file)

                self.save(thumbnail_content)

            except Exception, e:
                # log and continue as thumbnails are generally considered as optional
                logging.exception(u"Failed to generate thumbnail for {0}. Exception: {1}".format(content.location, str(e)))

        return thumbnail_content, thumbnail_file_location

    def ensure_indexes(self):
        """
        Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
        an exception if unable to.
        """
        pass
import random
import urllib
from urlparse import urlsplit

from weboob.deprecated.browser import Browser, BrowserHTTPNotFound

from .pages.index import IndexPage
from .pages.torrents import TorrentPage, TorrentsPage


__all__ = ['PiratebayBrowser']


class PiratebayBrowser(Browser):
    """Browser for The Pirate Bay, picking a mirror domain when none is given."""

    ENCODING = 'utf-8'

    # Known mirror domains; one is chosen at random if no URL is supplied.
    DOMAINS = ['thepiratebay.vg', 'thepiratebay.la', 'thepiratebay.mn', 'thepiratebay.gd']

    def __init__(self, url, *args, **kwargs):
        url = url or 'https://%s/' % random.choice(self.DOMAINS)
        url_parsed = urlsplit(url)
        self.PROTOCOL = url_parsed.scheme
        self.DOMAIN = url_parsed.netloc
        # Map URL patterns to their page handler classes.
        self.PAGES = {
            '%s://%s/' % (self.PROTOCOL, self.DOMAIN): IndexPage,
            '%s://%s/search/.*/0/7/0' % (self.PROTOCOL, self.DOMAIN): TorrentsPage,
            '%s://%s/torrent/.*' % (self.PROTOCOL, self.DOMAIN): TorrentPage
        }
        Browser.__init__(self, *args, **kwargs)

    def iter_torrents(self, pattern):
        """Yield torrents matching `pattern` from the site's search page."""
        # The /0/7/0 suffix matches the site's search-URL flags (paging /
        # ordering); NOTE(review): exact semantics not shown here -- confirm.
        self.location('%s://%s/search/%s/0/7/0' % (self.PROTOCOL,
                                                   self.DOMAIN,
                                                   urllib.quote_plus(pattern.encode('utf-8'))))

        assert self.is_on_page(TorrentsPage)
        return self.page.iter_torrents()

    def get_torrent(self, _id):
        """Return the torrent with id `_id`, or None if the page is missing."""
        try:
            self.location('%s://%s/torrent/%s/' % (self.PROTOCOL,
                                                   self.DOMAIN,
                                                   _id))
        except BrowserHTTPNotFound:
            return
        if self.is_on_page(TorrentPage):
            return self.page.get_torrent(_id)
"""This module implement decorators for wrapping data sources so as to
simplify their construction and attribution of properties.

"""

import functools

def data_source_generator(name=None, **properties):
    """Decorator for applying to a simple data source which directly
    returns an iterable/generator with the metrics for each sample. The
    function the decorator is applied to must take no arguments.

    """
    def _decorator(wrapped):
        @functools.wraps(wrapped)
        def _properties(settings):
            # The factory ignores the environ entirely and hands back the
            # wrapped function itself, to be called later for samples.
            def _factory(environ):
                return wrapped
            return dict(properties, name=name, factory=_factory)
        return _properties
    return _decorator

def data_source_factory(name=None, **properties):
    """Decorator for applying to a data source defined as a factory. The
    decorator can be applied to a class or a function. The class
    constructor or function must accept arguments of 'settings', being
    configuration settings for the data source, and 'environ' being
    information about the context in which the data source is being used.
    The resulting object must be a callable which directly returns an
    iterable/generator with the metrics for each sample.

    """
    def _decorator(wrapped):
        @functools.wraps(wrapped)
        def _properties(settings):
            # Defer construction: the factory closes over the settings and
            # invokes the wrapped callable once the environ is known.
            def _factory(environ):
                return wrapped(settings, environ)
            return dict(properties, name=name, factory=_factory)
        return _properties
    return _decorator
class Extension(object):
    """
    Base class for creating extensions.

    Args:
      kwargs[dict]: All key, value pairings are stored as "configuration" options, see getConfigs.
    """
    def __init__(self, **kwargs):
        # Copy the keyword arguments into the configuration store and make
        # sure a default list of heading commands is always present.
        self._configs = dict(kwargs)
        if 'headings' not in self._configs:
            self._configs['headings'] = ['section', 'subsection', 'subsubsection',
                                         'textbf', 'underline', 'emph']

    def getConfigs(self):
        """
        Return the dictionary of configure options.
        """
        return self._configs

    def extend(self, translator):
        """
        Elements should be added to the storage of the Translator instance within this function.

        Args:
          translator[Translator]: The object to be used for converting the html.
        """
        pass
import math

import Sofa


def tostr(L):
    # Format a nested list as a SOFA data string (no brackets or commas).
    return str(L).replace('[', '').replace("]", '').replace(",", ' ')


def transform(T,p):
    # Apply affine transform T = [linear part (9 values, row-major), translation (3)] to point p.
    return [T[0][0]*p[0]+T[0][1]*p[1]+T[0][2]*p[2]+T[1][0],T[0][3]*p[0]+T[0][4]*p[1]+T[0][5]*p[2]+T[1][1],T[0][6]*p[0]+T[0][7]*p[1]+T[0][8]*p[2]+T[1][2]]

def transformF(T,F):
    # Left-multiply a row-major 3x3 deformation gradient F by T's linear part.
    return [T[0][0]*F[0]+T[0][1]*F[3]+T[0][2]*F[6],T[0][0]*F[1]+T[0][1]*F[4]+T[0][2]*F[7],T[0][0]*F[2]+T[0][1]*F[5]+T[0][2]*F[8],T[0][3]*F[0]+T[0][4]*F[3]+T[0][5]*F[6],T[0][3]*F[1]+T[0][4]*F[4]+T[0][5]*F[7],T[0][3]*F[2]+T[0][4]*F[5]+T[0][5]*F[8],T[0][6]*F[0]+T[0][7]*F[3]+T[0][8]*F[6],T[0][6]*F[1]+T[0][7]*F[4]+T[0][8]*F[7],T[0][6]*F[2]+T[0][7]*F[5]+T[0][8]*F[8]]

def compare(p1,p2):
    # Sum of squared differences between two equally-shaped lists of vectors.
    res = 0
    for i,P1 in enumerate(p1):
        for j,item in enumerate(P1):
            res = res+ (item-p2[i][j])*(item-p2[i][j])
    return res

# Tolerance on the squared-error checks performed by Controller.
ERRORTOL = 1e-5

# Test transform: uniform scaling by 2, zero translation.
T = [[2,0,0,0,2,0,0,0,2],[0,0,0]]

# Material-space sample points where the mappings are evaluated.
samples= [[0.5,0.5,0.5], [0.23,0.5,0.8], [0,0.12,0], [0.8,0,0.58]]


def createScene(rootNode):
    # Builds three sibling test scenes (hexa/barycentric, tetra/barycentric,
    # hexa/Shepard), each mapping the same sample points and checked by a
    # Controller instance.
    rootNode.createObject('RequiredPlugin', pluginName="Flexible")
    rootNode.createObject('VisualStyle', displayFlags="showBehaviorModels")

    # Unit cube rest positions and their transformed (deformed) counterparts.
    restpos = [[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0], [0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 1]]
    pos = [transform(T,item) for item in restpos]

    ###########################################################
    simNode = rootNode.createChild('Hexa_barycentric')
    simNode.createObject('MeshTopology', name="mesh", position=tostr(restpos), hexahedra="0 1 3 2 4 5 7 6")
    simNode.createObject('MechanicalObject', template="Vec3d", name="parent", rest_position="@mesh.position",position=tostr(pos) )
    simNode.createObject('BarycentricShapeFunction', position="@parent.rest_position", nbRef="8")

    childNode = simNode.createChild('childP')
    childNode.createObject('MechanicalObject', template="Vec3d", name="child", position=tostr(samples) , showObject="1")
    childNode.createObject('LinearMapping', template="Vec3d,Vec3d")

    childNode = simNode.createChild('childF')
    childNode.createObject('GaussPointContainer', position=tostr(samples))
    childNode.createObject('MechanicalObject', template="F331", name="child")
    childNode.createObject('LinearMapping', template="Vec3d,F331", showDeformationGradientScale="1")

    childNode = simNode.createChild('Visu')
    childNode.createObject('VisualModel', color="8e-1 8e-1 1 1e-1")
    childNode.createObject('IdentityMapping')

    childNode = simNode.createChild('Visu2')
    childNode.createObject('VisualStyle', displayFlags="showWireframe")
    childNode.createObject('VisualModel', color="8e-1 8e-1 1 1")
    childNode.createObject('IdentityMapping')

    simNode.createObject('PythonScriptController',filename="FEM.py", classname="Controller")

    ###########################################################
    simNode = rootNode.createChild('Tetra_barycentric')
    simNode.createObject('MeshTopology', name="mesh", position=tostr(restpos), tetrahedra="0 5 1 7 0 1 2 7 1 2 7 3 7 2 0 6 7 6 0 5 6 5 4 0")
    simNode.createObject('MechanicalObject', template="Vec3d", name="parent", rest_position="@mesh.position",position=tostr(pos) )
    simNode.createObject('BarycentricShapeFunction', position="@parent.rest_position", nbRef="4")

    childNode = simNode.createChild('childP')
    childNode.createObject('MechanicalObject', template="Vec3d", name="child", position=tostr(samples) , showObject="1")
    childNode.createObject('LinearMapping', template="Vec3d,Vec3d")

    childNode = simNode.createChild('childF')
    childNode.createObject('GaussPointContainer', position=tostr(samples))
    childNode.createObject('MechanicalObject', template="F331", name="child")
    childNode.createObject('LinearMapping', template="Vec3d,F331")

    simNode.createObject('PythonScriptController',filename="FEM.py", classname="Controller")

    ###########################################################
    simNode = rootNode.createChild('Hexa_shepard')
    simNode.createObject('MeshTopology', name="mesh", position=tostr(restpos), hexahedra="0 1 3 2 4 5 7 6")
    simNode.createObject('MechanicalObject', template="Vec3d", name="parent", rest_position="@mesh.position",position=tostr(pos) )
    simNode.createObject('ShepardShapeFunction', position="@parent.rest_position", power="2")

    childNode = simNode.createChild('childP')
    childNode.createObject('MechanicalObject', template="Vec3d", name="child", position=tostr(samples) , showObject="1")
    childNode.createObject('LinearMapping', template="Vec3d,Vec3d")

    childNode = simNode.createChild('childF')
    childNode.createObject('GaussPointContainer', position=tostr(samples))
    childNode.createObject('MechanicalObject', template="F331", name="child")
    childNode.createObject('LinearMapping', template="Vec3d,F331")

    simNode.createObject('PythonScriptController',filename="FEM.py", classname="Controller")

    ###########################################################

    rootNode.animate=1

    return rootNode


class Controller(Sofa.PythonScriptController):
    # After the first animation step, compares mapped point positions (P) and
    # deformation gradients (F) against the analytic transform and prints
    # [OK]/[FAILED] with the squared error.

    def createGraph(self,node):
        self.node=node
        self.done=0
        return 0

    def onEndAnimationStep(self,dt):
        if self.done==0:
            print "TEST "+self.node.name+":"

            # test points
            restpos = self.node.getObject('childP/child').findData('rest_position').value
            refpos = [transform(T,item) for item in restpos]
            pos = self.node.getObject('childP/child').findData('position').value
            error = compare(refpos,pos)
            if error>ERRORTOL :
                print "\t"+"\033[91m"+"[FAILED]"+"\033[0m"+" error on P= "+str(error)
            else :
                print "\t"+"\033[92m"+"[OK]"+"\033[0m"+" error on P= "+str(error)

            # test defo gradients
            restpos = [1,0,0,0,1,0,0,0,1]
            pos = self.node.getObject('childF/child').findData('position').value
            # Every gauss point should carry the same analytic gradient.
            refpos = [transformF(T,restpos) for item in pos]
            error = compare(refpos,pos)
            if error>ERRORTOL :
                print "\t"+"\033[91m"+"[FAILED]"+"\033[0m"+" error on F= "+str(error)
            else :
                print "\t"+"\033[92m"+"[OK]"+"\033[0m"+" error on F= "+str(error)

            self.done=1
        return 0
from spack import *


class Serf(SConsPackage):
    """Apache Serf - a high performance C-based HTTP client library
    built upon the Apache Portable Runtime (APR) library"""

    homepage = 'https://serf.apache.org/'
    url = 'https://archive.apache.org/dist/serf/serf-1.3.9.tar.bz2'

    maintainers = ['cosmicexplorer']

    version('1.3.9', sha256='549c2d21c577a8a9c0450facb5cca809f26591f048e466552240947bdf7a87cc')
    version('1.3.8', sha256='e0500be065dbbce490449837bb2ab624e46d64fc0b090474d9acaa87c82b2590')

    variant('debug', default=False,
            description='Enable debugging info and strict compile warnings')

    depends_on('apr')
    depends_on('apr-util')
    depends_on('openssl')
    depends_on('python+pythoncmd', type='build')
    depends_on('scons@2.3.0:', type='build')
    depends_on('uuid')
    depends_on('zlib')

    patch('py3syntax.patch')
    patch('py3-hashbang.patch')

    def build_args(self, spec, prefix):
        """Return the ``VAR=value`` arguments passed to scons.

        Builds a dict of SCons build variables, then flattens it into the
        ``KEY=VALUE`` strings that SConsPackage passes on the scons
        command line.
        """
        args = {
            'PREFIX': prefix,
            'APR': spec['apr'].prefix,
            'APU': spec['apr-util'].prefix,
            'OPENSSL': spec['openssl'].prefix,
            'ZLIB': spec['zlib'].prefix,
            'DEBUG': 'yes' if '+debug' in spec else 'no',
        }

        # SCons doesn't pass Spack environment variables to the
        # execution environment. Therefore, we can't use Spack's compiler
        # wrappers. Use the actual compilers. SCons seems to RPATH things
        # on its own anyway.
        args['CC'] = self.compiler.cc

        # Old versions of serf ignore the ZLIB variable on non-Windows
        # platforms. Also, there is no UUID variable to specify its
        # installation location. Pass explicit link flags for both.
        library_dirs = []
        include_dirs = []
        for dep in spec.dependencies(deptype='link'):
            query = self.spec[dep.name]
            library_dirs.extend(query.libs.directories)
            include_dirs.extend(query.headers.directories)

        # Build the flag strings per-directory instead of with a leading
        # '-L' + join: the original form emitted a bare '-L' (and a bare
        # rpath flag) when the directory list was empty.
        rpath = self.compiler.cc_rpath_arg
        link_flags = ['-L' + d for d in library_dirs]
        link_flags += [rpath + d for d in library_dirs]
        args['LINKFLAGS'] = ' '.join(link_flags)
        args['CPPFLAGS'] = ' '.join('-I' + d for d in include_dirs)

        # str() coercion: some values (e.g. prefix objects) are not plain
        # strings; format() renders them safely.
        return ['{0}={1}'.format(key, value) for key, value in args.items()]

    def build_test(self):
        # FIXME: Several test failures:
        #
        # There were 14 failures:
        #  1) test_ssl_trust_rootca
        #  2) test_ssl_certificate_chain_with_anchor
        #  3) test_ssl_certificate_chain_all_from_server
        #  4) test_ssl_no_servercert_callback_allok
        #  5) test_ssl_large_response
        #  6) test_ssl_large_request
        #  7) test_ssl_client_certificate
        #  8) test_ssl_future_server_cert
        #  9) test_setup_ssltunnel
        # 10) test_ssltunnel_basic_auth
        # 11) test_ssltunnel_basic_auth_server_has_keepalive_off
        # 12) test_ssltunnel_basic_auth_proxy_has_keepalive_off
        # 13) test_ssltunnel_basic_auth_proxy_close_conn_on_200resp
        # 14) test_ssltunnel_digest_auth
        #
        # These seem to be related to:
        # https://groups.google.com/forum/#!topic/serf-dev/YEFTTdF1Qwc
        scons('check')
from spack import *


class Qnnpack(CMakePackage):
    """QNNPACK (Quantized Neural Networks PACKage) is a mobile-optimized
    library for low-precision high-performance neural network inference.
    QNNPACK provides implementation of common neural network operators on
    quantized 8-bit tensors."""

    homepage = "https://github.com/pytorch/QNNPACK"
    git = "https://github.com/pytorch/QNNPACK.git"

    version('master', branch='master')
    version('2019-08-28', commit='7d2a4e9931a82adc3814275b6219a03e24e36b4c')  # py-torch@1.3:1.9
    version('2018-12-27', commit='6c62fddc6d15602be27e9e4cbb9e985151d2fa82')  # py-torch@1.2
    version('2018-12-04', commit='ef05e87cef6b8e719989ce875b5e1c9fdb304c05')  # py-torch@1.0:1.1

    depends_on('cmake@3.5:', type='build')
    depends_on('ninja', type='build')
    depends_on('python', type='build')

    # Vendored third-party sources expected by QNNPACK's CMake build.
    # NOTE(review): these git resources are not pinned to a commit, so
    # builds are not reproducible — consider pinning; left as-is here.
    resource(
        name='cpuinfo',
        git='https://github.com/Maratyszcza/cpuinfo.git',
        destination='deps',
        placement='cpuinfo'
    )
    resource(
        name='fp16',
        git='https://github.com/Maratyszcza/FP16.git',
        destination='deps',
        placement='fp16'
    )
    resource(
        name='fxdiv',
        git='https://github.com/Maratyszcza/FXdiv.git',
        destination='deps',
        placement='fxdiv'
    )
    resource(
        name='googlebenchmark',
        url='https://github.com/google/benchmark/archive/v1.4.1.zip',
        sha256='61ae07eb5d4a0b02753419eb17a82b7d322786bb36ab62bd3df331a4d47c00a7',
        destination='deps',
        placement='googlebenchmark',
    )
    resource(
        name='googletest',
        url='https://github.com/google/googletest/archive/release-1.8.0.zip',
        sha256='f3ed3b58511efd272eb074a3a6d6fb79d7c2e6a0e374323d1e6bcbcc1ef141bf',
        destination='deps',
        placement='googletest',
    )
    resource(
        name='psimd',
        git='https://github.com/Maratyszcza/psimd.git',
        destination='deps',
        placement='psimd'
    )
    resource(
        name='pthreadpool',
        git='https://github.com/Maratyszcza/pthreadpool.git',
        destination='deps',
        placement='pthreadpool'
    )

    generator = 'Ninja'

    def cmake_args(self):
        """Point every vendored dependency's *_SOURCE_DIR at its staged
        resource under ``deps/``.

        Data-driven instead of seven hand-written defines; order and
        values are identical to the original expansion.
        """
        dep_dirs = [
            ('CPUINFO_SOURCE_DIR', 'cpuinfo'),
            ('FP16_SOURCE_DIR', 'fp16'),
            ('FXDIV_SOURCE_DIR', 'fxdiv'),
            ('PSIMD_SOURCE_DIR', 'psimd'),
            ('PTHREADPOOL_SOURCE_DIR', 'pthreadpool'),
            ('GOOGLEBENCHMARK_SOURCE_DIR', 'googlebenchmark'),
            ('GOOGLETEST_SOURCE_DIR', 'googletest'),
        ]
        return [
            self.define(var, join_path(self.stage.source_path, 'deps', subdir))
            for var, subdir in dep_dirs
        ]
import unittest from ctypes import * from struct import calcsize import _testcapi class SubclassesTest(unittest.TestCase): def test_subclass(self): class X(Structure): _fields_ = [("a", c_int)] class Y(X): _fields_ = [("b", c_int)] class Z(X): pass self.assertEqual(sizeof(X), sizeof(c_int)) self.assertEqual(sizeof(Y), sizeof(c_int)*2) self.assertEqual(sizeof(Z), sizeof(c_int)) self.assertEqual(X._fields_, [("a", c_int)]) self.assertEqual(Y._fields_, [("b", c_int)]) self.assertEqual(Z._fields_, [("a", c_int)]) def test_subclass_delayed(self): class X(Structure): pass self.assertEqual(sizeof(X), 0) X._fields_ = [("a", c_int)] class Y(X): pass self.assertEqual(sizeof(Y), sizeof(X)) Y._fields_ = [("b", c_int)] class Z(X): pass self.assertEqual(sizeof(X), sizeof(c_int)) self.assertEqual(sizeof(Y), sizeof(c_int)*2) self.assertEqual(sizeof(Z), sizeof(c_int)) self.assertEqual(X._fields_, [("a", c_int)]) self.assertEqual(Y._fields_, [("b", c_int)]) self.assertEqual(Z._fields_, [("a", c_int)]) class StructureTestCase(unittest.TestCase): formats = {"c": c_char, "b": c_byte, "B": c_ubyte, "h": c_short, "H": c_ushort, "i": c_int, "I": c_uint, "l": c_long, "L": c_ulong, "q": c_longlong, "Q": c_ulonglong, "f": c_float, "d": c_double, } def test_simple_structs(self): for code, tp in self.formats.items(): class X(Structure): _fields_ = [("x", c_char), ("y", tp)] self.assertEqual((sizeof(X), code), (calcsize("c%c0%c" % (code, code)), code)) def test_unions(self): for code, tp in self.formats.items(): class X(Union): _fields_ = [("x", c_char), ("y", tp)] self.assertEqual((sizeof(X), code), (calcsize("%c" % (code)), code)) def test_struct_alignment(self): class X(Structure): _fields_ = [("x", c_char * 3)] self.assertEqual(alignment(X), calcsize("s")) self.assertEqual(sizeof(X), calcsize("3s")) class Y(Structure): _fields_ = [("x", c_char * 3), ("y", c_int)] self.assertEqual(alignment(Y), calcsize("i")) self.assertEqual(sizeof(Y), calcsize("3si")) class SI(Structure): _fields_ = [("a", 
X), ("b", Y)] self.assertEqual(alignment(SI), max(alignment(Y), alignment(X))) self.assertEqual(sizeof(SI), calcsize("3s0i 3si 0i")) class IS(Structure): _fields_ = [("b", Y), ("a", X)] self.assertEqual(alignment(SI), max(alignment(X), alignment(Y))) self.assertEqual(sizeof(IS), calcsize("3si 3s 0i")) class XX(Structure): _fields_ = [("a", X), ("b", X)] self.assertEqual(alignment(XX), alignment(X)) self.assertEqual(sizeof(XX), calcsize("3s 3s 0s")) def test_emtpy(self): # I had problems with these # # Although these are patological cases: Empty Structures! class X(Structure): _fields_ = [] class Y(Union): _fields_ = [] # Is this really the correct alignment, or should it be 0? self.assertTrue(alignment(X) == alignment(Y) == 1) self.assertTrue(sizeof(X) == sizeof(Y) == 0) class XX(Structure): _fields_ = [("a", X), ("b", X)] self.assertEqual(alignment(XX), 1) self.assertEqual(sizeof(XX), 0) def test_fields(self): # test the offset and size attributes of Structure/Unoin fields. class X(Structure): _fields_ = [("x", c_int), ("y", c_char)] self.assertEqual(X.x.offset, 0) self.assertEqual(X.x.size, sizeof(c_int)) self.assertEqual(X.y.offset, sizeof(c_int)) self.assertEqual(X.y.size, sizeof(c_char)) # readonly self.assertRaises((TypeError, AttributeError), setattr, X.x, "offset", 92) self.assertRaises((TypeError, AttributeError), setattr, X.x, "size", 92) class X(Union): _fields_ = [("x", c_int), ("y", c_char)] self.assertEqual(X.x.offset, 0) self.assertEqual(X.x.size, sizeof(c_int)) self.assertEqual(X.y.offset, 0) self.assertEqual(X.y.size, sizeof(c_char)) # readonly self.assertRaises((TypeError, AttributeError), setattr, X.x, "offset", 92) self.assertRaises((TypeError, AttributeError), setattr, X.x, "size", 92) # XXX Should we check nested data types also? # offset is always relative to the class... 
def test_packed(self): class X(Structure): _fields_ = [("a", c_byte), ("b", c_longlong)] _pack_ = 1 self.assertEqual(sizeof(X), 9) self.assertEqual(X.b.offset, 1) class X(Structure): _fields_ = [("a", c_byte), ("b", c_longlong)] _pack_ = 2 self.assertEqual(sizeof(X), 10) self.assertEqual(X.b.offset, 2) class X(Structure): _fields_ = [("a", c_byte), ("b", c_longlong)] _pack_ = 4 self.assertEqual(sizeof(X), 12) self.assertEqual(X.b.offset, 4) import struct longlong_size = struct.calcsize("q") longlong_align = struct.calcsize("bq") - longlong_size class X(Structure): _fields_ = [("a", c_byte), ("b", c_longlong)] _pack_ = 8 self.assertEqual(sizeof(X), longlong_align + longlong_size) self.assertEqual(X.b.offset, min(8, longlong_align)) d = {"_fields_": [("a", "b"), ("b", "q")], "_pack_": -1} self.assertRaises(ValueError, type(Structure), "X", (Structure,), d) # Issue 15989 d = {"_fields_": [("a", c_byte)], "_pack_": _testcapi.INT_MAX + 1} self.assertRaises(ValueError, type(Structure), "X", (Structure,), d) d = {"_fields_": [("a", c_byte)], "_pack_": _testcapi.UINT_MAX + 2} self.assertRaises(ValueError, type(Structure), "X", (Structure,), d) def test_initializers(self): class Person(Structure): _fields_ = [("name", c_char*6), ("age", c_int)] self.assertRaises(TypeError, Person, 42) self.assertRaises(ValueError, Person, b"asldkjaslkdjaslkdj") self.assertRaises(TypeError, Person, "Name", "HI") # short enough self.assertEqual(Person(b"12345", 5).name, b"12345") # exact fit self.assertEqual(Person(b"123456", 5).name, b"123456") # too long self.assertRaises(ValueError, Person, b"1234567", 5) def test_conflicting_initializers(self): class POINT(Structure): _fields_ = [("x", c_int), ("y", c_int)] # conflicting positional and keyword args self.assertRaises(TypeError, POINT, 2, 3, x=4) self.assertRaises(TypeError, POINT, 2, 3, y=4) # too many initializers self.assertRaises(TypeError, POINT, 2, 3, 4) def test_keyword_initializers(self): class POINT(Structure): _fields_ = [("x", 
c_int), ("y", c_int)] pt = POINT(1, 2) self.assertEqual((pt.x, pt.y), (1, 2)) pt = POINT(y=2, x=1) self.assertEqual((pt.x, pt.y), (1, 2)) def test_invalid_field_types(self): class POINT(Structure): pass self.assertRaises(TypeError, setattr, POINT, "_fields_", [("x", 1), ("y", 2)]) def test_invalid_name(self): # field name must be string def declare_with_name(name): class S(Structure): _fields_ = [(name, c_int)] self.assertRaises(TypeError, declare_with_name, b"x") def test_intarray_fields(self): class SomeInts(Structure): _fields_ = [("a", c_int * 4)] # can use tuple to initialize array (but not list!) self.assertEqual(SomeInts((1, 2)).a[:], [1, 2, 0, 0]) self.assertEqual(SomeInts((1, 2)).a[::], [1, 2, 0, 0]) self.assertEqual(SomeInts((1, 2)).a[::-1], [0, 0, 2, 1]) self.assertEqual(SomeInts((1, 2)).a[::2], [1, 0]) self.assertEqual(SomeInts((1, 2)).a[1:5:6], [2]) self.assertEqual(SomeInts((1, 2)).a[6:4:-1], []) self.assertEqual(SomeInts((1, 2, 3, 4)).a[:], [1, 2, 3, 4]) self.assertEqual(SomeInts((1, 2, 3, 4)).a[::], [1, 2, 3, 4]) # too long # XXX Should raise ValueError?, not RuntimeError self.assertRaises(RuntimeError, SomeInts, (1, 2, 3, 4, 5)) def test_nested_initializers(self): # test initializing nested structures class Phone(Structure): _fields_ = [("areacode", c_char*6), ("number", c_char*12)] class Person(Structure): _fields_ = [("name", c_char * 12), ("phone", Phone), ("age", c_int)] p = Person(b"Someone", (b"1234", b"5678"), 5) self.assertEqual(p.name, b"Someone") self.assertEqual(p.phone.areacode, b"1234") self.assertEqual(p.phone.number, b"5678") self.assertEqual(p.age, 5) def test_structures_with_wchar(self): try: c_wchar except NameError: return # no unicode class PersonW(Structure): _fields_ = [("name", c_wchar * 12), ("age", c_int)] p = PersonW("Someone \xe9") self.assertEqual(p.name, "Someone \xe9") self.assertEqual(PersonW("1234567890").name, "1234567890") self.assertEqual(PersonW("12345678901").name, "12345678901") # exact fit 
self.assertEqual(PersonW("123456789012").name, "123456789012") #too long self.assertRaises(ValueError, PersonW, "1234567890123") def test_init_errors(self): class Phone(Structure): _fields_ = [("areacode", c_char*6), ("number", c_char*12)] class Person(Structure): _fields_ = [("name", c_char * 12), ("phone", Phone), ("age", c_int)] cls, msg = self.get_except(Person, b"Someone", (1, 2)) self.assertEqual(cls, RuntimeError) self.assertEqual(msg, "(Phone) <class 'TypeError'>: " "expected string, int found") cls, msg = self.get_except(Person, b"Someone", (b"a", b"b", b"c")) self.assertEqual(cls, RuntimeError) if issubclass(Exception, object): self.assertEqual(msg, "(Phone) <class 'TypeError'>: too many initializers") else: self.assertEqual(msg, "(Phone) TypeError: too many initializers") def test_huge_field_name(self): # issue12881: segfault with large structure field names def create_class(length): class S(Structure): _fields_ = [('x' * length, c_int)] for length in [10 ** i for i in range(0, 8)]: try: create_class(length) except MemoryError: # MemoryErrors are OK, we just don't want to segfault pass def get_except(self, func, *args): try: func(*args) except Exception as detail: return detail.__class__, str(detail) def test_abstract_class(self): class X(Structure): _abstract_ = "something" # try 'X()' cls, msg = self.get_except(eval, "X()", locals()) self.assertEqual((cls, msg), (TypeError, "abstract class")) def test_methods(self): self.assertTrue("in_dll" in dir(type(Structure))) self.assertTrue("from_address" in dir(type(Structure))) self.assertTrue("in_dll" in dir(type(Structure))) def test_positional_args(self): # see also http://bugs.python.org/issue5042 class W(Structure): _fields_ = [("a", c_int), ("b", c_int)] class X(W): _fields_ = [("c", c_int)] class Y(X): pass class Z(Y): _fields_ = [("d", c_int), ("e", c_int), ("f", c_int)] z = Z(1, 2, 3, 4, 5, 6) self.assertEqual((z.a, z.b, z.c, z.d, z.e, z.f), (1, 2, 3, 4, 5, 6)) z = Z(1) self.assertEqual((z.a, z.b, 
z.c, z.d, z.e, z.f), (1, 0, 0, 0, 0, 0)) self.assertRaises(TypeError, lambda: Z(1, 2, 3, 4, 5, 6, 7)) class PointerMemberTestCase(unittest.TestCase): def test(self): # a Structure with a POINTER field class S(Structure): _fields_ = [("array", POINTER(c_int))] s = S() # We can assign arrays of the correct type s.array = (c_int * 3)(1, 2, 3) items = [s.array[i] for i in range(3)] self.assertEqual(items, [1, 2, 3]) # The following are bugs, but are included here because the unittests # also describe the current behaviour. # # This fails with SystemError: bad arg to internal function # or with IndexError (with a patch I have) s.array[0] = 42 items = [s.array[i] for i in range(3)] self.assertEqual(items, [42, 2, 3]) s.array[0] = 1 items = [s.array[i] for i in range(3)] self.assertEqual(items, [1, 2, 3]) def test_none_to_pointer_fields(self): class S(Structure): _fields_ = [("x", c_int), ("p", POINTER(c_int))] s = S() s.x = 12345678 s.p = None self.assertEqual(s.x, 12345678) class TestRecursiveStructure(unittest.TestCase): def test_contains_itself(self): class Recursive(Structure): pass try: Recursive._fields_ = [("next", Recursive)] except AttributeError as details: self.assertTrue("Structure or union cannot contain itself" in str(details)) else: self.fail("Structure or union cannot contain itself") def test_vice_versa(self): class First(Structure): pass class Second(Structure): pass First._fields_ = [("second", Second)] try: Second._fields_ = [("first", First)] except AttributeError as details: self.assertTrue("_fields_ is final" in str(details)) else: self.fail("AttributeError not raised") if __name__ == '__main__': unittest.main()
import tvm
from tvm import te


def test_stmt_simplify():
    """With n bound to 10 by the LetStmt, the guard `i < 12` is always
    true inside `for i in [0, n)`, so Simplify should drop the IfThenElse
    and leave a bare Store as the loop body."""
    ib = tvm.tir.ir_builder.create()
    A = ib.pointer("float32", name="A")
    C = ib.pointer("float32", name="C")
    n = te.size_var("n")
    with ib.for_range(0, n, name="i") as i:
        with ib.if_scope(i < 12):
            A[i] = C[i]

    body = tvm.tir.LetStmt(n, 10, ib.get())
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, C, n], body))
    body = tvm.tir.transform.Simplify()(mod)["main"].body
    # body is the LetStmt's inner loop; its body must be the raw Store
    # (i.e. the IfThenElse was eliminated).
    assert isinstance(body.body, tvm.tir.Store)


def test_thread_extent_simplify():
    """Thread-extent bounds (tx < n = 10, ty < 1) imply tx + ty < 12, so
    the conditional must be removed under the thread-extent AttrStmts."""
    ib = tvm.tir.ir_builder.create()
    A = ib.pointer("float32", name="A")
    C = ib.pointer("float32", name="C")
    n = te.size_var("n")
    tx = te.thread_axis("threadIdx.x")
    ty = te.thread_axis("threadIdx.y")
    # NOTE(review): the tx thread_extent attr is registered twice here —
    # presumably intentional (nested identical extents should also
    # simplify), but worth confirming against the pass's intent.
    ib.scope_attr(tx, "thread_extent", n)
    ib.scope_attr(tx, "thread_extent", n)
    ib.scope_attr(ty, "thread_extent", 1)
    with ib.if_scope(tx + ty < 12):
        A[tx] = C[tx + ty]
    body = tvm.tir.LetStmt(n, 10, ib.get())
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, C, n], body))
    body = tvm.tir.transform.Simplify()(mod)["main"].body
    # Unwrap LetStmt -> three AttrStmts -> the Store (the If is gone).
    assert isinstance(body.body.body.body, tvm.tir.Store)


def test_if_likely():
    """Nested identical `likely` conditions: Simplify should keep one
    IfThenElse but fold the redundant inner one into its then_case."""
    ib = tvm.tir.ir_builder.create()
    A = ib.pointer("float32", name="A")
    C = ib.pointer("float32", name="C")
    n = te.size_var("n")
    tx = te.thread_axis("threadIdx.x")
    ty = te.thread_axis("threadIdx.y")
    ib.scope_attr(tx, "thread_extent", 32)
    ib.scope_attr(ty, "thread_extent", 32)
    with ib.if_scope(ib.likely(tx * 32 + ty < n)):
        with ib.if_scope(ib.likely(tx * 32 + ty < n)):
            A[tx] = C[tx * 32 + ty]
    body = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, C, n], body))
    body = tvm.tir.transform.Simplify()(mod)["main"].body
    # Under the two AttrStmts: exactly one IfThenElse remains, and its
    # then_case is no longer another IfThenElse.
    assert isinstance(body.body.body, tvm.tir.IfThenElse)
    assert not isinstance(body.body.body.then_case, tvm.tir.IfThenElse)


def test_basic_likely_elimination():
    """A ragged reduction (per-row extent from W) lowers without any
    residual `if` guard in the generated statement."""
    n = te.size_var("n")
    X = te.placeholder(shape=(n,), name="x")
    W = te.placeholder(shape=(n + 1,), dtype="int32", name="w")

    def f(i):
        # Row i reduces X over [W[i], W[i+1]).
        start = W[i]
        extent = W[i + 1] - W[i]
        rv = te.reduce_axis((0, extent))
        return te.sum(X[rv + start], axis=rv)

    Y = te.compute(X.shape, f, name="y")
    s = te.create_schedule([Y.op])

    stmt = tvm.lower(s, [X, W, Y], simple_mode=True)
    # String check is intentionally coarse: no conditional of any kind
    # may survive lowering.
    assert "if" not in str(stmt)


def test_complex_likely_elimination():
    """SparseLengthsSum-style kernel (scan-based offsets + gather +
    ragged reduce + split/vectorize): still no `if` after lowering."""

    def cumsum(X):
        """
        Y[i] = sum(X[:i])
        """
        (m,) = X.shape
        s_state = te.placeholder((m + 1,), dtype="int32", name="state")
        s_init = te.compute((1,), lambda _: tvm.tir.const(0, "int32"))
        s_update = te.compute((m + 1,), lambda l: s_state[l - 1] + X[l - 1])
        return tvm.te.scan(s_init, s_update, s_state, inputs=[X], name="cumsum")

    def sparse_lengths_sum(data, indices, lengths):
        oshape = list(data.shape)
        oshape[0] = lengths.shape[0]
        length_offsets = cumsum(lengths)

        def sls(n, d):
            gg = te.reduce_axis((0, lengths[n]))
            indices_idx = length_offsets[n] + gg
            data_idx = indices[indices_idx]
            data_val = data[data_idx, d]
            return te.sum(data_val, axis=gg)

        return te.compute(oshape, sls)

    m, n, d, i, l = (
        te.size_var("m"),
        te.size_var("n"),
        te.size_var("d"),
        te.size_var("i"),
        te.size_var("l"),
    )
    data_ph = te.placeholder((m, d * 32), name="data")
    indices_ph = te.placeholder((i,), name="indices", dtype="int32")
    lengths_ph = te.placeholder((n,), name="lengths", dtype="int32")
    Y = sparse_lengths_sum(data_ph, indices_ph, lengths_ph)
    s = te.create_schedule([Y.op])
    (n, d) = s[Y].op.axis
    (do, di) = s[Y].split(d, factor=32)
    (gg,) = s[Y].op.reduce_axis
    s[Y].reorder(n, do, gg, di)
    s[Y].vectorize(di)

    stmt = tvm.lower(s, [data_ph, indices_ph, lengths_ph, Y], simple_mode=True)
    assert "if" not in str(stmt)


if __name__ == "__main__":
    test_stmt_simplify()
    test_thread_extent_simplify()
    test_if_likely()
    test_basic_likely_elimination()
    test_complex_likely_elimination()
ANSIBLE_METADATA = {
    'status': ['preview'],
    'supported_by': 'community',
    'metadata_version': '1.0'
}

DOCUMENTATION = '''
module: iworkflow_license_pool
short_description: Manage license pools in iWorkflow.
description:
  - Manage license pools in iWorkflow.
version_added: 2.4
options:
  name:
    description:
      - Name of the license pool to create.
    required: True
  state:
    description:
      - Whether the license pool should exist, or not. A state of C(present)
        will attempt to activate the license pool if C(accept_eula) is set
        to C(yes).
    required: False
    default: present
    choices:
      - present
      - absent
  base_key:
    description:
      - Key that the license server uses to verify the functionality that
        you are entitled to license. This option is required if you are
        creating a new license.
    required: False
    default: None
  accept_eula:
    description:
      - Specifies that you accept the EULA that is part of iWorkflow. Note
        that this is required to activate the license pool. If this is not
        specified, or it is set to C(no), then the pool will remain in a
        state of limbo until you choose to accept the EULA. This option is
        required when updating a license. It is also suggested that you
        provide it when creating a license, but if you do not, the license
        will remain inactive and you will have to run this module again
        with this option set to C(yes) to activate it.
    required: False
    default: 'no'
    choices:
      - yes
      - no
notes:
  - Requires the f5-sdk Python package on the host. This is as easy as pip
    install f5-sdk.
extends_documentation_fragment: f5
requirements:
  - f5-sdk >= 2.3.0
  - iWorkflow >= 2.1.0
author:
  - Tim Rupp (@caphrim007)
'''

EXAMPLES = '''
- name: Create license pool
  iworkflow_license_pool:
      accept_eula: "yes"
      name: "my-lic-pool"
      base_key: "XXXXX-XXXXX-XXXXX-XXXXX-XXXXXXX"
      state: "present"
      server: "iwf.mydomain.com"
      password: "secret"
      user: "admin"
      validate_certs: "no"
  delegate_to: localhost
'''

RETURN = '''

'''

import time

from ansible.module_utils.basic import BOOLEANS
from ansible.module_utils.f5_utils import (
    AnsibleF5Client,
    AnsibleF5Parameters,
    F5ModuleError,
    HAS_F5SDK,
    iControlUnexpectedHTTPError
)


class Parameters(AnsibleF5Parameters):
    """Maps module parameters to/from the iWorkflow REST representation."""

    # REST attribute name -> module parameter name
    api_map = {
        'baseRegKey': 'base_key'
    }
    returnables = []

    api_attributes = [
        'baseRegKey', 'state'
    ]

    updatables = []

    def to_return(self):
        """Return only the returnable parameters, dropping unset values."""
        result = {}
        for returnable in self.returnables:
            result[returnable] = getattr(self, returnable)
        result = self._filter_params(result)
        return result

    def api_params(self):
        """Return parameters keyed by their REST attribute names."""
        result = {}
        for api_attribute in self.api_attributes:
            if self.api_map is not None and api_attribute in self.api_map:
                result[api_attribute] = getattr(self, self.api_map[api_attribute])
            else:
                result[api_attribute] = getattr(self, api_attribute)
        result = self._filter_params(result)
        return result

    @property
    def name(self):
        # A whitespace-only name is as useless as no name at all; reject it.
        if self._values['name'] is None:
            return None
        name = str(self._values['name']).strip()
        if name == '':
            raise F5ModuleError(
                "You must specify a name for this module"
            )
        return name


class ModuleManager(object):
    """Drives the present/absent state machine for a license pool."""

    def __init__(self, client):
        self.client = client
        self.have = None
        self.want = Parameters(self.client.module.params)
        self.changes = Parameters()

    def _set_changed_options(self):
        """Record the desired (want) values as the reported changes."""
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = Parameters(changed)

    def _update_changed_options(self):
        """Diff want vs. have on updatable keys; return True if any differ."""
        changed = {}
        for key in Parameters.updatables:
            if getattr(self.want, key) is not None:
                attr1 = getattr(self.want, key)
                attr2 = getattr(self.have, key)
                if attr1 != attr2:
                    changed[key] = attr1
        if changed:
            self.changes = Parameters(changed)
            return True
        return False

    def _pool_is_licensed(self):
        if self.have.state == 'LICENSED':
            return True
        return False

    def _pool_is_unlicensed_eula_unaccepted(self, current):
        if current.state != 'LICENSED' and not self.want.accept_eula:
            return True
        return False

    def exec_module(self):
        """Entry point: dispatch on desired state and report results."""
        changed = False
        result = dict()
        state = self.want.state

        try:
            if state == "present":
                changed = self.present()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))

        result.update(**self.changes.to_return())
        result.update(dict(changed=changed))
        return result

    def exists(self):
        """Return True iff exactly one pool with the wanted name exists."""
        collection = self.client.api.cm.shared.licensing.pools_s.get_collection(
            requests_params=dict(
                params="$filter=name+eq+'{0}'".format(self.want.name)
            )
        )
        if len(collection) == 1:
            return True
        elif len(collection) == 0:
            return False
        else:
            raise F5ModuleError(
                "Multiple license pools with the provided name were found!"
            )

    def present(self):
        if self.exists():
            return self.update()
        else:
            return self.create()

    def should_update(self):
        if self._pool_is_licensed():
            return False
        # BUG FIX: this was called with no argument even though the method
        # requires `current`, which raised TypeError on every update path.
        if self._pool_is_unlicensed_eula_unaccepted(self.have):
            return False
        return True

    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        # BUG FIX: was `self.module.check_mode`; ModuleManager has no
        # `module` attribute. create() and remove() already use the client.
        if self.client.check_mode:
            return True
        self.update_on_device()
        return True

    def update_on_device(self):
        collection = self.client.api.cm.shared.licensing.pools_s.get_collection(
            requests_params=dict(
                params="$filter=name+eq+'{0}'".format(self.want.name)
            )
        )
        resource = collection.pop()
        resource.modify(
            state='RELICENSE',
            method='AUTOMATIC'
        )
        return self._wait_for_license_pool_state_to_activate(resource)

    def create(self):
        self._set_changed_options()
        if self.client.check_mode:
            return True
        if self.want.base_key is None:
            raise F5ModuleError(
                "You must specify a 'base_key' when creating a license pool"
            )
        self.create_on_device()
        return True

    def read_current_from_device(self):
        collection = self.client.api.cm.shared.licensing.pools_s.get_collection(
            requests_params=dict(
                params="$filter=name+eq+'{0}'".format(self.want.name)
            )
        )
        resource = collection.pop()
        result = resource.attrs
        return Parameters(result)

    def create_on_device(self):
        resource = self.client.api.cm.shared.licensing.pools_s.pool.create(
            name=self.want.name,
            baseRegKey=self.want.base_key,
            method="AUTOMATIC"
        )
        return self._wait_for_license_pool_state_to_activate(resource)

    def _wait_for_license_pool_state_to_activate(self, pool):
        """Poll until the pool is LICENSED, accepting the EULA on the way.

        Raises F5ModuleError if licensing fails or the wait times out.
        """
        error_values = ['EXPIRED', 'FAILED']
        # Wait no more than ~5 minutes (29 polls, 10 seconds apart)
        for x in range(1, 30):
            pool.refresh()
            if pool.state == 'LICENSED':
                return True
            elif pool.state == 'WAITING_FOR_EULA_ACCEPTANCE':
                pool.modify(
                    eulaText=pool.eulaText,
                    state='ACCEPTED_EULA'
                )
            elif pool.state in error_values:
                raise F5ModuleError(pool.errorText)
            time.sleep(10)
        # Robustness fix: previously a timeout fell off the end of the loop
        # and returned None, silently reporting success to callers.
        raise F5ModuleError(
            "Timed out waiting for the license pool to become licensed"
        )

    def absent(self):
        if self.exists():
            return self.remove()
        return False

    def remove(self):
        if self.client.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the license pool")
        return True

    def remove_from_device(self):
        collection = self.client.api.cm.shared.licensing.pools_s.get_collection(
            requests_params=dict(
                params="$filter=name+eq+'{0}'".format(self.want.name)
            )
        )
        resource = collection.pop()
        if resource:
            resource.delete()


class ArgumentSpec(object):
    """Declares the module's argument spec for AnsibleF5Client."""

    def __init__(self):
        self.supports_check_mode = True
        self.argument_spec = dict(
            accept_eula=dict(
                type='bool',
                default='no',
                choices=BOOLEANS
            ),
            base_key=dict(
                required=False,
                no_log=True
            ),
            name=dict(
                required=True
            ),
            state=dict(
                required=False,
                default='present',
                choices=['absent', 'present']
            )
        )
        self.f5_product_name = 'iworkflow'


def main():
    if not HAS_F5SDK:
        raise F5ModuleError("The python f5-sdk module is required")

    spec = ArgumentSpec()

    client = AnsibleF5Client(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        f5_product_name=spec.f5_product_name
    )

    try:
        mm = ModuleManager(client)
        results = mm.exec_module()
        client.module.exit_json(**results)
    except F5ModuleError as e:
        client.module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
"""PyXB-generated bindings for namespace
http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1 (Darwin Push Port
station messages, schema rttiPPTStationMessages_v1.xsd).

NOTE(review): this module is auto-generated by PyXB 1.2.4 — regenerate it from
the XSD rather than editing by hand.
"""
from __future__ import unicode_literals
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
import pyxb.utils.six as _six

# Unique identifier for the generation run that produced this module.
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:5049f1de-e9cf-11e4-bb50-a0481ca50ab0')

# Version of PyXB used to generate the bindings; the installed runtime must match.
_PyXBVersion = '1.2.4'
if pyxb.__version__ != _PyXBVersion:
    raise pyxb.PyXBVersionError(_PyXBVersion)

# Import bindings for schemas in use (common types come from darwinpush.xb.ct).
import pyxb.binding.datatypes
import darwinpush.xb.ct as _ImportedBinding_darwinpush_xb_ct

# Module-level namespace object; the two categories registered here hold the
# type and element bindings defined below.
Namespace = pyxb.namespace.NamespaceForURI('http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1', create_if_missing=True)
Namespace.configureCategories(['typeBinding', 'elementBinding'])


def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
    """Parse the given XML and use the document element to create a
    Python instance.

    @param xml_text An XML document. This should be data (Python 2
    str or Python 3 bytes), or a text (Python 2 unicode or Python 3
    str) in the L{pyxb._InputEncoding} encoding.

    @keyword default_namespace The L{pyxb.Namespace} instance to use as the
    default namespace where there is no default namespace in scope.
    If unspecified or C{None}, the namespace of the module containing
    this function will be used.

    @keyword location_base: An object to be recorded as the base of all
    L{pyxb.utils.utility.Location} instances associated with events and
    objects handled by the parser.  You might pass the URI from which
    the document was obtained.
    """
    # If the saxer XML style is not in effect, fall back to the DOM path.
    if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
        dom = pyxb.utils.domutils.StringToDOM(xml_text)
        return CreateFromDOM(dom.documentElement, default_namespace=default_namespace)
    if default_namespace is None:
        default_namespace = Namespace.fallbackNamespace()
    saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
    handler = saxer.getContentHandler()
    xmld = xml_text
    # The SAX parser wants bytes; encode text input with the configured encoding.
    if isinstance(xmld, _six.text_type):
        xmld = xmld.encode(pyxb._InputEncoding)
    saxer.parse(io.BytesIO(xmld))
    instance = handler.rootObject()
    return instance


def CreateFromDOM (node, default_namespace=None):
    """Create a Python instance from the given DOM node.
    The node tag must correspond to an element declaration in this module.

    @deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
    if default_namespace is None:
        default_namespace = Namespace.fallbackNamespace()
    return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)


# Atomic simple type: enumeration of operator-message categories.
class MsgCategoryType (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):

    """The category of operator message"""

    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'MsgCategoryType')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 15, 1)
    _Documentation = 'The category of operator message'
# Enumeration facet is attached after the class statement (PyXB convention).
MsgCategoryType._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=MsgCategoryType, enum_prefix=None)
MsgCategoryType.Train = MsgCategoryType._CF_enumeration.addEnumeration(unicode_value='Train', tag='Train')
MsgCategoryType.Station = MsgCategoryType._CF_enumeration.addEnumeration(unicode_value='Station', tag='Station')
MsgCategoryType.Connections = MsgCategoryType._CF_enumeration.addEnumeration(unicode_value='Connections', tag='Connections')
MsgCategoryType.System = MsgCategoryType._CF_enumeration.addEnumeration(unicode_value='System', tag='System')
MsgCategoryType.Misc = MsgCategoryType._CF_enumeration.addEnumeration(unicode_value='Misc', tag='Misc')
MsgCategoryType.PriorTrains = MsgCategoryType._CF_enumeration.addEnumeration(unicode_value='PriorTrains', tag='PriorTrains')
MsgCategoryType.PriorOther = MsgCategoryType._CF_enumeration.addEnumeration(unicode_value='PriorOther', tag='PriorOther')
MsgCategoryType._InitializeFacetMap(MsgCategoryType._CF_enumeration)
Namespace.addCategoryObject('typeBinding', 'MsgCategoryType', MsgCategoryType)


# Atomic simple type: enumeration of message severities ('0'..'3').
class MsgSeverityType (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):

    """The severity of operator message"""

    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'MsgSeverityType')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 29, 1)
    _Documentation = 'The severity of operator message'
MsgSeverityType._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=MsgSeverityType, enum_prefix=None)
MsgSeverityType.n0 = MsgSeverityType._CF_enumeration.addEnumeration(unicode_value='0', tag='n0')
MsgSeverityType.n1 = MsgSeverityType._CF_enumeration.addEnumeration(unicode_value='1', tag='n1')
MsgSeverityType.n2 = MsgSeverityType._CF_enumeration.addEnumeration(unicode_value='2', tag='n2')
MsgSeverityType.n3 = MsgSeverityType._CF_enumeration.addEnumeration(unicode_value='3', tag='n3')
MsgSeverityType._InitializeFacetMap(MsgSeverityType._CF_enumeration)
Namespace.addCategoryObject('typeBinding', 'MsgSeverityType', MsgSeverityType)


# Complex type [anonymous]: mixed content of the <Msg> element (text plus <p>/<a>).
class CTD_ANON (pyxb.binding.basis.complexTypeDefinition):
    """The content of the message"""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_MIXED
    _Abstract = False
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 58, 4)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element {http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1}p uses Python identifier p
    __p = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'p'), 'p', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_CTD_ANON_httpwww_thalesgroup_comrttiPushPortStationMessagesv1p', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 84, 1), )

    p = property(__p.value, __p.set, None, 'Defines an HTML paragraph')

    # Element {http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1}a uses Python identifier a
    __a = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'a'), 'a', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_CTD_ANON_httpwww_thalesgroup_comrttiPushPortStationMessagesv1a', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 94, 1), )

    a = property(__a.value, __a.set, None, 'Defines an HTML anchor')

    _ElementMap.update({
        __p.name() : __p,
        __a.name() : __a
    })
    _AttributeMap.update({

    })


# Complex type [anonymous]: mixed content of the <p> element (text plus <a>).
class CTD_ANON_ (pyxb.binding.basis.complexTypeDefinition):
    """Defines an HTML paragraph"""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_MIXED
    _Abstract = False
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 88, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element {http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1}a uses Python identifier a
    __a = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'a'), 'a', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_CTD_ANON__httpwww_thalesgroup_comrttiPushPortStationMessagesv1a', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 94, 1), )

    a = property(__a.value, __a.set, None, 'Defines an HTML anchor')

    _ElementMap.update({
        __a.name() : __a
    })
    _AttributeMap.update({

    })


# Complex type [anonymous]: simple string content of the <a> element plus a
# required 'href' attribute.
class CTD_ANON_2 (pyxb.binding.basis.complexTypeDefinition):
    """Defines an HTML anchor"""
    _TypeDefinition = pyxb.binding.datatypes.string
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_SIMPLE
    _Abstract = False
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 98, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.string

    # Attribute href uses Python identifier href
    __href = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'href'), 'href', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_CTD_ANON_2_href', pyxb.binding.datatypes.string, required=True)
    __href._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 101, 5)
    __href._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 101, 5)

    href = property(__href.value, __href.set, None, None)

    _ElementMap.update({

    })
    _AttributeMap.update({
        __href.name() : __href
    })


# Complex type {http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1}StationMessage
class StationMessage (pyxb.binding.basis.complexTypeDefinition):
    """Darwin Workstation Station Message"""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'StationMessage')
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 41, 1)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element {http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1}Station uses Python identifier Station
    __Station = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Station'), 'Station', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_StationMessage_httpwww_thalesgroup_comrttiPushPortStationMessagesv1Station', True, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 46, 3), )

    Station = property(__Station.value, __Station.set, None, 'The Stations the message is being applied to')

    # Element {http://www.thalesgroup.com/rtti/PushPort/StationMessages/v1}Msg uses Python identifier Msg
    __Msg = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Msg'), 'Msg', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_StationMessage_httpwww_thalesgroup_comrttiPushPortStationMessagesv1Msg', False, pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 54, 3), )

    Msg = property(__Msg.value, __Msg.set, None, 'The content of the message')

    # Attribute id uses Python identifier id
    __id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'id'), 'id', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_StationMessage_id', pyxb.binding.datatypes.int, required=True)
    __id._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 66, 2)
    __id._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 66, 2)

    id = property(__id.value, __id.set, None, None)

    # Attribute cat uses Python identifier cat
    __cat = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'cat'), 'cat', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_StationMessage_cat', MsgCategoryType, required=True)
    __cat._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 67, 2)
    __cat._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 67, 2)

    cat = property(__cat.value, __cat.set, None, 'The category of message')

    # Attribute sev uses Python identifier sev
    __sev = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'sev'), 'sev', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_StationMessage_sev', MsgSeverityType, required=True)
    __sev._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 72, 2)
    __sev._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 72, 2)

    sev = property(__sev.value, __sev.set, None, 'The severity of the message')

    # Attribute suppress uses Python identifier suppress
    __suppress = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'suppress'), 'suppress', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_StationMessage_suppress', pyxb.binding.datatypes.boolean, unicode_default='false')
    __suppress._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 77, 2)
    __suppress._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 77, 2)

    suppress = property(__suppress.value, __suppress.set, None, 'Whether the train running information is suppressed to the public')

    _ElementMap.update({
        __Station.name() : __Station,
        __Msg.name() : __Msg
    })
    _AttributeMap.update({
        __id.name() : __id,
        __cat.name() : __cat,
        __sev.name() : __sev,
        __suppress.name() : __suppress
    })
Namespace.addCategoryObject('typeBinding', 'StationMessage', StationMessage)


# Complex type [anonymous]: empty content of the <Station> element; carries a
# required 'crs' attribute typed by the imported common-types binding.
class CTD_ANON_3 (pyxb.binding.basis.complexTypeDefinition):
    """The Stations the message is being applied to"""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
    _Abstract = False
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 50, 4)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Attribute crs uses Python identifier crs
    __crs = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'crs'), 'crs', '__httpwww_thalesgroup_comrttiPushPortStationMessagesv1_CTD_ANON_3_crs', _ImportedBinding_darwinpush_xb_ct.CrsType, required=True)
    __crs._DeclarationLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 51, 5)
    __crs._UseLocation = pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 51, 5)

    crs = property(__crs.value, __crs.set, None, None)

    _ElementMap.update({

    })
    _AttributeMap.update({
        __crs.name() : __crs
    })


# Top-level element bindings, registered in the namespace's elementBinding category.
p = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'p'), CTD_ANON_, documentation='Defines an HTML paragraph', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 84, 1))
Namespace.addCategoryObject('elementBinding', p.name().localName(), p)

a = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'a'), CTD_ANON_2, documentation='Defines an HTML anchor', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 94, 1))
Namespace.addCategoryObject('elementBinding', a.name().localName(), a)


# Scoped element declarations for the mixed-content message body.
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'p'), CTD_ANON_, scope=CTD_ANON, documentation='Defines an HTML paragraph', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 84, 1)))

CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'a'), CTD_ANON_2, scope=CTD_ANON, documentation='Defines an HTML anchor', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 94, 1)))

def _BuildAutomaton ():
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton
    del _BuildAutomaton
    import pyxb.utils.fac as fac

    counters = set()
    cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 60, 6))
    counters.add(cc_0)
    cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 61, 6))
    counters.add(cc_1)
    states = []
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'p')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 60, 6))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'a')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 61, 6))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    st_1._set_transitionSet(transitions)
    return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON._Automaton = _BuildAutomaton()


CTD_ANON_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'a'), CTD_ANON_2, scope=CTD_ANON_, documentation='Defines an HTML anchor', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 94, 1)))

def _BuildAutomaton_ ():
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_
    del _BuildAutomaton_
    import pyxb.utils.fac as fac

    counters = set()
    cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 90, 4))
    counters.add(cc_0)
    states = []
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(CTD_ANON_._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'a')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 90, 4))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    st_0._set_transitionSet(transitions)
    return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_._Automaton = _BuildAutomaton_()


StationMessage._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Station'), CTD_ANON_3, scope=StationMessage, documentation='The Stations the message is being applied to', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 46, 3)))

StationMessage._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Msg'), CTD_ANON, scope=StationMessage, documentation='The content of the message', location=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 54, 3)))

def _BuildAutomaton_2 ():
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_2
    del _BuildAutomaton_2
    import pyxb.utils.fac as fac

    counters = set()
    cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 46, 3))
    counters.add(cc_0)
    states = []
    final_update = None
    symbol = pyxb.binding.content.ElementUse(StationMessage._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Station')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 46, 3))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    final_update = set()
    symbol = pyxb.binding.content.ElementUse(StationMessage._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Msg')), pyxb.utils.utility.Location('/home/gberg/code/src/fstr/darwinpush/xsd/rttiPPTStationMessages_v1.xsd', 54, 3))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    st_1._set_transitionSet(transitions)
    return fac.Automaton(states, counters, False, containing_state=None)
StationMessage._Automaton = _BuildAutomaton_2()
from urlparse import urlparse

from api_tests.nodes.views.test_node_contributors_list import NodeCRUDTestCase
from nose.tools import *  # flake8: noqa
from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from tests.base import fake
from osf_tests.factories import (
    ProjectFactory,
    CommentFactory,
    RegistrationFactory,
    WithdrawnRegistrationFactory,
)


class TestWithdrawnRegistrations(NodeCRUDTestCase):
    """API tests asserting which v2 endpoints remain visible on a withdrawn
    registration (contributors and a limited attribute set) and which are
    hidden (children, comments, files, logs, node links, sub-registrations).
    """

    def setUp(self):
        # Register the public project from the base class, withdraw the
        # registration, and add a pointer so node-link endpoints can be probed.
        super(TestWithdrawnRegistrations, self).setUp()
        self.registration = RegistrationFactory(creator=self.user, project=self.public_project)
        self.withdrawn_registration = WithdrawnRegistrationFactory(registration=self.registration, user=self.registration.creator)
        self.public_pointer_project = ProjectFactory(is_public=True)
        self.public_pointer = self.public_project.add_pointer(self.public_pointer_project,
                                                              auth=Auth(self.user),
                                                              save=True)
        self.withdrawn_url = '/{}registrations/{}/?version=2.2'.format(API_BASE, self.registration._id)
        self.withdrawn_registration.justification = 'We made a major error.'
        self.withdrawn_registration.save()

    def test_can_access_withdrawn_contributors(self):
        # Contributors stay visible after withdrawal.
        url = '/{}registrations/{}/contributors/'.format(API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 200)

    def test_cannot_access_withdrawn_children(self):
        url = '/{}registrations/{}/children/'.format(API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_cannot_access_withdrawn_comments(self):
        self.public_project = ProjectFactory(is_public=True, creator=self.user)
        self.public_comment = CommentFactory(node=self.public_project, user=self.user)
        url = '/{}registrations/{}/comments/'.format(API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_can_access_withdrawn_contributor_detail(self):
        url = '/{}registrations/{}/contributors/{}/'.format(API_BASE, self.registration._id, self.user._id)
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 200)

    def test_cannot_return_a_withdrawn_registration_at_node_detail_endpoint(self):
        # Registrations are not addressable through the /nodes/ namespace.
        url = '/{}nodes/{}/'.format(API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 404)

    def test_cannot_delete_a_withdrawn_registration(self):
        url = '/{}registrations/{}/'.format(API_BASE, self.registration._id)
        res = self.app.delete_json_api(url, auth=self.user.auth, expect_errors=True)
        self.registration.reload()
        assert_equal(res.status_code, 405)

    def test_cannot_access_withdrawn_files_list(self):
        url = '/{}registrations/{}/files/'.format(API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_cannot_access_withdrawn_node_links_detail(self):
        url = '/{}registrations/{}/node_links/{}/'.format(API_BASE, self.registration._id, self.public_pointer._id)
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_cannot_access_withdrawn_node_links_list(self):
        url = '/{}registrations/{}/node_links/'.format(API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_cannot_access_withdrawn_node_logs(self):
        self.public_project = ProjectFactory(is_public=True, creator=self.user)
        url = '/{}registrations/{}/logs/'.format(API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_cannot_access_withdrawn_registrations_list(self):
        self.registration.save()
        url = '/{}registrations/{}/registrations/'.format(API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_withdrawn_registrations_display_limited_fields(self):
        # After withdrawal most attributes are nulled out; only the whitelisted
        # fields below keep their real values.
        registration = self.registration
        res = self.app.get(self.withdrawn_url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        attributes = res.json['data']['attributes']
        registration.reload()
        expected_attributes = {
            'title': registration.title,
            'description': registration.description,
            'date_created': registration.date_created.isoformat().replace('+00:00', 'Z'),
            'date_registered': registration.registered_date.isoformat().replace('+00:00', 'Z'),
            'date_modified': registration.date_modified.isoformat().replace('+00:00', 'Z'),
            'withdrawal_justification': registration.retraction.justification,
            'public': None,
            'category': None,
            'registration': True,
            'fork': None,
            'collection': None,
            'tags': None,
            'withdrawn': True,
            'pending_withdrawal': None,
            'pending_registration_approval': None,
            'pending_embargo_approval': None,
            'embargo_end_date': None,
            'registered_meta': None,
            'current_user_permissions': None,
            'registration_supplement': registration.registered_schema.first().name
        }

        for attribute in expected_attributes:
            assert_equal(expected_attributes[attribute], attributes[attribute])

        # Contributors is the only relationship that survives withdrawal.
        contributors = urlparse(res.json['data']['relationships']['contributors']['links']['related']['href']).path
        assert_equal(contributors, '/{}registrations/{}/contributors/'.format(API_BASE, registration._id))

        assert_not_in('children', res.json['data']['relationships'])
        assert_not_in('comments', res.json['data']['relationships'])
        assert_not_in('node_links', res.json['data']['relationships'])
        assert_not_in('registrations', res.json['data']['relationships'])
        assert_not_in('parent', res.json['data']['relationships'])
        assert_not_in('forked_from', res.json['data']['relationships'])
        assert_not_in('files', res.json['data']['relationships'])
        assert_not_in('logs', res.json['data']['relationships'])
        assert_not_in('registered_by', res.json['data']['relationships'])
        assert_not_in('registered_from', res.json['data']['relationships'])
        assert_not_in('root', res.json['data']['relationships'])

    def test_field_specific_related_counts_ignored_if_hidden_field_on_withdrawn_registration(self):
        # Asking for counts on a hidden relationship must not resurrect it.
        url = '/{}registrations/{}/?related_counts=children'.format(API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_not_in('children', res.json['data']['relationships'])
        assert_in('contributors', res.json['data']['relationships'])

    def test_field_specific_related_counts_retrieved_if_visible_field_on_withdrawn_registration(self):
        url = '/{}registrations/{}/?related_counts=contributors'.format(API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['relationships']['contributors']['links']['related']['meta']['count'], 1)
from collections import OrderedDict


class TimeBasedAtomGrouper(object):
    """
    Splits and groups atoms into subjobs using historic atom time values.

    Let N be the number of concurrent executors allocated for this job, and let T be the aggregate
    serial time to execute all atoms on a single executor. In the ideal grouping we would create
    exactly N subjobs, each with T/N of work, all ending at the same time. In reality atom times
    vary, new builds introduce atoms with no historic data, and machines differ in speed, so the
    algorithm instead aims to:

    - Minimize framework overhead (time spent sending and retrieving subjobs) and maximize the
      time the slaves actually spend running the build.
    - Avoid overloading any single executor with too much work, which would make the whole build
      wait on that one executor.

    Subjob creation therefore happens in two stages:

    - The 'big chunk' stage creates exactly N large subjobs covering the majority of the runtime
      (BIG_CHUNK_FRACTION of T).
    - The 'small chunk' stage creates roughly 2N short subjobs used to fill in the gaps so that
      all executors finish at similar times.

    Atoms without historic timing data are assigned the largest known atom time, so the length of
    unknown atoms is never underestimated.
    """

    # Fraction of the total estimated runtime allocated to the initial N 'big chunk' subjobs.
    BIG_CHUNK_FRACTION = 0.8

    def __init__(self, atoms, max_executors, atom_time_map, project_directory):
        """
        :param atoms: the list of atoms for this build
        :type atoms: list[app.master.atom.Atom]
        :param max_executors: the maximum number of executors for this build
        :type max_executors: int
        :param atom_time_map: a dictionary containing the historic times for atoms for this
            particular job
        :type atom_time_map: dict[str, float]
        :param project_directory: the project directory of this build
        :type project_directory: str
        """
        self._atoms = atoms
        self._max_executors = max_executors
        self._atom_time_map = atom_time_map
        self._project_directory = project_directory

    def groupings(self):
        """
        Group the atoms into subjobs using historic timing data.

        :return: a list of lists of atoms
        :rtype: list[list[app.master.atom.Atom]]
        """
        # 1). Coalesce the atoms with historic atom times, and also get total estimated runtime
        try:
            total_estimated_runtime = self._set_expected_atom_times(
                self._atoms, self._atom_time_map, self._project_directory)
        except _AtomTimingDataError:
            # No usable timing data: fall back to the naive grouper. Imported lazily because it
            # is only needed on this fallback path; this keeps the module importable (and the
            # grouping algorithm unit-testable) without the rest of the app package.
            from app.master.atom_grouper import AtomGrouper
            grouper = AtomGrouper(self._atoms, self._max_executors)
            return grouper.groupings()

        # 2). Sort them by decreasing time, and add them to an OrderedDict
        atoms_by_decreasing_time = sorted(self._atoms, key=lambda atom: atom.expected_time, reverse=True)
        sorted_atom_times_left = OrderedDict([(atom, atom.expected_time) for atom in atoms_by_decreasing_time])

        # 3). Group them!
        # Target duration for each executor's initial 'big' subjob.
        big_subjob_time = (total_estimated_runtime * self.BIG_CHUNK_FRACTION) / self._max_executors

        # Target duration for the gap-filling 'small' subjobs.
        small_subjob_time = (total_estimated_runtime * (1.0 - self.BIG_CHUNK_FRACTION)) / (2 * self._max_executors)

        # _group_atoms_into_sized_buckets() removes grouped atoms from sorted_atom_times_left.
        subjobs = self._group_atoms_into_sized_buckets(sorted_atom_times_left, big_subjob_time, self._max_executors)
        small_subjobs = self._group_atoms_into_sized_buckets(sorted_atom_times_left, small_subjob_time, None)
        subjobs.extend(small_subjobs)
        return subjobs

    def _set_expected_atom_times(self, new_atoms, old_atoms_with_times, project_directory):
        """
        Set the expected runtime (new_atom.expected_time) of each atom in new_atoms using historic
        timing data, and return the total estimated serial runtime for this build. Returning the
        total here (rather than re-iterating later) is deliberate: there can be thousands of
        atoms, and iterating through them multiple times is wasteful.

        :param new_atoms: the list of atoms that will be run in this build
        :type new_atoms: list[app.master.atom.Atom]
        :param old_atoms_with_times: a dictionary containing the historic times for atoms for
            this particular job, keyed by atom command string
        :type old_atoms_with_times: dict[str, float]
        :param project_directory: the project directory of this build
        :type project_directory: str
        :raises _AtomTimingDataError: if no atom has historic timing data
        :return: the total estimated runtime in seconds
        :rtype: float
        """
        atoms_without_timing_data = []
        total_time = 0
        max_atom_time = 0

        # Accumulate times for atoms that have historic data; collect the rest for later.
        for new_atom in new_atoms:
            if new_atom.command_string not in old_atoms_with_times:
                atoms_without_timing_data.append(new_atom)
                continue

            new_atom.expected_time = old_atoms_with_times[new_atom.command_string]

            # Track the largest single atom time, used as a conservative estimate for atoms with
            # unknown times.
            if max_atom_time < new_atom.expected_time:
                max_atom_time = new_atom.expected_time

            # We want to return the atom with the project directory still in it, as this data
            # will directly be sent to the slave to be run.
            total_time += new_atom.expected_time

        # For the atoms without historic timing data, assign them the largest atom time we have.
        for new_atom in atoms_without_timing_data:
            new_atom.expected_time = max_atom_time

        if len(new_atoms) == len(atoms_without_timing_data):
            raise _AtomTimingDataError

        total_time += (max_atom_time * len(atoms_without_timing_data))
        return total_time

    def _group_atoms_into_sized_buckets(self, sorted_atom_time_dict, target_group_time, max_groups_to_create):
        """
        Given a sorted OrderedDict of [atom, time] pairs, return a list of lists of atoms, each
        group estimated to take target_group_time seconds. At most max_groups_to_create groups
        are generated; the method returns once that limit is reached or sorted_atom_time_dict is
        empty. This method removes grouped atoms from sorted_atom_time_dict (often from the
        middle of the collection).

        :param sorted_atom_time_dict: the sorted (longest first) OrderedDict of [atom, time]
            pairs; elements are removed from it by this method
        :type sorted_atom_time_dict: OrderedDict[app.master.atom.Atom, float]
        :param target_group_time: how long each subjob should approximately take
        :type target_group_time: float
        :param max_groups_to_create: the maximum number of subjobs to create, or None for no
            limit; once reached, the subjobs grouped so far are returned
        :type max_groups_to_create: int|None
        :return: the groups of grouped atoms, each group taking an estimated target_group_time
        :rtype: list[list[app.master.atom.Atom]]
        """
        subjobs = []
        subjob_time_so_far = 0
        subjob_atoms = []

        while (max_groups_to_create is None or len(subjobs) < max_groups_to_create) \
                and len(sorted_atom_time_dict) > 0:
            # Iterate over a snapshot of the items: the dict is mutated (popped) inside the
            # loop, and iterating a live dict view while mutating it raises RuntimeError on
            # Python 3 ("dictionary changed size during iteration").
            for atom, atom_time in list(sorted_atom_time_dict.items()):
                if len(subjob_atoms) == 0 or (atom_time + subjob_time_so_far) <= target_group_time:
                    subjob_time_so_far += atom_time
                    subjob_atoms.append(atom)
                    sorted_atom_time_dict.pop(atom)

                # If (number of subjobs created so far + atoms left) is less than or equal to the
                # total number of subjobs we need to create, then have each remaining atom be a
                # subjob and return. The "+ 1" accounts for the current subjob being generated,
                # which hasn't been appended to subjobs yet.
                if max_groups_to_create is not None and \
                        (len(subjobs) + len(sorted_atom_time_dict) + 1) <= max_groups_to_create:
                    subjobs.append(subjob_atoms)
                    # Drain a snapshot of the keys for the same mutation-during-iteration reason.
                    for remaining_atom in list(sorted_atom_time_dict):
                        sorted_atom_time_dict.pop(remaining_atom)
                        subjobs.append([remaining_atom])
                    return subjobs

            subjobs.append(subjob_atoms)
            subjob_atoms = []
            subjob_time_so_far = 0

        return subjobs


class _AtomTimingDataError(Exception):
    """
    An exception to represent the case where the atom timing data is either not present or
    incorrect.
    """
"""
Tests for the integration test suite itself.
"""

import logging
import os
import subprocess
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Set

import yaml

from get_test_group import patterns_from_group

__maintainer__ = 'adam'
__contact__ = 'tools-infra-team@mesosphere.io'

log = logging.getLogger(__file__)


def _tests_from_pattern(ci_pattern: str) -> Set[str]:
    """
    From a CI pattern, get all tests ``pytest`` would collect.

    :raises Exception: if ``pytest`` reports a collection error for the pattern.
    """
    tests = set()  # type: Set[str]
    args = [
        'pytest',
        '--disable-pytest-warnings',
        '--collect-only',
        ci_pattern,
        '-q',
    ]
    # Test names will not be in ``stderr`` so we ignore that.
    result = subprocess.run(
        args=args,
        stdout=subprocess.PIPE,
        env={**os.environ, **{'PYTHONIOENCODING': 'UTF-8'}},
    )
    output = result.stdout
    for line in output.splitlines():
        if b'error in' in line:
            message = (
                'Error collecting tests for pattern "{ci_pattern}". '
                'Full output:\n'
                '{output}'
            ).format(
                ci_pattern=ci_pattern,
                output=output,
            )
            raise Exception(message)
        # Whitespace is important to avoid confusing pytest warning messages
        # with test names. For example, the pytest output may contain '3 tests
        # deselected' which would conflict with a test file called
        # test_agent_deselected.py if we ignored whitespace.
        if (
            line and
            # Some tests show warnings on collection.
            b' warnings' not in line and
            # Some tests are skipped on collection.
            b'skipped in' not in line and
            # Some tests are deselected by the ``pytest.ini`` configuration.
            b' deselected' not in line and
            not line.startswith(b'no tests ran in')
        ):
            tests.add(line.decode())
    return tests


def test_test_groups() -> None:
    """
    The test suite is split into various "groups".
    This test confirms that the groups together contain all tests,
    and each test is collected only once.
    """
    test_group_file = Path('test_groups.yaml')
    test_group_file_contents = test_group_file.read_text()
    # ``yaml.load`` without an explicit ``Loader`` is deprecated and can
    # construct arbitrary Python objects; ``safe_load`` is sufficient here.
    test_groups = yaml.safe_load(test_group_file_contents)['groups']
    test_patterns = []
    for group in test_groups:
        test_patterns += patterns_from_group(group_name=group)

    # Map each collected test to every pattern that collects it, so we can
    # state whether every test is collected exactly once.
    tests_to_patterns = defaultdict(list)  # type: Dict[str, List[str]]
    for pattern in test_patterns:
        tests = _tests_from_pattern(ci_pattern=pattern)
        for test in tests:
            tests_to_patterns[test].append(pattern)

    # Collect all failures before raising, so the log shows every offending
    # test at once. (The original code also had an ``assert`` here that was
    # guarded by the identical condition and therefore could never fire.)
    errs = []
    for test_name, patterns in tests_to_patterns.items():
        if len(patterns) != 1:
            message = (
                'Test "{test_name}" will be run once for each pattern in '
                '{patterns}. '
                'Each test should be run only once.'
            ).format(
                test_name=test_name,
                patterns=patterns,
            )
            errs.append(message)

    if errs:
        for message in errs:
            log.error(message)
        raise Exception("Some tests are not collected exactly once, see errors.")

    # Every test in the groups must exist, and every existing test must be in
    # exactly one group.
    all_tests = _tests_from_pattern(ci_pattern='')
    assert tests_to_patterns.keys() - all_tests == set()
    assert all_tests - tests_to_patterns.keys() == set()
import contextlib

import mock
import webob.exc as wexc

from neutron.api.v2 import base
from neutron.common import constants as n_const
from neutron import context
from neutron.extensions import portbindings
from neutron.manager import NeutronManager
from neutron.openstack.common import log as logging
from neutron.plugins.ml2 import config as ml2_config
from neutron.plugins.ml2.drivers.cisco.nexus import config as cisco_config
from neutron.plugins.ml2.drivers.cisco.nexus import exceptions as c_exc
from neutron.plugins.ml2.drivers.cisco.nexus import mech_cisco_nexus
from neutron.plugins.ml2.drivers.cisco.nexus import nexus_network_driver
from neutron.plugins.ml2.drivers import type_vlan as vlan_config
from neutron.tests.unit import test_db_plugin


LOG = logging.getLogger(__name__)

# Test fixture constants: plugin path, fake hosts, VLAN range, and the
# imaginary Nexus switch/interfaces used by the mocked ncclient.
ML2_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin'
PHYS_NET = 'physnet1'
COMP_HOST_NAME = 'testhost'
COMP_HOST_NAME_2 = 'testhost_2'
VLAN_START = 1000
VLAN_END = 1100
NEXUS_IP_ADDR = '1.1.1.1'
NETWORK_NAME = 'test_network'
NETWORK_NAME_2 = 'test_network_2'
NEXUS_INTERFACE = '1/1'
NEXUS_INTERFACE_2 = '1/2'
CIDR_1 = '10.0.0.0/24'
CIDR_2 = '10.0.1.0/24'
DEVICE_ID_1 = '11111111-1111-1111-1111-111111111111'
DEVICE_ID_2 = '22222222-2222-2222-2222-222222222222'
DEVICE_OWNER = 'compute:None'


class CiscoML2MechanismTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
    """Base fixture: ML2 plugin + Cisco Nexus mechanism driver with a mocked
    NETCONF client, so tests can inspect the config sent to the 'switch'."""

    def setUp(self):
        """Configure for end-to-end neutron testing using a mock ncclient.

        This setup includes:
        - Configure the ML2 plugin to use VLANs in the range of 1000-1100.
        - Configure the Cisco mechanism driver to use an imaginary switch
          at NEXUS_IP_ADDR.
        - Create a mock NETCONF client (ncclient) for the Cisco mechanism
          driver

        """
        self.addCleanup(mock.patch.stopall)

        # Configure the ML2 mechanism drivers and network types
        ml2_opts = {
            'mechanism_drivers': ['cisco_nexus'],
            'tenant_network_types': ['vlan'],
        }
        for opt, val in ml2_opts.items():
            ml2_config.cfg.CONF.set_override(opt, val, 'ml2')
        self.addCleanup(ml2_config.cfg.CONF.reset)

        # Configure the ML2 VLAN parameters
        phys_vrange = ':'.join([PHYS_NET, str(VLAN_START), str(VLAN_END)])
        vlan_config.cfg.CONF.set_override('network_vlan_ranges',
                                          [phys_vrange],
                                          'ml2_type_vlan')
        self.addCleanup(vlan_config.cfg.CONF.reset)

        # Configure the Cisco Nexus mechanism driver: credentials plus the
        # host-to-interface mapping for the fake switch.
        nexus_config = {
            (NEXUS_IP_ADDR, 'username'): 'admin',
            (NEXUS_IP_ADDR, 'password'): 'mySecretPassword',
            (NEXUS_IP_ADDR, 'ssh_port'): 22,
            (NEXUS_IP_ADDR, COMP_HOST_NAME): NEXUS_INTERFACE,
            (NEXUS_IP_ADDR, COMP_HOST_NAME_2): NEXUS_INTERFACE_2}
        nexus_patch = mock.patch.dict(
            cisco_config.ML2MechCiscoConfig.nexus_dict,
            nexus_config)
        nexus_patch.start()
        self.addCleanup(nexus_patch.stop)

        # The NETCONF client module is not included in the DevStack
        # distribution, so mock this module for unit testing.
        self.mock_ncclient = mock.Mock()
        mock.patch.object(nexus_network_driver.CiscoNexusDriver,
                          '_import_ncclient',
                          return_value=self.mock_ncclient).start()

        # Mock port values for 'status' and 'binding:segmentation_id'
        mock_status = mock.patch.object(
            mech_cisco_nexus.CiscoNexusMechanismDriver,
            '_is_status_active').start()
        mock_status.return_value = n_const.PORT_STATUS_ACTIVE

        def _mock_get_vlanid(context):
            # First network gets VLAN_START; any other network the next VLAN.
            network = context.network.current
            if network['name'] == NETWORK_NAME:
                return VLAN_START
            else:
                return VLAN_START + 1

        mock_vlanid = mock.patch.object(
            mech_cisco_nexus.CiscoNexusMechanismDriver,
            '_get_vlanid').start()
        mock_vlanid.side_effect = _mock_get_vlanid

        super(CiscoML2MechanismTestCase, self).setUp(ML2_PLUGIN)

        self.port_create_status = 'DOWN'

    @contextlib.contextmanager
    def _patch_ncclient(self, attr, value):
        """Configure an attribute on the mock ncclient module.

        This method can be used to inject errors by setting a side effect
        or a return value for an ncclient method.

        :param attr: ncclient attribute (typically method) to be configured.
        :param value: Value to be configured on the attribute.

        """
        # Configure attribute.
        config = {attr: value}
        self.mock_ncclient.configure_mock(**config)
        # Continue testing
        yield
        # Unconfigure attribute
        config = {attr: None}
        self.mock_ncclient.configure_mock(**config)

    def _is_in_nexus_cfg(self, words):
        """Check if any config sent to Nexus contains all words in a list."""
        for call in (self.mock_ncclient.connect.return_value.
                     edit_config.mock_calls):
            configlet = call[2]['config']
            if all(word in configlet for word in words):
                return True
        return False

    def _is_in_last_nexus_cfg(self, words):
        """Confirm last config sent to Nexus contains specified keywords."""
        last_cfg = (self.mock_ncclient.connect.return_value.
                    edit_config.mock_calls[-1][2]['config'])
        return all(word in last_cfg for word in words)

    def _is_vlan_configured(self, vlan_creation_expected=True,
                            add_keyword_expected=False):
        # True when the last config trunks a VLAN on an interface and the
        # observed vlan-create / 'add' keyword usage matches expectations.
        vlan_created = self._is_in_nexus_cfg(['vlan', 'vlan-name'])
        add_appears = self._is_in_last_nexus_cfg(['add'])
        return (self._is_in_last_nexus_cfg(['allowed', 'vlan']) and
                vlan_created == vlan_creation_expected and
                add_appears == add_keyword_expected)

    def _is_vlan_unconfigured(self, vlan_deletion_expected=True):
        # True when the VLAN was untrunked and the observed vlan-delete
        # matches expectations.
        vlan_deleted = self._is_in_last_nexus_cfg(
            ['no', 'vlan', 'vlan-id-create-delete'])
        return (self._is_in_nexus_cfg(['allowed', 'vlan', 'remove']) and
                vlan_deleted == vlan_deletion_expected)


class TestCiscoBasicGet(CiscoML2MechanismTestCase,
                        test_db_plugin.TestBasicGet):
    # Re-run the generic GET tests against the Cisco ML2 fixture.
    pass


class TestCiscoV2HTTPResponse(CiscoML2MechanismTestCase,
                              test_db_plugin.TestV2HTTPResponse):
    # Re-run the generic HTTP-response tests against the Cisco ML2 fixture.
    pass


class TestCiscoPortsV2(CiscoML2MechanismTestCase,
                       test_db_plugin.TestPortsV2):
    """Port tests exercising the Cisco Nexus mechanism driver."""

    @contextlib.contextmanager
    def _create_resources(self, name=NETWORK_NAME, cidr=CIDR_1,
                          device_id=DEVICE_ID_1,
                          host_id=COMP_HOST_NAME):
        """Create network, subnet, and port resources for test cases.

        Create a network, subnet, port and then update the port, yield the
        result, then delete the port, subnet and network.

        :param name: Name of network to be created.
        :param cidr: cidr address of subnetwork to be created.
        :param device_id: Device ID to use for port to be created/updated.
        :param host_id: Host ID to use for port create/update.

        """
        with self.network(name=name) as network:
            with self.subnet(network=network, cidr=cidr) as subnet:
                with self.port(subnet=subnet, cidr=cidr) as port:
                    data = {'port': {portbindings.HOST_ID: host_id,
                                     'device_id': device_id,
                                     'device_owner': 'compute:none',
                                     'admin_state_up': True}}
                    req = self.new_update_request('ports', data,
                                                  port['port']['id'])
                    res = req.get_response(self.api)
                    yield res.status_int

    def _assertExpectedHTTP(self, status, exc):
        """Confirm that an HTTP status corresponds to an expected exception.

        Confirm that an HTTP status which has been returned for an
        neutron API request matches the HTTP status corresponding
        to an expected exception.

        :param status: HTTP status
        :param exc: Expected exception

        """
        if exc in base.FAULT_MAP:
            expected_http = base.FAULT_MAP[exc].code
        else:
            expected_http = wexc.HTTPInternalServerError.code
        self.assertEqual(status, expected_http)

    def test_create_ports_bulk_emulated_plugin_failure(self):
        real_has_attr = hasattr

        #ensures the API chooses the emulation code path
        def fakehasattr(item, attr):
            if attr.endswith('__native_bulk_support'):
                return False
            return real_has_attr(item, attr)

        with mock.patch('__builtin__.hasattr',
                        new=fakehasattr):
            plugin_obj = NeutronManager.get_plugin()
            orig = plugin_obj.create_port
            with mock.patch.object(plugin_obj,
                                   'create_port') as patched_plugin:

                def side_effect(*args, **kwargs):
                    return self._do_side_effect(patched_plugin, orig,
                                                *args, **kwargs)

                patched_plugin.side_effect = side_effect
                with self.network() as net:
                    res = self._create_port_bulk(self.fmt, 2,
                                                 net['network']['id'],
                                                 'test',
                                                 True)
                    # Expect an internal server error as we injected a fault
                    self._validate_behavior_on_bulk_failure(
                        res,
                        'ports',
                        wexc.HTTPInternalServerError.code)

    def test_create_ports_bulk_native(self):
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk port create")

    def test_create_ports_bulk_emulated(self):
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk port create")

    def test_create_ports_bulk_native_plugin_failure(self):
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk port create")
        ctx = context.get_admin_context()
        with self.network() as net:
            plugin_obj = NeutronManager.get_plugin()
            orig = plugin_obj.create_port
            with mock.patch.object(plugin_obj,
                                   'create_port') as patched_plugin:

                def side_effect(*args, **kwargs):
                    return self._do_side_effect(patched_plugin, orig,
                                                *args, **kwargs)

                patched_plugin.side_effect = side_effect
                res = self._create_port_bulk(self.fmt, 2,
                                             net['network']['id'],
                                             'test', True, context=ctx)
                # We expect an internal server error as we injected a fault
                self._validate_behavior_on_bulk_failure(
                    res,
                    'ports',
                    wexc.HTTPInternalServerError.code)

    def test_nexus_enable_vlan_cmd(self):
        """Verify the syntax of the command to enable a vlan on an intf.

        Confirm that for the first VLAN configured on a Nexus interface,
        the command string sent to the switch does not contain the
        keyword 'add'.

        Confirm that for the second VLAN configured on a Nexus interface,
        the command string sent to the switch contains the keyword 'add'.

        """
        # First vlan should be configured without 'add' keyword
        with self._create_resources():
            self.assertTrue(self._is_vlan_configured(
                vlan_creation_expected=True,
                add_keyword_expected=False))
            self.mock_ncclient.reset_mock()

            # Second vlan should be configured with 'add' keyword
            with self._create_resources(name=NETWORK_NAME_2,
                                        device_id=DEVICE_ID_2,
                                        cidr=CIDR_2):
                self.assertTrue(self._is_vlan_configured(
                    vlan_creation_expected=True,
                    add_keyword_expected=True))

    def test_nexus_connect_fail(self):
        """Test failure to connect to a Nexus switch.

        While creating a network, subnet, and port, simulate a connection
        failure to a nexus switch. Confirm that the expected HTTP code
        is returned for the create port operation.

        """
        with self._patch_ncclient('connect.side_effect',
                                  AttributeError):
            with self._create_resources() as result_status:
                self._assertExpectedHTTP(result_status,
                                         c_exc.NexusConnectFailed)

    def test_nexus_vlan_config_two_hosts(self):
        """Verify config/unconfig of vlan on two compute hosts."""

        @contextlib.contextmanager
        def _create_port_check_vlan(comp_host_name, device_id,
                                    vlan_creation_expected=True):
            # Create a port bound to comp_host_name and verify the resulting
            # VLAN config on the mocked switch before yielding.
            with self.port(subnet=subnet, fmt=self.fmt) as port:
                data = {'port': {portbindings.HOST_ID: comp_host_name,
                                 'device_id': device_id,
                                 'device_owner': DEVICE_OWNER,
                                 'admin_state_up': True}}
                req = self.new_update_request('ports', data,
                                              port['port']['id'])
                req.get_response(self.api)
                self.assertTrue(self._is_vlan_configured(
                    vlan_creation_expected=vlan_creation_expected,
                    add_keyword_expected=False))
                self.mock_ncclient.reset_mock()
                yield

        # Create network and subnet
        with self.network(name=NETWORK_NAME) as network:
            with self.subnet(network=network, cidr=CIDR_1) as subnet:

                # Create an instance on first compute host
                with _create_port_check_vlan(COMP_HOST_NAME, DEVICE_ID_1,
                                             vlan_creation_expected=True):

                    # Create an instance on second compute host
                    with _create_port_check_vlan(COMP_HOST_NAME_2,
                                                 DEVICE_ID_2,
                                                 vlan_creation_expected=False):
                        pass

                    # Instance on second host is now terminated.
                    # Vlan should be untrunked from port, but vlan should
                    # still exist on the switch.
                    self.assertTrue(self._is_vlan_unconfigured(
                        vlan_deletion_expected=False))
                    self.mock_ncclient.reset_mock()

                # Instance on first host is now terminated.
                # Vlan should be untrunked from port and vlan should have
                # been deleted from the switch.
                self.assertTrue(self._is_vlan_unconfigured(
                    vlan_deletion_expected=True))

    def test_nexus_config_fail(self):
        """Test a Nexus switch configuration failure.

        While creating a network, subnet, and port, simulate a nexus
        switch configuration error. Confirm that the expected HTTP code
        is returned for the create port operation.

        """
        with self._patch_ncclient(
            'connect.return_value.edit_config.side_effect',
            AttributeError):
            with self._create_resources() as result_status:
                self._assertExpectedHTTP(result_status,
                                         c_exc.NexusConfigFailed)

    def test_nexus_extended_vlan_range_failure(self):
        """Test that extended VLAN range config errors are ignored.

        Some versions of Nexus switch do not allow state changes for
        the extended VLAN range (1006-4094), but these errors can be
        ignored (default values are appropriate). Test that such errors
        are ignored by the Nexus plugin.

        """
        def mock_edit_config_a(target, config):
            if all(word in config for word in ['state', 'active']):
                raise Exception("Can't modify state for extended")

        with self._patch_ncclient(
            'connect.return_value.edit_config.side_effect',
            mock_edit_config_a):
            with self._create_resources() as result_status:
                self.assertEqual(result_status, wexc.HTTPOk.code)

        def mock_edit_config_b(target, config):
            if all(word in config for word in ['no', 'shutdown']):
                raise Exception("Command is only allowed on VLAN")

        with self._patch_ncclient(
            'connect.return_value.edit_config.side_effect',
            mock_edit_config_b):
            with self._create_resources() as result_status:
                self.assertEqual(result_status, wexc.HTTPOk.code)

    def test_nexus_vlan_config_rollback(self):
        """Test rollback following Nexus VLAN state config failure.

        Test that the Cisco Nexus plugin correctly deletes the VLAN
        on the Nexus switch when the 'state active' command fails (for
        a reason other than state configuration change is rejected
        for the extended VLAN range).

        """
        def mock_edit_config(target, config):
            if all(word in config for word in ['state', 'active']):
                raise ValueError

        with self._patch_ncclient(
            'connect.return_value.edit_config.side_effect',
            mock_edit_config):
            with self._create_resources() as result_status:
                # Confirm that the last configuration sent to the Nexus
                # switch was deletion of the VLAN.
                self.assertTrue(self._is_in_last_nexus_cfg(['<no>', '<vlan>']))
                self._assertExpectedHTTP(result_status,
                                         c_exc.NexusConfigFailed)

    def test_nexus_host_not_configured(self):
        """Test handling of a NexusComputeHostNotConfigured exception.

        Test the Cisco NexusComputeHostNotConfigured exception by using
        a fictitious host name during port creation.

        """
        with self._create_resources(host_id='fake_host') as result_status:
            self._assertExpectedHTTP(result_status,
                                     c_exc.NexusComputeHostNotConfigured)

    def test_nexus_missing_fields(self):
        """Test handling of a NexusMissingRequiredFields exception.

        Test the Cisco NexusMissingRequiredFields exception by using
        empty host_id and device_id values during port creation.

        """
        with self._create_resources(device_id='', host_id='') as result_status:
            self._assertExpectedHTTP(result_status,
                                     c_exc.NexusMissingRequiredFields)


class TestCiscoNetworksV2(CiscoML2MechanismTestCase,
                          test_db_plugin.TestNetworksV2):
    """Network tests exercising the Cisco Nexus mechanism driver."""

    def test_create_networks_bulk_emulated_plugin_failure(self):
        real_has_attr = hasattr

        def fakehasattr(item, attr):
            if attr.endswith('__native_bulk_support'):
                return False
            return real_has_attr(item, attr)

        plugin_obj = NeutronManager.get_plugin()
        orig = plugin_obj.create_network
        #ensures the API choose the emulation code path
        with mock.patch('__builtin__.hasattr',
                        new=fakehasattr):
            with mock.patch.object(plugin_obj,
                                   'create_network') as patched_plugin:
                def side_effect(*args, **kwargs):
                    return self._do_side_effect(patched_plugin, orig,
                                                *args, **kwargs)
                patched_plugin.side_effect = side_effect
                res = self._create_network_bulk(self.fmt, 2, 'test', True)
                LOG.debug("response is %s" % res)
                # We expect an internal server error as we injected a fault
                self._validate_behavior_on_bulk_failure(
                    res,
                    'networks',
                    wexc.HTTPInternalServerError.code)

    def test_create_networks_bulk_native_plugin_failure(self):
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk network create")
        plugin_obj = NeutronManager.get_plugin()
        orig = plugin_obj.create_network
        with mock.patch.object(plugin_obj,
                               'create_network') as patched_plugin:

            def side_effect(*args, **kwargs):
                return self._do_side_effect(patched_plugin, orig,
                                            *args, **kwargs)

            patched_plugin.side_effect = side_effect
            res = self._create_network_bulk(self.fmt, 2, 'test', True)
            # We expect an internal server error as we injected a fault
            self._validate_behavior_on_bulk_failure(
                res,
                'networks',
                wexc.HTTPInternalServerError.code)


class TestCiscoSubnetsV2(CiscoML2MechanismTestCase,
                         test_db_plugin.TestSubnetsV2):
    """Subnet tests exercising the Cisco Nexus mechanism driver."""

    def test_create_subnets_bulk_emulated_plugin_failure(self):
        real_has_attr = hasattr

        #ensures the API choose the emulation code path
        def fakehasattr(item, attr):
            if attr.endswith('__native_bulk_support'):
                return False
            return real_has_attr(item, attr)

        with mock.patch('__builtin__.hasattr',
                        new=fakehasattr):
            plugin_obj = NeutronManager.get_plugin()
            orig = plugin_obj.create_subnet
            with mock.patch.object(plugin_obj,
                                   'create_subnet') as patched_plugin:

                def side_effect(*args, **kwargs):
                    # NOTE(review): unlike the other side_effect helpers this
                    # one does not return the result — confirm intentional.
                    self._do_side_effect(patched_plugin, orig,
                                         *args, **kwargs)

                patched_plugin.side_effect = side_effect
                with self.network() as net:
                    res = self._create_subnet_bulk(self.fmt, 2,
                                                   net['network']['id'],
                                                   'test')
                    # We expect an internal server error as we injected a fault
                    self._validate_behavior_on_bulk_failure(
                        res,
                        'subnets',
                        wexc.HTTPInternalServerError.code)

    def test_create_subnets_bulk_native_plugin_failure(self):
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk subnet create")
        plugin_obj = NeutronManager.get_plugin()
        orig = plugin_obj.create_subnet
        with mock.patch.object(plugin_obj,
                               'create_subnet') as patched_plugin:
            def side_effect(*args, **kwargs):
                return self._do_side_effect(patched_plugin, orig,
                                            *args, **kwargs)

            patched_plugin.side_effect = side_effect
            with self.network() as net:
                res = self._create_subnet_bulk(self.fmt, 2,
                                               net['network']['id'],
                                               'test')
                # We expect an internal server error as we injected a fault
                self._validate_behavior_on_bulk_failure(
                    res,
                    'subnets',
                    wexc.HTTPInternalServerError.code)


class TestCiscoPortsV2XML(TestCiscoPortsV2):
    # Same port tests, run with the XML wire format.
    fmt = 'xml'


class TestCiscoNetworksV2XML(TestCiscoNetworksV2):
    # Same network tests, run with the XML wire format.
    fmt = 'xml'


class TestCiscoSubnetsV2XML(TestCiscoSubnetsV2):
    # Same subnet tests, run with the XML wire format.
    fmt = 'xml'
'''Unit tests for the Dataset.py module'''

import unittest
from ocw.dataset import Dataset, Bounds
import numpy as np
import datetime as dt


class TestDatasetAttributes(unittest.TestCase):
    """Verify that Dataset stores and exposes its constructor arguments."""

    def setUp(self):
        # 12 monthly time steps over a 5x5 lat/lon grid.
        self.lat = np.array([10, 12, 14, 16, 18])
        self.lon = np.array([100, 102, 104, 106, 108])
        self.time = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
        flat_array = np.array(range(300))
        self.value = flat_array.reshape(12, 5, 5)
        self.variable = 'prec'
        self.name = 'foo'
        self.origin = {'path': '/a/fake/file/path'}
        self.test_dataset = Dataset(self.lat,
                                    self.lon,
                                    self.time,
                                    self.value,
                                    variable=self.variable,
                                    name=self.name,
                                    origin=self.origin)

    # NOTE(review): assertItemsEqual exists only in Python 2's unittest
    # (renamed assertCountEqual in Python 3) — confirm target interpreter.
    def test_lats(self):
        self.assertItemsEqual(self.test_dataset.lats, self.lat)

    def test_lons(self):
        self.assertItemsEqual(self.test_dataset.lons, self.lon)

    def test_times(self):
        self.assertItemsEqual(self.test_dataset.times, self.time)

    def test_values(self):
        # NOTE(review): this compares the two scalar results of .all(), not
        # the arrays elementwise — a very weak assertion; confirm intent.
        self.assertEqual(self.test_dataset.values.all(), self.value.all())

    def test_variable(self):
        self.assertEqual(self.test_dataset.variable, self.variable)

    def test_name(self):
        self.assertEqual(self.test_dataset.name, self.name)

    def test_origin(self):
        self.assertEqual(self.test_dataset.origin, self.origin)


class TestInvalidDatasetInit(unittest.TestCase):
    """Verify that Dataset rejects malformed constructor input."""

    def setUp(self):
        self.lat = np.array([10, 12, 14, 16, 18])
        self.lon = np.array([100, 102, 104, 106, 108])
        self.time = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
        flat_array = np.array(range(300))
        self.value = flat_array.reshape(12, 5, 5)
        self.values_in_wrong_order = flat_array.reshape(5, 5, 12)

    def test_bad_lat_shape(self):
        # Latitudes must be one-dimensional.
        self.lat = np.array([[1, 2], [3, 4]])
        with self.assertRaises(ValueError):
            Dataset(self.lat, self.lon, self.time, self.value, 'prec')

    def test_bad_lon_shape(self):
        # Longitudes must be one-dimensional.
        self.lon = np.array([[1, 2], [3, 4]])
        with self.assertRaises(ValueError):
            Dataset(self.lat, self.lon, self.time, self.value, 'prec')

    def test_bad_times_shape(self):
        # Times must be one-dimensional.
        self.time = np.array([[1, 2], [3, 4]])
        with self.assertRaises(ValueError):
            Dataset(self.lat, self.lon, self.time, self.value, 'prec')

    def test_bad_values_shape(self):
        # Values must be a (time, lat, lon) 3D array.
        self.value = np.array([1, 2, 3, 4, 5])
        with self.assertRaises(ValueError):
            Dataset(self.lat, self.lon, self.time, self.value, 'prec')

    def test_values_shape_mismatch(self):
        # If we change lats to this the shape of value will not match
        # up with the length of the lats array.
        self.lat = self.lat[:-2]
        with self.assertRaises(ValueError):
            Dataset(self.lat, self.lon, self.time, self.value, 'prec')

    def test_values_given_in_wrong_order(self):
        # A (lat, lon, time)-ordered array must be rejected.
        with self.assertRaises(ValueError):
            Dataset(self.lat, self.lon, self.time,
                    self.values_in_wrong_order)

    def test_lons_values_incorrectly_gridded(self):
        # Longitudes given as 0..359 should be normalized to -180..179.
        times = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
        lats = np.arange(-30, 30)
        bad_lons = np.arange(360)
        flat_array = np.arange(len(times) * len(lats) * len(bad_lons))
        values = flat_array.reshape(len(times), len(lats), len(bad_lons))

        ds = Dataset(lats, bad_lons, times, values)
        np.testing.assert_array_equal(ds.lons, np.arange(-180, 180))

    def test_reversed_lats(self):
        # Descending latitudes should be flipped to ascending order.
        ds = Dataset(self.lat[::-1], self.lon, self.time, self.value)
        np.testing.assert_array_equal(ds.lats, self.lat)


class TestDatasetFunctions(unittest.TestCase):
    """Verify Dataset's derived-value helper methods."""

    def setUp(self):
        self.lat = np.array([10, 12, 14, 16, 18])
        self.lon = np.array([100, 102, 104, 106, 108])
        self.time = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
        flat_array = np.array(range(300))
        self.value = flat_array.reshape(12, 5, 5)
        self.variable = 'prec'
        self.test_dataset = Dataset(self.lat, self.lon, self.time,
                                    self.value, self.variable)

    def test_spatial_boundaries(self):
        self.assertEqual(
            self.test_dataset.spatial_boundaries(),
            (min(self.lat), max(self.lat), min(self.lon), max(self.lon)))

    def test_time_range(self):
        self.assertEqual(
            self.test_dataset.time_range(),
            (dt.datetime(2000, 1, 1), dt.datetime(2000, 12, 1)))

    def test_spatial_resolution(self):
        # Grid spacing is 2 degrees in both dimensions.
        self.assertEqual(self.test_dataset.spatial_resolution(), (2, 2))

    def test_temporal_resolution(self):
        self.assertEqual(self.test_dataset.temporal_resolution(), 'monthly')


class TestBounds(unittest.TestCase):
    """Verify Bounds validation of lat/lon/time limits."""

    def setUp(self):
        self.bounds = Bounds(-80, 80,                 # Lats
                             -160, 160,               # Lons
                             dt.datetime(2000, 1, 1),  # Start time
                             dt.datetime(2002, 1, 1))  # End time

    # Latitude tests
    def test_inverted_min_max_lat(self):
        with self.assertRaises(ValueError):
            self.bounds.lat_min = 81

        with self.assertRaises(ValueError):
            self.bounds.lat_max = -81

    # Lat Min
    def test_out_of_bounds_lat_min(self):
        with self.assertRaises(ValueError):
            self.bounds.lat_min = -91

        with self.assertRaises(ValueError):
            self.bounds.lat_min = 91

    # Lat Max
    def test_out_of_bounds_lat_max(self):
        with self.assertRaises(ValueError):
            self.bounds.lat_max = -91

        with self.assertRaises(ValueError):
            self.bounds.lat_max = 91

    # Longitude tests
    def test_inverted_max_max_lon(self):
        with self.assertRaises(ValueError):
            self.bounds.lon_min = 161

        with self.assertRaises(ValueError):
            self.bounds.lon_max = -161

    # Lon Min
    def test_out_of_bounds_lon_min(self):
        with self.assertRaises(ValueError):
            self.bounds.lon_min = -181

        with self.assertRaises(ValueError):
            self.bounds.lon_min = 181

    # Lon Max
    def test_out_of_bounds_lon_max(self):
        with self.assertRaises(ValueError):
            self.bounds.lon_max = -181

        with self.assertRaises(ValueError):
            self.bounds.lon_max = 181

    # Temporal tests
    def test_inverted_start_end_times(self):
        with self.assertRaises(ValueError):
            self.bounds.start = dt.datetime(2003, 1, 1)

        with self.assertRaises(ValueError):
            self.bounds.end = dt.datetime(1999, 1, 1)

    # Start tests
    def test_invalid_start(self):
        with self.assertRaises(ValueError):
            self.bounds.start = "This is not a date time object"

    # End tests
    def test_invalid_end(self):
        with self.assertRaises(ValueError):
            self.bounds.end = "This is not a date time object"


if __name__ == '__main__':
    unittest.main()
"""Sphinx configuration for the Apache Flume documentation."""
import sys
import os
from datetime import date

# Disable gevent/eventlet monkey-patching if those libraries are importable;
# Sphinx only needs plain blocking I/O.
os.environ["GEVENT_NOPATCH"] = "yes"
os.environ["EVENTLET_NOPATCH"] = "yes"

# Directory containing this conf.py.  Both sys.path entries are anchored to
# it so the build does not depend on the current working directory (the
# original appended os.path.join(os.pardir, "tests") without `this`, which
# silently broke when sphinx-build was invoked from elsewhere).
this = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(this, os.pardir, "tests"))
sys.path.append(os.path.join(this, "_ext"))

extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    # NOTE(review): pngmath was removed in Sphinx 1.8 (replaced by imgmath);
    # keep it here since the pinned Sphinx version is not visible from this
    # file -- confirm before upgrading Sphinx.
    'sphinx.ext.pngmath',
    'sphinx.ext.intersphinx',
]

html_show_sphinx = False

templates_path = ['.templates']
source_suffix = '.rst'
master_doc = 'index'

project = 'Apache Flume'
# Keep the copyright year current at build time.
copyright = '2009-%s The Apache Software Foundation' % date.today().year

keep_warnings = True
exclude_trees = ['.build']
add_function_parentheses = True
pygments_style = 'trac'
# Do not guess a lexer for un-annotated literal blocks.
highlight_language = 'none'

html_logo = 'images/flume-logo.png'
html_use_smartypants = True
html_use_modindex = True
html_use_index = True
html_sidebars = {
    '**': ['localtoc.html', 'relations.html', 'sourcelink.html'],
}
"""Cauchy distribution""" __all__ = ['Cauchy'] from numbers import Number from numpy import nan, pi from .constraint import Real from .distribution import Distribution from .utils import sample_n_shape_converter from .... import np class Cauchy(Distribution): r"""Create a relaxed Cauchy distribution object. Parameters ---------- loc : Tensor or scalar, default 0 mode or median of the distribution scale : Tensor or scalar, default 1 half width at half maximum """ # pylint: disable=abstract-method has_grad = True support = Real() arg_constraints = {'loc': Real(), 'scale': Real()} def __init__(self, loc=0.0, scale=1.0, validate_args=None): self.loc = loc self.scale = scale super(Cauchy, self).__init__( event_dim=0, validate_args=validate_args) @property def mean(self): return nan @property def variance(self): return nan def sample(self, size=None): # TODO: Implement sampling op in the backend. # `np.zeros_like` does not support scalar at this moment. if (isinstance(self.loc, Number), isinstance(self.scale, Number)) == (True, True): u = np.random.uniform(size=size) else: u = np.random.uniform(np.zeros_like( # pylint: disable=too-many-function-args self.loc + self.scale), size=size) return self.icdf(u) def sample_n(self, size=None): return self.sample(sample_n_shape_converter(size)) def log_prob(self, value): if self._validate_args: self._validate_samples(value) return (-np.log(pi) - np.log(self.scale) - np.log(1 + ((value - self.loc) / self.scale) ** 2)) def cdf(self, value): if self._validate_args: self._validate_samples(value) return np.arctan((value - self.loc) / self.scale) / pi + 0.5 def icdf(self, value): return np.tan(pi * (value - 0.5)) * self.scale + self.loc def entropy(self): return np.log(4 * pi) + np.log(self.scale)
from django.conf.urls import url
from admin.nodes import views

app_name = 'admin'

# Admin-app URL table for node management.  Guid-parameterized routes accept
# lowercase alphanumeric identifiers only.
urlpatterns = [
    # Landing search form.
    url(r'^$', views.NodeFormView.as_view(),
        name='search'),
    # Spam-triage list views.
    url(r'^flagged_spam$', views.NodeFlaggedSpamList.as_view(),
        name='flagged-spam'),
    url(r'^known_spam$', views.NodeKnownSpamList.as_view(),
        name='known-spam'),
    url(r'^known_ham$', views.NodeKnownHamList.as_view(),
        name='known-ham'),
    # Single-node detail and its log feed.
    url(r'^(?P<guid>[a-z0-9]+)/$', views.NodeView.as_view(),
        name='node'),
    url(r'^(?P<guid>[a-z0-9]+)/logs/$', views.AdminNodeLogView.as_view(),
        name='node-logs'),
    # Registration listings.
    url(r'^registration_list/$', views.RegistrationListView.as_view(),
        name='registrations'),
    url(r'^stuck_registration_list/$', views.StuckRegistrationListView.as_view(),
        name='stuck-registrations'),
    url(r'^(?P<guid>[a-z0-9]+)/update_embargo/$',
        views.RegistrationUpdateEmbargoView.as_view(), name='update_embargo'),
    # NOTE(review): remove and restore intentionally share NodeDeleteView --
    # the view appears to toggle deletion state; confirm before splitting.
    url(r'^(?P<guid>[a-z0-9]+)/remove/$', views.NodeDeleteView.as_view(),
        name='remove'),
    url(r'^(?P<guid>[a-z0-9]+)/restore/$', views.NodeDeleteView.as_view(),
        name='restore'),
    # Spam-state transitions for a single node.
    url(r'^(?P<guid>[a-z0-9]+)/confirm_spam/$', views.NodeConfirmSpamView.as_view(),
        name='confirm-spam'),
    url(r'^(?P<guid>[a-z0-9]+)/confirm_ham/$', views.NodeConfirmHamView.as_view(),
        name='confirm-ham'),
    # Search-index maintenance.
    url(r'^(?P<guid>[a-z0-9]+)/reindex_share_node/$', views.NodeReindexShare.as_view(),
        name='reindex-share-node'),
    url(r'^(?P<guid>[a-z0-9]+)/reindex_elastic_node/$', views.NodeReindexElastic.as_view(),
        name='reindex-elastic-node'),
    # Stuck-registration recovery actions.
    url(r'^(?P<guid>[a-z0-9]+)/restart_stuck_registrations/$',
        views.RestartStuckRegistrationsView.as_view(), name='restart-stuck-registrations'),
    url(r'^(?P<guid>[a-z0-9]+)/remove_stuck_registrations/$',
        views.RemoveStuckRegistrationsView.as_view(), name='remove-stuck-registrations'),
    # Remove a contributor from a node.
    url(r'^(?P<guid>[a-z0-9]+)/remove_user/(?P<user_id>[a-z0-9]+)/$',
        views.NodeRemoveContributorView.as_view(), name='remove_user'),
]
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults


class DummyOperator(BaseOperator):
    """
    Operator that does literally nothing. It can be used to group tasks in a
    DAG.
    """

    # Color used for this operator's node in the Airflow UI graph views.
    ui_color = '#e8f7e4'

    @apply_defaults
    def __init__(self, *args, **kwargs) -> None:
        # No operator-specific arguments; everything (task_id, dag, ...)
        # is forwarded to BaseOperator.
        super().__init__(*args, **kwargs)

    def execute(self, context):
        # Intentionally a no-op: the task succeeds immediately.
        pass
import json

from lxml import objectify, etree

from django.contrib.auth.models import Group, User
from useradmin.models import HuePermission, GroupPermission, get_default_user_group

from hadoop import cluster
from desktop.lib import fsmanager


def grant_access(username, groupname, appname):
  """Grant `username` (via `groupname`) the 'access' permission on `appname`."""
  add_permission(username, groupname, 'access', appname)


def add_permission(username, groupname, permname, appname):
  """Ensure group/permission rows exist and attach the user to the group.

  Creates the group, the HuePermission and the GroupPermission link if
  missing, then adds the user to the group (idempotent).
  """
  user = User.objects.get(username=username)
  group, created = Group.objects.get_or_create(name=groupname)
  perm, created = HuePermission.objects.get_or_create(app=appname, action=permname)
  GroupPermission.objects.get_or_create(group=group, hue_permission=perm)

  if not user.groups.filter(name=group.name).exists():
    user.groups.add(group)
    user.save()


def add_to_group(username, groupname=None):
  """Add the user to `groupname`, defaulting to the site's default group."""
  if groupname is None:
    group = get_default_user_group()
    assert group is not None
    groupname = group.name

  user = User.objects.get(username=username)
  group, created = Group.objects.get_or_create(name=groupname)

  if not user.groups.filter(name=group.name).exists():
    user.groups.add(group)
    user.save()


def remove_from_group(username, groupname):
  """Remove the user from the group, creating the group row if absent."""
  user = User.objects.get(username=username)
  group, created = Group.objects.get_or_create(name=groupname)

  if user.groups.filter(name=group.name).exists():
    user.groups.remove(group)
    user.save()


def reformat_json(json_obj):
  """Normalize a JSON string or object into a canonical JSON string."""
  # Python 2 code path: basestring covers str and unicode.
  if isinstance(json_obj, basestring):
    return json.dumps(json.loads(json_obj))
  else:
    return json.dumps(json_obj)


def reformat_xml(xml_obj):
  """Normalize an XML string or element into a canonical serialized form."""
  if isinstance(xml_obj, basestring):
    # Preserve CDATA sections and drop insignificant whitespace so two
    # logically-equal documents serialize identically.
    return etree.tostring(objectify.fromstring(xml_obj, etree.XMLParser(strip_cdata=False, remove_blank_text=True)))
  else:
    return etree.tostring(xml_obj)


def clear_sys_caches():
  """Clear cluster and filesystem caches; returns the old state for restore."""
  return cluster.clear_caches(), fsmanager.clear_cache()


def restore_sys_caches(old_caches):
  """Restore the cache state previously returned by clear_sys_caches()."""
  cluster.restore_caches(old_caches[0])
  fsmanager.restore_cache(old_caches[1])
"""Support for the Foobot indoor air quality monitor.""" import asyncio from datetime import timedelta import logging import aiohttp from foobot_async import FoobotClient import voluptuous as vol from homeassistant.const import ( ATTR_TEMPERATURE, ATTR_TIME, CONF_TOKEN, CONF_USERNAME, TEMP_CELSIUS, ) from homeassistant.exceptions import PlatformNotReady from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.config_validation import PLATFORM_SCHEMA from homeassistant.helpers.entity import Entity from homeassistant.util import Throttle _LOGGER = logging.getLogger(__name__) ATTR_HUMIDITY = "humidity" ATTR_PM2_5 = "PM2.5" ATTR_CARBON_DIOXIDE = "CO2" ATTR_VOLATILE_ORGANIC_COMPOUNDS = "VOC" ATTR_FOOBOT_INDEX = "index" SENSOR_TYPES = { "time": [ATTR_TIME, "s"], "pm": [ATTR_PM2_5, "µg/m3", "mdi:cloud"], "tmp": [ATTR_TEMPERATURE, TEMP_CELSIUS, "mdi:thermometer"], "hum": [ATTR_HUMIDITY, "%", "mdi:water-percent"], "co2": [ATTR_CARBON_DIOXIDE, "ppm", "mdi:periodic-table-co2"], "voc": [ATTR_VOLATILE_ORGANIC_COMPOUNDS, "ppb", "mdi:cloud"], "allpollu": [ATTR_FOOBOT_INDEX, "%", "mdi:percent"], } SCAN_INTERVAL = timedelta(minutes=10) PARALLEL_UPDATES = 1 TIMEOUT = 10 PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Required(CONF_TOKEN): cv.string, vol.Required(CONF_USERNAME): cv.string} ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the devices associated with the account.""" token = config.get(CONF_TOKEN) username = config.get(CONF_USERNAME) client = FoobotClient( token, username, async_get_clientsession(hass), timeout=TIMEOUT ) dev = [] try: devices = await client.get_devices() _LOGGER.debug("The following devices were found: %s", devices) for device in devices: foobot_data = FoobotData(client, device["uuid"]) for sensor_type in SENSOR_TYPES: if sensor_type == "time": continue foobot_sensor = FoobotSensor(foobot_data, device, 
sensor_type) dev.append(foobot_sensor) except ( aiohttp.client_exceptions.ClientConnectorError, asyncio.TimeoutError, FoobotClient.TooManyRequests, FoobotClient.InternalError, ): _LOGGER.exception("Failed to connect to foobot servers.") raise PlatformNotReady except FoobotClient.ClientError: _LOGGER.error("Failed to fetch data from foobot servers.") return async_add_entities(dev, True) class FoobotSensor(Entity): """Implementation of a Foobot sensor.""" def __init__(self, data, device, sensor_type): """Initialize the sensor.""" self._uuid = device["uuid"] self.foobot_data = data self._name = "Foobot {} {}".format(device["name"], SENSOR_TYPES[sensor_type][0]) self.type = sensor_type self._unit_of_measurement = SENSOR_TYPES[sensor_type][1] @property def name(self): """Return the name of the sensor.""" return self._name @property def icon(self): """Icon to use in the frontend.""" return SENSOR_TYPES[self.type][2] @property def state(self): """Return the state of the device.""" try: data = self.foobot_data.data[self.type] except (KeyError, TypeError): data = None return data @property def unique_id(self): """Return the unique id of this entity.""" return f"{self._uuid}_{self.type}" @property def unit_of_measurement(self): """Return the unit of measurement of this entity.""" return self._unit_of_measurement async def async_update(self): """Get the latest data.""" await self.foobot_data.async_update() class FoobotData(Entity): """Get data from Foobot API.""" def __init__(self, client, uuid): """Initialize the data object.""" self._client = client self._uuid = uuid self.data = {} @Throttle(SCAN_INTERVAL) async def async_update(self): """Get the data from Foobot API.""" interval = SCAN_INTERVAL.total_seconds() try: response = await self._client.get_last_data( self._uuid, interval, interval + 1 ) except ( aiohttp.client_exceptions.ClientConnectorError, asyncio.TimeoutError, self._client.TooManyRequests, self._client.InternalError, ): _LOGGER.debug("Couldn't fetch data") 
return False _LOGGER.debug("The data response is: %s", response) self.data = {k: round(v, 1) for k, v in response[0].items()} return True
import six

from hamcrest.core.base_matcher import Matcher
from hamcrest.core.core.isequal import equal_to

__author__ = "Jon Reid"
__copyright__ = "Copyright 2011 hamcrest.org"
__license__ = "BSD, see License.txt"

import types


def wrap_matcher(x):
    """Wraps argument in a matcher, if necessary.

    :returns: the argument as-is if it is already a matcher, otherwise
        wrapped in an :py:func:`~hamcrest.core.core.isequal.equal_to` matcher.

    """
    return x if isinstance(x, Matcher) else equal_to(x)


def is_matchable_type(expected_type):
    """Return True if ``expected_type`` can be used in a type match.

    Accepts a class (new- or old-style) or a non-empty tuple whose members
    are all themselves matchable.
    """
    if isinstance(expected_type, type) or isinstance(expected_type, six.class_types):
        return True
    if isinstance(expected_type, tuple) and expected_type:
        return all(is_matchable_type(member) for member in expected_type)
    return False
""" Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved. This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import time import shutil import unittest2 as unittest from gppylib.db import dbconn from gppylib.commands.base import Command from gppylib.commands.gp import GpStart, GpStop import tinctest from tinctest.lib import local_path from mpp.lib.PSQL import PSQL from mpp.models import MPPTestCase class transactions(MPPTestCase): def test_skip_checkpoint_abort_transaction(self): """ @description FATAL failure execution handles already committed transactions properly @created 2013-04-19 00:00:00 @modified 2013-04-19 00:00:00 @tags transaction checkpoint MPP-17817 MPP-17925 MPP-17926 MPP-17927 MPP-17928 schedule_transaction @product_version gpdb: [4.1.2.5- main] Repro steps: 1. GPDB is up and running, number of segments is irrelevant, no master standby is required, no segment mirroring is required 2. inject fault on master for skipping checkpoints > gpfaultinjector -f checkpoint -m async -y skip -s 1 -o 0 3. inject fault 'fatal' on master, it aborts already committed local transaction > gpfaultinjector -p 4100 -m async -s 1 -f local_tm_record_transaction_commit -y panic_suppress 4. create table 'test' > psql template1 -c 'create table test(a int);' 5. 
connect in utility mode to master and create table, insert rows into table and truncate table > PGOPTIONS='-c gp_session_role=utility -c allow_system_table_mods=dml' psql -p 4100 template1 begin; create table test21(a int); insert into test21(a) values(10); truncate table test21; commit; 6. Wait 5 minutes 7. GPDB immediate shutdown and restart, GPDB does not come up with versions without fix, GPDB comes up with versions with fix > gpstop -air """ master_port = os.getenv("PGPORT", "5432") cmd = Command(name="gpfaultinjector", cmdStr="gpfaultinjector -f checkpoint -m async -y skip -s 1 -o 0") cmd.run() cmd = Command(name="gpfaultinjector", cmdStr="gpfaultinjector -p %s -m async -s 1 \ -f local_tm_record_transaction_commit -y panic_suppress" % master_port) cmd.run() PSQL.run_sql_command("create table mpp17817(a int)") sql_file = local_path('mpp17817.sql') PSQL.run_sql_file(sql_file, PGOPTIONS="-c gp_session_role=utility") time.sleep(300) cmd = Command(name="gpstop restart immediate", cmdStr="source %s/greenplum_path.sh;\ gpstop -air" % os.environ["GPHOME"]) cmd.run(validateAfter=True) # Cleanup PSQL.run_sql_command("drop table mpp17817") PSQL.run_sql_command("drop table mpp17817_21")
"""NEC plugin sharednet Revision ID: 3b54bf9e29f7 Revises: 511471cc46b Create Date: 2013-02-17 09:21:48.287134 """ revision = '3b54bf9e29f7' down_revision = '511471cc46b' migration_for_plugins = [ 'neutron.plugins.nec.nec_plugin.NECPluginV2' ] from alembic import op import sqlalchemy as sa from neutron.db import migration def upgrade(active_plugin=None, options=None): if not migration.should_run(active_plugin, migration_for_plugins): return op.create_table( 'ofctenantmappings', sa.Column('ofc_id', sa.String(length=255), nullable=False), sa.Column('quantum_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('quantum_id'), sa.UniqueConstraint('ofc_id') ) op.create_table( 'ofcnetworkmappings', sa.Column('ofc_id', sa.String(length=255), nullable=False), sa.Column('quantum_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('quantum_id'), sa.UniqueConstraint('ofc_id') ) op.create_table( 'ofcportmappings', sa.Column('ofc_id', sa.String(length=255), nullable=False), sa.Column('quantum_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('quantum_id'), sa.UniqueConstraint('ofc_id') ) op.create_table( 'ofcfiltermappings', sa.Column('ofc_id', sa.String(length=255), nullable=False), sa.Column('quantum_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('quantum_id'), sa.UniqueConstraint('ofc_id') ) def downgrade(active_plugin=None, options=None): if not migration.should_run(active_plugin, migration_for_plugins): return op.drop_table('ofcfiltermappings') op.drop_table('ofcportmappings') op.drop_table('ofcnetworkmappings') op.drop_table('ofctenantmappings')
from .. utils import TranspileTestCase, BuiltinFunctionTestCase


class PrintTests(TranspileTestCase):
    """Checks print() keyword arguments behave identically in CPython and
    the transpiled output (assertCodeExecution runs the snippet in both)."""

    def test_fileobj(self):
        # print(file=...) only requires a write() method on the target.
        self.assertCodeExecution("""
            class FileLikeObject:
                def __init__(self):
                    self.buffer = ''

                def write(self, content):
                    self.buffer = self.buffer + (content * 2)

            out = FileLikeObject()
            print('hello', 'world', file=out)
            print('goodbye', 'world', file=out)
            print()
            """)

    def test_sep(self):
        self.assertCodeExecution("""
            print('hello world', 'goodbye world', sep='-')
            print()
            """)

    def test_end(self):
        self.assertCodeExecution("""
            print('hello world', 'goodbye world', end='-')
            print()
            """)

    def test_flush(self):
        self.assertCodeExecution("""
            print('hello world', 'goodbye world', flush=True)
            print()
            """)

    def test_combined(self):
        # Exercises sep/end/file/flush together; flush requires a flush()
        # method on the file-like target.
        self.assertCodeExecution("""
            class FileLikeObject:
                def __init__(self):
                    self.buffer = ''

                def write(self, content):
                    self.buffer = self.buffer + (content * 2)

                def flush(self):
                    self.buffer = self.buffer + '<<<'

            out = FileLikeObject()
            print('hello', 'world', sep='*', end='-', file=out, flush=True)
            print('goodbye', 'world', file=out, sep='-', end='*')
            print()
            """)


class BuiltinPrintFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
    """Auto-generated single-argument print() tests from the base case."""
    functions = ["print"]

    # Known-failing argument types, excluded from the generated matrix.
    not_implemented = [
        'test_class',
        'test_frozenset',
        'test_slice',
    ]
# Filter the query's 'ast' tuples down to Field declarations that are
# public but NOT final; everything else is removed from the input set.
# NOTE(review): Query, Field and Modifier are supplied by the enclosing
# query framework -- their exact semantics are not visible from this file.
for astTuple in Query.input.tuples('ast'):
    # Exact-type check: subclasses of Field are intentionally excluded
    # (presumably -- confirm whether isinstance was meant).
    if type(astTuple.ast) is Field:
        modifiers = astTuple.ast.modifiers
        # Keep only fields that are public and not final.
        nonFinalPublic = modifiers.isSet(Modifier.ModifierFlag.Public) and not modifiers.isSet(Modifier.ModifierFlag.Final)
        if not nonFinalPublic:
            Query.input.remove(astTuple)
# Expose the filtered input set as this query's result.
Query.result = Query.input
""" This module contains some assorted functions used in tests """ from __future__ import absolute_import import os from importlib import import_module from twisted.trial.unittest import SkipTest from scrapy.exceptions import NotConfigured from scrapy.utils.boto import is_botocore def assert_aws_environ(): """Asserts the current environment is suitable for running AWS testsi. Raises SkipTest with the reason if it's not. """ skip_if_no_boto() if 'AWS_ACCESS_KEY_ID' not in os.environ: raise SkipTest("AWS keys not found") def assert_gcs_environ(): if 'GCS_PROJECT_ID' not in os.environ: raise SkipTest("GCS_PROJECT_ID not found") def skip_if_no_boto(): try: is_botocore() except NotConfigured as e: raise SkipTest(e) def get_s3_content_and_delete(bucket, path, with_key=False): """ Get content from s3 key, and delete key afterwards. """ if is_botocore(): import botocore.session session = botocore.session.get_session() client = session.create_client('s3') key = client.get_object(Bucket=bucket, Key=path) content = key['Body'].read() client.delete_object(Bucket=bucket, Key=path) else: import boto # assuming boto=2.2.2 bucket = boto.connect_s3().get_bucket(bucket, validate=False) key = bucket.get_key(path) content = key.get_contents_as_string() bucket.delete_key(path) return (content, key) if with_key else content def get_gcs_content_and_delete(bucket, path): from google.cloud import storage client = storage.Client(project=os.environ.get('GCS_PROJECT_ID')) bucket = client.get_bucket(bucket) blob = bucket.get_blob(path) content = blob.download_as_string() bucket.delete_blob(path) return content, blob def get_crawler(spidercls=None, settings_dict=None): """Return an unconfigured Crawler object. If settings_dict is given, it will be used to populate the crawler settings with a project level priority. 
""" from scrapy.crawler import CrawlerRunner from scrapy.spiders import Spider runner = CrawlerRunner(settings_dict) return runner.create_crawler(spidercls or Spider) def get_pythonpath(): """Return a PYTHONPATH suitable to use in processes so that they find this installation of Scrapy""" scrapy_path = import_module('scrapy').__path__[0] return os.path.dirname(scrapy_path) + os.pathsep + os.environ.get('PYTHONPATH', '') def get_testenv(): """Return a OS environment dict suitable to fork processes that need to import this installation of Scrapy, instead of a system installed one. """ env = os.environ.copy() env['PYTHONPATH'] = get_pythonpath() return env def assert_samelines(testcase, text1, text2, msg=None): """Asserts text1 and text2 have the same lines, ignoring differences in line endings between platforms """ testcase.assertEqual(text1.splitlines(), text2.splitlines(), msg)
"""Tests for jni_generator.py. This test suite contains various tests for the JNI generator. It exercises the low-level parser all the way up to the code generator and ensures the output matches a golden file. """ import difflib import inspect import optparse import os import sys import unittest import jni_generator from jni_generator import CalledByNative, JniParams, NativeMethod, Param SCRIPT_NAME = 'base/android/jni_generator/jni_generator.py' INCLUDES = ( 'base/android/jni_generator/jni_generator_helper.h' ) REBASELINE_ENV = 'REBASELINE' class TestOptions(object): """The mock options object which is passed to the jni_generator.py script.""" def __init__(self): self.namespace = None self.script_name = SCRIPT_NAME self.includes = INCLUDES self.ptr_type = 'long' self.cpp = 'cpp' self.javap = 'javap' self.native_exports = False self.native_exports_optional = False class TestGenerator(unittest.TestCase): def assertObjEquals(self, first, second): dict_first = first.__dict__ dict_second = second.__dict__ self.assertEquals(dict_first.keys(), dict_second.keys()) for key, value in dict_first.iteritems(): if (type(value) is list and len(value) and isinstance(type(value[0]), object)): self.assertListEquals(value, second.__getattribute__(key)) else: actual = second.__getattribute__(key) self.assertEquals(value, actual, 'Key ' + key + ': ' + str(value) + '!=' + str(actual)) def assertListEquals(self, first, second): self.assertEquals(len(first), len(second)) for i in xrange(len(first)): if isinstance(first[i], object): self.assertObjEquals(first[i], second[i]) else: self.assertEquals(first[i], second[i]) def assertTextEquals(self, golden_text, generated_text): if not self.compareText(golden_text, generated_text): self.fail('Golden text mismatch.') def compareText(self, golden_text, generated_text): def FilterText(text): return [ l.strip() for l in text.split('\n') if not l.startswith('// Copyright') ] stripped_golden = FilterText(golden_text) stripped_generated = 
FilterText(generated_text) if stripped_golden == stripped_generated: return True print self.id() for line in difflib.context_diff(stripped_golden, stripped_generated): print line print '\n\nGenerated' print '=' * 80 print generated_text print '=' * 80 print 'Run with:' print 'REBASELINE=1', sys.argv[0] print 'to regenerate the data files.' def _ReadGoldenFile(self, golden_file): if not os.path.exists(golden_file): return None with file(golden_file, 'r') as f: return f.read() def assertGoldenTextEquals(self, generated_text): script_dir = os.path.dirname(sys.argv[0]) # This is the caller test method. caller = inspect.stack()[1][3] self.assertTrue(caller.startswith('test'), 'assertGoldenTextEquals can only be called from a ' 'test* method, not %s' % caller) golden_file = os.path.join(script_dir, caller + '.golden') golden_text = self._ReadGoldenFile(golden_file) if os.environ.get(REBASELINE_ENV): if golden_text != generated_text: with file(golden_file, 'w') as f: f.write(generated_text) return self.assertTextEquals(golden_text, generated_text) def testInspectCaller(self): def willRaise(): # This function can only be called from a test* method. 
self.assertGoldenTextEquals('') self.assertRaises(AssertionError, willRaise) def testNatives(self): test_data = """" interface OnFrameAvailableListener {} private native int nativeInit(); private native void nativeDestroy(int nativeChromeBrowserProvider); private native long nativeAddBookmark( int nativeChromeBrowserProvider, String url, String title, boolean isFolder, long parentId); private static native String nativeGetDomainAndRegistry(String url); private static native void nativeCreateHistoricalTabFromState( byte[] state, int tab_index); private native byte[] nativeGetStateAsByteArray(View view); private static native String[] nativeGetAutofillProfileGUIDs(); private native void nativeSetRecognitionResults( int sessionId, String[] results); private native long nativeAddBookmarkFromAPI( int nativeChromeBrowserProvider, String url, Long created, Boolean isBookmark, Long date, byte[] favicon, String title, Integer visits); native int nativeFindAll(String find); private static native OnFrameAvailableListener nativeGetInnerClass(); private native Bitmap nativeQueryBitmap( int nativeChromeBrowserProvider, String[] projection, String selection, String[] selectionArgs, String sortOrder); private native void nativeGotOrientation( int nativeDataFetcherImplAndroid, double alpha, double beta, double gamma); private static native Throwable nativeMessWithJavaException(Throwable e); """ jni_generator.JniParams.SetFullyQualifiedClass( 'org/chromium/example/jni_generator/SampleForTests') jni_generator.JniParams.ExtractImportsAndInnerClasses(test_data) natives = jni_generator.ExtractNatives(test_data, 'int') golden_natives = [ NativeMethod(return_type='int', static=False, name='Init', params=[], java_class_name=None, type='function'), NativeMethod(return_type='void', static=False, name='Destroy', params=[Param(datatype='int', name='nativeChromeBrowserProvider')], java_class_name=None, type='method', p0_type='ChromeBrowserProvider'), NativeMethod(return_type='long', 
static=False, name='AddBookmark', params=[Param(datatype='int', name='nativeChromeBrowserProvider'), Param(datatype='String', name='url'), Param(datatype='String', name='title'), Param(datatype='boolean', name='isFolder'), Param(datatype='long', name='parentId')], java_class_name=None, type='method', p0_type='ChromeBrowserProvider'), NativeMethod(return_type='String', static=True, name='GetDomainAndRegistry', params=[Param(datatype='String', name='url')], java_class_name=None, type='function'), NativeMethod(return_type='void', static=True, name='CreateHistoricalTabFromState', params=[Param(datatype='byte[]', name='state'), Param(datatype='int', name='tab_index')], java_class_name=None, type='function'), NativeMethod(return_type='byte[]', static=False, name='GetStateAsByteArray', params=[Param(datatype='View', name='view')], java_class_name=None, type='function'), NativeMethod(return_type='String[]', static=True, name='GetAutofillProfileGUIDs', params=[], java_class_name=None, type='function'), NativeMethod(return_type='void', static=False, name='SetRecognitionResults', params=[Param(datatype='int', name='sessionId'), Param(datatype='String[]', name='results')], java_class_name=None, type='function'), NativeMethod(return_type='long', static=False, name='AddBookmarkFromAPI', params=[Param(datatype='int', name='nativeChromeBrowserProvider'), Param(datatype='String', name='url'), Param(datatype='Long', name='created'), Param(datatype='Boolean', name='isBookmark'), Param(datatype='Long', name='date'), Param(datatype='byte[]', name='favicon'), Param(datatype='String', name='title'), Param(datatype='Integer', name='visits')], java_class_name=None, type='method', p0_type='ChromeBrowserProvider'), NativeMethod(return_type='int', static=False, name='FindAll', params=[Param(datatype='String', name='find')], java_class_name=None, type='function'), NativeMethod(return_type='OnFrameAvailableListener', static=True, name='GetInnerClass', params=[], java_class_name=None, 
type='function'), NativeMethod(return_type='Bitmap', static=False, name='QueryBitmap', params=[Param(datatype='int', name='nativeChromeBrowserProvider'), Param(datatype='String[]', name='projection'), Param(datatype='String', name='selection'), Param(datatype='String[]', name='selectionArgs'), Param(datatype='String', name='sortOrder'), ], java_class_name=None, type='method', p0_type='ChromeBrowserProvider'), NativeMethod(return_type='void', static=False, name='GotOrientation', params=[Param(datatype='int', name='nativeDataFetcherImplAndroid'), Param(datatype='double', name='alpha'), Param(datatype='double', name='beta'), Param(datatype='double', name='gamma'), ], java_class_name=None, type='method', p0_type='content::DataFetcherImplAndroid'), NativeMethod(return_type='Throwable', static=True, name='MessWithJavaException', params=[Param(datatype='Throwable', name='e')], java_class_name=None, type='function') ] self.assertListEquals(golden_natives, natives) h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni', natives, [], [], TestOptions()) self.assertGoldenTextEquals(h.GetContent()) def testInnerClassNatives(self): test_data = """ class MyInnerClass { @NativeCall("MyInnerClass") private native int nativeInit(); } """ natives = jni_generator.ExtractNatives(test_data, 'int') golden_natives = [ NativeMethod(return_type='int', static=False, name='Init', params=[], java_class_name='MyInnerClass', type='function') ] self.assertListEquals(golden_natives, natives) h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni', natives, [], [], TestOptions()) self.assertGoldenTextEquals(h.GetContent()) def testInnerClassNativesMultiple(self): test_data = """ class MyInnerClass { @NativeCall("MyInnerClass") private native int nativeInit(); } class MyOtherInnerClass { @NativeCall("MyOtherInnerClass") private native int nativeInit(); } """ natives = jni_generator.ExtractNatives(test_data, 'int') golden_natives = [ NativeMethod(return_type='int', 
static=False, name='Init', params=[], java_class_name='MyInnerClass', type='function'), NativeMethod(return_type='int', static=False, name='Init', params=[], java_class_name='MyOtherInnerClass', type='function') ] self.assertListEquals(golden_natives, natives) h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni', natives, [], [], TestOptions()) self.assertGoldenTextEquals(h.GetContent()) def testInnerClassNativesBothInnerAndOuter(self): test_data = """ class MyOuterClass { private native int nativeInit(); class MyOtherInnerClass { @NativeCall("MyOtherInnerClass") private native int nativeInit(); } } """ natives = jni_generator.ExtractNatives(test_data, 'int') golden_natives = [ NativeMethod(return_type='int', static=False, name='Init', params=[], java_class_name=None, type='function'), NativeMethod(return_type='int', static=False, name='Init', params=[], java_class_name='MyOtherInnerClass', type='function') ] self.assertListEquals(golden_natives, natives) h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni', natives, [], [], TestOptions()) self.assertGoldenTextEquals(h.GetContent()) def testCalledByNatives(self): test_data = """" import android.graphics.Bitmap; import android.view.View; import java.io.InputStream; import java.util.List; class InnerClass {} @CalledByNative InnerClass showConfirmInfoBar(int nativeInfoBar, String buttonOk, String buttonCancel, String title, Bitmap icon) { InfoBar infobar = new ConfirmInfoBar(nativeInfoBar, mContext, buttonOk, buttonCancel, title, icon); return infobar; } @CalledByNative InnerClass showAutoLoginInfoBar(int nativeInfoBar, String realm, String account, String args) { AutoLoginInfoBar infobar = new AutoLoginInfoBar(nativeInfoBar, mContext, realm, account, args); if (infobar.displayedAccountCount() == 0) infobar = null; return infobar; } @CalledByNative("InfoBar") void dismiss(); @SuppressWarnings("unused") @CalledByNative private static boolean shouldShowAutoLogin(View view, String realm, 
String account, String args) { AccountManagerContainer accountManagerContainer = new AccountManagerContainer((Activity)contentView.getContext(), realm, account, args); String[] logins = accountManagerContainer.getAccountLogins(null); return logins.length != 0; } @CalledByNative static InputStream openUrl(String url) { return null; } @CalledByNative private void activateHardwareAcceleration(final boolean activated, final int iPid, final int iType, final int iPrimaryID, final int iSecondaryID) { if (!activated) { return } } @CalledByNativeUnchecked private void uncheckedCall(int iParam); @CalledByNative public byte[] returnByteArray(); @CalledByNative public boolean[] returnBooleanArray(); @CalledByNative public char[] returnCharArray(); @CalledByNative public short[] returnShortArray(); @CalledByNative public int[] returnIntArray(); @CalledByNative public long[] returnLongArray(); @CalledByNative public double[] returnDoubleArray(); @CalledByNative public Object[] returnObjectArray(); @CalledByNative public byte[][] returnArrayOfByteArray(); @CalledByNative public Bitmap.CompressFormat getCompressFormat(); @CalledByNative public List<Bitmap.CompressFormat> getCompressFormatList(); """ jni_generator.JniParams.SetFullyQualifiedClass('org/chromium/Foo') jni_generator.JniParams.ExtractImportsAndInnerClasses(test_data) called_by_natives = jni_generator.ExtractCalledByNatives(test_data) golden_called_by_natives = [ CalledByNative( return_type='InnerClass', system_class=False, static=False, name='showConfirmInfoBar', method_id_var_name='showConfirmInfoBar', java_class_name='', params=[Param(datatype='int', name='nativeInfoBar'), Param(datatype='String', name='buttonOk'), Param(datatype='String', name='buttonCancel'), Param(datatype='String', name='title'), Param(datatype='Bitmap', name='icon')], env_call=('Object', ''), unchecked=False, ), CalledByNative( return_type='InnerClass', system_class=False, static=False, name='showAutoLoginInfoBar', 
method_id_var_name='showAutoLoginInfoBar', java_class_name='', params=[Param(datatype='int', name='nativeInfoBar'), Param(datatype='String', name='realm'), Param(datatype='String', name='account'), Param(datatype='String', name='args')], env_call=('Object', ''), unchecked=False, ), CalledByNative( return_type='void', system_class=False, static=False, name='dismiss', method_id_var_name='dismiss', java_class_name='InfoBar', params=[], env_call=('Void', ''), unchecked=False, ), CalledByNative( return_type='boolean', system_class=False, static=True, name='shouldShowAutoLogin', method_id_var_name='shouldShowAutoLogin', java_class_name='', params=[Param(datatype='View', name='view'), Param(datatype='String', name='realm'), Param(datatype='String', name='account'), Param(datatype='String', name='args')], env_call=('Boolean', ''), unchecked=False, ), CalledByNative( return_type='InputStream', system_class=False, static=True, name='openUrl', method_id_var_name='openUrl', java_class_name='', params=[Param(datatype='String', name='url')], env_call=('Object', ''), unchecked=False, ), CalledByNative( return_type='void', system_class=False, static=False, name='activateHardwareAcceleration', method_id_var_name='activateHardwareAcceleration', java_class_name='', params=[Param(datatype='boolean', name='activated'), Param(datatype='int', name='iPid'), Param(datatype='int', name='iType'), Param(datatype='int', name='iPrimaryID'), Param(datatype='int', name='iSecondaryID'), ], env_call=('Void', ''), unchecked=False, ), CalledByNative( return_type='void', system_class=False, static=False, name='uncheckedCall', method_id_var_name='uncheckedCall', java_class_name='', params=[Param(datatype='int', name='iParam')], env_call=('Void', ''), unchecked=True, ), CalledByNative( return_type='byte[]', system_class=False, static=False, name='returnByteArray', method_id_var_name='returnByteArray', java_class_name='', params=[], env_call=('Void', ''), unchecked=False, ), CalledByNative( 
return_type='boolean[]', system_class=False, static=False, name='returnBooleanArray', method_id_var_name='returnBooleanArray', java_class_name='', params=[], env_call=('Void', ''), unchecked=False, ), CalledByNative( return_type='char[]', system_class=False, static=False, name='returnCharArray', method_id_var_name='returnCharArray', java_class_name='', params=[], env_call=('Void', ''), unchecked=False, ), CalledByNative( return_type='short[]', system_class=False, static=False, name='returnShortArray', method_id_var_name='returnShortArray', java_class_name='', params=[], env_call=('Void', ''), unchecked=False, ), CalledByNative( return_type='int[]', system_class=False, static=False, name='returnIntArray', method_id_var_name='returnIntArray', java_class_name='', params=[], env_call=('Void', ''), unchecked=False, ), CalledByNative( return_type='long[]', system_class=False, static=False, name='returnLongArray', method_id_var_name='returnLongArray', java_class_name='', params=[], env_call=('Void', ''), unchecked=False, ), CalledByNative( return_type='double[]', system_class=False, static=False, name='returnDoubleArray', method_id_var_name='returnDoubleArray', java_class_name='', params=[], env_call=('Void', ''), unchecked=False, ), CalledByNative( return_type='Object[]', system_class=False, static=False, name='returnObjectArray', method_id_var_name='returnObjectArray', java_class_name='', params=[], env_call=('Void', ''), unchecked=False, ), CalledByNative( return_type='byte[][]', system_class=False, static=False, name='returnArrayOfByteArray', method_id_var_name='returnArrayOfByteArray', java_class_name='', params=[], env_call=('Void', ''), unchecked=False, ), CalledByNative( return_type='Bitmap.CompressFormat', system_class=False, static=False, name='getCompressFormat', method_id_var_name='getCompressFormat', java_class_name='', params=[], env_call=('Void', ''), unchecked=False, ), CalledByNative( return_type='List<Bitmap.CompressFormat>', system_class=False, 
static=False, name='getCompressFormatList', method_id_var_name='getCompressFormatList', java_class_name='', params=[], env_call=('Void', ''), unchecked=False, ), ] self.assertListEquals(golden_called_by_natives, called_by_natives) h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni', [], called_by_natives, [], TestOptions()) self.assertGoldenTextEquals(h.GetContent()) def testCalledByNativeParseError(self): try: jni_generator.ExtractCalledByNatives(""" @CalledByNative public static int foo(); // This one is fine @CalledByNative scooby doo """) self.fail('Expected a ParseError') except jni_generator.ParseError, e: self.assertEquals(('@CalledByNative', 'scooby doo'), e.context_lines) def testFullyQualifiedClassName(self): contents = """ // Copyright (c) 2010 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package org.chromium.content.browser; import org.chromium.base.BuildInfo; """ self.assertEquals('org/chromium/content/browser/Foo', jni_generator.ExtractFullyQualifiedJavaClassName( 'org/chromium/content/browser/Foo.java', contents)) self.assertEquals('org/chromium/content/browser/Foo', jni_generator.ExtractFullyQualifiedJavaClassName( 'frameworks/Foo.java', contents)) self.assertRaises(SyntaxError, jni_generator.ExtractFullyQualifiedJavaClassName, 'com/foo/Bar', 'no PACKAGE line') def testMethodNameMangling(self): self.assertEquals('closeV', jni_generator.GetMangledMethodName('close', [], 'void')) self.assertEquals('readI_AB_I_I', jni_generator.GetMangledMethodName('read', [Param(name='p1', datatype='byte[]'), Param(name='p2', datatype='int'), Param(name='p3', datatype='int'),], 'int')) self.assertEquals('openJIIS_JLS', jni_generator.GetMangledMethodName('open', [Param(name='p1', datatype='java/lang/String'),], 'java/io/InputStream')) def testFromJavaPGenerics(self): contents = """ public abstract class java.util.HashSet<T> extends 
java.util.AbstractSet<E> implements java.util.Set<E>, java.lang.Cloneable, java.io.Serializable { public void dummy(); Signature: ()V } """ jni_from_javap = jni_generator.JNIFromJavaP(contents.split('\n'), TestOptions()) self.assertEquals(1, len(jni_from_javap.called_by_natives)) self.assertGoldenTextEquals(jni_from_javap.GetContent()) def testSnippnetJavap6_7_8(self): content_javap6 = """ public class java.util.HashSet { public boolean add(java.lang.Object); Signature: (Ljava/lang/Object;)Z } """ content_javap7 = """ public class java.util.HashSet { public boolean add(E); Signature: (Ljava/lang/Object;)Z } """ content_javap8 = """ public class java.util.HashSet { public boolean add(E); descriptor: (Ljava/lang/Object;)Z } """ jni_from_javap6 = jni_generator.JNIFromJavaP(content_javap6.split('\n'), TestOptions()) jni_from_javap7 = jni_generator.JNIFromJavaP(content_javap7.split('\n'), TestOptions()) jni_from_javap8 = jni_generator.JNIFromJavaP(content_javap8.split('\n'), TestOptions()) self.assertTrue(jni_from_javap6.GetContent()) self.assertTrue(jni_from_javap7.GetContent()) self.assertTrue(jni_from_javap8.GetContent()) # Ensure the javap7 is correctly parsed and uses the Signature field rather # than the "E" parameter. self.assertTextEquals(jni_from_javap6.GetContent(), jni_from_javap7.GetContent()) # Ensure the javap8 is correctly parsed and uses the descriptor field. 
self.assertTextEquals(jni_from_javap7.GetContent(), jni_from_javap8.GetContent()) def testFromJavaP(self): contents = self._ReadGoldenFile(os.path.join(os.path.dirname(sys.argv[0]), 'testInputStream.javap')) jni_from_javap = jni_generator.JNIFromJavaP(contents.split('\n'), TestOptions()) self.assertEquals(10, len(jni_from_javap.called_by_natives)) self.assertGoldenTextEquals(jni_from_javap.GetContent()) def testConstantsFromJavaP(self): for f in ['testMotionEvent.javap', 'testMotionEvent.javap7']: contents = self._ReadGoldenFile(os.path.join(os.path.dirname(sys.argv[0]), f)) jni_from_javap = jni_generator.JNIFromJavaP(contents.split('\n'), TestOptions()) self.assertEquals(86, len(jni_from_javap.called_by_natives)) self.assertGoldenTextEquals(jni_from_javap.GetContent()) def testREForNatives(self): # We should not match "native SyncSetupFlow" inside the comment. test_data = """ /** * Invoked when the setup process is complete so we can disconnect from the * native-side SyncSetupFlowHandler. 
*/ public void destroy() { Log.v(TAG, "Destroying native SyncSetupFlow"); if (mNativeSyncSetupFlow != 0) { nativeSyncSetupEnded(mNativeSyncSetupFlow); mNativeSyncSetupFlow = 0; } } private native void nativeSyncSetupEnded( int nativeAndroidSyncSetupFlowHandler); """ jni_from_java = jni_generator.JNIFromJavaSource( test_data, 'foo/bar', TestOptions()) def testRaisesOnNonJNIMethod(self): test_data = """ class MyInnerClass { private int Foo(int p0) { } } """ self.assertRaises(SyntaxError, jni_generator.JNIFromJavaSource, test_data, 'foo/bar', TestOptions()) def testJniSelfDocumentingExample(self): script_dir = os.path.dirname(sys.argv[0]) content = file(os.path.join(script_dir, 'java/src/org/chromium/example/jni_generator/SampleForTests.java') ).read() golden_file = os.path.join(script_dir, 'golden_sample_for_tests_jni.h') golden_content = file(golden_file).read() jni_from_java = jni_generator.JNIFromJavaSource( content, 'org/chromium/example/jni_generator/SampleForTests', TestOptions()) generated_text = jni_from_java.GetContent() if not self.compareText(golden_content, generated_text): if os.environ.get(REBASELINE_ENV): with file(golden_file, 'w') as f: f.write(generated_text) return self.fail('testJniSelfDocumentingExample') def testNoWrappingPreprocessorLines(self): test_data = """ package com.google.lookhowextremelylongiam.snarf.icankeepthisupallday; class ReallyLongClassNamesAreAllTheRage { private static native int nativeTest(); } """ jni_from_java = jni_generator.JNIFromJavaSource( test_data, ('com/google/lookhowextremelylongiam/snarf/' 'icankeepthisupallday/ReallyLongClassNamesAreAllTheRage'), TestOptions()) jni_lines = jni_from_java.GetContent().split('\n') line = filter(lambda line: line.lstrip().startswith('#ifndef'), jni_lines)[0] self.assertTrue(len(line) > 80, ('Expected #ifndef line to be > 80 chars: ', line)) def testImports(self): import_header = """ // Copyright (c) 2012 The Chromium Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package org.chromium.content.app; import android.app.Service; import android.content.Context; import android.content.Intent; import android.graphics.SurfaceTexture; import android.os.Bundle; import android.os.IBinder; import android.os.ParcelFileDescriptor; import android.os.Process; import android.os.RemoteException; import android.util.Log; import android.view.Surface; import java.util.ArrayList; import org.chromium.base.annotations.CalledByNative; import org.chromium.base.annotations.JNINamespace; import org.chromium.content.app.ContentMain; import org.chromium.content.browser.SandboxedProcessConnection; import org.chromium.content.common.ISandboxedProcessCallback; import org.chromium.content.common.ISandboxedProcessService; import org.chromium.content.common.WillNotRaise.AnException; import org.chromium.content.common.WillRaise.AnException; import static org.chromium.Bar.Zoo; class Foo { public static class BookmarkNode implements Parcelable { } public interface PasswordListObserver { } } """ jni_generator.JniParams.SetFullyQualifiedClass( 'org/chromium/content/app/Foo') jni_generator.JniParams.ExtractImportsAndInnerClasses(import_header) self.assertTrue('Lorg/chromium/content/common/ISandboxedProcessService' in jni_generator.JniParams._imports) self.assertTrue('Lorg/chromium/Bar/Zoo' in jni_generator.JniParams._imports) self.assertTrue('Lorg/chromium/content/app/Foo$BookmarkNode' in jni_generator.JniParams._inner_classes) self.assertTrue('Lorg/chromium/content/app/Foo$PasswordListObserver' in jni_generator.JniParams._inner_classes) self.assertEquals('Lorg/chromium/content/app/ContentMain$Inner;', jni_generator.JniParams.JavaToJni('ContentMain.Inner')) self.assertRaises(SyntaxError, jni_generator.JniParams.JavaToJni, 'AnException') def testJniParamsJavaToJni(self): self.assertTextEquals('I', JniParams.JavaToJni('int')) self.assertTextEquals('[B', 
JniParams.JavaToJni('byte[]')) self.assertTextEquals( '[Ljava/nio/ByteBuffer;', JniParams.JavaToJni('java/nio/ByteBuffer[]')) def testNativesLong(self): test_options = TestOptions() test_options.ptr_type = 'long' test_data = """" private native void nativeDestroy(long nativeChromeBrowserProvider); """ jni_generator.JniParams.ExtractImportsAndInnerClasses(test_data) natives = jni_generator.ExtractNatives(test_data, test_options.ptr_type) golden_natives = [ NativeMethod(return_type='void', static=False, name='Destroy', params=[Param(datatype='long', name='nativeChromeBrowserProvider')], java_class_name=None, type='method', p0_type='ChromeBrowserProvider', ptr_type=test_options.ptr_type), ] self.assertListEquals(golden_natives, natives) h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni', natives, [], [], test_options) self.assertGoldenTextEquals(h.GetContent()) def runNativeExportsOption(self, optional): test_data = """ package org.chromium.example.jni_generator; /** The pointer to the native Test. 
*/ long nativeTest; class Test { private static native int nativeStaticMethod(long nativeTest, int arg1); private native int nativeMethod(long nativeTest, int arg1); @CalledByNative private void testMethodWithParam(int iParam); @CalledByNative private String testMethodWithParamAndReturn(int iParam); @CalledByNative private static int testStaticMethodWithParam(int iParam); @CalledByNative private static double testMethodWithNoParam(); @CalledByNative private static String testStaticMethodWithNoParam(); class MyInnerClass { @NativeCall("MyInnerClass") private native int nativeInit(); } class MyOtherInnerClass { @NativeCall("MyOtherInnerClass") private native int nativeInit(); } } """ options = TestOptions() options.native_exports = True options.native_exports_optional = optional jni_from_java = jni_generator.JNIFromJavaSource( test_data, 'org/chromium/example/jni_generator/SampleForTests', options) return jni_from_java.GetContent() def testNativeExportsOption(self): content = self.runNativeExportsOption(False) self.assertGoldenTextEquals(content) def testNativeExportsOptionalOption(self): content = self.runNativeExportsOption(True) self.assertGoldenTextEquals(content) def testOuterInnerRaises(self): test_data = """ package org.chromium.media; @CalledByNative static int getCaptureFormatWidth(VideoCapture.CaptureFormat format) { return format.getWidth(); } """ def willRaise(): jni_generator.JNIFromJavaSource( test_data, 'org/chromium/media/VideoCaptureFactory', TestOptions()) self.assertRaises(SyntaxError, willRaise) def testSingleJNIAdditionalImport(self): test_data = """ package org.chromium.foo; @JNIAdditionalImport(Bar.class) class Foo { @CalledByNative private static void calledByNative(Bar.Callback callback) { } private static native void nativeDoSomething(Bar.Callback callback); } """ jni_from_java = jni_generator.JNIFromJavaSource(test_data, 'org/chromium/foo/Foo', TestOptions()) self.assertGoldenTextEquals(jni_from_java.GetContent()) def 
testMultipleJNIAdditionalImport(self): test_data = """ package org.chromium.foo; @JNIAdditionalImport({Bar1.class, Bar2.class}) class Foo { @CalledByNative private static void calledByNative(Bar1.Callback callback1, Bar2.Callback callback2) { } private static native void nativeDoSomething(Bar1.Callback callback1, Bar2.Callback callback2); } """ jni_from_java = jni_generator.JNIFromJavaSource(test_data, 'org/chromium/foo/Foo', TestOptions()) self.assertGoldenTextEquals(jni_from_java.GetContent()) def TouchStamp(stamp_path): dir_name = os.path.dirname(stamp_path) if not os.path.isdir(dir_name): os.makedirs() with open(stamp_path, 'a'): os.utime(stamp_path, None) def main(argv): parser = optparse.OptionParser() parser.add_option('--stamp', help='Path to touch on success.') options, _ = parser.parse_args(argv[1:]) test_result = unittest.main(argv=argv[0:1], exit=False) if test_result.result.wasSuccessful() and options.stamp: TouchStamp(options.stamp) return not test_result.result.wasSuccessful() if __name__ == '__main__': sys.exit(main(sys.argv))
import json

import mock

from sentry.plugins.helpers import get_option, set_option
from sentry.testutils import TestCase
from sentry.models import set_sentry_version, Option
from sentry.tasks.check_update import check_update, PYPI_URL


class CheckUpdateTest(TestCase):
    """Tests for the periodic task that checks PyPI for a newer Sentry."""

    # Version fixtures: OLD < CURRENT < NEW.  NEW is absurdly high so it is
    # always newer than whatever sentry.get_version() reports.
    OLD = '5.0.0'
    CURRENT = '5.5.0-DEV'
    NEW = '1000000000.5.1'

    # Option key under which the latest known version is persisted.
    KEY = 'sentry:latest_version'

    def test_run_check_update_task(self):
        # A successful PyPI response should store the advertised version.
        with mock.patch('sentry.tasks.check_update.fetch_url_content') as fetch:
            fetch.return_value = (
                None, None, json.dumps({'info': {'version': self.NEW}})
            )
            check_update()  # latest_version > current_version
            fetch.assert_called_once_with(PYPI_URL)
        self.assertEqual(get_option(key=self.KEY), self.NEW)

    def test_run_check_update_task_with_bad_response(self):
        # An empty/unparseable response must not write any option value.
        with mock.patch('sentry.tasks.check_update.fetch_url_content') as fetch:
            fetch.return_value = (None, None, '')
            check_update()  # latest_version == current_version
            fetch.assert_called_once_with(PYPI_URL)
        self.assertEqual(get_option(key=self.KEY), None)

    def test_set_sentry_version_empty_latest(self):
        # With no previously stored value, the latest version is stored as-is.
        set_sentry_version(latest=self.NEW)
        self.assertEqual(get_option(key=self.KEY), self.NEW)

    def test_set_sentry_version_new(self):
        # A newer version replaces the previously stored (older) one.
        set_option(self.KEY, self.OLD)
        with mock.patch('sentry.get_version') as get_version:
            get_version.return_value = self.CURRENT
            set_sentry_version(latest=self.NEW)
        self.assertEqual(Option.objects.get_value(key=self.KEY), self.NEW)

    def test_set_sentry_version_old(self):
        # An older version must never overwrite a newer stored value.
        set_option(self.KEY, self.NEW)
        with mock.patch('sentry.get_version') as get_version:
            get_version.return_value = self.CURRENT
            set_sentry_version(latest=self.OLD)
        self.assertEqual(Option.objects.get_value(key=self.KEY), self.NEW)
""" Tests for structural time series models Author: Chad Fulton License: Simplified-BSD """ from __future__ import division, absolute_import, print_function import numpy as np import pandas as pd import os import warnings from statsmodels.datasets import macrodata from statsmodels.tsa.statespace import structural from statsmodels.tsa.statespace.structural import UnobservedComponents from .results import results_structural from statsmodels.tools import add_constant from numpy.testing import assert_equal, assert_almost_equal, assert_raises, assert_allclose from nose.exc import SkipTest try: import matplotlib.pyplot as plt have_matplotlib = True except ImportError: have_matplotlib = False dta = macrodata.load_pandas().data dta.index = pd.date_range(start='1959-01-01', end='2009-07-01', freq='QS') def run_ucm(name): true = getattr(results_structural, name) for model in true['models']: kwargs = model.copy() kwargs.update(true['kwargs']) # Make a copy of the data values = dta.copy() freq = kwargs.pop('freq', None) if freq is not None: values.index = pd.date_range(start='1959-01-01', periods=len(dta), freq=freq) # Test pandas exog if 'exog' in kwargs: # Default value here is pd.Series object exog = np.log(values['realgdp']) # Also allow a check with a 1-dim numpy array if kwargs['exog'] == 'numpy': exog = exog.values.squeeze() kwargs['exog'] = exog # Create the model mod = UnobservedComponents(values['unemp'], **kwargs) # Smoke test for starting parameters, untransform, transform # Also test that transform and untransform are inverses mod.start_params assert_allclose(mod.start_params, mod.transform_params(mod.untransform_params(mod.start_params))) # Fit the model at the true parameters res_true = mod.filter(true['params']) # Check that the cycle bounds were computed correctly freqstr = freq[0] if freq is not None else values.index.freqstr[0] if freqstr == 'A': cycle_period_bounds = (1.5, 12) elif freqstr == 'Q': cycle_period_bounds = (1.5*4, 12*4) elif freqstr == 'M': 
cycle_period_bounds = (1.5*12, 12*12) else: # If we have no information on data frequency, require the # cycle frequency to be between 0 and pi cycle_period_bounds = (2, np.inf) # Test that the cycle frequency bound is correct assert_equal(mod.cycle_frequency_bound, (2*np.pi / cycle_period_bounds[1], 2*np.pi / cycle_period_bounds[0]) ) # Test that the likelihood is correct rtol = true.get('rtol', 1e-7) atol = true.get('atol', 0) assert_allclose(res_true.llf, true['llf'], rtol=rtol, atol=atol) # Smoke test for plot_components if have_matplotlib: fig = res_true.plot_components() plt.close(fig) # Now fit the model via MLE with warnings.catch_warnings(record=True) as w: res = mod.fit(disp=-1) # If we found a higher likelihood, no problem; otherwise check # that we're very close to that found by R if res.llf <= true['llf']: assert_allclose(res.llf, true['llf'], rtol=1e-4) # Smoke test for summary res.summary() def test_irregular(): run_ucm('irregular') def test_fixed_intercept(): warnings.simplefilter("always") with warnings.catch_warnings(record=True) as w: run_ucm('fixed_intercept') message = ("Specified model does not contain a stochastic element;" " irregular component added.") assert_equal(str(w[0].message), message) def test_deterministic_constant(): run_ucm('deterministic_constant') def test_random_walk(): run_ucm('random_walk') def test_local_level(): run_ucm('local_level') def test_fixed_slope(): run_ucm('fixed_slope') def test_fixed_slope(): warnings.simplefilter("always") with warnings.catch_warnings(record=True) as w: run_ucm('fixed_slope') message = ("Specified model does not contain a stochastic element;" " irregular component added.") assert_equal(str(w[0].message), message) def test_deterministic_trend(): run_ucm('deterministic_trend') def test_random_walk_with_drift(): run_ucm('random_walk_with_drift') def test_local_linear_deterministic_trend(): run_ucm('local_linear_deterministic_trend') def test_local_linear_trend(): run_ucm('local_linear_trend') def 
test_smooth_trend(): run_ucm('smooth_trend') def test_random_trend(): run_ucm('random_trend') def test_cycle(): run_ucm('cycle') def test_seasonal(): run_ucm('seasonal') def test_reg(): run_ucm('reg') def test_rtrend_ar1(): run_ucm('rtrend_ar1') def test_lltrend_cycle_seasonal_reg_ar1(): run_ucm('lltrend_cycle_seasonal_reg_ar1') def test_mle_reg(): endog = np.arange(100)*1.0 exog = endog*2 # Make the fit not-quite-perfect endog[::2] += 0.01 endog[1::2] -= 0.01 with warnings.catch_warnings(record=True) as w: mod1 = UnobservedComponents(endog, irregular=True, exog=exog, mle_regression=False) res1 = mod1.fit(disp=-1) mod2 = UnobservedComponents(endog, irregular=True, exog=exog, mle_regression=True) res2 = mod2.fit(disp=-1) assert_allclose(res1.regression_coefficients.filtered[0, -1], 0.5, atol=1e-5) assert_allclose(res2.params[1], 0.5, atol=1e-5) def test_specifications(): endog = [1, 2] # Test that when nothing specified, a warning is issued and the model that # is fit is one with irregular=True and nothing else. 
warnings.simplefilter("always") with warnings.catch_warnings(record=True) as w: mod = UnobservedComponents(endog) message = ("Specified model does not contain a stochastic element;" " irregular component added.") assert_equal(str(w[0].message), message) assert_equal(mod.trend_specification, 'irregular') # Test an invalid string trend specification assert_raises(ValueError, UnobservedComponents, endog, 'invalid spec') # Test that if a trend component is specified without a level component, # a warning is issued and a deterministic level component is added with warnings.catch_warnings(record=True) as w: mod = UnobservedComponents(endog, trend=True, irregular=True) message = ("Trend component specified without level component;" " deterministic level component added.") assert_equal(str(w[0].message), message) assert_equal(mod.trend_specification, 'deterministic trend') # Test that if a string specification is provided, a warning is issued if # the boolean attributes are also specified trend_attributes = ['irregular', 'trend', 'stochastic_level', 'stochastic_trend'] for attribute in trend_attributes: with warnings.catch_warnings(record=True) as w: kwargs = {attribute: True} mod = UnobservedComponents(endog, 'deterministic trend', **kwargs) message = ("Value of `%s` may be overridden when the trend" " component is specified using a model string." 
% attribute) assert_equal(str(w[0].message), message) # Test that a seasonal with period less than two is invalid assert_raises(ValueError, UnobservedComponents, endog, seasonal=1) def test_start_params(): # Test that the behavior is correct for multiple exogenous and / or # autoregressive components # Parameters nobs = int(1e4) beta = np.r_[10, -2] phi = np.r_[0.5, 0.1] # Generate data np.random.seed(1234) exog = np.c_[np.ones(nobs), np.arange(nobs)*1.0] eps = np.random.normal(size=nobs) endog = np.zeros(nobs+2) for t in range(1, nobs): endog[t+1] = phi[0] * endog[t] + phi[1] * endog[t-1] + eps[t] endog = endog[2:] endog += np.dot(exog, beta) # Now just test that the starting parameters are approximately what they # ought to be (could make this arbitrarily precise by increasing nobs, # but that would slow down the test for no real gain) mod = UnobservedComponents(endog, exog=exog, autoregressive=2) assert_allclose(mod.start_params, [1., 0.5, 0.1, 10, -2], atol=1e-1) def test_forecast(): endog = np.arange(50) + 10 exog = np.arange(50) mod = UnobservedComponents(endog, exog=exog, level='dconstant') res = mod.smooth([1e-15, 1]) actual = res.forecast(10, exog=np.arange(50,60)[:,np.newaxis]) desired = np.arange(50,60) + 10 assert_allclose(actual, desired)
from __future__ import print_function import os,sys from Chem import AllChem as Chem def flatten(x): """flatten(sequence) -> list Returns a single, flat list which contains all elements retrieved from the sequence and all nested sub-sequences (iterables). Examples: >>> [1, 2, [3,4], (5,6)] [1, 2, [3, 4], (5, 6)] >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)]) [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]""" result = [] for el in x: if hasattr(el, "__iter__") and not isinstance(el, basestring): result.extend(flatten(el)) else: result.append(el) return result def GetFrame(mol, mode='Scaff'): '''return a ganeric molecule defining the reduced scaffold of the input mol. mode can be 'Scaff' or 'RedScaff': Scaff -> chop off the side chains and return the scaffold RedScaff -> remove all linking chains and connect the rings directly at the atoms where the linker was ''' ring = mol.GetRingInfo() RingAtoms = flatten(ring.AtomRings()) NonRingAtoms = [ atom.GetIdx() for atom in mol.GetAtoms() if atom.GetIdx() not in RingAtoms ] RingNeighbors = [] Paths = [] for NonRingAtom in NonRingAtoms: for neighbor in mol.GetAtomWithIdx(NonRingAtom).GetNeighbors(): if neighbor.GetIdx() in RingAtoms: RingNeighbors.append(NonRingAtom) Paths.append([neighbor.GetIdx(),NonRingAtom]) #The ring Atoms having a non ring Nieghbor will be the start of a walk break PosConnectors = [x for x in NonRingAtoms if x not in RingNeighbors] #Only these Atoms are potential starting points of a Linker chain #print 'PosConnectors:' #print PosConnectors Framework = [ x for x in RingAtoms ] #Start a list of pathways which we will have to walk #print 'Path atoms:' #print Paths Linkers = [] while len(Paths)>0: NewPaths = [] for P in Paths: if P == None: print('ooh') else: for neighbor in mol.GetAtomWithIdx(P[-1]).GetNeighbors(): if neighbor.GetIdx() not in P: if neighbor.GetIdx() in NonRingAtoms: n = P[:] n.append(neighbor.GetIdx()) NewPaths.append(n[:]) elif neighbor.GetIdx() in RingAtoms: #print 
'adding the following path to Framework:' #print P n = P[:] n.append(neighbor.GetIdx()) Linkers.append(n) Framework=Framework+P[:] Paths = NewPaths[:] #print 'Linkers:',Linkers #print 'RingAtoms:',RingAtoms #em.AddBond(3,4,Chem.BondType.SINGLE) if mode == 'RedScaff': Framework = list(set(Framework)) todel = [] NonRingAtoms.sort(reverse=True) em = Chem.EditableMol(mol) BondsToAdd = [ sorted([i[0],i[-1]]) for i in Linkers ] mem = [] for i in BondsToAdd: if i not in mem: em.AddBond(i[0],i[1],Chem.BondType.SINGLE) mem.append(i) for i in NonRingAtoms: todel.append(i) for i in todel: em.RemoveAtom(i) m = em.GetMol() #===================================# # Now do the flattening of atoms and bonds! # Any heavy atom will become a carbon and any bond will become a single bond! # #===================================# #===================================# return m if mode == 'Scaff': Framework = list(set(Framework)) todel = [] NonRingAtoms.sort(reverse=True) for i in NonRingAtoms: if i != None: if i not in Framework: todel.append(i) em = Chem.EditableMol(mol) for i in todel: em.RemoveAtom(i) m = em.GetMol() #===================================# # Now do the flattening of atoms and bonds! # Any heavy atom will become a carbon and any bond will become a single bond!! 
# #===================================# #===================================# return m if __name__=='__main__': if len(sys.argv) < 2: print("No input file provided: Frames.py filetosprocess.ext") sys.exit(1) suppl = Chem.SDMolSupplier(sys.argv[1]) FrameDict = {} for mol in suppl: m = GetFrame(mol) cansmiles = Chem.MolToSmiles(m, isomericSmiles=True) if FrameDict.has_key(cansmiles): FrameDict[cansmiles].append(mol) else: FrameDict[cansmiles]=[mol,] counter=0 w=open('frames.smi','w') for key,item in FrameDict.items(): counter+=1 d=Chem.SDWriter(str(counter)+'.sdf') for i in item: i.SetProp('Scaffold',key) i.SetProp('Cluster',str(counter)) d.write(i) print(key,len(item)) w.write(key+'\t'+str(len(item))+'\n') w.close print('number of Clusters: %d' %(counter))
from django.shortcuts import render


def home(request):
    """Render the site home page with an empty template context."""
    context = {}
    return render(request, 'home.html', context)
__all__ = ['ScrollbarWdg', 'TestScrollbarWdg']

from tactic.ui.common import BaseRefreshWdg

from pyasm.web import DivWdg


class TestScrollbarWdg(BaseRefreshWdg):
    """Minimal fixed-size container used to exercise ScrollbarWdg."""

    def get_display(my):
        top = my.top
        top.add_style("width: 600px")
        top.add_style("height: 400px")
        return top


class ScrollbarWdg(BaseRefreshWdg):
    """Custom overlay scrollbar widget.

    Renders a thin, auto-hiding vertical scrollbar on the right edge of a
    content container (identified by the 'content_class' kwarg, default
    'spt_content') and wires the mouse/drag behaviors that scroll it.
    """

    def get_display(my):
        top = my.top
        top.add_class("spt_scrollbar_top")

        content = my.kwargs.get("content")
        content_class = my.kwargs.get("content_class")
        if not content_class:
            content_class = "spt_content"

        # Width of the scrollbar track in pixels.
        width = 8

        # Overlay the bar on the right edge; starts fully transparent and is
        # faded in by the mouseenter behavior below.
        top.add_style("width: %s" % width)
        top.add_style("position: absolute")
        top.add_style("top: 0px")
        top.add_style("right: 0px")
        top.add_color("background", "background")
        top.add_style("margin: 3px 5px")
        top.add_style("opacity: 0.0")

        # Install the shared spt.scrollbar JS namespace on load.
        top.add_behavior( {
            'type': 'load',
            'cbjs_action': my.get_onload_js()
        } )

        # Size the bar to its parent and attach hover/wheel handlers.
        top.add_behavior( {
            'type': 'load',
            'content_class': content_class,
            'cbjs_action': '''
            var parent = bvr.src_el.getParent("." + bvr.content_class);
            var size = parent.getSize();
            bvr.src_el.setStyle("height", size.y);

            var scrollbar = parent.getElement(".spt_scrollbar_top");
            parent.addEvent("mouseenter", function() {
                new Fx.Tween(scrollbar, {duration: 250}).start("opacity", 1.0);
            } );
            parent.addEvent("mouseleave", function() {
                new Fx.Tween(scrollbar, {duration: 250}).start("opacity", 0.0);
            } );
            parent.addEvent("keypress", function(evt) {
                new Fx.Tween(scrollbar, {duration: 250}).start("opacity", 0.0);
                console.log(evt);
            } );

            parent.addEvent("mousewheel", function(evt) {
                evt.stopPropagation();
                spt.scrollbar.content = parent;
                if (evt.wheel == 1) {
                    spt.scrollbar.scroll(15)
                }
                else {
                    spt.scrollbar.scroll(-15)
                }
            } );
            '''
        } )

        # The draggable thumb.
        bar = DivWdg()
        bar.add_class("spt_scrollbar")
        bar.add_class("hand")
        top.add(bar)
        bar.add_style("width: %s" % width)
        bar.add_style("height: 30px")
        bar.add_style("border: solid 1px black")
        bar.add_color("background", "background3")
        #bar.add_border()
        bar.add_style("border-radius: 5")
        bar.add_style("position: absolute")
        bar.add_style("top: 0px")

        # Delegate thumb dragging to the spt.scrollbar JS handlers.
        top.add_behavior( {
            'type': 'smart_drag',
            'bvr_match_class': 'spt_scrollbar',
            'ignore_default_motion' : True,
            "cbjs_setup": 'spt.scrollbar.drag_setup( evt, bvr, mouse_411 );',
            "cbjs_motion": 'spt.scrollbar.drag_motion( evt, bvr, mouse_411 );'
        } )

        return top

    def get_onload_js(my):
        # JS namespace with the drag/scroll implementation.
        # NOTE(review): the content element is looked up by the hard-coded id
        # "spt_SCROLL" rather than the content_class kwarg — confirm intended.
        return r'''
spt.scrollbar = {};

spt.scrollbar.mouse_start_y = null;
spt.scrollbar.el_start_y = null;

spt.scrollbar.top = null;
spt.scrollbar.content = null;

spt.scrollbar.drag_setup = function(evt, bvr, mouse_411) {
    spt.scrollbar.mouse_start_y = mouse_411.curr_y;
    var src_el = spt.behavior.get_bvr_src( bvr );
    var pos_y = parseInt(src_el.getStyle("top").replace("px", ""));
    spt.scrollbar.el_start_y = pos_y;

    spt.scrollbar.content = $("spt_SCROLL");
    spt.scrollbar.top = src_el.getParent(".spt_scrollbar_top")
}

spt.scrollbar.drag_motion = function(evt, bvr, mouse_411) {
    var src_el = spt.behavior.get_bvr_src( bvr );
    var dy = mouse_411.curr_y - spt.scrollbar.mouse_start_y;
    var pos_y = spt.scrollbar.el_start_y + dy;
    if (pos_y < 0) {
        return;
    }

    var content = spt.scrollbar.content;
    var content_size = spt.scrollbar.content.getSize();
    var top_size = spt.scrollbar.top.getSize();
    var bar_size = src_el.getSize();
    if (pos_y > top_size.y - bar_size.y - 5) {
        return;
    }

    bvr.src_el.setStyle("top", pos_y);

    //var content = bvr.src_el.getParent(".spt_content");
    content.setStyle("margin-top", -dy);
}

spt.scrollbar.scroll = function(dy) {
    spt.scrollbar.content = $("spt_SCROLL");
    var content = spt.scrollbar.content;
    var pos_y = parseInt(content.getStyle("margin-top").replace("px", ""));
    content.setStyle("margin-top", pos_y + dy);
}
        '''
from runtest import TestBase class TestCase(TestBase): def __init__(self): TestBase.__init__(self, 'abc', """ 62.202 us [28141] | __cxa_atexit(); [28141] | main() { [28141] | a() { [28141] | b() { [28141] | c() { 0.753 us [28141] | getpid(); 1.430 us [28141] | } /* c */ 1.915 us [28141] | } /* b */ 2.405 us [28141] | } /* a */ 3.005 us [28141] | } /* main */ """)
import helper import sys, os, unittest from duplicity import tempdir helper.setup() class TempDirTest(unittest.TestCase): def test_all(self): td = tempdir.default() self.assert_(td.mktemp() != td.mktemp()) dir = td.mktemp() os.mkdir(dir) os.rmdir(dir) fd, fname = td.mkstemp() os.close(fd) os.unlink(fname) td.forget(fname) fo, fname = td.mkstemp_file() fo.close() # don't forget, leave to cleanup() td.cleanup() if __name__ == "__main__": unittest.main()
""" Range tests """ import ipatests.test_webui.test_trust as trust_mod from ipatests.test_webui.ui_driver import screenshot from ipatests.test_webui.task_range import range_tasks ENTITY = 'idrange' PKEY = 'itest-range' class test_range(range_tasks): @screenshot def test_crud(self): """ Basic CRUD: range """ self.init_app() self.get_shifts() self.basic_crud(ENTITY, self.get_data(PKEY)) @screenshot def test_types(self): """ Test range types Only 'local' and 'ipa-ad-trust' types are tested since range validation made quite hard to test the other types: - 'ipa-ad-trust-posix' can be tested only with subdomains. - 'ipa-ad-winsync' and 'ipa-ipa-trust' and are not supported yet https://fedorahosted.org/freeipa/ticket/4323 """ self.init_app() self.get_shifts() pkey_local = 'itest-local' pkey_ad = 'itest-ad' pkey_posix = 'itest-ad-posix' pkey_winsync = 'itest-ad-winsync' pkey_trust = 'itest-ipa-trust' column = 'iparangetype' add = self.get_add_data(pkey_local) data = self.get_data(pkey_local, add_data=add) self.add_record(ENTITY, data) self.assert_record_value('local domain range', pkey_local, column) if self.has_trusts(): trust_tasks = trust_mod.trust_tasks() trust_data = trust_tasks.get_data() self.add_record(trust_mod.ENTITY, trust_data) domain = self.get_domain() self.navigate_to_entity(ENTITY) add = self.get_add_data(pkey_ad, range_type='ipa-ad-trust', domain=domain) data = self.get_data(pkey_ad, add_data=add) self.add_record(ENTITY, data, navigate=False) self.assert_record_value('Active Directory domain range', pkey_ad, column) self.delete(trust_mod.ENTITY, [trust_data]) self.navigate_to_entity(ENTITY) self.delete_record(pkey_ad) self.delete_record(pkey_local)
import os.path import time from django.core.management.base import BaseCommand from django.conf import settings import mitxmako.middleware as middleware from django.core.mail import send_mass_mail import sys import datetime middleware.MakoMiddleware() def chunks(l, n): """ Yield successive n-sized chunks from l. """ for i in xrange(0, len(l), n): yield l[i:i + n] class Command(BaseCommand): help = \ '''Sends an e-mail to all users in a text file. E.g. manage.py userlist.txt message logfile.txt rate userlist.txt -- list of all users message -- prefix for template with message logfile.txt -- where to log progress rate -- messages per second ''' log_file = None def hard_log(self, text): self.log_file.write(datetime.datetime.utcnow().isoformat() + ' -- ' + text + '\n') def handle(self, *args, **options): (user_file, message_base, logfilename, ratestr) = args users = [u.strip() for u in open(user_file).readlines()] message = middleware.lookup['main'].get_template('emails/' + message_base + "_body.txt").render() subject = middleware.lookup['main'].get_template('emails/' + message_base + "_subject.txt").render().strip() rate = int(ratestr) self.log_file = open(logfilename, "a+", buffering=0) i = 0 for users in chunks(users, rate): emails = [(subject, message, settings.DEFAULT_FROM_EMAIL, [u]) for u in users] self.hard_log(" ".join(users)) send_mass_mail(emails, fail_silently=False) time.sleep(1) print datetime.datetime.utcnow().isoformat(), i i = i + len(users) # Emergency interruptor if os.path.exists("/tmp/stopemails.txt"): self.log_file.close() sys.exit(-1) self.log_file.close()
import itertools import logging from functools import partial from itertools import repeat from lxml import etree from lxml.builder import E import openerp from openerp import SUPERUSER_ID, models from openerp import tools import openerp.exceptions from openerp.osv import fields, osv, expression from openerp.tools.translate import _ from openerp.http import request _logger = logging.getLogger(__name__) class res_groups(osv.osv): _name = "res.groups" _description = "Access Groups" _rec_name = 'full_name' _order = 'name' def _get_full_name(self, cr, uid, ids, field, arg, context=None): res = {} for g in self.browse(cr, uid, ids, context): if g.category_id: res[g.id] = '%s / %s' % (g.category_id.name, g.name) else: res[g.id] = g.name return res def _search_group(self, cr, uid, obj, name, args, context=None): operand = args[0][2] operator = args[0][1] lst = True if isinstance(operand, bool): domains = [[('name', operator, operand)], [('category_id.name', operator, operand)]] if operator in expression.NEGATIVE_TERM_OPERATORS == (not operand): return expression.AND(domains) else: return expression.OR(domains) if isinstance(operand, basestring): lst = False operand = [operand] where = [] for group in operand: values = filter(bool, group.split('/')) group_name = values.pop().strip() category_name = values and '/'.join(values).strip() or group_name group_domain = [('name', operator, lst and [group_name] or group_name)] category_domain = [('category_id.name', operator, lst and [category_name] or category_name)] if operator in expression.NEGATIVE_TERM_OPERATORS and not values: category_domain = expression.OR([category_domain, [('category_id', '=', False)]]) if (operator in expression.NEGATIVE_TERM_OPERATORS) == (not values): sub_where = expression.AND([group_domain, category_domain]) else: sub_where = expression.OR([group_domain, category_domain]) if operator in expression.NEGATIVE_TERM_OPERATORS: where = expression.AND([where, sub_where]) else: where = expression.OR([where, 
sub_where]) return where _columns = { 'name': fields.char('Name', required=True, translate=True), 'users': fields.many2many('res.users', 'res_groups_users_rel', 'gid', 'uid', 'Users'), 'model_access': fields.one2many('ir.model.access', 'group_id', 'Access Controls'), 'rule_groups': fields.many2many('ir.rule', 'rule_group_rel', 'group_id', 'rule_group_id', 'Rules', domain=[('global', '=', False)]), 'menu_access': fields.many2many('ir.ui.menu', 'ir_ui_menu_group_rel', 'gid', 'menu_id', 'Access Menu'), 'view_access': fields.many2many('ir.ui.view', 'ir_ui_view_group_rel', 'group_id', 'view_id', 'Views'), 'comment' : fields.text('Comment', size=250, translate=True), 'category_id': fields.many2one('ir.module.category', 'Application', select=True), 'full_name': fields.function(_get_full_name, type='char', string='Group Name', fnct_search=_search_group), } _sql_constraints = [ ('name_uniq', 'unique (category_id, name)', 'The name of the group must be unique within an application!') ] def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False): # add explicit ordering if search is sorted on full_name if order and order.startswith('full_name'): ids = super(res_groups, self).search(cr, uid, args, context=context) gs = self.browse(cr, uid, ids, context) gs.sort(key=lambda g: g.full_name, reverse=order.endswith('DESC')) gs = gs[offset:offset+limit] if limit else gs[offset:] return map(int, gs) return super(res_groups, self).search(cr, uid, args, offset, limit, order, context, count) def copy(self, cr, uid, id, default=None, context=None): group_name = self.read(cr, uid, [id], ['name'])[0]['name'] default.update({'name': _('%s (copy)')%group_name}) return super(res_groups, self).copy(cr, uid, id, default, context) def write(self, cr, uid, ids, vals, context=None): if 'name' in vals: if vals['name'].startswith('-'): raise osv.except_osv(_('Error'), _('The name of the group can not start with "-"')) res = super(res_groups, self).write(cr, uid, ids, 
vals, context=context) self.pool['ir.model.access'].call_cache_clearing_methods(cr) self.pool['res.users'].has_group.clear_cache(self.pool['res.users']) return res class res_users(osv.osv): """ User class. A res.users record models an OpenERP user and is different from an employee. res.users class now inherits from res.partner. The partner model is used to store the data related to the partner: lang, name, address, avatar, ... The user model is now dedicated to technical data. """ __admin_ids = {} _uid_cache = {} _inherits = { 'res.partner': 'partner_id', } _name = "res.users" _description = 'Users' def _set_new_password(self, cr, uid, id, name, value, args, context=None): if value is False: # Do not update the password if no value is provided, ignore silently. # For example web client submits False values for all empty fields. return if uid == id: # To change their own password users must use the client-specific change password wizard, # so that the new password is immediately used for further RPC requests, otherwise the user # will face unexpected 'Access Denied' exceptions. 
raise osv.except_osv(_('Operation Canceled'), _('Please use the change password wizard (in User Preferences or User menu) to change your own password.')) self.write(cr, uid, id, {'password': value}) def _get_password(self, cr, uid, ids, arg, karg, context=None): return dict.fromkeys(ids, '') _columns = { 'id': fields.integer('ID'), 'login_date': fields.date('Latest connection', select=1, copy=False), 'partner_id': fields.many2one('res.partner', required=True, string='Related Partner', ondelete='restrict', help='Partner-related data of the user', auto_join=True), 'login': fields.char('Login', size=64, required=True, help="Used to log into the system"), 'password': fields.char('Password', size=64, invisible=True, copy=False, help="Keep empty if you don't want the user to be able to connect on the system."), 'new_password': fields.function(_get_password, type='char', size=64, fnct_inv=_set_new_password, string='Set Password', help="Specify a value only when creating a user or if you're "\ "changing the user's password, otherwise leave empty. After "\ "a change of password, the user has to login again."), 'signature': fields.html('Signature'), 'active': fields.boolean('Active'), 'action_id': fields.many2one('ir.actions.actions', 'Home Action', help="If specified, this action will be opened at log on for this user, in addition to the standard menu."), 'groups_id': fields.many2many('res.groups', 'res_groups_users_rel', 'uid', 'gid', 'Groups'), # Special behavior for this field: res.company.search() will only return the companies # available to the current user (should be the user's companies?), when the user_preference # context is set. 
'company_id': fields.many2one('res.company', 'Company', required=True, help='The company this user is currently working for.', context={'user_preference': True}), 'company_ids':fields.many2many('res.company','res_company_users_rel','user_id','cid','Companies'), } # overridden inherited fields to bypass access rights, in case you have # access to the user but not its corresponding partner name = openerp.fields.Char(related='partner_id.name', inherited=True) email = openerp.fields.Char(related='partner_id.email', inherited=True) def on_change_login(self, cr, uid, ids, login, context=None): if login and tools.single_email_re.match(login): return {'value': {'email': login}} return {} def onchange_state(self, cr, uid, ids, state_id, context=None): partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)] return self.pool.get('res.partner').onchange_state(cr, uid, partner_ids, state_id, context=context) def onchange_type(self, cr, uid, ids, is_company, context=None): """ Wrapper on the user.partner onchange_type, because some calls to the partner form view applied to the user may trigger the partner.onchange_type method, but applied to the user object. """ partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)] return self.pool['res.partner'].onchange_type(cr, uid, partner_ids, is_company, context=context) def onchange_address(self, cr, uid, ids, use_parent_address, parent_id, context=None): """ Wrapper on the user.partner onchange_address, because some calls to the partner form view applied to the user may trigger the partner.onchange_type method, but applied to the user object. 
""" partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)] return self.pool['res.partner'].onchange_address(cr, uid, partner_ids, use_parent_address, parent_id, context=context) def _check_company(self, cr, uid, ids, context=None): return all(((this.company_id in this.company_ids) or not this.company_ids) for this in self.browse(cr, uid, ids, context)) _constraints = [ (_check_company, 'The chosen company is not in the allowed companies for this user', ['company_id', 'company_ids']), ] _sql_constraints = [ ('login_key', 'UNIQUE (login)', 'You can not have two users with the same login !') ] def _get_company(self,cr, uid, context=None, uid2=False): if not uid2: uid2 = uid # Use read() to compute default company, and pass load=_classic_write to # avoid useless name_get() calls. This will avoid prefetching fields # while computing default values for new db columns, as the # db backend may not be fully initialized yet. user_data = self.pool['res.users'].read(cr, uid, uid2, ['company_id'], context=context, load='_classic_write') comp_id = user_data['company_id'] return comp_id or False def _get_companies(self, cr, uid, context=None): c = self._get_company(cr, uid, context) if c: return [c] return False def _get_group(self,cr, uid, context=None): dataobj = self.pool.get('ir.model.data') result = [] try: dummy,group_id = dataobj.get_object_reference(cr, SUPERUSER_ID, 'base', 'group_user') result.append(group_id) dummy,group_id = dataobj.get_object_reference(cr, SUPERUSER_ID, 'base', 'group_partner_manager') result.append(group_id) except ValueError: # If these groups does not exists anymore pass return result def _get_default_image(self, cr, uid, context=None): return self.pool['res.partner']._get_default_image(cr, uid, False, colorize=True, context=context) _defaults = { 'password': '', 'active': True, 'customer': False, 'company_id': _get_company, 'company_ids': _get_companies, 'groups_id': _get_group, 'image': _get_default_image, } # 
User can write on a few of his own fields (but not his groups for example) SELF_WRITEABLE_FIELDS = ['password', 'signature', 'action_id', 'company_id', 'email', 'name', 'image', 'image_medium', 'image_small', 'lang', 'tz'] # User can read a few of his own fields SELF_READABLE_FIELDS = ['signature', 'company_id', 'login', 'email', 'name', 'image', 'image_medium', 'image_small', 'lang', 'tz', 'tz_offset', 'groups_id', 'partner_id', '__last_update'] def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'): def override_password(o): if 'password' in o and ('id' not in o or o['id'] != uid): o['password'] = '********' return o if fields and (ids == [uid] or ids == uid): for key in fields: if not (key in self.SELF_READABLE_FIELDS or key.startswith('context_')): break else: # safe fields only, so we read as super-user to bypass access rights uid = SUPERUSER_ID result = super(res_users, self).read(cr, uid, ids, fields=fields, context=context, load=load) canwrite = self.pool['ir.model.access'].check(cr, uid, 'res.users', 'write', False) if not canwrite: if isinstance(ids, (int, long)): result = override_password(result) else: result = map(override_password, result) return result def create(self, cr, uid, vals, context=None): user_id = super(res_users, self).create(cr, uid, vals, context=context) user = self.browse(cr, uid, user_id, context=context) if user.partner_id.company_id: user.partner_id.write({'company_id': user.company_id.id}) return user_id def write(self, cr, uid, ids, values, context=None): if not hasattr(ids, '__iter__'): ids = [ids] if ids == [uid]: for key in values.keys(): if not (key in self.SELF_WRITEABLE_FIELDS or key.startswith('context_')): break else: if 'company_id' in values: user = self.browse(cr, SUPERUSER_ID, uid, context=context) if not (values['company_id'] in user.company_ids.ids): del values['company_id'] uid = 1 # safe fields only, so we write as super-user to bypass access rights res = super(res_users, self).write(cr, 
uid, ids, values, context=context) if 'company_id' in values: for user in self.browse(cr, uid, ids, context=context): # if partner is global we keep it that way if user.partner_id.company_id and user.partner_id.company_id.id != values['company_id']: user.partner_id.write({'company_id': user.company_id.id}) # clear caches linked to the users self.pool['ir.model.access'].call_cache_clearing_methods(cr) clear = partial(self.pool['ir.rule'].clear_cache, cr) map(clear, ids) db = cr.dbname if db in self._uid_cache: for id in ids: if id in self._uid_cache[db]: del self._uid_cache[db][id] self.context_get.clear_cache(self) self.has_group.clear_cache(self) return res def unlink(self, cr, uid, ids, context=None): if 1 in ids: raise osv.except_osv(_('Can not remove root user!'), _('You can not remove the admin user as it is used internally for resources created by Odoo (updates, module installation, ...)')) db = cr.dbname if db in self._uid_cache: for id in ids: if id in self._uid_cache[db]: del self._uid_cache[db][id] return super(res_users, self).unlink(cr, uid, ids, context=context) def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100): if not args: args=[] if not context: context={} ids = [] if name and operator in ['=', 'ilike']: ids = self.search(cr, user, [('login','=',name)]+ args, limit=limit, context=context) if not ids: ids = self.search(cr, user, [('name',operator,name)]+ args, limit=limit, context=context) return self.name_get(cr, user, ids, context=context) def copy(self, cr, uid, id, default=None, context=None): user2copy = self.read(cr, uid, [id], ['login','name'])[0] default = dict(default or {}) if ('name' not in default) and ('partner_id' not in default): default['name'] = _("%s (copy)") % user2copy['name'] if 'login' not in default: default['login'] = _("%s (copy)") % user2copy['login'] return super(res_users, self).copy(cr, uid, id, default, context) @tools.ormcache(skiparg=2) def context_get(self, cr, uid, 
context=None): user = self.browse(cr, SUPERUSER_ID, uid, context) result = {} for k in self._fields: if k.startswith('context_'): context_key = k[8:] elif k in ['lang', 'tz']: context_key = k else: context_key = False if context_key: res = getattr(user, k) or False if isinstance(res, models.BaseModel): res = res.id result[context_key] = res or False return result def action_get(self, cr, uid, context=None): dataobj = self.pool['ir.model.data'] data_id = dataobj._get_id(cr, SUPERUSER_ID, 'base', 'action_res_users_my') return dataobj.browse(cr, uid, data_id, context=context).res_id def check_super(self, passwd): if passwd == tools.config['admin_passwd']: return True else: raise openerp.exceptions.AccessDenied() def check_credentials(self, cr, uid, password): """ Override this method to plug additional authentication methods""" res = self.search(cr, SUPERUSER_ID, [('id','=',uid),('password','=',password)]) if not res: raise openerp.exceptions.AccessDenied() def _login(self, db, login, password): if not password: return False user_id = False cr = self.pool.cursor() try: # autocommit: our single update request will be performed atomically. # (In this way, there is no opportunity to have two transactions # interleaving their cr.execute()..cr.commit() calls and have one # of them rolled back due to a concurrent access.) cr.autocommit(True) # check if user exists res = self.search(cr, SUPERUSER_ID, [('login','=',login)]) if res: user_id = res[0] # check credentials self.check_credentials(cr, user_id, password) # We effectively unconditionally write the res_users line. # Even w/ autocommit there's a chance the user row will be locked, # in which case we can't delay the login just for the purpose of # update the last login date - hence we use FOR UPDATE NOWAIT to # try to get the lock - fail-fast # Failing to acquire the lock on the res_users row probably means # another request is holding it. No big deal, we don't want to # prevent/delay login in that case. 
It will also have been logged # as a SQL error, if anyone cares. try: # NO KEY introduced in PostgreSQL 9.3 http://www.postgresql.org/docs/9.3/static/release-9-3.html#AEN115299 update_clause = 'NO KEY UPDATE' if cr._cnx.server_version >= 90300 else 'UPDATE' cr.execute("SELECT id FROM res_users WHERE id=%%s FOR %s NOWAIT" % update_clause, (user_id,), log_exceptions=False) cr.execute("UPDATE res_users SET login_date = now() AT TIME ZONE 'UTC' WHERE id=%s", (user_id,)) self.invalidate_cache(cr, user_id, ['login_date'], [user_id]) except Exception: _logger.debug("Failed to update last_login for db:%s login:%s", db, login, exc_info=True) except openerp.exceptions.AccessDenied: _logger.info("Login failed for db:%s login:%s", db, login) user_id = False finally: cr.close() return user_id def authenticate(self, db, login, password, user_agent_env): """Verifies and returns the user ID corresponding to the given ``login`` and ``password`` combination, or False if there was no matching user. :param str db: the database on which user is trying to authenticate :param str login: username :param str password: user password :param dict user_agent_env: environment dictionary describing any relevant environment attributes """ uid = self._login(db, login, password) if uid == openerp.SUPERUSER_ID: # Successfully logged in as admin! # Attempt to guess the web base url... 
if user_agent_env and user_agent_env.get('base_location'): cr = self.pool.cursor() try: base = user_agent_env['base_location'] ICP = self.pool['ir.config_parameter'] if not ICP.get_param(cr, uid, 'web.base.url.freeze'): ICP.set_param(cr, uid, 'web.base.url', base) cr.commit() except Exception: _logger.exception("Failed to update web.base.url configuration parameter") finally: cr.close() return uid def check(self, db, uid, passwd): """Verifies that the given (uid, password) is authorized for the database ``db`` and raise an exception if it is not.""" if not passwd: # empty passwords disallowed for obvious security reasons raise openerp.exceptions.AccessDenied() if self._uid_cache.get(db, {}).get(uid) == passwd: return cr = self.pool.cursor() try: self.check_credentials(cr, uid, passwd) if self._uid_cache.has_key(db): self._uid_cache[db][uid] = passwd else: self._uid_cache[db] = {uid:passwd} finally: cr.close() def change_password(self, cr, uid, old_passwd, new_passwd, context=None): """Change current user password. Old password must be provided explicitly to prevent hijacking an existing user session, or for cases where the cleartext password is not used to authenticate requests. :return: True :raise: openerp.exceptions.AccessDenied when old password is wrong :raise: except_osv when new password is not set or empty """ self.check(cr.dbname, uid, old_passwd) if new_passwd: return self.write(cr, uid, uid, {'password': new_passwd}) raise osv.except_osv(_('Warning!'), _("Setting empty passwords is not allowed for security reasons!")) def preference_save(self, cr, uid, ids, context=None): return { 'type': 'ir.actions.client', 'tag': 'reload_context', } def preference_change_password(self, cr, uid, ids, context=None): return { 'type': 'ir.actions.client', 'tag': 'change_password', 'target': 'new', } @tools.ormcache(skiparg=2) def has_group(self, cr, uid, group_ext_id): """Checks whether user belongs to given group. 
:param str group_ext_id: external ID (XML ID) of the group. Must be provided in fully-qualified form (``module.ext_id``), as there is no implicit module to use.. :return: True if the current user is a member of the group with the given external ID (XML ID), else False. """ assert group_ext_id and '.' in group_ext_id, "External ID must be fully qualified" module, ext_id = group_ext_id.split('.') cr.execute("""SELECT 1 FROM res_groups_users_rel WHERE uid=%s AND gid IN (SELECT res_id FROM ir_model_data WHERE module=%s AND name=%s)""", (uid, module, ext_id)) return bool(cr.fetchone()) class cset(object): """ A cset (constrained set) is a set of elements that may be constrained to be a subset of other csets. Elements added to a cset are automatically added to its supersets. Cycles in the subset constraints are supported. """ def __init__(self, xs): self.supersets = set() self.elements = set(xs) def subsetof(self, other): if other is not self: self.supersets.add(other) other.update(self.elements) def update(self, xs): xs = set(xs) - self.elements if xs: # xs will eventually be empty in case of a cycle self.elements.update(xs) for s in self.supersets: s.update(xs) def __iter__(self): return iter(self.elements) concat = itertools.chain.from_iterable class groups_implied(osv.osv): _inherit = 'res.groups' def _get_trans_implied(self, cr, uid, ids, field, arg, context=None): "computes the transitive closure of relation implied_ids" memo = {} # use a memo for performance and cycle avoidance def computed_set(g): if g not in memo: memo[g] = cset(g.implied_ids) for h in g.implied_ids: computed_set(h).subsetof(memo[g]) return memo[g] res = {} for g in self.browse(cr, SUPERUSER_ID, ids, context): res[g.id] = map(int, computed_set(g)) return res _columns = { 'implied_ids': fields.many2many('res.groups', 'res_groups_implied_rel', 'gid', 'hid', string='Inherits', help='Users of this group automatically inherit those groups'), 'trans_implied_ids': fields.function(_get_trans_implied, 
type='many2many', relation='res.groups', string='Transitively inherits'), } def create(self, cr, uid, values, context=None): users = values.pop('users', None) gid = super(groups_implied, self).create(cr, uid, values, context) if users: # delegate addition of users to add implied groups self.write(cr, uid, [gid], {'users': users}, context) return gid def write(self, cr, uid, ids, values, context=None): res = super(groups_implied, self).write(cr, uid, ids, values, context) if values.get('users') or values.get('implied_ids'): # add all implied groups (to all users of each group) for g in self.browse(cr, uid, ids, context=context): gids = map(int, g.trans_implied_ids) vals = {'users': [(4, u.id) for u in g.users]} super(groups_implied, self).write(cr, uid, gids, vals, context) return res class users_implied(osv.osv): _inherit = 'res.users' def create(self, cr, uid, values, context=None): groups = values.pop('groups_id', None) user_id = super(users_implied, self).create(cr, uid, values, context) if groups: # delegate addition of groups to add implied groups self.write(cr, uid, [user_id], {'groups_id': groups}, context) self.pool['ir.ui.view'].clear_cache() return user_id def write(self, cr, uid, ids, values, context=None): if not isinstance(ids,list): ids = [ids] res = super(users_implied, self).write(cr, uid, ids, values, context) if values.get('groups_id'): # add implied groups for all users for user in self.browse(cr, uid, ids): gs = set(concat(g.trans_implied_ids for g in user.groups_id)) vals = {'groups_id': [(4, g.id) for g in gs]} super(users_implied, self).write(cr, uid, [user.id], vals, context) self.pool['ir.ui.view'].clear_cache() return res def name_boolean_group(id): return 'in_group_' + str(id) def name_selection_groups(ids): return 'sel_groups_' + '_'.join(map(str, ids)) def is_boolean_group(name): return name.startswith('in_group_') def is_selection_groups(name): return name.startswith('sel_groups_') def is_reified_group(name): return 
is_boolean_group(name) or is_selection_groups(name) def get_boolean_group(name): return int(name[9:]) def get_selection_groups(name): return map(int, name[11:].split('_')) def partition(f, xs): "return a pair equivalent to (filter(f, xs), filter(lambda x: not f(x), xs))" yes, nos = [], [] for x in xs: (yes if f(x) else nos).append(x) return yes, nos def parse_m2m(commands): "return a list of ids corresponding to a many2many value" ids = [] for command in commands: if isinstance(command, (tuple, list)): if command[0] in (1, 4): ids.append(command[2]) elif command[0] == 5: ids = [] elif command[0] == 6: ids = list(command[2]) else: ids.append(command) return ids class groups_view(osv.osv): _inherit = 'res.groups' def create(self, cr, uid, values, context=None): res = super(groups_view, self).create(cr, uid, values, context) self.update_user_groups_view(cr, uid, context) return res def write(self, cr, uid, ids, values, context=None): res = super(groups_view, self).write(cr, uid, ids, values, context) self.update_user_groups_view(cr, uid, context) return res def unlink(self, cr, uid, ids, context=None): res = super(groups_view, self).unlink(cr, uid, ids, context) self.update_user_groups_view(cr, uid, context) return res def update_user_groups_view(self, cr, uid, context=None): # the view with id 'base.user_groups_view' inherits the user form view, # and introduces the reified group fields # we have to try-catch this, because at first init the view does not exist # but we are already creating some basic groups view = self.pool['ir.model.data'].xmlid_to_object(cr, SUPERUSER_ID, 'base.user_groups_view', context=context) if view and view.exists() and view._name == 'ir.ui.view': xml1, xml2 = [], [] xml1.append(E.separator(string=_('Application'), colspan="4")) for app, kind, gs in self.get_groups_by_application(cr, uid, context): # hide groups in category 'Hidden' (except to group_no_one) attrs = {'groups': 'base.group_no_one'} if app and app.xml_id == 
'base.module_category_hidden' else {} if kind == 'selection': # application name with a selection field field_name = name_selection_groups(map(int, gs)) xml1.append(E.field(name=field_name, **attrs)) xml1.append(E.newline()) else: # application separator with boolean fields app_name = app and app.name or _('Other') xml2.append(E.separator(string=app_name, colspan="4", **attrs)) for g in gs: field_name = name_boolean_group(g.id) xml2.append(E.field(name=field_name, **attrs)) xml = E.field(*(xml1 + xml2), name="groups_id", position="replace") xml.addprevious(etree.Comment("GENERATED AUTOMATICALLY BY GROUPS")) xml_content = etree.tostring(xml, pretty_print=True, xml_declaration=True, encoding="utf-8") view.write({'arch': xml_content}) return True def get_application_groups(self, cr, uid, domain=None, context=None): return self.search(cr, uid, domain or []) def get_groups_by_application(self, cr, uid, context=None): """ return all groups classified by application (module category), as a list of pairs: [(app, kind, [group, ...]), ...], where app and group are browse records, and kind is either 'boolean' or 'selection'. Applications are given in sequence order. If kind is 'selection', the groups are given in reverse implication order. 
""" def linearized(gs): gs = set(gs) # determine sequence order: a group should appear after its implied groups order = dict.fromkeys(gs, 0) for g in gs: for h in gs.intersection(g.trans_implied_ids): order[h] -= 1 # check whether order is total, i.e., sequence orders are distinct if len(set(order.itervalues())) == len(gs): return sorted(gs, key=lambda g: order[g]) return None # classify all groups by application gids = self.get_application_groups(cr, uid, context=context) by_app, others = {}, [] for g in self.browse(cr, uid, gids, context): if g.category_id: by_app.setdefault(g.category_id, []).append(g) else: others.append(g) # build the result res = [] apps = sorted(by_app.iterkeys(), key=lambda a: a.sequence or 0) for app in apps: gs = linearized(by_app[app]) if gs: res.append((app, 'selection', gs)) else: res.append((app, 'boolean', by_app[app])) if others: res.append((False, 'boolean', others)) return res class users_view(osv.osv): _inherit = 'res.users' def create(self, cr, uid, values, context=None): values = self._remove_reified_groups(values) return super(users_view, self).create(cr, uid, values, context) def write(self, cr, uid, ids, values, context=None): values = self._remove_reified_groups(values) return super(users_view, self).write(cr, uid, ids, values, context) def _remove_reified_groups(self, values): """ return `values` without reified group fields """ add, rem = [], [] values1 = {} for key, val in values.iteritems(): if is_boolean_group(key): (add if val else rem).append(get_boolean_group(key)) elif is_selection_groups(key): rem += get_selection_groups(key) if val: add.append(val) else: values1[key] = val if 'groups_id' not in values and (add or rem): # remove group ids in `rem` and add group ids in `add` values1['groups_id'] = zip(repeat(3), rem) + zip(repeat(4), add) return values1 def default_get(self, cr, uid, fields, context=None): group_fields, fields = partition(is_reified_group, fields) fields1 = (fields + ['groups_id']) if group_fields 
else fields values = super(users_view, self).default_get(cr, uid, fields1, context) self._add_reified_groups(group_fields, values) # add "default_groups_ref" inside the context to set default value for group_id with xml values if 'groups_id' in fields and isinstance(context.get("default_groups_ref"), list): groups = [] ir_model_data = self.pool.get('ir.model.data') for group_xml_id in context["default_groups_ref"]: group_split = group_xml_id.split('.') if len(group_split) != 2: raise osv.except_osv(_('Invalid context value'), _('Invalid context default_groups_ref value (model.name_id) : "%s"') % group_xml_id) try: temp, group_id = ir_model_data.get_object_reference(cr, uid, group_split[0], group_split[1]) except ValueError: group_id = False groups += [group_id] values['groups_id'] = groups return values def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'): # determine whether reified groups fields are required, and which ones fields1 = fields or self.fields_get(cr, uid, context=context).keys() group_fields, other_fields = partition(is_reified_group, fields1) # read regular fields (other_fields); add 'groups_id' if necessary drop_groups_id = False if group_fields and fields: if 'groups_id' not in other_fields: other_fields.append('groups_id') drop_groups_id = True else: other_fields = fields res = super(users_view, self).read(cr, uid, ids, other_fields, context=context, load=load) # post-process result to add reified group fields if group_fields: for values in (res if isinstance(res, list) else [res]): self._add_reified_groups(group_fields, values) if drop_groups_id: values.pop('groups_id', None) return res def _add_reified_groups(self, fields, values): """ add the given reified group fields into `values` """ gids = set(parse_m2m(values.get('groups_id') or [])) for f in fields: if is_boolean_group(f): values[f] = get_boolean_group(f) in gids elif is_selection_groups(f): selected = [gid for gid in get_selection_groups(f) if gid in gids] 
values[f] = selected and selected[-1] or False def fields_get(self, cr, uid, allfields=None, context=None, write_access=True): res = super(users_view, self).fields_get(cr, uid, allfields, context, write_access) # add reified groups fields for app, kind, gs in self.pool['res.groups'].get_groups_by_application(cr, uid, context): if kind == 'selection': # selection group field tips = ['%s: %s' % (g.name, g.comment) for g in gs if g.comment] res[name_selection_groups(map(int, gs))] = { 'type': 'selection', 'string': app and app.name or _('Other'), 'selection': [(False, '')] + [(g.id, g.name) for g in gs], 'help': '\n'.join(tips), 'exportable': False, 'selectable': False, } else: # boolean group fields for g in gs: res[name_boolean_group(g.id)] = { 'type': 'boolean', 'string': g.name, 'help': g.comment, 'exportable': False, 'selectable': False, } return res class change_password_wizard(osv.TransientModel): """ A wizard to manage the change of users' passwords """ _name = "change.password.wizard" _description = "Change Password Wizard" _columns = { 'user_ids': fields.one2many('change.password.user', 'wizard_id', string='Users'), } def _default_user_ids(self, cr, uid, context=None): if context is None: context = {} user_model = self.pool['res.users'] user_ids = context.get('active_model') == 'res.users' and context.get('active_ids') or [] return [ (0, 0, {'user_id': user.id, 'user_login': user.login}) for user in user_model.browse(cr, uid, user_ids, context=context) ] _defaults = { 'user_ids': _default_user_ids, } def change_password_button(self, cr, uid, ids, context=None): wizard = self.browse(cr, uid, ids, context=context)[0] need_reload = any(uid == user.user_id.id for user in wizard.user_ids) line_ids = [user.id for user in wizard.user_ids] self.pool.get('change.password.user').change_password_button(cr, uid, line_ids, context=context) if need_reload: return { 'type': 'ir.actions.client', 'tag': 'reload' } return {'type': 'ir.actions.act_window_close'} class 
change_password_user(osv.TransientModel): """ A model to configure users in the change password wizard """ _name = 'change.password.user' _description = 'Change Password Wizard User' _columns = { 'wizard_id': fields.many2one('change.password.wizard', string='Wizard', required=True), 'user_id': fields.many2one('res.users', string='User', required=True), 'user_login': fields.char('User Login', readonly=True), 'new_passwd': fields.char('New Password'), } _defaults = { 'new_passwd': '', } def change_password_button(self, cr, uid, ids, context=None): for line in self.browse(cr, uid, ids, context=context): line.user_id.write({'password': line.new_passwd}) # don't keep temporary passwords in the database longer than necessary self.write(cr, uid, ids, {'new_passwd': False}, context=context)
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):
    """Rename ``Notification.location`` to ``Notification.facility``.

    Performed in three steps (alter / rename / alter) — presumably so the
    rename and the verbose_name change are recorded as separate,
    unambiguous operations for Django's migration framework.
    """

    dependencies = [
        ('organizations', '0002_migrate_locations_to_facilities'),
        ('notifications', '0003_auto_20150912_2049'),
    ]

    operations = [
        # Step 1: relabel the old field with verbose_name='facility'
        # while it is still called 'location'.
        migrations.AlterField(
            model_name='notification',
            name='location',
            field=models.ForeignKey(verbose_name='facility', to='organizations.Facility'),
        ),
        # Step 2: rename the field itself: location -> facility.
        migrations.RenameField(
            model_name='notification',
            old_name='location',
            new_name='facility',
        ),
        # Step 3: drop the now-redundant explicit verbose_name (the field
        # name matches it after the rename).
        migrations.AlterField(
            model_name='notification',
            name='facility',
            field=models.ForeignKey(to='organizations.Facility'),
        ),
    ]
from __future__ import unicode_literals
import frappe
import erpnext
import unittest
from frappe.utils import nowdate, add_days
from erpnext.tests.utils import create_test_contact_and_address
from erpnext.stock.doctype.delivery_trip.delivery_trip import notify_customers, get_contact_and_address


class TestDeliveryTrip(unittest.TestCase):
    """Smoke test for creating a Delivery Trip and notifying customers."""

    def setUp(self):
        # Create the fixtures the trip document references.
        create_driver()
        create_vehicle()
        create_delivery_notfication()
        create_test_contact_and_address()

    def test_delivery_trip(self):
        """Create a trip with one stop and trigger the customer
        notification; the stop must not be marked as notified (the test
        environment does not actually send email)."""
        contact = get_contact_and_address("_Test Customer")

        if not frappe.db.exists("Delivery Trip", "TOUR-00000"):
            delivery_trip = frappe.new_doc("Delivery Trip")
            delivery_trip.company = erpnext.get_default_company()
            delivery_trip.date = add_days(nowdate(), 5)
            delivery_trip.driver = "DRIVER-00001"
            delivery_trip.vehicle = "JB 007"
            delivery_trip.append("delivery_stops", {
                "customer": "_Test Customer",
                "address": contact.shipping_address.parent,
                "contact": contact.contact_person.parent
            })
            delivery_trip.delivery_notification = 'Delivery Notification'
            delivery_trip.insert()

            sender_email = frappe.db.get_value("User", frappe.session.user, "email")
            notify_customers(docname=delivery_trip.name, date=delivery_trip.date,
                driver=delivery_trip.driver, vehicle=delivery_trip.vehicle,
                sender_email=sender_email,
                delivery_notification=delivery_trip.delivery_notification)
            # FIX: use assertEqual — assertEquals is a deprecated alias.
            self.assertEqual(delivery_trip.get("delivery_stops")[0].notified_by_email, 0)


def create_driver():
    """Fixture: create the test driver referenced by the trip."""
    if not frappe.db.exists("Driver", "Newton Scmander"):
        driver = frappe.new_doc("Driver")
        driver.full_name = "Newton Scmander"
        driver.cell_number = "98343424242"
        driver.license_number = "B809"
        driver.insert()


# NOTE(review): name keeps the historical 'notfication' typo for
# backward compatibility with external callers.
def create_delivery_notfication():
    """Fixture: create the 'Delivery Notification' standard reply."""
    if not frappe.db.exists("Standard Reply", "Delivery Notification"):
        frappe.get_doc({
            'doctype': 'Standard Reply',
            'name': 'Delivery Notification',
            'response': 'Test Delivery Trip',
            'subject': 'Test Subject',
            'owner': frappe.session.user
        }).insert()


def create_vehicle():
    """Fixture: create the test vehicle 'JB 007'."""
    if not frappe.db.exists("Vehicle", "JB 007"):
        vehicle = frappe.get_doc({
            "doctype": "Vehicle",
            "license_plate": "JB 007",
            "make": "Maruti",
            "model": "PCM",
            "last_odometer": 5000,
            "acquisition_date": frappe.utils.nowdate(),
            "location": "Mumbai",
            "chassis_no": "1234ABCD",
            "uom": "Litre",
            "vehicle_value": frappe.utils.flt(500000)
        })
        vehicle.insert()
from cStringIO import StringIO
import sys
import tempfile
import unittest2 as unittest

import numpy

from nupic.encoders.base import defaultDtype
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.fieldmeta import FieldMetaType
from nupic.support.unittesthelpers.algorithm_test_helpers import getSeed
from nupic.encoders.random_distributed_scalar import (
    RandomDistributedScalarEncoder
)

# capnp is optional; serialization tests are skipped when it is absent
try:
  import capnp
except ImportError:
  capnp = None
if capnp:
  from nupic.encoders.random_distributed_scalar_capnp import (
      RandomDistributedScalarEncoderProto
  )


def computeOverlap(x, y):
  """
  Given two binary arrays, compute their overlap. The overlap is the number
  of bits where x[i] and y[i] are both 1
  """
  return (x & y).sum()


def validateEncoder(encoder, subsampling):
  """
  Given an encoder, calculate overlaps statistics and ensure everything is ok.
  We don't check every possible combination for speed reasons.
  """
  # check pairs (i, j) with j sampled every `subsampling` buckets
  for i in range(encoder.minIndex, encoder.maxIndex+1, 1):
    for j in range(i+1, encoder.maxIndex+1, subsampling):
      if not encoder._overlapOK(i, j):
        return False
  return True


class RandomDistributedScalarEncoderTest(unittest.TestCase):
  """
  Unit tests for RandomDistributedScalarEncoder class.
  """

  def testEncoding(self):
    """
    Test basic encoding functionality. Create encodings without crashing and
    check they contain the correct number of on and off bits. Check some
    encodings for expected overlap. Test that encodings for old values don't
    change once we generate new buckets.
    """
    # Initialize with non-default parameters and encode with a number close to
    # the offset
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0,
                                             w=23, n=500, offset=0.0)
    e0 = encoder.encode(-0.1)

    self.assertEqual(e0.sum(), 23, "Number of on bits is incorrect")
    self.assertEqual(e0.size, 500, "Width of the vector is incorrect")
    # NOTE: integer division (Python 2) — offset maps to the middle bucket
    self.assertEqual(encoder.getBucketIndices(0.0)[0], encoder._maxBuckets / 2,
                     "Offset doesn't correspond to middle bucket")
    self.assertEqual(len(encoder.bucketMap), 1, "Number of buckets is not 1")

    # Encode with a number that is resolution away from offset. Now we should
    # have two buckets and this encoding should be one bit away from e0
    e1 = encoder.encode(1.0)
    self.assertEqual(len(encoder.bucketMap), 2, "Number of buckets is not 2")
    self.assertEqual(e1.sum(), 23, "Number of on bits is incorrect")
    self.assertEqual(e1.size, 500, "Width of the vector is incorrect")
    self.assertEqual(computeOverlap(e0, e1), 22, "Overlap is not equal to w-1")

    # Encode with a number that is resolution*w away from offset. Now we should
    # have many buckets and this encoding should have very little overlap with
    # e0
    e25 = encoder.encode(25.0)
    # NOTE(review): assertion message below is stale (copied from the
    # "Number of buckets is not 2" check above)
    self.assertGreater(len(encoder.bucketMap), 23, "Number of buckets is not 2")
    self.assertEqual(e25.sum(), 23, "Number of on bits is incorrect")
    self.assertEqual(e25.size, 500, "Width of the vector is incorrect")
    self.assertLess(computeOverlap(e0, e25), 4, "Overlap is too high")

    # Test encoding consistency. The encodings for previous numbers
    # shouldn't change even though we have added additional buckets
    self.assertTrue(numpy.array_equal(e0, encoder.encode(-0.1)),
                    "Encodings are not consistent - they have changed after new buckets "
                    "have been created")
    self.assertTrue(numpy.array_equal(e1, encoder.encode(1.0)),
                    "Encodings are not consistent - they have changed after new buckets "
                    "have been created")


  def testMissingValues(self):
    """
    Test that missing values and NaN return all zero's.
    """
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0)
    empty = encoder.encode(SENTINEL_VALUE_FOR_MISSING_DATA)
    self.assertEqual(empty.sum(), 0)

    empty = encoder.encode(float("nan"))
    self.assertEqual(empty.sum(), 0)


  def testResolution(self):
    """
    Test that numbers within the same resolution return the same encoding.
    Numbers outside the resolution should return different encodings.
    """
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0)

    # Since 23.0 is the first encoded number, it will be the offset.
    # Since resolution is 1, 22.9 and 23.4 should have the same bucket index and
    # encoding.
    e23 = encoder.encode(23.0)
    e23p1 = encoder.encode(23.1)
    e22p9 = encoder.encode(22.9)
    e24 = encoder.encode(24.0)
    self.assertEqual(e23.sum(), encoder.w)
    # identical encodings: every bit position compares equal
    self.assertEqual((e23 == e23p1).sum(), encoder.getWidth(),
                     "Numbers within resolution don't have the same encoding")
    self.assertEqual((e23 == e22p9).sum(), encoder.getWidth(),
                     "Numbers within resolution don't have the same encoding")
    self.assertNotEqual((e23 == e24).sum(), encoder.getWidth(),
                        "Numbers outside resolution have the same encoding")

    # NOTE(review): variable name e22p9 is reused for the 22.5 encoding
    e22p9 = encoder.encode(22.5)
    self.assertNotEqual((e23 == e22p9).sum(), encoder.getWidth(),
                        "Numbers outside resolution have the same encoding")


  def testMapBucketIndexToNonZeroBits(self):
    """
    Test that mapBucketIndexToNonZeroBits works and that max buckets and
    clipping are handled properly.
    """
    encoder = RandomDistributedScalarEncoder(resolution=1.0, w=11, n=150)
    # Set a low number of max buckets
    encoder._initializeBucketMap(10, None)
    encoder.encode(0.0)
    encoder.encode(-7.0)
    encoder.encode(7.0)

    self.assertEqual(len(encoder.bucketMap), encoder._maxBuckets,
                     "_maxBuckets exceeded")
    self.assertTrue(
        numpy.array_equal(encoder.mapBucketIndexToNonZeroBits(-1),
                          encoder.bucketMap[0]),
        "mapBucketIndexToNonZeroBits did not handle negative"
        " index")
    # NOTE(review): message is stale — this checks an index ABOVE the range
    self.assertTrue(
        numpy.array_equal(encoder.mapBucketIndexToNonZeroBits(1000),
                          encoder.bucketMap[9]),
        "mapBucketIndexToNonZeroBits did not handle negative index")

    # values beyond the top bucket clip to the same encoding
    e23 = encoder.encode(23.0)
    e6 = encoder.encode(6)
    self.assertEqual((e23 == e6).sum(), encoder.getWidth(),
                     "Values not clipped correctly during encoding")

    # values below the bottom bucket clip to the same encoding
    ep8 = encoder.encode(-8)
    ep7 = encoder.encode(-7)
    self.assertEqual((ep8 == ep7).sum(), encoder.getWidth(),
                     "Values not clipped correctly during encoding")

    self.assertEqual(encoder.getBucketIndices(-8)[0], 0,
                     "getBucketIndices returned negative bucket index")
    self.assertEqual(encoder.getBucketIndices(23)[0], encoder._maxBuckets-1,
                     "getBucketIndices returned bucket index that is too"
                     " large")


  def testParameterChecks(self):
    """
    Test that some bad construction parameters get handled.
    """
    # n must be >= 6*w
    with self.assertRaises(ValueError):
      RandomDistributedScalarEncoder(name="mv", resolution=1.0, n=int(5.9*21))

    # n must be an int
    with self.assertRaises(ValueError):
      RandomDistributedScalarEncoder(name="mv", resolution=1.0, n=5.9*21)

    # w can't be negative
    with self.assertRaises(ValueError):
      RandomDistributedScalarEncoder(name="mv", resolution=1.0, w=-1)

    # resolution can't be negative
    with self.assertRaises(ValueError):
      RandomDistributedScalarEncoder(name="mv", resolution=-2)


  def testOverlapStatistics(self):
    """
    Check that the overlaps for the encodings are within the expected range.
    Here we ask the encoder to create a bunch of representations under somewhat
    stressful conditions, and then verify they are correct. We rely on the fact
    that the _overlapOK and _countOverlapIndices methods are working correctly.
    """
    seed = getSeed()

    # Generate about 600 encodings. Set n relatively low to increase
    # chance of false overlaps
    encoder = RandomDistributedScalarEncoder(resolution=1.0, w=11, n=150,
                                             seed=seed)
    encoder.encode(0.0)
    encoder.encode(-300.0)
    encoder.encode(300.0)
    self.assertTrue(validateEncoder(encoder, subsampling=3),
                    "Illegal overlap encountered in encoder")


  def testGetMethods(self):
    """
    Test that the getWidth, getDescription, and getDecoderOutputFieldTypes
    methods work.
    """
    encoder = RandomDistributedScalarEncoder(name="theName", resolution=1.0, n=500)
    self.assertEqual(encoder.getWidth(), 500,
                     "getWidth doesn't return the correct result")

    self.assertEqual(encoder.getDescription(), [("theName", 0)],
                     "getDescription doesn't return the correct result")

    self.assertEqual(encoder.getDecoderOutputFieldTypes(),
                     (FieldMetaType.float, ),
                     "getDecoderOutputFieldTypes doesn't return the correct"
                     " result")


  def testOffset(self):
    """
    Test that offset is working properly
    """
    # no explicit offset: the first encoded value becomes the offset
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0)
    encoder.encode(23.0)
    self.assertEqual(encoder._offset, 23.0,
                     "Offset not specified and not initialized to first input")

    # explicit offset: constructor value wins over first input
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0,
                                             offset=25.0)
    encoder.encode(23.0)
    self.assertEqual(encoder._offset, 25.0,
                     "Offset not initialized to specified constructor"
                     " parameter")


  def testSeed(self):
    """
    Test that initializing twice with the same seed returns identical encodings
    and different when not specified
    """
    encoder1 = RandomDistributedScalarEncoder(name="encoder1", resolution=1.0,
                                              seed=42)
    encoder2 = RandomDistributedScalarEncoder(name="encoder2", resolution=1.0,
                                              seed=42)
    # seed=-1 requests a non-deterministic seed
    encoder3 = RandomDistributedScalarEncoder(name="encoder3", resolution=1.0,
                                              seed=-1)
    encoder4 = RandomDistributedScalarEncoder(name="encoder4", resolution=1.0,
                                              seed=-1)

    e1 = encoder1.encode(23.0)
    e2 = encoder2.encode(23.0)
    e3 = encoder3.encode(23.0)
    e4 = encoder4.encode(23.0)

    self.assertEqual((e1 == e2).sum(), encoder1.getWidth(),
                     "Same seed gives rise to different encodings")

    self.assertNotEqual((e1 == e3).sum(), encoder1.getWidth(),
                        "Different seeds gives rise to same encodings")

    self.assertNotEqual((e3 == e4).sum(), encoder1.getWidth(),
                        "seeds of -1 give rise to same encodings")


  def testCountOverlapIndices(self):
    """
    Test that the internal method _countOverlapIndices works as expected.
    """
    # Create a fake set of encodings.
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0,
                                             w=5, n=5*20)
    midIdx = encoder._maxBuckets/2
    # buckets with hand-crafted, progressively shifted bit windows
    encoder.bucketMap[midIdx-2] = numpy.array(range(3, 8))
    encoder.bucketMap[midIdx-1] = numpy.array(range(4, 9))
    encoder.bucketMap[midIdx] = numpy.array(range(5, 10))
    encoder.bucketMap[midIdx+1] = numpy.array(range(6, 11))
    encoder.bucketMap[midIdx+2] = numpy.array(range(7, 12))
    encoder.bucketMap[midIdx+3] = numpy.array(range(8, 13))
    encoder.minIndex = midIdx - 2
    encoder.maxIndex = midIdx + 3

    # Indices must exist
    with self.assertRaises(ValueError):
      encoder._countOverlapIndices(midIdx-3, midIdx-2)
    with self.assertRaises(ValueError):
      encoder._countOverlapIndices(midIdx-2, midIdx-3)

    # Test some overlaps
    self.assertEqual(encoder._countOverlapIndices(midIdx-2, midIdx-2), 5,
                     "_countOverlapIndices didn't work")
    self.assertEqual(encoder._countOverlapIndices(midIdx-1, midIdx-2), 4,
                     "_countOverlapIndices didn't work")
    self.assertEqual(encoder._countOverlapIndices(midIdx+1, midIdx-2), 2,
                     "_countOverlapIndices didn't work")
    self.assertEqual(encoder._countOverlapIndices(midIdx-2, midIdx+3), 0,
                     "_countOverlapIndices didn't work")


  def testOverlapOK(self):
    """
    Test that the internal method _overlapOK works as expected.
    """
    # Create a fake set of encodings.
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0,
                                             w=5, n=5*20)
    midIdx = encoder._maxBuckets/2
    # Not ok with
    # midIdx-1
    encoder.bucketMap[midIdx-3] = numpy.array(range(4, 9))
    encoder.bucketMap[midIdx-2] = numpy.array(range(3, 8))
    encoder.bucketMap[midIdx-1] = numpy.array(range(4, 9))
    encoder.bucketMap[midIdx] = numpy.array(range(5, 10))
    encoder.bucketMap[midIdx+1] = numpy.array(range(6, 11))
    encoder.bucketMap[midIdx+2] = numpy.array(range(7, 12))
    encoder.bucketMap[midIdx+3] = numpy.array(range(8, 13))
    encoder.minIndex = midIdx - 3
    encoder.maxIndex = midIdx + 3

    self.assertTrue(encoder._overlapOK(midIdx, midIdx-1),
                    "_overlapOK didn't work")
    self.assertTrue(encoder._overlapOK(midIdx-2, midIdx+3),
                    "_overlapOK didn't work")
    self.assertFalse(encoder._overlapOK(midIdx-3, midIdx-1),
                     "_overlapOK didn't work")

    # We'll just use our own numbers
    self.assertTrue(encoder._overlapOK(100, 50, 0),
                    "_overlapOK didn't work for far values")
    self.assertTrue(encoder._overlapOK(100, 50, encoder._maxOverlap),
                    "_overlapOK didn't work for far values")
    self.assertFalse(encoder._overlapOK(100, 50, encoder._maxOverlap+1),
                     "_overlapOK didn't work for far values")
    self.assertTrue(encoder._overlapOK(50, 50, 5),
                    "_overlapOK didn't work for near values")
    self.assertTrue(encoder._overlapOK(48, 50, 3),
                    "_overlapOK didn't work for near values")
    self.assertTrue(encoder._overlapOK(46, 50, 1),
                    "_overlapOK didn't work for near values")
    self.assertTrue(encoder._overlapOK(45, 50, encoder._maxOverlap),
                    "_overlapOK didn't work for near values")
    self.assertFalse(encoder._overlapOK(48, 50, 4),
                     "_overlapOK didn't work for near values")
    self.assertFalse(encoder._overlapOK(48, 50, 2),
                     "_overlapOK didn't work for near values")
    self.assertFalse(encoder._overlapOK(46, 50, 2),
                     "_overlapOK didn't work for near values")
    self.assertFalse(encoder._overlapOK(50, 50, 6),
                     "_overlapOK didn't work for near values")


  def testCountOverlap(self):
    """
    Test that the internal method _countOverlap works as expected.
    """
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0,
                                             n=500)

    r1 = numpy.array([1, 2, 3, 4, 5, 6])
    r2 = numpy.array([1, 2, 3, 4, 5, 6])
    self.assertEqual(encoder._countOverlap(r1, r2), 6,
                     "_countOverlap result is incorrect")

    r1 = numpy.array([1, 2, 3, 4, 5, 6])
    r2 = numpy.array([1, 2, 3, 4, 5, 7])
    self.assertEqual(encoder._countOverlap(r1, r2), 5,
                     "_countOverlap result is incorrect")

    r1 = numpy.array([1, 2, 3, 4, 5, 6])
    r2 = numpy.array([6, 5, 4, 3, 2, 1])
    self.assertEqual(encoder._countOverlap(r1, r2), 6,
                     "_countOverlap result is incorrect")

    r1 = numpy.array([1, 2, 8, 4, 5, 6])
    r2 = numpy.array([1, 2, 3, 4, 9, 6])
    self.assertEqual(encoder._countOverlap(r1, r2), 4,
                     "_countOverlap result is incorrect")

    r1 = numpy.array([1, 2, 3, 4, 5, 6])
    r2 = numpy.array([1, 2, 3])
    self.assertEqual(encoder._countOverlap(r1, r2), 3,
                     "_countOverlap result is incorrect")

    r1 = numpy.array([7, 8, 9, 10, 11, 12])
    r2 = numpy.array([1, 2, 3, 4, 5, 6])
    self.assertEqual(encoder._countOverlap(r1, r2), 0,
                     "_countOverlap result is incorrect")


  def testVerbosity(self):
    """
    Test that nothing is printed out when verbosity=0
    """
    # capture stdout while encoding
    _stdout = sys.stdout
    sys.stdout = _stringio = StringIO()
    encoder = RandomDistributedScalarEncoder(name="mv", resolution=1.0,
                                             verbosity=0)
    output = numpy.zeros(encoder.getWidth(), dtype=defaultDtype)
    encoder.encodeIntoArray(23.0, output)
    encoder.getBucketIndices(23.0)
    sys.stdout = _stdout

    self.assertEqual(len(_stringio.getvalue()), 0,
                     "zero verbosity doesn't lead to zero output")


  def testEncodeInvalidInputType(self):
    # strings are not encodable values
    encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0,
                                             verbosity=0)
    with self.assertRaises(TypeError):
      encoder.encode("String")


  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testWriteRead(self):
    # Round-trip the encoder through its capnp proto and check state equality
    original = RandomDistributedScalarEncoder(
        name="encoder", resolution=1.0, w=23, n=500, offset=0.0)

    originalValue = original.encode(1)

    proto1 = RandomDistributedScalarEncoderProto.new_message()
    original.write(proto1)

    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = RandomDistributedScalarEncoderProto.read(f)

    encoder = RandomDistributedScalarEncoder.read(proto2)

    self.assertIsInstance(encoder, RandomDistributedScalarEncoder)

    self.assertEqual(encoder.resolution, original.resolution)
    self.assertEqual(encoder.w, original.w)
    self.assertEqual(encoder.n, original.n)
    self.assertEqual(encoder.name, original.name)
    self.assertEqual(encoder.verbosity, original.verbosity)
    self.assertEqual(encoder.minIndex, original.minIndex)
    self.assertEqual(encoder.maxIndex, original.maxIndex)

    encodedFromOriginal = original.encode(1)
    encodedFromNew = encoder.encode(1)

    self.assertTrue(numpy.array_equal(encodedFromNew, originalValue))
    self.assertEqual(original.decode(encodedFromNew),
                     encoder.decode(encodedFromOriginal))
    self.assertEqual(original.random.getSeed(), encoder.random.getSeed())

    for key, value in original.bucketMap.items():
      self.assertTrue(numpy.array_equal(value, encoder.bucketMap[key]))


if __name__ == "__main__":
  unittest.main()
from spack import *


class PyIlmbase(AutotoolsPackage):
    """The PyIlmBase libraries provides python bindings for the IlmBase
    libraries."""

    homepage = "https://github.com/AcademySoftwareFoundation/openexr/tree/v2.3.0/PyIlmBase"
    url = "https://github.com/AcademySoftwareFoundation/openexr/releases/download/v2.3.0/pyilmbase-2.3.0.tar.gz"

    version('2.3.0', sha256='9c898bb16e7bc916c82bebdf32c343c0f2878fc3eacbafa49937e78f2079a425')

    depends_on('ilmbase')
    depends_on('boost+python')

    # Parallel builds are broken upstream:
    # https://github.com/AcademySoftwareFoundation/openexr/issues/336
    parallel = False

    def configure_args(self):
        # Select the boost_python library that matches the major.minor
        # version of the Python interpreter in the spec.
        py_suffix = self.spec['python'].version.up_to(2).joined
        return [
            '--with-boost-python-libname=boost_python{0}'.format(py_suffix),
        ]
# """ Arabic language implementations of Integer and Digits classes ============================================================================ """ from ..base.integer_internal import (MapIntBuilder, CollectionIntBuilder, MagnitudeIntBuilder, IntegerContentBase) from ..base.digits_internal import DigitsContentBase int_0 = MapIntBuilder({ "صفر": 0, }) int_1_9 = MapIntBuilder({ "واحد": 1, "اثنان": 2, "ثلاثة": 3, "اربعة": 4, "خمسة": 5, "ستة": 6, "سبعة": 7, "ثمانية": 8, "تسعة": 9, }) int_10_19 = MapIntBuilder({ "عشرة": 10, "احدى عشر": 11, "اثنا عشر": 12, "ثلاثة عشر": 13, "اربعة عشر": 14, "خمسة عشر": 15, "ستة عشر": 16, "سبعة عشر": 17, "ثمانية عشر": 18, "تسعة عشر": 19, }) int_20_90_10 = MapIntBuilder({ "عشرون": 2, "ثلاثون": 3, "اربعون": 4, "خمسون": 5, "ستون": 6, "سبعون": 7, "ثمانون": 8, "تسعون": 9, }) int_20_99 = MagnitudeIntBuilder( factor = 10, spec = "<multiplier> [<remainder>]", multipliers = [int_20_90_10], remainders = [int_1_9], ) int_and_1_99 = CollectionIntBuilder( spec = "[و] <element>", set = [int_1_9, int_10_19, int_20_99], ) int_100s = MagnitudeIntBuilder( factor = 100, spec = "[<multiplier>] hundred [<remainder>]", multipliers = [int_1_9], remainders = [int_and_1_99], ) int_100big = MagnitudeIntBuilder( factor = 100, spec = "[<multiplier>] hundred [<remainder>]", multipliers = [int_10_19, int_20_99], remainders = [int_و_1_99] ) int_1000s = MagnitudeIntBuilder( factor = 1000, spec = "[<multiplier>] thousand [<remainder>]", multipliers = [int_1_9, int_10_19, int_20_99, int_100s], remainders = [int_و_1_99, int_100s] ) int_1000000s = MagnitudeIntBuilder( factor = 1000000, spec = "[<multiplier>] million [<remainder>]", multipliers = [int_1_9, int_10_19, int_20_99, int_100s, int_1000s], remainders = [int_و_1_99, int_100s, int_1000s], ) class IntegerContent(IntegerContentBase): builders = [int_0, int_1_9, int_10_19, int_20_99, int_100s, int_100big, int_1000s, int_1000000s] class DigitsContent(DigitsContentBase): digits = [("صفر", "اووه"), "واحد", "اثنان", 
"ثلاثة", "اربعة", "خمسة", "ستة", "سبعة", "ثمانية", "تسعة"]
""" ===================== SVM: Weighted samples ===================== Plot decision function of a weighted dataset, where the size of points is proportional to its weight. """ print __doc__ import numpy as np import pylab as pl from sklearn import svm np.random.seed(0) X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)] Y = [1] * 10 + [-1] * 10 sample_weight = 100 * np.abs(np.random.randn(20)) sample_weight[:10] *= 10 clf = svm.SVC() clf.fit(X, Y, sample_weight=sample_weight) xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500)) Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) pl.contourf(xx, yy, Z, alpha=0.75, cmap=pl.cm.bone) pl.scatter(X[:, 0], X[:, 1], c=Y, s=sample_weight, alpha=0.9, cmap=pl.cm.bone) pl.axis('off') pl.show()
import urllib
import urlparse


def get_path(url):
    """Return the path component of *url*."""
    return urlparse.urlsplit(url).path


def get_host(url):
    """Return the network location (host) component of *url*."""
    return urlparse.urlsplit(url).netloc


def add_path(url, new_path):
    """Given a url and path, return a new url that combines the two.
    """
    parts = urlparse.urlsplit(url)
    suffix = new_path.lstrip('/')
    base = parts.path
    # make sure exactly one slash separates the existing path and the suffix
    if not base.endswith('/'):
        base += '/'
    return urlparse.urlunsplit(
        [parts.scheme, parts.netloc, base + suffix, parts.query, parts.fragment])


def _query_param(key, value):
    """ensure that a query parameter's value is a string
    of bytes in UTF-8 encoding.
    """
    if isinstance(value, str):
        value = value.decode('utf-8')
    elif not isinstance(value, unicode):
        value = unicode(value)
    return key, value.encode('utf-8')


def _make_query_tuples(params):
    """Normalize *params* (mapping or single key/value pair) into a list of
    UTF-8 encoded (key, value) tuples."""
    if hasattr(params, 'items'):
        return [_query_param(*pair) for pair in params.items()]
    return [_query_param(*params)]


def add_query_params(url, params):
    """use the _update_query_params function to set a
    new query string for the url based on params.
    """
    return update_query_params(url, params, update=False)


def update_query_params(url, params, update=True):
    """Given a url and a tuple or dict of parameters, return
    a url that includes the parameters as a properly
    formatted query string.

    If update is True, change any existing values to new values
    given in params.
    """
    parts = urlparse.urlsplit(url)

    # urlparse.parse_qsl gives back url-decoded byte strings. Leave these as
    # they are: they will be re-urlencoded below
    existing = urlparse.parse_qsl(parts.query)

    if update:
        merged = dict(existing)
        merged.update(_make_query_tuples(params))
        query_bits = merged
    else:
        query_bits = existing + _make_query_tuples(params)

    new_query = urllib.urlencode(query_bits)
    return urlparse.urlunsplit(
        [parts.scheme, parts.netloc, parts.path, new_query, parts.fragment])
from __future__ import unicode_literals, division, absolute_import, print_function from getpass import getuser import ctypes from ctypes.util import find_library from ctypes import c_void_p, c_uint32, POINTER, c_bool, byref from .core_foundation import CoreFoundation, unicode_to_cfstring, cfstring_to_unicode from .._types import str_cls, type_name od_path = find_library('OpenDirectory') OpenDirectory = ctypes.CDLL(od_path, use_errno=True) ODAttributeType = CoreFoundation.CFStringRef ODMatchType = c_uint32 ODRecordType = CoreFoundation.CFStringRef ODSessionRef = c_void_p ODNodeRef = c_void_p ODQueryRef = c_void_p ODRecordRef = c_void_p OpenDirectory.ODSessionCreate.argtypes = [ CoreFoundation.CFAllocatorRef, CoreFoundation.CFDictionaryRef, POINTER(CoreFoundation.CFErrorRef) ] OpenDirectory.ODSessionCreate.restype = ODSessionRef OpenDirectory.ODNodeCreateWithName.argtypes = [ CoreFoundation.CFAllocatorRef, ODSessionRef, CoreFoundation.CFStringRef, POINTER(CoreFoundation.CFErrorRef) ] OpenDirectory.ODNodeCreateWithName.restype = ODNodeRef OpenDirectory.ODQueryCreateWithNode.argtypes = [ CoreFoundation.CFAllocatorRef, ODNodeRef, CoreFoundation.CFTypeRef, ODAttributeType, ODMatchType, CoreFoundation.CFTypeRef, CoreFoundation.CFTypeRef, CoreFoundation.CFIndex, POINTER(CoreFoundation.CFErrorRef) ] OpenDirectory.ODQueryCreateWithNode.restype = ODQueryRef OpenDirectory.ODQueryCopyResults.argtypes = [ ODQueryRef, c_bool, POINTER(CoreFoundation.CFErrorRef) ] OpenDirectory.ODQueryCopyResults.restype = CoreFoundation.CFArrayRef OpenDirectory.ODRecordCopyValues.argtypes = [ ODRecordRef, ODAttributeType, POINTER(CoreFoundation.CFErrorRef) ] OpenDirectory.ODRecordCopyValues.restype = CoreFoundation.CFArrayRef kODMatchEqualTo = ODMatchType(0x2001) kODRecordTypeUsers = ODRecordType.in_dll(OpenDirectory, 'kODRecordTypeUsers') kODAttributeTypeRecordName = ODAttributeType.in_dll(OpenDirectory, 'kODAttributeTypeRecordName') kODAttributeTypeUserShell = 
ODAttributeType.in_dll(OpenDirectory, 'kODAttributeTypeUserShell')


# Cache of username -> login shell (None when none could be determined);
# avoids repeating the OpenDirectory query for the same user.
_login_shells = {}


def get_user_login_shell(username=None):
    """
    Uses OS X's OpenDirectory.framework to get the user's login shell

    :param username:
        A unicode string of the user to get the shell for - None for the
        current user

    :raises:
        TypeError - when username is not a unicode string
        OSError - when an OpenDirectory call reports an error

    :return:
        A unicode string of the user's login shell, or None when the user
        record or shell attribute could not be found
    """

    if username is None:
        username = getuser()

    # Accept byte strings by decoding them; anything else falls through to
    # the explicit type check below (previously a non-string would raise an
    # incidental AttributeError from .decode() instead of this TypeError).
    if isinstance(username, bytes):
        username = username.decode('utf-8')

    if not isinstance(username, str_cls):
        raise TypeError('username must be a unicode string, not %s' % type_name(username))

    if username not in _login_shells:
        error_ref = CoreFoundation.CFErrorRef()

        session = OpenDirectory.ODSessionCreate(
            CoreFoundation.kCFAllocatorDefault,
            None,
            byref(error_ref)
        )
        if bool(error_ref):
            raise OSError('Error!')

        # open the local directory node
        node = OpenDirectory.ODNodeCreateWithName(
            CoreFoundation.kCFAllocatorDefault,
            session,
            unicode_to_cfstring("/Local/Default"),
            byref(error_ref)
        )
        if bool(error_ref):
            raise OSError('Error!')

        # look up the user record by exact name, requesting only the shell
        # attribute, with a maximum of 1 result
        query = OpenDirectory.ODQueryCreateWithNode(
            CoreFoundation.kCFAllocatorDefault,
            node,
            kODRecordTypeUsers,
            kODAttributeTypeRecordName,
            kODMatchEqualTo,
            unicode_to_cfstring(username),
            kODAttributeTypeUserShell,
            1,
            byref(error_ref)
        )
        if bool(error_ref):
            raise OSError('Error!')

        results = OpenDirectory.ODQueryCopyResults(
            query,
            False,
            byref(error_ref)
        )
        if bool(error_ref):
            raise OSError('Error!')

        login_shell = None

        num_results = CoreFoundation.CFArrayGetCount(results)
        if num_results == 1:
            od_record = CoreFoundation.CFArrayGetValueAtIndex(results, 0)
            attributes = OpenDirectory.ODRecordCopyValues(od_record, kODAttributeTypeUserShell, byref(error_ref))
            if bool(error_ref):
                raise OSError('Error!')
            # FIX: count the attribute values array, not the results array —
            # the original counted `results` (always 1 here), which could
            # index into an empty attributes array.
            num_attributes = CoreFoundation.CFArrayGetCount(attributes)
            if num_attributes == 1:
                string_ref = CoreFoundation.CFArrayGetValueAtIndex(attributes, 0)
                login_shell = cfstring_to_unicode(string_ref)

        # cache None too, so missing users are not re-queried
        _login_shells[username] = login_shell

    return _login_shells.get(username)