import jinja2
from jingo import register
from tower import ugettext_lazy as _lazy

from mkt.site.helpers import page_title


@register.function
@jinja2.contextfunction
def operators_page_title(context, title=None):
    section = _lazy('Operator Dashboard')
    title = u'%s | %s' % (title, section) if title else section
    return page_title(context, title)
from __future__ import unicode_literals

from frappe.model.document import Document


class CashFlowMapper(Document):
    pass
""" The MIT License (MIT) Copyright (c) 2015-2016 Rapptz Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import asyncio class Context: """Represents the context in which a command is being invoked under. This class contains a lot of meta data to help you understand more about the invocation context. This class is not created manually and is instead passed around to commands by passing in :attr:`Command.pass_context`. Attributes ----------- message : :class:`discord.Message` The message that triggered the command being executed. bot : :class:`Bot` The bot that contains the command being executed. args : list The list of transformed arguments that were passed into the command. If this is accessed during the :func:`on_command_error` event then this list could be incomplete. kwargs : dict A dictionary of transformed arguments that were passed into the command. Similar to :attr:`args`\, if this is accessed in the :func:`on_command_error` event then this dict could be incomplete. prefix : str The prefix that was used to invoke the command. command The command (i.e. :class:`Command` or its superclasses) that is being invoked currently. invoked_with : str The command name that triggered this invocation. Useful for finding out which alias called the command. invoked_subcommand The subcommand (i.e. :class:`Command` or its superclasses) that was invoked. If no valid subcommand was invoked then this is equal to `None`. subcommand_passed : Optional[str] The string that was attempted to call a subcommand. This does not have to point to a valid registered subcommand and could just point to a nonsense string. If nothing was passed to attempt a call to a subcommand then this is set to `None`. """ def __init__(self, **attrs): self.message = attrs.pop('message', None) self.bot = attrs.pop('bot', None) self.args = attrs.pop('args', []) self.kwargs = attrs.pop('kwargs', {}) self.prefix = attrs.pop('prefix') self.command = attrs.pop('command', None) self.view = attrs.pop('view', None) self.invoked_with = attrs.pop('invoked_with', None) self.invoked_subcommand = attrs.pop('invoked_subcommand', None) self.subcommand_passed = attrs.pop('subcommand_passed', None) @asyncio.coroutine def invoke(self, command, *args, **kwargs): """|coro| Calls a command with the arguments given. This is useful if you want to just call the callback that a :class:`Command` holds internally. Note ------ You do not pass in the context as it is done for you. Parameters ----------- command : :class:`Command` A command or superclass of a command that is going to be called. 
\*args The arguments to to use. \*\*kwargs The keyword arguments to use. """ arguments = [] if command.instance is not None: arguments.append(command.instance) if command.pass_context: arguments.append(self) arguments.extend(args) ret = yield from command.callback(*arguments, **kwargs) return ret @property def cog(self): """Returns the cog associated with this context's command. None if it does not exist.""" if self.command is None: return None return self.command.instance
import pandas as pd import pytz import numpy as np from six import integer_types from unittest import TestCase import zipline.utils.factory as factory from zipline.sources import (DataFrameSource, DataPanelSource, RandomWalkSource) from zipline.utils import tradingcalendar as calendar_nyse from zipline.assets import AssetFinder class TestDataFrameSource(TestCase): def test_df_source(self): source, df = factory.create_test_df_source() assert isinstance(source.start, pd.lib.Timestamp) assert isinstance(source.end, pd.lib.Timestamp) for expected_dt, expected_price in df.iterrows(): sid0 = next(source) assert expected_dt == sid0.dt assert expected_price[0] == sid0.price def test_df_sid_filtering(self): _, df = factory.create_test_df_source() source = DataFrameSource(df) assert 1 not in [event.sid for event in source], \ "DataFrameSource should only stream selected sid 0, not sid 1." def test_panel_source(self): source, panel = factory.create_test_panel_source(source_type=5) assert isinstance(source.start, pd.lib.Timestamp) assert isinstance(source.end, pd.lib.Timestamp) for event in source: self.assertTrue('sid' in event) self.assertTrue('arbitrary' in event) self.assertTrue('type' in event) self.assertTrue(hasattr(event, 'volume')) self.assertTrue(hasattr(event, 'price')) self.assertEquals(event['type'], 5) self.assertEquals(event['arbitrary'], 1.) self.assertEquals(event['sid'], 0) self.assertTrue(isinstance(event['volume'], int)) self.assertTrue(isinstance(event['arbitrary'], float)) def test_yahoo_bars_to_panel_source(self): finder = AssetFinder() stocks = ['AAPL', 'GE'] start = pd.datetime(1993, 1, 1, 0, 0, 0, 0, pytz.utc) end = pd.datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc) data = factory.load_bars_from_yahoo(stocks=stocks, indexes={}, start=start, end=end) check_fields = ['sid', 'open', 'high', 'low', 'close', 'volume', 'price'] copy_panel = data.copy() sids = finder.map_identifier_index_to_sids( data.items, data.major_axis[0] ) copy_panel.items = sids source = DataPanelSource(copy_panel) for event in source: for check_field in check_fields: self.assertIn(check_field, event) self.assertTrue(isinstance(event['volume'], (integer_types))) self.assertTrue(event['sid'] in sids) def test_nan_filter_dataframe(self): dates = pd.date_range('1/1/2000', periods=2, freq='B', tz='UTC') df = pd.DataFrame(np.random.randn(2, 2), index=dates, columns=[4, 5]) # should be filtered df.loc[dates[0], 4] = np.nan # should not be filtered, should have been ffilled df.loc[dates[1], 5] = np.nan source = DataFrameSource(df) event = next(source) self.assertEqual(5, event.sid) event = next(source) self.assertEqual(4, event.sid) event = next(source) self.assertEqual(5, event.sid) self.assertFalse(np.isnan(event.price)) def test_nan_filter_panel(self): dates = pd.date_range('1/1/2000', periods=2, freq='B', tz='UTC') df = pd.Panel(np.random.randn(2, 2, 2), major_axis=dates, items=[4, 5], minor_axis=['price', 'volume']) # should be filtered df.loc[4, dates[0], 'price'] = np.nan # should not be filtered, should have been ffilled df.loc[5, dates[1], 'price'] = np.nan source = DataPanelSource(df) event = next(source) self.assertEqual(5, event.sid) event = next(source) self.assertEqual(4, event.sid) event = next(source) self.assertEqual(5, event.sid) self.assertFalse(np.isnan(event.price)) class TestRandomWalkSource(TestCase): def test_minute(self): np.random.seed(123) start_prices = {0: 100, 1: 500} start = pd.Timestamp('1990-01-01', tz='UTC') end = pd.Timestamp('1991-01-01', tz='UTC') source = 
RandomWalkSource(start_prices=start_prices, calendar=calendar_nyse, start=start, end=end) self.assertIsInstance(source.start, pd.lib.Timestamp) self.assertIsInstance(source.end, pd.lib.Timestamp) for event in source: self.assertIn(event.sid, start_prices.keys()) self.assertIn(event.dt.replace(minute=0, hour=0), calendar_nyse.trading_days) self.assertGreater(event.dt, start) self.assertLess(event.dt, end) self.assertGreater(event.price, 0, "price should never go negative.") self.assertTrue(13 <= event.dt.hour <= 21, "event.dt.hour == %i, not during market \ hours." % event.dt.hour) def test_day(self): np.random.seed(123) start_prices = {0: 100, 1: 500} start = pd.Timestamp('1990-01-01', tz='UTC') end = pd.Timestamp('1992-01-01', tz='UTC') source = RandomWalkSource(start_prices=start_prices, calendar=calendar_nyse, start=start, end=end, freq='daily') self.assertIsInstance(source.start, pd.lib.Timestamp) self.assertIsInstance(source.end, pd.lib.Timestamp) for event in source: self.assertIn(event.sid, start_prices.keys()) self.assertIn(event.dt.replace(minute=0, hour=0), calendar_nyse.trading_days) self.assertGreater(event.dt, start) self.assertLess(event.dt, end) self.assertGreater(event.price, 0, "price should never go negative.") self.assertEqual(event.dt.hour, 0)
import agents as ag import envgui as gui import submissions.Porter.vacuum2 as v2 class Dirt(ag.Thing): pass class VacuumEnvironment(ag.XYEnvironment): """The environment of [Ex. 2.12]. Agent perceives dirty or clean, and bump (into obstacle) or not; 2D discrete world of unknown size; performance measure is 100 for each dirt cleaned, and -1 for each turn taken.""" def __init__(self, width=4, height=3): super(VacuumEnvironment, self).__init__(width, height) self.add_walls() def thing_classes(self): return [ag.Wall, Dirt, # ReflexVacuumAgent, RandomVacuumAgent, # TableDrivenVacuumAgent, ModelBasedVacuumAgent ] def percept(self, agent): """The percept is a tuple of ('Dirty' or 'Clean', 'Bump' or 'None'). Unlike the TrivialVacuumEnvironment, location is NOT perceived.""" status = ('Dirty' if self.some_things_at( agent.location, Dirt) else 'Clean') bump = ('Bump' if agent.bump else'None') return (bump, status) def execute_action(self, agent, action): if action == 'Suck': dirt_list = self.list_things_at(agent.location, Dirt) if dirt_list != []: dirt = dirt_list[0] agent.performance += 100 self.delete_thing(dirt) else: super(VacuumEnvironment, self).execute_action(agent, action) if action != 'NoOp': agent.performance -= 1 def testVacuum(label, w=4, h=3, dloc=[(1,1),(2,1)], vloc=(1,1), limit=6): print(label) v = VacuumEnvironment(w, h) for loc in dloc: v.add_thing(Dirt(), loc) a = v2.HW2Agent() a = ag.TraceAgent(a) v.add_thing(a, vloc) t = gui.EnvTUI(v) t.mapImageNames({ ag.Wall: '#', Dirt: '@', ag.Agent: 'V', }) t.step(0) t.list_things(Dirt) t.step(limit) if len(t.env.get_things(Dirt)) > 0: t.list_things(Dirt) else: print('All clean!') # Check to continue if input('Do you want to continue [Y/n]? ') == 'n': exit(0) else: print('----------------------------------------') testVacuum('Two Cells, Agent on Left:') testVacuum('Two Cells, Agent on Right:', vloc=(2,1)) testVacuum('Two Cells, Agent on Top:', w=3, h=4, dloc=[(1,1), (1,2)], vloc=(1,1) ) testVacuum('Two Cells, Agent on Bottom:', w=3, h=4, dloc=[(1,1), (1,2)], vloc=(1,2) ) testVacuum('Five Cells, Agent on Left:', w=7, h=3, dloc=[(2,1), (4,1)], vloc=(1,1), limit=12) testVacuum('Five Cells, Agent near Right:', w=7, h=3, dloc=[(2,1), (3,1)], vloc=(4,1), limit=12) testVacuum('Five Cells, Agent on Top:', w=3, h=7, dloc=[(1,2), (1,4)], vloc=(1,1), limit=12 ) testVacuum('Five Cells, Agent Near Bottom:', w=3, h=7, dloc=[(1,2), (1,3)], vloc=(1,4), limit=12 ) testVacuum('5x4 Grid, Agent in Top Left:', w=7, h=6, dloc=[(1,4), (2,2), (3, 3), (4,1), (5,2)], vloc=(1,1), limit=46 ) testVacuum('5x4 Grid, Agent near Bottom Right:', w=7, h=6, dloc=[(1,3), (2,2), (3, 4), (4,1), (5,2)], vloc=(4, 3), limit=46 ) v = VacuumEnvironment(6, 3) a = v2.HW2Agent() a = ag.TraceAgent(a) loc = v.random_location_inbounds() v.add_thing(a, location=loc) v.scatter_things(Dirt) g = gui.EnvGUI(v, 'Vaccuum') c = g.getCanvas() c.mapImageNames({ ag.Wall: 'images/wall.jpg', # Floor: 'images/floor.png', Dirt: 'images/dirt.png', ag.Agent: 'images/vacuum.png', }) c.update() g.mainloop()
""" Implement common functions for tests """ from __future__ import print_function from __future__ import unicode_literals import io import sys def parse_yaml(yaml_file): """ Parses a yaml file, returning its contents as a dict. """ try: import yaml except ImportError: sys.exit("Unable to import yaml module.") try: with io.open(yaml_file, encoding='utf-8') as fname: return yaml.load(fname) except IOError: sys.exit("Unable to open YAML file: {0}".format(yaml_file))
import waterfall_window import common from gnuradio import gr, blks2 from pubsub import pubsub from constants import * class _waterfall_sink_base(gr.hier_block2, common.wxgui_hb): """ An fft block with real/complex inputs and a gui window. """ def __init__( self, parent, baseband_freq=0, ref_level=50, sample_rate=1, fft_size=512, fft_rate=waterfall_window.DEFAULT_FRAME_RATE, average=False, avg_alpha=None, title='', size=waterfall_window.DEFAULT_WIN_SIZE, ref_scale=2.0, dynamic_range=80, num_lines=256, win=None, **kwargs #do not end with a comma ): #ensure avg alpha if avg_alpha is None: avg_alpha = 2.0/fft_rate #init gr.hier_block2.__init__( self, "waterfall_sink", gr.io_signature(1, 1, self._item_size), gr.io_signature(0, 0, 0), ) #blocks fft = self._fft_chain( sample_rate=sample_rate, fft_size=fft_size, frame_rate=fft_rate, ref_scale=ref_scale, avg_alpha=avg_alpha, average=average, win=win, ) msgq = gr.msg_queue(2) sink = gr.message_sink(gr.sizeof_float*fft_size, msgq, True) #controller self.controller = pubsub() self.controller.subscribe(AVERAGE_KEY, fft.set_average) self.controller.publish(AVERAGE_KEY, fft.average) self.controller.subscribe(AVG_ALPHA_KEY, fft.set_avg_alpha) self.controller.publish(AVG_ALPHA_KEY, fft.avg_alpha) self.controller.subscribe(SAMPLE_RATE_KEY, fft.set_sample_rate) self.controller.publish(SAMPLE_RATE_KEY, fft.sample_rate) self.controller.subscribe(DECIMATION_KEY, fft.set_decimation) self.controller.publish(DECIMATION_KEY, fft.decimation) self.controller.subscribe(FRAME_RATE_KEY, fft.set_vec_rate) self.controller.publish(FRAME_RATE_KEY, fft.frame_rate) #start input watcher common.input_watcher(msgq, self.controller, MSG_KEY) #create window self.win = waterfall_window.waterfall_window( parent=parent, controller=self.controller, size=size, title=title, real=self._real, fft_size=fft_size, num_lines=num_lines, baseband_freq=baseband_freq, decimation_key=DECIMATION_KEY, sample_rate_key=SAMPLE_RATE_KEY, frame_rate_key=FRAME_RATE_KEY, dynamic_range=dynamic_range, ref_level=ref_level, average_key=AVERAGE_KEY, avg_alpha_key=AVG_ALPHA_KEY, msg_key=MSG_KEY, ) common.register_access_methods(self, self.win) setattr(self.win, 'set_baseband_freq', getattr(self, 'set_baseband_freq')) #BACKWARDS #connect self.wxgui_connect(self, fft, sink) class waterfall_sink_f(_waterfall_sink_base): _fft_chain = blks2.logpwrfft_f _item_size = gr.sizeof_float _real = True class waterfall_sink_c(_waterfall_sink_base): _fft_chain = blks2.logpwrfft_c _item_size = gr.sizeof_gr_complex _real = False import wx from gnuradio.wxgui import stdgui2 class test_top_block (stdgui2.std_top_block): def __init__(self, frame, panel, vbox, argv): stdgui2.std_top_block.__init__ (self, frame, panel, vbox, argv) fft_size = 512 # build our flow graph input_rate = 20.000e3 # Generate a complex sinusoid self.src1 = gr.sig_source_c (input_rate, gr.GR_SIN_WAVE, 5.75e3, 1000) #src1 = gr.sig_source_c (input_rate, gr.GR_CONST_WAVE, 5.75e3, 1000) # We add these throttle blocks so that this demo doesn't # suck down all the CPU available. Normally you wouldn't use these. 
self.thr1 = gr.throttle(gr.sizeof_gr_complex, input_rate) sink1 = waterfall_sink_c (panel, title="Complex Data", fft_size=fft_size, sample_rate=input_rate, baseband_freq=100e3) self.connect(self.src1, self.thr1, sink1) vbox.Add (sink1.win, 1, wx.EXPAND) # generate a real sinusoid self.src2 = gr.sig_source_f (input_rate, gr.GR_SIN_WAVE, 5.75e3, 1000) self.thr2 = gr.throttle(gr.sizeof_float, input_rate) sink2 = waterfall_sink_f (panel, title="Real Data", fft_size=fft_size, sample_rate=input_rate, baseband_freq=100e3) self.connect(self.src2, self.thr2, sink2) vbox.Add (sink2.win, 1, wx.EXPAND) def main (): app = stdgui2.stdapp (test_top_block, "Waterfall Sink Test App") app.MainLoop () if __name__ == '__main__': main ()
from __future__ import (absolute_import, division, generators, nested_scopes,
                        print_function, unicode_literals, with_statement)

import os

from pants.util.dirutil import safe_mkdir_for


class ReproMixin(object):
    """Additional helper methods for use in Repro tests."""

    def add_file(self, root, path, content):
        """Add a file with specified contents

        :param str root: Root directory for path.
        :param str path: Path relative to root.
        :param str content: Content to write to file.
        """
        fullpath = os.path.join(root, path)
        safe_mkdir_for(fullpath)
        with open(fullpath, 'w') as outfile:
            outfile.write(content)

    def assert_not_exists(self, root, path):
        """Assert a file at relpath doesn't exist

        :param str root: Root directory of path.
        :param str path: Path relative to tar.gz.
        :return: bool
        """
        fullpath = os.path.join(root, path)
        self.assertFalse(os.path.exists(fullpath))

    def assert_file(self, root, path, expected_content=None):
        """Assert that a file exists with the content specified

        :param str root: Root directory of path.
        :param str path: Path relative to tar.gz.
        :param str expected_content: file contents.
        :return: bool
        """
        fullpath = os.path.join(root, path)
        self.assertTrue(os.path.isfile(fullpath))
        if expected_content:
            with open(fullpath, 'r') as infile:
                content = infile.read()
            self.assertEqual(expected_content, content)
import base64 import os import unittest import zipfile try: from io import BytesIO except ImportError: from cStringIO import StringIO as BytesIO try: unicode except NameError: unicode = str from selenium import webdriver from selenium.webdriver.common.proxy import Proxy, ProxyType from selenium.test.selenium.webdriver.common.webserver import SimpleWebServer class TestFirefoxProfile: def setup_method(self, method): self.driver = webdriver.Firefox() self.webserver = SimpleWebServer() self.webserver.start() def test_that_we_can_accept_a_profile(self): profile1 = webdriver.FirefoxProfile() profile1.set_preference("startup.homepage_welcome_url", self.webserver.where_is('simpleTest.html')) profile1.update_preferences() profile2 = webdriver.FirefoxProfile(profile1.path) driver = webdriver.Firefox(firefox_profile=profile2) title = driver.title driver.quit() assert "Hello WebDriver" == title def test_that_prefs_are_written_in_the_correct_format(self): # The setup gave us a browser but we dont need it self.driver.quit() profile = webdriver.FirefoxProfile() profile.set_preference("sample.preference", "hi there") profile.update_preferences() assert 'hi there' == profile.default_preferences["sample.preference"] encoded = profile.encoded decoded = base64.decodestring(encoded) fp = BytesIO(decoded) zip = zipfile.ZipFile(fp, "r") for entry in zip.namelist(): if entry.endswith("user.js"): user_js = zip.read(entry) for line in user_js.splitlines(): if line.startswith(b'user_pref("sample.preference",'): assert True == line.endswith(b'hi there");') # there should be only one user.js break fp.close() def test_that_unicode_prefs_are_written_in_the_correct_format(self): # The setup gave us a browser but we dont need it self.driver.quit() profile = webdriver.FirefoxProfile() profile.set_preference('sample.preference.2', unicode('hi there')) profile.update_preferences() assert 'hi there' == profile.default_preferences["sample.preference.2"] encoded = profile.encoded decoded = base64.decodestring(encoded) fp = BytesIO(decoded) zip = zipfile.ZipFile(fp, "r") for entry in zip.namelist(): if entry.endswith('user.js'): user_js = zip.read(entry) for line in user_js.splitlines(): if line.startswith(b'user_pref("sample.preference.2",'): assert True == line.endswith(b'hi there");') # there should be only one user.js break fp.close() def test_that_integer_prefs_are_written_in_the_correct_format(self): # The setup gave us a browser but we dont need it self.driver.quit() profile = webdriver.FirefoxProfile() profile.set_preference("sample.int.preference", 12345) profile.update_preferences() assert 12345 == profile.default_preferences["sample.int.preference"] def test_that_boolean_prefs_are_written_in_the_correct_format(self): # The setup gave us a browser but we dont need it self.driver.quit() profile = webdriver.FirefoxProfile() profile.set_preference("sample.bool.preference", True) profile.update_preferences() assert True == profile.default_preferences["sample.bool.preference"] def test_that_we_delete_the_profile(self): path = self.driver.firefox_profile.path self.driver.quit() assert not os.path.exists(path) def test_profiles_do_not_share_preferences(self): self.profile1 = webdriver.FirefoxProfile() self.profile1.accept_untrusted_certs = False self.profile2 = webdriver.FirefoxProfile() # Default is true. Should remain so. 
assert self.profile2.default_preferences["webdriver_accept_untrusted_certs"] == True def test_none_proxy_is_set(self): # The setup gave us a browser but we dont need it self.driver.quit() self.profile = webdriver.FirefoxProfile() proxy = None try: self.profile.set_proxy(proxy) assert False, "exception after passing empty proxy is expected" except ValueError as e: pass assert "network.proxy.type" not in self.profile.default_preferences def test_unspecified_proxy_is_set(self): # The setup gave us a browser but we dont need it self.driver.quit() self.profile = webdriver.FirefoxProfile() proxy = Proxy() self.profile.set_proxy(proxy) assert "network.proxy.type" not in self.profile.default_preferences def test_manual_proxy_is_set_in_profile(self): # The setup gave us a browser but we dont need it self.driver.quit() self.profile = webdriver.FirefoxProfile() proxy = Proxy() proxy.no_proxy = 'localhost, foo.localhost' proxy.http_proxy = 'some.url:1234' proxy.ftp_proxy = None proxy.sslProxy = 'some2.url' self.profile.set_proxy(proxy) assert self.profile.default_preferences["network.proxy.type"] == ProxyType.MANUAL['ff_value'] assert self.profile.default_preferences["network.proxy.no_proxies_on"] == 'localhost, foo.localhost' assert self.profile.default_preferences["network.proxy.http"] == 'some.url' assert self.profile.default_preferences["network.proxy.http_port"] == 1234 assert self.profile.default_preferences["network.proxy.ssl"] == 'some2.url' assert "network.proxy.ssl_port" not in self.profile.default_preferences assert "network.proxy.ftp" not in self.profile.default_preferences def test_pac_proxy_is_set_in_profile(self): # The setup gave us a browser but we dont need it self.driver.quit() self.profile = webdriver.FirefoxProfile() proxy = Proxy() proxy.proxy_autoconfig_url = 'http://some.url:12345/path' self.profile.set_proxy(proxy) assert self.profile.default_preferences["network.proxy.type"] == ProxyType.PAC['ff_value'] assert self.profile.default_preferences["network.proxy.autoconfig_url"] == 'http://some.url:12345/path' def test_autodetect_proxy_is_set_in_profile(self): # The setup gave us a browser but we dont need it self.driver.quit() self.profile = webdriver.FirefoxProfile() proxy = Proxy() proxy.auto_detect = True self.profile.set_proxy(proxy) assert self.profile.default_preferences["network.proxy.type"] == ProxyType.AUTODETECT['ff_value'] def teardown_method(self, method): try: self.driver.quit() except: pass #don't care since we may have killed the browser above self.webserver.stop() def _pageURL(self, name): return self.webserver.where_is(name + '.html') def _loadSimplePage(self): self._loadPage("simpleTest") def _loadPage(self, name): self.driver.get(self._pageURL(name)) def teardown_module(module): try: TestFirefoxProfile.driver.quit() except: pass #Don't Care since we may have killed the browser above
from tests.package.test_python import TestPythonPackageBase


class TestPythonPy2Can(TestPythonPackageBase):
    __test__ = True
    config = TestPythonPackageBase.config + \
        """
        BR2_PACKAGE_PYTHON=y
        BR2_PACKAGE_PYTHON_CAN=y
        """
    sample_scripts = ["tests/package/sample_python_can.py"]
    timeout = 40


class TestPythonPy3Can(TestPythonPackageBase):
    __test__ = True
    config = TestPythonPackageBase.config + \
        """
        BR2_PACKAGE_PYTHON3=y
        BR2_PACKAGE_PYTHON_CAN=y
        """
    sample_scripts = ["tests/package/sample_python_can.py"]
    timeout = 40
from boto.s3.user import User class ResultSet(list): """ The ResultSet is used to pass results back from the Amazon services to the client. It is light wrapper around Python's :py:class:`list` class, with some additional methods for parsing XML results from AWS. Because I don't really want any dependencies on external libraries, I'm using the standard SAX parser that comes with Python. The good news is that it's quite fast and efficient but it makes some things rather difficult. You can pass in, as the marker_elem parameter, a list of tuples. Each tuple contains a string as the first element which represents the XML element that the resultset needs to be on the lookout for and a Python class as the second element of the tuple. Each time the specified element is found in the XML, a new instance of the class will be created and popped onto the stack. :ivar str next_token: A hash used to assist in paging through very long result sets. In most cases, passing this value to certain methods will give you another 'page' of results. """ def __init__(self, marker_elem=None): list.__init__(self) if isinstance(marker_elem, list): self.markers = marker_elem else: self.markers = [] self.marker = None self.key_marker = None self.next_marker = None # avail when delimiter used self.next_key_marker = None self.next_upload_id_marker = None self.next_version_id_marker = None self.next_generation_marker= None self.version_id_marker = None self.is_truncated = False self.next_token = None self.status = True def startElement(self, name, attrs, connection): for t in self.markers: if name == t[0]: obj = t[1](connection) self.append(obj) return obj if name == 'Owner': # Makes owner available for get_service and # perhaps other lists where not handled by # another element. self.owner = User() return self.owner return None def to_boolean(self, value, true_value='true'): if value == true_value: return True else: return False def endElement(self, name, value, connection): if name == 'IsTruncated': self.is_truncated = self.to_boolean(value) elif name == 'Marker': self.marker = value elif name == 'KeyMarker': self.key_marker = value elif name == 'NextMarker': self.next_marker = value elif name == 'NextKeyMarker': self.next_key_marker = value elif name == 'VersionIdMarker': self.version_id_marker = value elif name == 'NextVersionIdMarker': self.next_version_id_marker = value elif name == 'NextGenerationMarker': self.next_generation_marker = value elif name == 'UploadIdMarker': self.upload_id_marker = value elif name == 'NextUploadIdMarker': self.next_upload_id_marker = value elif name == 'Bucket': self.bucket = value elif name == 'MaxUploads': self.max_uploads = int(value) elif name == 'MaxItems': self.max_items = int(value) elif name == 'Prefix': self.prefix = value elif name == 'return': self.status = self.to_boolean(value) elif name == 'StatusCode': self.status = self.to_boolean(value, 'Success') elif name == 'ItemName': self.append(value) elif name == 'NextToken': self.next_token = value elif name == 'nextToken': self.next_token = value # Code exists which expects nextToken to be available, so we # set it here to remain backwards-compatibile. 
self.nextToken = value elif name == 'BoxUsage': try: connection.box_usage += float(value) except: pass elif name == 'IsValid': self.status = self.to_boolean(value, 'True') else: setattr(self, name, value) class BooleanResult(object): def __init__(self, marker_elem=None): self.status = True self.request_id = None self.box_usage = None def __repr__(self): if self.status: return 'True' else: return 'False' def __nonzero__(self): return self.status def startElement(self, name, attrs, connection): return None def to_boolean(self, value, true_value='true'): if value == true_value: return True else: return False def endElement(self, name, value, connection): if name == 'return': self.status = self.to_boolean(value) elif name == 'StatusCode': self.status = self.to_boolean(value, 'Success') elif name == 'IsValid': self.status = self.to_boolean(value, 'True') elif name == 'RequestId': self.request_id = value elif name == 'requestId': self.request_id = value elif name == 'BoxUsage': self.request_id = value else: setattr(self, name, value)
"""Provides factories for Split.""" from xmodule.modulestore import ModuleStoreEnum from xmodule.course_module import CourseDescriptor from xmodule.x_module import XModuleDescriptor import factory from factory.helpers import lazy_attribute from opaque_keys.edx.keys import UsageKey class SplitFactory(factory.Factory): """ Abstracted superclass which defines modulestore so that there's no dependency on django if the caller passes modulestore in kwargs """ @lazy_attribute def modulestore(self): # Delayed import so that we only depend on django if the caller # hasn't provided their own modulestore from xmodule.modulestore.django import modulestore return modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.split) class PersistentCourseFactory(SplitFactory): """ Create a new course (not a new version of a course, but a whole new index entry). keywords: any xblock field plus (note, the below are filtered out; so, if they become legitimate xblock fields, they won't be settable via this factory) * org: defaults to textX * master_branch: (optional) defaults to ModuleStoreEnum.BranchName.draft * user_id: (optional) defaults to 'test_user' * display_name (xblock field): will default to 'Robot Super Course' unless provided """ FACTORY_FOR = CourseDescriptor # pylint: disable=W0613 @classmethod def _create(cls, target_class, course='999', run='run', org='testX', user_id=ModuleStoreEnum.UserID.test, master_branch=ModuleStoreEnum.BranchName.draft, **kwargs): modulestore = kwargs.pop('modulestore') root_block_id = kwargs.pop('root_block_id', 'course') # Write the data to the mongo datastore new_course = modulestore.create_course( org, course, run, user_id, fields=kwargs, master_branch=master_branch, root_block_id=root_block_id ) return new_course @classmethod def _build(cls, target_class, *args, **kwargs): raise NotImplementedError() class ItemFactory(SplitFactory): FACTORY_FOR = XModuleDescriptor display_name = factory.LazyAttributeSequence(lambda o, n: "{} {}".format(o.category, n)) # pylint: disable=W0613 @classmethod def _create(cls, target_class, parent_location, category='chapter', user_id=ModuleStoreEnum.UserID.test, definition_locator=None, force=False, continue_version=False, **kwargs): """ passes *kwargs* as the new item's field values: :param parent_location: (required) the location of the course & possibly parent :param category: (defaults to 'chapter') :param definition_locator (optional): the DescriptorLocator for the definition this uses or branches """ modulestore = kwargs.pop('modulestore') if isinstance(parent_location, UsageKey): return modulestore.create_child( user_id, parent_location, category, defintion_locator=definition_locator, force=force, continue_version=continue_version, **kwargs ) else: return modulestore.create_item( user_id, parent_location, category, defintion_locator=definition_locator, force=force, continue_version=continue_version, **kwargs ) @classmethod def _build(cls, target_class, *args, **kwargs): raise NotImplementedError()
from Model import *
import frappe
from frappe.exceptions import ValidationError
import unittest


class TestBatch(unittest.TestCase):
    def test_item_has_batch_enabled(self):
        self.assertRaises(ValidationError, frappe.get_doc({
            "doctype": "Batch",
            "name": "_test Batch",
            "item": "_Test Item"
        }).save)
from .....testing import assert_equal from ..specialized import BRAINSDemonWarp def test_BRAINSDemonWarp_inputs(): input_map = dict(args=dict(argstr='%s', ), arrayOfPyramidLevelIterations=dict(argstr='--arrayOfPyramidLevelIterations %s', sep=',', ), backgroundFillValue=dict(argstr='--backgroundFillValue %d', ), checkerboardPatternSubdivisions=dict(argstr='--checkerboardPatternSubdivisions %s', sep=',', ), environ=dict(nohash=True, usedefault=True, ), fixedBinaryVolume=dict(argstr='--fixedBinaryVolume %s', ), fixedVolume=dict(argstr='--fixedVolume %s', ), gradient_type=dict(argstr='--gradient_type %s', ), gui=dict(argstr='--gui ', ), histogramMatch=dict(argstr='--histogramMatch ', ), ignore_exception=dict(nohash=True, usedefault=True, ), initializeWithDisplacementField=dict(argstr='--initializeWithDisplacementField %s', ), initializeWithTransform=dict(argstr='--initializeWithTransform %s', ), inputPixelType=dict(argstr='--inputPixelType %s', ), interpolationMode=dict(argstr='--interpolationMode %s', ), lowerThresholdForBOBF=dict(argstr='--lowerThresholdForBOBF %d', ), maskProcessingMode=dict(argstr='--maskProcessingMode %s', ), max_step_length=dict(argstr='--max_step_length %f', ), medianFilterSize=dict(argstr='--medianFilterSize %s', sep=',', ), minimumFixedPyramid=dict(argstr='--minimumFixedPyramid %s', sep=',', ), minimumMovingPyramid=dict(argstr='--minimumMovingPyramid %s', sep=',', ), movingBinaryVolume=dict(argstr='--movingBinaryVolume %s', ), movingVolume=dict(argstr='--movingVolume %s', ), neighborhoodForBOBF=dict(argstr='--neighborhoodForBOBF %s', sep=',', ), numberOfBCHApproximationTerms=dict(argstr='--numberOfBCHApproximationTerms %d', ), numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ), numberOfPyramidLevels=dict(argstr='--numberOfPyramidLevels %d', ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputCheckerboardVolume=dict(argstr='--outputCheckerboardVolume %s', hash_files=False, ), outputDebug=dict(argstr='--outputDebug ', ), outputDisplacementFieldPrefix=dict(argstr='--outputDisplacementFieldPrefix %s', ), outputDisplacementFieldVolume=dict(argstr='--outputDisplacementFieldVolume %s', hash_files=False, ), outputNormalized=dict(argstr='--outputNormalized ', ), outputPixelType=dict(argstr='--outputPixelType %s', ), outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), promptUser=dict(argstr='--promptUser ', ), registrationFilterType=dict(argstr='--registrationFilterType %s', ), seedForBOBF=dict(argstr='--seedForBOBF %s', sep=',', ), smoothDisplacementFieldSigma=dict(argstr='--smoothDisplacementFieldSigma %f', ), terminal_output=dict(nohash=True, ), upFieldSmoothing=dict(argstr='--upFieldSmoothing %f', ), upperThresholdForBOBF=dict(argstr='--upperThresholdForBOBF %d', ), use_vanilla_dem=dict(argstr='--use_vanilla_dem ', ), ) inputs = BRAINSDemonWarp.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_BRAINSDemonWarp_outputs(): output_map = dict(outputCheckerboardVolume=dict(), outputDisplacementFieldVolume=dict(), outputVolume=dict(), ) outputs = BRAINSDemonWarp.output_spec() for key, metadata in list(output_map.items()): for metakey, value in list(metadata.items()): yield assert_equal, getattr(outputs.traits()[key], metakey), value
def can_build(env, platform):
    return True


def configure(env):
    pass


def get_doc_classes():
    return [
        "NetworkedMultiplayerENet",
    ]


def get_doc_path():
    return "doc_classes"
from __future__ import division, print_function, absolute_import

import numpy as np
import scipy.interpolate as interp
from numpy.testing import assert_almost_equal


class TestRegression(object):
    def test_spalde_scalar_input(self):
        """Ticket #629"""
        x = np.linspace(0, 10)
        y = x**3
        tck = interp.splrep(x, y, k=3, t=[5])
        res = interp.spalde(np.float64(1), tck)
        des = np.array([1., 3., 6., 6.])
        assert_almost_equal(res, des)
class TestRouter(object):
    def allow_migrate(self, db, app_label, model_name=None, **hints):
        """
        The Tribble model should be the only one to appear in the
        'other' db.
        """
        if model_name == 'tribble':
            return db == 'other'
        elif db == 'other':
            return False
""" Test cases related to XPath evaluation and the XPath class """ import unittest, sys, os.path this_dir = os.path.dirname(__file__) if this_dir not in sys.path: sys.path.insert(0, this_dir) # needed for Py3 from common_imports import etree, HelperTestCase, _bytes, BytesIO from common_imports import doctest, make_doctest class ETreeXPathTestCase(HelperTestCase): """XPath tests etree""" def test_xpath_boolean(self): tree = self.parse('<a><b></b><b></b></a>') self.assert_(tree.xpath('boolean(/a/b)')) self.assert_(not tree.xpath('boolean(/a/c)')) def test_xpath_number(self): tree = self.parse('<a>1</a>') self.assertEquals(1., tree.xpath('number(/a)')) tree = self.parse('<a>A</a>') actual = str(tree.xpath('number(/a)')) expected = ['nan', '1.#qnan', 'nanq'] if not actual.lower() in expected: self.fail('Expected a NAN value, got %s' % actual) def test_xpath_string(self): tree = self.parse('<a>Foo</a>') self.assertEquals('Foo', tree.xpath('string(/a/text())')) def test_xpath_document_root(self): tree = self.parse('<a><b/></a>') self.assertEquals([], tree.xpath('/')) def test_xpath_namespace(self): tree = self.parse('<a xmlns="test" xmlns:p="myURI"/>') self.assert_((None, "test") in tree.xpath('namespace::*')) self.assert_(('p', 'myURI') in tree.xpath('namespace::*')) def test_xpath_namespace_empty(self): tree = self.parse('<a/>') self.assertEquals([('xml', 'http://www.w3.org/XML/1998/namespace')], tree.xpath('namespace::*')) def test_xpath_list_elements(self): tree = self.parse('<a><b>Foo</b><b>Bar</b></a>') root = tree.getroot() self.assertEquals([root[0], root[1]], tree.xpath('/a/b')) def test_xpath_list_nothing(self): tree = self.parse('<a><b/></a>') self.assertEquals([], tree.xpath('/a/c')) # this seems to pass a different code path, also should return nothing self.assertEquals([], tree.xpath('/a/c/text()')) def test_xpath_list_text(self): tree = self.parse('<a><b>Foo</b><b>Bar</b></a>') root = tree.getroot() self.assertEquals(['Foo', 'Bar'], tree.xpath('/a/b/text()')) def test_xpath_list_text_parent(self): tree = self.parse('<a><b>FooBar</b><b>BarFoo</b></a>') root = tree.getroot() self.assertEquals(['FooBar', 'BarFoo'], tree.xpath('/a/b/text()')) self.assertEquals([root[0], root[1]], [r.getparent() for r in tree.xpath('/a/b/text()')]) def test_xpath_list_text_parent_no_smart_strings(self): tree = self.parse('<a><b>FooBar</b><b>BarFoo</b></a>') root = tree.getroot() self.assertEquals(['FooBar', 'BarFoo'], tree.xpath('/a/b/text()', smart_strings=True)) self.assertEquals([root[0], root[1]], [r.getparent() for r in tree.xpath('/a/b/text()', smart_strings=True)]) self.assertEquals(['FooBar', 'BarFoo'], tree.xpath('/a/b/text()', smart_strings=False)) self.assertEquals([False, False], [hasattr(r, 'getparent') for r in tree.xpath('/a/b/text()', smart_strings=False)]) def test_xpath_list_unicode_text_parent(self): xml = _bytes('<a><b>FooBar\\u0680\\u3120</b><b>BarFoo\\u0680\\u3120</b></a>').decode("unicode_escape") tree = self.parse(xml.encode('utf-8')) root = tree.getroot() self.assertEquals([_bytes('FooBar\\u0680\\u3120').decode("unicode_escape"), _bytes('BarFoo\\u0680\\u3120').decode("unicode_escape")], tree.xpath('/a/b/text()')) self.assertEquals([root[0], root[1]], [r.getparent() for r in tree.xpath('/a/b/text()')]) def test_xpath_list_attribute(self): tree = self.parse('<a b="B" c="C"/>') self.assertEquals(['B'], tree.xpath('/a/@b')) def test_xpath_list_attribute_parent(self): tree = self.parse('<a b="BaSdFgHjKl" c="CqWeRtZuI"/>') results = tree.xpath('/a/@c') self.assertEquals(1, 
len(results)) self.assertEquals('CqWeRtZuI', results[0]) self.assertEquals(tree.getroot().tag, results[0].getparent().tag) def test_xpath_list_attribute_parent_no_smart_strings(self): tree = self.parse('<a b="BaSdFgHjKl" c="CqWeRtZuI"/>') results = tree.xpath('/a/@c', smart_strings=True) self.assertEquals(1, len(results)) self.assertEquals('CqWeRtZuI', results[0]) self.assertEquals(tree.getroot().tag, results[0].getparent().tag) results = tree.xpath('/a/@c', smart_strings=False) self.assertEquals(1, len(results)) self.assertEquals('CqWeRtZuI', results[0]) self.assertEquals(False, hasattr(results[0], 'getparent')) def test_xpath_list_comment(self): tree = self.parse('<a><!-- Foo --></a>') self.assertEquals(['<!-- Foo -->'], list(map(repr, tree.xpath('/a/node()')))) def test_rel_xpath_boolean(self): root = etree.XML('<a><b><c/></b></a>') el = root[0] self.assert_(el.xpath('boolean(c)')) self.assert_(not el.xpath('boolean(d)')) def test_rel_xpath_list_elements(self): tree = self.parse('<a><c><b>Foo</b><b>Bar</b></c><c><b>Hey</b></c></a>') root = tree.getroot() c = root[0] self.assertEquals([c[0], c[1]], c.xpath('b')) self.assertEquals([c[0], c[1], root[1][0]], c.xpath('//b')) def test_xpath_ns(self): tree = self.parse('<a xmlns="uri:a"><b></b></a>') root = tree.getroot() self.assertEquals( [root[0]], tree.xpath('//foo:b', namespaces={'foo': 'uri:a'})) self.assertEquals( [], tree.xpath('//foo:b', namespaces={'foo': 'uri:c'})) self.assertEquals( [root[0]], root.xpath('//baz:b', namespaces={'baz': 'uri:a'})) def test_xpath_ns_none(self): tree = self.parse('<a xmlns="uri:a"><b></b></a>') root = tree.getroot() self.assertRaises( TypeError, root.xpath, '//b', namespaces={None: 'uri:a'}) def test_xpath_ns_empty(self): tree = self.parse('<a xmlns="uri:a"><b></b></a>') root = tree.getroot() self.assertRaises( TypeError, root.xpath, '//b', namespaces={'': 'uri:a'}) def test_xpath_error(self): tree = self.parse('<a/>') self.assertRaises(etree.XPathEvalError, tree.xpath, '\\fad') def test_xpath_class_error(self): self.assertRaises(SyntaxError, etree.XPath, '\\fad') self.assertRaises(etree.XPathSyntaxError, etree.XPath, '\\fad') def test_xpath_prefix_error(self): tree = self.parse('<a/>') self.assertRaises(etree.XPathEvalError, tree.xpath, '/fa:d') def test_xpath_class_prefix_error(self): tree = self.parse('<a/>') xpath = etree.XPath("/fa:d") self.assertRaises(etree.XPathEvalError, xpath, tree) def test_elementtree_getpath(self): a = etree.Element("a") b = etree.SubElement(a, "b") c = etree.SubElement(a, "c") d1 = etree.SubElement(c, "d") d2 = etree.SubElement(c, "d") tree = etree.ElementTree(a) self.assertEqual('/a/c/d', tree.getpath(d2)[:6]) self.assertEqual([d2], tree.xpath(tree.getpath(d2))) def test_elementtree_getpath_partial(self): a = etree.Element("a") b = etree.SubElement(a, "b") c = etree.SubElement(a, "c") d1 = etree.SubElement(c, "d") d2 = etree.SubElement(c, "d") tree = etree.ElementTree(c) self.assertEqual('/c/d', tree.getpath(d2)[:4]) self.assertEqual([d2], tree.xpath(tree.getpath(d2))) def test_xpath_evaluator(self): tree = self.parse('<a><b><c></c></b></a>') e = etree.XPathEvaluator(tree) root = tree.getroot() self.assertEquals( [root], e('//a')) def test_xpath_evaluator_tree(self): tree = self.parse('<a><b><c></c></b></a>') child_tree = etree.ElementTree(tree.getroot()[0]) e = etree.XPathEvaluator(child_tree) self.assertEquals( [], e('a')) root = child_tree.getroot() self.assertEquals( [root[0]], e('c')) def test_xpath_evaluator_tree_absolute(self): tree = 
self.parse('<a><b><c></c></b></a>') child_tree = etree.ElementTree(tree.getroot()[0]) e = etree.XPathEvaluator(child_tree) self.assertEquals( [], e('/a')) root = child_tree.getroot() self.assertEquals( [root], e('/b')) self.assertEquals( [], e('/c')) def test_xpath_evaluator_element(self): tree = self.parse('<a><b><c></c></b></a>') root = tree.getroot() e = etree.XPathEvaluator(root[0]) self.assertEquals( [root[0][0]], e('c')) def test_xpath_extensions(self): def foo(evaluator, a): return 'hello %s' % a extension = {(None, 'foo'): foo} tree = self.parse('<a><b></b></a>') e = etree.XPathEvaluator(tree, extensions=[extension]) self.assertEquals( "hello you", e("foo('you')")) def test_xpath_extensions_wrong_args(self): def foo(evaluator, a, b): return "hello %s and %s" % (a, b) extension = {(None, 'foo'): foo} tree = self.parse('<a><b></b></a>') e = etree.XPathEvaluator(tree, extensions=[extension]) self.assertRaises(TypeError, e, "foo('you')") def test_xpath_extensions_error(self): def foo(evaluator, a): return 1/0 extension = {(None, 'foo'): foo} tree = self.parse('<a/>') e = etree.XPathEvaluator(tree, extensions=[extension]) self.assertRaises(ZeroDivisionError, e, "foo('test')") def test_xpath_extensions_nodes(self): def f(evaluator, arg): r = etree.Element('results') b = etree.SubElement(r, 'result') b.text = 'Hoi' b = etree.SubElement(r, 'result') b.text = 'Dag' return r x = self.parse('<a/>') e = etree.XPathEvaluator(x, extensions=[{(None, 'foo'): f}]) r = e("foo('World')/result") self.assertEquals(2, len(r)) self.assertEquals('Hoi', r[0].text) self.assertEquals('Dag', r[1].text) def test_xpath_extensions_nodes_append(self): def f(evaluator, nodes): r = etree.SubElement(nodes[0], 'results') b = etree.SubElement(r, 'result') b.text = 'Hoi' b = etree.SubElement(r, 'result') b.text = 'Dag' return r x = self.parse('<a/>') e = etree.XPathEvaluator(x, extensions=[{(None, 'foo'): f}]) r = e("foo(/*)/result") self.assertEquals(2, len(r)) self.assertEquals('Hoi', r[0].text) self.assertEquals('Dag', r[1].text) def test_xpath_extensions_nodes_append2(self): def f(evaluator, nodes): r = etree.Element('results') b = etree.SubElement(r, 'result') b.text = 'Hoi' b = etree.SubElement(r, 'result') b.text = 'Dag' r.append(nodes[0]) return r x = self.parse('<result>Honk</result>') e = etree.XPathEvaluator(x, extensions=[{(None, 'foo'): f}]) r = e("foo(/*)/result") self.assertEquals(3, len(r)) self.assertEquals('Hoi', r[0].text) self.assertEquals('Dag', r[1].text) self.assertEquals('Honk', r[2].text) def test_xpath_context_node(self): tree = self.parse('<root><a/><b><c/></b></root>') check_call = [] def check_context(ctxt, nodes): self.assertEquals(len(nodes), 1) check_call.append(nodes[0].tag) self.assertEquals(ctxt.context_node, nodes[0]) return True find = etree.XPath("//*[p:foo(.)]", namespaces={'p' : 'ns'}, extensions=[{('ns', 'foo') : check_context}]) find(tree) check_call.sort() self.assertEquals(check_call, ["a", "b", "c", "root"]) def test_xpath_eval_context_propagation(self): tree = self.parse('<root><a/><b><c/></b></root>') check_call = {} def check_context(ctxt, nodes): self.assertEquals(len(nodes), 1) tag = nodes[0].tag # empty during the "b" call, a "b" during the "c" call check_call[tag] = ctxt.eval_context.get("b") ctxt.eval_context[tag] = tag return True find = etree.XPath("//b[p:foo(.)]/c[p:foo(.)]", namespaces={'p' : 'ns'}, extensions=[{('ns', 'foo') : check_context}]) result = find(tree) self.assertEquals(result, [tree.getroot()[1][0]]) self.assertEquals(check_call, {'b':None, 'c':'b'}) 
def test_xpath_eval_context_clear(self): tree = self.parse('<root><a/><b><c/></b></root>') check_call = {} def check_context(ctxt): check_call["done"] = True # context must be empty for each new evaluation self.assertEquals(len(ctxt.eval_context), 0) ctxt.eval_context["test"] = True return True find = etree.XPath("//b[p:foo()]", namespaces={'p' : 'ns'}, extensions=[{('ns', 'foo') : check_context}]) result = find(tree) self.assertEquals(result, [tree.getroot()[1]]) self.assertEquals(check_call["done"], True) check_call.clear() find = etree.XPath("//b[p:foo()]", namespaces={'p' : 'ns'}, extensions=[{('ns', 'foo') : check_context}]) result = find(tree) self.assertEquals(result, [tree.getroot()[1]]) self.assertEquals(check_call["done"], True) def test_xpath_variables(self): x = self.parse('<a attr="true"/>') e = etree.XPathEvaluator(x) expr = "/a[@attr=$aval]" r = e(expr, aval=1) self.assertEquals(0, len(r)) r = e(expr, aval="true") self.assertEquals(1, len(r)) self.assertEquals("true", r[0].get('attr')) r = e(expr, aval=True) self.assertEquals(1, len(r)) self.assertEquals("true", r[0].get('attr')) def test_xpath_variables_nodeset(self): x = self.parse('<a attr="true"/>') e = etree.XPathEvaluator(x) element = etree.Element("test-el") etree.SubElement(element, "test-sub") expr = "$value" r = e(expr, value=element) self.assertEquals(1, len(r)) self.assertEquals(element.tag, r[0].tag) self.assertEquals(element[0].tag, r[0][0].tag) def test_xpath_extensions_mix(self): x = self.parse('<a attr="true"><test/></a>') class LocalException(Exception): pass def foo(evaluator, a, varval): etree.Element("DUMMY") if varval == 0: raise LocalException elif varval == 1: return () elif varval == 2: return None elif varval == 3: return a[0][0] a = a[0] if a.get("attr") == str(varval): return a else: return etree.Element("NODE") extension = {(None, 'foo'): foo} e = etree.XPathEvaluator(x, extensions=[extension]) del x self.assertRaises(LocalException, e, "foo(., 0)") self.assertRaises(LocalException, e, "foo(., $value)", value=0) r = e("foo(., $value)", value=1) self.assertEqual(len(r), 0) r = e("foo(., 1)") self.assertEqual(len(r), 0) r = e("foo(., $value)", value=2) self.assertEqual(len(r), 0) r = e("foo(., $value)", value=3) self.assertEqual(len(r), 1) self.assertEqual(r[0].tag, "test") r = e("foo(., $value)", value="false") self.assertEqual(len(r), 1) self.assertEqual(r[0].tag, "NODE") r = e("foo(., 'false')") self.assertEqual(len(r), 1) self.assertEqual(r[0].tag, "NODE") r = e("foo(., 'true')") self.assertEqual(len(r), 1) self.assertEqual(r[0].tag, "a") self.assertEqual(r[0][0].tag, "test") r = e("foo(., $value)", value="true") self.assertEqual(len(r), 1) self.assertEqual(r[0].tag, "a") self.assertRaises(LocalException, e, "foo(., 0)") self.assertRaises(LocalException, e, "foo(., $value)", value=0) class ETreeXPathClassTestCase(HelperTestCase): "Tests for the XPath class" def test_xpath_compile_doc(self): x = self.parse('<a attr="true"/>') expr = etree.XPath("/a[@attr != 'true']") r = expr(x) self.assertEquals(0, len(r)) expr = etree.XPath("/a[@attr = 'true']") r = expr(x) self.assertEquals(1, len(r)) expr = etree.XPath( expr.path ) r = expr(x) self.assertEquals(1, len(r)) def test_xpath_compile_element(self): x = self.parse('<a><b/><c/></a>') root = x.getroot() expr = etree.XPath("./b") r = expr(root) self.assertEquals(1, len(r)) self.assertEquals('b', r[0].tag) expr = etree.XPath("./*") r = expr(root) self.assertEquals(2, len(r)) def test_xpath_compile_vars(self): x = self.parse('<a attr="true"/>') expr = 
etree.XPath("/a[@attr=$aval]") r = expr(x, aval=False) self.assertEquals(0, len(r)) r = expr(x, aval=True) self.assertEquals(1, len(r)) def test_xpath_compile_error(self): self.assertRaises(SyntaxError, etree.XPath, '\\fad') def test_xpath_elementtree_error(self): self.assertRaises(ValueError, etree.XPath('*'), etree.ElementTree()) class ETreeETXPathClassTestCase(HelperTestCase): "Tests for the ETXPath class" def test_xpath_compile_ns(self): x = self.parse('<a><b xmlns="nsa"/><b xmlns="nsb"/></a>') expr = etree.ETXPath("/a/{nsa}b") r = expr(x) self.assertEquals(1, len(r)) self.assertEquals('{nsa}b', r[0].tag) expr = etree.ETXPath("/a/{nsb}b") r = expr(x) self.assertEquals(1, len(r)) self.assertEquals('{nsb}b', r[0].tag) # disabled this test as non-ASCII characters in namespace URIs are # not acceptable def _test_xpath_compile_unicode(self): x = self.parse(_bytes('<a><b xmlns="http://nsa/\\uf8d2"/><b xmlns="http://nsb/\\uf8d1"/></a>' ).decode("unicode_escape")) expr = etree.ETXPath(_bytes("/a/{http://nsa/\\uf8d2}b").decode("unicode_escape")) r = expr(x) self.assertEquals(1, len(r)) self.assertEquals(_bytes('{http://nsa/\\uf8d2}b').decode("unicode_escape"), r[0].tag) expr = etree.ETXPath(_bytes("/a/{http://nsb/\\uf8d1}b").decode("unicode_escape")) r = expr(x) self.assertEquals(1, len(r)) self.assertEquals(_bytes('{http://nsb/\\uf8d1}b').decode("unicode_escape"), r[0].tag) SAMPLE_XML = etree.parse(BytesIO(""" <body> <tag>text</tag> <section> <tag>subtext</tag> </section> <tag /> <tag /> </body> """)) def tag(elem): return elem.tag def stringTest(ctxt, s1): return "Hello "+s1 def floatTest(ctxt, f1): return f1+4 def booleanTest(ctxt, b1): return not b1 def setTest(ctxt, st1): return st1[0] def setTest2(ctxt, st1): return st1[0:2] def argsTest1(ctxt, s, f, b, st): return ", ".join(map(str, (s, f, b, list(map(tag, st))))) def argsTest2(ctxt, st1, st2): st1.extend(st2) return st1 def resultTypesTest(ctxt): return ["x","y"] def resultTypesTest2(ctxt): return resultTypesTest uri = "http://www.example.com/" extension = {(None, 'stringTest'): stringTest, (None, 'floatTest'): floatTest, (None, 'booleanTest'): booleanTest, (None, 'setTest'): setTest, (None, 'setTest2'): setTest2, (None, 'argsTest1'): argsTest1, (None, 'argsTest2'): argsTest2, (None, 'resultTypesTest'): resultTypesTest, (None, 'resultTypesTest2'): resultTypesTest2,} def xpath(): """ Test xpath extension functions. >>> root = SAMPLE_XML >>> e = etree.XPathEvaluator(root, extensions=[extension]) >>> e("stringTest('you')") 'Hello you' >>> e(_bytes("stringTest('\\\\xe9lan')").decode("unicode_escape")) u'Hello \\xe9lan' >>> e("stringTest('you','there')") Traceback (most recent call last): ... TypeError: stringTest() takes exactly 2 arguments (3 given) >>> e("floatTest(2)") 6.0 >>> e("booleanTest(true())") False >>> list(map(tag, e("setTest(/body/tag)"))) ['tag'] >>> list(map(tag, e("setTest2(/body/*)"))) ['tag', 'section'] >>> e("argsTest1('a',1.5,true(),/body/tag)") "a, 1.5, True, ['tag', 'tag', 'tag']" >>> list(map(tag, e("argsTest2(/body/tag, /body/section)"))) ['tag', 'section', 'tag', 'tag'] >>> e("resultTypesTest()") Traceback (most recent call last): ... XPathResultError: This is not a node: 'x' >>> try: ... e("resultTypesTest2()") ... except etree.XPathResultError: ... 
print("Got error") Got error """ if sys.version_info[0] >= 3: xpath.__doc__ = xpath.__doc__.replace(" u'", " '") xpath.__doc__ = xpath.__doc__.replace(" XPathResultError", " lxml.etree.XPathResultError") xpath.__doc__ = xpath.__doc__.replace(" exactly 2 arguments", " exactly 2 positional arguments") def test_suite(): suite = unittest.TestSuite() suite.addTests([unittest.makeSuite(ETreeXPathTestCase)]) suite.addTests([unittest.makeSuite(ETreeXPathClassTestCase)]) suite.addTests([unittest.makeSuite(ETreeETXPathClassTestCase)]) suite.addTests([doctest.DocTestSuite()]) suite.addTests( [make_doctest('../../../doc/xpathxslt.txt')]) return suite if __name__ == '__main__': print('to test use test.py %s' % __file__)
from . import models
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_urllib_parse_urlparse
from ..utils import (
    determine_ext,
    int_or_none,
    xpath_attr,
    xpath_text,
)


class RuutuIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?ruutu\.fi/video/(?P<id>\d+)'
    _TESTS = [
        {
            'url': 'http://www.ruutu.fi/video/2058907',
            'md5': 'ab2093f39be1ca8581963451b3c0234f',
            'info_dict': {
                'id': '2058907',
                'ext': 'mp4',
                'title': 'Oletko aina halunnut tietää mitä tapahtuu vain hetki ennen lähetystä? - Nyt se selvisi!',
                'description': 'md5:cfc6ccf0e57a814360df464a91ff67d6',
                'thumbnail': 're:^https?://.*\.jpg$',
                'duration': 114,
                'age_limit': 0,
            },
        },
        {
            'url': 'http://www.ruutu.fi/video/2057306',
            'md5': '065a10ae4d5b8cfd9d0c3d332465e3d9',
            'info_dict': {
                'id': '2057306',
                'ext': 'mp4',
                'title': 'Superpesis: katso koko kausi Ruudussa',
                'description': 'md5:da2736052fef3b2bd5e0005e63c25eac',
                'thumbnail': 're:^https?://.*\.jpg$',
                'duration': 40,
                'age_limit': 0,
            },
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        video_xml = self._download_xml(
            'http://gatling.ruutu.fi/media-xml-cache?id=%s' % video_id, video_id)

        formats = []
        processed_urls = []

        def extract_formats(node):
            for child in node:
                if child.tag.endswith('Files'):
                    extract_formats(child)
                elif child.tag.endswith('File'):
                    video_url = child.text
                    if (not video_url or video_url in processed_urls or
                            any(p in video_url for p in ('NOT_USED', 'NOT-USED'))):
                        return
                    processed_urls.append(video_url)
                    ext = determine_ext(video_url)
                    if ext == 'm3u8':
                        formats.extend(self._extract_m3u8_formats(
                            video_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
                    elif ext == 'f4m':
                        formats.extend(self._extract_f4m_formats(
                            video_url, video_id, f4m_id='hds', fatal=False))
                    else:
                        proto = compat_urllib_parse_urlparse(video_url).scheme
                        if not child.tag.startswith('HTTP') and proto != 'rtmp':
                            continue
                        preference = -1 if proto == 'rtmp' else 1
                        label = child.get('label')
                        tbr = int_or_none(child.get('bitrate'))
                        format_id = '%s-%s' % (proto, label if label else tbr) if label or tbr else proto
                        if not self._is_valid_url(video_url, video_id, format_id):
                            continue
                        width, height = [int_or_none(x) for x in child.get('resolution', 'x').split('x')[:2]]
                        formats.append({
                            'format_id': format_id,
                            'url': video_url,
                            'width': width,
                            'height': height,
                            'tbr': tbr,
                            'preference': preference,
                        })

        extract_formats(video_xml.find('./Clip'))

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': xpath_attr(video_xml, './/Behavior/Program', 'program_name', 'title', fatal=True),
            'description': xpath_attr(video_xml, './/Behavior/Program', 'description', 'description'),
            'thumbnail': xpath_attr(video_xml, './/Behavior/Startpicture', 'href', 'thumbnail'),
            'duration': int_or_none(xpath_text(video_xml, './/Runtime', 'duration')),
            'age_limit': int_or_none(xpath_text(video_xml, './/AgeLimit', 'age limit')),
            'formats': formats,
        }
""" sentry.plugins.base.structs ~~~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import, print_function __all__ = ['ReleaseHook'] from sentry.models import Release from sentry.plugins import ReleaseHook from sentry.testutils import TestCase class StartReleaseTest(TestCase): def test_minimal(self): project = self.create_project() version = 'bbee5b51f84611e4b14834363b8514c2' hook = ReleaseHook(project) hook.start_release(version) release = Release.objects.get( project=project, version=version, ) assert release.date_started class FinishReleaseTest(TestCase): def test_minimal(self): project = self.create_project() version = 'bbee5b51f84611e4b14834363b8514c2' hook = ReleaseHook(project) hook.finish_release(version) release = Release.objects.get( project=project, version=version, ) assert release.date_released
def test_local_variable():
    x = 1
    x = 2
import os import sys import types def setpath(): dllpath='%s/dll' %(os.path.dirname(os.path.realpath(__file__))) if 'PATH' in os.environ: if dllpath not in os.environ['PATH']: os.environ['PATH']='%s;%s' % (dllpath, os.environ['PATH']) else: os.environ['PATH']=dllpath setpath() from hpdf_consts import * from hpdf_types import * if os.sys.platform=='win32': harudll='libhpdf.dll' #haru=WinDLL(harudll) haru=CDLL(harudll) else: harudll='libhpdf.so' haru=CDLL(harudll) HPDF_HANDLE=c_void_p HPDF_Doc=HPDF_HANDLE HPDF_Page=HPDF_HANDLE HPDF_Pages=HPDF_HANDLE HPDF_Stream=HPDF_HANDLE HPDF_Image=HPDF_HANDLE HPDF_Font=HPDF_HANDLE HPDF_Outline=HPDF_HANDLE HPDF_Encoder=HPDF_HANDLE HPDF_Destination=HPDF_HANDLE HPDF_XObject=HPDF_HANDLE HPDF_Annotation=HPDF_HANDLE HPDF_ExtGState=HPDF_HANDLE HPDF_GetVersion=haru.HPDF_GetVersion HPDF_GetVersion.restype=c_char_p HPDF_NewEx=haru.HPDF_NewEx HPDF_NewEx.restype=HPDF_Doc HPDF_New=haru.HPDF_New HPDF_New.restype=HPDF_Doc HPDF_SetErrorHandler=haru.HPDF_SetErrorHandler HPDF_SetErrorHandler.restype=HPDF_STATUS HPDF_Free=haru.HPDF_Free HPDF_Free.restype=None HPDF_NewDoc=haru.HPDF_NewDoc HPDF_NewDoc.restype=HPDF_STATUS HPDF_FreeDoc=haru.HPDF_FreeDoc HPDF_FreeDoc.restype=None HPDF_HasDoc=haru.HPDF_HasDoc HPDF_HasDoc.restype=HPDF_BOOL HPDF_FreeDocAll=haru.HPDF_FreeDocAll HPDF_FreeDocAll.restype=None HPDF_SaveToStream=haru.HPDF_SaveToStream HPDF_SaveToStream.restype=HPDF_STATUS HPDF_GetStreamSize=haru.HPDF_GetStreamSize HPDF_GetStreamSize.restype=HPDF_UINT32 _HPDF_ReadFromStream=haru.HPDF_ReadFromStream _HPDF_ReadFromStream.restype=HPDF_STATUS def HPDF_ReadFromStream( pdf, #HPDF_Doc buf, #POINTER(HPDF_BYTE) size, #POINTER(HPDF_UINT32) ): if type(buf) in (types.ListType, types.TupleType): size=len(buf) buf=pointer((HPDF_BYTE*size)(*buf)) size=HPDF_UINT32(int(size)) return _HPDF_ReadFromStream( pdf, #HPDF_Doc buf, #POINTER(HPDF_BYTE) size, #POINTER(HPDF_UINT32) ) HPDF_ResetStream=haru.HPDF_ResetStream HPDF_ResetStream.restype=HPDF_STATUS HPDF_SaveToFile=haru.HPDF_SaveToFile HPDF_SaveToFile.restype=HPDF_STATUS HPDF_GetError=haru.HPDF_GetError HPDF_GetError.restype=HPDF_STATUS HPDF_GetErrorDetail=haru.HPDF_GetErrorDetail HPDF_GetErrorDetail.restype=HPDF_STATUS HPDF_ResetError=haru.HPDF_ResetError HPDF_ResetError.restype=None _HPDF_SetPagesConfiguration=haru.HPDF_SetPagesConfiguration _HPDF_SetPagesConfiguration.restype=HPDF_STATUS def HPDF_SetPagesConfiguration( pdf, #HPDF_Doc page_per_pages, #HPDF_UINT ): page_per_pages=HPDF_UINT(int(page_per_pages)) return _HPDF_SetPagesConfiguration( pdf, #HPDF_Doc page_per_pages, #HPDF_UINT ) HPDF_GetPageByIndex=haru.HPDF_GetPageByIndex HPDF_GetPageByIndex.restype=HPDF_Page HPDF_GetPageLayout=haru.HPDF_GetPageLayout HPDF_GetPageLayout.restype=HPDF_PageLayout HPDF_SetPageLayout=haru.HPDF_SetPageLayout HPDF_SetPageLayout.restype=HPDF_STATUS HPDF_GetPageMode=haru.HPDF_GetPageMode HPDF_GetPageMode.restype=HPDF_PageMode HPDF_SetPageMode=haru.HPDF_SetPageMode HPDF_SetPageMode.restype=HPDF_STATUS HPDF_GetViewerPreference=haru.HPDF_GetViewerPreference HPDF_GetViewerPreference.restype=HPDF_UINT HPDF_SetViewerPreference=haru.HPDF_SetViewerPreference HPDF_SetViewerPreference.restype=HPDF_STATUS HPDF_SetOpenAction=haru.HPDF_SetOpenAction HPDF_SetOpenAction.restype=HPDF_STATUS HPDF_GetCurrentPage=haru.HPDF_GetCurrentPage HPDF_GetCurrentPage.restype=HPDF_Page HPDF_AddPage=haru.HPDF_AddPage HPDF_AddPage.restype=HPDF_Page HPDF_InsertPage=haru.HPDF_InsertPage HPDF_InsertPage.restype=HPDF_Page _HPDF_Page_SetWidth=haru.HPDF_Page_SetWidth 
_HPDF_Page_SetWidth.restype=HPDF_STATUS def HPDF_Page_SetWidth( page, #HPDF_Page value, #HPDF_REAL ): value=HPDF_REAL(value) return _HPDF_Page_SetWidth( page, #HPDF_Page value, #HPDF_REAL ) _HPDF_Page_SetHeight=haru.HPDF_Page_SetHeight _HPDF_Page_SetHeight.restype=HPDF_STATUS def HPDF_Page_SetHeight( page, #HPDF_Page value, #HPDF_REAL ): value=HPDF_REAL(value) return _HPDF_Page_SetHeight( page, #HPDF_Page value, #HPDF_REAL ) HPDF_Page_SetSize=haru.HPDF_Page_SetSize HPDF_Page_SetSize.restype=HPDF_STATUS _HPDF_Page_SetRotate=haru.HPDF_Page_SetRotate _HPDF_Page_SetRotate.restype=HPDF_STATUS def HPDF_Page_SetRotate( page, #HPDF_Page angle, #HPDF_UINT16 ): angle=HPDF_UINT16(int(angle)) return _HPDF_Page_SetRotate( page, #HPDF_Page angle, #HPDF_UINT16 ) HPDF_GetFont=haru.HPDF_GetFont HPDF_GetFont.restype=HPDF_Font HPDF_LoadType1FontFromFile=haru.HPDF_LoadType1FontFromFile HPDF_LoadType1FontFromFile.restype=c_char_p HPDF_LoadTTFontFromFile=haru.HPDF_LoadTTFontFromFile HPDF_LoadTTFontFromFile.restype=c_char_p HPDF_LoadTTFontFromFile2=haru.HPDF_LoadTTFontFromFile2 HPDF_LoadTTFontFromFile2.restype=c_char_p _HPDF_AddPageLabel=haru.HPDF_AddPageLabel _HPDF_AddPageLabel.restype=HPDF_STATUS def HPDF_AddPageLabel( pdf, #HPDF_Doc page_num, #HPDF_UINT style, #HPDF_PageNumStyle first_page, #HPDF_UINT prefix, #c_char_p ): page_num, first_page=[HPDF_UINT(int(i))for i in (page_num, first_page)] return _HPDF_AddPageLabel( pdf, #HPDF_Doc page_num, #HPDF_UINT style, #HPDF_PageNumStyle first_page, #HPDF_UINT prefix, #c_char_p ) HPDF_UseJPFonts=haru.HPDF_UseJPFonts HPDF_UseJPFonts.restype=HPDF_STATUS HPDF_UseKRFonts=haru.HPDF_UseKRFonts HPDF_UseKRFonts.restype=HPDF_STATUS HPDF_UseCNSFonts=haru.HPDF_UseCNSFonts HPDF_UseCNSFonts.restype=HPDF_STATUS HPDF_UseCNTFonts=haru.HPDF_UseCNTFonts HPDF_UseCNTFonts.restype=HPDF_STATUS HPDF_CreateOutline=haru.HPDF_CreateOutline HPDF_CreateOutline.restype=HPDF_Outline HPDF_Outline_SetOpened=haru.HPDF_Outline_SetOpened HPDF_Outline_SetOpened.restype=HPDF_STATUS HPDF_Outline_SetDestination=haru.HPDF_Outline_SetDestination HPDF_Outline_SetDestination.restype=HPDF_STATUS HPDF_Page_CreateDestination=haru.HPDF_Page_CreateDestination HPDF_Page_CreateDestination.restype=HPDF_Destination _HPDF_Destination_SetXYZ=haru.HPDF_Destination_SetXYZ _HPDF_Destination_SetXYZ.restype=HPDF_STATUS def HPDF_Destination_SetXYZ( dst, #HPDF_Destination left, #HPDF_REAL top, #HPDF_REAL zoom, #HPDF_REAL ): left=HPDF_REAL(left) top=HPDF_REAL(top) zoom=HPDF_REAL(zoom) return _HPDF_Destination_SetXYZ( dst, #HPDF_Destination left, #HPDF_REAL top, #HPDF_REAL zoom, #HPDF_REAL ) HPDF_Destination_SetFit=haru.HPDF_Destination_SetFit HPDF_Destination_SetFit.restype=HPDF_STATUS _HPDF_Destination_SetFitH=haru.HPDF_Destination_SetFitH _HPDF_Destination_SetFitH.restype=HPDF_STATUS def HPDF_Destination_SetFitH( dst, #HPDF_Destination top, #HPDF_REAL ): top=HPDF_REAL(top) return _HPDF_Destination_SetFitH( dst, #HPDF_Destination top, #HPDF_REAL ) _HPDF_Destination_SetFitV=haru.HPDF_Destination_SetFitV _HPDF_Destination_SetFitV.restype=HPDF_STATUS def HPDF_Destination_SetFitV( dst, #HPDF_Destination left, #HPDF_REAL ): left=HPDF_REAL(left) return _HPDF_Destination_SetFitV( dst, #HPDF_Destination left, #HPDF_REAL ) _HPDF_Destination_SetFitR=haru.HPDF_Destination_SetFitR _HPDF_Destination_SetFitR.restype=HPDF_STATUS def HPDF_Destination_SetFitR( dst, #HPDF_Destination left, #HPDF_REAL bottom, #HPDF_REAL right, #HPDF_REAL top, #HPDF_REAL ): left=HPDF_REAL(left) bottom=HPDF_REAL(bottom) right=HPDF_REAL(right) top=HPDF_REAL(top) 
return _HPDF_Destination_SetFitR( dst, #HPDF_Destination left, #HPDF_REAL bottom, #HPDF_REAL right, #HPDF_REAL top, #HPDF_REAL ) HPDF_Destination_SetFitB=haru.HPDF_Destination_SetFitB HPDF_Destination_SetFitB.restype=HPDF_STATUS _HPDF_Destination_SetFitBH=haru.HPDF_Destination_SetFitBH _HPDF_Destination_SetFitBH.restype=HPDF_STATUS def HPDF_Destination_SetFitBH( dst, #HPDF_Destination top, #HPDF_REAL ): top=HPDF_REAL(top) return _HPDF_Destination_SetFitBH( dst, #HPDF_Destination top, #HPDF_REAL ) _HPDF_Destination_SetFitBV=haru.HPDF_Destination_SetFitBV _HPDF_Destination_SetFitBV.restype=HPDF_STATUS def HPDF_Destination_SetFitBV( dst, #HPDF_Destination left, #HPDF_REAL ): left=HPDF_REAL(left) return _HPDF_Destination_SetFitBV( dst, #HPDF_Destination left, #HPDF_REAL ) HPDF_GetEncoder=haru.HPDF_GetEncoder HPDF_GetEncoder.restype=HPDF_Encoder HPDF_GetCurrentEncoder=haru.HPDF_GetCurrentEncoder HPDF_GetCurrentEncoder.restype=HPDF_Encoder HPDF_SetCurrentEncoder=haru.HPDF_SetCurrentEncoder HPDF_SetCurrentEncoder.restype=HPDF_STATUS HPDF_Encoder_GetType=haru.HPDF_Encoder_GetType HPDF_Encoder_GetType.restype=HPDF_EncoderType _HPDF_Encoder_GetByteType=haru.HPDF_Encoder_GetByteType _HPDF_Encoder_GetByteType.restype=HPDF_ByteType def HPDF_Encoder_GetByteType( encoder, #HPDF_Encoder text, #const char * index #HPDF_UINT ): if type(text) in (types.ListType, types.TupleType): if type(text[-1]) != types.StringType: text=[chr(i) for i in text] text=''.join(text) return _HPDF_Encoder_GetByteType( encoder, #HPDF_Encoder text, #const char * index #HPDF_UINT ) HPDF_Encoder_GetUnicode=haru.HPDF_Encoder_GetUnicode HPDF_Encoder_GetUnicode.restype=HPDF_UNICODE HPDF_Encoder_GetWritingMode=haru.HPDF_Encoder_GetWritingMode HPDF_Encoder_GetWritingMode.restype=HPDF_WritingMode HPDF_UseJPEncodings=haru.HPDF_UseJPEncodings HPDF_UseJPEncodings.restype=HPDF_STATUS HPDF_UseKREncodings=haru.HPDF_UseKREncodings HPDF_UseKREncodings.restype=HPDF_STATUS HPDF_UseCNSEncodings=haru.HPDF_UseCNSEncodings HPDF_UseCNSEncodings.restype=HPDF_STATUS HPDF_UseCNTEncodings=haru.HPDF_UseCNTEncodings HPDF_UseCNTEncodings.restype=HPDF_STATUS HPDF_Page_CreateTextAnnot=haru.HPDF_Page_CreateTextAnnot HPDF_Page_CreateTextAnnot.restype=HPDF_Annotation HPDF_Page_CreateLinkAnnot=haru.HPDF_Page_CreateLinkAnnot HPDF_Page_CreateLinkAnnot.restype=HPDF_Annotation HPDF_Page_CreateURILinkAnnot=haru.HPDF_Page_CreateURILinkAnnot HPDF_Page_CreateURILinkAnnot.restype=HPDF_Annotation HPDF_LinkAnnot_SetHighlightMode=haru.HPDF_LinkAnnot_SetHighlightMode HPDF_LinkAnnot_SetHighlightMode.restype=HPDF_STATUS _HPDF_LinkAnnot_SetBorderStyle=haru.HPDF_LinkAnnot_SetBorderStyle _HPDF_LinkAnnot_SetBorderStyle.restype=HPDF_STATUS def HPDF_LinkAnnot_SetBorderStyle( annot, #HPDF_Annotation width, #HPDF_REAL dash_on, #HPDF_UINT16 dash_off, #HPDF_UINT16 ): width=HPDF_REAL(width) dash_on=HPDF_UINT16(dash_on) dash_off=HPDF_UINT16(dash_off) return _HPDF_LinkAnnot_SetBorderStyle( annot, #HPDF_Annotation width, #HPDF_REAL dash_on, #HPDF_UINT16 dash_off, #HPDF_UINT16 ) HPDF_TextAnnot_SetIcon=haru.HPDF_TextAnnot_SetIcon HPDF_TextAnnot_SetIcon.restype=HPDF_STATUS HPDF_TextAnnot_SetOpened=haru.HPDF_TextAnnot_SetOpened HPDF_TextAnnot_SetOpened.restype=HPDF_STATUS HPDF_LoadPngImageFromFile=haru.HPDF_LoadPngImageFromFile HPDF_LoadPngImageFromFile.restype=HPDF_Image HPDF_LoadPngImageFromFile2=haru.HPDF_LoadPngImageFromFile2 HPDF_LoadPngImageFromFile2.restype=HPDF_Image HPDF_LoadJpegImageFromFile=haru.HPDF_LoadJpegImageFromFile HPDF_LoadJpegImageFromFile.restype=HPDF_Image 
_HPDF_LoadRawImageFromFile=haru.HPDF_LoadRawImageFromFile _HPDF_LoadRawImageFromFile.restype=HPDF_Image def HPDF_LoadRawImageFromFile( pdf, #HPDF_Doc filename, #c_char_p width, #HPDF_UINT height, #HPDF_UINT color_space, #HPDF_ColorSpace ): width=HPDF_UINT(width) height=HPDF_UINT(height) return _HPDF_LoadRawImageFromFile( pdf, #HPDF_Doc filename, #c_char_p width, #HPDF_UINT height, #HPDF_UINT color_space, #HPDF_ColorSpace ) _HPDF_LoadRawImageFromMem=haru.HPDF_LoadRawImageFromMem _HPDF_LoadRawImageFromMem.restype=HPDF_Image def HPDF_LoadRawImageFromMem( pdf, #HPDF_Doc buf, #POINTER(HPDF_BYTE) width, #HPDF_UINT height, #HPDF_UINT color_space, #HPDF_ColorSpace bits_per_component, #HPDF_UINT ): if type(buf) in (types.ListType, types.TupleType): size=len(buf) buf=pointer((HPDF_BYTE*size)(*buf)) if height in [0, None]: height=size/width width=HPDF_UINT(width) height=HPDF_UINT(height) bits_per_component=HPDF_UINT(bits_per_component) return _HPDF_LoadRawImageFromMem( pdf, #HPDF_Doc buf, #POINTER(HPDF_BYTE) width, #HPDF_UINT height, #HPDF_UINT color_space, #HPDF_ColorSpace bits_per_component, #HPDF_UINT ) HPDF_Image_GetSize=haru.HPDF_Image_GetSize HPDF_Image_GetSize.restype=HPDF_Point _HPDF_Image_GetSize2=haru.HPDF_Image_GetSize2 _HPDF_Image_GetSize2.restype=HPDF_STATUS def HPDF_Image_GetSize2( image, #HPDF_Image size=None, #POINTER(HPDF_Point) ): size=HPDF_Point ret= _HPDF_Image_GetSize2( image, #HPDF_Image size, #POINTER(HPDF_Point) ) return ret, size.x, size.y HPDF_Image_GetWidth=haru.HPDF_Image_GetWidth HPDF_Image_GetWidth.restype=HPDF_UINT HPDF_Image_GetHeight=haru.HPDF_Image_GetHeight HPDF_Image_GetHeight.restype=HPDF_UINT HPDF_Image_GetBitsPerComponent=haru.HPDF_Image_GetBitsPerComponent HPDF_Image_GetBitsPerComponent.restype=HPDF_UINT HPDF_Image_GetColorSpace=haru.HPDF_Image_GetColorSpace HPDF_Image_GetColorSpace.restype=c_char_p _HPDF_Image_SetColorMask=haru.HPDF_Image_SetColorMask _HPDF_Image_SetColorMask.restype=HPDF_STATUS def HPDF_Image_SetColorMask( image, #HPDF_Image rmin, #HPDF_UINT rmax, #HPDF_UINT gmin, #HPDF_UINT gmax, #HPDF_UINT bmin, #HPDF_UINT bmax, #HPDF_UINT ): rmin=HPDF_UINT(rmin) rmax=HPDF_UINT(rmax) gmin=HPDF_UINT(gmin) gmax=HPDF_UINT(gmax) bmin=HPDF_UINT(bmin) bmax=HPDF_UINT(bmax) return _HPDF_Image_SetColorMask( image, #HPDF_Image rmin, #HPDF_UINT rmax, #HPDF_UINT gmin, #HPDF_UINT gmax, #HPDF_UINT bmin, #HPDF_UINT bmax, #HPDF_UINT ) HPDF_Image_SetMaskImage=haru.HPDF_Image_SetMaskImage HPDF_Image_SetMaskImage.restype=HPDF_STATUS HPDF_SetInfoAttr=haru.HPDF_SetInfoAttr HPDF_SetInfoAttr.restype=HPDF_STATUS HPDF_GetInfoAttr=haru.HPDF_GetInfoAttr HPDF_GetInfoAttr.restype=c_char_p HPDF_SetInfoDateAttr=haru.HPDF_SetInfoDateAttr HPDF_SetInfoDateAttr.restype=HPDF_STATUS HPDF_SetPassword=haru.HPDF_SetPassword HPDF_SetPassword.restype=HPDF_STATUS _HPDF_SetPermission=haru.HPDF_SetPermission _HPDF_SetPermission.restype=HPDF_STATUS def HPDF_SetPermission( pdf, #HPDF_Doc permission, #HPDF_UINT ): permission=HPDF_UINT(int(permission)) return _HPDF_SetPermission( pdf, #HPDF_Doc permission, #HPDF_UINT ) _HPDF_SetEncryptionMode=haru.HPDF_SetEncryptionMode _HPDF_SetEncryptionMode.restype=HPDF_STATUS def HPDF_SetEncryptionMode( pdf, #HPDF_Doc mode, #HPDF_EncryptMode key_len, #HPDF_UINT ): key_len=HPDF_UINT(int(key_len)) return _HPDF_SetEncryptionMode( pdf, #HPDF_Doc mode, #HPDF_EncryptMode key_len, #HPDF_UINT ) HPDF_SetCompressionMode=haru.HPDF_SetCompressionMode HPDF_SetCompressionMode.restype=HPDF_STATUS HPDF_Font_GetFontName=haru.HPDF_Font_GetFontName HPDF_Font_GetFontName.restype=c_char_p 
HPDF_Font_GetEncodingName=haru.HPDF_Font_GetEncodingName HPDF_Font_GetEncodingName.restype=c_char_p HPDF_Font_GetUnicodeWidth=haru.HPDF_Font_GetUnicodeWidth HPDF_Font_GetUnicodeWidth.restype=HPDF_INT HPDF_Font_GetBBox=haru.HPDF_Font_GetBBox HPDF_Font_GetBBox.restype=HPDF_Box HPDF_Font_GetAscent=haru.HPDF_Font_GetAscent HPDF_Font_GetAscent.restype=HPDF_INT HPDF_Font_GetDescent=haru.HPDF_Font_GetDescent HPDF_Font_GetDescent.restype=HPDF_INT HPDF_Font_GetXHeight=haru.HPDF_Font_GetXHeight HPDF_Font_GetXHeight.restype=HPDF_UINT HPDF_Font_GetCapHeight=haru.HPDF_Font_GetCapHeight HPDF_Font_GetCapHeight.restype=HPDF_UINT HPDF_Font_TextWidth=haru.HPDF_Font_TextWidth HPDF_Font_TextWidth.restype=HPDF_TextWidth _HPDF_Font_MeasureText=haru.HPDF_Font_MeasureText _HPDF_Font_MeasureText.restype=HPDF_UINT def HPDF_Font_MeasureText( font, #HPDF_Font text, #POINTER(HPDF_BYTE) length, #HPDF_UINT width, #HPDF_REAL font_size, #HPDF_REAL char_space, #HPDF_REAL word_space, #HPDF_REAL wordwrap, #HPDF_BOOL real_width, #POINTER(HPDF_REAL) ): if type(text) in (types.TupleType, types.ListType): length=len(text) text=pointer((HPDF_BYTE*length)(*text)) length=HPDF_UINT(int(length)) width=HPDF_REAL(width) font_size=HPDF_REAL(font_size) char_space=HPDF_REAL(char_space) word_space=HPDF_REAL(word_space) real_width=HPDF_REAL(real_width) return _HPDF_Font_MeasureText( font, #HPDF_Font text, #POINTER(HPDF_BYTE) length, #HPDF_UINT width, #HPDF_REAL font_size, #HPDF_REAL char_space, #HPDF_REAL word_space, #HPDF_REAL wordwrap, #HPDF_BOOL real_width, #POINTER(HPDF_REAL) ) HPDF_CreateExtGState=haru.HPDF_CreateExtGState HPDF_CreateExtGState.restype=HPDF_ExtGState _HPDF_ExtGState_SetAlphaStroke=haru.HPDF_ExtGState_SetAlphaStroke _HPDF_ExtGState_SetAlphaStroke.restype=HPDF_STATUS def HPDF_ExtGState_SetAlphaStroke( ext_gstate, #HPDF_ExtGState value, #HPDF_REAL ): value=HPDF_REAL(value) return _HPDF_ExtGState_SetAlphaStroke( ext_gstate, #HPDF_ExtGState value, #HPDF_REAL ) _HPDF_ExtGState_SetAlphaFill=haru.HPDF_ExtGState_SetAlphaFill _HPDF_ExtGState_SetAlphaFill.restype=HPDF_STATUS def HPDF_ExtGState_SetAlphaFill( ext_gstate, #HPDF_ExtGState value, #HPDF_REAL ): value=HPDF_REAL(value) return _HPDF_ExtGState_SetAlphaFill( ext_gstate, #HPDF_ExtGState value, #HPDF_REAL ) HPDF_ExtGState_SetBlendMode=haru.HPDF_ExtGState_SetBlendMode HPDF_ExtGState_SetBlendMode.restype=HPDF_STATUS _HPDF_Page_TextWidth=haru.HPDF_Page_TextWidth _HPDF_Page_TextWidth.restype=HPDF_REAL def HPDF_Page_TextWidth( page, #HPDF_Page text, #c_char_p ): if type(text) in (types.ListType, types.TupleType): if type(text[-1]) != types.StringType: text=[chr(i) for i in text] text=''.join(text) return _HPDF_Page_TextWidth( page, #HPDF_Page text, #c_char_p ) _HPDF_Page_MeasureText=haru.HPDF_Page_MeasureText _HPDF_Page_MeasureText.restype=HPDF_UINT def HPDF_Page_MeasureText( page, #HPDF_Page text, #c_char_p width, #HPDF_REAL wordwrap, #HPDF_BOOL real_width, #POINTER(HPDF_REAL) ): width=HPDF_REAL(width) real_width=HPDF_REAL(real_width) return _HPDF_Page_MeasureText( page, #HPDF_Page text, #c_char_p width, #HPDF_REAL wordwrap, #HPDF_BOOL real_width, #POINTER(HPDF_REAL) ) HPDF_Page_GetWidth=haru.HPDF_Page_GetWidth HPDF_Page_GetWidth.restype=HPDF_REAL HPDF_Page_GetHeight=haru.HPDF_Page_GetHeight HPDF_Page_GetHeight.restype=HPDF_REAL HPDF_Page_GetGMode=haru.HPDF_Page_GetGMode HPDF_Page_GetGMode.restype=HPDF_UINT16 HPDF_Page_GetCurrentPos=haru.HPDF_Page_GetCurrentPos HPDF_Page_GetCurrentPos.restype=HPDF_Point _HPDF_Page_GetCurrentPos2=haru.HPDF_Page_GetCurrentPos2 
_HPDF_Page_GetCurrentPos2.restype=HPDF_STATUS def HPDF_Page_GetCurrentPos2( page, #HPDF_Page pos=None, #POINTER(HPDF_Point) ): pos=HPDF_Point() ret= _HPDF_Page_GetCurrentPos2( page, #HPDF_Page pos, #POINTER(HPDF_Point) ) return ret, pos.x, pos.y HPDF_Page_GetCurrentTextPos=haru.HPDF_Page_GetCurrentTextPos HPDF_Page_GetCurrentTextPos.restype=HPDF_Point _HPDF_Page_GetCurrentTextPos2=haru.HPDF_Page_GetCurrentTextPos2 _HPDF_Page_GetCurrentTextPos2.restype=HPDF_STATUS def HPDF_Page_GetCurrentTextPos2( page, #HPDF_Page pos=None, #POINTER(HPDF_Point) ): pos=HPDF_Point() ret= _HPDF_Page_GetCurrentTextPos2( page, #HPDF_Page pos, #POINTER(HPDF_Point) ) return ret, pos.x, pos.y HPDF_Page_GetCurrentFont=haru.HPDF_Page_GetCurrentFont HPDF_Page_GetCurrentFont.restype=HPDF_Font HPDF_Page_GetCurrentFontSize=haru.HPDF_Page_GetCurrentFontSize HPDF_Page_GetCurrentFontSize.restype=HPDF_REAL HPDF_Page_GetTransMatrix=haru.HPDF_Page_GetTransMatrix HPDF_Page_GetTransMatrix.restype=HPDF_TransMatrix HPDF_Page_GetLineWidth=haru.HPDF_Page_GetLineWidth HPDF_Page_GetLineWidth.restype=HPDF_REAL HPDF_Page_GetLineCap=haru.HPDF_Page_GetLineCap HPDF_Page_GetLineCap.restype=HPDF_LineCap HPDF_Page_GetLineJoin=haru.HPDF_Page_GetLineJoin HPDF_Page_GetLineJoin.restype=HPDF_LineJoin HPDF_Page_GetMiterLimit=haru.HPDF_Page_GetMiterLimit HPDF_Page_GetMiterLimit.restype=HPDF_REAL HPDF_Page_GetDash=haru.HPDF_Page_GetDash HPDF_Page_GetDash.restype=HPDF_DashMode HPDF_Page_GetFlat=haru.HPDF_Page_GetFlat HPDF_Page_GetFlat.restype=HPDF_REAL HPDF_Page_GetCharSpace=haru.HPDF_Page_GetCharSpace HPDF_Page_GetCharSpace.restype=HPDF_REAL HPDF_Page_GetWordSpace=haru.HPDF_Page_GetWordSpace HPDF_Page_GetWordSpace.restype=HPDF_REAL HPDF_Page_GetHorizontalScalling=haru.HPDF_Page_GetHorizontalScalling HPDF_Page_GetHorizontalScalling.restype=HPDF_REAL HPDF_Page_GetTextLeading=haru.HPDF_Page_GetTextLeading HPDF_Page_GetTextLeading.restype=HPDF_REAL HPDF_Page_GetTextRenderingMode=haru.HPDF_Page_GetTextRenderingMode HPDF_Page_GetTextRenderingMode.restype=HPDF_TextRenderingMode HPDF_Page_GetTextRaise=haru.HPDF_Page_GetTextRaise HPDF_Page_GetTextRaise.restype=HPDF_REAL HPDF_Page_GetTextRise=haru.HPDF_Page_GetTextRise HPDF_Page_GetTextRise.restype=HPDF_REAL HPDF_Page_GetRGBFill=haru.HPDF_Page_GetRGBFill HPDF_Page_GetRGBFill.restype=HPDF_RGBColor HPDF_Page_GetRGBStroke=haru.HPDF_Page_GetRGBStroke HPDF_Page_GetRGBStroke.restype=HPDF_RGBColor HPDF_Page_GetCMYKFill=haru.HPDF_Page_GetCMYKFill HPDF_Page_GetCMYKFill.restype=HPDF_CMYKColor HPDF_Page_GetCMYKStroke=haru.HPDF_Page_GetCMYKStroke HPDF_Page_GetCMYKStroke.restype=HPDF_CMYKColor HPDF_Page_GetGrayFill=haru.HPDF_Page_GetGrayFill HPDF_Page_GetGrayFill.restype=HPDF_REAL HPDF_Page_GetGrayStroke=haru.HPDF_Page_GetGrayStroke HPDF_Page_GetGrayStroke.restype=HPDF_REAL HPDF_Page_GetStrokingColorSpace=haru.HPDF_Page_GetStrokingColorSpace HPDF_Page_GetStrokingColorSpace.restype=HPDF_ColorSpace HPDF_Page_GetFillingColorSpace=haru.HPDF_Page_GetFillingColorSpace HPDF_Page_GetFillingColorSpace.restype=HPDF_ColorSpace HPDF_Page_GetTextMatrix=haru.HPDF_Page_GetTextMatrix HPDF_Page_GetTextMatrix.restype=HPDF_TransMatrix HPDF_Page_GetGStateDepth=haru.HPDF_Page_GetGStateDepth HPDF_Page_GetGStateDepth.restype=HPDF_UINT _HPDF_Page_SetLineWidth=haru.HPDF_Page_SetLineWidth _HPDF_Page_SetLineWidth.restype=HPDF_STATUS def HPDF_Page_SetLineWidth( page, #HPDF_Page line_width, #HPDF_REAL ): line_width=HPDF_REAL(line_width) return _HPDF_Page_SetLineWidth( page, #HPDF_Page line_width, #HPDF_REAL ) 
HPDF_Page_SetLineCap=haru.HPDF_Page_SetLineCap HPDF_Page_SetLineCap.restype=HPDF_STATUS HPDF_Page_SetLineJoin=haru.HPDF_Page_SetLineJoin HPDF_Page_SetLineJoin.restype=HPDF_STATUS _HPDF_Page_SetMiterLimit=haru.HPDF_Page_SetMiterLimit _HPDF_Page_SetMiterLimit.restype=HPDF_STATUS def HPDF_Page_SetMiterLimit( page, #HPDF_Page miter_limit, #HPDF_REAL ): miter_limit=HPDF_REAL(miter_limit) return _HPDF_Page_SetMiterLimit( page, #HPDF_Page miter_limit, #HPDF_REAL ) _HPDF_Page_SetDash=haru.HPDF_Page_SetDash _HPDF_Page_SetDash.restype=HPDF_STATUS def HPDF_Page_SetDash( page, #HPDF_Page dash_ptn, #POINTER(HPDF_UINT16) num_param, #HPDF_UINT phase, #HPDF_UINT ): if type(dash_ptn) in (types.ListType, types.TupleType): num_param=len(dash_ptn) dash_ptn=pointer((HPDF_UINT16*num_param)(*dash_ptn)) return _HPDF_Page_SetDash( page, #HPDF_Page dash_ptn, #POINTER(HPDF_UINT16) num_param, #HPDF_UINT phase, #HPDF_UINT ) _HPDF_Page_SetFlat=haru.HPDF_Page_SetFlat _HPDF_Page_SetFlat.restype=HPDF_STATUS def HPDF_Page_SetFlat( page, #HPDF_Page flatness, #HPDF_REAL ): flatness=HPDF_REAL(flatness) return _HPDF_Page_SetFlat( page, #HPDF_Page flatness, #HPDF_REAL ) HPDF_Page_SetExtGState=haru.HPDF_Page_SetExtGState HPDF_Page_SetExtGState.restype=HPDF_STATUS HPDF_Page_GSave=haru.HPDF_Page_GSave HPDF_Page_GSave.restype=HPDF_STATUS HPDF_Page_GRestore=haru.HPDF_Page_GRestore HPDF_Page_GRestore.restype=HPDF_STATUS _HPDF_Page_Concat=haru.HPDF_Page_Concat _HPDF_Page_Concat.restype=HPDF_STATUS def HPDF_Page_Concat( page, #HPDF_Page a, #HPDF_REAL b, #HPDF_REAL c, #HPDF_REAL d, #HPDF_REAL x, #HPDF_REAL y, #HPDF_REAL ): a=HPDF_REAL(a) b=HPDF_REAL(b) c=HPDF_REAL(c) d=HPDF_REAL(d) x=HPDF_REAL(x) y=HPDF_REAL(y) return _HPDF_Page_Concat( page, #HPDF_Page a, #HPDF_REAL b, #HPDF_REAL c, #HPDF_REAL d, #HPDF_REAL x, #HPDF_REAL y, #HPDF_REAL ) _HPDF_Page_MoveTo=haru.HPDF_Page_MoveTo _HPDF_Page_MoveTo.restype=HPDF_STATUS def HPDF_Page_MoveTo( page, #HPDF_Page x, #HPDF_REAL y, #HPDF_REAL ): x=HPDF_REAL(x) y=HPDF_REAL(y) return _HPDF_Page_MoveTo( page, #HPDF_Page x, #HPDF_REAL y, #HPDF_REAL ) _HPDF_Page_LineTo=haru.HPDF_Page_LineTo _HPDF_Page_LineTo.restype=HPDF_STATUS def HPDF_Page_LineTo( page, #HPDF_Page x, #HPDF_REAL y, #HPDF_REAL ): x=HPDF_REAL(x) y=HPDF_REAL(y) return _HPDF_Page_LineTo( page, #HPDF_Page x, #HPDF_REAL y, #HPDF_REAL ) _HPDF_Page_CurveTo=haru.HPDF_Page_CurveTo _HPDF_Page_CurveTo.restype=HPDF_STATUS def HPDF_Page_CurveTo( page, #HPDF_Page x1, #HPDF_REAL y1, #HPDF_REAL x2, #HPDF_REAL y2, #HPDF_REAL x3, #HPDF_REAL y3, #HPDF_REAL ): x1=HPDF_REAL(x1) y1=HPDF_REAL(y1) x2=HPDF_REAL(x2) y2=HPDF_REAL(y2) x3=HPDF_REAL(x3) y3=HPDF_REAL(y3) return _HPDF_Page_CurveTo( page, #HPDF_Page x1, #HPDF_REAL y1, #HPDF_REAL x2, #HPDF_REAL y2, #HPDF_REAL x3, #HPDF_REAL y3, #HPDF_REAL ) _HPDF_Page_CurveTo2=haru.HPDF_Page_CurveTo2 _HPDF_Page_CurveTo2.restype=HPDF_STATUS def HPDF_Page_CurveTo2( page, #HPDF_Page x2, #HPDF_REAL y2, #HPDF_REAL x3, #HPDF_REAL y3, #HPDF_REAL ): x2=HPDF_REAL(x2) y2=HPDF_REAL(y2) x3=HPDF_REAL(x3) y3=HPDF_REAL(y3) return _HPDF_Page_CurveTo2( page, #HPDF_Page x2, #HPDF_REAL y2, #HPDF_REAL x3, #HPDF_REAL y3, #HPDF_REAL ) _HPDF_Page_CurveTo3=haru.HPDF_Page_CurveTo3 _HPDF_Page_CurveTo3.restype=HPDF_STATUS def HPDF_Page_CurveTo3( page, #HPDF_Page x1, #HPDF_REAL y1, #HPDF_REAL x3, #HPDF_REAL y3, #HPDF_REAL ): x1=HPDF_REAL(x1) y1=HPDF_REAL(y1) x3=HPDF_REAL(x3) y3=HPDF_REAL(y3) return _HPDF_Page_CurveTo3( page, #HPDF_Page x1, #HPDF_REAL y1, #HPDF_REAL x3, #HPDF_REAL y3, #HPDF_REAL ) HPDF_Page_ClosePath=haru.HPDF_Page_ClosePath 
HPDF_Page_ClosePath.restype=HPDF_STATUS _HPDF_Page_Rectangle=haru.HPDF_Page_Rectangle _HPDF_Page_Rectangle.restype=HPDF_STATUS def HPDF_Page_Rectangle( page, #HPDF_Page x, #HPDF_REAL y, #HPDF_REAL width, #HPDF_REAL height, #HPDF_REAL ): x=HPDF_REAL(x) y=HPDF_REAL(y) width=HPDF_REAL(width) height=HPDF_REAL(height) return _HPDF_Page_Rectangle( page, #HPDF_Page x, #HPDF_REAL y, #HPDF_REAL width, #HPDF_REAL height, #HPDF_REAL ) _HPDF_Page_Stroke=haru.HPDF_Page_Stroke _HPDF_Page_Stroke.restype=HPDF_STATUS def HPDF_Page_Stroke( page, #HPDF_Page ): return _HPDF_Page_Stroke( page, #HPDF_Page ) HPDF_Page_ClosePathStroke=haru.HPDF_Page_ClosePathStroke HPDF_Page_ClosePathStroke.restype=HPDF_STATUS HPDF_Page_Fill=haru.HPDF_Page_Fill HPDF_Page_Fill.restype=HPDF_STATUS HPDF_Page_Eofill=haru.HPDF_Page_Eofill HPDF_Page_Eofill.restype=HPDF_STATUS HPDF_Page_FillStroke=haru.HPDF_Page_FillStroke HPDF_Page_FillStroke.restype=HPDF_STATUS HPDF_Page_EofillStroke=haru.HPDF_Page_EofillStroke HPDF_Page_EofillStroke.restype=HPDF_STATUS HPDF_Page_ClosePathFillStroke=haru.HPDF_Page_ClosePathFillStroke HPDF_Page_ClosePathFillStroke.restype=HPDF_STATUS HPDF_Page_ClosePathEofillStroke=haru.HPDF_Page_ClosePathEofillStroke HPDF_Page_ClosePathEofillStroke.restype=HPDF_STATUS HPDF_Page_EndPath=haru.HPDF_Page_EndPath HPDF_Page_EndPath.restype=HPDF_STATUS HPDF_Page_Clip=haru.HPDF_Page_Clip HPDF_Page_Clip.restype=HPDF_STATUS HPDF_Page_Eoclip=haru.HPDF_Page_Eoclip HPDF_Page_Eoclip.restype=HPDF_STATUS HPDF_Page_BeginText=haru.HPDF_Page_BeginText HPDF_Page_BeginText.restype=HPDF_STATUS HPDF_Page_EndText=haru.HPDF_Page_EndText HPDF_Page_EndText.restype=HPDF_STATUS _HPDF_Page_SetCharSpace=haru.HPDF_Page_SetCharSpace _HPDF_Page_SetCharSpace.restype=HPDF_STATUS def HPDF_Page_SetCharSpace( page, #HPDF_Page value, #HPDF_REAL ): value=HPDF_REAL(value) return _HPDF_Page_SetCharSpace( page, #HPDF_Page value, #HPDF_REAL ) _HPDF_Page_SetWordSpace=haru.HPDF_Page_SetWordSpace _HPDF_Page_SetWordSpace.restype=HPDF_STATUS def HPDF_Page_SetWordSpace( page, #HPDF_Page value, #HPDF_REAL ): value=HPDF_REAL(value) return _HPDF_Page_SetWordSpace( page, #HPDF_Page value, #HPDF_REAL ) _HPDF_Page_SetHorizontalScalling=haru.HPDF_Page_SetHorizontalScalling _HPDF_Page_SetHorizontalScalling.restype=HPDF_STATUS def HPDF_Page_SetHorizontalScalling( page, #HPDF_Page value, #HPDF_REAL ): value=HPDF_REAL(value) return _HPDF_Page_SetHorizontalScalling( page, #HPDF_Page value, #HPDF_REAL ) _HPDF_Page_SetTextLeading=haru.HPDF_Page_SetTextLeading _HPDF_Page_SetTextLeading.restype=HPDF_STATUS def HPDF_Page_SetTextLeading( page, #HPDF_Page value, #HPDF_REAL ): value=HPDF_REAL(value) return _HPDF_Page_SetTextLeading( page, #HPDF_Page value, #HPDF_REAL ) _HPDF_Page_SetFontAndSize=haru.HPDF_Page_SetFontAndSize _HPDF_Page_SetFontAndSize.restype=HPDF_STATUS def HPDF_Page_SetFontAndSize( page, #HPDF_Page font, #HPDF_Font size, #HPDF_REAL ): size=HPDF_REAL(size) return _HPDF_Page_SetFontAndSize( page, #HPDF_Page font, #HPDF_Font size, #HPDF_REAL ) HPDF_Page_SetTextRenderingMode=haru.HPDF_Page_SetTextRenderingMode HPDF_Page_SetTextRenderingMode.restype=HPDF_STATUS _HPDF_Page_SetTextRise=haru.HPDF_Page_SetTextRise _HPDF_Page_SetTextRise.restype=HPDF_STATUS def HPDF_Page_SetTextRise( page, #HPDF_Page value, #HPDF_REAL ): value=HPDF_REAL(value) return _HPDF_Page_SetTextRise( page, #HPDF_Page value, #HPDF_REAL ) _HPDF_Page_SetTextRaise=haru.HPDF_Page_SetTextRaise _HPDF_Page_SetTextRaise.restype=HPDF_STATUS def HPDF_Page_SetTextRaise( page, #HPDF_Page value, #HPDF_REAL ): 
value=HPDF_REAL(value) return _HPDF_Page_SetTextRaise( page, #HPDF_Page value, #HPDF_REAL ) _HPDF_Page_MoveTextPos=haru.HPDF_Page_MoveTextPos _HPDF_Page_MoveTextPos.restype=HPDF_STATUS def HPDF_Page_MoveTextPos( page, #HPDF_Page x, #HPDF_REAL y, #HPDF_REAL ): x=HPDF_REAL(x) y=HPDF_REAL(y) return _HPDF_Page_MoveTextPos( page, #HPDF_Page x, #HPDF_REAL y, #HPDF_REAL ) _HPDF_Page_MoveTextPos2=haru.HPDF_Page_MoveTextPos2 _HPDF_Page_MoveTextPos2.restype=HPDF_STATUS def HPDF_Page_MoveTextPos2( page, #HPDF_Page x, #HPDF_REAL y, #HPDF_REAL ): x=HPDF_REAL(x) y=HPDF_REAL(y) return _HPDF_Page_MoveTextPos2( page, #HPDF_Page x, #HPDF_REAL y, #HPDF_REAL ) _HPDF_Page_SetTextMatrix=haru.HPDF_Page_SetTextMatrix _HPDF_Page_SetTextMatrix.restype=HPDF_STATUS def HPDF_Page_SetTextMatrix( page, #HPDF_Page a, #HPDF_REAL b, #HPDF_REAL c, #HPDF_REAL d, #HPDF_REAL x, #HPDF_REAL y, #HPDF_REAL ): a=HPDF_REAL(a) b=HPDF_REAL(b) c=HPDF_REAL(c) d=HPDF_REAL(d) x=HPDF_REAL(x) y=HPDF_REAL(y) return _HPDF_Page_SetTextMatrix( page, #HPDF_Page a, #HPDF_REAL b, #HPDF_REAL c, #HPDF_REAL d, #HPDF_REAL x, #HPDF_REAL y, #HPDF_REAL ) HPDF_Page_MoveToNextLine=haru.HPDF_Page_MoveToNextLine HPDF_Page_MoveToNextLine.restype=HPDF_STATUS _HPDF_Page_ShowText=haru.HPDF_Page_ShowText _HPDF_Page_ShowText.restype=HPDF_STATUS def HPDF_Page_ShowText(page, text ): if type(text) in (types.ListType, types.TupleType): if type(text[-1]) != types.StringType: text=[chr(i) for i in text] text=''.join(text) return _HPDF_Page_ShowText(page, text ) HPDF_Page_ShowTextNextLine=haru.HPDF_Page_ShowTextNextLine HPDF_Page_ShowTextNextLine.restype=HPDF_STATUS _HPDF_Page_ShowTextNextLineEx=haru.HPDF_Page_ShowTextNextLineEx _HPDF_Page_ShowTextNextLineEx.restype=HPDF_STATUS def HPDF_Page_ShowTextNextLineEx( page, #HPDF_Page word_space, #HPDF_REAL char_space, #HPDF_REAL text, #c_char_p ): word_space=HPDF_REAL(word_space) char_space=HPDF_REAL(char_space) return _HPDF_Page_ShowTextNextLineEx( page, #HPDF_Page word_space, #HPDF_REAL char_space, #HPDF_REAL text, #c_char_p ) _HPDF_Page_SetGrayFill=haru.HPDF_Page_SetGrayFill _HPDF_Page_SetGrayFill.restype=HPDF_STATUS def HPDF_Page_SetGrayFill( page, #HPDF_Page gray, #HPDF_REAL ): gray=HPDF_REAL(gray) return _HPDF_Page_SetGrayFill( page, #HPDF_Page gray, #HPDF_REAL ) _HPDF_Page_SetGrayStroke=haru.HPDF_Page_SetGrayStroke _HPDF_Page_SetGrayStroke.restype=HPDF_STATUS def HPDF_Page_SetGrayStroke( page, #HPDF_Page gray, #HPDF_REAL ): gray=HPDF_REAL(gray) return _HPDF_Page_SetGrayStroke( page, #HPDF_Page gray, #HPDF_REAL ) _HPDF_Page_SetRGBFill=haru.HPDF_Page_SetRGBFill _HPDF_Page_SetRGBFill.restype=HPDF_STATUS def HPDF_Page_SetRGBFill( page, #HPDF_Page r, #HPDF_REAL g, #HPDF_REAL b, #HPDF_REAL ): r=HPDF_REAL(r) g=HPDF_REAL(g) b=HPDF_REAL(b) return _HPDF_Page_SetRGBFill( page, #HPDF_Page r, #HPDF_REAL g, #HPDF_REAL b, #HPDF_REAL ) _HPDF_Page_SetRGBStroke=haru.HPDF_Page_SetRGBStroke _HPDF_Page_SetRGBStroke.restype=HPDF_STATUS def HPDF_Page_SetRGBStroke( page, #HPDF_Page r, #HPDF_REAL g, #HPDF_REAL b, #HPDF_REAL ): r=HPDF_REAL(r) g=HPDF_REAL(g) b=HPDF_REAL(b) return _HPDF_Page_SetRGBStroke( page, #HPDF_Page r, #HPDF_REAL g, #HPDF_REAL b, #HPDF_REAL ) _HPDF_Page_SetCMYKFill=haru.HPDF_Page_SetCMYKFill _HPDF_Page_SetCMYKFill.restype=HPDF_STATUS def HPDF_Page_SetCMYKFill( page, #HPDF_Page c, #HPDF_REAL m, #HPDF_REAL y, #HPDF_REAL k, #HPDF_REAL ): c=HPDF_REAL(c) m=HPDF_REAL(m) y=HPDF_REAL(y) k=HPDF_REAL(k) return _HPDF_Page_SetCMYKFill( page, #HPDF_Page c, #HPDF_REAL m, #HPDF_REAL y, #HPDF_REAL k, #HPDF_REAL ) 
_HPDF_Page_SetCMYKStroke=haru.HPDF_Page_SetCMYKStroke _HPDF_Page_SetCMYKStroke.restype=HPDF_STATUS def HPDF_Page_SetCMYKStroke( page, #HPDF_Page c, #HPDF_REAL m, #HPDF_REAL y, #HPDF_REAL k, #HPDF_REAL ): c=HPDF_REAL(c) m=HPDF_REAL(m) y=HPDF_REAL(y) k=HPDF_REAL(k) return _HPDF_Page_SetCMYKStroke( page, #HPDF_Page c, #HPDF_REAL m, #HPDF_REAL y, #HPDF_REAL k, #HPDF_REAL ) HPDF_Page_ExecuteXObject=haru.HPDF_Page_ExecuteXObject HPDF_Page_ExecuteXObject.restype=HPDF_STATUS _HPDF_Page_DrawImage=haru.HPDF_Page_DrawImage _HPDF_Page_DrawImage.restype=HPDF_STATUS def HPDF_Page_DrawImage( page, #HPDF_Page image, #HPDF_Image x, #HPDF_REAL y, #HPDF_REAL width, #HPDF_REAL height, #HPDF_REAL ): x=HPDF_REAL(x) y=HPDF_REAL(y) width=HPDF_REAL(width) height=HPDF_REAL(height) return _HPDF_Page_DrawImage( page, #HPDF_Page image, #HPDF_Image x, #HPDF_REAL y, #HPDF_REAL width, #HPDF_REAL height, #HPDF_REAL ) _HPDF_Page_Circle=haru.HPDF_Page_Circle _HPDF_Page_Circle.restype=HPDF_STATUS def HPDF_Page_Circle( page, #HPDF_Page x, #HPDF_REAL y, #HPDF_REAL ray, #HPDF_REAL ): x=HPDF_REAL(x) y=HPDF_REAL(y) ray=HPDF_REAL(ray) return _HPDF_Page_Circle( page, #HPDF_Page x, #HPDF_REAL y, #HPDF_REAL ray, #HPDF_REAL ) _HPDF_Page_Ellipse=haru.HPDF_Page_Ellipse _HPDF_Page_Ellipse.restype=HPDF_STATUS def HPDF_Page_Ellipse( page, #HPDF_Page x, #HPDF_REAL y, #HPDF_REAL xray, #HPDF_REAL yray, #HPDF_REAL ): x=HPDF_REAL(x) y=HPDF_REAL(y) xray=HPDF_REAL(xray) yray=HPDF_REAL(yray) return _HPDF_Page_Ellipse( page, #HPDF_Page x, #HPDF_REAL y, #HPDF_REAL xray, #HPDF_REAL yray, #HPDF_REAL ) _HPDF_Page_Arc=haru.HPDF_Page_Arc _HPDF_Page_Arc.restype=HPDF_STATUS def HPDF_Page_Arc( page, #HPDF_Page x, #HPDF_REAL y, #HPDF_REAL ray, #HPDF_REAL ang1, #HPDF_REAL ang2, #HPDF_REAL ): x=HPDF_REAL(x) y=HPDF_REAL(y) ray=HPDF_REAL(ray) ang1=HPDF_REAL(ang1) ang2=HPDF_REAL(ang2) return _HPDF_Page_Arc( page, #HPDF_Page x, #HPDF_REAL y, #HPDF_REAL ray, #HPDF_REAL ang1, #HPDF_REAL ang2, #HPDF_REAL ) _HPDF_Page_TextOut=haru.HPDF_Page_TextOut _HPDF_Page_TextOut.restype=HPDF_STATUS def HPDF_Page_TextOut( page, #HPDF_Page xpos, #HPDF_REAL ypos, #HPDF_REAL text, #c_char_p ): xpos=HPDF_REAL(xpos) ypos=HPDF_REAL(ypos) if type(text) in (types.ListType, types.TupleType): if type(text[-1]) != types.StringType: text=[chr(i) for i in text] text=''.join(text) return _HPDF_Page_TextOut( page, #HPDF_Page xpos, #HPDF_REAL ypos, #HPDF_REAL text, #c_char_p ) _HPDF_Page_TextRect=haru.HPDF_Page_TextRect _HPDF_Page_TextRect.restype=HPDF_STATUS def HPDF_Page_TextRect( page, #HPDF_Page left, #HPDF_REAL top, #HPDF_REAL right, #HPDF_REAL bottom, #HPDF_REAL text, #c_char_p align, #HPDF_TextAlignment length, #POINTER(HPDF_UINT) ): left=HPDF_REAL(left) top=HPDF_REAL(top) right=HPDF_REAL(right) bottom=HPDF_REAL(bottom) if type(length) in (types.ListType, types.TupleType): size=len(length) length=pointer((HPDF_UINT*size)(*length)) return _HPDF_Page_TextRect( page, #HPDF_Page left, #HPDF_REAL top, #HPDF_REAL right, #HPDF_REAL bottom, #HPDF_REAL text, #c_char_p align, #HPDF_TextAlignment length, #POINTER(HPDF_UINT) ) _HPDF_Page_SetSlideShow=haru.HPDF_Page_SetSlideShow _HPDF_Page_SetSlideShow.restype=HPDF_STATUS def HPDF_Page_SetSlideShow( page, #HPDF_Page tType, #HPDF_TransitionStyle disp_time, #HPDF_REAL trans_time, #HPDF_REAL ): disp_time=HPDF_REAL(disp_time) trans_time=HPDF_REAL(trans_time) return _HPDF_Page_SetSlideShow( page, #HPDF_Page tType, #HPDF_TransitionStyle disp_time, #HPDF_REAL trans_time, #HPDF_REAL ) NULL=0 HPDF_NOPNGLIB=False
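# Illustrative sketch only, separate from the binding above: several of the wrappers
# (HPDF_Page_GetCurrentPos2, HPDF_Image_GetSize2, ...) follow the usual ctypes
# out-parameter pattern -- allocate the result struct in Python, hand the C function
# a pointer to it, then read the filled-in fields. The toy Point struct and the
# fill_point() stand-in below are assumptions used only to demonstrate that pattern
# without requiring libhpdf to be installed.
from ctypes import Structure, c_float, pointer

class Point(Structure):
    _fields_ = [('x', c_float), ('y', c_float)]

def fill_point(ppoint):
    # stands in for a C call such as HPDF_Page_GetCurrentPos2(page, &pos)
    ppoint.contents.x = 10.0
    ppoint.contents.y = 20.0
    return 0  # HPDF_STATUS-style success code

pos = Point()                      # caller allocates the out-parameter
status = fill_point(pointer(pos))  # callee fills it through the pointer
print(status, pos.x, pos.y)        # -> 0 10.0 20.0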
DOCUMENTATION = '''
---
module: raw
version_added: historical
short_description: Executes a low-down and dirty SSH command
options:
  free_form:
    description:
      - the raw module takes a free form command to run
    required: true
  executable:
    description:
      - change the shell used to execute the command. Should be an absolute
        path to the executable.
    required: false
    version_added: "1.0"
description:
  - Executes a low-down and dirty SSH command, not going through the module
    subsystem. This is useful and should only be done in two cases. The first
    case is installing C(python-simplejson) on older (Python 2.4 and before)
    hosts that need it as a dependency to run modules, since nearly all core
    modules require it. Another is speaking to any devices such as routers
    that do not have any Python installed. In any other case, using the
    M(shell) or M(command) module is much more appropriate. Arguments given
    to M(raw) are run directly through the configured remote shell. Standard
    output, error output and return code are returned when available. There
    is no change handler support for this module.
  - This module does not require python on the remote system, much like the
    M(script) module.
notes:
  - If you want to execute a command securely and predictably, it may be
    better to use the M(command) module instead. Best practices when writing
    playbooks will follow the trend of using M(command) unless M(shell) is
    explicitly required. When running ad-hoc commands, use your best
    judgement.
author:
  - Ansible Core Team
  - Michael DeHaan
'''

EXAMPLES = '''
- raw: yum -y install python-simplejson
'''
""" TODO: add a docstring. """ class Delimiters(object): def first(self): return "It worked the first time." def second(self): return "And it worked the second time." def third(self): return "Then, surprisingly, it worked the third time."
""" Various kinds of icon widgets. """ from __future__ import absolute_import from ...properties import Bool, Float, Enum from ...enums import NamedIcon from ..widget import Widget class AbstractIcon(Widget): """ An abstract base class for icon widgets. ``AbstractIcon`` is not generally useful to instantiate on its own. """ class Icon(AbstractIcon): """ A "stock" icon based on FontAwesome. """ name = Enum(NamedIcon, help=""" What icon to use. See http://fortawesome.github.io/Font-Awesome/icons/ for the list of available icons. """) size = Float(None, help=""" The size multiplier (1x, 2x, ..., 5x). """) flip = Enum("horizontal", "vertical", default=None, help=""" Optionally flip the icon horizontally or vertically. """) spin = Bool(False, help=""" Indicates a spinning (animated) icon. This value is ignored for icons that do not support spinning. """)
from __future__ import unicode_literals

from .novamov import NovaMovIE


class NowVideoIE(NovaMovIE):
    IE_NAME = 'nowvideo'
    IE_DESC = 'NowVideo'

    _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'nowvideo\.(?:ch|ec|sx|eu|at|ag|co|li)'}

    _HOST = 'www.nowvideo.ch'

    _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
    _FILEKEY_REGEX = r'var fkzd="([^"]+)";'
    _TITLE_REGEX = r'<h4>([^<]+)</h4>'
    _DESCRIPTION_REGEX = r'</h4>\s*<p>([^<]+)</p>'

    _TEST = {
        'url': 'http://www.nowvideo.ch/video/0mw0yow7b6dxa',
        'md5': 'f8fbbc8add72bd95b7850c6a02fc8817',
        'info_dict': {
            'id': '0mw0yow7b6dxa',
            'ext': 'flv',
            'title': 'youtubedl test video _BaW_jenozKc.mp4',
            'description': 'Description',
        }
    }
""" This file tests the MNISTPlus class. majorly concerning the X and y member of the dataset and their corresponding sizes, data scales and topological views. """ from pylearn2.datasets.mnistplus import MNISTPlus from pylearn2.space import IndexSpace, VectorSpace import unittest from pylearn2.testing.skip import skip_if_no_data import numpy as np def test_MNISTPlus(): """ Test the MNISTPlus warper. Tests the scale of data, the splitting of train, valid, test sets. Tests that a topological batch has 4 dimensions. Tests that it work well with selected type of augmentation. """ skip_if_no_data() for subset in ['train', 'valid', 'test']: ids = MNISTPlus(which_set=subset) assert 0.01 >= ids.X.min() >= 0.0 assert 0.99 <= ids.X.max() <= 1.0 topo = ids.get_batch_topo(1) assert topo.ndim == 4 del ids train_y = MNISTPlus(which_set='train', label_type='label') assert 0.99 <= train_y.X.max() <= 1.0 assert 0.0 <= train_y.X.min() <= 0.01 assert train_y.y.max() == 9 assert train_y.y.min() == 0 assert train_y.y.shape == (train_y.X.shape[0], 1) train_y = MNISTPlus(which_set='train', label_type='azimuth') assert 0.99 <= train_y.X.max() <= 1.0 assert 0.0 <= train_y.X.min() <= 0.01 assert 0.0 <= train_y.y.max() <= 1.0 assert 0.0 <= train_y.y.min() <= 1.0 assert train_y.y.shape == (train_y.X.shape[0], 1) train_y = MNISTPlus(which_set='train', label_type='rotation') assert 0.99 <= train_y.X.max() <= 1.0 assert 0.0 <= train_y.X.min() <= 0.01 assert train_y.y.max() == 9 assert train_y.y.min() == 0 assert train_y.y.shape == (train_y.X.shape[0], 1) train_y = MNISTPlus(which_set='train', label_type='texture_id') assert 0.99 <= train_y.X.max() <= 1.0 assert 0.0 <= train_y.X.min() <= 0.01 assert train_y.y.max() == 9 assert train_y.y.min() == 0 assert train_y.y.shape == (train_y.X.shape[0], 1)
"""Regresssion tests for urllib""" import urllib import httplib import unittest import os import sys import mimetools import tempfile import StringIO from test import test_support from base64 import b64encode def hexescape(char): """Escape char as RFC 2396 specifies""" hex_repr = hex(ord(char))[2:].upper() if len(hex_repr) == 1: hex_repr = "0%s" % hex_repr return "%" + hex_repr class FakeHTTPMixin(object): def fakehttp(self, fakedata): class FakeSocket(StringIO.StringIO): def sendall(self, data): FakeHTTPConnection.buf = data def makefile(self, *args, **kwds): return self def read(self, amt=None): if self.closed: return "" return StringIO.StringIO.read(self, amt) def readline(self, length=None): if self.closed: return "" return StringIO.StringIO.readline(self, length) class FakeHTTPConnection(httplib.HTTPConnection): # buffer to store data for verification in urlopen tests. buf = "" def connect(self): self.sock = FakeSocket(fakedata) assert httplib.HTTP._connection_class == httplib.HTTPConnection httplib.HTTP._connection_class = FakeHTTPConnection def unfakehttp(self): httplib.HTTP._connection_class = httplib.HTTPConnection class urlopen_FileTests(unittest.TestCase): """Test urlopen() opening a temporary file. Try to test as much functionality as possible so as to cut down on reliance on connecting to the Net for testing. """ def setUp(self): """Setup of a temp file to use for testing""" self.text = "test_urllib: %s\n" % self.__class__.__name__ FILE = file(test_support.TESTFN, 'wb') try: FILE.write(self.text) finally: FILE.close() self.pathname = test_support.TESTFN self.returned_obj = urllib.urlopen("file:%s" % self.pathname) def tearDown(self): """Shut down the open object""" self.returned_obj.close() os.remove(test_support.TESTFN) def test_interface(self): # Make sure object returned by urlopen() has the specified methods for attr in ("read", "readline", "readlines", "fileno", "close", "info", "geturl", "getcode", "__iter__"): self.assertTrue(hasattr(self.returned_obj, attr), "object returned by urlopen() lacks %s attribute" % attr) def test_read(self): self.assertEqual(self.text, self.returned_obj.read()) def test_readline(self): self.assertEqual(self.text, self.returned_obj.readline()) self.assertEqual('', self.returned_obj.readline(), "calling readline() after exhausting the file did not" " return an empty string") def test_readlines(self): lines_list = self.returned_obj.readlines() self.assertEqual(len(lines_list), 1, "readlines() returned the wrong number of lines") self.assertEqual(lines_list[0], self.text, "readlines() returned improper text") def test_fileno(self): file_num = self.returned_obj.fileno() self.assertIsInstance(file_num, int, "fileno() did not return an int") self.assertEqual(os.read(file_num, len(self.text)), self.text, "Reading on the file descriptor returned by fileno() " "did not return the expected text") def test_close(self): # Test close() by calling it hear and then having it be called again # by the tearDown() method for the test self.returned_obj.close() def test_info(self): self.assertIsInstance(self.returned_obj.info(), mimetools.Message) def test_geturl(self): self.assertEqual(self.returned_obj.geturl(), self.pathname) def test_getcode(self): self.assertEqual(self.returned_obj.getcode(), None) def test_iter(self): # Test iterator # Don't need to count number of iterations since test would fail the # instant it returned anything beyond the first line from the # comparison for line in self.returned_obj.__iter__(): self.assertEqual(line, self.text) def 
test_relativelocalfile(self): self.assertRaises(ValueError,urllib.urlopen,'./' + self.pathname) class ProxyTests(unittest.TestCase): def setUp(self): # Records changes to env vars self.env = test_support.EnvironmentVarGuard() # Delete all proxy related env vars for k in os.environ.keys(): if 'proxy' in k.lower(): self.env.unset(k) def tearDown(self): # Restore all proxy related env vars self.env.__exit__() del self.env def test_getproxies_environment_keep_no_proxies(self): self.env.set('NO_PROXY', 'localhost') proxies = urllib.getproxies_environment() # getproxies_environment use lowered case truncated (no '_proxy') keys self.assertEqual('localhost', proxies['no']) # List of no_proxies with space. self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com') self.assertTrue(urllib.proxy_bypass_environment('anotherdomain.com')) class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin): """Test urlopen() opening a fake http connection.""" def test_read(self): self.fakehttp('Hello!') try: fp = urllib.urlopen("http://python.org/") self.assertEqual(fp.readline(), 'Hello!') self.assertEqual(fp.readline(), '') self.assertEqual(fp.geturl(), 'http://python.org/') self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_url_fragment(self): # Issue #11703: geturl() omits fragments in the original URL. url = 'http://docs.python.org/library/urllib.html#OK' self.fakehttp('Hello!') try: fp = urllib.urlopen(url) self.assertEqual(fp.geturl(), url) finally: self.unfakehttp() def test_read_bogus(self): # urlopen() should raise IOError for many error codes. self.fakehttp('''HTTP/1.1 401 Authentication Required Date: Wed, 02 Jan 2008 03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Type: text/html; charset=iso-8859-1 ''') try: self.assertRaises(IOError, urllib.urlopen, "http://python.org/") finally: self.unfakehttp() def test_invalid_redirect(self): # urlopen() should raise IOError for many error codes. self.fakehttp("""HTTP/1.1 302 Found Date: Wed, 02 Jan 2008 03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Location: file:README Connection: close Content-Type: text/html; charset=iso-8859-1 """) try: self.assertRaises(IOError, urllib.urlopen, "http://python.org/") finally: self.unfakehttp() def test_empty_socket(self): # urlopen() raises IOError if the underlying socket does not send any # data. 
(#1680230) self.fakehttp('') try: self.assertRaises(IOError, urllib.urlopen, 'http://something') finally: self.unfakehttp() def test_missing_localfile(self): self.assertRaises(IOError, urllib.urlopen, 'file://localhost/a/missing/file.py') fd, tmp_file = tempfile.mkstemp() tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/') try: self.assertTrue(os.path.exists(tmp_file)) fp = urllib.urlopen(tmp_fileurl) finally: os.close(fd) fp.close() os.unlink(tmp_file) self.assertFalse(os.path.exists(tmp_file)) self.assertRaises(IOError, urllib.urlopen, tmp_fileurl) def test_ftp_nonexisting(self): self.assertRaises(IOError, urllib.urlopen, 'ftp://localhost/not/existing/file.py') def test_userpass_inurl(self): self.fakehttp('Hello!') try: fakehttp_wrapper = httplib.HTTP._connection_class fp = urllib.urlopen("http://user:pass@python.org/") authorization = ("Authorization: Basic %s\r\n" % b64encode('user:pass')) # The authorization header must be in place self.assertIn(authorization, fakehttp_wrapper.buf) self.assertEqual(fp.readline(), "Hello!") self.assertEqual(fp.readline(), "") self.assertEqual(fp.geturl(), 'http://user:pass@python.org/') self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_userpass_with_spaces_inurl(self): self.fakehttp('Hello!') try: url = "http://a b:c d@python.org/" fakehttp_wrapper = httplib.HTTP._connection_class authorization = ("Authorization: Basic %s\r\n" % b64encode('a b:c d')) fp = urllib.urlopen(url) # The authorization header must be in place self.assertIn(authorization, fakehttp_wrapper.buf) self.assertEqual(fp.readline(), "Hello!") self.assertEqual(fp.readline(), "") # the spaces are quoted in URL so no match self.assertNotEqual(fp.geturl(), url) self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() class urlretrieve_FileTests(unittest.TestCase): """Test urllib.urlretrieve() on local files""" def setUp(self): # Create a list of temporary files. Each item in the list is a file # name (absolute path or relative to the current working directory). # All files in this list will be deleted in the tearDown method. Note, # this only helps to makes sure temporary files get deleted, but it # does nothing about trying to close files that may still be open. It # is the responsibility of the developer to properly close files even # when exceptional conditions occur. self.tempFiles = [] # Create a temporary file. self.registerFileForCleanUp(test_support.TESTFN) self.text = 'testing urllib.urlretrieve' try: FILE = file(test_support.TESTFN, 'wb') FILE.write(self.text) FILE.close() finally: try: FILE.close() except: pass def tearDown(self): # Delete the temporary files. for each in self.tempFiles: try: os.remove(each) except: pass def constructLocalFileUrl(self, filePath): return "file://%s" % urllib.pathname2url(os.path.abspath(filePath)) def createNewTempFile(self, data=""): """Creates a new temporary file containing the specified data, registers the file for deletion during the test fixture tear down, and returns the absolute path of the file.""" newFd, newFilePath = tempfile.mkstemp() try: self.registerFileForCleanUp(newFilePath) newFile = os.fdopen(newFd, "wb") newFile.write(data) newFile.close() finally: try: newFile.close() except: pass return newFilePath def registerFileForCleanUp(self, fileName): self.tempFiles.append(fileName) def test_basic(self): # Make sure that a local file just gets its own location returned and # a headers value is returned. 
result = urllib.urlretrieve("file:%s" % test_support.TESTFN) self.assertEqual(result[0], test_support.TESTFN) self.assertIsInstance(result[1], mimetools.Message, "did not get a mimetools.Message instance as " "second returned value") def test_copy(self): # Test that setting the filename argument works. second_temp = "%s.2" % test_support.TESTFN self.registerFileForCleanUp(second_temp) result = urllib.urlretrieve(self.constructLocalFileUrl( test_support.TESTFN), second_temp) self.assertEqual(second_temp, result[0]) self.assertTrue(os.path.exists(second_temp), "copy of the file was not " "made") FILE = file(second_temp, 'rb') try: text = FILE.read() FILE.close() finally: try: FILE.close() except: pass self.assertEqual(self.text, text) def test_reporthook(self): # Make sure that the reporthook works. def hooktester(count, block_size, total_size, count_holder=[0]): self.assertIsInstance(count, int) self.assertIsInstance(block_size, int) self.assertIsInstance(total_size, int) self.assertEqual(count, count_holder[0]) count_holder[0] = count_holder[0] + 1 second_temp = "%s.2" % test_support.TESTFN self.registerFileForCleanUp(second_temp) urllib.urlretrieve(self.constructLocalFileUrl(test_support.TESTFN), second_temp, hooktester) def test_reporthook_0_bytes(self): # Test on zero length file. Should call reporthook only 1 time. report = [] def hooktester(count, block_size, total_size, _report=report): _report.append((count, block_size, total_size)) srcFileName = self.createNewTempFile() urllib.urlretrieve(self.constructLocalFileUrl(srcFileName), test_support.TESTFN, hooktester) self.assertEqual(len(report), 1) self.assertEqual(report[0][2], 0) def test_reporthook_5_bytes(self): # Test on 5 byte file. Should call reporthook only 2 times (once when # the "network connection" is established and once when the block is # read). Since the block size is 8192 bytes, only one block read is # required to read the entire file. report = [] def hooktester(count, block_size, total_size, _report=report): _report.append((count, block_size, total_size)) srcFileName = self.createNewTempFile("x" * 5) urllib.urlretrieve(self.constructLocalFileUrl(srcFileName), test_support.TESTFN, hooktester) self.assertEqual(len(report), 2) self.assertEqual(report[0][1], 8192) self.assertEqual(report[0][2], 5) def test_reporthook_8193_bytes(self): # Test on 8193 byte file. Should call reporthook only 3 times (once # when the "network connection" is established, once for the next 8192 # bytes, and once for the last byte). 
report = [] def hooktester(count, block_size, total_size, _report=report): _report.append((count, block_size, total_size)) srcFileName = self.createNewTempFile("x" * 8193) urllib.urlretrieve(self.constructLocalFileUrl(srcFileName), test_support.TESTFN, hooktester) self.assertEqual(len(report), 3) self.assertEqual(report[0][1], 8192) self.assertEqual(report[0][2], 8193) class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin): """Test urllib.urlretrieve() using fake http connections""" def test_short_content_raises_ContentTooShortError(self): self.fakehttp('''HTTP/1.1 200 OK Date: Wed, 02 Jan 2008 03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Length: 100 Content-Type: text/html; charset=iso-8859-1 FF ''') def _reporthook(par1, par2, par3): pass try: self.assertRaises(urllib.ContentTooShortError, urllib.urlretrieve, 'http://example.com', reporthook=_reporthook) finally: self.unfakehttp() def test_short_content_raises_ContentTooShortError_without_reporthook(self): self.fakehttp('''HTTP/1.1 200 OK Date: Wed, 02 Jan 2008 03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Length: 100 Content-Type: text/html; charset=iso-8859-1 FF ''') try: self.assertRaises(urllib.ContentTooShortError, urllib.urlretrieve, 'http://example.com/') finally: self.unfakehttp() class QuotingTests(unittest.TestCase): """Tests for urllib.quote() and urllib.quote_plus() According to RFC 2396 ("Uniform Resource Identifiers), to escape a character you write it as '%' + <2 character US-ASCII hex value>. The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a character properly. Case does not matter on the hex letters. The various character sets specified are: Reserved characters : ";/?:@&=+$," Have special meaning in URIs and must be escaped if not being used for their special meaning Data characters : letters, digits, and "-_.!~*'()" Unreserved and do not need to be escaped; can be, though, if desired Control characters : 0x00 - 0x1F, 0x7F Have no use in URIs so must be escaped space : 0x20 Must be escaped Delimiters : '<>#%"' Must be escaped Unwise : "{}|\^[]`" Must be escaped """ def test_never_quote(self): # Make sure quote() does not quote letters, digits, and "_,.-" do_not_quote = '' .join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ", "abcdefghijklmnopqrstuvwxyz", "0123456789", "_.-"]) result = urllib.quote(do_not_quote) self.assertEqual(do_not_quote, result, "using quote(): %s != %s" % (do_not_quote, result)) result = urllib.quote_plus(do_not_quote) self.assertEqual(do_not_quote, result, "using quote_plus(): %s != %s" % (do_not_quote, result)) def test_default_safe(self): # Test '/' is default value for 'safe' parameter self.assertEqual(urllib.quote.func_defaults[0], '/') def test_safe(self): # Test setting 'safe' parameter does what it should do quote_by_default = "<>" result = urllib.quote(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default, result, "using quote(): %s != %s" % (quote_by_default, result)) result = urllib.quote_plus(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default, result, "using quote_plus(): %s != %s" % (quote_by_default, result)) def test_default_quoting(self): # Make sure all characters that should be quoted are by default sans # space (separate test for that). 
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F should_quote.append('<>#%"{}|\^[]`') should_quote.append(chr(127)) # For 0x7F should_quote = ''.join(should_quote) for char in should_quote: result = urllib.quote(char) self.assertEqual(hexescape(char), result, "using quote(): %s should be escaped to %s, not %s" % (char, hexescape(char), result)) result = urllib.quote_plus(char) self.assertEqual(hexescape(char), result, "using quote_plus(): " "%s should be escapes to %s, not %s" % (char, hexescape(char), result)) del should_quote partial_quote = "ab[]cd" expected = "ab%5B%5Dcd" result = urllib.quote(partial_quote) self.assertEqual(expected, result, "using quote(): %s != %s" % (expected, result)) result = urllib.quote_plus(partial_quote) self.assertEqual(expected, result, "using quote_plus(): %s != %s" % (expected, result)) self.assertRaises(TypeError, urllib.quote, None) def test_quoting_space(self): # Make sure quote() and quote_plus() handle spaces as specified in # their unique way result = urllib.quote(' ') self.assertEqual(result, hexescape(' '), "using quote(): %s != %s" % (result, hexescape(' '))) result = urllib.quote_plus(' ') self.assertEqual(result, '+', "using quote_plus(): %s != +" % result) given = "a b cd e f" expect = given.replace(' ', hexescape(' ')) result = urllib.quote(given) self.assertEqual(expect, result, "using quote(): %s != %s" % (expect, result)) expect = given.replace(' ', '+') result = urllib.quote_plus(given) self.assertEqual(expect, result, "using quote_plus(): %s != %s" % (expect, result)) def test_quoting_plus(self): self.assertEqual(urllib.quote_plus('alpha+beta gamma'), 'alpha%2Bbeta+gamma') self.assertEqual(urllib.quote_plus('alpha+beta gamma', '+'), 'alpha+beta+gamma') class UnquotingTests(unittest.TestCase): """Tests for unquote() and unquote_plus() See the doc string for quoting_Tests for details on quoting and such. 
""" def test_unquoting(self): # Make sure unquoting of all ASCII values works escape_list = [] for num in range(128): given = hexescape(chr(num)) expect = chr(num) result = urllib.unquote(given) self.assertEqual(expect, result, "using unquote(): %s != %s" % (expect, result)) result = urllib.unquote_plus(given) self.assertEqual(expect, result, "using unquote_plus(): %s != %s" % (expect, result)) escape_list.append(given) escape_string = ''.join(escape_list) del escape_list result = urllib.unquote(escape_string) self.assertEqual(result.count('%'), 1, "using quote(): not all characters escaped; %s" % result) result = urllib.unquote(escape_string) self.assertEqual(result.count('%'), 1, "using unquote(): not all characters escaped: " "%s" % result) def test_unquoting_badpercent(self): # Test unquoting on bad percent-escapes given = '%xab' expect = given result = urllib.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) given = '%x' expect = given result = urllib.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) given = '%' expect = given result = urllib.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) def test_unquoting_mixed_case(self): # Test unquoting on mixed-case hex digits in the percent-escapes given = '%Ab%eA' expect = '\xab\xea' result = urllib.unquote(given) self.assertEqual(expect, result, "using unquote(): %r != %r" % (expect, result)) def test_unquoting_parts(self): # Make sure unquoting works when have non-quoted characters # interspersed given = 'ab%sd' % hexescape('c') expect = "abcd" result = urllib.unquote(given) self.assertEqual(expect, result, "using quote(): %s != %s" % (expect, result)) result = urllib.unquote_plus(given) self.assertEqual(expect, result, "using unquote_plus(): %s != %s" % (expect, result)) def test_unquoting_plus(self): # Test difference between unquote() and unquote_plus() given = "are+there+spaces..." expect = given result = urllib.unquote(given) self.assertEqual(expect, result, "using unquote(): %s != %s" % (expect, result)) expect = given.replace('+', ' ') result = urllib.unquote_plus(given) self.assertEqual(expect, result, "using unquote_plus(): %s != %s" % (expect, result)) def test_unquote_with_unicode(self): r = urllib.unquote(u'br%C3%BCckner_sapporo_20050930.doc') self.assertEqual(r, u'br\xc3\xbcckner_sapporo_20050930.doc') class urlencode_Tests(unittest.TestCase): """Tests for urlencode()""" def help_inputtype(self, given, test_type): """Helper method for testing different input types. 'given' must lead to only the pairs: * 1st, 1 * 2nd, 2 * 3rd, 3 Test cannot assume anything about order. Docs make no guarantee and have possible dictionary input. 
""" expect_somewhere = ["1st=1", "2nd=2", "3rd=3"] result = urllib.urlencode(given) for expected in expect_somewhere: self.assertIn(expected, result, "testing %s: %s not found in %s" % (test_type, expected, result)) self.assertEqual(result.count('&'), 2, "testing %s: expected 2 '&'s; got %s" % (test_type, result.count('&'))) amp_location = result.index('&') on_amp_left = result[amp_location - 1] on_amp_right = result[amp_location + 1] self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(), "testing %s: '&' not located in proper place in %s" % (test_type, result)) self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps "testing %s: " "unexpected number of characters: %s != %s" % (test_type, len(result), (5 * 3) + 2)) def test_using_mapping(self): # Test passing in a mapping object as an argument. self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'}, "using dict as input type") def test_using_sequence(self): # Test passing in a sequence of two-item sequences as an argument. self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')], "using sequence of two-item tuples as input") def test_quoting(self): # Make sure keys and values are quoted using quote_plus() given = {"&":"="} expect = "%s=%s" % (hexescape('&'), hexescape('=')) result = urllib.urlencode(given) self.assertEqual(expect, result) given = {"key name":"A bunch of pluses"} expect = "key+name=A+bunch+of+pluses" result = urllib.urlencode(given) self.assertEqual(expect, result) def test_doseq(self): # Test that passing True for 'doseq' parameter works correctly given = {'sequence':['1', '2', '3']} expect = "sequence=%s" % urllib.quote_plus(str(['1', '2', '3'])) result = urllib.urlencode(given) self.assertEqual(expect, result) result = urllib.urlencode(given, True) for value in given["sequence"]: expect = "sequence=%s" % value self.assertIn(expect, result) self.assertEqual(result.count('&'), 2, "Expected 2 '&'s, got %s" % result.count('&')) class Pathname_Tests(unittest.TestCase): """Test pathname2url() and url2pathname()""" def test_basic(self): # Make sure simple tests pass expected_path = os.path.join("parts", "of", "a", "path") expected_url = "parts/of/a/path" result = urllib.pathname2url(expected_path) self.assertEqual(expected_url, result, "pathname2url() failed; %s != %s" % (result, expected_url)) result = urllib.url2pathname(expected_url) self.assertEqual(expected_path, result, "url2pathame() failed; %s != %s" % (result, expected_path)) def test_quoting(self): # Test automatic quoting and unquoting works for pathnam2url() and # url2pathname() respectively given = os.path.join("needs", "quot=ing", "here") expect = "needs/%s/here" % urllib.quote("quot=ing") result = urllib.pathname2url(given) self.assertEqual(expect, result, "pathname2url() failed; %s != %s" % (expect, result)) expect = given result = urllib.url2pathname(result) self.assertEqual(expect, result, "url2pathname() failed; %s != %s" % (expect, result)) given = os.path.join("make sure", "using_quote") expect = "%s/using_quote" % urllib.quote("make sure") result = urllib.pathname2url(given) self.assertEqual(expect, result, "pathname2url() failed; %s != %s" % (expect, result)) given = "make+sure/using_unquote" expect = os.path.join("make+sure", "using_unquote") result = urllib.url2pathname(given) self.assertEqual(expect, result, "url2pathname() failed; %s != %s" % (expect, result)) @unittest.skipUnless(sys.platform == 'win32', 'test specific to the nturl2path library') def test_ntpath(self): given = ('/C:/', '///C:/', '/C|//') expect = 'C:\\' 
for url in given: result = urllib.url2pathname(url) self.assertEqual(expect, result, 'nturl2path.url2pathname() failed; %s != %s' % (expect, result)) given = '///C|/path' expect = 'C:\\path' result = urllib.url2pathname(given) self.assertEqual(expect, result, 'nturl2path.url2pathname() failed; %s != %s' % (expect, result)) class Utility_Tests(unittest.TestCase): """Testcase to test the various utility functions in the urllib.""" def test_splitpasswd(self): """Some of the password examples are not sensible, but it is added to confirming to RFC2617 and addressing issue4675. """ self.assertEqual(('user', 'ab'),urllib.splitpasswd('user:ab')) self.assertEqual(('user', 'a\nb'),urllib.splitpasswd('user:a\nb')) self.assertEqual(('user', 'a\tb'),urllib.splitpasswd('user:a\tb')) self.assertEqual(('user', 'a\rb'),urllib.splitpasswd('user:a\rb')) self.assertEqual(('user', 'a\fb'),urllib.splitpasswd('user:a\fb')) self.assertEqual(('user', 'a\vb'),urllib.splitpasswd('user:a\vb')) self.assertEqual(('user', 'a:b'),urllib.splitpasswd('user:a:b')) self.assertEqual(('user', 'a b'),urllib.splitpasswd('user:a b')) self.assertEqual(('user 2', 'ab'),urllib.splitpasswd('user 2:ab')) self.assertEqual(('user+1', 'a+b'),urllib.splitpasswd('user+1:a+b')) class URLopener_Tests(unittest.TestCase): """Testcase to test the open method of URLopener class.""" def test_quoted_open(self): class DummyURLopener(urllib.URLopener): def open_spam(self, url): return url self.assertEqual(DummyURLopener().open( 'spam://example/ /'),'//example/%20/') # test the safe characters are not quoted by urlopen self.assertEqual(DummyURLopener().open( "spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"), "//c:|windows%/:=&?~#+!$,;'@()*[]|/path/") def test_main(): import warnings with warnings.catch_warnings(): warnings.filterwarnings('ignore', ".*urllib\.urlopen.*Python 3.0", DeprecationWarning) test_support.run_unittest( urlopen_FileTests, urlopen_HttpTests, urlretrieve_FileTests, urlretrieve_HttpTests, ProxyTests, QuotingTests, UnquotingTests, urlencode_Tests, Pathname_Tests, Utility_Tests, URLopener_Tests, #FTPWrapperTests, ) if __name__ == '__main__': test_main()
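# Hedged illustration (not part of the test suite above): the quote()/quote_plus()
# and unquote()/unquote_plus() behaviour these tests exercise, using the
# Python 2 urllib module directly.
import urllib
assert urllib.quote('a b') == 'a%20b'          # quote() hex-escapes the space
assert urllib.quote_plus('a b') == 'a+b'       # quote_plus() uses '+' for spaces
assert urllib.unquote('a%20b') == 'a b'
assert urllib.unquote_plus('a+b') == 'a b'
assert urllib.quote('/path') == '/path'        # '/' is in the default 'safe' set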
""" Romanian specific form helpers. """ import re from django.core.validators import EMPTY_VALUES from django.forms import ValidationError, Field, RegexField, Select from django.utils.translation import ugettext_lazy as _ class ROCIFField(RegexField): """ A Romanian fiscal identity code (CIF) field For CIF validation algorithm see http://www.validari.ro/cui.html """ default_error_messages = { 'invalid': _("Enter a valid CIF."), } def __init__(self, *args, **kwargs): super(ROCIFField, self).__init__(r'^[0-9]{2,10}', max_length=10, min_length=2, *args, **kwargs) def clean(self, value): """ CIF validation """ value = super(ROCIFField, self).clean(value) if value in EMPTY_VALUES: return u'' # strip RO part if value[0:2] == 'RO': value = value[2:] key = '753217532'[::-1] value = value[::-1] key_iter = iter(key) checksum = 0 for digit in value[1:]: checksum += int(digit) * int(key_iter.next()) checksum = checksum * 10 % 11 if checksum == 10: checksum = 0 if checksum != int(value[0]): raise ValidationError(self.error_messages['invalid']) return value[::-1] class ROCNPField(RegexField): """ A Romanian personal identity code (CNP) field For CNP validation algorithm see http://www.validari.ro/cnp.html """ default_error_messages = { 'invalid': _("Enter a valid CNP."), } def __init__(self, *args, **kwargs): super(ROCNPField, self).__init__(r'^[1-9][0-9]{12}', max_length=13, min_length=13, *args, **kwargs) def clean(self, value): """ CNP validations """ value = super(ROCNPField, self).clean(value) # check birthdate digits import datetime try: datetime.date(int(value[1:3]),int(value[3:5]),int(value[5:7])) except: raise ValidationError(self.error_messages['invalid']) # checksum key = '279146358279' checksum = 0 value_iter = iter(value) for digit in key: checksum += int(digit) * int(value_iter.next()) checksum %= 11 if checksum == 10: checksum = 1 if checksum != int(value[12]): raise ValidationError(self.error_messages['invalid']) return value class ROCountyField(Field): """ A form field that validates its input is a Romanian county name or abbreviation. It normalizes the input to the standard vehicle registration abbreviation for the given county WARNING: This field will only accept names written with diacritics; consider using ROCountySelect if this behavior is unnaceptable for you Example: Argeş => valid Arges => invalid """ default_error_messages = { 'invalid': u'Enter a Romanian county code or name.', } def clean(self, value): from ro_counties import COUNTIES_CHOICES super(ROCountyField, self).clean(value) if value in EMPTY_VALUES: return u'' try: value = value.strip().upper() except AttributeError: pass # search for county code for entry in COUNTIES_CHOICES: if value in entry: return value # search for county name normalized_CC = [] for entry in COUNTIES_CHOICES: normalized_CC.append((entry[0],entry[1].upper())) for entry in normalized_CC: if entry[1] == value: return entry[0] raise ValidationError(self.error_messages['invalid']) class ROCountySelect(Select): """ A Select widget that uses a list of Romanian counties (judete) as its choices. 
""" def __init__(self, attrs=None): from ro_counties import COUNTIES_CHOICES super(ROCountySelect, self).__init__(attrs, choices=COUNTIES_CHOICES) class ROIBANField(RegexField): """ Romanian International Bank Account Number (IBAN) field For Romanian IBAN validation algorithm see http://validari.ro/iban.html """ default_error_messages = { 'invalid': _('Enter a valid IBAN in ROXX-XXXX-XXXX-XXXX-XXXX-XXXX format'), } def __init__(self, *args, **kwargs): super(ROIBANField, self).__init__(r'^[0-9A-Za-z\-\s]{24,40}$', max_length=40, min_length=24, *args, **kwargs) def clean(self, value): """ Strips - and spaces, performs country code and checksum validation """ value = super(ROIBANField, self).clean(value) value = value.replace('-','') value = value.replace(' ','') value = value.upper() if value[0:2] != 'RO': raise ValidationError(self.error_messages['invalid']) numeric_format = '' for char in value[4:] + value[0:4]: if char.isalpha(): numeric_format += str(ord(char) - 55) else: numeric_format += char if int(numeric_format) % 97 != 1: raise ValidationError(self.error_messages['invalid']) return value class ROPhoneNumberField(RegexField): """Romanian phone number field""" default_error_messages = { 'invalid': _('Phone numbers must be in XXXX-XXXXXX format.'), } def __init__(self, *args, **kwargs): super(ROPhoneNumberField, self).__init__(r'^[0-9\-\(\)\s]{10,20}$', max_length=20, min_length=10, *args, **kwargs) def clean(self, value): """ Strips -, (, ) and spaces. Checks the final length. """ value = super(ROPhoneNumberField, self).clean(value) value = value.replace('-','') value = value.replace('(','') value = value.replace(')','') value = value.replace(' ','') if len(value) != 10: raise ValidationError(self.error_messages['invalid']) return value class ROPostalCodeField(RegexField): """Romanian postal code field.""" default_error_messages = { 'invalid': _('Enter a valid postal code in the format XXXXXX'), } def __init__(self, *args, **kwargs): super(ROPostalCodeField, self).__init__(r'^[0-9][0-8][0-9]{4}$', max_length=6, min_length=6, *args, **kwargs)
from __future__ import unicode_literals import io import optparse import os import sys ROOT_DIR = os.path.join(os.path.dirname(__file__), '..') sys.path.insert(0, ROOT_DIR) import youtube_dl def main(): parser = optparse.OptionParser(usage='%prog OUTFILE.md') options, args = parser.parse_args() if len(args) != 1: parser.error('Expected an output filename') outfile, = args def gen_ies_md(ies): for ie in ies: ie_md = '**{0}**'.format(ie.IE_NAME) ie_desc = getattr(ie, 'IE_DESC', None) if ie_desc is False: continue if ie_desc is not None: ie_md += ': {0}'.format(ie.IE_DESC) if not ie.working(): ie_md += ' (Currently broken)' yield ie_md ies = sorted(youtube_dl.gen_extractors(), key=lambda i: i.IE_NAME.lower()) out = '# Supported sites\n' + ''.join( ' - ' + md + '\n' for md in gen_ies_md(ies)) with io.open(outfile, 'w', encoding='utf-8') as outf: outf.write(out) if __name__ == '__main__': main()
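# Hedged usage note for the script above: it takes a single output path as its
# argument; the invocation below is an example, the exact script location in
# the repository may differ.
#
#   python devscripts/make_supportedsites.py docs/supportedsites.md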
import _codecs_kr, codecs import _multibytecodec as mbc codec = _codecs_kr.getcodec('johab') class Codec(codecs.Codec): encode = codec.encode decode = codec.decode class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, codecs.IncrementalEncoder): codec = codec class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, codecs.IncrementalDecoder): codec = codec class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): codec = codec class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): codec = codec def getregentry(): return codecs.CodecInfo( name='johab', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, )
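# Hedged round-trip sketch (not part of the codec module above), assuming the
# CJKCodecs johab tables are available as in the Python 2 standard library:
# a Hangul syllable survives an encode/decode round trip by codec name.
if __name__ == '__main__':
    syllable = u'\uac00'   # HANGUL SYLLABLE GA
    assert syllable.encode('johab').decode('johab') == syllable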
import BoostBuild t = BoostBuild.Tester(pass_toolset=0, pass_d0=False) t.write("sleep.bat", """\ ::@timeout /T %1 /NOBREAK >nul @ping 127.0.0.1 -n 2 -w 1000 >nul @ping 127.0.0.1 -n %1 -w 1000 >nul @exit /B 0 """) t.write("file.jam", """\ if $(NT) { SLEEP = @call sleep.bat ; } else { SLEEP = sleep ; } actions .gen. { echo 001 $(SLEEP) 4 echo 002 } rule .use.1 { DEPENDS $(<) : $(>) ; } actions .use.1 { echo 003 } rule .use.2 { DEPENDS $(<) : $(>) ; } actions .use.2 { $(SLEEP) 1 echo 004 } .gen. g1.generated g2.generated ; .use.1 u1.user : g1.generated ; .use.2 u2.user : g2.generated ; DEPENDS all : u1.user u2.user ; """) t.run_build_system(["-ffile.jam", "-j2"], stdout="""\ ...found 5 targets... ...updating 4 targets... .gen. g1.generated 001 002 .use.1 u1.user 003 .use.2 u2.user 004 ...updated 4 targets... """) t.cleanup()
ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nxos_vtp_password extends_documentation_fragment: nxos version_added: "2.2" short_description: Manages VTP password configuration. description: - Manages VTP password configuration. author: - Gabriele Gerbino (@GGabriele) notes: - VTP feature must be active on the device to use this module. - This module is used to manage only VTP passwords. - Use this in combination with M(nxos_vtp_domain) and M(nxos_vtp_version) to fully manage VTP operations. - You can set/remove password only if a VTP domain already exist. - If C(state=absent) and no C(vtp_password) is provided, it remove the current VTP password. - If C(state=absent) and C(vtp_password) is provided, the proposed C(vtp_password) has to match the existing one in order to remove it. options: vtp_password: description: - VTP password required: false default: null state: description: - Manage the state of the resource required: false default: present choices: ['present','absent'] ''' EXAMPLES = ''' - nxos_vtp_password: password: ntc state: present host: "{{ inventory_hostname }}" username: "{{ un }}" password: "{{ pwd }}" - nxos_vtp_password: password: ntc state: absent host: "{{ inventory_hostname }}" username: "{{ un }}" password: "{{ pwd }}" ''' RETURN = ''' proposed: description: k/v pairs of parameters passed into module returned: always type: dict sample: {"vtp_password": "new_ntc"} existing: description: - k/v pairs of existing vtp returned: always type: dict sample: {"domain": "ntc", "version": "1", "vtp_password": "ntc"} end_state: description: k/v pairs of vtp after module execution returned: always type: dict sample: {"domain": "ntc", "version": "1", "vtp_password": "new_ntc"} updates: description: command sent to the device returned: always type: list sample: ["vtp password new_ntc"] changed: description: check to see if a change was made on the device returned: always type: boolean sample: true ''' from ansible.module_utils.nxos import get_config, load_config, run_commands from ansible.module_utils.nxos import nxos_argument_spec, check_args from ansible.module_utils.basic import AnsibleModule import re def execute_show_command(command, module, command_type='cli_show'): if module.params['transport'] == 'cli': if 'show run' not in command: command += ' | json' cmds = [command] body = run_commands(module, cmds) elif module.params['transport'] == 'nxapi': cmds = [command] body = run_commands(module, cmds) return body def flatten_list(command_lists): flat_command_list = [] for command in command_lists: if isinstance(command, list): flat_command_list.extend(command) else: flat_command_list.append(command) return flat_command_list def apply_key_map(key_map, table): new_dict = {} for key, value in table.items(): new_key = key_map.get(key) if new_key: value = table.get(key) if value: new_dict[new_key] = str(value) else: new_dict[new_key] = value return new_dict def get_vtp_config(module): command = 'show vtp status' body = execute_show_command( command, module, command_type='cli_show_ascii')[0] vtp_parsed = {} if body: version_regex = '.*VTP version running\s+:\s+(?P<version>\d).*' domain_regex = '.*VTP Domain Name\s+:\s+(?P<domain>\S+).*' try: match_version = re.match(version_regex, body, re.DOTALL) version = match_version.groupdict()['version'] except AttributeError: version = '' try: match_domain = re.match(domain_regex, body, re.DOTALL) domain = match_domain.groupdict()['domain'] except AttributeError: 
domain = '' if domain and version: vtp_parsed['domain'] = domain vtp_parsed['version'] = version vtp_parsed['vtp_password'] = get_vtp_password(module) return vtp_parsed def get_vtp_password(module): command = 'show vtp password' body = execute_show_command(command, module)[0] password = body['passwd'] if password: return str(password) else: return "" def main(): argument_spec = dict( vtp_password=dict(type='str', no_log=True), state=dict(choices=['absent', 'present'], default='present'), ) argument_spec.update(nxos_argument_spec) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) warnings = list() check_args(module, warnings) vtp_password = module.params['vtp_password'] or None state = module.params['state'] existing = get_vtp_config(module) end_state = existing args = dict(vtp_password=vtp_password) changed = False proposed = dict((k, v) for k, v in args.items() if v is not None) delta = dict(set(proposed.items()).difference(existing.items())) commands = [] if state == 'absent': if vtp_password is not None: if existing['vtp_password'] == proposed['vtp_password']: commands.append(['no vtp password']) else: module.fail_json(msg="Proposed vtp password doesn't match " "current vtp password. It cannot be " "removed when state=absent. If you are " "trying to change the vtp password, use " "state=present.") else: if not existing.get('domain'): module.fail_json(msg='Cannot remove a vtp password ' 'before vtp domain is set.') elif existing['vtp_password'] != ('\\'): commands.append(['no vtp password']) elif state == 'present': if delta: if not existing.get('domain'): module.fail_json(msg='Cannot set vtp password ' 'before vtp domain is set.') else: commands.append(['vtp password {0}'.format(vtp_password)]) cmds = flatten_list(commands) if cmds: if module.check_mode: module.exit_json(changed=True, commands=cmds) else: changed = True load_config(module, cmds) end_state = get_vtp_config(module) if 'configure' in cmds: cmds.pop(0) results = {} results['proposed'] = proposed results['existing'] = existing results['end_state'] = end_state results['updates'] = cmds results['changed'] = changed results['warnings'] = warnings module.exit_json(**results) if __name__ == '__main__': main()
class C: def foo(self): x = 1 y = 2 x = 1 def foo(): pass
import getpass import os import urllib DEFAULT_GAIA_URL = "https://www.google.com:443/accounts/ClientLogin" class GaiaAuthenticator: def __init__(self, service, url = DEFAULT_GAIA_URL): self._service = service self._url = url ## Logins to gaia and returns auth token. def authenticate(self, email, passwd): params = urllib.urlencode({'Email': email, 'Passwd': passwd, 'source': 'chromoting', 'service': self._service, 'PersistentCookie': 'true', 'accountType': 'GOOGLE'}) f = urllib.urlopen(self._url, params); result = f.read() for line in result.splitlines(): if line.startswith('Auth='): auth_string = line[5:] return auth_string raise Exception("Gaia didn't return auth token: " + result)
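# Hedged usage sketch for the class above (the prompts and service name are
# illustrative; Python 2 style to match the module).
if __name__ == '__main__':
    authenticator = GaiaAuthenticator('chromoting')
    email = raw_input('Email: ')
    passwd = getpass.getpass('Password: ')
    print 'Auth token:', authenticator.authenticate(email, passwd)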
"""Checker for spelling errors in comments and docstrings. """ import sys import tokenize import string import re if sys.version_info[0] >= 3: maketrans = str.maketrans else: maketrans = string.maketrans from pylint.interfaces import ITokenChecker, IAstroidChecker from pylint.checkers import BaseTokenChecker from pylint.checkers.utils import check_messages try: import enchant except ImportError: enchant = None if enchant is not None: br = enchant.Broker() dicts = br.list_dicts() dict_choices = [''] + [d[0] for d in dicts] dicts = ["%s (%s)" % (d[0], d[1].name) for d in dicts] dicts = ", ".join(dicts) instr = "" else: dicts = "none" dict_choices = [''] instr = " To make it working install python-enchant package." table = maketrans("", "") class SpellingChecker(BaseTokenChecker): """Check spelling in comments and docstrings""" __implements__ = (ITokenChecker, IAstroidChecker) name = 'spelling' msgs = { 'C0401': ('Wrong spelling of a word \'%s\' in a comment:\n%s\n' '%s\nDid you mean: \'%s\'?', 'wrong-spelling-in-comment', 'Used when a word in comment is not spelled correctly.'), 'C0402': ('Wrong spelling of a word \'%s\' in a docstring:\n%s\n' '%s\nDid you mean: \'%s\'?', 'wrong-spelling-in-docstring', 'Used when a word in docstring is not spelled correctly.'), } options = (('spelling-dict', {'default' : '', 'type' : 'choice', 'metavar' : '<dict name>', 'choices': dict_choices, 'help' : 'Spelling dictionary name. ' 'Available dictionaries: %s.%s' % (dicts, instr)}), ('spelling-ignore-words', {'default' : '', 'type' : 'string', 'metavar' : '<comma separated words>', 'help' : 'List of comma separated words that ' 'should not be checked.'}), ('spelling-private-dict-file', {'default' : '', 'type' : 'string', 'metavar' : '<path to file>', 'help' : 'A path to a file that contains private ' 'dictionary; one word per line.'}), ('spelling-store-unknown-words', {'default' : 'n', 'type' : 'yn', 'metavar' : '<y_or_n>', 'help' : 'Tells whether to store unknown words to ' 'indicated private dictionary in ' '--spelling-private-dict-file option instead of ' 'raising a message.'}), ) def open(self): self.initialized = False self.private_dict_file = None if enchant is None: return dict_name = self.config.spelling_dict if not dict_name: return self.ignore_list = [w.strip() for w in self.config.spelling_ignore_words.split(",")] # "param" appears in docstring in param description and # "pylint" appears in comments in pylint pragmas. self.ignore_list.extend(["param", "pylint"]) if self.config.spelling_private_dict_file: self.spelling_dict = enchant.DictWithPWL( dict_name, self.config.spelling_private_dict_file) self.private_dict_file = open( self.config.spelling_private_dict_file, "a") else: self.spelling_dict = enchant.Dict(dict_name) if self.config.spelling_store_unknown_words: self.unknown_words = set() # Prepare regex for stripping punctuation signs from text. # ' and _ are treated in a special way. puncts = string.punctuation.replace("'", "").replace("_", "") self.punctuation_regex = re.compile('[%s]' % re.escape(puncts)) self.initialized = True def close(self): if self.private_dict_file: self.private_dict_file.close() def _check_spelling(self, msgid, line, line_num): line2 = line.strip() # Replace ['afadf with afadf (but preserve don't) line2 = re.sub("'([^a-zA-Z]|$)", " ", line2) # Replace afadf'] with afadf (but preserve don't) line2 = re.sub("([^a-zA-Z]|^)'", " ", line2) # Replace punctuation signs with space e.g. 
and/or -> and or line2 = self.punctuation_regex.sub(' ', line2) words = [] for word in line2.split(): # Skip words with digits. if len(re.findall(r"\d", word)) > 0: continue # Skip words with mixed big and small letters, # they are probaly class names. if (len(re.findall("[A-Z]", word)) > 0 and len(re.findall("[a-z]", word)) > 0 and len(word) > 2): continue # Skip words with _ - they are probably function parameter names. if word.count('_') > 0: continue words.append(word) # Go through words and check them. for word in words: # Skip words from ignore list. if word in self.ignore_list: continue orig_word = word word = word.lower() # Strip starting u' from unicode literals and r' from raw strings. if (word.startswith("u'") or word.startswith('u"') or word.startswith("r'") or word.startswith('r"')) and len(word) > 2: word = word[2:] # If it is a known word, then continue. if self.spelling_dict.check(word): continue # Store word to private dict or raise a message. if self.config.spelling_store_unknown_words: if word not in self.unknown_words: self.private_dict_file.write("%s\n" % word) self.unknown_words.add(word) else: # Present up to 4 suggestions. # TODO: add support for customising this. suggestions = self.spelling_dict.suggest(word)[:4] m = re.search(r"(\W|^)(%s)(\W|$)" % word, line.lower()) if m: # Start position of second group in regex. col = m.regs[2][0] else: col = line.lower().index(word) indicator = (" " * col) + ("^" * len(word)) self.add_message(msgid, line=line_num, args=(orig_word, line, indicator, "' or '".join(suggestions))) def process_tokens(self, tokens): if not self.initialized: return # Process tokens and look for comments. for (tok_type, token, (start_row, _), _, _) in tokens: if tok_type == tokenize.COMMENT: self._check_spelling('wrong-spelling-in-comment', token, start_row) @check_messages('wrong-spelling-in-docstring') def visit_module(self, node): if not self.initialized: return self._check_docstring(node) @check_messages('wrong-spelling-in-docstring') def visit_class(self, node): if not self.initialized: return self._check_docstring(node) @check_messages('wrong-spelling-in-docstring') def visit_function(self, node): if not self.initialized: return self._check_docstring(node) def _check_docstring(self, node): """check the node has any spelling errors""" docstring = node.doc if not docstring: return start_line = node.lineno + 1 # Go through lines of docstring for idx, line in enumerate(docstring.splitlines()): self._check_spelling('wrong-spelling-in-docstring', line, start_line + idx) def register(linter): """required method to auto register this checker """ linter.register_checker(SpellingChecker(linter))
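# Hedged sketch of the pyenchant calls the checker above builds on (the
# dictionary name is an example; requires the python-enchant package and an
# installed dictionary).
if __name__ == '__main__':
    import enchant
    d = enchant.Dict('en_US')
    word = 'speling'
    if not d.check(word):
        # Mirrors the "Did you mean" message: show up to 4 suggestions.
        print("misspelled %r; did you mean: %s"
              % (word, "' or '".join(d.suggest(word)[:4])))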
from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_parse_qs, compat_urlparse, ) from ..utils import ( determine_ext, int_or_none, xpath_text, ) class InternetVideoArchiveIE(InfoExtractor): _VALID_URL = r'https?://video\.internetvideoarchive\.net/(?:player|flash/players)/.*?\?.*?publishedid.*?' _TEST = { 'url': 'http://video.internetvideoarchive.net/player/6/configuration.ashx?customerid=69249&publishedid=194487&reporttag=vdbetatitle&playerid=641&autolist=0&domain=www.videodetective.com&maxrate=high&minrate=low&socialplayer=false', 'info_dict': { 'id': '194487', 'ext': 'mp4', 'title': 'KICK-ASS 2', 'description': 'md5:c189d5b7280400630a1d3dd17eaa8d8a', }, 'params': { # m3u8 download 'skip_download': True, }, } @staticmethod def _build_json_url(query): return 'http://video.internetvideoarchive.net/player/6/configuration.ashx?' + query @staticmethod def _build_xml_url(query): return 'http://video.internetvideoarchive.net/flash/players/flashconfiguration.aspx?' + query def _real_extract(self, url): query = compat_urlparse.urlparse(url).query query_dic = compat_parse_qs(query) video_id = query_dic['publishedid'][0] if '/player/' in url: configuration = self._download_json(url, video_id) # There are multiple videos in the playlist whlie only the first one # matches the video played in browsers video_info = configuration['playlist'][0] title = video_info['title'] formats = [] for source in video_info['sources']: file_url = source['file'] if determine_ext(file_url) == 'm3u8': m3u8_formats = self._extract_m3u8_formats( file_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) if m3u8_formats: formats.extend(m3u8_formats) file_url = m3u8_formats[0]['url'] formats.extend(self._extract_f4m_formats( file_url.replace('.m3u8', '.f4m'), video_id, f4m_id='hds', fatal=False)) formats.extend(self._extract_mpd_formats( file_url.replace('.m3u8', '.mpd'), video_id, mpd_id='dash', fatal=False)) else: a_format = { 'url': file_url, } if source.get('label') and source['label'][-4:] == ' kbs': tbr = int_or_none(source['label'][:-4]) a_format.update({ 'tbr': tbr, 'format_id': 'http-%d' % tbr, }) formats.append(a_format) self._sort_formats(formats) description = video_info.get('description') thumbnail = video_info.get('image') else: configuration = self._download_xml(url, video_id) formats = [{ 'url': xpath_text(configuration, './file', 'file URL', fatal=True), }] thumbnail = xpath_text(configuration, './image', 'thumbnail') title = 'InternetVideoArchive video %s' % video_id description = None return { 'id': video_id, 'title': title, 'formats': formats, 'thumbnail': thumbnail, 'description': description, }
from __future__ import unicode_literals import frappe test_records = frappe.get_test_records('Blog Category')
import re from django.template import Node, Variable, VariableNode from django.template import TemplateSyntaxError, TokenParser, Library from django.template import TOKEN_TEXT, TOKEN_VAR from django.template.base import _render_value_in_context from django.utils import translation from django.utils.encoding import force_unicode from django.template.defaulttags import token_kwargs register = Library() class GetAvailableLanguagesNode(Node): def __init__(self, variable): self.variable = variable def render(self, context): from django.conf import settings context[self.variable] = [(k, translation.ugettext(v)) for k, v in settings.LANGUAGES] return '' class GetLanguageInfoNode(Node): def __init__(self, lang_code, variable): self.lang_code = Variable(lang_code) self.variable = variable def render(self, context): lang_code = self.lang_code.resolve(context) context[self.variable] = translation.get_language_info(lang_code) return '' class GetLanguageInfoListNode(Node): def __init__(self, languages, variable): self.languages = Variable(languages) self.variable = variable def get_language_info(self, language): # ``language`` is either a language code string or a sequence # with the language code as its first item if len(language[0]) > 1: return translation.get_language_info(language[0]) else: return translation.get_language_info(str(language)) def render(self, context): langs = self.languages.resolve(context) context[self.variable] = [self.get_language_info(lang) for lang in langs] return '' class GetCurrentLanguageNode(Node): def __init__(self, variable): self.variable = variable def render(self, context): context[self.variable] = translation.get_language() return '' class GetCurrentLanguageBidiNode(Node): def __init__(self, variable): self.variable = variable def render(self, context): context[self.variable] = translation.get_language_bidi() return '' class TranslateNode(Node): def __init__(self, filter_expression, noop): self.noop = noop self.filter_expression = filter_expression if isinstance(self.filter_expression.var, basestring): self.filter_expression.var = Variable(u"'%s'" % self.filter_expression.var) def render(self, context): self.filter_expression.var.translate = not self.noop output = self.filter_expression.resolve(context) return _render_value_in_context(output, context) class BlockTranslateNode(Node): def __init__(self, extra_context, singular, plural=None, countervar=None, counter=None): self.extra_context = extra_context self.singular = singular self.plural = plural self.countervar = countervar self.counter = counter def render_token_list(self, tokens): result = [] vars = [] for token in tokens: if token.token_type == TOKEN_TEXT: result.append(token.contents) elif token.token_type == TOKEN_VAR: result.append(u'%%(%s)s' % token.contents) vars.append(token.contents) return ''.join(result), vars def render(self, context): tmp_context = {} for var, val in self.extra_context.items(): tmp_context[var] = val.resolve(context) # Update() works like a push(), so corresponding context.pop() is at # the end of function context.update(tmp_context) singular, vars = self.render_token_list(self.singular) if self.plural and self.countervar and self.counter: count = self.counter.resolve(context) context[self.countervar] = count plural, plural_vars = self.render_token_list(self.plural) result = translation.ungettext(singular, plural, count) vars.extend(plural_vars) else: result = translation.ugettext(singular) # Escape all isolated '%' before substituting in the context. 
result = re.sub(u'%(?!\()', u'%%', result) data = dict([(v, _render_value_in_context(context[v], context)) for v in vars]) context.pop() return result % data def do_get_available_languages(parser, token): """ This will store a list of available languages in the context. Usage:: {% get_available_languages as languages %} {% for language in languages %} ... {% endfor %} This will just pull the LANGUAGES setting from your setting file (or the default settings) and put it into the named variable. """ args = token.contents.split() if len(args) != 3 or args[1] != 'as': raise TemplateSyntaxError("'get_available_languages' requires 'as variable' (got %r)" % args) return GetAvailableLanguagesNode(args[2]) def do_get_language_info(parser, token): """ This will store the language information dictionary for the given language code in a context variable. Usage:: {% get_language_info for LANGUAGE_CODE as l %} {{ l.code }} {{ l.name }} {{ l.name_local }} {{ l.bidi|yesno:"bi-directional,uni-directional" }} """ args = token.contents.split() if len(args) != 5 or args[1] != 'for' or args[3] != 'as': raise TemplateSyntaxError("'%s' requires 'for string as variable' (got %r)" % (args[0], args[1:])) return GetLanguageInfoNode(args[2], args[4]) def do_get_language_info_list(parser, token): """ This will store a list of language information dictionaries for the given language codes in a context variable. The language codes can be specified either as a list of strings or a settings.LANGUAGES style tuple (or any sequence of sequences whose first items are language codes). Usage:: {% get_language_info_list for LANGUAGES as langs %} {% for l in langs %} {{ l.code }} {{ l.name }} {{ l.name_local }} {{ l.bidi|yesno:"bi-directional,uni-directional" }} {% endfor %} """ args = token.contents.split() if len(args) != 5 or args[1] != 'for' or args[3] != 'as': raise TemplateSyntaxError("'%s' requires 'for sequence as variable' (got %r)" % (args[0], args[1:])) return GetLanguageInfoListNode(args[2], args[4]) def language_name(lang_code): return translation.get_language_info(lang_code)['name'] def language_name_local(lang_code): return translation.get_language_info(lang_code)['name_local'] def language_bidi(lang_code): return translation.get_language_info(lang_code)['bidi'] def do_get_current_language(parser, token): """ This will store the current language in the context. Usage:: {% get_current_language as language %} This will fetch the currently active language and put it's value into the ``language`` context variable. """ args = token.contents.split() if len(args) != 3 or args[1] != 'as': raise TemplateSyntaxError("'get_current_language' requires 'as variable' (got %r)" % args) return GetCurrentLanguageNode(args[2]) def do_get_current_language_bidi(parser, token): """ This will store the current language layout in the context. Usage:: {% get_current_language_bidi as bidi %} This will fetch the currently active language's layout and put it's value into the ``bidi`` context variable. True indicates right-to-left layout, otherwise left-to-right """ args = token.contents.split() if len(args) != 3 or args[1] != 'as': raise TemplateSyntaxError("'get_current_language_bidi' requires 'as variable' (got %r)" % args) return GetCurrentLanguageBidiNode(args[2]) def do_translate(parser, token): """ This will mark a string for translation and will translate the string for the current language. 
Usage:: {% trans "this is a test" %} This will mark the string for translation so it will be pulled out by mark-messages.py into the .po files and will run the string through the translation engine. There is a second form:: {% trans "this is a test" noop %} This will only mark for translation, but will return the string unchanged. Use it when you need to store values into forms that should be translated later on. You can use variables instead of constant strings to translate stuff you marked somewhere else:: {% trans variable %} This will just try to translate the contents of the variable ``variable``. Make sure that the string in there is something that is in the .po file. """ class TranslateParser(TokenParser): def top(self): value = self.value() # Backwards Compatiblity fix: # FilterExpression does not support single-quoted strings, # so we make a cheap localized fix in order to maintain # backwards compatibility with existing uses of ``trans`` # where single quote use is supported. if value[0] == "'": pos = None m = re.match("^'([^']+)'(\|.*$)",value) if m: value = '"%s"%s' % (m.group(1).replace('"','\\"'),m.group(2)) elif value[-1] == "'": value = '"%s"' % value[1:-1].replace('"','\\"') if self.more(): if self.tag() == 'noop': noop = True else: raise TemplateSyntaxError("only option for 'trans' is 'noop'") else: noop = False return (value, noop) value, noop = TranslateParser(token.contents).top() return TranslateNode(parser.compile_filter(value), noop) def do_block_translate(parser, token): """ This will translate a block of text with parameters. Usage:: {% blocktrans with bar=foo|filter boo=baz|filter %} This is {{ bar }} and {{ boo }}. {% endblocktrans %} Additionally, this supports pluralization:: {% blocktrans count count=var|length %} There is {{ count }} object. {% plural %} There are {{ count }} objects. {% endblocktrans %} This is much like ngettext, only in template syntax. The "var as value" legacy format is still supported:: {% blocktrans with foo|filter as bar and baz|filter as boo %} {% blocktrans count var|length as count %} """ bits = token.split_contents() options = {} remaining_bits = bits[1:] while remaining_bits: option = remaining_bits.pop(0) if option in options: raise TemplateSyntaxError('The %r option was specified more ' 'than once.' % option) if option == 'with': value = token_kwargs(remaining_bits, parser, support_legacy=True) if not value: raise TemplateSyntaxError('"with" in %r tag needs at least ' 'one keyword argument.' % bits[0]) elif option == 'count': value = token_kwargs(remaining_bits, parser, support_legacy=True) if len(value) != 1: raise TemplateSyntaxError('"count" in %r tag expected exactly ' 'one keyword argument.' % bits[0]) else: raise TemplateSyntaxError('Unknown argument for %r tag: %r.' 
% (bits[0], option)) options[option] = value if 'count' in options: countervar, counter = options['count'].items()[0] else: countervar, counter = None, None extra_context = options.get('with', {}) singular = [] plural = [] while parser.tokens: token = parser.next_token() if token.token_type in (TOKEN_VAR, TOKEN_TEXT): singular.append(token) else: break if countervar and counter: if token.contents.strip() != 'plural': raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags inside it") while parser.tokens: token = parser.next_token() if token.token_type in (TOKEN_VAR, TOKEN_TEXT): plural.append(token) else: break if token.contents.strip() != 'endblocktrans': raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags (seen %r) inside it" % token.contents) return BlockTranslateNode(extra_context, singular, plural, countervar, counter) register.tag('get_available_languages', do_get_available_languages) register.tag('get_language_info', do_get_language_info) register.tag('get_language_info_list', do_get_language_info_list) register.tag('get_current_language', do_get_current_language) register.tag('get_current_language_bidi', do_get_current_language_bidi) register.tag('trans', do_translate) register.tag('blocktrans', do_block_translate) register.filter(language_name) register.filter(language_name_local) register.filter(language_bidi)
"""This test checks for correct wait4() behavior. """ import os import time from test.fork_wait import ForkWait from test.test_support import run_unittest, reap_children, get_attribute get_attribute(os, 'fork') get_attribute(os, 'wait4') class Wait4Test(ForkWait): def wait_impl(self, cpid): for i in range(10): # wait4() shouldn't hang, but some of the buildbots seem to hang # in the forking tests. This is an attempt to fix the problem. spid, status, rusage = os.wait4(cpid, os.WNOHANG) if spid == cpid: break time.sleep(1.0) self.assertEqual(spid, cpid) self.assertEqual(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8)) self.assertTrue(rusage) def test_main(): run_unittest(Wait4Test) reap_children() if __name__ == "__main__": test_main()
a = {'b',}
from __future__ import division, absolute_import, print_function import unittest import os import sys import copy from numpy import ( array, alltrue, ndarray, zeros, dtype, intp, clongdouble ) from numpy.testing import ( run_module_suite, assert_, assert_equal, SkipTest ) from numpy.core.multiarray import typeinfo import util wrap = None def setup(): """ Build the required testing extension module """ global wrap # Check compiler availability first if not util.has_c_compiler(): raise SkipTest("No C compiler available") if wrap is None: config_code = """ config.add_extension('test_array_from_pyobj_ext', sources=['wrapmodule.c', 'fortranobject.c'], define_macros=[]) """ d = os.path.dirname(__file__) src = [os.path.join(d, 'src', 'array_from_pyobj', 'wrapmodule.c'), os.path.join(d, '..', 'src', 'fortranobject.c'), os.path.join(d, '..', 'src', 'fortranobject.h')] wrap = util.build_module_distutils(src, config_code, 'test_array_from_pyobj_ext') def flags_info(arr): flags = wrap.array_attrs(arr)[6] return flags2names(flags) def flags2names(flags): info = [] for flagname in ['CONTIGUOUS', 'FORTRAN', 'OWNDATA', 'ENSURECOPY', 'ENSUREARRAY', 'ALIGNED', 'NOTSWAPPED', 'WRITEABLE', 'UPDATEIFCOPY', 'BEHAVED', 'BEHAVED_RO', 'CARRAY', 'FARRAY' ]: if abs(flags) & getattr(wrap, flagname, 0): info.append(flagname) return info class Intent(object): def __init__(self, intent_list=[]): self.intent_list = intent_list[:] flags = 0 for i in intent_list: if i == 'optional': flags |= wrap.F2PY_OPTIONAL else: flags |= getattr(wrap, 'F2PY_INTENT_' + i.upper()) self.flags = flags def __getattr__(self, name): name = name.lower() if name == 'in_': name = 'in' return self.__class__(self.intent_list + [name]) def __str__(self): return 'intent(%s)' % (','.join(self.intent_list)) def __repr__(self): return 'Intent(%r)' % (self.intent_list) def is_intent(self, *names): for name in names: if name not in self.intent_list: return False return True def is_intent_exact(self, *names): return len(self.intent_list) == len(names) and self.is_intent(*names) intent = Intent() _type_names = ['BOOL', 'BYTE', 'UBYTE', 'SHORT', 'USHORT', 'INT', 'UINT', 'LONG', 'ULONG', 'LONGLONG', 'ULONGLONG', 'FLOAT', 'DOUBLE', 'CFLOAT'] _cast_dict = {'BOOL': ['BOOL']} _cast_dict['BYTE'] = _cast_dict['BOOL'] + ['BYTE'] _cast_dict['UBYTE'] = _cast_dict['BOOL'] + ['UBYTE'] _cast_dict['BYTE'] = ['BYTE'] _cast_dict['UBYTE'] = ['UBYTE'] _cast_dict['SHORT'] = _cast_dict['BYTE'] + ['UBYTE', 'SHORT'] _cast_dict['USHORT'] = _cast_dict['UBYTE'] + ['BYTE', 'USHORT'] _cast_dict['INT'] = _cast_dict['SHORT'] + ['USHORT', 'INT'] _cast_dict['UINT'] = _cast_dict['USHORT'] + ['SHORT', 'UINT'] _cast_dict['LONG'] = _cast_dict['INT'] + ['LONG'] _cast_dict['ULONG'] = _cast_dict['UINT'] + ['ULONG'] _cast_dict['LONGLONG'] = _cast_dict['LONG'] + ['LONGLONG'] _cast_dict['ULONGLONG'] = _cast_dict['ULONG'] + ['ULONGLONG'] _cast_dict['FLOAT'] = _cast_dict['SHORT'] + ['USHORT', 'FLOAT'] _cast_dict['DOUBLE'] = _cast_dict['INT'] + ['UINT', 'FLOAT', 'DOUBLE'] _cast_dict['CFLOAT'] = _cast_dict['FLOAT'] + ['CFLOAT'] if ((intp().dtype.itemsize != 4 or clongdouble().dtype.alignment <= 8) and sys.platform != 'win32'): _type_names.extend(['LONGDOUBLE', 'CDOUBLE', 'CLONGDOUBLE']) _cast_dict['LONGDOUBLE'] = _cast_dict['LONG'] + \ ['ULONG', 'FLOAT', 'DOUBLE', 'LONGDOUBLE'] _cast_dict['CLONGDOUBLE'] = _cast_dict['LONGDOUBLE'] + \ ['CFLOAT', 'CDOUBLE', 'CLONGDOUBLE'] _cast_dict['CDOUBLE'] = _cast_dict['DOUBLE'] + ['CFLOAT', 'CDOUBLE'] class Type(object): _type_cache = {} def __new__(cls, name): if 
isinstance(name, dtype): dtype0 = name name = None for n, i in typeinfo.items(): if isinstance(i, tuple) and dtype0.type is i[-1]: name = n break obj = cls._type_cache.get(name.upper(), None) if obj is not None: return obj obj = object.__new__(cls) obj._init(name) cls._type_cache[name.upper()] = obj return obj def _init(self, name): self.NAME = name.upper() self.type_num = getattr(wrap, 'NPY_' + self.NAME) assert_equal(self.type_num, typeinfo[self.NAME][1]) self.dtype = typeinfo[self.NAME][-1] self.elsize = typeinfo[self.NAME][2] / 8 self.dtypechar = typeinfo[self.NAME][0] def cast_types(self): return [self.__class__(_m) for _m in _cast_dict[self.NAME]] def all_types(self): return [self.__class__(_m) for _m in _type_names] def smaller_types(self): bits = typeinfo[self.NAME][3] types = [] for name in _type_names: if typeinfo[name][3] < bits: types.append(Type(name)) return types def equal_types(self): bits = typeinfo[self.NAME][3] types = [] for name in _type_names: if name == self.NAME: continue if typeinfo[name][3] == bits: types.append(Type(name)) return types def larger_types(self): bits = typeinfo[self.NAME][3] types = [] for name in _type_names: if typeinfo[name][3] > bits: types.append(Type(name)) return types class Array(object): def __init__(self, typ, dims, intent, obj): self.type = typ self.dims = dims self.intent = intent self.obj_copy = copy.deepcopy(obj) self.obj = obj # arr.dtypechar may be different from typ.dtypechar self.arr = wrap.call(typ.type_num, dims, intent.flags, obj) assert_(isinstance(self.arr, ndarray), repr(type(self.arr))) self.arr_attr = wrap.array_attrs(self.arr) if len(dims) > 1: if self.intent.is_intent('c'): assert_(intent.flags & wrap.F2PY_INTENT_C) assert_(not self.arr.flags['FORTRAN'], repr((self.arr.flags, getattr(obj, 'flags', None)))) assert_(self.arr.flags['CONTIGUOUS']) assert_(not self.arr_attr[6] & wrap.FORTRAN) else: assert_(not intent.flags & wrap.F2PY_INTENT_C) assert_(self.arr.flags['FORTRAN']) assert_(not self.arr.flags['CONTIGUOUS']) assert_(self.arr_attr[6] & wrap.FORTRAN) if obj is None: self.pyarr = None self.pyarr_attr = None return if intent.is_intent('cache'): assert_(isinstance(obj, ndarray), repr(type(obj))) self.pyarr = array(obj).reshape(*dims).copy() else: self.pyarr = array(array(obj, dtype=typ.dtypechar).reshape(*dims), order=self.intent.is_intent('c') and 'C' or 'F') assert_(self.pyarr.dtype == typ, repr((self.pyarr.dtype, typ))) assert_(self.pyarr.flags['OWNDATA'], (obj, intent)) self.pyarr_attr = wrap.array_attrs(self.pyarr) if len(dims) > 1: if self.intent.is_intent('c'): assert_(not self.pyarr.flags['FORTRAN']) assert_(self.pyarr.flags['CONTIGUOUS']) assert_(not self.pyarr_attr[6] & wrap.FORTRAN) else: assert_(self.pyarr.flags['FORTRAN']) assert_(not self.pyarr.flags['CONTIGUOUS']) assert_(self.pyarr_attr[6] & wrap.FORTRAN) assert_(self.arr_attr[1] == self.pyarr_attr[1]) # nd assert_(self.arr_attr[2] == self.pyarr_attr[2]) # dimensions if self.arr_attr[1] <= 1: assert_(self.arr_attr[3] == self.pyarr_attr[3], repr((self.arr_attr[3], self.pyarr_attr[3], self.arr.tobytes(), self.pyarr.tobytes()))) # strides assert_(self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:], repr((self.arr_attr[5], self.pyarr_attr[5]))) # descr assert_(self.arr_attr[6] == self.pyarr_attr[6], repr((self.arr_attr[6], self.pyarr_attr[6], flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]), flags2names(self.arr_attr[6]), intent))) # flags if intent.is_intent('cache'): assert_(self.arr_attr[5][3] >= self.type.elsize, repr((self.arr_attr[5][3], 
self.type.elsize))) else: assert_(self.arr_attr[5][3] == self.type.elsize, repr((self.arr_attr[5][3], self.type.elsize))) assert_(self.arr_equal(self.pyarr, self.arr)) if isinstance(self.obj, ndarray): if typ.elsize == Type(obj.dtype).elsize: if not intent.is_intent('copy') and self.arr_attr[1] <= 1: assert_(self.has_shared_memory()) def arr_equal(self, arr1, arr2): if arr1.shape != arr2.shape: return False s = arr1 == arr2 return alltrue(s.flatten()) def __str__(self): return str(self.arr) def has_shared_memory(self): """Check that created array shares data with input array. """ if self.obj is self.arr: return True if not isinstance(self.obj, ndarray): return False obj_attr = wrap.array_attrs(self.obj) return obj_attr[0] == self.arr_attr[0] class test_intent(unittest.TestCase): def test_in_out(self): assert_equal(str(intent.in_.out), 'intent(in,out)') assert_(intent.in_.c.is_intent('c')) assert_(not intent.in_.c.is_intent_exact('c')) assert_(intent.in_.c.is_intent_exact('c', 'in')) assert_(intent.in_.c.is_intent_exact('in', 'c')) assert_(not intent.in_.is_intent('c')) class _test_shared_memory: num2seq = [1, 2] num23seq = [[1, 2, 3], [4, 5, 6]] def test_in_from_2seq(self): a = self.array([2], intent.in_, self.num2seq) assert_(not a.has_shared_memory()) def test_in_from_2casttype(self): for t in self.type.cast_types(): obj = array(self.num2seq, dtype=t.dtype) a = self.array([len(self.num2seq)], intent.in_, obj) if t.elsize == self.type.elsize: assert_( a.has_shared_memory(), repr((self.type.dtype, t.dtype))) else: assert_(not a.has_shared_memory(), repr(t.dtype)) def test_inout_2seq(self): obj = array(self.num2seq, dtype=self.type.dtype) a = self.array([len(self.num2seq)], intent.inout, obj) assert_(a.has_shared_memory()) try: a = self.array([2], intent.in_.inout, self.num2seq) except TypeError as msg: if not str(msg).startswith('failed to initialize intent' '(inout|inplace|cache) array'): raise else: raise SystemError('intent(inout) should have failed on sequence') def test_f_inout_23seq(self): obj = array(self.num23seq, dtype=self.type.dtype, order='F') shape = (len(self.num23seq), len(self.num23seq[0])) a = self.array(shape, intent.in_.inout, obj) assert_(a.has_shared_memory()) obj = array(self.num23seq, dtype=self.type.dtype, order='C') shape = (len(self.num23seq), len(self.num23seq[0])) try: a = self.array(shape, intent.in_.inout, obj) except ValueError as msg: if not str(msg).startswith('failed to initialize intent' '(inout) array'): raise else: raise SystemError( 'intent(inout) should have failed on improper array') def test_c_inout_23seq(self): obj = array(self.num23seq, dtype=self.type.dtype) shape = (len(self.num23seq), len(self.num23seq[0])) a = self.array(shape, intent.in_.c.inout, obj) assert_(a.has_shared_memory()) def test_in_copy_from_2casttype(self): for t in self.type.cast_types(): obj = array(self.num2seq, dtype=t.dtype) a = self.array([len(self.num2seq)], intent.in_.copy, obj) assert_(not a.has_shared_memory(), repr(t.dtype)) def test_c_in_from_23seq(self): a = self.array([len(self.num23seq), len(self.num23seq[0])], intent.in_, self.num23seq) assert_(not a.has_shared_memory()) def test_in_from_23casttype(self): for t in self.type.cast_types(): obj = array(self.num23seq, dtype=t.dtype) a = self.array([len(self.num23seq), len(self.num23seq[0])], intent.in_, obj) assert_(not a.has_shared_memory(), repr(t.dtype)) def test_f_in_from_23casttype(self): for t in self.type.cast_types(): obj = array(self.num23seq, dtype=t.dtype, order='F') a = self.array([len(self.num23seq), 
len(self.num23seq[0])], intent.in_, obj) if t.elsize == self.type.elsize: assert_(a.has_shared_memory(), repr(t.dtype)) else: assert_(not a.has_shared_memory(), repr(t.dtype)) def test_c_in_from_23casttype(self): for t in self.type.cast_types(): obj = array(self.num23seq, dtype=t.dtype) a = self.array([len(self.num23seq), len(self.num23seq[0])], intent.in_.c, obj) if t.elsize == self.type.elsize: assert_(a.has_shared_memory(), repr(t.dtype)) else: assert_(not a.has_shared_memory(), repr(t.dtype)) def test_f_copy_in_from_23casttype(self): for t in self.type.cast_types(): obj = array(self.num23seq, dtype=t.dtype, order='F') a = self.array([len(self.num23seq), len(self.num23seq[0])], intent.in_.copy, obj) assert_(not a.has_shared_memory(), repr(t.dtype)) def test_c_copy_in_from_23casttype(self): for t in self.type.cast_types(): obj = array(self.num23seq, dtype=t.dtype) a = self.array([len(self.num23seq), len(self.num23seq[0])], intent.in_.c.copy, obj) assert_(not a.has_shared_memory(), repr(t.dtype)) def test_in_cache_from_2casttype(self): for t in self.type.all_types(): if t.elsize != self.type.elsize: continue obj = array(self.num2seq, dtype=t.dtype) shape = (len(self.num2seq),) a = self.array(shape, intent.in_.c.cache, obj) assert_(a.has_shared_memory(), repr(t.dtype)) a = self.array(shape, intent.in_.cache, obj) assert_(a.has_shared_memory(), repr(t.dtype)) obj = array(self.num2seq, dtype=t.dtype, order='F') a = self.array(shape, intent.in_.c.cache, obj) assert_(a.has_shared_memory(), repr(t.dtype)) a = self.array(shape, intent.in_.cache, obj) assert_(a.has_shared_memory(), repr(t.dtype)) try: a = self.array(shape, intent.in_.cache, obj[::-1]) except ValueError as msg: if not str(msg).startswith('failed to initialize' ' intent(cache) array'): raise else: raise SystemError( 'intent(cache) should have failed on multisegmented array') def test_in_cache_from_2casttype_failure(self): for t in self.type.all_types(): if t.elsize >= self.type.elsize: continue obj = array(self.num2seq, dtype=t.dtype) shape = (len(self.num2seq),) try: self.array(shape, intent.in_.cache, obj) # Should succeed except ValueError as msg: if not str(msg).startswith('failed to initialize' ' intent(cache) array'): raise else: raise SystemError( 'intent(cache) should have failed on smaller array') def test_cache_hidden(self): shape = (2,) a = self.array(shape, intent.cache.hide, None) assert_(a.arr.shape == shape) shape = (2, 3) a = self.array(shape, intent.cache.hide, None) assert_(a.arr.shape == shape) shape = (-1, 3) try: a = self.array(shape, intent.cache.hide, None) except ValueError as msg: if not str(msg).startswith('failed to create intent' '(cache|hide)|optional array'): raise else: raise SystemError( 'intent(cache) should have failed on undefined dimensions') def test_hidden(self): shape = (2,) a = self.array(shape, intent.hide, None) assert_(a.arr.shape == shape) assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) shape = (2, 3) a = self.array(shape, intent.hide, None) assert_(a.arr.shape == shape) assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']) shape = (2, 3) a = self.array(shape, intent.c.hide, None) assert_(a.arr.shape == shape) assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']) shape = (-1, 3) try: a = self.array(shape, intent.hide, None) except ValueError as msg: if not str(msg).startswith('failed to create intent' 
'(cache|hide)|optional array'): raise else: raise SystemError('intent(hide) should have failed' ' on undefined dimensions') def test_optional_none(self): shape = (2,) a = self.array(shape, intent.optional, None) assert_(a.arr.shape == shape) assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) shape = (2, 3) a = self.array(shape, intent.optional, None) assert_(a.arr.shape == shape) assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']) shape = (2, 3) a = self.array(shape, intent.c.optional, None) assert_(a.arr.shape == shape) assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']) def test_optional_from_2seq(self): obj = self.num2seq shape = (len(obj),) a = self.array(shape, intent.optional, obj) assert_(a.arr.shape == shape) assert_(not a.has_shared_memory()) def test_optional_from_23seq(self): obj = self.num23seq shape = (len(obj), len(obj[0])) a = self.array(shape, intent.optional, obj) assert_(a.arr.shape == shape) assert_(not a.has_shared_memory()) a = self.array(shape, intent.optional.c, obj) assert_(a.arr.shape == shape) assert_(not a.has_shared_memory()) def test_inplace(self): obj = array(self.num23seq, dtype=self.type.dtype) assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) shape = obj.shape a = self.array(shape, intent.inplace, obj) assert_(obj[1][2] == a.arr[1][2], repr((obj, a.arr))) a.arr[1][2] = 54 assert_(obj[1][2] == a.arr[1][2] == array(54, dtype=self.type.dtype), repr((obj, a.arr))) assert_(a.arr is obj) assert_(obj.flags['FORTRAN']) # obj attributes are changed inplace! assert_(not obj.flags['CONTIGUOUS']) def test_inplace_from_casttype(self): for t in self.type.cast_types(): if t is self.type: continue obj = array(self.num23seq, dtype=t.dtype) assert_(obj.dtype.type == t.dtype) assert_(obj.dtype.type is not self.type.dtype) assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) shape = obj.shape a = self.array(shape, intent.inplace, obj) assert_(obj[1][2] == a.arr[1][2], repr((obj, a.arr))) a.arr[1][2] = 54 assert_(obj[1][2] == a.arr[1][2] == array(54, dtype=self.type.dtype), repr((obj, a.arr))) assert_(a.arr is obj) assert_(obj.flags['FORTRAN']) # obj attributes changed inplace! assert_(not obj.flags['CONTIGUOUS']) assert_(obj.dtype.type is self.type.dtype) # obj changed inplace! for t in _type_names: exec('''\ class test_%s_gen(unittest.TestCase, _test_shared_memory ): def setUp(self): self.type = Type(%r) array = lambda self,dims,intent,obj: Array(Type(%r),dims,intent,obj) ''' % (t, t, t)) if __name__ == "__main__": setup() run_module_suite()
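# Illustrative sketch (separate from the test classes above): the intent tests
# ultimately check when a wrapped argument may reuse the caller's buffer.  The
# same rule can be shown with plain numpy (np.shares_memory needs NumPy >= 1.11):
# asarray() with a matching dtype shares memory, while a dtype cast or an
# explicit copy (the analogue of intent(copy)) allocates a new buffer.
def _shared_memory_sketch():
    import numpy as np
    obj = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32, order='F')
    same = np.asarray(obj, dtype=np.float32)              # no cast -> shared
    cast = np.asarray(obj, dtype=np.float64)              # cast -> new buffer
    copied = np.array(obj, dtype=np.float32, copy=True)   # like intent(copy)
    assert np.shares_memory(obj, same)
    assert not np.shares_memory(obj, cast)
    assert not np.shares_memory(obj, copied)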
from __future__ import absolute_import import logging import re import pip from pip.req import InstallRequirement from pip.req.req_file import COMMENT_RE from pip.utils import get_installed_distributions from pip._vendor import pkg_resources from pip._vendor.packaging.utils import canonicalize_name from pip._vendor.pkg_resources import RequirementParseError logger = logging.getLogger(__name__) def freeze( requirement=None, find_links=None, local_only=None, user_only=None, skip_regex=None, default_vcs=None, isolated=False, wheel_cache=None, skip=()): find_links = find_links or [] skip_match = None if skip_regex: skip_match = re.compile(skip_regex).search dependency_links = [] for dist in pkg_resources.working_set: if dist.has_metadata('dependency_links.txt'): dependency_links.extend( dist.get_metadata_lines('dependency_links.txt') ) for link in find_links: if '#egg=' in link: dependency_links.append(link) for link in find_links: yield '-f %s' % link installations = {} for dist in get_installed_distributions(local_only=local_only, skip=(), user_only=user_only): try: req = pip.FrozenRequirement.from_dist( dist, dependency_links ) except RequirementParseError: logger.warning( "Could not parse requirement: %s", dist.project_name ) continue installations[req.name] = req if requirement: # the options that don't get turned into an InstallRequirement # should only be emitted once, even if the same option is in multiple # requirements files, so we need to keep track of what has been emitted # so that we don't emit it again if it's seen again emitted_options = set() for req_file_path in requirement: with open(req_file_path) as req_file: for line in req_file: if (not line.strip() or line.strip().startswith('#') or (skip_match and skip_match(line)) or line.startswith(( '-r', '--requirement', '-Z', '--always-unzip', '-f', '--find-links', '-i', '--index-url', '--pre', '--trusted-host', '--process-dependency-links', '--extra-index-url'))): line = line.rstrip() if line not in emitted_options: emitted_options.add(line) yield line continue if line.startswith('-e') or line.startswith('--editable'): if line.startswith('-e'): line = line[2:].strip() else: line = line[len('--editable'):].strip().lstrip('=') line_req = InstallRequirement.from_editable( line, default_vcs=default_vcs, isolated=isolated, wheel_cache=wheel_cache, ) else: line_req = InstallRequirement.from_line( COMMENT_RE.sub('', line).strip(), isolated=isolated, wheel_cache=wheel_cache, ) if not line_req.name: logger.info( "Skipping line in requirement file [%s] because " "it's not clear what it would install: %s", req_file_path, line.strip(), ) logger.info( " (add #egg=PackageName to the URL to avoid" " this warning)" ) elif line_req.name not in installations: logger.warning( "Requirement file [%s] contains %s, but that " "package is not installed", req_file_path, COMMENT_RE.sub('', line).strip(), ) else: yield str(installations[line_req.name]).rstrip() del installations[line_req.name] yield( '## The following requirements were added by ' 'pip freeze:' ) for installation in sorted( installations.values(), key=lambda x: x.name.lower()): if canonicalize_name(installation.name) not in skip: yield str(installation).rstrip()
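# Usage sketch (illustrative only): freeze() is a generator that yields one
# line per find-links URL, per passed-through requirements-file option, and
# per installed distribution.  Wiring it to stdout is essentially what the
# `pip freeze` command does.  The keyword values below are examples, not
# defaults.
def _print_frozen_requirements():
    for line in freeze(local_only=True, skip=('pip', 'setuptools', 'wheel')):
        print(line)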
""" Base classes for writing management commands (named commands which can be executed through ``django-admin`` or ``manage.py``). """ from __future__ import unicode_literals import os import sys import warnings from argparse import ArgumentParser from optparse import OptionParser import django from django.core import checks from django.core.management.color import color_style, no_style from django.db import connections from django.utils.deprecation import RemovedInDjango110Warning from django.utils.encoding import force_str class CommandError(Exception): """ Exception class indicating a problem while executing a management command. If this exception is raised during the execution of a management command, it will be caught and turned into a nicely-printed error message to the appropriate output stream (i.e., stderr); as a result, raising this exception (with a sensible description of the error) is the preferred way to indicate that something has gone wrong in the execution of a command. """ pass class SystemCheckError(CommandError): """ The system check framework detected unrecoverable errors. """ pass class CommandParser(ArgumentParser): """ Customized ArgumentParser class to improve some error messages and prevent SystemExit in several occasions, as SystemExit is unacceptable when a command is called programmatically. """ def __init__(self, cmd, **kwargs): self.cmd = cmd super(CommandParser, self).__init__(**kwargs) def parse_args(self, args=None, namespace=None): # Catch missing argument for a better error message if (hasattr(self.cmd, 'missing_args_message') and not (args or any(not arg.startswith('-') for arg in args))): self.error(self.cmd.missing_args_message) return super(CommandParser, self).parse_args(args, namespace) def error(self, message): if self.cmd._called_from_command_line: super(CommandParser, self).error(message) else: raise CommandError("Error: %s" % message) def handle_default_options(options): """ Include any default options that all commands should accept here so that ManagementUtility can handle them before searching for user commands. """ if options.settings: os.environ['DJANGO_SETTINGS_MODULE'] = options.settings if options.pythonpath: sys.path.insert(0, options.pythonpath) class OutputWrapper(object): """ Wrapper around stdout/stderr """ @property def style_func(self): return self._style_func @style_func.setter def style_func(self, style_func): if style_func and self.isatty(): self._style_func = style_func else: self._style_func = lambda x: x def __init__(self, out, style_func=None, ending='\n'): self._out = out self.style_func = None self.ending = ending def __getattr__(self, name): return getattr(self._out, name) def isatty(self): return hasattr(self._out, 'isatty') and self._out.isatty() def write(self, msg, style_func=None, ending=None): ending = self.ending if ending is None else ending if ending and not msg.endswith(ending): msg += ending style_func = style_func or self.style_func self._out.write(force_str(style_func(msg))) class BaseCommand(object): """ The base class from which all management commands ultimately derive. Use this class if you want access to all of the mechanisms which parse the command-line arguments and work out what code to call in response; if you don't need to change any of that behavior, consider using one of the subclasses defined in this file. If you are interested in overriding/customizing various aspects of the command-parsing and -execution behavior, the normal flow works as follows: 1. 
``django-admin`` or ``manage.py`` loads the command class and calls its ``run_from_argv()`` method. 2. The ``run_from_argv()`` method calls ``create_parser()`` to get an ``ArgumentParser`` for the arguments, parses them, performs any environment changes requested by options like ``pythonpath``, and then calls the ``execute()`` method, passing the parsed arguments. 3. The ``execute()`` method attempts to carry out the command by calling the ``handle()`` method with the parsed arguments; any output produced by ``handle()`` will be printed to standard output and, if the command is intended to produce a block of SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``. 4. If ``handle()`` or ``execute()`` raised any exception (e.g. ``CommandError``), ``run_from_argv()`` will instead print an error message to ``stderr``. Thus, the ``handle()`` method is typically the starting point for subclasses; many built-in commands and command types either place all of their logic in ``handle()``, or perform some additional parsing work in ``handle()`` and then delegate from it to more specialized methods as needed. Several attributes affect behavior at various steps along the way: ``args`` A string listing the arguments accepted by the command, suitable for use in help messages; e.g., a command which takes a list of application names might set this to '<app_label app_label ...>'. ``can_import_settings`` A boolean indicating whether the command needs to be able to import Django settings; if ``True``, ``execute()`` will verify that this is possible before proceeding. Default value is ``True``. ``help`` A short description of the command, which will be printed in help messages. ``option_list`` This is the list of ``optparse`` options which will be fed into the command's ``OptionParser`` for parsing arguments. Deprecated and will be removed in Django 1.10. ``output_transaction`` A boolean indicating whether the command outputs SQL statements; if ``True``, the output will automatically be wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is ``False``. ``requires_system_checks`` A boolean; if ``True``, entire Django project will be checked for errors prior to executing the command. Default value is ``True``. To validate an individual application's models rather than all applications' models, call ``self.check(app_configs)`` from ``handle()``, where ``app_configs`` is the list of application's configuration provided by the app registry. ``leave_locale_alone`` A boolean indicating whether the locale set in settings should be preserved during the execution of the command instead of translations being deactivated. Default value is ``False``. Make sure you know what you are doing if you decide to change the value of this option in your custom command if it creates database content that is locale-sensitive and such content shouldn't contain any translations (like it happens e.g. with django.contrib.auth permissions) as activating any locale might cause unintended effects. This option can't be False when the can_import_settings option is set to False too because attempting to deactivate translations needs access to settings. This condition will generate a CommandError. """ # Metadata about this command. option_list = () help = '' args = '' # Configuration shortcuts that alter various logic. 
_called_from_command_line = False can_import_settings = True output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;" leave_locale_alone = False requires_system_checks = True def __init__(self, stdout=None, stderr=None, no_color=False): self.stdout = OutputWrapper(stdout or sys.stdout) self.stderr = OutputWrapper(stderr or sys.stderr) if no_color: self.style = no_style() else: self.style = color_style() self.stderr.style_func = self.style.ERROR @property def use_argparse(self): return not bool(self.option_list) def get_version(self): """ Return the Django version, which should be correct for all built-in Django commands. User-supplied commands should override this method. """ return django.get_version() def usage(self, subcommand): """ Return a brief description of how to use this command, by default from the attribute ``self.help``. """ usage = '%%prog %s [options] %s' % (subcommand, self.args) if self.help: return '%s\n\n%s' % (usage, self.help) else: return usage def create_parser(self, prog_name, subcommand): """ Create and return the ``ArgumentParser`` which will be used to parse the arguments to this command. """ if not self.use_argparse: def store_as_int(option, opt_str, value, parser): setattr(parser.values, option.dest, int(value)) # Backwards compatibility: use deprecated optparse module warnings.warn("OptionParser usage for Django management commands " "is deprecated, use ArgumentParser instead", RemovedInDjango110Warning) parser = OptionParser(prog=prog_name, usage=self.usage(subcommand), version=self.get_version()) parser.add_option('-v', '--verbosity', action='callback', dest='verbosity', default=1, type='choice', choices=['0', '1', '2', '3'], callback=store_as_int, help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output') parser.add_option('--settings', help=( 'The Python path to a settings module, e.g. ' '"myproject.settings.main". If this isn\'t provided, the ' 'DJANGO_SETTINGS_MODULE environment variable will be used.' ), ) parser.add_option('--pythonpath', help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".'), parser.add_option('--traceback', action='store_true', help='Raise on CommandError exceptions') parser.add_option('--no-color', action='store_true', dest='no_color', default=False, help="Don't colorize the command output.") for opt in self.option_list: parser.add_option(opt) else: parser = CommandParser(self, prog="%s %s" % (os.path.basename(prog_name), subcommand), description=self.help or None) parser.add_argument('--version', action='version', version=self.get_version()) parser.add_argument('-v', '--verbosity', action='store', dest='verbosity', default='1', type=int, choices=[0, 1, 2, 3], help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output') parser.add_argument('--settings', help=( 'The Python path to a settings module, e.g. ' '"myproject.settings.main". If this isn\'t provided, the ' 'DJANGO_SETTINGS_MODULE environment variable will be used.' ), ) parser.add_argument('--pythonpath', help='A directory to add to the Python path, e.g. 
"/home/djangoprojects/myproject".') parser.add_argument('--traceback', action='store_true', help='Raise on CommandError exceptions') parser.add_argument('--no-color', action='store_true', dest='no_color', default=False, help="Don't colorize the command output.") if self.args: # Keep compatibility and always accept positional arguments, like optparse when args is set parser.add_argument('args', nargs='*') self.add_arguments(parser) return parser def add_arguments(self, parser): """ Entry point for subclassed commands to add custom arguments. """ pass def print_help(self, prog_name, subcommand): """ Print the help message for this command, derived from ``self.usage()``. """ parser = self.create_parser(prog_name, subcommand) parser.print_help() def run_from_argv(self, argv): """ Set up any environment changes requested (e.g., Python path and Django settings), then run this command. If the command raises a ``CommandError``, intercept it and print it sensibly to stderr. If the ``--traceback`` option is present or the raised ``Exception`` is not ``CommandError``, raise it. """ self._called_from_command_line = True parser = self.create_parser(argv[0], argv[1]) if self.use_argparse: options = parser.parse_args(argv[2:]) cmd_options = vars(options) # Move positional args out of options to mimic legacy optparse args = cmd_options.pop('args', ()) else: options, args = parser.parse_args(argv[2:]) cmd_options = vars(options) handle_default_options(options) try: self.execute(*args, **cmd_options) except Exception as e: if options.traceback or not isinstance(e, CommandError): raise # SystemCheckError takes care of its own formatting. if isinstance(e, SystemCheckError): self.stderr.write(str(e), lambda x: x) else: self.stderr.write('%s: %s' % (e.__class__.__name__, e)) sys.exit(1) finally: connections.close_all() def execute(self, *args, **options): """ Try to execute this command, performing system checks if needed (as controlled by the ``requires_system_checks`` attribute, except if force-skipped). """ if options.get('no_color'): self.style = no_style() self.stderr.style_func = None if options.get('stdout'): self.stdout = OutputWrapper(options['stdout']) if options.get('stderr'): self.stderr = OutputWrapper(options.get('stderr'), self.stderr.style_func) saved_locale = None if not self.leave_locale_alone: # Only mess with locales if we can assume we have a working # settings file, because django.utils.translation requires settings # (The final saying about whether the i18n machinery is active will be # found in the value of the USE_I18N setting) if not self.can_import_settings: raise CommandError("Incompatible values of 'leave_locale_alone' " "(%s) and 'can_import_settings' (%s) command " "options." % (self.leave_locale_alone, self.can_import_settings)) # Deactivate translations, because django-admin creates database # content like permissions, and those shouldn't contain any # translations. from django.utils import translation saved_locale = translation.get_language() translation.deactivate_all() try: if (self.requires_system_checks and not options.get('skip_validation') and # Remove at the end of deprecation for `skip_validation`. not options.get('skip_checks')): self.check() output = self.handle(*args, **options) if output: if self.output_transaction: # This needs to be imported here, because it relies on # settings. 
from django.db import connections, DEFAULT_DB_ALIAS connection = connections[options.get('database', DEFAULT_DB_ALIAS)] if connection.ops.start_transaction_sql(): self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql())) self.stdout.write(output) if self.output_transaction: self.stdout.write('\n' + self.style.SQL_KEYWORD(connection.ops.end_transaction_sql())) finally: if saved_locale is not None: translation.activate(saved_locale) def check(self, app_configs=None, tags=None, display_num_errors=False, include_deployment_checks=False): """ Uses the system check framework to validate entire Django project. Raises CommandError for any serious message (error or critical errors). If there are only light messages (like warnings), they are printed to stderr and no exception is raised. """ all_issues = checks.run_checks( app_configs=app_configs, tags=tags, include_deployment_checks=include_deployment_checks, ) header, body, footer = "", "", "" visible_issue_count = 0 # excludes silenced warnings if all_issues: debugs = [e for e in all_issues if e.level < checks.INFO and not e.is_silenced()] infos = [e for e in all_issues if checks.INFO <= e.level < checks.WARNING and not e.is_silenced()] warnings = [e for e in all_issues if checks.WARNING <= e.level < checks.ERROR and not e.is_silenced()] errors = [e for e in all_issues if checks.ERROR <= e.level < checks.CRITICAL] criticals = [e for e in all_issues if checks.CRITICAL <= e.level] sorted_issues = [ (criticals, 'CRITICALS'), (errors, 'ERRORS'), (warnings, 'WARNINGS'), (infos, 'INFOS'), (debugs, 'DEBUGS'), ] for issues, group_name in sorted_issues: if issues: visible_issue_count += len(issues) formatted = ( self.style.ERROR(force_str(e)) if e.is_serious() else self.style.WARNING(force_str(e)) for e in issues) formatted = "\n".join(sorted(formatted)) body += '\n%s:\n%s\n' % (group_name, formatted) if visible_issue_count: header = "System check identified some issues:\n" if display_num_errors: if visible_issue_count: footer += '\n' footer += "System check identified %s (%s silenced)." % ( "no issues" if visible_issue_count == 0 else "1 issue" if visible_issue_count == 1 else "%s issues" % visible_issue_count, len(all_issues) - visible_issue_count, ) if any(e.is_serious() and not e.is_silenced() for e in all_issues): msg = self.style.ERROR("SystemCheckError: %s" % header) + body + footer raise SystemCheckError(msg) else: msg = header + body + footer if msg: if visible_issue_count: self.stderr.write(msg, lambda x: x) else: self.stdout.write(msg) def handle(self, *args, **options): """ The actual logic of the command. Subclasses must implement this method. """ raise NotImplementedError('subclasses of BaseCommand must provide a handle() method') class AppCommand(BaseCommand): """ A management command which takes one or more installed application labels as arguments, and does something with each of them. Rather than implementing ``handle()``, subclasses must implement ``handle_app_config()``, which will be called once for each application. """ missing_args_message = "Enter at least one application label." def add_arguments(self, parser): parser.add_argument('args', metavar='app_label', nargs='+', help='One or more application label.') def handle(self, *app_labels, **options): from django.apps import apps try: app_configs = [apps.get_app_config(app_label) for app_label in app_labels] except (LookupError, ImportError) as e: raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" 
% e) output = [] for app_config in app_configs: app_output = self.handle_app_config(app_config, **options) if app_output: output.append(app_output) return '\n'.join(output) def handle_app_config(self, app_config, **options): """ Perform the command's actions for app_config, an AppConfig instance corresponding to an application label given on the command line. """ raise NotImplementedError( "Subclasses of AppCommand must provide" "a handle_app_config() method.") class LabelCommand(BaseCommand): """ A management command which takes one or more arbitrary arguments (labels) on the command line, and does something with each of them. Rather than implementing ``handle()``, subclasses must implement ``handle_label()``, which will be called once for each label. If the arguments should be names of installed applications, use ``AppCommand`` instead. """ label = 'label' missing_args_message = "Enter at least one %s." % label def add_arguments(self, parser): parser.add_argument('args', metavar=self.label, nargs='+') def handle(self, *labels, **options): output = [] for label in labels: label_output = self.handle_label(label, **options) if label_output: output.append(label_output) return '\n'.join(output) def handle_label(self, label, **options): """ Perform the command's actions for ``label``, which will be the string as given on the command line. """ raise NotImplementedError('subclasses of LabelCommand must provide a handle_label() method') class NoArgsCommand(BaseCommand): """ A command which takes no arguments on the command line. Rather than implementing ``handle()``, subclasses must implement ``handle_noargs()``; ``handle()`` itself is overridden to ensure no arguments are passed to the command. Attempting to pass arguments will raise ``CommandError``. """ args = '' def __init__(self): warnings.warn( "NoArgsCommand class is deprecated and will be removed in Django 1.10. " "Use BaseCommand instead, which takes no arguments by default.", RemovedInDjango110Warning ) super(NoArgsCommand, self).__init__() def handle(self, *args, **options): if args: raise CommandError("Command doesn't accept any arguments") return self.handle_noargs(**options) def handle_noargs(self, **options): """ Perform this command's actions. """ raise NotImplementedError('subclasses of NoArgsCommand must provide a handle_noargs() method')
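# Minimal usage sketch for the BaseCommand API documented above.  In a real
# project this class would live in <app>/management/commands/greet.py (the
# module name becomes the command name); the app, command and argument names
# here are made up for illustration.
class Command(BaseCommand):
    help = 'Prints a greeting for each name given on the command line.'

    def add_arguments(self, parser):
        parser.add_argument('names', nargs='+', help='Names to greet.')
        parser.add_argument('--shout', action='store_true',
                            help='Upper-case the greeting.')

    def handle(self, *args, **options):
        # Invoked as e.g. ``manage.py greet Alice Bob --shout``.
        for name in options['names']:
            msg = 'Hello, %s!' % name
            if options['shout']:
                msg = msg.upper()
            self.stdout.write(msg)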
from django import template from django.apps import apps from django.utils.encoding import iri_to_uri from django.utils.six.moves.urllib.parse import urljoin register = template.Library() class PrefixNode(template.Node): def __repr__(self): return "<PrefixNode for %r>" % self.name def __init__(self, varname=None, name=None): if name is None: raise template.TemplateSyntaxError( "Prefix nodes must be given a name to return.") self.varname = varname self.name = name @classmethod def handle_token(cls, parser, token, name): """ Class method to parse prefix node and return a Node. """ # token.split_contents() isn't useful here because tags using this method don't accept variable as arguments tokens = token.contents.split() if len(tokens) > 1 and tokens[1] != 'as': raise template.TemplateSyntaxError( "First argument in '%s' must be 'as'" % tokens[0]) if len(tokens) > 1: varname = tokens[2] else: varname = None return cls(varname, name) @classmethod def handle_simple(cls, name): try: from django.conf import settings except ImportError: prefix = '' else: prefix = iri_to_uri(getattr(settings, name, '')) return prefix def render(self, context): prefix = self.handle_simple(self.name) if self.varname is None: return prefix context[self.varname] = prefix return '' @register.tag def get_static_prefix(parser, token): """ Populates a template variable with the static prefix, ``settings.STATIC_URL``. Usage:: {% get_static_prefix [as varname] %} Examples:: {% get_static_prefix %} {% get_static_prefix as static_prefix %} """ return PrefixNode.handle_token(parser, token, "STATIC_URL") @register.tag def get_media_prefix(parser, token): """ Populates a template variable with the media prefix, ``settings.MEDIA_URL``. Usage:: {% get_media_prefix [as varname] %} Examples:: {% get_media_prefix %} {% get_media_prefix as media_prefix %} """ return PrefixNode.handle_token(parser, token, "MEDIA_URL") class StaticNode(template.Node): def __init__(self, varname=None, path=None): if path is None: raise template.TemplateSyntaxError( "Static template nodes must be given a path to return.") self.path = path self.varname = varname def url(self, context): path = self.path.resolve(context) return self.handle_simple(path) def render(self, context): url = self.url(context) if self.varname is None: return url context[self.varname] = url return '' @classmethod def handle_simple(cls, path): if apps.is_installed('django.contrib.staticfiles'): from django.contrib.staticfiles.storage import staticfiles_storage return staticfiles_storage.url(path) else: return urljoin(PrefixNode.handle_simple("STATIC_URL"), path) @classmethod def handle_token(cls, parser, token): """ Class method to parse prefix node and return a Node. """ bits = token.split_contents() if len(bits) < 2: raise template.TemplateSyntaxError( "'%s' takes at least one argument (path to file)" % bits[0]) path = parser.compile_filter(bits[1]) if len(bits) >= 2 and bits[-2] == 'as': varname = bits[3] else: varname = None return cls(varname, path) @register.tag('static') def do_static(parser, token): """ Joins the given path with the STATIC_URL setting. Usage:: {% static path [as varname] %} Examples:: {% static "myapp/css/base.css" %} {% static variable_with_path %} {% static "myapp/css/base.css" as admin_base_css %} {% static variable_with_path as varname %} """ return StaticNode.handle_token(parser, token) def static(path): """ Given a relative path to a static asset, return the absolute path to the asset. """ return StaticNode.handle_simple(path)
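# Illustrative sketch of the fallback branch in StaticNode.handle_simple():
# when django.contrib.staticfiles is not installed, static() simply joins the
# path onto settings.STATIC_URL.  The settings values below are assumptions
# made for the example, not project defaults.
def _static_fallback_demo():
    import django
    from django.conf import settings
    if not settings.configured:
        settings.configure(STATIC_URL='/static/', INSTALLED_APPS=[])
        django.setup()
    return static('myapp/css/base.css')  # -> '/static/myapp/css/base.css'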
urlpatterns = [] handler404 = 'csrf_tests.views.csrf_token_error_handler'
"""Class for storing shared keys.""" from utils.cryptomath import * from utils.compat import * from mathtls import * from Session import Session from BaseDB import BaseDB class SharedKeyDB(BaseDB): """This class represent an in-memory or on-disk database of shared keys. A SharedKeyDB can be passed to a server handshake function to authenticate a client based on one of the shared keys. This class is thread-safe. """ def __init__(self, filename=None): """Create a new SharedKeyDB. @type filename: str @param filename: Filename for an on-disk database, or None for an in-memory database. If the filename already exists, follow this with a call to open(). To create a new on-disk database, follow this with a call to create(). """ BaseDB.__init__(self, filename, "shared key") def _getItem(self, username, valueStr): session = Session() session._createSharedKey(username, valueStr) return session def __setitem__(self, username, sharedKey): """Add a shared key to the database. @type username: str @param username: The username to associate the shared key with. Must be less than or equal to 16 characters in length, and must not already be in the database. @type sharedKey: str @param sharedKey: The shared key to add. Must be less than 48 characters in length. """ BaseDB.__setitem__(self, username, sharedKey) def _setItem(self, username, value): if len(username)>16: raise ValueError("username too long") if len(value)>=48: raise ValueError("shared key too long") return value def _checkItem(self, value, username, param): newSession = self._getItem(username, param) return value.masterSecret == newSession.masterSecret
"""Utility functions for Windows builds. These functions are executed via gyp-win-tool when using the ninja generator. """ from ctypes import windll, wintypes import os import shutil import subprocess import sys BASE_DIR = os.path.dirname(os.path.abspath(__file__)) def main(args): executor = WinTool() exit_code = executor.Dispatch(args) if exit_code is not None: sys.exit(exit_code) class LinkLock(object): """A flock-style lock to limit the number of concurrent links to one. Uses a session-local mutex based on the file's directory. """ def __enter__(self): name = 'Local\\%s' % BASE_DIR.replace('\\', '_').replace(':', '_') self.mutex = windll.kernel32.CreateMutexW( wintypes.c_int(0), wintypes.c_int(0), wintypes.create_unicode_buffer(name)) assert self.mutex result = windll.kernel32.WaitForSingleObject( self.mutex, wintypes.c_int(0xFFFFFFFF)) # 0x80 means another process was killed without releasing the mutex, but # that this process has been given ownership. This is fine for our # purposes. assert result in (0, 0x80), ( "%s, %s" % (result, windll.kernel32.GetLastError())) def __exit__(self, type, value, traceback): windll.kernel32.ReleaseMutex(self.mutex) windll.kernel32.CloseHandle(self.mutex) class WinTool(object): """This class performs all the Windows tooling steps. The methods can either be executed directly, or dispatched from an argument list.""" def Dispatch(self, args): """Dispatches a string command to a method.""" if len(args) < 1: raise Exception("Not enough arguments") method = "Exec%s" % self._CommandifyName(args[0]) return getattr(self, method)(*args[1:]) def _CommandifyName(self, name_string): """Transforms a tool name like recursive-mirror to RecursiveMirror.""" return name_string.title().replace('-', '') def _GetEnv(self, arch): """Gets the saved environment from a file for a given architecture.""" # The environment is saved as an "environment block" (see CreateProcess # and msvs_emulation for details). We convert to a dict here. # Drop last 2 NULs, one for list terminator, one for trailing vs. separator. pairs = open(arch).read()[:-2].split('\0') kvs = [item.split('=', 1) for item in pairs] return dict(kvs) def ExecStamp(self, path): """Simple stamp command.""" open(path, 'w').close() def ExecRecursiveMirror(self, source, dest): """Emulation of rm -rf out && cp -af in out.""" if os.path.exists(dest): if os.path.isdir(dest): shutil.rmtree(dest) else: os.unlink(dest) if os.path.isdir(source): shutil.copytree(source, dest) else: shutil.copy2(source, dest) def ExecLinkWrapper(self, arch, *args): """Filter diagnostic output from link that looks like: ' Creating library ui.dll.lib and object ui.dll.exp' This happens when there are exports from the dll or exe. """ with LinkLock(): env = self._GetEnv(arch) popen = subprocess.Popen(args, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = popen.communicate() for line in out.splitlines(): if not line.startswith(' Creating library '): print line return popen.returncode def ExecManifestWrapper(self, arch, *args): """Run manifest tool with environment set. 
Strip out undesirable warning (some XML blocks are recognized by the OS loader, but not the manifest tool).""" env = self._GetEnv(arch) popen = subprocess.Popen(args, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = popen.communicate() for line in out.splitlines(): if line and 'manifest authoring warning 81010002' not in line: print line return popen.returncode def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl, *flags): """Filter noisy filenames output from MIDL compile step that isn't quietable via command line flags. """ args = ['midl', '/nologo'] + list(flags) + [ '/out', outdir, '/tlb', tlb, '/h', h, '/dlldata', dlldata, '/iid', iid, '/proxy', proxy, idl] env = self._GetEnv(arch) popen = subprocess.Popen(args, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = popen.communicate() # Filter junk out of stdout, and write filtered versions. Output we want # to filter is pairs of lines that look like this: # Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl # objidl.idl lines = out.splitlines() prefix = 'Processing ' processing = set(os.path.basename(x) for x in lines if x.startswith(prefix)) for line in lines: if not line.startswith(prefix) and line not in processing: print line return popen.returncode def ExecAsmWrapper(self, arch, *args): """Filter logo banner from invocations of asm.exe.""" env = self._GetEnv(arch) # MSVS doesn't assemble x64 asm files. if arch == 'environment.x64': return 0 popen = subprocess.Popen(args, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = popen.communicate() for line in out.splitlines(): if (not line.startswith('Copyright (C) Microsoft Corporation') and not line.startswith('Microsoft (R) Macro Assembler') and not line.startswith(' Assembling: ') and line): print line return popen.returncode def ExecRcWrapper(self, arch, *args): """Filter logo banner from invocations of rc.exe. Older versions of RC don't support the /nologo flag.""" env = self._GetEnv(arch) popen = subprocess.Popen(args, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = popen.communicate() for line in out.splitlines(): if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and not line.startswith('Copyright (C) Microsoft Corporation') and line): print line return popen.returncode def ExecActionWrapper(self, arch, rspfile, *dir): """Runs an action command line from a response file using the environment for |arch|. If |dir| is supplied, use that as the working directory.""" env = self._GetEnv(arch) args = open(rspfile).read() dir = dir[0] if dir else None popen = subprocess.Popen(args, shell=True, env=env, cwd=dir) popen.wait() return popen.returncode if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
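# Illustrative sketch of the Dispatch() naming convention described above:
# 'recursive-mirror' is title-cased to 'RecursiveMirror' by _CommandifyName()
# and routed to ExecRecursiveMirror.  The file name below is a placeholder.
def _win_tool_dispatch_example():
    tool = WinTool()
    assert tool._CommandifyName('recursive-mirror') == 'RecursiveMirror'
    tool.Dispatch(['stamp', 'example.stamp'])  # creates/empties example.stamp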
from tests import unittest from tests import mock from unbound_ec2 import server from tests import attrs class TestServer(server.Server): HANDLE_FORWARD_RESULT = 'dummy_handle_forward' HANDLE_PASS_RESULT = True DNSMSG = mock.MagicMock() def handle_request(self, _id, event, qstate, qdata, request_type): return self.HANDLE_FORWARD_RESULT def new_dns_msg(self, qname): return self.DNSMSG class TestAbstractServer(unittest.TestCase): def setUp(self): server.log_info = mock.Mock() lookup_mock = mock.MagicMock() self.zone = '.bogus.tld' self.reverse_zone = '127.in-addr.arpa' self.ttl = 'bogus_ttl' self.ip_order = 'bogus_ip_order' self.forwarded_zones = '' self.srv = TestServer(self.zone, self.reverse_zone, self.ttl, lookup_mock, self.ip_order, self.forwarded_zones) def tearDown(self): self.srv = None def test_operate_event_new(self): id = 'bogus_id' event = attrs['MODULE_EVENT_NEW'] qstate = mock.MagicMock() qdata = mock.MagicMock() qstate.qinfo.qname_str = "fqdn.not-bogus.tld" self.assertTrue(self.srv.operate(id, event, qstate, qdata)) qstate.ext_state.__setitem__.assert_called_with(id, attrs['MODULE_WAIT_MODULE']) def test_operate_event_pass(self): id = 'bogus_id' event = attrs['MODULE_EVENT_PASS'] qstate = mock.MagicMock() qdata = mock.MagicMock() qstate.qinfo.qname_str = "fqdn.not-bogus.tld" self.assertTrue(self.srv.operate(id, event, qstate, qdata)) qstate.ext_state.__setitem__.assert_called_with(id, attrs['MODULE_WAIT_MODULE']) def test_operate_event_moddone(self): id = 'bogus_id' event = attrs['MODULE_EVENT_MODDONE'] qstate = mock.MagicMock() qdata = mock.MagicMock() self.assertTrue(self.srv.operate(id, event, qstate, qdata)) qstate.ext_state.__setitem__.assert_called_with(id, attrs['MODULE_FINISHED']) def test_operate_forward(self): id = 'bogus_id' event = attrs['MODULE_EVENT_NEW'] qstate = mock.MagicMock() qstate.qinfo.qtype = attrs['RR_TYPE_A'] qstate.qinfo.qname_str = 'bogus-name%s.' % self.zone qdata = mock.MagicMock() self.assertEqual(self.srv.operate(id, event, qstate, qdata), TestServer.HANDLE_FORWARD_RESULT) qstate.qinfo.qtype = attrs['RR_TYPE_ANY'] self.assertEqual(self.srv.operate(id, event, qstate, qdata), TestServer.HANDLE_FORWARD_RESULT) def test_forwarded_zones(self): server.log_info = mock.Mock() lookup_mock = mock.MagicMock() forwarded_zones = '.subdomain%s' % self.zone self.srv2 = TestServer(self.zone, self.reverse_zone, self.ttl, lookup_mock, self.ip_order, forwarded_zones) id = 'bogus_id' event = attrs['MODULE_EVENT_NEW'] qstate = mock.MagicMock() qstate.qinfo.qtype = attrs['RR_TYPE_A'] qstate.qinfo.qname_str = 'bogus-name%s' % self.forwarded_zones qdata = mock.MagicMock() self.assertEqual(self.srv.operate(id, event, qstate, qdata), TestServer.HANDLE_PASS_RESULT) qstate.ext_state.__setitem__.assert_called_with(id, attrs['MODULE_WAIT_MODULE']) class TestAuthoritativeServer(unittest.TestCase): def setUp(self): server.log_info = mock.Mock() lookup_mock = mock.MagicMock() self.zone = '.bogus.tld' self.reverse_zone = '127.in-addr.arpa' self.ttl = 'bogus_ttl' self.ip_order = 'bogus_ip_order' self.forwarded_zones = '' self.srv = server.Authoritative(self.zone, self.reverse_zone, self.ttl, lookup_mock, self.ip_order, self.forwarded_zones) def tearDown(self): self.srv = None def test_handle_forward(self): id = 'bogus_id' event = attrs['MODULE_EVENT_NEW'] qstate = mock.MagicMock() qstate.qinfo.qtype = attrs['RR_TYPE_A'] qstate.qinfo.qname_str = 'bogus-name%s.' 
% self.zone qdata = mock.MagicMock() server.DNSMessage = mock.MagicMock() self.assertTrue(self.srv.operate(id, event, qstate, qdata)) def test_handle_empty(self): id = 'bogus_id' event = attrs['MODULE_EVENT_NEW'] qstate = mock.MagicMock() qstate.qinfo.qtype = attrs['RR_TYPE_TXT'] qstate.qinfo.qname_str = 'bogus-name%s.' % self.zone qdata = mock.MagicMock() server.DNSMessage = mock.MagicMock() self.assertTrue(self.srv.operate(id, event, qstate, qdata)) class TestCachingServer(unittest.TestCase): def setUp(self): server.log_info = mock.Mock() self.lookup_mock = mock.MagicMock() self.zone = '.bogus.tld' self.reverse_zone = '127.in-addr.arpa' self.ttl = 88888881 self.ip_order = 'bogus_ip_order' self.forwarded_zones = '' self.srv = server.Caching(self.zone, self.reverse_zone, self.ttl, self.lookup_mock, self.ip_order, self.forwarded_zones) def tearDown(self): self.srv = None def test_handle_forward(self): server.storeQueryInCache = mock.Mock() server.DNSMessage = mock.MagicMock() instances_mock = mock.MagicMock() instances_mock.tags = {'Address': 'bogus_ip_address'} self.lookup_mock.lookup.return_value = [instances_mock] id = 'bogus_id' event = attrs['MODULE_EVENT_NEW'] qstate = mock.MagicMock() qstate.qinfo.qtype = attrs['RR_TYPE_A'] qstate.qinfo.qname_str = 'bogus-name%s.' % self.zone qdata = mock.MagicMock() self.assertTrue(self.srv.operate(id, event, qstate, qdata)) qstate.ext_state.__setitem__.assert_called_with(id, attrs['MODULE_FINISHED']) self.assertEqual(qstate.return_msg.rep.security, 2) server.DNSMessage.return_value.answer.append.assert_called_with( '%s %d IN A %s' % (qstate.qinfo.qname_str, self.ttl, 'bogus_ip_address'))
import csv import matplotlib.pyplot as plt import numpy as np import scipy.stats as stats from scipy.optimize import curve_fit def countKey(key,listDataDicts): outDict = {} for row in listDataDicts: try: outDict[row[key]] += 1 except KeyError: outDict[row[key]] = 1 return outDict def avgUse30Days(key, listDataDicts): totalDays = 0 numberUsers = 0 for person in listDataDicts: if int(person[key]) < 31 : totalDays += int(person[key]) numberUsers += 1 return (1.0*totalDays/numberUsers) def avgUse30DaysWithZeros(key, listDataDicts): totalDays = 0 numberUsers = 0 for person in listDataDicts: if ( int(person[key]) < 31 ): totalDays += int(person[key]) numberUsers += 1 elif ( int(person[key]) == 93 ): numberUsers += 1 else: pass return (1.0*totalDays/numberUsers) def powerLaw(x,a,b): return a*(x**(-b)) def expDecay(x,a,b): return a*np.exp(b*x) listDataDicts = [] with open('34933-0001-Data.tsv', 'rb') as tsvFile: tsvReader = csv.DictReader(tsvFile,delimiter='\t') for row in tsvReader: listDataDicts.append(row) ageFirstUseKeys = ['CIGTRY', 'SNUFTRY', 'CHEWTRY', 'CIGARTRY', 'ALCTRY', 'MJAGE', 'COCAGE', 'HERAGE', 'HALLAGE', 'INHAGE', 'ANALAGE', 'TRANAGE', 'STIMAGE', 'SEDAGE'] useLast30Keys = ['CIG30USE','SNF30USE','CHW30USE','CGR30USE','ALCDAYS','MJDAY30A','COCUS30A','HER30USE','HAL30USE','INHDY30A','PRDAYPMO','TRDAYPMO','STDAYPMO','SVDAYPMO'] xdata = [] ydata = [] for person in listDataDicts: for i in range(len(ageFirstUseKeys)): if (int(person[ageFirstUseKeys[i]]) < 900) and (int(person[useLast30Keys[i]]) < 31): xdata.append(int(person[ageFirstUseKeys[i]])) ydata.append(int(person[useLast30Keys[i]])) slope,intercept,rValue,pValue,stdErr = stats.linregress(xdata,ydata) print "Drug First Use Age vs Usage Frequency Linear Regression" print "Slope: %f, Intercept: %f, RSQ-Value: %f, P-Value: %f, Standard Error: %f,\n 95%% Confidence Interval: %f +- %f\n" %(slope,intercept,rValue*rValue,pValue,stdErr, slope, 1.96*stdErr) '''# Curve fit with a power law xfit = range(90) popt1, pcov1 = curve_fit(powerLaw, xdata, ydata) print "Power Law Curve fit: ",popt1,np.sqrt(np.diag(pcov1)),"\n" fitLiney1 = np.zeros(len(xfit)) for i in range(len(xfit)): fitLiney1[i] = powerLaw( xfit[i], popt1[0], popt1[1] ) ''' xdata2 = [ x for x in range(89) ] ydata2 = [ (x*slope + intercept) for x in range(89) ] plt.plot(xdata,ydata,'b.',xdata2,ydata2,'r-') plt.title("Age of First Use vs Usage in the Last 30 Days") plt.xlabel("Age of First Use") plt.ylabel("Usage in the Past 30 Days)") plt.legend(["Data","Linear Fit"]) plt.xlim(0,90) plt.ylim(0,31) plt.tight_layout() plt.show()
import sys import types import typing as t import decorator as deco from gssapi.raw.misc import GSSError if t.TYPE_CHECKING: from gssapi.sec_contexts import SecurityContext def import_gssapi_extension( name: str, ) -> t.Optional[types.ModuleType]: """Import a GSSAPI extension module This method imports a GSSAPI extension module based on the name of the extension (not including the 'ext_' prefix). If the extension is not available, the method retuns None. Args: name (str): the name of the extension Returns: module: Either the extension module or None """ try: path = 'gssapi.raw.ext_{0}'.format(name) __import__(path) return sys.modules[path] except ImportError: return None def inquire_property( name: str, doc: t.Optional[str] = None ) -> property: """Creates a property based on an inquire result This method creates a property that calls the :python:`_inquire` method, and return the value of the requested information. Args: name (str): the name of the 'inquire' result information Returns: property: the created property """ def inquire_property(self: "SecurityContext") -> t.Any: if not self._started: msg = (f"Cannot read {name} from a security context whose " "establishment has not yet been started.") raise AttributeError(msg) return getattr(self._inquire(**{name: True}), name) return property(inquire_property, doc=doc) _ENCODING = 'UTF-8' def _get_encoding() -> str: """Gets the current encoding used for strings. This value is used to encode and decode string values like names. Returns: str: the current encoding """ return _ENCODING def set_encoding( enc: str, ) -> None: """Sets the current encoding used for strings This value is used to encode and decode string values like names. Args: enc: the encoding to use """ global _ENCODING _ENCODING = enc def _encode_dict( d: t.Dict[t.Union[bytes, str], t.Union[bytes, str]], ) -> t.Dict[bytes, bytes]: """Encodes any relevant strings in a dict""" def enc(x: t.Union[bytes, str]) -> bytes: if isinstance(x, str): return x.encode(_ENCODING) else: return x return {enc(k): enc(v) for k, v in d.items()} @deco.decorator def catch_and_return_token( func: t.Callable, self: "SecurityContext", *args: t.Any, **kwargs: t.Any, ) -> t.Optional[bytes]: """Optionally defer exceptions and return a token instead When `__DEFER_STEP_ERRORS__` is set on the implementing class or instance, methods wrapped with this wrapper will catch and save their :python:`GSSError` exceptions and instead return the result token attached to the exception. The exception can be later retrived through :python:`_last_err` (and :python:`_last_tb` when Python 2 is in use). """ try: return func(self, *args, **kwargs) except GSSError as e: defer_step_errors = getattr(self, '__DEFER_STEP_ERRORS__', False) if e.token is not None and defer_step_errors: self._last_err = e # skip the "return func" line above in the traceback tb = e.__traceback__.tb_next # type: ignore[union-attr] self._last_err.__traceback__ = tb return e.token else: raise @deco.decorator def check_last_err( func: t.Callable, self: "SecurityContext", *args: t.Any, **kwargs: t.Any, ) -> t.Any: """Check and raise deferred errors before running the function This method checks :python:`_last_err` before running the wrapped function. If present and not None, the exception will be raised with its original traceback. 
""" if self._last_err is not None: try: raise self._last_err finally: self._last_err = None else: return func(self, *args, **kwargs) class CheckLastError(type): """Check for a deferred error on all methods This metaclass applies the :python:`check_last_err` decorator to all methods not prefixed by '_'. Additionally, it enabled `__DEFER_STEP_ERRORS__` by default. """ def __new__( cls, name: str, parents: t.Tuple[t.Type], attrs: t.Dict[str, t.Any], ) -> "CheckLastError": attrs['__DEFER_STEP_ERRORS__'] = True for attr_name in attrs: attr = attrs[attr_name] # wrap only methods if not isinstance(attr, types.FunctionType): continue if attr_name[0] != '_': attrs[attr_name] = check_last_err(attr) return super(CheckLastError, cls).__new__(cls, name, parents, attrs)
from __future__ import division from libtbx.test_utils import approx_equal from libtbx.utils import Usage from libtbx import easy_run import libtbx.load_env import platform import time import sys, os op = os.path __this_script__ = "cctbx_project/fable/test/sf_times.py" # based on cctbx_project/compcomm/newsletter09/sf_times.py setup_dir = "/net/cci/setup/Linux" ifort_versions = ["intel121.sh", "intel111.sh", "ifort91.sh"] icc_versions = [ "intel121.sh", "intel111.sh", "icc101.sh", "icc91.sh"] gcc_versions = [ "gcc-4.6.1_fc8.sh", "gcc-4.5.3_fc8.sh", "gcc-4.4.6_fc8.sh", "gcc-4.3.6_fc8.sh", "gcc-4.2.4_fc8.sh"] fortran_template = r"""C %(this_script)s subroutine cos_wrapper(result, arg) REAL result REAL arg result = COS(arg) return end subroutine exp_wrapper(result, arg) REAL result REAL arg result = EXP(arg) return end subroutine sf(abcss, n_scatt, xyz, b_iso, n_refl, hkl, f_calc) implicit none REAL abcss(3) integer n_scatt REAL xyz(3, *) REAL b_iso(*) integer n_refl integer hkl(3, *) REAL f_calc(2, *) integer i_refl, i_scatt, j, h REAL phi, cphi, sphi, dss, ldw, dw, a, b DO i_refl=1,n_refl a = 0 b = 0 DO i_scatt=1,n_scatt phi = 0 DO j=1,3 phi = phi + hkl(j,i_refl) * xyz(j,i_scatt) enddo phi = phi * 2 * 3.1415926535897931 call cos_wrapper(cphi, phi) call cos_wrapper(sphi, phi - 3.1415926535897931*0.5) dss = 0 DO j=1,3 h = hkl(j,i_refl) dss = dss + h*h * abcss(j) enddo ldw = -0.25 * dss * b_iso(i_scatt) call exp_wrapper(dw, ldw) a = a + dw * cphi b = b + dw * sphi enddo f_calc(1, i_refl) = a f_calc(2, i_refl) = b enddo return end program run implicit none REAL abcss(3) integer n_scatt parameter(n_scatt=%(n_scatt)s) REAL xyz(3, n_scatt) REAL b_iso(n_scatt) integer n_refl parameter(n_refl=%(n_refl)s) integer hkl(3, n_refl) REAL f_calc(2, n_refl) integer i, j, jr REAL a, b, max_a, max_b abcss(1) = 1/(11.0*11.0) abcss(2) = 1/(12.0*12.0) abcss(3) = 1/(13.0*13.0) jr = 0 DO i=1,n_scatt DO j=1,3 jr = mod(jr*1366+150889, 714025) xyz(j,i) = (mod(jr, 20000) - 10000) / 10000.0 enddo enddo DO i=1,n_scatt jr = mod(jr*1366+150889, 714025) b_iso(i) = mod(jr, 10000) / 100.0 enddo if (n_scatt .le. 10) then DO i=1,n_scatt write(6, '(4(1x,f9.6))') & xyz(1,i), xyz(2,i), xyz(3, i), b_iso(i) enddo endif DO i=1,n_refl DO j=1,3 jr = mod(jr*1366+150889, 714025) hkl(j,i) = mod(jr, 10) - 5 enddo enddo call sf(abcss, n_scatt, xyz, b_iso, n_refl, hkl, f_calc) if (n_refl .le. 100) then DO i=1,n_refl write(6, '(3(1x,i3),1x,f12.6,1x,f12.6)') & hkl(1,i), hkl(2,i), hkl(3,i), & f_calc(1,i), f_calc(2,i) enddo else max_a = 0 max_b = 0 DO i=1,n_refl a = f_calc(1,i) b = f_calc(2,i) if (max_a .lt. a) max_a = a if (max_b .lt. 
b) max_b = b enddo write(6, '(2(1x,f12.6))') max_a, max_b endif end """ def compare_with_cctbx_structure_factors(n_scatt, n_refl, output_lines): from cctbx import xray from cctbx import miller from cctbx import crystal from cctbx.array_family import flex crystal_symmetry = crystal.symmetry( unit_cell=(11,12,13,90,90,90), space_group_symbol="P1") scatterers = flex.xray_scatterer() miller_indices = flex.miller_index() f_calc = flex.complex_double() for line in output_lines: flds = line.split() assert len(flds) in [4,5] if (len(flds) == 4): x,y,z,b_iso = [float(s) for s in flds] scatterers.append( xray.scatterer(site=(x,y,z), b=b_iso, scattering_type="const")) else: miller_indices.append([int(s) for s in flds[:3]]) f_calc.append(complex(float(flds[3]), float(flds[4]))) assert scatterers.size() == n_scatt assert miller_indices.size() == n_refl xs = xray.structure( crystal_symmetry=crystal_symmetry, scatterers=scatterers) fc = miller_array = miller.set( crystal_symmetry=crystal_symmetry, indices=miller_indices, anomalous_flag=False).array(data=f_calc) fc2 = fc.structure_factors_from_scatterers( xray_structure=xs, algorithm="direct", cos_sin_table=False).f_calc() for f1,f2 in zip(fc.data(), fc2.data()): assert approx_equal(f1, f2, eps=1e-5) def build_run( setup_cmd, ld_preload_flag, n_scatt, n_refl, build_cmd, check_max_a_b): if (op.isfile("a.out")): os.remove("a.out") assert not op.isfile("a.out") print build_cmd buffers = easy_run.fully_buffered(command=build_cmd) msg = buffers.format_errors_if_any() if (msg is not None): if (0): print build_cmd print print msg print STOP() return None assert op.isfile("a.out") run_cmd = setup_cmd if (ld_preload_flag): run_cmd += 'env LD_PRELOAD='\ '"/net/marbles/raid1/rwgk/dist/opt_resources/linux64/libimf.so:"'\ '"/net/marbles/raid1/rwgk/dist/opt_resources/linux64/libirc.so" ' utimes = [] run_cmd += '/usr/bin/time -p ./a.out' def run_once(): buffers = easy_run.fully_buffered(command=run_cmd) if (len(buffers.stderr_lines) != 3): print "v"*79 print "\n".join(buffers.stderr_lines) print "^"*79 raise RuntimeError( "Unexpected number of output lines" " (3 expected; acutal output see above).") if (n_scatt == 0): pass elif (n_scatt <= 10 and n_refl <= 100): assert len(buffers.stdout_lines) == n_scatt + n_refl else: assert len(buffers.stdout_lines) == 1 max_a, max_b = [float(s) for s in buffers.stdout_lines[0].split()] if (check_max_a_b): if (n_scatt == 2000 and n_refl == 20000): assert approx_equal(max_a, 35.047157, eps=1e-4) assert approx_equal(max_b, 25.212738, eps=1e-4) elif (n_scatt == 100 and n_refl == 1000): assert approx_equal(max_a, 4.493645, eps=1e-4) assert approx_equal(max_b, 10.515532, eps=1e-4) elif (n_scatt <= 10 and n_refl <= 100): if (libtbx.env.has_module(name="cctbx")): compare_with_cctbx_structure_factors( n_scatt=n_scatt, n_refl=n_refl, output_lines=buffers.stdout_lines) else: raise RuntimeError, (max_a, max_b) utime = float(buffers.stderr_lines[1].split()[1]) utimes.append(utime) print "sample utime: %.2f" % utime sys.stdout.flush() for _ in xrange(8): run_once() return min(utimes) def finalize_cpp_build_cmd(source_cpp): from fable import simple_compilation comp_env = simple_compilation.environment() return comp_env.assemble_include_search_paths(no_quotes=False) \ + " " + source_cpp def write_build_run( setup_cmd, ld_preload_flag, n_scatt, n_refl, real, lang, build_cmd, replace_cos, replace_exp): this_script = __this_script__ for_txt = fortran_template % vars() if (replace_cos): for_txt = for_txt.replace( "COS(arg)", "arg / (abs(arg)+1.0)") if 
(replace_exp): for_txt = for_txt.replace( "EXP(arg)", "max(0.0, 1.0 - arg*arg)") for_txt = for_txt.replace("REAL", real) open("tmp.f", "w").write(for_txt) from fable import cout cpp_txt = cout.process( file_names=["tmp.f"], namespace="sf_test", fem_do_safe=False, inline_all=True) open("tmp.cpp", "w").write("\n".join(cpp_txt)+"\n") if (lang.lower() == "f"): build_cmd += " tmp.f" elif (lang.lower() == "c"): build_cmd += finalize_cpp_build_cmd("tmp.cpp") else: raise RuntimeError('Unknown lang: "%s"' % lang) return build_run( setup_cmd=setup_cmd, ld_preload_flag=ld_preload_flag, n_scatt=n_scatt, n_refl=n_refl, build_cmd=build_cmd, check_max_a_b=(not (replace_cos or replace_exp))) def run_combinations( compiler_versions, all_utimes, n_scatt, n_refl, compiler_build_opts_list, real_list): for lang,setup_sh_list,compiler,build_opts in compiler_build_opts_list: for setup_sh in setup_sh_list: if (setup_sh is None): setup_cmd = "" else: setup_cmd = ". %s/%s; " % (setup_dir, setup_sh) compiler_version = easy_run.fully_buffered( command=setup_cmd+compiler+" --version", join_stdout_stderr=True).stdout_lines[0] if (lang in ["f", "c"]): ld_preload_flags = [False, True] else: ld_preload_flags = [False] for ld_preload_flag in ld_preload_flags: iml = ["", " Intel Math Lib"][int(ld_preload_flag)] compiler_versions.append(compiler_version + iml) build_cmd = " ".join([setup_cmd+compiler, build_opts]) print build_cmd utimes = [] if (n_scatt != 0): for real in real_list: print " %s" % real for replace_cos in [False, True]: print " replace_cos", replace_cos for replace_exp in [False, True]: print " replace_exp", replace_exp sys.stdout.flush() if (compiler_version != "n/a"): utime = write_build_run( setup_cmd=setup_cmd, ld_preload_flag=ld_preload_flag, n_scatt=n_scatt, n_refl=n_refl, real=real, lang=lang, build_cmd=build_cmd, replace_cos=replace_cos, replace_exp=replace_exp) if (utime is not None): print " %4.2f" % utime else: utime = -1.0 print " err" else: utime = -1.0 print " n/a" utimes.append(utime) sys.stdout.flush() else: if (lang.lower() == "f"): f_source = libtbx.env.find_in_repositories( relative_path="lapack_fem/dsyev_test.f", test=op.isfile, optional=False) build_cmd_compl = build_cmd + " " + f_source else: cpp_source = libtbx.env.find_in_repositories( relative_path="lapack_fem/dsyev_test.cpp", test=op.isfile, optional=False) build_cmd_compl = build_cmd + finalize_cpp_build_cmd(cpp_source) utime = build_run( setup_cmd=setup_cmd, ld_preload_flag=ld_preload_flag, n_scatt=n_scatt, n_refl=n_refl, build_cmd=build_cmd_compl, check_max_a_b=False) if (utime is None): print "err" utime = -1.0 else: print "min utime: %.2f" % utime sys.stdout.flush() utimes.append(utime) all_utimes.append((utimes, build_cmd + iml)) def usage(): raise Usage("fable.python sf_times.py unit_test|quick|production") def run(args): if (len(args) != 1): usage() t_start = time.time() build_platform = platform.platform() build_node = platform.node() compiler_versions = [] if (args[0] == "unit_test"): n_scatt, n_refl = 10, 100 elif (args[0] == "quick"): n_scatt, n_refl = 100, 1000 elif (args[0] == "production"): n_scatt, n_refl = 2000, 20000 elif (args[0] == "dsyev"): n_scatt, n_refl = 0, 0 else: usage() gcc_sh = gcc_versions + [None] icc_sh = icc_versions if (args[0] == "quick"): gcc_sh = gcc_sh[:2] icc_sh = icc_sh[:1] all_utimes = [] run_combinations( compiler_versions, all_utimes, n_scatt=n_scatt, n_refl=n_refl, compiler_build_opts_list=[ ("F", ifort_versions, "ifort", "-O"), ("f", gcc_sh, "gfortran", "-O3 -ffast-math"), ("f", gcc_sh, 
"gfortran", "-O3 -ffast-math -march=native"), ("C", icc_sh, "icpc", "-O"), ("c", gcc_sh, "g++", "-O3 -ffast-math"), ("c", gcc_sh, "g++", "-O3 -ffast-math -march=native"), ("c", [None], "clang++", "-O3 -U__GXX_WEAK__ -Wno-logical-op-parentheses -ffast-math"), ("c", [None], "clang++", "-O3 -U__GXX_WEAK__ -Wno-logical-op-parentheses -ffast-math" " -march=native")], real_list=["real*4", "real*8"]) print print "current_platform:", platform.platform() print "current_node:", platform.node() print "build_platform:", build_platform print "build_node:", build_node for compiler_version in compiler_versions: print "compiler:", compiler_version if (n_scatt != 0): print "n_scatt * n_refl: %d * %d" % (n_scatt, n_refl) print '''\ "s" or "d": single-precision or double-precision floating-point variables "E" or "e": using the library exp(arg) function or "max(0.0, 1.0 - arg*arg)" "C" or "c": using the library cos(arg) function or "arg / (abs(arg)+1.0)"''' print " sEC seC sEc sec dEC deC dEc dec" else: print "dsyev times:" useful_utimes = [] for utimes,build_cmd in all_utimes: if (max(utimes) != -1.0): print " ".join(["%6.2f" % u for u in utimes]), build_cmd useful_utimes.append((utimes,build_cmd)) if (len(useful_utimes) > 1): print "Relative to first:" for utimes,build_cmd in useful_utimes: print " ".join(["%6.2f" % (u/max(u0,0.01)) for u,u0 in zip(utimes,useful_utimes[0][0])]), build_cmd print "Wall clock time: %.2f s" % (time.time()-t_start) if (__name__ == "__main__"): run(args=sys.argv[1:])
import numpy as np

from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move


class SawyerCoffeeButtonV1Policy(Policy):

    @staticmethod
    @assert_fully_parsed
    def _parse_obs(obs):
        return {
            'hand_pos': obs[:3],
            'mug_pos': obs[3:6],
            'unused_info': obs[6:],
        }

    def get_action(self, obs):
        o_d = self._parse_obs(obs)

        action = Action({
            'delta_pos': np.arange(3),
            'grab_effort': 3
        })

        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=10.)
        action['grab_effort'] = -1.

        return action.array

    @staticmethod
    def _desired_pos(o_d):
        pos_curr = o_d['hand_pos']
        pos_mug = o_d['mug_pos'] + np.array([.0, .0, .01])

        if abs(pos_curr[0] - pos_mug[0]) > 0.02:
            return np.array([pos_mug[0], pos_curr[1], .28])
        else:
            return pos_curr + np.array([.0, .1, .0])
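# A minimal sketch (an addition, not part of the original file) of the policy's
# two-phase target logic on a hand-made observation: the hand first lines up
# with the mug in x, then pushes forward (+y) toward the button. The layout of
# the parsed observation and the 0.02 m threshold come from the code above;
# the numeric values are illustrative only.
import numpy as np

def _desired_pos_sketch(hand_pos, mug_pos):
    mug = mug_pos + np.array([.0, .0, .01])
    if abs(hand_pos[0] - mug[0]) > 0.02:
        return np.array([mug[0], hand_pos[1], .28])   # phase 1: align in x
    return hand_pos + np.array([.0, .1, .0])          # phase 2: push toward button

hand = np.array([0.10, 0.40, 0.30])
mug = np.array([0.00, 0.70, 0.05])
print(_desired_pos_sketch(hand, mug))                          # phase 1 target
print(_desired_pos_sketch(np.array([0.005, 0.40, 0.28]), mug))  # phase 2 target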
from runner.koan import *


class AboutDictionaries(Koan):

    def test_creating_dictionaries(self):
        empty_dict = dict()
        self.assertEqual(dict, type(empty_dict))
        self.assertEqual(dict(), empty_dict)
        self.assertEqual(0, len(empty_dict))

    def test_dictionary_literals(self):
        empty_dict = {}
        self.assertEqual(dict, type(empty_dict))

        babel_fish = {'one': 'uno', 'two': 'dos'}
        self.assertEqual(__, len(babel_fish))

    def test_accessing_dictionaries(self):
        babel_fish = {'one': 'uno', 'two': 'dos'}
        self.assertEqual(__, babel_fish['one'])
        self.assertEqual(__, babel_fish['two'])

    def test_changing_dictionaries(self):
        babel_fish = {'one': 'uno', 'two': 'dos'}
        babel_fish['one'] = 'eins'

        expected = {'two': 'dos', 'one': __}
        self.assertEqual(expected, babel_fish)

    def test_dictionary_is_unordered(self):
        dict1 = {'one': 'uno', 'two': 'dos'}
        dict2 = {'two': 'dos', 'one': 'uno'}

        self.assertEqual(____, dict1 == dict2)

    def test_dictionary_keys_and_values(self):
        babel_fish = {'one': 'uno', 'two': 'dos'}
        self.assertEqual(__, len(babel_fish.keys()))
        self.assertEqual(__, len(babel_fish.values()))
        self.assertEqual(__, 'one' in babel_fish.keys())
        self.assertEqual(__, 'two' in babel_fish.values())
        self.assertEqual(__, 'uno' in babel_fish.keys())
        self.assertEqual(__, 'dos' in babel_fish.values())

    def test_making_a_dictionary_from_a_sequence_of_keys(self):
        cards = {}.fromkeys(
            ('red warrior', 'green elf', 'blue valkyrie', 'yellow dwarf',
             'confused looking zebra'),
            42)

        self.assertEqual(__, len(cards))
        self.assertEqual(__, cards['green elf'])
        self.assertEqual(__, cards['yellow dwarf'])
import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='pogoprotos/networking/requests/messages/start_gym_battle_message.proto', package='pogoprotos.networking.requests.messages', syntax='proto3', serialized_pb=_b('\nFpogoprotos/networking/requests/messages/start_gym_battle_message.proto\x12\'pogoprotos.networking.requests.messages\"\x97\x01\n\x15StartGymBattleMessage\x12\x0e\n\x06gym_id\x18\x01 \x01(\t\x12\x1d\n\x15\x61ttacking_pokemon_ids\x18\x02 \x03(\x06\x12\x1c\n\x14\x64\x65\x66\x65nding_pokemon_id\x18\x03 \x01(\x06\x12\x17\n\x0fplayer_latitude\x18\x04 \x01(\x01\x12\x18\n\x10player_longitude\x18\x05 \x01(\x01\x62\x06proto3') ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _STARTGYMBATTLEMESSAGE = _descriptor.Descriptor( name='StartGymBattleMessage', full_name='pogoprotos.networking.requests.messages.StartGymBattleMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='gym_id', full_name='pogoprotos.networking.requests.messages.StartGymBattleMessage.gym_id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='attacking_pokemon_ids', full_name='pogoprotos.networking.requests.messages.StartGymBattleMessage.attacking_pokemon_ids', index=1, number=2, type=6, cpp_type=4, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='defending_pokemon_id', full_name='pogoprotos.networking.requests.messages.StartGymBattleMessage.defending_pokemon_id', index=2, number=3, type=6, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='player_latitude', full_name='pogoprotos.networking.requests.messages.StartGymBattleMessage.player_latitude', index=3, number=4, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='player_longitude', full_name='pogoprotos.networking.requests.messages.StartGymBattleMessage.player_longitude', index=4, number=5, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=116, serialized_end=267, ) DESCRIPTOR.message_types_by_name['StartGymBattleMessage'] = _STARTGYMBATTLEMESSAGE StartGymBattleMessage = _reflection.GeneratedProtocolMessageType('StartGymBattleMessage', (_message.Message,), dict( DESCRIPTOR = _STARTGYMBATTLEMESSAGE, __module__ = 
'pogoprotos.networking.requests.messages.start_gym_battle_message_pb2'
  # @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.messages.StartGymBattleMessage)
  ))

_sym_db.RegisterMessage(StartGymBattleMessage)
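# Usage sketch (an addition, not part of the generated file): constructing and
# round-tripping the message with the standard protobuf runtime API. This
# assumes the protobuf package is installed and this generated module is on
# the import path; the field values are made up. Field names and types come
# from the descriptor above.
example = StartGymBattleMessage(
    gym_id='gym-1234',
    attacking_pokemon_ids=[111, 222, 333],
    defending_pokemon_id=444,
    player_latitude=37.7749,
    player_longitude=-122.4194,
)
wire_bytes = example.SerializeToString()
decoded = StartGymBattleMessage()
decoded.ParseFromString(wire_bytes)
assert decoded.gym_id == 'gym-1234'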
from ..workspace import Block from twisted.internet import defer from .variables import lexical_variable import operator class logic_null (Block): def eval (self): return defer.succeed(None) class logic_boolean (Block): def eval (self): return defer.succeed(self.fields['BOOL'] == 'TRUE') class logic_negate (Block): outputType = bool def eval (self): def negate (result): if result is None: return None return result == False self._complete = self.getInputValue('BOOL').addCallback(negate) return self._complete _operators_map = { "EQ": operator.eq, "NEQ": operator.ne, "LT": operator.lt, "LTE": operator.le, "GT": operator.gt, "GTE": operator.ge } def _compare (lhs, rhs, op_id): if lhs is None or rhs is None: return None op = _operators_map[op_id] return op(lhs, rhs) # Emit a warning if bad op given class logic_compare (Block): outputType = bool def eval (self): lhs = self.getInputValue('A') rhs = self.getInputValue('B') op_id = self.fields['OP'] def _eval (results): lhs, rhs = results return _compare(lhs, rhs, op_id) self._complete = defer.gatherResults([lhs, rhs]).addCallback(_eval) return self._complete class lexical_variable_compare (lexical_variable): outputType = bool def eval (self): variable = self._getVariable() if variable is None: self.emitLogMessage( "Unknown variable: " + str(self.getFieldValue('VAR')), "error" ) return defer.succeed(None) value = self.getFieldValue('VALUE') op_id = self.getFieldValue('OP') unit = self.getFieldValue('UNIT', None) if isinstance(unit, (int, float)): value *= unit return defer.succeed(_compare(variable.value, value, op_id)) class logic_operation (Block): outputType = bool def eval (self): @defer.inlineCallbacks def _run (): op = self.fields['OP'] lhs = yield self.getInputValue('A') if lhs is None: return if op == "AND": if bool(lhs): rhs = yield self.getInputValue('B') if rhs is None: return defer.returnValue(bool(rhs)) else: defer.returnValue(False) elif op == "OR": if bool(lhs): defer.returnValue(True) else: rhs = yield self.getInputValue('B') if rhs is None: return defer.returnValue(bool(rhs)) # Emit a warning return self._complete = _run() return self._complete class logic_ternary (Block): # TODO: outputType of then and else should be the same. # this is then the outputType of the logic_ternary block. def eval (self): @defer.inlineCallbacks def _run (): test = yield self.getInputValue('IF') if test is None: return if bool(test): result = yield self.getInputValue('THEN') defer.returnValue(result) else: result = yield self.getInputValue('ELSE') defer.returnValue(result) self._complete = _run() return self._complete
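# Quick illustrative checks (an addition, assuming this module's namespace):
# _compare() propagates None operands and otherwise applies the operator
# selected by op_id from _operators_map above.
assert _compare(None, 3, 'LT') is None
assert _compare(2, 3, 'LT') is True
assert _compare(3, 3, 'GTE') is True
assert _compare(2, 3, 'EQ') is False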
""" Hack to get scripts to run from source checkout without having to set PYTHONPATH. """ import sys from os.path import dirname, join, abspath db_path = dirname(__file__) project_path = abspath(join(db_path, "..")) sys.path.insert(0, project_path)
__version__ = '0.3.9'
''' Grid Layout =========== .. only:: html .. image:: images/gridlayout.gif :align: right .. only:: latex .. image:: images/gridlayout.png :align: right .. versionadded:: 1.0.4 The :class:`GridLayout` arranges children in a matrix. It takes the available space and divides it into columns and rows, then adds widgets to the resulting "cells". .. versionchanged:: 1.0.7 The implementation has changed to use the widget size_hint for calculating column/row sizes. `uniform_width` and `uniform_height` have been removed and other properties have added to give you more control. Background ---------- Unlike many other toolkits, you cannot explicitly place a widget in a specific column/row. Each child is automatically assigned a position determined by the layout configuration and the child's index in the children list. A GridLayout must always have at least one input constraint: :attr:`GridLayout.cols` or :attr:`GridLayout.rows`. If you do not specify cols or rows, the Layout will throw an exception. Column Width and Row Height --------------------------- The column width/row height are determined in 3 steps: - The initial size is given by the :attr:`col_default_width` and :attr:`row_default_height` properties. To customize the size of a single column or row, use :attr:`cols_minimum` or :attr:`rows_minimum`. - The `size_hint_x`/`size_hint_y` of the children are taken into account. If no widgets have a size hint, the maximum size is used for all children. - You can force the default size by setting the :attr:`col_force_default` or :attr:`row_force_default` property. This will force the layout to ignore the `width` and `size_hint` properties of children and use the default size. Using a GridLayout ------------------ In the example below, all widgets will have an equal size. By default, the `size_hint` is (1, 1), so a Widget will take the full size of the parent:: layout = GridLayout(cols=2) layout.add_widget(Button(text='Hello 1')) layout.add_widget(Button(text='World 1')) layout.add_widget(Button(text='Hello 2')) layout.add_widget(Button(text='World 2')) .. image:: images/gridlayout_1.jpg Now, let's fix the size of Hello buttons to 100px instead of using size_hint_x=1:: layout = GridLayout(cols=2) layout.add_widget(Button(text='Hello 1', size_hint_x=None, width=100)) layout.add_widget(Button(text='World 1')) layout.add_widget(Button(text='Hello 2', size_hint_x=None, width=100)) layout.add_widget(Button(text='World 2')) .. image:: images/gridlayout_2.jpg Next, let's fix the row height to a specific size:: layout = GridLayout(cols=2, row_force_default=True, row_default_height=40) layout.add_widget(Button(text='Hello 1', size_hint_x=None, width=100)) layout.add_widget(Button(text='World 1')) layout.add_widget(Button(text='Hello 2', size_hint_x=None, width=100)) layout.add_widget(Button(text='World 2')) .. image:: images/gridlayout_3.jpg ''' __all__ = ('GridLayout', 'GridLayoutException') from kivy.logger import Logger from kivy.uix.layout import Layout from kivy.properties import NumericProperty, BooleanProperty, DictProperty, \ BoundedNumericProperty, ReferenceListProperty, VariableListProperty, \ ObjectProperty, StringProperty from math import ceil def nmax(*args): # merge into one list args = [x for x in args if x is not None] return max(args) def nmin(*args): # merge into one list args = [x for x in args if x is not None] return min(args) class GridLayoutException(Exception): '''Exception for errors if the grid layout manipulation fails. ''' pass class GridLayout(Layout): '''Grid layout class. 
See module documentation for more information. ''' spacing = VariableListProperty([0, 0], length=2) '''Spacing between children: [spacing_horizontal, spacing_vertical]. spacing also accepts a one argument form [spacing]. :attr:`spacing` is a :class:`~kivy.properties.VariableListProperty` and defaults to [0, 0]. ''' padding = VariableListProperty([0, 0, 0, 0]) '''Padding between the layout box and it's children: [padding_left, padding_top, padding_right, padding_bottom]. padding also accepts a two argument form [padding_horizontal, padding_vertical] and a one argument form [padding]. .. versionchanged:: 1.7.0 Replaced NumericProperty with VariableListProperty. :attr:`padding` is a :class:`~kivy.properties.VariableListProperty` and defaults to [0, 0, 0, 0]. ''' cols = BoundedNumericProperty(None, min=0, allownone=True) '''Number of columns in the grid. .. versionchanged:: 1.0.8 Changed from a NumericProperty to BoundedNumericProperty. You can no longer set this to a negative value. :attr:`cols` is a :class:`~kivy.properties.NumericProperty` and defaults to 0. ''' rows = BoundedNumericProperty(None, min=0, allownone=True) '''Number of rows in the grid. .. versionchanged:: 1.0.8 Changed from a NumericProperty to a BoundedNumericProperty. You can no longer set this to a negative value. :attr:`rows` is a :class:`~kivy.properties.NumericProperty` and defaults to 0. ''' col_default_width = NumericProperty(0) '''Default minimum size to use for a column. .. versionadded:: 1.0.7 :attr:`col_default_width` is a :class:`~kivy.properties.NumericProperty` and defaults to 0. ''' row_default_height = NumericProperty(0) '''Default minimum size to use for row. .. versionadded:: 1.0.7 :attr:`row_default_height` is a :class:`~kivy.properties.NumericProperty` and defaults to 0. ''' col_force_default = BooleanProperty(False) '''If True, ignore the width and size_hint_x of the child and use the default column width. .. versionadded:: 1.0.7 :attr:`col_force_default` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. ''' row_force_default = BooleanProperty(False) '''If True, ignore the height and size_hint_y of the child and use the default row height. .. versionadded:: 1.0.7 :attr:`row_force_default` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. ''' cols_minimum = DictProperty({}) '''Dict of minimum width for each column. The dictionary keys are the column numbers, e.g. 0, 1, 2... .. versionadded:: 1.0.7 :attr:`cols_minimum` is a :class:`~kivy.properties.DictProperty` and defaults to {}. ''' rows_minimum = DictProperty({}) '''Dict of minimum height for each row. The dictionary keys are the row numbers, e.g. 0, 1, 2... .. versionadded:: 1.0.7 :attr:`rows_minimum` is a :class:`~kivy.properties.DictProperty` and defaults to {}. ''' minimum_width = NumericProperty(0) '''Automatically computed minimum width needed to contain all children. .. versionadded:: 1.0.8 :attr:`minimum_width` is a :class:`~kivy.properties.NumericProperty` and defaults to 0. It is read only. ''' minimum_height = NumericProperty(0) '''Automatically computed minimum height needed to contain all children. .. versionadded:: 1.0.8 :attr:`minimum_height` is a :class:`~kivy.properties.NumericProperty` and defaults to 0. It is read only. ''' minimum_size = ReferenceListProperty(minimum_width, minimum_height) '''Automatically computed minimum size needed to contain all children. .. 
versionadded:: 1.0.8 :attr:`minimum_size` is a :class:`~kivy.properties.ReferenceListProperty` of (:attr:`minimum_width`, :attr:`minimum_height`) properties. It is read only. ''' def __init__(self, **kwargs): self._cols = self._rows = None super(GridLayout, self).__init__(**kwargs) fbind = self.fbind update = self._trigger_layout fbind('col_default_width', update) fbind('row_default_height', update) fbind('col_force_default', update) fbind('row_force_default', update) fbind('cols', update) fbind('rows', update) fbind('parent', update) fbind('spacing', update) fbind('padding', update) fbind('children', update) fbind('size', update) fbind('pos', update) def get_max_widgets(self): if self.cols and self.rows: return self.rows * self.cols else: return None def on_children(self, instance, value): # if that makes impossible to construct things with deffered method, # migrate this test in do_layout, and/or issue a warning. smax = self.get_max_widgets() if smax and len(value) > smax: raise GridLayoutException( 'Too many children in GridLayout. Increase rows/cols!') def _init_rows_cols_sizes(self, count): # the goal here is to calculate the minimum size of every cols/rows # and determine if they have stretch or not current_cols = self.cols current_rows = self.rows # if no cols or rows are set, we can't calculate minimum size. # the grid must be contrained at least on one side if not current_cols and not current_rows: Logger.warning('%r have no cols or rows set, ' 'layout is not triggered.' % self) return if current_cols is None: current_cols = int(ceil(count / float(current_rows))) elif current_rows is None: current_rows = int(ceil(count / float(current_cols))) current_cols = max(1, current_cols) current_rows = max(1, current_rows) self._has_hint_bound_x = False self._has_hint_bound_y = False self._cols_min_size_none = 0. # min size from all the None hint self._rows_min_size_none = 0. 
# min size from all the None hint self._cols = cols = [self.col_default_width] * current_cols self._cols_sh = [None] * current_cols self._cols_sh_min = [None] * current_cols self._cols_sh_max = [None] * current_cols self._rows = rows = [self.row_default_height] * current_rows self._rows_sh = [None] * current_rows self._rows_sh_min = [None] * current_rows self._rows_sh_max = [None] * current_rows # update minimum size from the dicts items = (i for i in self.cols_minimum.items() if i[0] < len(cols)) for index, value in items: cols[index] = max(value, cols[index]) items = (i for i in self.rows_minimum.items() if i[0] < len(rows)) for index, value in items: rows[index] = max(value, rows[index]) return True def _fill_rows_cols_sizes(self): cols, rows = self._cols, self._rows cols_sh, rows_sh = self._cols_sh, self._rows_sh cols_sh_min, rows_sh_min = self._cols_sh_min, self._rows_sh_min cols_sh_max, rows_sh_max = self._cols_sh_max, self._rows_sh_max # calculate minimum size for each columns and rows n_cols = len(cols) has_bound_y = has_bound_x = False for i, child in enumerate(reversed(self.children)): (shw, shh), (w, h) = child.size_hint, child.size shw_min, shh_min = child.size_hint_min shw_max, shh_max = child.size_hint_max row, col = divmod(i, n_cols) # compute minimum size / maximum stretch needed if shw is None: cols[col] = nmax(cols[col], w) else: cols_sh[col] = nmax(cols_sh[col], shw) if shw_min is not None: has_bound_x = True cols_sh_min[col] = nmax(cols_sh_min[col], shw_min) if shw_max is not None: has_bound_x = True cols_sh_max[col] = nmin(cols_sh_max[col], shw_max) if shh is None: rows[row] = nmax(rows[row], h) else: rows_sh[row] = nmax(rows_sh[row], shh) if shh_min is not None: has_bound_y = True rows_sh_min[row] = nmax(rows_sh_min[row], shh_min) if shh_max is not None: has_bound_y = True rows_sh_max[row] = nmin(rows_sh_max[row], shh_max) self._has_hint_bound_x = has_bound_x self._has_hint_bound_y = has_bound_y def _update_minimum_size(self): # calculate minimum width/height needed, starting from padding + # spacing l, t, r, b = self.padding spacing_x, spacing_y = self.spacing cols, rows = self._cols, self._rows width = l + r + spacing_x * (len(cols) - 1) self._cols_min_size_none = sum(cols) + width # we need to subtract for the sh_max/min the already guaranteed size # due to having a None in the col. So sh_min gets smaller by that size # since it's already covered. 
Similarly for sh_max, because if we # already exceeded the max, the subtracted max will be zero, so # it won't get larger if self._has_hint_bound_x: cols_sh_min = self._cols_sh_min cols_sh_max = self._cols_sh_max for i, (c, sh_min, sh_max) in enumerate( zip(cols, cols_sh_min, cols_sh_max)): if sh_min is not None: width += max(c, sh_min) cols_sh_min[i] = max(0., sh_min - c) else: width += c if sh_max is not None: cols_sh_max[i] = max(0., sh_max - c) else: width = self._cols_min_size_none height = t + b + spacing_y * (len(rows) - 1) self._rows_min_size_none = sum(rows) + height if self._has_hint_bound_y: rows_sh_min = self._rows_sh_min rows_sh_max = self._rows_sh_max for i, (r, sh_min, sh_max) in enumerate( zip(rows, rows_sh_min, rows_sh_max)): if sh_min is not None: height += max(r, sh_min) rows_sh_min[i] = max(0., sh_min - r) else: height += r if sh_max is not None: rows_sh_max[i] = max(0., sh_max - r) else: height = self._rows_min_size_none # finally, set the minimum size self.minimum_size = (width, height) def _finalize_rows_cols_sizes(self): selfw = self.width selfh = self.height # resolve size for each column if self.col_force_default: cols = [self.col_default_width] * len(self._cols) for index, value in self.cols_minimum.items(): cols[index] = value self._cols = cols else: cols = self._cols cols_sh = self._cols_sh cols_sh_min = self._cols_sh_min cols_weight = float(sum((x for x in cols_sh if x is not None))) stretch_w = max(0., selfw - self._cols_min_size_none) if stretch_w > 1e-9: if self._has_hint_bound_x: # fix the hints to be within bounds self.layout_hint_with_bounds( cols_weight, stretch_w, sum((c for c in cols_sh_min if c is not None)), cols_sh_min, self._cols_sh_max, cols_sh) for index, col_stretch in enumerate(cols_sh): # if the col don't have stretch information, nothing to do if not col_stretch: continue # add to the min width whatever remains from size_hint cols[index] += stretch_w * col_stretch / cols_weight # same algo for rows if self.row_force_default: rows = [self.row_default_height] * len(self._rows) for index, value in self.rows_minimum.items(): rows[index] = value self._rows = rows else: rows = self._rows rows_sh = self._rows_sh rows_sh_min = self._rows_sh_min rows_weight = float(sum((x for x in rows_sh if x is not None))) stretch_h = max(0., selfh - self._rows_min_size_none) if stretch_h > 1e-9: if self._has_hint_bound_y: # fix the hints to be within bounds self.layout_hint_with_bounds( rows_weight, stretch_h, sum((r for r in rows_sh_min if r is not None)), rows_sh_min, self._rows_sh_max, rows_sh) for index, row_stretch in enumerate(rows_sh): # if the row don't have stretch information, nothing to do if not row_stretch: continue # add to the min height whatever remains from size_hint rows[index] += stretch_h * row_stretch / rows_weight def _iterate_layout(self, count): selfx = self.x padding_left = self.padding[0] padding_top = self.padding[1] spacing_x, spacing_y = self.spacing i = count - 1 y = self.top - padding_top cols = self._cols for row_height in self._rows: x = selfx + padding_left for col_width in cols: if i < 0: break yield i, x, y - row_height, col_width, row_height i = i - 1 x = x + col_width + spacing_x y -= row_height + spacing_y def do_layout(self, *largs): children = self.children if not children or not self._init_rows_cols_sizes(len(children)): l, t, r, b = self.padding self.minimum_size = l + r, t + b return self._fill_rows_cols_sizes() self._update_minimum_size() self._finalize_rows_cols_sizes() for i, x, y, w, h in 
self._iterate_layout(len(children)): c = children[i] c.pos = x, y shw, shh = c.size_hint shw_min, shh_min = c.size_hint_min shw_max, shh_max = c.size_hint_max if shw_min is not None: if shw_max is not None: w = max(min(w, shw_max), shw_min) else: w = max(w, shw_min) else: if shw_max is not None: w = min(w, shw_max) if shh_min is not None: if shh_max is not None: h = max(min(h, shh_max), shh_min) else: h = max(h, shh_min) else: if shh_max is not None: h = min(h, shh_max) if shw is None: if shh is not None: c.height = h else: if shh is None: c.width = w else: c.size = (w, h)
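# Usage sketch (an addition, assuming Kivy is installed): it mirrors the module
# docstring above. Column 0 gets a 120 px minimum via cols_minimum and every
# row is forced to the 40 px default height, so minimum_size is computed by
# do_layout() / _update_minimum_size() as described above.
from kivy.uix.widget import Widget

demo = GridLayout(cols=2, cols_minimum={0: 120},
                  row_force_default=True, row_default_height=40)
for _ in range(4):
    demo.add_widget(Widget())
demo.do_layout()
print(demo.minimum_size)   # spacing/padding default to 0, so width is at least 120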
""" 89. Gray Code https://leetcode.com/problems/gray-code/ """ from typing import List class Solution: def grayCode(self, n: int) -> List[int]: res = [0] for i in range(n): res += [x + 2**i for x in reversed(res)] return res def main(): s = Solution() print(s.grayCode(3)) if __name__ == '__main__': raise(SystemExit(main()))
""" Test storage """ from django.test import TestCase class StorageTestCase(TestCase): def test_import(self): from launchlab_django_utils.storage import StaticRootS3Boto3Storage from launchlab_django_utils.storage import MediaRootS3Boto3Storage
"""Linear Algebra Helper Routines.""" from warnings import warn import numpy as np from scipy import sparse from scipy.sparse.linalg import aslinearoperator from scipy.linalg import lapack, get_blas_funcs, eig, svd from .params import set_tol def norm(x, pnorm='2'): """2-norm of a vector. Parameters ---------- x : array_like Vector of complex or real values pnorm : string '2' calculates the 2-norm 'inf' calculates the infinity-norm Returns ------- n : float 2-norm of a vector Notes ----- - currently 1+ order of magnitude faster than scipy.linalg.norm(x), which calls sqrt(numpy.sum(real((conjugate(x)*x)),axis=0)) resulting in an extra copy - only handles the 2-norm and infinity-norm for vectors See Also -------- scipy.linalg.norm : scipy general matrix or vector norm """ x = np.ravel(x) if pnorm == '2': return np.sqrt(np.inner(x.conj(), x).real) if pnorm == 'inf': return np.max(np.abs(x)) raise ValueError('Only the 2-norm and infinity-norm are supported') def infinity_norm(A): """Infinity norm of a matrix (maximum absolute row sum). Parameters ---------- A : csr_matrix, csc_matrix, sparse, or numpy matrix Sparse or dense matrix Returns ------- n : float Infinity norm of the matrix Notes ----- - This serves as an upper bound on spectral radius. - csr and csc avoid a deep copy - dense calls scipy.linalg.norm See Also -------- scipy.linalg.norm : dense matrix norms Examples -------- >>> import numpy as np >>> from scipy.sparse import spdiags >>> from pyamg.util.linalg import infinity_norm >>> n=10 >>> e = np.ones((n,1)).ravel() >>> data = [ -1*e, 2*e, -1*e ] >>> A = spdiags(data,[-1,0,1],n,n) >>> print(infinity_norm(A)) 4.0 """ if sparse.isspmatrix_csr(A) or sparse.isspmatrix_csc(A): # avoid copying index and ptr arrays abs_A = A.__class__((np.abs(A.data), A.indices, A.indptr), shape=A.shape) return (abs_A * np.ones((A.shape[1]), dtype=A.dtype)).max() if sparse.isspmatrix(A): return (abs(A) * np.ones((A.shape[1]), dtype=A.dtype)).max() return np.dot(np.abs(A), np.ones((A.shape[1],), dtype=A.dtype)).max() def axpy(x, y, a=1.0): """Quick level-1 call to BLAS y = a*x+y. Parameters ---------- x : array_like nx1 real or complex vector y : array_like nx1 real or complex vector a : float real or complex scalar Returns ------- y : array_like Input variable y is rewritten Notes ----- The call to get_blas_funcs automatically determines the prefix for the blas call. """ fn = get_blas_funcs(['axpy'], [x, y])[0] fn(x, y, a) def _approximate_eigenvalues(A, maxiter, symmetric=None, initial_guess=None): """Apprixmate eigenvalues. Used by approximate_spectral_radius and condest. Returns [W, E, H, V, breakdown_flag], where W and E are the eigenvectors and eigenvalues of the Hessenberg matrix H, respectively, and V is the Krylov space. breakdown_flag denotes whether Lanczos/Arnoldi suffered breakdown. E is therefore the approximate eigenvalues of A. To obtain approximate eigenvectors of A, compute V*W. 
""" A = aslinearoperator(A) # A could be dense or sparse, or something weird # Choose tolerance for deciding if break-down has occurred breakdown = set_tol(A.dtype) breakdown_flag = False if A.shape[0] != A.shape[1]: raise ValueError('expected square matrix') maxiter = min(A.shape[0], maxiter) if initial_guess is None: v0 = np.random.rand(A.shape[1], 1) if A.dtype == complex: v0 = v0 + 1.0j * np.random.rand(A.shape[1], 1) else: v0 = initial_guess v0 /= norm(v0) # Important to type H based on v0, so that a real nonsymmetric matrix, can # have an imaginary initial guess for its Arnoldi Krylov space H = np.zeros((maxiter+1, maxiter), dtype=np.find_common_type([v0.dtype, A.dtype], [])) V = [v0] beta = 0.0 for j in range(maxiter): w = A * V[-1] if symmetric: if j >= 1: H[j-1, j] = beta w -= beta * V[-2] alpha = np.dot(np.conjugate(w.ravel()), V[-1].ravel()) H[j, j] = alpha w -= alpha * V[-1] # axpy(V[-1],w,-alpha) beta = norm(w) H[j+1, j] = beta if (H[j+1, j] < breakdown): breakdown_flag = True break w /= beta V.append(w) V = V[-2:] # retain only last two vectors else: # orthogonalize against Vs for i, v in enumerate(V): H[i, j] = np.dot(np.conjugate(v.ravel()), w.ravel()) w = w - H[i, j]*v H[j+1, j] = norm(w) if (H[j+1, j] < breakdown): breakdown_flag = True if H[j+1, j] != 0.0: w = w/H[j+1, j] V.append(w) break w = w/H[j+1, j] V.append(w) # if upper 2x2 block of Hessenberg matrix H is almost symmetric, # and the user has not explicitly specified symmetric=False, # then switch to symmetric Lanczos algorithm # if symmetric is not False and j == 1: # if abs(H[1,0] - H[0,1]) < 1e-12: # #print("using symmetric mode") # symmetric = True # V = V[1:] # H[1,0] = H[0,1] # beta = H[2,1] # print("Approximated spectral radius in %d iterations" % (j + 1)) Eigs, Vects = eig(H[:j+1, :j+1], left=False, right=True) return (Vects, Eigs, H, V, breakdown_flag) def approximate_spectral_radius(A, tol=0.01, maxiter=15, restart=5, symmetric=None, initial_guess=None, return_vector=False): """Approximate the spectral radius of a matrix. Parameters ---------- A : {dense or sparse matrix} E.g. csr_matrix, csc_matrix, ndarray, etc. tol : {scalar} Relative tolerance of approximation, i.e., the error divided by the approximate spectral radius is compared to tol. maxiter : {integer} Maximum number of iterations to perform restart : {integer} Number of restarted Arnoldi processes. For example, a value of 0 will run Arnoldi once, for maxiter iterations, and a value of 1 will restart Arnoldi once, using the maximal eigenvector from the first Arnoldi process as the initial guess. symmetric : {boolean} True - if A is symmetric Lanczos iteration is used (more efficient) False - if A is non-symmetric Arnoldi iteration is used (less efficient) initial_guess : {array|None} If n x 1 array, then use as initial guess for Arnoldi/Lanczos. If None, then use a random initial guess. return_vector : {boolean} True - return an approximate dominant eigenvector and the spectral radius. False - Do not return the approximate dominant eigenvector Returns ------- An approximation to the spectral radius of A, and if return_vector=True, then also return the approximate dominant eigenvector Notes ----- The spectral radius is approximated by looking at the Ritz eigenvalues. Arnoldi iteration (or Lanczos) is used to project the matrix A onto a Krylov subspace: H = Q* A Q. The eigenvalues of H (i.e. 
the Ritz eigenvalues) should represent the eigenvalues of A in the sense that the minimum and maximum values are usually well matched (for the symmetric case it is true since the eigenvalues are real). References ---------- .. [1] Z. Bai, J. Demmel, J. Dongarra, A. Ruhe, and H. van der Vorst, editors. "Templates for the Solution of Algebraic Eigenvalue Problems: A Practical Guide", SIAM, Philadelphia, 2000. Examples -------- >>> from pyamg.util.linalg import approximate_spectral_radius >>> import numpy as np >>> from scipy.linalg import eigvals, norm >>> A = np.array([[1.,0.],[0.,1.]]) >>> sr = approximate_spectral_radius(A,maxiter=3) >>> print(f'{sr:2.6}') 1.0 >>> print(max([norm(x) for x in eigvals(A)])) 1.0 """ if not hasattr(A, 'rho') or return_vector: # somehow more restart causes a nonsymmetric case to fail...look at # this what about A.dtype=int? convert somehow? # The use of the restart vector v0 requires that the full Krylov # subspace V be stored. So, set symmetric to False. symmetric = False if maxiter < 1: raise ValueError('expected maxiter > 0') if restart < 0: raise ValueError('expected restart >= 0') if A.dtype == int: raise ValueError('expected A to be float (complex or real)') if A.shape[0] != A.shape[1]: raise ValueError('expected square A') if initial_guess is None: v0 = np.random.rand(A.shape[1], 1) if A.dtype == complex: v0 = v0 + 1.0j * np.random.rand(A.shape[1], 1) else: if initial_guess.shape[0] != A.shape[0]: raise ValueError('initial_guess and A must have same shape') if (len(initial_guess.shape) > 1) and (initial_guess.shape[1] > 1): raise ValueError('initial_guess must be an (n,1) or\ (n,) vector') v0 = initial_guess.reshape(-1, 1) v0 = np.array(v0, dtype=A.dtype) for j in range(restart+1): [evect, ev, H, V, breakdown_flag] =\ _approximate_eigenvalues(A, maxiter, symmetric, initial_guess=v0) # Calculate error in dominant eigenvector nvecs = ev.shape[0] max_index = np.abs(ev).argmax() error = H[nvecs, nvecs-1] * evect[-1, max_index] # error is a fast way of calculating the following line # error2 = ( A - ev[max_index]*sp.mat( # sp.eye(A.shape[0],A.shape[1])) )*\ # ( sp.mat(sp.hstack(V[:-1]))*\ # evect[:,max_index].reshape(-1,1) ) # print(str(error) + " " + str(sp.linalg.norm(e2))) v0 = np.dot(np.hstack(V[:-1]), evect[:, max_index].reshape(-1, 1)) if np.abs(error)/np.abs(ev[max_index]) < tol: # halt if below relative tolerance break if breakdown_flag: warn(f'Breakdown occured in step {j}') break # end j-loop rho = np.abs(ev[max_index]) if sparse.isspmatrix(A): A.rho = rho if return_vector: return (rho, v0) return rho return A.rho def condest(A, maxiter=25, symmetric=False): r"""Estimates the condition number of A. Parameters ---------- A : {dense or sparse matrix} e.g. array, matrix, csr_matrix, ... maxiter: {int} Max number of Arnoldi/Lanczos iterations symmetric : {bool} If symmetric use the far more efficient Lanczos algorithm, Else use Arnoldi. If hermitian, use symmetric=True. If complex symmetric, use symmetric=False. Returns ------- Estimate of cond(A) with \|lambda_max\| / \|lambda_min\| or simga_max / sigma_min through the use of Arnoldi or Lanczos iterations, depending on the symmetric flag Notes ----- The condition number measures how large of a change in the the problems solution is caused by a change in problem's input. Large condition numbers indicate that small perturbations and numerical errors are magnified greatly when solving the system. 
Examples -------- >>> import numpy as np >>> from pyamg.util.linalg import condest >>> c = condest(np.array([[1.,0.],[0.,2.]])) >>> print(f'{c:2.6}') 2.0 """ C = aslinearoperator(A) power = 1 if not symmetric: def matvec(v): return C.rmatvec(C.A @ v) C.matvec = matvec power = 0.5 [evect, ev, H, V, breakdown_flag] =\ _approximate_eigenvalues(C, maxiter, symmetric) del evect, H, V, breakdown_flag return (np.max([norm(x) for x in ev])/min(norm(x) for x in ev))**power def cond(A): """Return condition number of A. Parameters ---------- A : {dense or sparse matrix} e.g. array, matrix, csr_matrix, ... Returns ------- 2-norm condition number through use of the SVD Use for small to moderate sized dense matrices. For large sparse matrices, use condest. Notes ----- The condition number measures how large of a change in the problems solution is caused by a change in problem's input. Large condition numbers indicate that small perturbations and numerical errors are magnified greatly when solving the system. Examples -------- >>> import numpy as np >>> from pyamg.util.linalg import condest >>> c = condest(np.array([[1.0,0.],[0.,2.0]])) >>> print(f'{c:2.6}') 2.0 """ if A.shape[0] != A.shape[1]: raise ValueError('expected square matrix') if sparse.isspmatrix(A): A = A.toarray() U, Sigma, Vh = svd(A) del U, Vh # 2-Norm Condition Number return np.max(Sigma)/min(Sigma) def ishermitian(A, fast_check=True, tol=1e-6, verbose=False): r"""Return True if A is Hermitian to within tol. Parameters ---------- A : {dense or sparse matrix} e.g. array, matrix, csr_matrix, ... fast_check : {bool} If True, use the heuristic < Ax, y> = < x, Ay> for random vectors x and y to check for conjugate symmetry. If False, compute A - A.conj().T. tol : {float} Symmetry tolerance verbose: {bool} prints max( \|A - A.conj().T\| ) if nonhermitian and fast_check=False.. \| <Ax, y> - <x, Ay> ) \| / sqrt( \| <Ax, y> * <x, Ay> \| ) if nonhermitian and fast_check=True Returns ------- True if hermitian False if nonhermitian Notes ----- This function applies a simple test of conjugate symmetry Examples -------- >>> import numpy as np >>> from pyamg.util.linalg import ishermitian >>> ishermitian(np.array([[1,2],[1,1]])) False >>> from pyamg.gallery import poisson >>> ishermitian(poisson((10,10))) True """ # convert to array type if not sparse.isspmatrix(A): A = np.asarray(A) if fast_check: x = np.random.rand(A.shape[0], 1) y = np.random.rand(A.shape[0], 1) if A.dtype == complex: x = x + 1.0j*np.random.rand(A.shape[0], 1) y = y + 1.0j*np.random.rand(A.shape[0], 1) xAy = np.dot((A.dot(x)).conjugate().T, y) xAty = np.dot(x.conjugate().T, A.dot(y)) diff = float(np.abs(xAy - xAty) / np.sqrt(np.abs(xAy*xAty))) else: # compute the difference, A - A.conj().T if sparse.isspmatrix(A): diff = np.ravel((A - A.conj().T).data) else: diff = np.ravel(A - A.conj().T) if np.max(diff.shape) == 0: diff = 0 else: diff = np.max(np.abs(diff)) if diff < tol: diff = 0 return True if verbose: print(diff) return False def pinv_array(a, tol=None): """Calculate the Moore-Penrose pseudo inverse of each block of the 3D array a. Parameters ---------- a : {dense array} Is of size (n, m, m) tol : {float} Used by gelss to filter numerically zeros singular values. If None, a suitable value is chosen for you. Returns ------- Nothing, a is modified in place so that a[k] holds the pseudoinverse of that block. 
Notes ----- By using lapack wrappers, this can be much faster for large n, than directly calling a pseudoinverse (SVD) Examples -------- >>> import numpy as np >>> from pyamg.util.linalg import pinv_array >>> a = np.array([[[1.,2.],[1.,1.]], [[1.,1.],[3.,3.]]]) >>> ac = a.copy() >>> # each block of a is inverted in-place >>> pinv_array(a) """ n = a.shape[0] m = a.shape[1] if m == 1: # Pseudo-inverse of 1 x 1 matrices is trivial zero_entries = (a == 0.0).nonzero()[0] a[zero_entries] = 1.0 a[:] = 1.0/a a[zero_entries] = 0.0 del zero_entries else: # The block size is greater than 1 # Create necessary arrays and function pointers for calculating pinv gelss, gelss_lwork = lapack.get_lapack_funcs(('gelss', 'gelss_lwork'), (np.ones((1,), dtype=a.dtype))) RHS = np.eye(m, dtype=a.dtype) # pylint: disable=protected-access lwork = lapack._compute_lwork(gelss_lwork, m, m, m) # pylint: enable=protected-access # Choose tolerance for which singular values are zero in *gelss below if tol is None: tol = set_tol(a.dtype) # Invert each block of a for kk in range(n): gelssoutput = gelss(a[kk], RHS, cond=tol, lwork=lwork, overwrite_a=True, overwrite_b=False) a[kk] = gelssoutput[1]
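# Verification sketch (an addition, assuming this module's namespace, where np
# is already imported): pinv_array() above inverts each block in place, so the
# result should agree with numpy.linalg.pinv applied block by block.
_blocks = np.array([[[1., 2.], [1., 1.]], [[1., 1.], [3., 3.]]])
_expected = np.array([np.linalg.pinv(b) for b in _blocks])
pinv_array(_blocks)
assert np.allclose(_blocks, _expected, atol=1e-8)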
import sys
sys.path.insert(0, '../src/')

import rospy
import math
from math import sin, cos
from geometry_msgs.msg import Twist
from sensor_msgs.msg import LaserScan
from collections import namedtuple

Obstacle = namedtuple('Obstacle', ['r', 'theta'])

from obstacle_avoidance import ObstacleAvoidance

import unittest


class TestCurvatureCalculations(unittest.TestCase):

    def test_left(self):
        # Obstacle = namedtuple('Obstacle', ['r', 'theta'])
        oa = ObstacleAvoidance()
        v = 2
        omega = .1
        originalCurvature = omega / v
        pathWidth = 1
        filteredListOfRThetaPairs = []
        filteredListOfRThetaPairs.append(Obstacle(r=1.6328, theta=-0.4421))
        filteredListOfRThetaPairs.append(Obstacle(r=1.4904, theta=-0.2019))
        filteredListOfRThetaPairs.append(Obstacle(r=1.0792, theta=-0.3143))
        filteredListOfRThetaPairs.append(Obstacle(r=1.4444, theta=-0.3247))
        filteredListOfRThetaPairs.append(Obstacle(r=1.1740, theta=-0.2601))
        filteredListOfRThetaPairs.append(Obstacle(r=1.2565, theta=-0.2686))
        filteredListOfRThetaPairs.append(Obstacle(r=1.5160, theta=-0.5730))
        filteredListOfRThetaPairs.append(Obstacle(r=1.7103, theta=-0.5350))
        filteredListOfRThetaPairs.append(Obstacle(r=1.2089, theta=-0.0008))
        filteredListOfRThetaPairs.append(Obstacle(r=1.7064, theta=-0.5072))
        curvatureToPassObstaclesOnLeft = oa.calculateCurvatureToPassObstaclesOnLeft(originalCurvature, pathWidth, filteredListOfRThetaPairs)
        print(str(curvatureToPassObstaclesOnLeft))
        self.assertTrue(abs(curvatureToPassObstaclesOnLeft - 0.8240) < 0.001)

    def test_right(self):
        # Obstacle = namedtuple('Obstacle', ['r', 'theta'])
        oa = ObstacleAvoidance()
        v = 2
        omega = .1
        originalCurvature = omega / v
        pathWidth = 1
        filteredListOfRThetaPairs = []
        filteredListOfRThetaPairs.append(Obstacle(r=1.6328, theta=-0.4421))
        filteredListOfRThetaPairs.append(Obstacle(r=1.4904, theta=-0.2019))
        filteredListOfRThetaPairs.append(Obstacle(r=1.0792, theta=-0.3143))
        filteredListOfRThetaPairs.append(Obstacle(r=1.4444, theta=-0.3247))
        filteredListOfRThetaPairs.append(Obstacle(r=1.1740, theta=-0.2601))
        filteredListOfRThetaPairs.append(Obstacle(r=1.2565, theta=-0.2686))
        filteredListOfRThetaPairs.append(Obstacle(r=1.5160, theta=-0.5730))
        filteredListOfRThetaPairs.append(Obstacle(r=1.7103, theta=-0.5350))
        filteredListOfRThetaPairs.append(Obstacle(r=1.2089, theta=-0.0008))
        filteredListOfRThetaPairs.append(Obstacle(r=1.7064, theta=-0.5072))
        curvatureToPassObstaclesOnRight = oa.calculateCurvatureToPassObstaclesOnRight(originalCurvature, pathWidth, filteredListOfRThetaPairs)
        print(str(curvatureToPassObstaclesOnRight))
        self.assertTrue(abs(curvatureToPassObstaclesOnRight - (-1.8228)) < 0.001)


if __name__ == '__main__':
    unittest.main()
import os, struct, time try: import fcntl CAN_LOCK = True except ImportError: CAN_LOCK = False LOCK = False CACHE_HEADERS = False __headerCache = {} longFormat = "!L" longSize = struct.calcsize(longFormat) floatFormat = "!f" floatSize = struct.calcsize(floatFormat) timestampFormat = "!L" timestampSize = struct.calcsize(timestampFormat) valueFormat = "!d" valueSize = struct.calcsize(valueFormat) pointFormat = "!Ld" pointSize = struct.calcsize(pointFormat) metadataFormat = "!2LfL" metadataSize = struct.calcsize(metadataFormat) archiveInfoFormat = "!3L" archiveInfoSize = struct.calcsize(archiveInfoFormat) debug = startBlock = endBlock = lambda *a,**k: None class WhisperException(Exception): """Base class for whisper exceptions.""" class InvalidConfiguration(WhisperException): """Invalid configuration.""" class InvalidTimeInterval(WhisperException): """Invalid time interval.""" class TimestampNotCovered(WhisperException): """Timestamp not covered by any archives in this database.""" def enableDebug(): global open, debug, startBlock, endBlock class open(file): def __init__(self,*args,**kwargs): file.__init__(self,*args,**kwargs) self.writeCount = 0 self.readCount = 0 def write(self,data): self.writeCount += 1 debug('WRITE %d bytes #%d' % (len(data),self.writeCount)) return file.write(self,data) def read(self,bytes): self.readCount += 1 debug('READ %d bytes #%d' % (bytes,self.readCount)) return file.read(self,bytes) def debug(message): print 'DEBUG :: %s' % message __timingBlocks = {} def startBlock(name): __timingBlocks[name] = time.time() def endBlock(name): debug("%s took %.5f seconds" % (name,time.time() - __timingBlocks.pop(name))) def __readHeader(fh): info = __headerCache.get(fh.name) if info: return info #startBlock('__readHeader') originalOffset = fh.tell() fh.seek(0) packedMetadata = fh.read(metadataSize) (lastUpdate,maxRetention,xff,archiveCount) = struct.unpack(metadataFormat,packedMetadata) archives = [] for i in xrange(archiveCount): packedArchiveInfo = fh.read(archiveInfoSize) (offset,secondsPerPoint,points) = struct.unpack(archiveInfoFormat,packedArchiveInfo) archiveInfo = { 'offset' : offset, 'secondsPerPoint' : secondsPerPoint, 'points' : points, 'retention' : secondsPerPoint * points, 'size' : points * pointSize, } archives.append(archiveInfo) fh.seek(originalOffset) info = { #'lastUpdate' : lastUpdate, # Deprecated 'maxRetention' : maxRetention, 'xFilesFactor' : xff, 'archives' : archives, } if CACHE_HEADERS: __headerCache[fh.name] = info #endBlock('__readHeader') return info def __changeLastUpdate(fh): return #XXX Make this a NOP, use os.stat(filename).st_mtime instead startBlock('__changeLastUpdate()') originalOffset = fh.tell() fh.seek(0) #Based on assumption that first field is lastUpdate now = int( time.time() ) packedTime = struct.pack(timestampFormat,now) fh.write(packedTime) fh.seek(originalOffset) endBlock('__changeLastUpdate()') def create(path,archiveList,xFilesFactor=0.5): """create(path,archiveList,xFilesFactor=0.5) path is a string archiveList is a list of archives, each of which is of the form (secondsPerPoint,numberOfPoints) xFilesFactor specifies the fraction of data points in a propagation interval that must have known values for a propagation to occur """ #Validate archive configurations... 
if not archiveList: raise InvalidConfiguration("You must specify at least one archive configuration!") archiveList.sort(key=lambda a: a[0]) #sort by precision (secondsPerPoint) for i,archive in enumerate(archiveList): if i == len(archiveList) - 1: break next = archiveList[i+1] if not (archive[0] < next[0]): raise InvalidConfiguration("You cannot configure two archives " "with the same precision %s,%s" % (archive,next)) if (next[0] % archive[0]) != 0: raise InvalidConfiguration("Higher precision archives' precision " "must evenly divide all lower precision archives' precision %s,%s" \ % (archive[0],next[0])) retention = archive[0] * archive[1] nextRetention = next[0] * next[1] if not (nextRetention > retention): raise InvalidConfiguration("Lower precision archives must cover " "larger time intervals than higher precision archives %s,%s" \ % (archive,next)) #Looks good, now we create the file and write the header if os.path.exists(path): raise InvalidConfiguration("File %s already exists!" % path) fh = open(path,'wb') if LOCK: fcntl.flock( fh.fileno(), fcntl.LOCK_EX ) lastUpdate = struct.pack( timestampFormat, int(time.time()) ) oldest = sorted([secondsPerPoint * points for secondsPerPoint,points in archiveList])[-1] maxRetention = struct.pack( longFormat, oldest ) xFilesFactor = struct.pack( floatFormat, float(xFilesFactor) ) archiveCount = struct.pack(longFormat, len(archiveList)) packedMetadata = lastUpdate + maxRetention + xFilesFactor + archiveCount fh.write(packedMetadata) headerSize = metadataSize + (archiveInfoSize * len(archiveList)) archiveOffsetPointer = headerSize for secondsPerPoint,points in archiveList: archiveInfo = struct.pack(archiveInfoFormat, archiveOffsetPointer, secondsPerPoint, points) fh.write(archiveInfo) archiveOffsetPointer += (points * pointSize) zeroes = '\x00' * (archiveOffsetPointer - headerSize) fh.write(zeroes) fh.close() def __propagate(fh,timestamp,xff,higher,lower): lowerIntervalStart = timestamp - (timestamp % lower['secondsPerPoint']) lowerIntervalEnd = lowerIntervalStart + lower['secondsPerPoint'] fh.seek(higher['offset']) packedPoint = fh.read(pointSize) (higherBaseInterval,higherBaseValue) = struct.unpack(pointFormat,packedPoint) if higherBaseInterval == 0: higherFirstOffset = higher['offset'] else: timeDistance = lowerIntervalStart - higherBaseInterval pointDistance = timeDistance / higher['secondsPerPoint'] byteDistance = pointDistance * pointSize higherFirstOffset = higher['offset'] + (byteDistance % higher['size']) higherPoints = lower['secondsPerPoint'] / higher['secondsPerPoint'] higherSize = higherPoints * pointSize relativeFirstOffset = higherFirstOffset - higher['offset'] relativeLastOffset = (relativeFirstOffset + higherSize) % higher['size'] higherLastOffset = relativeLastOffset + higher['offset'] fh.seek(higherFirstOffset) if higherFirstOffset < higherLastOffset: #we don't wrap the archive seriesString = fh.read(higherLastOffset - higherFirstOffset) else: #We do wrap the archive higherEnd = higher['offset'] + higher['size'] seriesString = fh.read(higherEnd - higherFirstOffset) fh.seek(higher['offset']) seriesString += fh.read(higherLastOffset - higher['offset']) #Now we unpack the series data we just read byteOrder,pointTypes = pointFormat[0],pointFormat[1:] points = len(seriesString) / pointSize seriesFormat = byteOrder + (pointTypes * points) unpackedSeries = struct.unpack(seriesFormat, seriesString) #And finally we construct a list of values neighborValues = [None] * points currentInterval = lowerIntervalStart step = 
higher['secondsPerPoint'] for i in xrange(0,len(unpackedSeries),2): pointTime = unpackedSeries[i] if pointTime == currentInterval: neighborValues[i/2] = unpackedSeries[i+1] currentInterval += step #Propagate aggregateValue to propagate from neighborValues if we have enough known points knownValues = [v for v in neighborValues if v is not None] if not knownValues: return False knownPercent = float(len(knownValues)) / float(len(neighborValues)) if knownPercent >= xff: #we have enough data to propagate a value! aggregateValue = float(sum(knownValues)) / float(len(knownValues)) #TODO another CF besides average? myPackedPoint = struct.pack(pointFormat,lowerIntervalStart,aggregateValue) fh.seek(lower['offset']) packedPoint = fh.read(pointSize) (lowerBaseInterval,lowerBaseValue) = struct.unpack(pointFormat,packedPoint) if lowerBaseInterval == 0: #First propagated update to this lower archive fh.seek(lower['offset']) fh.write(myPackedPoint) else: #Not our first propagated update to this lower archive timeDistance = lowerIntervalStart - lowerBaseInterval pointDistance = timeDistance / lower['secondsPerPoint'] byteDistance = pointDistance * pointSize lowerOffset = lower['offset'] + (byteDistance % lower['size']) fh.seek(lowerOffset) fh.write(myPackedPoint) return True else: return False def update(path,value,timestamp=None): """update(path,value,timestamp=None) path is a string value is a float timestamp is either an int or float """ value = float(value) fh = open(path,'r+b') return file_update(fh, value, timestamp) def file_update(fh, value, timestamp): if LOCK: fcntl.flock( fh.fileno(), fcntl.LOCK_EX ) header = __readHeader(fh) now = int( time.time() ) if timestamp is None: timestamp = now timestamp = int(timestamp) diff = now - timestamp if not ((diff < header['maxRetention']) and diff >= 0): raise TimestampNotCovered("Timestamp not covered by any archives in " "this database.") for i,archive in enumerate(header['archives']): #Find the highest-precision archive that covers timestamp if archive['retention'] < diff: continue lowerArchives = header['archives'][i+1:] #We'll pass on the update to these lower precision archives later break #First we update the highest-precision archive myInterval = timestamp - (timestamp % archive['secondsPerPoint']) myPackedPoint = struct.pack(pointFormat,myInterval,value) fh.seek(archive['offset']) packedPoint = fh.read(pointSize) (baseInterval,baseValue) = struct.unpack(pointFormat,packedPoint) if baseInterval == 0: #This file's first update fh.seek(archive['offset']) fh.write(myPackedPoint) baseInterval,baseValue = myInterval,value else: #Not our first update timeDistance = myInterval - baseInterval pointDistance = timeDistance / archive['secondsPerPoint'] byteDistance = pointDistance * pointSize myOffset = archive['offset'] + (byteDistance % archive['size']) fh.seek(myOffset) fh.write(myPackedPoint) #Now we propagate the update to lower-precision archives #startBlock('update propagation') higher = archive for lower in lowerArchives: if not __propagate(fh,myInterval,header['xFilesFactor'],higher,lower): break higher = lower #endBlock('update propagation') __changeLastUpdate(fh) fh.close() def update_many(path,points): """update_many(path,points) path is a string points is a list of (timestamp,value) points """ if not points: return points = [ (int(t),float(v)) for (t,v) in points] points.sort(key=lambda p: p[0],reverse=True) #order points by timestamp, newest first fh = open(path,'r+b') return file_update_many(fh, points) def file_update_many(fh, points): if LOCK: 
fcntl.flock( fh.fileno(), fcntl.LOCK_EX ) header = __readHeader(fh) now = int( time.time() ) archives = iter( header['archives'] ) currentArchive = archives.next() #debug(' update_many currentArchive=%s' % str(currentArchive)) currentPoints = [] for point in points: age = now - point[0] #debug(' update_many iterating points, point=%s age=%d' % (str(point),age)) while currentArchive['retention'] < age: #we can't fit any more points in this archive #debug(' update_many this point is too old to fit here, currentPoints=%d' % len(currentPoints)) if currentPoints: #commit all the points we've found that it can fit currentPoints.reverse() #put points in chronological order __archive_update_many(fh,header,currentArchive,currentPoints) currentPoints = [] try: currentArchive = archives.next() #debug(' update_many using next archive %s' % str(currentArchive)) except StopIteration: #debug(' update_many no more archives!') currentArchive = None break if not currentArchive: break #drop remaining points that don't fit in the database #debug(' update_many adding point=%s' % str(point)) currentPoints.append(point) #debug(' update_many done iterating points') if currentArchive and currentPoints: #don't forget to commit after we've checked all the archives currentPoints.reverse() __archive_update_many(fh,header,currentArchive,currentPoints) __changeLastUpdate(fh) fh.close() def __archive_update_many(fh,header,archive,points): step = archive['secondsPerPoint'] #startBlock('__archive_update_many file=%s archive=%s points=%d' % (fh.name,step,len(points))) alignedPoints = [ (timestamp - (timestamp % step), value) for (timestamp,value) in points ] #Create a packed string for each contiguous sequence of points #startBlock('__archive_update_many string packing') packedStrings = [] previousInterval = None currentString = "" for (interval,value) in alignedPoints: #debug('__archive_update_many iterating alignedPoint at %s' % interval) if (not previousInterval) or (interval == previousInterval + step): #debug('__archive_update_many was expected, packing onto currentString') currentString += struct.pack(pointFormat,interval,value) previousInterval = interval else: numberOfPoints = len(currentString) / pointSize startInterval = previousInterval - (step * (numberOfPoints-1)) #debug('__archive_update_many was NOT expected, appending to packedStrings startInterval=%s currentString=%d bytes' % (startInterval,len(currentString))) packedStrings.append( (startInterval,currentString) ) currentString = struct.pack(pointFormat,interval,value) previousInterval = interval if currentString: #startInterval = previousInterval - (step * len(currentString) / pointSize) + step numberOfPoints = len(currentString) / pointSize startInterval = previousInterval - (step * (numberOfPoints-1)) #debug('__archive_update_many done iterating alignedPoints, remainder currentString of %d bytes, startInterval=%s' % (len(currentString),startInterval)) packedStrings.append( (startInterval,currentString) ) #endBlock('__archive_update_many string packing') #Read base point and determine where our writes will start fh.seek(archive['offset']) packedBasePoint = fh.read(pointSize) (baseInterval,baseValue) = struct.unpack(pointFormat,packedBasePoint) if baseInterval == 0: #This file's first update #debug('__archive_update_many first update') baseInterval = packedStrings[0][0] #use our first string as the base, so we start at the start #debug('__archive_update_many baseInterval is %s' % baseInterval) #Write all of our packed strings in locations determined by the 
baseInterval #startBlock('__archive_update_many write() operations') for (interval,packedString) in packedStrings: timeDistance = interval - baseInterval pointDistance = timeDistance / step byteDistance = pointDistance * pointSize myOffset = archive['offset'] + (byteDistance % archive['size']) fh.seek(myOffset) archiveEnd = archive['offset'] + archive['size'] bytesBeyond = (myOffset + len(packedString)) - archiveEnd #debug(' __archive_update_many myOffset=%d packedString=%d archiveEnd=%d bytesBeyond=%d' % (myOffset,len(packedString),archiveEnd,bytesBeyond)) if bytesBeyond > 0: fh.write( packedString[:-bytesBeyond] ) #debug('We wrapped an archive!') assert fh.tell() == archiveEnd, "archiveEnd=%d fh.tell=%d bytesBeyond=%d len(packedString)=%d" % (archiveEnd,fh.tell(),bytesBeyond,len(packedString)) fh.seek( archive['offset'] ) fh.write( packedString[-bytesBeyond:] ) #safe because it can't exceed the archive (retention checking logic above) else: fh.write(packedString) #endBlock('__archive_update_many write() operations') #Now we propagate the updates to lower-precision archives #startBlock('__archive_update_many propagation') higher = archive lowerArchives = [arc for arc in header['archives'] if arc['secondsPerPoint'] > archive['secondsPerPoint']] #debug('__archive_update_many I have %d lower archives' % len(lowerArchives)) for lower in lowerArchives: fit = lambda i: i - (i % lower['secondsPerPoint']) lowerIntervals = [fit(p[0]) for p in alignedPoints] uniqueLowerIntervals = set(lowerIntervals) #debug(' __archive_update_many points=%d unique=%d' % (len(alignedPoints),len(uniqueLowerIntervals))) propagateFurther = False for interval in uniqueLowerIntervals: #debug(' __archive_update_many propagating from %d to %d, interval=%d' % (higher['secondsPerPoint'],lower['secondsPerPoint'],interval)) if __propagate(fh,interval,header['xFilesFactor'],higher,lower): propagateFurther = True #debug(' __archive_update_many Successful propagation!') #debug(' __archive_update_many propagateFurther=%s' % propagateFurther) if not propagateFurther: break higher = lower #endBlock('__archive_update_many propagation') #endBlock('__archive_update_many file=%s archive=%s points=%d' % (fh.name,step,len(points))) def info(path): """info(path) path is a string """ fh = open(path,'rb') info = __readHeader(fh) fh.close() return info def fetch(path,fromTime,untilTime=None): """fetch(path,fromTime,untilTime=None) path is a string fromTime is an epoch time untilTime is also an epoch time, but defaults to now """ fh = open(path,'rb') return file_fetch(fh, fromTime, untilTime) def file_fetch(fh, fromTime, untilTime): header = __readHeader(fh) now = int( time.time() ) if untilTime is None: untilTime = now fromTime = int(fromTime) untilTime = int(untilTime) oldestTime = now - header['maxRetention'] if fromTime < oldestTime: fromTime = oldestTime if not (fromTime < untilTime): raise InvalidTimeInterval("Invalid time interval") if untilTime > now: untilTime = now if untilTime < fromTime: untilTime = now diff = now - fromTime for archive in header['archives']: if archive['retention'] >= diff: break fromInterval = int( fromTime - (fromTime % archive['secondsPerPoint']) ) + archive['secondsPerPoint'] untilInterval = int( untilTime - (untilTime % archive['secondsPerPoint']) ) + archive['secondsPerPoint'] fh.seek(archive['offset']) packedPoint = fh.read(pointSize) (baseInterval,baseValue) = struct.unpack(pointFormat,packedPoint) if baseInterval == 0: step = archive['secondsPerPoint'] points = (untilInterval - fromInterval) / step 
timeInfo = (fromInterval,untilInterval,step) valueList = [None] * points return (timeInfo,valueList) #Determine fromOffset timeDistance = fromInterval - baseInterval pointDistance = timeDistance / archive['secondsPerPoint'] byteDistance = pointDistance * pointSize fromOffset = archive['offset'] + (byteDistance % archive['size']) #Determine untilOffset timeDistance = untilInterval - baseInterval pointDistance = timeDistance / archive['secondsPerPoint'] byteDistance = pointDistance * pointSize untilOffset = archive['offset'] + (byteDistance % archive['size']) #Read all the points in the interval fh.seek(fromOffset) if fromOffset < untilOffset: #If we don't wrap around the archive seriesString = fh.read(untilOffset - fromOffset) else: #We do wrap around the archive, so we need two reads archiveEnd = archive['offset'] + archive['size'] seriesString = fh.read(archiveEnd - fromOffset) fh.seek(archive['offset']) seriesString += fh.read(untilOffset - archive['offset']) #Now we unpack the series data we just read (anything faster than unpack?) byteOrder,pointTypes = pointFormat[0],pointFormat[1:] points = len(seriesString) / pointSize seriesFormat = byteOrder + (pointTypes * points) unpackedSeries = struct.unpack(seriesFormat, seriesString) #And finally we construct a list of values (optimize this!) valueList = [None] * points #pre-allocate entire list for speed currentInterval = fromInterval step = archive['secondsPerPoint'] for i in xrange(0,len(unpackedSeries),2): pointTime = unpackedSeries[i] if pointTime == currentInterval: pointValue = unpackedSeries[i+1] valueList[i/2] = pointValue #in-place reassignment is faster than append() currentInterval += step fh.close() timeInfo = (fromInterval,untilInterval,step) return (timeInfo,valueList)
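# Usage sketch for the functions above (an assumption, not part of the original
# module: this file is importable as `whisper` and `metric.wsp` is an existing
# whisper database created elsewhere; the module itself targets Python 2).
import time
import whisper

header = whisper.info('metric.wsp')           # archive layout, xFilesFactor, maxRetention
now = int(time.time())
(fromInterval, untilInterval, step), values = whisper.fetch('metric.wsp', now - 3600)
for i, value in enumerate(values):
    if value is not None:
        print('%d %s' % (fromInterval + i * step, value))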
''' :author: Patrick Lauer This class holds the Artificial Bee Colony(ABC) algorithm, based on Karaboga (2007): D. Karaboga, AN IDEA BASED ON HONEY BEE SWARM FOR NUMERICAL OPTIMIZATION,TECHNICAL REPORT-TR06, Erciyes University, Engineering Faculty, Computer Engineering Department 2005. D. Karaboga, B. Basturk, A powerful and Efficient Algorithm for Numerical Function Optimization: Artificial Bee Colony (ABC) Algorithm, Journal of Global Optimization, Volume:39, Issue:3,pp:459-171, November 2007,ISSN:0925-5001 , doi: 10.1007/s10898-007-9149-x ''' from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from . import _algorithm import spotpy import numpy as np import time import random import itertools class abc(_algorithm): ''' Implements the ABC algorithm from Karaboga (2007). Input ---------- spot_setup: class model: function Should be callable with a parameter combination of the parameter-function and return an list of simulation results (as long as evaluation list) parameter: function When called, it should return a random parameter combination. Which can be e.g. uniform or Gaussian objectivefunction: function Should return the objectivefunction for a given list of a model simulation and observation. evaluation: function Should return the true values as return by the model. dbname: str * Name of the database where parameter, objectivefunction value and simulation results will be saved. dbformat: str * ram: fast suited for short sampling time. no file will be created and results are saved in an array. * csv: A csv file will be created, which you can import afterwards. parallel: str * seq: Sequentiel sampling (default): Normal iterations on one core of your cpu. * mpc: Multi processing: Iterations on all available cores on your cpu (recommended for windows os). * mpi: Message Passing Interface: Parallel computing on cluster pcs (recommended for unix os). 
save_sim: boolean *True: Simulation results will be saved *False: Simulationt results will not be saved ''' def __init__(self, spot_setup, dbname=None, dbformat=None, parallel='seq',save_sim=True): _algorithm.__init__(self,spot_setup, dbname=dbname, dbformat=dbformat, parallel=parallel,save_sim=save_sim) def simulate(self,id_params_tuple): id,params = id_params_tuple simulations=self.model(params) return id,params,simulations def sample(self,repetitions,eb=48,a=(1/10),peps=0.0001,ownlimit=False,limit=24): """ Parameters ---------- repetitions: int maximum number of function evaluations allowed during optimization eb: int number of employed bees (half of population size) a: float mutation factor peps: float Convergence criterium ownlimit: boolean determines if an userdefined limit is set or not limit: int sets the limit """ #Initialize the Progress bar starttime = time.time() intervaltime = starttime #Initialize ABC parameters: randompar=self.parameter()['random'] self.nopt=randompar.size random.seed() if ownlimit == True: self.limit=limit else: self.limit=eb lb,ub=self.parameter()['minbound'],self.parameter()['maxbound'] #Initialization work=[] #Calculate the objective function param_generator = ((rep,list(self.parameter()['random'])) for rep in range(eb)) for rep,randompar,simulations in self.repeat(param_generator): #Calculate fitness like = self.objectivefunction(evaluation = self.evaluation, simulation = simulations) self.status(rep,like,randompar) #Save everything in the database self.datawriter.save(like,randompar,simulations=simulations) c=0 p=0 work.append([like,randompar,like,randompar,c,p])#(fit_x,x,fit_v,v,limit,normalized fitness) #Progress bar acttime=time.time() #get str showing approximate timeleft to end of simulation in H, M, S timestr = time.strftime("%H:%M:%S", time.gmtime(round(((acttime-starttime)/ (rep + 1))*(repetitions-(rep + 1 ))))) #Refresh progressbar every second if acttime-intervaltime>=2: text='%i of %i (best like=%g) est. 
time remaining: %s' % (rep,repetitions, self.status.objectivefunction,timestr) print(text) intervaltime=time.time() icall=0 gnrng=1e100 while icall<repetitions and gnrng>peps: #and criter_change>pcento: psum=0 #Employed bee phase #Generate new input parameters for i,val in enumerate(work): k=i while k==i: k=random.randint(0,(eb-1)) j=random.randint(0,(self.nopt-1)) work[i][3][j]=work[i][1][j]+random.uniform(-a,a)*(work[i][1][j]-work[k][1][j]) if work[i][3][j]<lb[j]: work[i][3][j]=lb[j] if work[i][3][j]>ub[j]: work[i][3][j]=ub[j] ''' #Scout bee phase if work[i][4] >= self.limit: work[i][3]=self.parameter()['random'] work[i][4]=0 ''' #Calculate the objective function param_generator = ((rep,work[rep][3]) for rep in range(eb)) for rep,randompar,simulations in self.repeat(param_generator): #Calculate fitness clike = self.objectivefunction(evaluation = self.evaluation, simulation = simulations) if clike > work[rep][0]: work[rep][1]=work[rep][3] work[rep][0]=clike work[rep][4]=0 else: work[rep][4]=work[rep][4]+1 self.status(rep,work[rep][0],work[rep][1]) self.datawriter.save(clike,work[rep][3],simulations=simulations,chains=icall) icall += 1 #Probability distribution for roulette wheel selection bn=[] for i,val in enumerate(work): psum=psum+(1/work[i][0]) for i,val in enumerate(work): work[i][5]=((1/work[i][0])/psum) bn.append(work[i][5]) bounds = np.cumsum(bn) #Onlooker bee phase #Roulette wheel selection for i,val in enumerate(work): pn=random.uniform(0,1) k=i while k==i: k=random.randint(0,eb-1) for t,vol in enumerate(bounds): if bounds[t]-pn>=0: z=t break j=random.randint(0,(self.nopt-1)) #Generate new input parameters work[i][3][j]=work[z][1][j]+random.uniform(-a,a)*(work[z][1][j]-work[k][1][j]) if work[i][3][j]<lb[j]: work[i][3][j]=lb[j] if work[i][3][j]>ub[j]: work[i][3][j]=ub[j] #Calculate the objective function param_generator = ((rep,work[rep][3]) for rep in range(eb)) for rep,randompar,simulations in self.repeat(param_generator): #Calculate fitness clike = self.objectivefunction(evaluation = self.evaluation, simulation = simulations) if clike > work[rep][0]: work[rep][1]=work[rep][3] work[rep][0]=clike work[rep][4]=0 else: work[rep][4]=work[rep][4]+1 self.status(rep,work[rep][0],work[rep][1]) self.datawriter.save(clike,work[rep][3],simulations=simulations,chains=icall) icall += 1 #Scout bee phase for i,val in enumerate(work): if work[i][4] >= self.limit: work[i][1]=self.parameter()['random'] work[i][4]=0 t,work[i][0],simulations=self.simulate((icall,work[i][1])) clike = self.objectivefunction(evaluation = self.evaluation, simulation = simulations) self.datawriter.save(clike,work[rep][3],simulations=simulations,chains=icall) work[i][0]=clike icall += 1 gnrng=-self.status.objectivefunction text='%i of %i (best like=%g) est. time remaining: %s' % (icall,repetitions,self.status.objectivefunction,timestr) print(text) if icall >= repetitions: print('*** OPTIMIZATION SEARCH TERMINATED BECAUSE THE LIMIT') print('ON THE MAXIMUM NUMBER OF TRIALS ') print(repetitions) print('HAS BEEN EXCEEDED.') if gnrng < peps: print('THE POPULATION HAS CONVERGED TO A PRESPECIFIED SMALL PARAMETER SPACE') print('Best parameter set:') print(self.status.params) text='Duration:'+str(round((acttime-starttime),2))+' s' print(-self.status.objectivefunction) print(icall) try: self.datawriter.finalize() except AttributeError: #Happens if no database was assigned pass
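# Usage sketch for the sampler above (assumptions: spotpy's bundled Rosenbrock
# example setup is importable at the path below; the database name and the
# repetition/bee counts are illustrative only).
if __name__ == '__main__':
    from spotpy.examples.spot_setup_rosenbrock import spot_setup

    sampler = abc(spot_setup(), dbname='abc_rosenbrock', dbformat='csv',
                  parallel='seq', save_sim=True)
    # 2000 model runs with 24 employed bees and the default convergence threshold
    sampler.sample(2000, eb=24, peps=0.0001)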
import json import sys from importlib import import_module from importlib.util import find_spec from owlmixin import OwlMixin, TOption from owlmixin.util import load_json from jumeaux.addons.res2res import Res2ResExecutor from jumeaux.logger import Logger from jumeaux.models import Res2ResAddOnPayload, Response, Request from jumeaux.utils import when_filter logger: Logger = Logger(__name__) LOG_PREFIX = "[res2res/json]" def wrap(anything: bytes, encoding: str) -> str: """Use for example of Transformer.function """ return json.dumps({"wrap": load_json(anything.decode(encoding))}, ensure_ascii=False) class Transformer(OwlMixin): module: str function: str = "transform" class Config(OwlMixin): transformer: Transformer default_encoding: str = "utf8" when: TOption[str] class Executor(Res2ResExecutor): def __init__(self, config: dict) -> None: self.config: Config = Config.from_dict(config or {}) t: Transformer = self.config.transformer try: if not find_spec(t.module): raise ModuleNotFoundError except ModuleNotFoundError as e: logger.error(f"{LOG_PREFIX} Module {t.module} is not existed.") sys.exit(1) try: self.module = getattr(import_module(t.module), t.function) except AttributeError as e: logger.error(f"{LOG_PREFIX} {t.function} is not existed in {t.module} module") sys.exit(1) def exec(self, payload: Res2ResAddOnPayload) -> Res2ResAddOnPayload: req: Request = payload.req res: Response = payload.response if not self.config.when.map(lambda x: when_filter(x, {"req": req, "res": res})).get_or( True ): return payload json_str: str = self.module(res.body, res.encoding.get()) new_encoding: str = res.encoding.get_or(self.config.default_encoding) return Res2ResAddOnPayload.from_dict( { "response": { "body": json_str.encode(new_encoding, errors="replace"), "type": "json", "encoding": new_encoding, "headers": res.headers, "url": res.url, "status_code": res.status_code, "elapsed": res.elapsed, "elapsed_sec": res.elapsed_sec, }, "req": req, "tags": payload.tags, } )
""" Prototype to DOT (Graphviz) converter by Dario Gomez Table format from django-extensions """ from protoExt.utils.utilsBase import Enum, getClassName from protoExt.utils.utilsConvert import slugify2 class GraphModel(): def __init__(self): self.tblStyle = False self.dotSource = 'digraph Sm {' self.dotSource += 'fontname="Helvetica";fontsize = 8;' self.GRAPH_LEVEL = Enum(['all', 'essential', 'required' , 'primary', 'title']) self.GRAPH_FORM = Enum(['orf', 'erf', 'drn']) if self.tblStyle: self.dotSource += 'node [shape="plaintext"];\n' self.tblTitle = '\n{0} [label=<<TABLE BGCOLOR="palegoldenrod" BORDER="0" CELLBORDER="0" CELLSPACING="0" style="width:100px"><TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER" BGCOLOR="olivedrab4"> <FONT FACE="Helvetica Bold" COLOR="white">{1}</FONT> </TD></TR>' self.tblField = '\n<TR><TD ALIGN="LEFT" BORDER="0"><FONT FACE="Helvetica {2}">{0}</FONT></TD><TD ALIGN="LEFT"><FONT FACE="Helvetica {2}">{1}</FONT></TD></TR>' else: # Animal [label = "{{{1}|+ name : string\l+ age : int\l|+ die() : void\l}"] self.dotSource += 'rankdir = BT;node [shape=record,width=0,height=0,concentrate=true];\n' self.tblRecord = '\n{0} [label = "{{{1}|' self.lnkComposition = '[dir=both,arrowhead=diamond,arrowtail=none]\n' self.lnkAgregation = '[dir=both,arrowhead=ediamond,arrowtail=none]\n' self.lnkNoCascade = '[dir=both,arrowhead=diamondtee,arrowtail=none]\n' self.lnkHeritage = '[dir=both,arrowhead=empty,arrowtail=none]\n' self.lnkER = '[dir=both,arrowhead=none,arrowtail=invempty]\n' def getDiagramDefinition(self, diagramSet): self.diagrams = [] self.entities = [] for pDiag in diagramSet: gDiagram = { 'code': getClassName(pDiag.code) , 'label': slugify2( pDiag.code ), 'clusterName': slugify2( getattr(pDiag, 'title', pDiag.code)), 'graphLevel' : getattr(pDiag, 'graphLevel' , self.GRAPH_LEVEL.all), 'graphForm' : getattr(pDiag, 'graphForm' , self.GRAPH_FORM.orf), 'showPrpType': getattr(pDiag, 'showPrpType' , False), 'showBorder' : getattr(pDiag, 'showBorder' , False), 'showFKey' : getattr(pDiag, 'showFKey' , False), 'prefix' : slugify2( getattr(pDiag, 'prefix' , '')), 'entities': [] } for pDiagEntity in pDiag.diagramentity_set.all(): pEntity = pDiagEntity.entity enttCode = self.getEntityCode(pEntity.code, gDiagram.get('prefix')) # Si ya se encuentra en otro diagrama no la dibuja if enttCode in self.entities: continue self.entities.append(enttCode) gEntity = { 'code': enttCode, 'fields': [], 'relations': [] } for pProperty in pEntity.property_set.all(): pptCode = slugify2(pProperty.code, '_') if pProperty.isForeign: pLinkTo = self.getEntityCode(pProperty.relationship.refEntity.code, gDiagram.get('prefix')) gEntity['relations'].append({ 'code': pptCode, 'linkTo': pLinkTo, 'primary': pProperty.isPrimary, 'required': pProperty.isRequired, 'essential': pProperty.isEssential, 'foreign': True }) else: pType = slugify2(pProperty.baseType , '_') gEntity['fields'].append({ 'code': pptCode, 'type': pType or 'string', 'primary': pProperty.isPrimary, 'required': pProperty.isRequired, 'essential': pProperty.isEssential, 'foreign': False }) gDiagram['entities'].append(gEntity) self.diagrams.append(gDiagram) def generateDotModel(self): # Dibuja las entidades for gDiagram in self.diagrams: if gDiagram.get('graphLevel') < self.GRAPH_LEVEL.title : self.dotSource += '\nsubgraph cluster_{0} {{'.format(gDiagram.get('code')) if not gDiagram.get('showBorder', False) : self.dotSource += 'style=dotted;' if len(gDiagram.get('label', '')) > 0: self.dotSource += 'label="{}";'.format(gDiagram.get('label', '')) for 
gEntity in gDiagram['entities']: self.entity2dot(gDiagram, gEntity) self.dotSource += '}\n' # Dibuja los vinculos for gDiagram in self.diagrams: for gEntity in gDiagram['entities']: self.link2dot(gEntity, gDiagram.get( 'showFKey')) self.dotSource += '}' # Dibuja las relaciones return self.dotSource def link2dot(self, gEntity, showFKey): for gLink in gEntity['relations']: pEntity = gEntity.get('code') pLinkTo = gLink.get('linkTo') if ( not showFKey ) and ( pLinkTo not in self.entities ): continue self.dotSource += '{0} -> {1} '.format(pEntity, pLinkTo) + self.lnkComposition def entity2dot(self, gDiagram, gEntity): if self.tblStyle: enttTable = self.tblTitle.format(gEntity.get('code'), gEntity.get('label', gEntity.get('code'))) else: enttRecord = self.tblRecord.format(gEntity.get('code'), gEntity.get('label', gEntity.get('code'))) # 0 : colName; 1 : baseType; 2 : Bold / Italic for gField in gEntity['fields'] + gEntity['relations'] : if gDiagram.get('showPrpType') : sPrpType = gField.get('type', ' ') else : sPrpType = ' ' sPk = '' fildLv = 0 diagLv = gDiagram.get('graphLevel') if gField.get('primary') : fildLv = self.GRAPH_LEVEL.primary sPk = 'Bold' elif gField.get('required'): fildLv = self.GRAPH_LEVEL.required elif gField.get('essential'): fildLv = self.GRAPH_LEVEL.essential # Si no alcanza el nivel if fildLv >= diagLv: sFk = '' if gField.get('foreign'): sFk = ' Italic' if self.tblStyle: enttTable += self.tblField.format(gField.get('code'), sPrpType, sPk + sFk) else: if len(sPk) > 0: sPk = '*' if len(sFk) > 0: sPk += '+' if len(sPk) > 0: sPk += ' ' if len(sPrpType) > 1: sPrpType = ': ' + sPrpType enttRecord += '{2}{0}{1}\l'.format(gField.get('code'), sPrpType, sPk) if self.tblStyle: enttTable += '</TABLE>>]\n' else: enttRecord += '}"]\n' self.dotSource += enttRecord def getEntityCode(self, code, prefix): # Formatea el nombre de la entidad enttCode = code.lower() prefix = prefix or '' if len(prefix) and enttCode.startswith(prefix.lower()): enttCode = enttCode[len(prefix):] return getClassName(enttCode)
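# Usage sketch for GraphModel (assumption: `diagram_set` is an iterable of
# protoExt diagram objects, e.g. Diagram.objects.all(), each exposing a
# diagramentity_set; the result is a Graphviz DOT string that can be rendered
# with `dot -Tpng model.dot -o model.png`).
def export_diagrams_to_dot(diagram_set, path='model.dot'):
    gm = GraphModel()
    gm.getDiagramDefinition(diagram_set)
    with open(path, 'w') as fh:
        fh.write(gm.generateDotModel())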
import json

f = open('treasures.json', 'r')
try:
    foo = json.load(f)
    json_contents = foo
except ValueError:
    json_contents = dict()
f.close()

print 'Type \'q\' to [q]uit'
while True:
    name = raw_input('Treasure Name: ')
    if name == 'q':
        break
    print 'Type \'n\' to stop entering heroes and go to [n]ext treasure'
    set_contents = dict()
    hero = ''
    while True:
        hero = raw_input('Hero name: ')
        if hero == 'n' or hero == 'q':
            break
        else:
            bundle_rating = raw_input('Item set rating [1-3]: ')
            set_contents[hero] = bundle_rating
    json_contents[name] = set_contents
    if hero == 'q':
        break

f = open('treasures.json', 'w')
json.dump(json_contents, f, indent=4)
f.close()
import json from tornado import httpclient as hc from tornado import gen from graphite_beacon.handlers import LOGGER, AbstractHandler class HipChatHandler(AbstractHandler): name = 'hipchat' # Default options defaults = { 'url': 'https://api.hipchat.com', 'room': None, 'key': None, } colors = { 'critical': 'red', 'warning': 'yellow', 'normal': 'green', } def init_handler(self): self.room = self.options.get('room') self.key = self.options.get('key') assert self.room, 'Hipchat room is not defined.' assert self.key, 'Hipchat key is not defined.' self.client = hc.AsyncHTTPClient() @gen.coroutine def notify(self, level, *args, **kwargs): LOGGER.debug("Handler (%s) %s", self.name, level) data = { 'message': self.get_short(level, *args, **kwargs).decode('UTF-8'), 'notify': True, 'color': self.colors.get(level, 'gray'), 'message_format': 'text', } yield self.client.fetch('{url}/v2/room/{room}/notification?auth_token={token}'.format( url=self.options.get('url'), room=self.room, token=self.key), headers={ 'Content-Type': 'application/json'}, method='POST', body=json.dumps(data))
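# Configuration sketch (assumption: graphite-beacon passes these options to the
# handler as `self.options`; the room id and token below are placeholders).
EXAMPLE_HIPCHAT_OPTIONS = {
    'url': 'https://api.hipchat.com',        # v2 API base, the handler's default
    'room': '123456',                        # target room id (required)
    'key': 'YOUR_ROOM_NOTIFICATION_TOKEN',   # auth token (required)
}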
""" Task description (in Estonian): 3. Maatriksi vähendamine (6p) Kirjuta funktsioon vähenda, mis võtab argumendiks arvumaatriksi, milles ridu ja veerge on paarisarv, ning tagastab uue maatriksi, milles on kaks korda vähem ridu ja kaks korda vähem veerge, ja kus iga element on esialgse maatriksi nelja elemendi keskmine, järgnevas näites toodud skeemi järgi: See tähendab, et vähenda([[1,5,2,6,3,6], [1,3,2,7,3,3], [4,8,5,1,1,6], [4,4,9,5,6,1]]) peab tagastama [[2.5, 4.25, 3.75], [5.0, 5.0, 3.5]]. """ from grader import * from KT2_util import make_checker def vähenda(maatriks): tulemus = [] for r in range(0, len(maatriks), 2): rida = [] for c in range(0, len(maatriks[r]), 2): tul = 0 for i in range(4): tul += maatriks[r+i%2][c+i//2] rida.append(tul / 4.0) tulemus.append(rida) return tulemus checker = make_checker(vähenda) checker([[1, 2], [3, 4]], description="Ruudukujuline 2x2 maatriks- {function}({args}) == {expected}") checker([[1, 2, 3, 4], [5, 6, 7, 8]], description="Mitte-ruudukujuline maatriks - {function}({args}) == {expected}") checker([[1,5,2,6,3,6], [1,3,2,7,3,3], [4,8,5,1,1,6], [4,4,9,5,6,1]]) checker([[1,5,2,6,3,6], [1,3,2,7,3,3], [4,8,5,1,1,6], [4,4,9,5,6,1]]) checker([], description="Erijuht, tühi maatriks- {function}({args}) == {expected}") random_tests = [ [[7, 5, 2, 6, 6, 9], [2, 8, 6, 3, 8, 7]], [[3, 1, 0, 9], [0, 5, 1, 7]], [[4, 4], [0, 8], [4, 9], [3, 0], [3, 6], [8, 2]], [[9, 4, 6, 5, 4, 6], [3, 8, 7, 1, 2, 5], [8, 9, 8, 5, 0, 2], [2, 7, 2, 4, 3, 5], [2, 6, 8, 0, 2, 9], [7, 4, 6, 4, 8, 2]], [[-1, -3], [-6, 6], [5, -6], [1, 0]], [[-5, -10, 6, -1], [-8, -10, -5, 7], [-7, 9, -5, -5], [-8, -7, -10, 8]], [[-3, 6, -3, 6], [4, -6, 3, 8], [-9, -6, 7, -6], [6, 6, 4, -3]], [[1, 6], [2, -6]] ] for test_case in random_tests: checker(test_case)
from __future__ import division from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals import numpy as np import xgboost as xgb import argparse from os import path import os from hyperopt import fmin, tpe, hp, STATUS_OK, Trials from utils import * import pickle np.random.seed(345345) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('--yix', type=int, default=0) return parser.parse_args() def evalF1(preds, dtrain): from sklearn.metrics import f1_score labels = dtrain.get_label() return 'f1-score', f1_score(labels, preds > 0.5) def fpreproc(dtrain, dtest, param): label = dtrain.get_label() ratio = float(np.sum(label == 0)) / np.sum(label == 1) param['scale_pos_weight'] = ratio return (dtrain, dtest, param) class Score: def __init__(self, X, y): self.dtrain = xgb.DMatrix(X, label=y) def get_score(self, params): params['max_depth'] = int(params['max_depth']) params['min_child_weight'] = int(params['min_child_weight']) params['num_boost_round'] = int(params['num_boost_round']) print('Training with params:') print(params) cv_result = xgb.cv(params=params, dtrain=self.dtrain, num_boost_round=params['num_boost_round'], nfold=5, stratified=True, feval=evalF1, maximize=True, fpreproc=fpreproc, verbose_eval=True) score = cv_result.ix[params['num_boost_round'] - 1, 0] print(score) return {'loss': -score, 'status': STATUS_OK} def optimize(trials, X, y, max_evals): space = { 'num_boost_round': hp.quniform('num_boost_round', 10, 200, 10), 'eta': hp.quniform('eta', 0.1, 0.3, 0.1), 'gamma': hp.quniform('gamma', 0, 1, 0.2), 'max_depth': hp.quniform('max_depth', 1, 6, 1), 'min_child_weight': hp.quniform('min_child_weight', 1, 3, 1), 'subsample': hp.quniform('subsample', 0.8, 1, 0.1), 'silent': 1, 'objective': 'binary:logistic' } s = Score(X, y) best = fmin(s.get_score, space, algo=tpe.suggest, trials=trials, max_evals=max_evals ) best['max_depth'] = int(best['max_depth']) best['min_child_weight'] = int(best['min_child_weight']) best['num_boost_round'] = int(best['num_boost_round']) del s return best def out_fold_pred(params, X, y, reps): preds = np.zeros((y.shape[0])) params['silent'] = 1 params['objective'] = 'binary:logistic' params['scale_pos_weight'] = float(np.sum(y == 0)) / np.sum(y == 1) for train_ix, test_ix in makeKFold(5, y, reps): X_train, X_test = X[train_ix, :], X[test_ix, :] y_train = y[train_ix] dtrain = xgb.DMatrix(X_train, label=y_train) dtest = xgb.DMatrix(X_test) bst = xgb.train(params=params, dtrain=dtrain, num_boost_round=params['num_boost_round'], evals=[(dtrain, 'train')], feval=evalF1, maximize=True, verbose_eval=None) preds[test_ix] = bst.predict(dtest) return preds def get_model(params, X, y): dtrain = xgb.DMatrix(X, label=y) params['silent'] = 1 params['objective'] = 'binary:logistic' params['scale_pos_weight'] = float(np.sum(y == 0)) / np.sum(y == 1) bst = xgb.train(params=params, dtrain=dtrain, num_boost_round=params['num_boost_round'], evals=[(dtrain, 'train')], feval=evalF1, maximize=True, verbose_eval=None) return bst args = parse_args() data_dir = '../level3-feature/' + str(args.yix) X_train = np.load(path.join(data_dir, 'X_train.npy')) X_test = np.load(path.join(data_dir, 'X_test.npy')) y_train = np.load(path.join(data_dir, 'y_train.npy')) print(X_train.shape, X_test.shape, y_train.shape) X_train_ext = np.load('../extra_ftrs/' + str(args.yix) + '/X_train_ext.npy') X_test_ext = np.load('../extra_ftrs/' + str(args.yix) + '/X_test_ext.npy') print(X_train_ext.shape, X_test_ext.shape) X_train = 
np.hstack((X_train, X_train_ext)) X_test = np.hstack((X_test, X_test_ext)) print('Add Extra') print(X_train.shape, X_test.shape, y_train.shape) trials = Trials() params = optimize(trials, X_train, y_train, 100) out_fold = out_fold_pred(params, X_train, y_train, 1) clf = get_model(params, X_train, y_train) dtest = xgb.DMatrix(X_test) preds = clf.predict(dtest) save_dir = '../level3-model-final/' + str(args.yix) print(save_dir) if not path.exists(save_dir): os.makedirs(save_dir) with open(path.join(save_dir, 'model.pkl'), 'wb') as f_model: pickle.dump(clf, f_model) with open(path.join(save_dir, 'param.pkl'), 'wb') as f_param: pickle.dump(params, f_param) np.save(path.join(save_dir, 'pred.npy'), preds) np.save(path.join(save_dir, 'outFold.npy'), out_fold)
from __future__ import print_function from __future__ import unicode_literals from __future__ import division from __future__ import absolute_import from builtins import * # NOQA from future import standard_library standard_library.install_aliases() # NOQA import chainer from chainer import functions as F from chainer import links as L from chainer import optimizers import numpy as np from chainerrl.envs.abc import ABC from chainerrl.explorers.epsilon_greedy import LinearDecayEpsilonGreedy from chainerrl.links import Sequence from chainerrl import policies from chainerrl import q_function from chainerrl import replay_buffer from basetest_training import _TestTraining class _TestPGTOnABC(_TestTraining): def make_agent(self, env, gpu): model = self.make_model(env) policy = model['policy'] q_func = model['q_function'] actor_opt = optimizers.Adam(alpha=1e-4) actor_opt.setup(policy) critic_opt = optimizers.Adam(alpha=1e-3) critic_opt.setup(q_func) explorer = self.make_explorer(env) rbuf = self.make_replay_buffer(env) return self.make_pgt_agent(env=env, model=model, actor_opt=actor_opt, critic_opt=critic_opt, explorer=explorer, rbuf=rbuf, gpu=gpu) def make_pgt_agent(self, env, model, actor_opt, critic_opt, explorer, rbuf, gpu): raise NotImplementedError() def make_explorer(self, env): def random_action_func(): a = env.action_space.sample() if isinstance(a, np.ndarray): return a.astype(np.float32) else: return a return LinearDecayEpsilonGreedy(1.0, 0.2, 1000, random_action_func) def make_replay_buffer(self, env): return replay_buffer.ReplayBuffer(10 ** 5) class _TestPGTOnContinuousPOABC(_TestPGTOnABC): def make_model(self, env): n_dim_obs = env.observation_space.low.size n_dim_action = env.action_space.low.size n_hidden_channels = 50 policy = Sequence( L.Linear(n_dim_obs, n_hidden_channels), F.relu, L.Linear(n_hidden_channels, n_hidden_channels), F.relu, L.LSTM(n_hidden_channels, n_hidden_channels), policies.FCGaussianPolicy( n_input_channels=n_hidden_channels, action_size=n_dim_action, min_action=env.action_space.low, max_action=env.action_space.high) ) q_func = q_function.FCLSTMSAQFunction( n_dim_obs=n_dim_obs, n_dim_action=n_dim_action, n_hidden_layers=2, n_hidden_channels=n_hidden_channels) return chainer.Chain(policy=policy, q_function=q_func) def make_env_and_successful_return(self, test): return ABC(discrete=False, partially_observable=True, deterministic=test), 1 def make_replay_buffer(self, env): return replay_buffer.EpisodicReplayBuffer(10 ** 5) class _TestPGTOnContinuousABC(_TestPGTOnABC): def make_model(self, env): n_dim_obs = env.observation_space.low.size n_dim_action = env.action_space.low.size n_hidden_channels = 50 policy = policies.FCGaussianPolicy( n_input_channels=n_dim_obs, n_hidden_layers=2, n_hidden_channels=n_hidden_channels, action_size=n_dim_action, min_action=env.action_space.low, max_action=env.action_space.high) q_func = q_function.FCSAQFunction( n_dim_obs=n_dim_obs, n_dim_action=n_dim_action, n_hidden_layers=2, n_hidden_channels=n_hidden_channels) return chainer.Chain(policy=policy, q_function=q_func) def make_env_and_successful_return(self, test): return ABC(discrete=False, deterministic=test), 1
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'esite.settings')

import django
django.setup()

from auto.models import Car


def add_car(make, model, km, year, color, eng, drive, trans, icolor):
    c = Car.objects.get_or_create(make=make, model=model, kilometers=km, year=year,
                                  color=color, engine_size=eng, drivetrain=drive,
                                  transmition=trans, interanl_color=icolor)


def populate():
    # car = Car(make='Acura',model='TL', kilometers=74673, year=2012, color='White', engine_size=3.7, drivetrain='AWD', transmition='MA')
    add_car('Acura', 'TL', 74673, 2012, 'White', 3.7, 'AWD', 'MA', 'White')
    add_car('Volkswagen', 'Touareg', 5344, 2015, 'Silver', 3.6, 'AWD', 'AU', 'White')


if __name__ == '__main__':
    print "Starting Car population script..."
    populate()
import aioamqp import asyncio import umsgpack as msgpack import logging from functools import wraps from uuid import uuid4 logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) logger.addHandler(logging.StreamHandler()) class RemoteException(Exception): pass class Client(object): def __init__(self, queue='', host='localhost', port=None, ssl=False): self._transport = None self._protocol = None self._channel = None self._callback_queue = None self._queue = queue self._host = host self._port = port self._ssl = ssl self._waiter = asyncio.Event() async def _connect(self, *args, **kwargs): """ an `__init__` method can't be a coroutine""" self._transport, self._protocol = await aioamqp.connect(*args, **kwargs) host = kwargs.get('host', 'localhost') port = kwargs.get('port') ssl = kwargs.get('ssl', False) if port is None: port = 5671 if ssl else 5672 logger.info(f'Connected to amqp://{host}:{port}/') self._channel = await self._protocol.channel() result = await self._channel.queue_declare(queue_name='', exclusive=True) self._callback_queue = result['queue'] logger.info(f'Created callback queue: {self._callback_queue}') await self._channel.basic_consume( self._on_response, no_ack=True, queue_name=self._callback_queue, ) async def _on_response(self, channel, body, envelope, properties): if self._corr_id == properties.correlation_id: self._response = body logger.info(f'Received response for {self._corr_id}') self._waiter.set() async def __call__(self, method, *args, **kwargs): if not self._protocol: await self._connect(host=self._host, port=self._port, ssl=self._ssl) self._response = None self._corr_id = str(uuid4()) payload = msgpack.packb((method, args, kwargs)) logger.info(f'Publishing to {self._queue}: {method} ({self._corr_id})') await self._channel.basic_publish( payload=payload, exchange_name='', routing_key=self._queue, properties={ 'reply_to': self._callback_queue, 'correlation_id': self._corr_id, }, ) logger.info(f'Waiting for response on queue {self._callback_queue} ({self._corr_id})') await self._waiter.wait() await self._protocol.close() try: exc, result = msgpack.unpackb(self._response) except Exception as err: logger.error(f'Could not unpack response: {err}') return None if exc is not None: raise RemoteException(exc) return result def __getattr__(self, method): @wraps(self.__call__) async def wrapper(*args, **kwargs): return await self(method, *args, **kwargs) return wrapper
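# Usage sketch (assumptions: a RabbitMQ broker runs on localhost, an RPC server
# consumes the 'rpc_queue' queue and exposes an `add` method; the names are
# illustrative only).
async def main():
    client = Client(queue='rpc_queue', host='localhost')
    result = await client.add(2, 3)  # dispatched via __getattr__ -> __call__
    print(result)

if __name__ == '__main__':
    asyncio.run(main())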
def my_max(a, b):
    if a > b:
        return a
    elif b > a:
        return b
    else:
        return None


x = my_max(1, 2)
print x
print my_max(3, 2)
import pandas as pd
import pytest

from athletic_pandas.algorithms import heartrate_models


def test_heartrate_model():
    heartrate = pd.Series(range(50))
    power = pd.Series(range(0, 100, 2))
    model, predictions = heartrate_models.heartrate_model(heartrate, power)

    assert model.params['hr_rest'].value == 0.00039182374117378518
    assert model.params['hr_max'].value == 195.75616175654685
    assert model.params['dhr'].value == 0.49914432620946803
    assert model.params['tau_rise'].value == 0.98614419733274383
    assert model.params['tau_fall'].value == 22.975975612579408
    assert model.params['hr_drift'].value == 6.7232899323328612 * 10**-5
    assert len(predictions) == 50
__all__ = ['acfun_download'] from ..common import * from .letv import letvcloud_download_by_vu from .qq import qq_download_by_vid from .sina import sina_download_by_vid from .tudou import tudou_download_by_iid from .youku import youku_download_by_vid import json, re def get_srt_json(id): url = 'http://danmu.aixifan.com/V2/%s' % id return get_html(url) def acfun_download_by_vid(vid, title, output_dir='.', merge=True, info_only=False, **kwargs): info = json.loads(get_html('http://www.acfun.tv/video/getVideo.aspx?id=' + vid)) sourceType = info['sourceType'] if 'sourceId' in info: sourceId = info['sourceId'] # danmakuId = info['danmakuId'] if sourceType == 'sina': sina_download_by_vid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only) elif sourceType == 'youku': youku_download_by_vid(sourceId, title=title, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs) elif sourceType == 'tudou': tudou_download_by_iid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only) elif sourceType == 'qq': qq_download_by_vid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only) elif sourceType == 'letv': letvcloud_download_by_vu(sourceId, '2d8c027396', title, output_dir=output_dir, merge=merge, info_only=info_only) elif sourceType == 'zhuzhan': a = 'http://api.aixifan.com/plays/%s/realSource' % vid s = json.loads(get_content(a, headers={'deviceType': '1'})) urls = s['data']['files'][-1]['url'] size = urls_size(urls) print_info(site_info, title, 'mp4', size) if not info_only: download_urls(urls, title, 'mp4', size, output_dir=output_dir, merge=merge) else: raise NotImplementedError(sourceType) if not info_only and not dry_run: if not kwargs['caption']: print('Skipping danmaku.') return try: title = get_filename(title) print('Downloading %s ...\n' % (title + '.cmt.json')) cmt = get_srt_json(vid) with open(os.path.join(output_dir, title + '.cmt.json'), 'w', encoding='utf-8') as x: x.write(cmt) except: pass def acfun_download(url, output_dir='.', merge=True, info_only=False, **kwargs): assert re.match(r'http://[^\.]+.acfun.[^\.]+/\D/\D\D(\d+)', url) html = get_html(url) title = r1(r'<h1 id="txt-title-view">([^<>]+)<', html) title = unescape_html(title) title = escape_file_path(title) assert title videos = re.findall("data-vid=\"(\d+)\".*href=\"[^\"]+\".*title=\"([^\"]+)\"", html) for video in videos: p_vid = video[0] p_title = title + " - " + video[1] if video[1] != '删除标签' else title acfun_download_by_vid(p_vid, p_title, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs) site_info = "AcFun.tv" download = acfun_download download_playlist = playlist_not_supported('acfun')
import math from autoprotocol import UserError from modules.utils import * def transform(protocol, params): # general parameters constructs = params['constructs'] num_constructs = len(constructs) plates = list(set([construct.container for construct in constructs])) if len(plates) != 1: raise UserError('You can only transform aliquots from one common container.') # **** need to be able to check if plate is sealed to add run-chaining **** mm_mult = 1.3 transformation_plate = protocol.ref("transformation_plate", None, "96-pcr", discard=True) protocol.incubate(transformation_plate, "cold_20", "10:minute") transformation_wells = transformation_plate.wells_from(0, num_constructs) for i in range(num_constructs): protocol.provision("rs16pbjc4r7vvz", transformation_wells[i], "50:microliter") for i, well in enumerate(constructs): protocol.transfer(well, transformation_wells[i], "2.0:microliter", dispense_speed="10:microliter/second", mix_after=False, new_group=det_new_group(i)) if well.name: transformation_wells[i].set_name(well.name) else: transformation_wells[i].set_name('construct_%s' % (i+1)) # NEED to confirm second de-seal is working OR move to cover/uncover 96-flat protocol.seal(transformation_plate) protocol.incubate(transformation_plate, "cold_4", "20:minute", shaking=False, co2=0) protocol.unseal(transformation_plate) protocol.dispense_full_plate( transformation_plate, 'soc', '50:microliter' ) protocol.seal(transformation_plate) protocol.incubate(transformation_plate, "warm_37", "10:minute", shaking=True) protocol.unseal(transformation_plate) # spread on agar plates # kan "ki17rs7j799zc2" # amp "ki17sbb845ssx9" # specto "ki17sbb9r7jf98" # cm "ki17urn3gg8tmj" # "noAB" "ki17reefwqq3sq" agar_plates = [] agar_wells = WellGroup([]) for well in range(0, len(transformation_wells), 6): agar_name = "agar-%s_%s" % (len(agar_plates), printdatetime(time=False)) agar_plate = ref_kit_container(protocol, agar_name, "6-flat", "ki17rs7j799zc2", discard=False, store='cold_4') agar_plates.append(agar_plate) for i, w in enumerate(transformation_wells[well:well + 6]): protocol.spread(w, agar_plate.well(i), "100:microliter") agar_wells.append(agar_plate.well(i).set_name(w.name)) for agar_p in agar_plates: protocol.incubate( agar_p, 'warm_37', '12:hour' ) protocol.image_plate( agar_p, mode='top', dataref=agar_p.name ) # return agar plates to end protocol return agar_plates if __name__ == '__main__': from autoprotocol.harness import run run(transform, 'Transform')
import sys import os import getopt import xattr import zlib def usage(e=None): if e: print(e) print("") name = os.path.basename(sys.argv[0]) print("usage: %s [-lz] file [file ...]" % (name,)) print(" %s -p [-lz] attr_name file [file ...]" % (name,)) print(" %s -w [-z] attr_name attr_value file [file ...]" % (name,)) print(" %s -d attr_name file [file ...]" % (name,)) print("") print("The first form lists the names of all xattrs on the given file(s).") print("The second form (-p) prints the value of the xattr attr_name.") print("The third form (-w) sets the value of the xattr attr_name to attr_value.") print("The fourth form (-d) deletes the xattr attr_name.") print("") print("options:") print(" -h: print this help") print(" -l: print long format (attr_name: attr_value)") print(" -z: compress or decompress (if compressed) attribute value in zip format") if e: sys.exit(64) else: sys.exit(0) class NullsInString(Exception): """Nulls in string.""" _FILTER=''.join([(len(repr(chr(x)))==3) and chr(x) or '.' for x in range(256)]) def _dump(src, length=16): result=[] for i in range(0, len(src), length): s = src[i:i+length] hexa = ' '.join(["%02X"%ord(x) for x in s]) printable = s.translate(_FILTER) result.append("%04X %-*s %s\n" % (i, length*3, hexa, printable)) return ''.join(result) def main(): try: (optargs, args) = getopt.getopt(sys.argv[1:], "hlpwdz", ["help"]) except getopt.GetoptError as e: usage(e) attr_name = None long_format = False read = False write = False delete = False compress = lambda x: x decompress = compress status = 0 for opt, arg in optargs: if opt in ("-h", "--help"): usage() elif opt == "-l": long_format = True elif opt == "-p": read = True if write or delete: usage("-p not allowed with -w or -d") elif opt == "-w": write = True if read or delete: usage("-w not allowed with -p or -d") elif opt == "-d": delete = True if read or write: usage("-d not allowed with -p or -w") elif opt == "-z": compress = zlib.compress decompress = zlib.decompress if write or delete: if long_format: usage("-l not allowed with -w or -p") if read or write or delete: if not args: usage("No attr_name") attr_name = args.pop(0) if write: if not args: usage("No attr_value") attr_value = args.pop(0) if len(args) > 1: multiple_files = True else: multiple_files = False for filename in args: def onError(e): if not os.path.exists(filename): sys.stderr.write("No such file: %s\n" % (filename,)) else: sys.stderr.write(str(e) + "\n") status = 1 try: attrs = xattr.xattr(filename) except (IOError, OSError) as e: onError(e) continue if write: try: attrs[attr_name] = compress(attr_value) except (IOError, OSError) as e: onError(e) continue elif delete: try: del attrs[attr_name] except (IOError, OSError) as e: onError(e) continue except KeyError: onError("No such xattr: %s" % (attr_name,)) continue else: try: if read: attr_names = (attr_name,) else: attr_names = list(attrs.keys()) except (IOError, OSError) as e: onError(e) continue if multiple_files: file_prefix = "%s: " % (filename,) else: file_prefix = "" for attr_name in attr_names: try: try: attr_value = decompress(attrs[attr_name]) except zlib.error: attr_value = attrs[attr_name] except KeyError: onError("%sNo such xattr: %s" % (file_prefix, attr_name)) continue if long_format: try: if attr_value.find('\0') >= 0: raise NullsInString; print("".join((file_prefix, "%s: " % (attr_name,), attr_value))) except (UnicodeDecodeError, NullsInString): print("".join((file_prefix, "%s:" % (attr_name,)))) print(_dump(attr_value)) else: if read: print("".join((file_prefix, 
attr_value))) else: print("".join((file_prefix, attr_name))) sys.exit(status) if __name__ == "__main__": main()
from datetime import datetime
from threading import Timer
from queue import Queue
import uuid
import logging

try:
    from time import perf_counter
except ImportError:
    from time import clock as perf_counter

log = logging.getLogger(__name__)


class _Task:
    _processing_time = 10
    _scheduler = None

    def __init__(self, function, due=None, interval=None, repeat=0):
        self._function = function
        if hasattr(due, '__iter__'):
            self._due_iter = iter(due)
            self._due = self._due_iter.__next__()
        else:
            self._due_iter = None
            self._due = due
        self._interval = interval
        self._repeat = repeat
        if not (self._due or self._interval):
            raise ValueError

    def __call__(self, *args, job_uuid=None, **kwargs):
        start = perf_counter()
        result = self._function(*args, **kwargs)
        self._processing_time = perf_counter() - start
        if self._scheduler:
            del self._scheduler._scheduled[job_uuid]
            if self._interval and self._repeat != 1:
                if self._repeat > 0:
                    self._repeat -= 1
                self._scheduler.schedule(self, *args, job_uuid=job_uuid, **kwargs)
            if self._due_iter:
                self._due = self._due_iter.__next__()
                if self._due:
                    self._scheduler.schedule(self, *args, job_uuid=job_uuid, **kwargs)
        return result

    def __get__(self, obj, type=None):
        if obj is None:
            return self
        new_func = self._function.__get__(obj, type)
        return self.__class__(new_func, self._due_iter or self._due,
                              self._interval, self._repeat)


class Task:
    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs

    def __call__(self, function):
        return _Task(function, *self.args, **self.kwargs)


class Scheduler:
    _queue = Queue()
    _scheduled = dict()

    def __init__(self):
        pass

    def schedule(self, function, *args, job_uuid=None, **kwargs):
        if isinstance(function, _Task):
            if not job_uuid:
                job_uuid = uuid.uuid4()
            kwargs['job_uuid'] = job_uuid
            function._scheduler = self
            if function._interval:
                timer = Timer(function._interval, function, args, kwargs)
            else:
                remainder = (function._due - datetime.now()).total_seconds()
                timer = Timer(remainder - function._processing_time, function, args, kwargs)
            self._scheduled[job_uuid] = timer
            timer.start()
            return job_uuid
        else:
            self._queue.put((function, args, kwargs))

    def cancel(self, job_uuid=None):
        if job_uuid:
            self._scheduled[job_uuid].cancel()
            del self._scheduled[job_uuid]
        else:
            # snapshot the keys so entries can be removed while iterating
            for job_uuid in list(self._scheduled):
                self._scheduled[job_uuid].cancel()
                del self._scheduled[job_uuid]
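# Usage sketch for Task/Scheduler (assumption: this runs in a long-lived
# process, since the scheduled calls fire on background Timer threads; the
# function and argument names are illustrative only).
@Task(interval=5, repeat=3)  # run every 5 seconds, 3 times in total
def heartbeat(name):
    log.info('heartbeat from %s', name)

scheduler = Scheduler()
job_id = scheduler.schedule(heartbeat, 'worker-1')
# later, if the remaining runs are no longer needed:
# scheduler.cancel(job_id)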
from functional_tests import FunctionalTest, ROOT, USERS
import time
from selenium.webdriver.support.ui import WebDriverWait


class AddBasicAction(FunctionalTest):

    def setUp(self):
        self.url = ROOT + '/default/user/login'
        get_browser = self.browser.get(self.url)
        username = WebDriverWait(self, 10).until(lambda self: self.browser.find_element_by_name("username"))
        username.send_keys(USERS['USER2'])
        password = self.browser.find_element_by_name("password")
        password.send_keys(USERS['PASSWORD2'])
        submit_button = self.browser.find_element_by_css_selector("#submit_record__row input")
        submit_button.click()
        time.sleep(1)
        self.url = ROOT + '/submit/new_question/action'
        get_browser = self.browser.get(self.url)
        time.sleep(2)

    def test_can_view_submit_page(self):
        response_code = self.get_response_code(self.url)
        self.assertEqual(response_code, 200)

    def test_has_right_title(self):
        title = self.browser.title
        self.assertEqual('Networked Decision Making', title)

    def test_has_right_heading(self):
        body = self.browser.find_element_by_tag_name('body')
        self.assertIn('Submit Action', body.text)

    def test_question(self):
        time.sleep(2)
        # still getting blank category for some reason but not if loaded manually
        # questiontext = self.browser.find_element_by_name('questiontext')
        questiontext = WebDriverWait(self, 10).until(lambda self: self.browser.find_element_by_name('questiontext'))
        questiontext.send_keys("Lets get this done")
        submit_button = self.browser.find_element_by_css_selector("#submit_record__row input")
        submit_button.click()
        time.sleep(1)
        welcome_message = self.browser.find_element_by_css_selector(".flash")
        self.assertEqual(u'Details Submitted\n\xd7', welcome_message.text)
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('clients', '0002_contact'),
    ]

    operations = [
        migrations.AlterField(
            model_name='contact',
            name='alternate_email',
            field=models.EmailField(blank=True, max_length=255),
        ),
        migrations.AlterField(
            model_name='contact',
            name='alternate_phone',
            field=models.CharField(blank=True, max_length=50),
        ),
        migrations.AlterField(
            model_name='contact',
            name='phone',
            field=models.CharField(blank=True, max_length=50),
        ),
    ]
from blob import Blob
from foreground_processor import ForegroundProcessor
import cv2
import operator
import rospy
from blob_detector.msg import Blob as BlobMsg
from blob_detector.msg import Blobs as BlobsMsg
import numpy as np


class BlobDetector(ForegroundProcessor):
    def __init__(self, node_name):
        super(BlobDetector, self).__init__(node_name)
        self.pub = rospy.Publisher('/blobs', BlobsMsg)

    def find_blobs(self, rgbd):
        mask = rgbd.depth_mask_sm
        contours0 = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours = [cv2.approxPolyDP(cnt, 3, True) for cnt in contours0[0]]
        blobs = [Blob(contour=c, source_rgbd=rgbd) for c in contours]
        blobs = [b for b in blobs if b.area > 800]  # filter
        [b.compute_params() for b in blobs]  # cpu intensive initialization
        return blobs

    def process_depth_mask_image(self, rgbd):
        blobs = self.find_blobs(rgbd)
        #for blob in blobs:
        #    blob.set_world_coordinates_from_depth(rgbd.depth_raw)
        self.process_blobs(blobs, rgbd)

    def publish_blobs(self, blobs):
        blobs_msg = BlobsMsg()
        for blob in blobs:
            blob_msg = blob.to_msg()
            blobs_msg.blobs.append(blob_msg)
        self.pub.publish(blobs_msg)

    def show_blobs(self, blobs, rgbd):
        for blob in blobs:
            blob.draw(rgbd.depth_color_sm)
        self.show_depth_color(rgbd)

    def process_blobs(self, blobs, rgbd):
        self.publish_blobs(blobs)
        self.show_blobs(blobs, rgbd)


if __name__ == '__main__':
    bd = BlobDetector('fg')
    bd.run()
import ctypes
import numpy as np
import os

libpath = os.path.dirname(os.path.realpath(__file__))
lib = ctypes.cdll.LoadLibrary(os.path.join(libpath, 'libscatt_bg.so'))

scatt_bg_c = lib.scatt_bg
scatt_bg_c.restype = ctypes.c_void_p  # reset return types. default is c_int
scatt_bg_c.argtypes = [ctypes.c_double, ctypes.POINTER(ctypes.c_double),
                       ctypes.c_int, ctypes.c_int]

subtend_c = lib.subtend
subtend_c.restype = ctypes.c_double  # reset return types. default is c_int
subtend_c.argtypes = [ctypes.c_double, ctypes.c_double,
                      ctypes.c_double, ctypes.c_double]


def scatt_bg(kev, Z_max=98, theta_max=90):
    # kev = np.asarray(kev)
    out = np.zeros(Z_max * theta_max)
    # Z_max = np.asarray(Z_max)
    # theta_max = np.asarray(theta_max)
    scatt_bg_c(ctypes.c_double(kev),
               out.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
               ctypes.c_int(Z_max), ctypes.c_int(theta_max))
    return out


def subtend(theta0, theta1, beta0, beta1):
    return subtend_c(ctypes.c_double(np.radians(theta0)),
                     ctypes.c_double(np.radians(theta1)),
                     ctypes.c_double(np.radians(beta0)),
                     ctypes.c_double(np.radians(beta1)))
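# Usage sketch (assumptions: libscatt_bg.so sits next to this file and was built
# for the running platform; the reshape below assumes the C routine fills the
# buffer Z-major with theta_max values per element - adjust if it is theta-major).
if __name__ == '__main__':
    bg = scatt_bg(59.5)            # 59.5 keV, default Z_max=98, theta_max=90
    bg = bg.reshape(98, 90)
    print(bg[26, :10])             # first 10 angles for one element
    print(subtend(10.0, 20.0, 0.0, 5.0))   # angles given in degrees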
"""Tests for forms in eCommerce app.""" from django.test import TestCase from ecommerce.forms import OrderForm required_fields = { 'phone': '123456789', 'email': 'valid@email.ru', } invalid_form_email = { 'email': 'clearly!not_@_email', 'phone': '123456789' } no_phone = {'email': 'sss@sss.sss'} class TestForm(TestCase): """Test suite for forms in eCommerce app.""" def test_empty_form(self): """Empty form shouldn't be valid.""" form = OrderForm() self.assertFalse(form.is_valid()) def test_filled_form_without_required_field(self): """Form is still not valid, if there are some required fields left unfilled.""" form = OrderForm(data=no_phone) self.assertFalse(form.is_valid()) def test_valid_form(self): """Form is valid, if there all required fields are filled.""" form = OrderForm(data=required_fields) self.assertTrue(form.is_valid()) def test_from_validation_on_email_field(self): """Form should validate user's email if it is filled.""" form = OrderForm(data=invalid_form_email) self.assertFalse(form.is_valid())
from builtins import range def writeMeshMatlabFormat(mesh,meshFileBase): """ build array data structures for matlab finite element mesh representation and write to a file to view and play with in matlatb in matlab can then print mesh with pdemesh(p,e,t) where p is the vertex or point matrix e is the edge matrix, and t is the element matrix points matrix is [2 x num vertices] format : row 1 = x coord, row 2 = y coord for nodes in mesh edge matrix is [7 x num edges] format: row 1 = start vertex number row 2 = end vertex number row 3 = start value in edge parameterization, should be 0 row 4 = end value in edge parameterization, should be 1 row 5 = global edge id, base 1 row 6 = subdomain on left? always 1 for now row 7 = subdomain on right? always 0 for now element matrix is [4 x num elements] row 1 = vertex 1 global number row 2 = vertex 2 global number row 3 = vertex 3 global number row 4 = triangle subdomain number where 1,2,3 is a local counter clockwise numbering of vertices in triangle """ import numpy as numpy matlabBase = 1 p = numpy.zeros((2,mesh['nNodes_global']),numpy.float_) e = numpy.zeros((7,mesh['nElementBoundaries_global']),numpy.float_) t = numpy.zeros((4,mesh['nElements_global']),numpy.float_) #load p,e,t and write file mfile = open(meshFileBase+'.m','w') mfile.write('p = [ ... \n') for nN in range(mesh['nNodes_global']): p[0,nN]=mesh['nodeArray'][nN,0] p[1,nN]=mesh['nodeArray'][nN,1] mfile.write('%g %g \n' % tuple(p[:,nN])) mfile.write(']; \n') mfile.write("p = p\';\n") #need transpose for matlab mfile.write('e = [ ... \n') for ebN in range(mesh['nElementBoundaries_global']): e[0,ebN]=mesh['elementBoundaryNodesArray'][ebN,0] + matlabBase #global node number of start node base 1 e[1,ebN]=mesh['elementBoundaryNodesArray'][ebN,1] + matlabBase #global node number of end node base 1 e[2,ebN]=0.0 #edge param. is 0 to 1 e[3,ebN]=1.0 e[4,ebN]=ebN + matlabBase #global edge number base 1 e[5,ebN]=0 #subdomain to left e[6,ebN]=1 #subdomain to right mfile.write('%g %g %g %g %g %g %g \n' % tuple(e[:,ebN])) mfile.write(']; \n') mfile.write("e = e\';\n") #need transpose for matlab #write triangles last mfile.write('t = [ ... \n') for eN in range(mesh['nElements_global']): t[0,eN]=mesh['elementNodesArray'][eN,0]+matlabBase #global node number for vertex 0 t[1,eN]=mesh['elementNodesArray'][eN,1]+matlabBase #global node number for vertex 0 t[2,eN]=mesh['elementNodesArray'][eN,2]+matlabBase #global node number for vertex 0 t[3,eN]=1 #subdomain id mfile.write('%g %g %g %g \n' % tuple(t[:,eN])) mfile.write(']; \n'); mfile.write("t = t\';\n") #need transpose for matlab mfile.close() return p,e,t if __name__ == '__main__': import os,shelve import ppmatlab,numpy.oldnumeric as numpy os.listdir('./results') filename = './results/re_forsyth2_ss_2d_pre_forsyth2_ss_2d_c0p1_n_mesh_results.dat' res = shelve.open(filename) mesh = res['mesh'] mmfile = 'forsyth2MeshMatlab' p,e,t = ppmatlab.writeMeshMatlabFormat(mesh,mmfile)
def test_root(client):
    response = client.get('/')
    assert response.status_code == 200
    assert response.data.decode('utf-8') == 'Hey enlil'
import os.path
from subprocess import call


class InstallerTools(object):
    @staticmethod
    def update_environment(file_path, environment_path):
        update_file = open(file_path, 'r')
        original_lines = update_file.readlines()
        original_lines[0] = environment_path + '\n'
        update_file.close()
        update_file = open(file_path, 'w')
        for lines in original_lines:
            update_file.write(lines)
        update_file.close()

    @staticmethod
    def fix_migrate(base_directory):
        print "\nFixing the migrate bug \n"
        buggy_path = os.path.join(base_directory, 'env/lib/python2.7/site-packages/migrate/versioning/schema.py')
        buggy_file = open(buggy_path, 'r')
        original_lines = buggy_file.readlines()
        original_lines[9] = "from sqlalchemy import exc as sa_exceptions\n"
        buggy_file.close()
        update_file = open(buggy_path, 'w')
        for lines in original_lines:
            update_file.write(lines)
        update_file.close()

    @staticmethod
    def refresh_environment(framework_config):
        InstallerTools.update_environment(framework_config.yard_path, framework_config.environment_path)
        InstallerTools.update_environment(framework_config.blow_path, framework_config.environment_path)
        InstallerTools.update_environment(framework_config.try_path, framework_config.environment_path)

    @staticmethod
    def change_permissions(framework_config):
        call(['chmod', 'a+x', framework_config.yard_path])
        call(['chmod', 'a+x', framework_config.blow_path])
        call(['chmod', 'a+x', framework_config.try_path])

    @staticmethod
    def create_db_directory(base_directory):
        if not os.path.exists(os.path.join(base_directory, 'storage/')):
            os.makedirs(os.path.join(base_directory, 'storage/'))

    @staticmethod
    def create_virtual_environment(framework_config):
        call(['python', framework_config.v_path, framework_config.environment_name])
        InstallerTools.refresh_environment(framework_config)
        InstallerTools.change_permissions(framework_config)
import http.server import socketserver PORT = 8000 Handler = http.server.SimpleHTTPRequestHandler httpd = socketserver.TCPServer(("0.0.0.0", PORT), Handler) print("serving at port", PORT) httpd.serve_forever()
from django.conf.urls import patterns, include, url from django.contrib import admin admin.autodiscover() urlpatterns = patterns('', # Examples: # url(r'^$', 'toolsforbiology.views.home', name='home'), # url(r'^blog/', include('blog.urls')), url(r'^admin/', include(admin.site.urls)), )
from django.conf import settings from django.db import models from .ticket import Ticket class Attachment(models.Model): """Ticket attachment model.""" ticket = models.ForeignKey( Ticket, blank=False, related_name='attachments', db_index=True, on_delete=models.DO_NOTHING) user = models.ForeignKey( settings.AUTH_USER_MODEL, blank=False, db_index=True, on_delete=models.DO_NOTHING) upload = models.FileField(upload_to='attachments/%Y/%m/%d', max_length=255) created_at = models.DateTimeField(auto_now_add=True) @classmethod def filter_by_user(cls, user, queryset=None): """Returns any user accessible attachments. Ones he has access to through the tickets. """ if queryset is None: queryset = cls.objects return queryset.filter(ticket__in=Ticket.filter_by_user(user))
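A brief, hedged sketch of how the classmethod above might be called; the helper name and the newest-first ordering are assumptions, not taken from the source.

def recent_attachments_for(user):
    """Hypothetical helper: newest-first attachments visible to `user`."""
    return Attachment.filter_by_user(user).order_by('-created_at')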
import unittest from juggling.notation import siteswap class SiteswapUtilsTests(unittest.TestCase): def test_siteswap_char_to_int(self): self.assertEqual(siteswap.siteswap_char_to_int('0'), 0) self.assertEqual(siteswap.siteswap_char_to_int('1'), 1) self.assertEqual(siteswap.siteswap_char_to_int('a'), 10) self.assertEqual(siteswap.siteswap_char_to_int('f'), 15) self.assertEqual(siteswap.siteswap_char_to_int('z'), 35) def test_invalid_char(self): self.assertRaises(ValueError, siteswap.siteswap_char_to_int, [3]) self.assertRaises(ValueError, siteswap.siteswap_char_to_int, 10) self.assertRaises(ValueError, siteswap.siteswap_char_to_int, '#') self.assertRaises(ValueError, siteswap.siteswap_char_to_int, 'multichar') def test_siteswap_int_to_char(self): self.assertEqual(siteswap.siteswap_int_to_char(9), '9') self.assertEqual(siteswap.siteswap_int_to_char(0), '0') self.assertEqual(siteswap.siteswap_int_to_char(10), 'a') self.assertEqual(siteswap.siteswap_int_to_char(15), 'f') self.assertEqual(siteswap.siteswap_int_to_char(35), 'z') def test_invalid_int(self): self.assertRaises(ValueError, siteswap.siteswap_int_to_char, ['3']) self.assertRaises(ValueError, siteswap.siteswap_int_to_char, 'a') self.assertRaises(ValueError, siteswap.siteswap_int_to_char, 36) self.assertRaises(ValueError, siteswap.siteswap_int_to_char, -1) class SiteSwapSyntaxValidationTests(unittest.TestCase): def test_valid_syntax(self): solo_patterns = [ '441', '(6x,4)(4,6x)', '(6x,4)*', '[64]020', '[33](3,3)123', '(4,2)(2x,[44x])', ] for pattern in solo_patterns: self.assertTrue(siteswap.is_valid_siteswap_syntax(pattern)) passing_patterns = [ ('<4p|3><2|3p>', 2), ('<2|3p><2p|3><[3p22]|3p><3|3>', 2), ('<(2p3,4x)|(2xp3,4p1)|(2xp2,4xp2)>', 3) ] for pattern, num_jugglers in passing_patterns: self.assertTrue(siteswap.is_valid_siteswap_syntax(pattern, num_jugglers)) def test_return_match(self): import re sre_match_object = type(re.match('', '')) self.assertTrue(siteswap.is_valid_siteswap_syntax('441', return_match=False)) _, match = siteswap.is_valid_siteswap_syntax('441', return_match=True) self.assertIsInstance(match, sre_match_object) _, match = siteswap.is_valid_siteswap_syntax('###', return_match=True) self.assertIsNone(match) def test_invalid_syntax(self): solo_patterns = [ '#!j', '((3232,3)', '(3232,3))', '[(3232,3)])', ] for pattern in solo_patterns: self.assertFalse(siteswap.is_valid_siteswap_syntax(pattern))
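The module under test is not included in this snippet. A minimal standalone sketch consistent with the assertions above ('0'-'9' map to 0-9, 'a'-'z' to 10-35, anything else raises ValueError) could look like the following; the real juggling.notation.siteswap implementation may differ.

import string

_SITESWAP_CHARS = string.digits + string.ascii_lowercase  # '0'..'9' then 'a'..'z'

def siteswap_char_to_int_sketch(char):
    # Reject non-strings, multi-character strings and characters outside the alphabet.
    if not isinstance(char, str) or len(char) != 1 or char not in _SITESWAP_CHARS:
        raise ValueError('invalid siteswap character: %r' % (char,))
    return _SITESWAP_CHARS.index(char)

def siteswap_int_to_char_sketch(value):
    # Reject non-integers and values outside 0..35.
    if not isinstance(value, int) or isinstance(value, bool) or not 0 <= value <= 35:
        raise ValueError('invalid siteswap value: %r' % (value,))
    return _SITESWAP_CHARS[value]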
"""Run pytest with coverage and generate an html report.""" from sys import argv from os import system as run def main(): # noqa run_str = 'python -m coverage run --include={} --omit=./* -m pytest {} {}' arg = '' # All source files included in coverage includes = '../*' if len(argv) >= 2: arg = argv[1] if ':' in argv[1]: includes = argv[1].split('::')[0] other_args = ' '.join(argv[2:]) run(run_str.format(includes, arg, other_args)) # Generate the html coverage report and ignore errors run('python -m coverage html -i') if __name__ == '__main__': main()
from copy import deepcopy class IModel(): def __init__(self): self.list = [] def __getitem__(self, index): ''' Getter for the [] operator ''' if index >= len(self.list): raise IndexError("Index out of range.") return self.list[index] def __setitem__(self, index, value): ''' Setter for the [] operator ''' self.list[index] = value def extend(self, val): self.list.extend([0] * val) def append(self, val): self.list.append(val) def __iter__(self): i = 0 while i < len(self.list): yield self.list[i] i += 1 def __len__(self): return len(self.list) def __delitem__(self, index): del self.list[index] return def gnomeSort(list): i = 0 n = len(list) cpy = deepcopy(list) while i < n: if i and cpy[i] < cpy[i-1]: cpy[i], cpy[i-1] = cpy[i-1], cpy[i] i -= 1 else: i += 1 return cpy def _filter(list, f): result = [] for e in list: if f(e): result.append(e) return result if __name__ == "__main__": a = IModel() a.extend(4) print(a.list) a[0] = 15 a[1] = 10 a[2] = 5 a[3] = 3 for i in a: print(i) print("-----------") for i in gnomeSort(a): print(i) print("-----------") for i in _filter(a, lambda x: x % 5): print(i)
"""XML files parser backend, should be available always. .. versionchanged:: 0.1.0 Added XML dump support. - Format to support: XML, e.g. http://www.w3.org/TR/xml11/ - Requirements: one of the followings - lxml2.etree if available - xml.etree.ElementTree in standard lib if python >= 2.5 - elementtree.ElementTree (otherwise) - Limitations: - '<prefix>attrs', '<prefix>text' and '<prefix>children' are used as special parameter to keep XML structure of original data. You have to cusomize <prefix> (default: '@') if any config parameters conflict with some of them. - Some data or structures of original XML file may be lost if make it backed to XML file; XML file - (anyconfig.load) -> config - (anyconfig.dump) -> XML file - XML specific features (namespace, etc.) may not be processed correctly. - Special Options: None supported """ from __future__ import absolute_import from io import BytesIO import sys import anyconfig.backend.base import anyconfig.compat try: # First, try lxml which is compatible with elementtree and looks faster a # lot. See also: http://getpython3.com/diveintopython3/xml.html from lxml2 import etree as ET except ImportError: try: import xml.etree.ElementTree as ET except ImportError: import elementtree.ElementTree as ET _PARAM_PREFIX = "@" _IS_OLDER_PYTHON = sys.version_info[0] < 3 and sys.version_info[1] < 7 def etree_to_container(root, cls, pprefix=_PARAM_PREFIX): """ Convert XML ElementTree to a collection of container objects. :param root: etree root object or None :param cls: Container class :param pprefix: Special parameter name prefix """ (attrs, text, children) = [pprefix + x for x in ("attrs", "text", "children")] tree = cls() if root is None: return tree tree[root.tag] = cls() if root.attrib: tree[root.tag][attrs] = cls(anyconfig.compat.iteritems(root.attrib)) if root.text and root.text.strip(): tree[root.tag][text] = root.text.strip() if len(root): # It has children. # Note: Configuration item cannot have both attributes and values # (list) at the same time in current implementation: tree[root.tag][children] = [etree_to_container(c, cls, pprefix) for c in root] return tree def container_to_etree(obj, cls, parent=None, pprefix=_PARAM_PREFIX): """ Convert a container object to XML ElementTree. :param obj: Container instance to convert to :param cls: Container class :param parent: XML ElementTree parent node object or None :param pprefix: Special parameter name prefix """ if not isinstance(obj, (cls, dict)): return # All attributes and text should be set already. (attrs, text, children) = [pprefix + x for x in ("attrs", "text", "children")] for key, val in anyconfig.compat.iteritems(obj): if key == attrs: for attr, aval in anyconfig.compat.iteritems(val): parent.set(attr, aval) elif key == text: parent.text = val elif key == children: for child in val: # child should be a dict-like object. for ckey, cval in anyconfig.compat.iteritems(child): celem = ET.Element(ckey) container_to_etree(cval, cls, celem, pprefix) parent.append(celem) else: elem = ET.Element(key) container_to_etree(val, cls, elem, pprefix) return ET.ElementTree(elem) def etree_write(tree, stream): """ Write XML ElementTree `root` content into `stream`. :param tree: XML ElementTree object :param stream: File or file-like object can write to """ if _IS_OLDER_PYTHON: tree.write(stream, encoding='UTF-8') else: tree.write(stream, encoding='UTF-8', xml_declaration=True) class Parser(anyconfig.backend.base.D2Parser): """ Parser for XML files. 
""" _type = "xml" _extensions = ["xml"] _open_flags = ('rb', 'wb') def load_from_string(self, content, **kwargs): """ Load config from XML snippet (a string `content`). :param content: XML snippet (a string) :param kwargs: optional keyword parameters passed to :return: self.container object holding config parameters """ root = ET.ElementTree(ET.fromstring(content)).getroot() return etree_to_container(root, self.container) def load_from_path(self, filepath, **kwargs): """ :param filepath: XML file path :param kwargs: optional keyword parameters to be sanitized :: dict :return: self.container object holding config parameters """ root = ET.parse(filepath).getroot() return etree_to_container(root, self.container) def load_from_stream(self, stream, **kwargs): """ :param stream: XML file or file-like object :param kwargs: optional keyword parameters to be sanitized :: dict :return: self.container object holding config parameters """ return self.load_from_path(stream, **kwargs) def dump_to_string(self, cnf, **kwargs): """ :param cnf: Configuration data to dump :: self.container :param kwargs: optional keyword parameters :return: string represents the configuration """ tree = container_to_etree(cnf, self.container) buf = BytesIO() etree_write(tree, buf) return buf.getvalue() def dump_to_stream(self, cnf, stream, **kwargs): """ :param cnf: Configuration data to dump :: self.container :param stream: Config file or file like object write to :param kwargs: optional keyword parameters """ tree = container_to_etree(cnf, self.container) etree_write(tree, stream)
""" Hannah Aizenman 10/13/2013 Generates a random subset of size 10^P for p in [1,MAX_P) from [0, 10^8) """ import random MAX_P = 8 max_value = 10**MAX_P large_set = range(max_value) for p in xrange(1,MAX_P): print "list of size: 10^{0}".format(p) f = open("p{0}.txt".format(p), 'w') sample = random.sample(large_set, 10**p) f.write("\n".join(map(lambda x: str(x), sample))) f.close()
import sys import os import re import optparse import math buildscriptDir = os.path.dirname(__file__) buildscriptDir = os.path.abspath(os.path.join(buildscriptDir, os.path.pardir)) sys.path.append(buildscriptDir) import sandbox import codescan import xmail import metadata from ioutil import * EXT_PAT = metadata.INTERESTING_EXT_PAT FROM = 'Code Stat Scanner <code.scan@example.com>' parser = optparse.OptionParser('Usage: %prog [options] [folder]\n\nCompiles stats about a code base; optionally emails report.') xmail.addMailOptions(parser) def getRelevantPaths(p): relevant = [] if not p.endswith('/'): relevant.append(p) while p: i = p.rfind('/') if i == -1: relevant.append('') break else: p = p[0:i+1] relevant.append(p) p = p[0:-1] return relevant def getValuesKeyName(key): return '[' + key + ']' def isValuesKeyName(key): return key[0] == '[' class StatsHolder: def __init__(self, rootPath): rootPath = norm_folder(rootPath) self.rootPath = rootPath self.statsByPath = {} self.statsByExtension = {} def getSandboxName(self): i = self.rootPath.find('/sandboxes/') if i != -1: x = self.rootPath[i + 11:] i = x.find('/code') if i > -1: x = x[0:i] i = x.rfind('/') if i > -1: x = x[0:i] return x else: return self.rootPath def getRelativePath(self, path): endsWithSlash = path.endswith('/') path = os.path.abspath(path).replace('\\', '/') # abspath() removes trailing slash; undo if endsWithSlash and path[-1] != '/': path = path + '/' return path[len(self.rootPath):] def addStat(self, path, statName, number): shouldAggregate = not path.endswith('/') if shouldAggregate: k = getValuesKeyName(statName) dict = self.statsByExtension ignored, ext = os.path.splitext(path) #print('ext = %s' % ext) #sys.exit(0) if not ext in dict: dict[ext] = {} dict = dict[ext] if not statName in dict: dict[statName] = number dict[k] = [number] else: dict[statName] = dict[statName] + number dict[k].append(number) relativePath = self.getRelativePath(path) sbp = self.statsByPath for p in getRelevantPaths(relativePath): if not p in sbp: sbp[p] = {} dict = sbp[p] if not statName in dict: dict[statName] = number if shouldAggregate: #print('aggregating %s for %s', (k, p)) dict[k] = [number] else: dict[statName] = dict[statName] + number if shouldAggregate: dict[k].append(number) _CPP_TESTNAME_PAT = re.compile(r'^\s*(SIMPLE_TEST\s*\(\s*(.*?)\s*\)|class\s+([a-zA-Z_0-9]+)\s*:\s*(public|protected|private)\s+[a-zA-Z_0-9]+Test)', re.MULTILINE | re.DOTALL) _JAVA_TESTNAME_PAT = re.compile(r'^\s*public\s+void\s+([a-zA-Z_0-9]+)\s*\(', re.MULTILINE | re.DOTALL) _PY_TESTNAME_PAT = re.compile(r'^\s*def test([a-zA-Z_0-9]+)\s*\(\s*self\s*\)\s*:', re.MULTILINE | re.DOTALL) _CPP_CLASS_PAT = re.compile(r'^\s*(template\s*<.*?>\s*)?(class|struct|union)\s+([a-zA-Z_0-9]+)', re.MULTILINE | re.DOTALL) _JAVA_CLASS_PAT = re.compile(r'^\s*((abstract|public|private|protected|static|final)\s+)*(class|interface)\s+([a-zA-Z_0-9]+)', re.MULTILINE | re.DOTALL) _PY_CLASS_PAT = re.compile(r'^\s*class\s+([a-zA-Z_0-9]+).*?:', re.MULTILINE | re.DOTALL) _TEST_FILE_PAT = re.compile(r'/test/', re.IGNORECASE) _CLASS_PATS = [_CPP_CLASS_PAT, _JAVA_CLASS_PAT, _PY_CLASS_PAT] _TESTNAME_PATS = [_CPP_TESTNAME_PAT, _JAVA_TESTNAME_PAT, _PY_TESTNAME_PAT] def getFileTypeIndex(path): path = path.lower() if path.endswith('.cpp') or path.endswith('.h'): return 0 elif path.endswith('.java'): return 1 elif path.endswith('.py'): return 2 return -1 def getClassPatForPath(path): i = getFileTypeIndex(path) if i != -1: return _CLASS_PATS[i] def getTestnamePatForPath(path): i = getFileTypeIndex(path) 
if i != -1: return _TESTNAME_PATS[i] def analyzeFile(fpath, stats): fpath = os.path.abspath(fpath) rel = stats.getRelativePath(fpath) #print('analyzing %s' % rel) txt = read_file(fpath) byteCount = len(txt) stats.addStat(fpath, 'byte count, impl + test', byteCount) lineCount = codescan.getLineNumForOffset(txt, byteCount) stats.addStat(fpath, 'line count, impl + test', lineCount) isTest = bool(_TEST_FILE_PAT.search(fpath)) codeType = 'impl' if isTest: codeType = 'test' stats.addStat(fpath, 'byte count, ' + codeType, byteCount) stats.addStat(fpath, 'line count, ' + codeType, lineCount) # See if we know how to do any further analysis on this file. pat = getClassPatForPath(fpath) if pat: if isTest: pat = getTestnamePatForPath(fpath) if pat: stats.addStat(fpath, 'unit test count', len(pat.findall(txt))) else: stats.addStat(fpath, 'class count', len(pat.findall(txt))) def statPathIsFile(p): i = p.rfind('.') if i > -1: return p[i+1:] in ['cpp','h','java','py'] return False def statPathIsComponent(p): return p == '' or (p.endswith('/') and p.find('/') == len(p) - 1) _FLOAT_TYPE = type(0.1) def getReportLine(key, number, showKB = False, formatSpecifier='%02f'): numtxt = number ntype = type(number) if ntype == _FLOAT_TYPE: numtxt = formatSpecifier % number if numtxt.endswith('00'): numtxt = numtxt[0:-3] else: numtxt = str(number) line = '%s = %s' % (key, numtxt) if showKB: line += ' (%0.0f KB)' % (number / 1024.0) return line def getAggregateStats(dict, key): values = dict.get(getValuesKeyName(key)) avg = mean(values) stdev = stddev(values) return avg, stdev def describeTestRatio(ratio, multiplier = 1.0): if ratio < 0.085 * multiplier: lbl = 'POOR COVERAGE' elif ratio < 0.20 * multiplier: lbl = 'fair coverage' elif ratio < 0.5 * multiplier: lbl = 'good coverage' else: lbl = 'excellent coverage' return '%0.2f (%s)' % (ratio, lbl) def generateReport(stats): #print(stats.statsByPath) report = '' components = [p for p in stats.statsByPath.keys() if statPathIsComponent(p)] files = [p for p in stats.statsByPath.keys() if statPathIsFile(p)] components.sort() files.sort() uberDict = stats.statsByPath[''] avg, stdev = getAggregateStats(uberDict, 'byte count, impl') tooBigs = {'': max(avg + 2.5 * stdev, 20000)} avg, stdev = getAggregateStats(uberDict, 'line count, impl') tooLongs = {'': max(avg + 2.5 * stdev, 1000)} for ext in stats.statsByExtension.keys(): dict = stats.statsByExtension[ext] avg, stdev = getAggregateStats(dict, 'byte count, impl') tooBigs[ext] = avg + 2.5 * stdev avg, stdev = getAggregateStats(dict, 'line count, impl') tooLongs[ext] = max(avg + 2.5 * stdev, 1000) for path in components: desc = path if desc == '': desc = 'entire folder tree' report += '\nStats for %s' % desc dict = stats.statsByPath[path] keys = [k for k in dict.keys() if not isValuesKeyName(k)] keys.sort() for key in keys: showKB = key.startswith('byte') report += '\n ' + getReportLine(key, dict[key], showKB) if showKB or key.startswith('line'): values = dict[getValuesKeyName(key)] avg = mean(values) report += '; ' + getReportLine('mean', avg, showKB, formatSpecifier='%0.0f') report += '; ' + getReportLine('std dev', stddev(values), False, formatSpecifier='%0.1f') classCount = dict.get('class count', 0) unitTestCount = dict.get('unit test count', 0) if unitTestCount: implLineCount = dict.get('line count, impl', 0) testLineCount = dict.get('line count, test', 0) if implLineCount: ratio = describeTestRatio(testLineCount / float(implLineCount)) report += '\n ' + getReportLine('test lines per impl line', ratio) implByteCount = 
dict.get('byte count, impl', 0) testByteCount = dict.get('byte count, test', 0) if implByteCount: ratio = describeTestRatio(testByteCount / float(implByteCount)) report += '\n ' + getReportLine('test bytes per impl byte', ratio) if classCount: ratio = describeTestRatio(float(unitTestCount) / classCount, 2.5) else: ratio = '(undefined; no classes)' else: ratio = 'NO UNIT TESTS!' report += '\n ' + getReportLine('tests per class', ratio) if path: myFiles = [f for f in files if f.startswith(path)] #testFiles = [f for f in myFiles if _TEST_FILE_PAT.search(f)] #implFiles = [f for f in myFiles if not _TEST_FILE_PAT.search(f)] tooComplex = [] for implF in myFiles: ignored, ext = os.path.splitext(implF) size = stats.statsByPath[implF].get('byte count, impl') length = stats.statsByPath[implF].get('line count, impl') if size > tooBigs[''] or size > tooBigs[ext] or length > tooLongs[''] or length > tooLongs[ext]: tooComplex.append((implF, size, length)) if tooComplex: # Java doesn't support partial classes, so splitting classes into multiple # files isn't always practical. In C++ and python, however, there are good # ways to split into smaller files. if tooComplex[0][0].endswith('.java'): comment = 'refactor suggested' else: comment = 'REFACTOR NEEDED' report += '\n unusually complex files (%s):' % comment for tc in tooComplex: report += '\n %s (%0.0f KB, %d lines)' % (tc[0], tc[1] / 1024.0, tc[2]) report += '\n' return report def sum(numbers): n = 0 for x in numbers: n += x return n def mean(numbers): return sum(numbers) / float(len(numbers)) def variance(numbers): avg = mean(numbers) diffsFromMean = [n - avg for n in numbers] squaredDfm = [n * n for n in diffsFromMean] variance = sum(squaredDfm) / len(numbers) return variance def stddev(numbers): # This is a *population* stddev, not a sample stddev. # The difference is that we assume we have all possible # values, not just a representative sample. return math.sqrt(variance(numbers)) class StatsRecurser: def __init__(self, stats): self.stats = stats def select(self, folder, dirs): self.stats.addStat(folder, "scanned subdir count", len(dirs)) return dirs class StatsVisitor: def __init__(self, stats): self.stats = stats def visit(self, folder, item, relativePath): analyzeFile(folder + item, self.stats) self.stats.addStat(folder, "scanned file count", 1) def analyze(path, prebuilt, options): if not os.path.isdir(path): sys.stderr.write('%s is not a valid folder.\n' % path) return 1 path = norm_folder(path) stats = StatsHolder(path) print('\nCompiling stats for %s...' % metadata.get_friendly_name_for_path(path)) visitor = StatsVisitor(stats) recurser = StatsRecurser(stats) visitedFiles, visitedFolders = metadata.visit(path, visitor, recurser, excludePrograms=True)#, debug=True) report = generateReport(stats) print(report) if xmail.hasDest(options): xmail.sendmail(report, subject='code stats for %s' % metadata.get_friendly_name_for_path(path), sender='Code Stat Scanner <code.scan@example.com>', options=options) if __name__ == '__main__': options, args = parser.parse_args() prebuilt = [] if args: folder = args[0] else: folder = sandbox.current.get_code_root() exitCode = analyze(folder, prebuilt, options) sys.exit(exitCode)
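A short, hedged worked example for the population statistics helpers above, using an arbitrary sample:

# values = [2, 4, 4, 4, 5, 5, 7, 9]
#   mean(values)     -> 5.0   (sum 40 over N = 8)
#   variance(values) -> 4.0   (squared deviations sum to 32, divided by N = 8)
#   stddev(values)   -> 2.0   (square root of the population variance)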
from __future__ import division, print_function from builtins import object import time, copy import numpy as np from climlab.domain.field import Field from climlab.domain.domain import _Domain, zonal_mean_surface from climlab.utils import walk from attrdict import AttrDict from climlab.domain.xarray import state_to_xarray def _make_dict(arg, argtype): if arg is None: return {} elif isinstance(arg, dict): return arg elif isinstance(arg, argtype): return {'default': arg} else: raise ValueError('Problem with input type') class Process(object): """A generic parent class for all climlab process objects. Every process object has a set of state variables on a spatial grid. For more general information about `Processes` and their role in climlab, see :ref:`process_architecture` section climlab-architecture. **Initialization parameters** \n An instance of ``Process`` is initialized with the following arguments *(for detailed information see Object attributes below)*: :param Field state: spatial state variable for the process. Set to ``None`` if not specified. :param domains: domain(s) for the process :type domains: :class:`~climlab.domain.domain._Domain` or dict of :class:`~climlab.domain.domain._Domain` :param subprocess: subprocess(es) of the process :type subprocess: :class:`~climlab.process.process.Process` or dict of :class:`~climlab.process.process.Process` :param array lat: latitudinal points (optional) :param lev: altitudinal points (optional) :param int num_lat: number of latitudional points (optional) :param int num_levels: number of altitudinal points (optional) :param dict input: collection of input quantities :param bool verbose: Flag to control text output during instantiation of the Process [default: True] **Object attributes** \n Additional to the parent class :class:`~climlab.process.process.Process` following object attributes are generated during initialization: :ivar dict domains: dictionary of process :class:`~climlab.domain.domain._Domain` :ivar dict state: dictionary of process states (of type :class:`~climlab.domain.field.Field`) :ivar dict param: dictionary of model parameters which are given through ``**kwargs`` :ivar dict diagnostics: a dictionary with all diagnostic variables :ivar dict _input_vars: collection of input quantities like boundary conditions and other gridded quantities :ivar str creation_date: date and time when process was created :ivar subprocess: dictionary of suprocesses of the process :vartype subprocess: dict of :class:`~climlab.process.process.Process` """ def __str__(self): str1 = 'climlab Process of type {0}. \n'.format(type(self)) str1 += 'State variables and domain shapes: \n' for varname in list(self.state.keys()): str1 += ' {0}: {1} \n'.format(varname, self.domains[varname].shape) str1 += 'The subprocess tree: \n' str1 += walk.process_tree(self, name=self.name) return str1 def __init__(self, name='Untitled', state=None, domains=None, subprocess=None, lat=None, lev=None, num_lat=None, num_levels=None, input=None, verbose=True, **kwargs): # verbose flag used to control text output at process creation time self.verbose = verbose self.name = name # dictionary of domains. 
Keys are the domain names self.domains = _make_dict(domains, _Domain) # If lat is given, create a simple domains if lat is not None: sfc = zonal_mean_surface() self.domains.update({'default': sfc}) # dictionary of state variables (all of type Field) self.state = AttrDict() states = _make_dict(state, Field) for name, value in states.items(): self.set_state(name, value) # dictionary of model parameters self.param = kwargs # dictionary of diagnostic quantities #self.diagnostics = AttrDict() #self._diag_vars = frozenset() self._diag_vars = [] # dictionary of input quantities #self.input = _make_dict(input, Field) if input is None: #self._input_vars = frozenset() self._input_vars = [] else: self.add_input(list(input.keys())) for name, var in input: self.__dict__[name] = var self.creation_date = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.localtime()) # subprocess is a dictionary of any sub-processes self.subprocess = AttrDict() if subprocess is not None: self.add_subprocesses(subprocess) #if subprocess is None: # #self.subprocess = {} # # a dictionary whose items can be accessed as attributes # self.subprocess = AttrDict() #else: # self.add_subprocesses(subprocess) def add_subprocesses(self, procdict): """Adds a dictionary of subproceses to this process. Calls :func:`add_subprocess` for every process given in the input-dictionary. It can also pass a single process, which will be given the name *default*. :param procdict: a dictionary with process names as keys :type procdict: dict """ if isinstance(procdict, Process): try: name = procdict.name except: name = 'default' self.add_subprocess(name, procdict) else: for name, proc in procdict.items(): self.add_subprocess(name, proc) def add_subprocess(self, name, proc): """Adds a single subprocess to this process. :param string name: name of the subprocess :param proc: a Process object :type proc: :class:`~climlab.process.process.Process` :raises: :exc:`ValueError` if ``proc`` is not a process :Example: Replacing an albedo subprocess through adding a subprocess with same name:: >>> from climlab.model.ebm import EBM_seasonal >>> from climlab.surface.albedo import StepFunctionAlbedo >>> # creating EBM model >>> ebm_s = EBM_seasonal() >>> print ebm_s .. code-block:: none :emphasize-lines: 8 climlab Process of type <class 'climlab.model.ebm.EBM_seasonal'>. State variables and domain shapes: Ts: (90, 1) The subprocess tree: top: <class 'climlab.model.ebm.EBM_seasonal'> diffusion: <class 'climlab.dynamics.diffusion.MeridionalDiffusion'> LW: <class 'climlab.radiation.AplusBT.AplusBT'> albedo: <class 'climlab.surface.albedo.P2Albedo'> insolation: <class 'climlab.radiation.insolation.DailyInsolation'> :: >>> # creating and adding albedo feedback subprocess >>> step_albedo = StepFunctionAlbedo(state=ebm_s.state, **ebm_s.param) >>> ebm_s.add_subprocess('albedo', step_albedo) >>> >>> print ebm_s .. code-block:: none :emphasize-lines: 8 climlab Process of type <class 'climlab.model.ebm.EBM_seasonal'>. 
State variables and domain shapes: Ts: (90, 1) The subprocess tree: top: <class 'climlab.model.ebm.EBM_seasonal'> diffusion: <class 'climlab.dynamics.diffusion.MeridionalDiffusion'> LW: <class 'climlab.radiation.AplusBT.AplusBT'> albedo: <class 'climlab.surface.albedo.StepFunctionAlbedo'> iceline: <class 'climlab.surface.albedo.Iceline'> cold_albedo: <class 'climlab.surface.albedo.ConstantAlbedo'> warm_albedo: <class 'climlab.surface.albedo.P2Albedo'> insolation: <class 'climlab.radiation.insolation.DailyInsolation'> """ if isinstance(proc, Process): self.subprocess.update({name: proc}) self.has_process_type_list = False # Add subprocess diagnostics to parent # (if there are no name conflicts) for diagname, value in proc.diagnostics.items(): #if not (diagname in self.diagnostics or hasattr(self, diagname)): # self.add_diagnostic(diagname, value) self.add_diagnostic(diagname, value) else: raise ValueError('subprocess must be Process object') def remove_subprocess(self, name, verbose=True): """Removes a single subprocess from this process. :param string name: name of the subprocess :param bool verbose: information whether warning message should be printed [default: True] :Example: Remove albedo subprocess from energy balance model:: >>> import climlab >>> model = climlab.EBM() >>> print model climlab Process of type <class 'climlab.model.ebm.EBM'>. State variables and domain shapes: Ts: (90, 1) The subprocess tree: top: <class 'climlab.model.ebm.EBM'> diffusion: <class 'climlab.dynamics.diffusion.MeridionalDiffusion'> LW: <class 'climlab.radiation.AplusBT.AplusBT'> albedo: <class 'climlab.surface.albedo.StepFunctionAlbedo'> iceline: <class 'climlab.surface.albedo.Iceline'> cold_albedo: <class 'climlab.surface.albedo.ConstantAlbedo'> warm_albedo: <class 'climlab.surface.albedo.P2Albedo'> insolation: <class 'climlab.radiation.insolation.P2Insolation'> >>> model.remove_subprocess('albedo') >>> print model climlab Process of type <class 'climlab.model.ebm.EBM'>. State variables and domain shapes: Ts: (90, 1) The subprocess tree: top: <class 'climlab.model.ebm.EBM'> diffusion: <class 'climlab.dynamics.diffusion.MeridionalDiffusion'> LW: <class 'climlab.radiation.AplusBT.AplusBT'> insolation: <class 'climlab.radiation.insolation.P2Insolation'> """ try: self.subprocess.pop(name) except KeyError: if verbose: print('WARNING: {} not found in subprocess dictionary.'.format(name)) self.has_process_type_list = False def set_state(self, name, value): """Sets the variable ``name`` to a new state ``value``. :param string name: name of the state :param value: state variable :type value: :class:`~climlab.domain.field.Field` or *array* :raises: :exc:`ValueError` if state variable ``value`` is not having a domain. :raises: :exc:`ValueError` if shape mismatch between existing domain and new state variable. 
:Example: Resetting the surface temperature of an EBM to :math:`-5 ^{\circ} \\textrm{C}` on all latitues:: >>> import climlab >>> from climlab import Field >>> import numpy as np >>> # setup model >>> model = climlab.EBM(num_lat=36) >>> # create new temperature distribution >>> initial = -5 * ones(size(model.lat)) >>> model.set_state('Ts', Field(initial, domain=model.domains['Ts'])) >>> np.squeeze(model.Ts) Field([-5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5., -5.]) """ if isinstance(value, Field): # populate domains dictionary with domains from state variables self.domains.update({name: value.domain}) else: try: thisdom = self.state[name].domain domshape = thisdom.shape except: raise ValueError('State variable needs a domain.') value = np.atleast_1d(value) if value.shape == domshape: value = Field(value, domain=thisdom) else: raise ValueError('Shape mismatch between existing domain and new state variable.') # set the state dictionary self.state[name] = value for name, value in self.state.items(): #convert int dtype to float if np.issubdtype(self.state[name].dtype, np.dtype('int').type): value = self.state[name].astype(float) self.state[name]=value self.__setattr__(name, value) def _guess_state_domains(self): for name, value in self.state.items(): for domname, dom in self.domains.items(): if value.shape == dom.shape: # same shape, assume it's the right domain self.state_domain[name] = dom def _add_field(self, field_type, name, value): """Adds a new field to a specified dictionary. The field is also added as a process attribute. field_type can be 'input', 'diagnostics' """ try: self.__getattribute__(field_type).update({name: value}) except: raise ValueError('Problem with field_type %s' %field_type) # Note that if process has attribute name, this will trigger The # setter method for that attribute self.__setattr__(name, value) def add_diagnostic(self, name, value=None): """Create a new diagnostic variable called ``name`` for this process and initialize it with the given ``value``. Quantity is accessible in two ways: * as a process attribute, i.e. ``proc.name`` * as a member of the diagnostics dictionary, i.e. ``proc.diagnostics['name']`` Use attribute method to set values, e.g. ```proc.name = value ``` :param str name: name of diagnostic quantity to be initialized :param array value: initial value for quantity [default: None] :Example: Add a diagnostic CO2 variable to an energy balance model:: >>> import climlab >>> model = climlab.EBM() >>> # initialize CO2 variable with value 280 ppm >>> model.add_diagnostic('CO2',280.) >>> # access variable directly or through diagnostic dictionary >>> model.CO2 280 >>> model.diagnostics.keys() ['ASR', 'CO2', 'net_radiation', 'icelat', 'OLR', 'albedo'] """ self._diag_vars.append(name) self.__setattr__(name, value) def add_input(self, name, value=None): '''Create a new input variable called ``name`` for this process and initialize it with the given ``value``. Quantity is accessible in two ways: * as a process attribute, i.e. ``proc.name`` * as a member of the input dictionary, i.e. ``proc.input['name']`` Use attribute method to set values, e.g. 
```proc.name = value ``` :param str name: name of diagnostic quantity to be initialized :param array value: initial value for quantity [default: None] ''' self._input_vars.append(name) self.__setattr__(name, value) def declare_input(self, inputlist): '''Add the variable names in ``inputlist`` to the list of necessary inputs.''' for name in inputlist: self._input_vars.append(name) def declare_diagnostics(self, diaglist): '''Add the variable names in ``inputlist`` to the list of diagnostics.''' for name in diaglist: self._diag_vars.append(name) def remove_diagnostic(self, name): """ Removes a diagnostic from the ``process.diagnostic`` dictionary and also delete the associated process attribute. :param str name: name of diagnostic quantity to be removed :Example: Remove diagnostic variable 'icelat' from energy balance model:: >>> import climlab >>> model = climlab.EBM() >>> # display all diagnostic variables >>> model.diagnostics.keys() ['ASR', 'OLR', 'net_radiation', 'albedo', 'icelat'] >>> model.remove_diagnostic('icelat') >>> model.diagnostics.keys() ['ASR', 'OLR', 'net_radiation', 'albedo'] >>> # Watch out for subprocesses that may still want >>> # to access the diagnostic 'icelat' variable !!! """ #_ = self.diagnostics.pop(name) #delattr(type(self), name) try: delattr(self, name) self._diag_vars.remove(name) except: print('No diagnostic named {} was found.'.format(name)) def to_xarray(self, diagnostics=False): """ Convert process variables to ``xarray.Dataset`` format. With ``diagnostics=True``, both state and diagnostic variables are included. Otherwise just the state variables are included. Returns an ``xarray.Dataset`` object with all spatial axes, including 'bounds' axes indicating cell boundaries in each spatial dimension. :Example: Create a single column radiation model and view as ``xarray`` object:: >>> import climlab >>> state = climlab.column_state(num_lev=20) >>> model = climlab.radiation.RRTMG(state=state) >>> # display model state as xarray: >>> model.to_xarray() <xarray.Dataset> Dimensions: (depth: 1, depth_bounds: 2, lev: 20, lev_bounds: 21) Coordinates: * depth (depth) float64 0.5 * depth_bounds (depth_bounds) float64 0.0 1.0 * lev (lev) float64 25.0 75.0 125.0 175.0 225.0 275.0 325.0 ... * lev_bounds (lev_bounds) float64 0.0 50.0 100.0 150.0 200.0 250.0 ... Data variables: Ts (depth) float64 288.0 Tatm (lev) float64 200.0 204.1 208.2 212.3 216.4 220.5 224.6 ... >>> # take a single timestep to populate the diagnostic variables >>> model.step_forward() >>> # Now look at the full output in xarray format >>> model.to_xarray(diagnostics=True) <xarray.Dataset> Dimensions: (depth: 1, depth_bounds: 2, lev: 20, lev_bounds: 21) Coordinates: * depth (depth) float64 0.5 * depth_bounds (depth_bounds) float64 0.0 1.0 * lev (lev) float64 25.0 75.0 125.0 175.0 225.0 275.0 325.0 ... * lev_bounds (lev_bounds) float64 0.0 50.0 100.0 150.0 200.0 250.0 ... Data variables: Ts (depth) float64 288.7 Tatm (lev) float64 201.3 204.0 208.0 212.0 216.1 220.2 ... ASR (depth) float64 240.0 ASRcld (depth) float64 0.0 ASRclr (depth) float64 240.0 LW_flux_down (lev_bounds) float64 0.0 12.63 19.47 26.07 32.92 40.1 ... LW_flux_down_clr (lev_bounds) float64 0.0 12.63 19.47 26.07 32.92 40.1 ... LW_flux_net (lev_bounds) float64 240.1 231.2 227.6 224.1 220.5 ... LW_flux_net_clr (lev_bounds) float64 240.1 231.2 227.6 224.1 220.5 ... LW_flux_up (lev_bounds) float64 240.1 243.9 247.1 250.2 253.4 ... LW_flux_up_clr (lev_bounds) float64 240.1 243.9 247.1 250.2 253.4 ... 
LW_sfc (depth) float64 128.9 LW_sfc_clr (depth) float64 128.9 OLR (depth) float64 240.1 OLRcld (depth) float64 0.0 OLRclr (depth) float64 240.1 SW_flux_down (lev_bounds) float64 341.3 323.1 318.0 313.5 309.5 ... SW_flux_down_clr (lev_bounds) float64 341.3 323.1 318.0 313.5 309.5 ... SW_flux_net (lev_bounds) float64 240.0 223.3 220.2 217.9 215.9 ... SW_flux_net_clr (lev_bounds) float64 240.0 223.3 220.2 217.9 215.9 ... SW_flux_up (lev_bounds) float64 101.3 99.88 97.77 95.64 93.57 ... SW_flux_up_clr (lev_bounds) float64 101.3 99.88 97.77 95.64 93.57 ... SW_sfc (depth) float64 163.8 SW_sfc_clr (depth) float64 163.8 TdotLW (lev) float64 -1.502 -0.6148 -0.5813 -0.6173 -0.6426 ... TdotLW_clr (lev) float64 -1.502 -0.6148 -0.5813 -0.6173 -0.6426 ... TdotSW (lev) float64 2.821 0.5123 0.3936 0.3368 0.3174 0.3299 ... TdotSW_clr (lev) float64 2.821 0.5123 0.3936 0.3368 0.3174 0.3299 ... """ if diagnostics: dic = self.state.copy() dic.update(self.diagnostics) return state_to_xarray(dic) else: return state_to_xarray(self.state) @property def diagnostics(self): """Dictionary access to all diagnostic variables :type: dict """ diag_dict = {} for key in self._diag_vars: try: #diag_dict[key] = getattr(self,key) # using self.__dict__ doesn't count diagnostics defined as properties diag_dict[key] = self.__dict__[key] except: pass return diag_dict @property def input(self): """Dictionary access to all input variables That can be boundary conditions and other gridded quantities independent of the `process` :type: dict """ input_dict = {} for key in self._input_vars: try: input_dict[key] = getattr(self,key) except: pass return input_dict # Some handy shortcuts... only really make sense when there is only # a single axis of that type in the process. @property def lat(self): """Latitude of grid centers (degrees North) :getter: Returns the points of axis ``'lat'`` if availible in the process's domains. :type: array :raises: :exc:`ValueError` if no ``'lat'`` axis can be found. """ try: for domname, dom in self.domains.items(): try: thislat = dom.axes['lat'].points except: pass return thislat except: raise ValueError('Can\'t resolve a lat axis.') @property def lat_bounds(self): """Latitude of grid interfaces (degrees North) :getter: Returns the bounds of axis ``'lat'`` if availible in the process's domains. :type: array :raises: :exc:`ValueError` if no ``'lat'`` axis can be found. """ try: for domname, dom in self.domains.items(): try: thislat = dom.axes['lat'].bounds except: pass return thislat except: raise ValueError('Can\'t resolve a lat axis.') @property def lon(self): """Longitude of grid centers (degrees) :getter: Returns the points of axis ``'lon'`` if availible in the process's domains. :type: array :raises: :exc:`ValueError` if no ``'lon'`` axis can be found. """ try: for domname, dom in self.domains.items(): try: thislon = dom.axes['lon'].points except: pass return thislon except: raise ValueError('Can\'t resolve a lon axis.') @property def lon_bounds(self): """Longitude of grid interfaces (degrees) :getter: Returns the bounds of axis ``'lon'`` if availible in the process's domains. :type: array :raises: :exc:`ValueError` if no ``'lon'`` axis can be found. """ try: for domname, dom in self.domains.items(): try: thislon = dom.axes['lon'].bounds except: pass return thislon except: raise ValueError('Can\'t resolve a lon axis.') @property def lev(self): """Pressure levels at grid centers (hPa or mb) :getter: Returns the points of axis ``'lev'`` if availible in the process's domains. 
:type: array :raises: :exc:`ValueError` if no ``'lev'`` axis can be found. """ try: for domname, dom in self.domains.items(): try: thislev = dom.axes['lev'].points except: pass return thislev except: raise ValueError('Can\'t resolve a lev axis.') @property def lev_bounds(self): """Pressure levels at grid interfaces (hPa or mb) :getter: Returns the bounds of axis ``'lev'`` if availible in the process's domains. :type: array :raises: :exc:`ValueError` if no ``'lev'`` axis can be found. """ try: for domname, dom in self.domains.items(): try: thislev = dom.axes['lev'].bounds except: pass return thislev except: raise ValueError('Can\'t resolve a lev axis.') @property def depth(self): """Depth at grid centers (m) :getter: Returns the points of axis ``'depth'`` if availible in the process's domains. :type: array :raises: :exc:`ValueError` if no ``'depth'`` axis can be found. """ try: for domname, dom in self.domains.items(): try: thisdepth = dom.axes['depth'].points except: pass return thisdepth except: raise ValueError('Can\'t resolve a depth axis.') @property def depth_bounds(self): """Depth at grid interfaces (m) :getter: Returns the bounds of axis ``'depth'`` if availible in the process's domains. :type: array :raises: :exc:`ValueError` if no ``'depth'`` axis can be found. """ try: for domname, dom in self.domains.items(): try: thisdepth = dom.axes['depth'].bounds except: pass return thisdepth except: raise ValueError('Can\'t resolve a depth axis.') def process_like(proc): """Make an exact clone of a process, including state and all subprocesses. The creation date is updated. :param proc: process :type proc: :class:`~climlab.process.process.Process` :return: new process identical to the given process :rtype: :class:`~climlab.process.process.Process` :Example: :: >>> import climlab >>> from climlab.process.process import process_like >>> model = climlab.EBM() >>> model.subprocess.keys() ['diffusion', 'LW', 'albedo', 'insolation'] >>> albedo = model.subprocess['albedo'] >>> albedo_copy = process_like(albedo) >>> albedo.creation_date 'Thu, 24 Mar 2016 01:32:25 +0000' >>> albedo_copy.creation_date 'Thu, 24 Mar 2016 01:33:29 +0000' """ newproc = copy.deepcopy(proc) newproc.creation_date = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.localtime()) return newproc def get_axes(process_or_domain): """Returns a dictionary of all Axis in a domain or dictionary of domains. :param process_or_domain: a process or a domain object :type process_or_domain: :class:`~climlab.process.process.Process` or :class:`~climlab.domain.domain._Domain` :raises: :exc: `TypeError` if input is not or not having a domain :returns: dictionary of input's Axis :rtype: dict :Example: :: >>> import climlab >>> from climlab.process.process import get_axes >>> model = climlab.EBM() >>> get_axes(model) {'lat': <climlab.domain.axis.Axis object at 0x7ff13b9dd2d0>, 'depth': <climlab.domain.axis.Axis object at 0x7ff13b9dd310>} """ if isinstance(process_or_domain, Process): dom = process_or_domain.domains else: dom = process_or_domain if isinstance(dom, _Domain): return dom.axes elif isinstance(dom, dict): axes = {} for thisdom in list(dom.values()): assert isinstance(thisdom, _Domain) axes.update(thisdom.axes) return axes else: raise TypeError('dom must be a domain or dictionary of domains.')