text
stringlengths
29
850k
""" storlever.mngr.utils.ntpmgr ~~~~~~~~~~~~~~~~ This module implements ntp server management. :copyright: (c) 2014 by OpenSight (www.opensight.cn). :license: AGPLv3, see LICENSE for more details. """ import os import os.path import subprocess from storlever.lib.config import Config from storlever.lib.command import check_output from storlever.lib.exception import StorLeverError from storlever.lib import logger from storlever.lib.utils import filter_dict import logging from storlever.lib.schema import Schema, Use, Optional, \ Default, DoNotCare, BoolVal, IntVal, AutoDel from storlever.lib.confparse import properties from storlever.lib.lock import lock from storlever.mngr.system.cfgmgr import STORLEVER_CONF_DIR, cfg_mgr from storlever.mngr.system.servicemgr import service_mgr from storlever.mngr.system.modulemgr import ModuleManager MODULE_INFO = { "module_name": "zabbix_agent", "rpms": [ "zabbix-agent" ], "comment": "Provides the support of zabbix agent config for storlever" } ZABBIX_AGENT_CONF_FILE_NAME = "zabbix_agentd_conf.yaml" ZABBIX_AGENT_ETC_CONF_DIR = "/etc/zabbix/" ZABBIX_AGENT_CONF_FILE = "zabbix_agentd.conf" ZABBIX_AGENT_CONF_SCHEMA = Schema({ Optional("hostname"): Default(Use(str), default=""), # How often list of active checks is refreshed, in seconds. # Note that after failing to refresh active checks the next refresh # will be attempted after 60 seconds. Optional("refresh_active_check"): Default(IntVal(min=60, max=3600), default=120), # the server ip:port list for active check.zabbix_agent would get the active check list # from each server at the refresh_active_check frequency. Entry string Format is IP:PORT Optional("active_check_server_list"): Default([Use(str)], default=[]), # the server ip list for passive check. each passive check's source ip must # exist in this list. 
Entry string Format is IP Optional("passive_check_server_list"): Default([Use(str)], default=[]), AutoDel(str): object # for all other key we auto delete }) class ZabbixAgentManager(object): """contains all methods to manage NTP server in linux system""" def __init__(self): # need a mutex to protect create/delete bond interface self.lock = lock() self.conf_file = os.path.join(STORLEVER_CONF_DIR, ZABBIX_AGENT_CONF_FILE_NAME) self.zabbix_agentd_conf_schema = ZABBIX_AGENT_CONF_SCHEMA def _load_conf(self): zabbix_agent_conf = {} cfg_mgr().check_conf_dir() if os.path.exists(self.conf_file): zabbix_agent_conf = \ Config.from_file(self.conf_file, self.zabbix_agentd_conf_schema).conf else: zabbix_agent_conf = self.zabbix_agentd_conf_schema.validate(zabbix_agent_conf) return zabbix_agent_conf def _save_conf(self, zabbix_agent_conf): cfg_mgr().check_conf_dir() Config.to_file(self.conf_file, zabbix_agent_conf) def _sync_to_system_conf(self, zabbix_agent_conf): if not os.path.exists(ZABBIX_AGENT_ETC_CONF_DIR): os.makedirs(ZABBIX_AGENT_ETC_CONF_DIR) # conf file zabbix_agent_property = properties() # active server if zabbix_agent_conf["active_check_server_list"]: zabbix_agent_property["ServerActive"] = \ ",".join(zabbix_agent_conf["active_check_server_list"]) else: zabbix_agent_property.delete("ServerActive") # Server server_list = list(zabbix_agent_conf["passive_check_server_list"]) if not server_list: server_list.append("127.0.0.1") zabbix_agent_property["Server"] = ",".join(server_list) # hostname if zabbix_agent_conf["hostname"] == "": zabbix_agent_property.delete("Hostname") else: zabbix_agent_property["Hostname"] = zabbix_agent_conf["hostname"] # RefreshActiveChecks zabbix_agent_property["RefreshActiveChecks"] = str(zabbix_agent_conf["refresh_active_check"]) etc_conf_file = os.path.join(ZABBIX_AGENT_ETC_CONF_DIR, ZABBIX_AGENT_CONF_FILE) zabbix_agent_property.apply_to(etc_conf_file) def sync_to_system_conf(self, *args, **kwargs): """sync the ntp conf to /etc/ntp.conf""" if 
not os.path.exists(self.conf_file): return # if not conf file, don't change the system config with self.lock: zabbix_agent_conf = self._load_conf() self._sync_to_system_conf(zabbix_agent_conf) def system_restore_cb(self, *args, **kwargs): """sync the ntp conf to /etc/ntp""" if not os.path.exists(self.conf_file): return # if not conf file, don't change the system config os.remove(self.conf_file) with self.lock: zabbix_agent_conf = self._load_conf() self._sync_to_system_conf(zabbix_agent_conf) def set_agent_conf(self, config={}, operator="unkown", *args, **kwargs): if not isinstance(config, dict): raise StorLeverError("Parameter type error", 500) if len(config) == 0 and len(kwargs) == 0: return config.update(kwargs) not_allow_keys = ( "active_check_server_list", "passive_check_server_list" ) config = filter_dict(config, not_allow_keys, True) with self.lock: zabbix_agent_conf = self._load_conf() for name, value in config.items(): if name in zabbix_agent_conf and value is not None: zabbix_agent_conf[name] = value # check config conflict zabbix_agent_conf = self.zabbix_agentd_conf_schema.validate(zabbix_agent_conf) # save new conf self._save_conf(zabbix_agent_conf) self._sync_to_system_conf(zabbix_agent_conf) logger.log(logging.INFO, logger.LOG_TYPE_CONFIG, "Zabbix agent config is updated by operator(%s)" % (operator)) def get_agent_conf(self, *args, **kwargs): with self.lock: zabbix_agent_conf = self._load_conf() not_allow_keys = ( "active_check_server_list", "passive_check_server_list" ) zabbix_agent_conf = filter_dict(zabbix_agent_conf, not_allow_keys, True) return zabbix_agent_conf def get_passive_check_server_list(self, *args, **kwargs): with self.lock: zabbix_agent_conf = self._load_conf() return zabbix_agent_conf["passive_check_server_list"] def set_passive_check_server_list(self, servers=[], operator="unkown", *args, **kwargs): with self.lock: zabbix_agent_conf = self._load_conf() zabbix_agent_conf["passive_check_server_list"] = servers # check config conflict 
zabbix_agent_conf = self.zabbix_agentd_conf_schema.validate(zabbix_agent_conf) # save new conf self._save_conf(zabbix_agent_conf) self._sync_to_system_conf(zabbix_agent_conf) logger.log(logging.INFO, logger.LOG_TYPE_CONFIG, "Zabbix agent passive server list is updated by operator(%s)" % (operator)) def get_active_check_server_list(self, *args, **kwargs): with self.lock: zabbix_agent_conf = self._load_conf() return zabbix_agent_conf["active_check_server_list"] def set_active_check_server_list(self, servers=[], operator="unkown", *args, **kwargs): with self.lock: zabbix_agent_conf = self._load_conf() zabbix_agent_conf["active_check_server_list"] = servers # check config conflict zabbix_agent_conf = self.zabbix_agentd_conf_schema.validate(zabbix_agent_conf) # save new conf self._save_conf(zabbix_agent_conf) self._sync_to_system_conf(zabbix_agent_conf) logger.log(logging.INFO, logger.LOG_TYPE_CONFIG, "Zabbix agent active server list is updated by operator(%s)" % (operator)) ZabbixAgentManager = ZabbixAgentManager() # register ftp manager callback functions to basic manager cfg_mgr().register_restore_from_file_cb(ZabbixAgentManager.sync_to_system_conf) cfg_mgr().register_system_restore_cb(ZabbixAgentManager.system_restore_cb) service_mgr().register_service("zabbix-agent", "zabbix-agent", "/usr/sbin/zabbix_agentd", "zabbix agent for system/network monitor") ModuleManager.register_module(**MODULE_INFO) def zabbix_agent_mgr(): """return the global user manager instance""" return ZabbixAgentManager
If your Oppo K1 is behaving erratically on its own — for example, because a virus has affected your mobile — a hard reset can help.
# Twisted, the Framework of Your Internet # Copyright (C) 2001-2002 Matthew W. Lefkowitz # # This library is free software; you can redistribute it and/or # modify it under the terms of version 2.1 of the GNU Lesser General Public # License as published by the Free Software Foundation. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # from twisted.scripts import twistd from twisted.python import usage try: import cPickle as pickle except ImportError: import pickle class Options(usage.Options): optFlags = [ ['encrypted', 'e' , "The specified tap/aos/xml file is encrypted."] ] optParameters = [ ['port','p', 80, "The port the web server is running on"], ['file','f','twistd.tap', "read the given .tap file"], ['python','y', None, "read an application from within a Python file"], ['xml', 'x', None, "Read an application from a .tax file (Marmalade format)."], ['source', 's', None, "Read an application from a .tas file (AOT format)."], ] def opt_script(self, scriptname): """Set the root resource of the web server to the resource created (and put into the `resource' variable) by this script.""" d = {} execfile(scriptname, d) self['root'] = d['resource'] def opt_pickle(self, picklename): """Set the root resource of the web server to the resource saved in this pickle.""" self['root'] = pickle.load(open(picklename)) def getFactory(app, port): for (num, fact, _, _) in app.tcpPorts: if num == port: return fact raise LookupError('no such port') def main(config): if config['encrypted']: import getpass passphrase = getpass.getpass('Passphrase: ') else: passphrase = None 
application = twistd.loadApplication(config, passphrase) site = getFactory(application, int(config['port'])) site.resource = config['root'] application.save() def run(): import sys config = Options() config.parseOptions() try: main(config) except LookupError, err: sys.exit(sys.argv[0]+": "+str(err)) except IOError, err: sys.exit(sys.argv[0]+": %s: %s" % (err.filename, err.strerror)) if __name__ == '__main__': run()
It seemed like almost every night we would have some sort of connection issue when playing online.
I do hope to see several people on here as this game has a special place in our hearts from way back when. Besides coop, I cannot wait to play Halo 2 online again. It is going to be amazing. Well, Destiny is out now. Though the game is not quite as large as I had hoped it would be, it is still amazing. I know three of us have been playing Destiny on a regular basis. I absolutely love this game. Myself, Hare Krazy and Fishguts have been playing almost nightly. I do have Sworn Brotherhood up on Bungie.net. I am also going to be posting on forums trying to get more members on board to play online with. I would like to get more into doing the raids, and we need at least 3 more players. If anybody else has the game lemme know. Also, those that got the PS4 over the Xbox One, if you are playing Destiny on that platform, you can also join Sworn Brotherhood on Bungie.net as I registered for both consoles. Ok, so we have now had our "Next-Gen" consoles for nearly a year. Not all of us were able to make the jump (they are expensive), however we are slowly getting everybody on a next gen console. We have become somewhat separated as some chose the PS4 where others went with the Xbox One. The games coming out have been a big let down in my opinion. Though we had a few solid titles early on, most were just a slightly prettified version of last gen's titles. With that said, the past few months have been horribly slow across all platforms. The most action was the Steam Summer Sale. The fall looks to be the complete opposite. Instead of being crazy slow on new releases, it looks to be unaffordably loaded with new titles. The biggest of which are the likes of Destiny, Assassin's Creed Unity, and Halo: Master Chief Collection. I do feel that Destiny and Halo will dominate most of the SB crew. I know if the Halo 2 remake is exactly like the Halo 2 we all know and love from the original Xbox, I will spend most of my time on that. 
I just hope they keep the multi-player of Halo 2 intact, with no changes like "armor abilities".
This will also cause a price drop to $399.
But nothing that justifies Microsoft forcing us to buy this gimmick.
There is no way to set up a private match with 11 of your other friends; you can only have a group of 6 and are forced to play against other random opponents.
I know there are three of us playing on a regular basis, including myself, Kingasscrack and Gdag. We are getting better as a team. Do note that I have not seen much in this game as far as clans. I do not know how we would be able to do clan battles considering there is no true private sessions. I am sure it will come within a patch in the next few months. I do encourage those looking for a fun, fast, action packed game to check this out. I hope to see more of us on this game, hopefully on the Xbox One. However, there is a Xbox 360 version launching early next month.
import re
from pathlib import Path

from setuptools import setup


def find_version(filename):
    """Return the __version__ string declared in *filename*.

    Raises RuntimeError instead of silently returning None (which would
    have been passed to setup() as version=None) when no
    ``__version__ = '...'`` assignment is found.
    """
    _version_re = re.compile(r"__version__ = '(.*)'")
    # use a context manager so the file handle is not leaked
    with open(filename) as fh:
        for line in fh:
            version_match = _version_re.match(line)
            if version_match:
                return version_match.group(1)
    raise RuntimeError('Unable to find __version__ in %s' % filename)


__version__ = find_version('nifstd_tools/__init__.py')

with open('README.md', 'rt') as f:
    long_description = f.read()

tests_require = ['pytest']
setup(
    name='nifstd-tools',
    version=__version__,
    description='utilities for working with the NIF ontology',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/tgbugs/pyontutils/tree/master/nifstd',
    author='Tom Gillespie',
    author_email='tgbugs@gmail.com',
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
    keywords='nif nifstd ontology pyontutils neuroscience',
    packages=['nifstd_tools'],
    python_requires='>=3.6',
    tests_require=tests_require,
    install_requires=[
        'beautifulsoup4',
        'flask',
        'nbconvert',
        'nbformat',
        'networkx',
        'psutil',
        'pymysql',
        'pyontutils>=0.1.26',
        'sqlalchemy',
    ],
    extras_require={'dev': ['mysql-connector',
                            'protobuf',
                            'pytest-cov',
                            'wheel',
                            ],
                    'spell': ['hunspell'],
                    'test': tests_require,
                    },
    scripts=['bin/ttlcmp'],
    entry_points={
        'console_scripts': [
            'ont-docs=nifstd_tools.docs:main',
            'ontree=nifstd_tools.ontree:main',
            'registry-sync=nifstd_tools.scr_sync:main',
            'slimgen=nifstd_tools.slimgen:main',
        ],
    },
    # ship the sparc term resources plus every plain resource file that is
    # not a service/config artifact
    data_files=[('share/nifstd/resources/sparc_term_versions/',
                 ['resources/sparc_term_versions/sparc_terms2-mod.txt']),
                ('share/nifstd/resources/',
                 [p.as_posix() for p in Path('resources').iterdir()
                  if p.is_file() and p.suffix[1:] not in
                  ('confd', 'rc', 'service', 'socket', 'tmp', 'spec')],)])
The Department for Education has pledged to ease pressures on teachers in England, after it accepted the recommendations of a new report that said an “audit culture” in schools was causing anxiety and staff burnout without improving results. The report by the DfE’s teacher workload advisory group says teachers have to waste time producing data on their pupils, with the recording, monitoring and analysing of data being demanded by multiple sources, including local and central government, Ofsted school inspectors and multiple tiers of school management. In some cases teachers are expected to report on up to 30 different elements of data for 30 children in a class, which the report described as an attempt to provide “spurious precision” in tracking pupil attainment. The workload of teachers in England is among the highest in the world according to international surveys, and is often cited as a cause of experienced teachers leaving the profession. Prof Becky Allen of the UCL Institute of Education, who chaired the advisory group, said the collection of data by schools has led to unsustainable workload and stress for many teachers. The report recommends that the DfE should not request regular attainment data from schools other than its statutory requirements, while Ofsted inspectors should examine if a school’s data collections are efficient. Damian Hinds, the education secretary, signalled his commitment by signing a joint letter alongside Amanda Spielman, the head of Ofsted, as well as leaders of the Association of School and College Leaders and the National Association of Head Teachers. The letter, to be sent to every school leader in England, has “a straightforward message” that headteachers and managers who want to cut back on unnecessary workload will be supported. “None of us wants staff in schools to feel like they are drowning in unnecessary and meaningless data,” the letter states. 
Tackling excessive workload is a key plank in the government’s drive to improve teacher retention. But it came under attack from Labour for continuing cuts to school budgets, including a failure to fully fund the recent pay rise awarded to teachers. Labour said analysis of research from the Institute for Fiscal Studies revealed that annual spending on schools would be £1.7bn higher in 2019-20 if funding per pupil had been maintained in real terms since 2015. Angela Rayner, Labour’s shadow education secretary, said the cuts made a mockery of chancellor Philip Hammond’s “little extra” for schools in the recent budget. “Instead of offering a sticking plaster to schools this government should be genuinely investing in them, reversing their unjustifiable cuts,” Rayner said.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Companion script to doppia/src/tests/objects_detection/test_objects_detection
Will plot the content of channel_statistics.txt
"""

from __future__ import print_function

import pylab


def main():
    """Load channel_statistics.txt, fit per-group power-law regressions,
    and plot mean/std-dev ratio curves for down- and up-scaling."""

    filename = "channel_statistics.txt"

    # parse the content --
    data = pylab.loadtxt(filename)
    scales = data[0, :]
    print("num_scales == ", len(scales))

    # Split the scales into the downscaling (<= 1) and upscaling (> 1)
    # ranges. If every scale is <= 1, the upscaling slice is empty
    # (the original loop wrongly put the last scale into the up range).
    first_up = next((idx for idx, s in enumerate(scales) if s > 1),
                    len(scales))
    down_scales_slice = slice(0, first_up)
    up_scales_slice = slice(first_up, None)

    # data holds one "scales" row plus a (mean, std) row pair per channel;
    # integer division is required so range() works on Python 3 too
    num_channels = (len(data) - 1) // 2

    # do regressions --
    do_regressions = True

    # channels to be estimated together;
    # these values were set after observing the data a first time
    regression_groups = {"HOG": range(0, 7), "L": [7], "UV": range(8, 10)}
    down_scales_regressions = {}
    up_scales_regressions = {}
    if do_regressions:
        log_data = pylab.log(data)
        log_scales = log_data[0, :]
        for name, channels_indices in regression_groups.items():
            # repeat the x values once per channel so one line is fit
            # jointly over all channels of the group
            down_scales_x = \
                list(log_scales[down_scales_slice]) * len(channels_indices)
            up_scales_x = \
                list(log_scales[up_scales_slice]) * len(channels_indices)
            down_scales_y = []
            up_scales_y = []
            for c in channels_indices:
                down_scales_y.extend(log_data[c * 2 + 1, down_scales_slice])
                up_scales_y.extend(log_data[c * 2 + 1, up_scales_slice])

            # r = a*(k**b) => log(r) = b*log(k) + log(a)
            down_b, down_log_a = pylab.polyfit(down_scales_x,
                                               down_scales_y, 1)
            up_b, up_log_a = pylab.polyfit(up_scales_x, up_scales_y, 1)
            down_scales_regressions[name] = [down_b, pylab.exp(down_log_a)]
            up_scales_regressions[name] = [up_b, pylab.exp(up_log_a)]
            print("%s\tfor downscaling r = %.3f*(x**%.3f), "
                  "for upscaling r = %.3f*(x**%.3f)"
                  % (name,
                     down_scales_regressions[name][1],
                     down_scales_regressions[name][0],
                     up_scales_regressions[name][1],
                     up_scales_regressions[name][0]))

    # plot the content --
    pylab.figure(0)
    pylab.gcf().set_facecolor("w")  # set white background
    pylab.grid(True)

    colormap = pylab.cm.Spectral
    #colormap = pylab.cm.gist_rainbow
    #colormap = pylab.cm.brg

    for channel_index in range(num_channels):
        color = colormap(channel_index / float(num_channels))
        label = "channel %i" % channel_index

        # mean down
        pylab.subplot(2, 2, 1)
        x = scales[down_scales_slice]
        y = data[channel_index * 2 + 1, down_scales_slice]
        pylab.plot(x, y, color=color)

        # std dev down
        pylab.subplot(2, 2, 3)
        x = scales[down_scales_slice]
        y = data[channel_index * 2 + 2, down_scales_slice]
        pylab.plot(x, y, color=color)

        # mean up
        pylab.subplot(2, 2, 2)
        x = scales[up_scales_slice]
        y = data[channel_index * 2 + 1, up_scales_slice]
        pylab.plot(x, y, color=color)

        # std dev up; only this curve carries the channel label,
        # so the legend lists each channel exactly once
        pylab.subplot(2, 2, 4)
        x = scales[up_scales_slice]
        y = data[channel_index * 2 + 2, up_scales_slice]
        pylab.plot(x, y, color=color, label=label)

    for group_name, b_a in down_scales_regressions.items():
        b, a = b_a
        # regression curve over the downscaling means
        pylab.subplot(2, 2, 1)
        x = scales[down_scales_slice]
        y = [a * (k ** b) for k in x]
        # reuse the color of the group's first channel
        color = colormap(regression_groups[group_name][0]
                         / float(num_channels))
        pylab.plot(x, y, color=color, label=group_name,
                   linewidth=1.5, linestyle="--")

    for group_name, b_a in up_scales_regressions.items():
        b, a = b_a
        # regression curve over the upscaling means
        pylab.subplot(2, 2, 2)
        x = scales[up_scales_slice]
        y = [a * (k ** b) for k in x]
        color = colormap(regression_groups[group_name][0]
                         / float(num_channels))
        pylab.plot(x, y, color=color, label=group_name,
                   linewidth=1.5, linestyle="--")

    pylab.subplot(2, 2, 1)
    pylab.xlabel("scales")
    pylab.ylabel("mean ratio")
    pylab.title("Mean ratio when downscaling")

    pylab.subplot(2, 2, 3)
    #pylab.legend(loc="lower right", fancybox=True)
    pylab.xlabel("scales")
    pylab.ylabel("Standard deviation of ratio")
    pylab.title("Standard deviation of ratio when downscaling")

    pylab.subplot(2, 2, 2)
    pylab.legend(loc="lower right", fancybox=True)
    pylab.xlabel("scales")
    pylab.ylabel("mean ratio")
    pylab.title("Mean ratio when upscaling")

    pylab.subplot(2, 2, 4)
    pylab.legend(loc="lower right", fancybox=True)
    pylab.xlabel("scales")
    pylab.ylabel("Standard deviation of ratio")
    pylab.title("Standard deviation of ratio when upscaling")

    pylab.suptitle("Channel statistics")
    pylab.draw()
    pylab.show()  # blocking call
    return


if __name__ == "__main__":
    # Import Psyco if available (legacy Python 2 JIT; harmless when absent)
    try:
        import psyco
        psyco.full()
    except ImportError:
        #print("(psyco not found)")
        pass
    else:
        print("(using psyco)")
    main()
I converted this basement into an apartment but didn't take any photographs.
from __future__ import print_function import sys from argparse import ArgumentParser from . import S3Remote def command_line(remote): prog = 'crawl_index' description = 'Helper tool to run MapReduce jobs over Common Crawl' crawl_list = ArgumentParser(add_help=False) crawl_list.add_argument( '-l', '--list', action='store_true', help='Enumerate all possible crawl dates', ) # Preparse Date Codes crawl, _ = crawl_list.parse_known_args() if crawl.list: remote.print_buckets() exit(0) parser = ArgumentParser( parents=[crawl_list], prog=prog, description=description, ) parser.add_argument( '-v', '--version', action='version', version="%s v0.1.0" % prog ) parser.add_argument( '-d', '--date', nargs='?', default='latest', help='Specify crawl date', metavar='d', ) parser.add_argument( '-f', '--file', nargs='?', metavar='f', default=None, help='Output to a file' ) return parser.parse_args() def main(): remote = S3Remote() args = command_line(remote) crawl = remote.select_crawl() if args.date == 'latest' else remote.select_crawl(args.date) fp = open(args.file, 'wt') if args.file else sys.stdout idx = remote.get_index(crawl) for i in idx: print(i, file=fp) if __name__ == '__main__': sys.exit(main())
I’ve always been somewhat confused by the odd position that the Holocaust did not occur or was much less serious than is generally held – people died, but it wasn’t really as bad as the Jews want you to believe. We will never know the name of each and every victim, but photographic evidence and first-hand stories make it clear that something extensive and quite barbaric occurred. And yet, apparently otherwise rational people deny it. Maybe their blindness is psychosomatic; maybe it grows out of prejudice. Many more people deny the animal holocaust. They just can’t see the suffering apparently, suffering on a magnitude that dwarfs the combined evils we have perpetrated on ourselves. Certain skills are considered key signs of higher mental abilities: good memory, a grasp of grammar and symbols, self-awareness, understanding others' motives, imitating others, and being creative. Bit by bit, in ingenious experiments, researchers have documented these talents in other species, gradually chipping away at what we thought made human beings distinctive while offering a glimpse of where our own abilities came from. Minds of their Own. National Geographic. 2008. Signs of higher mental ability in animals are cause for great alarm. We hurt and kill billions of animals every year. Literally. Billions. To a growing number of people, particularly to those who understand that the fundamental idea behind human rights is that our similarities matter immeasurably more than our differences, it is these signs of higher mental ability, our similarities, which make the animals’ plight so immediately important. [Irene] Pepperberg carried Alex [(1976 - 2007), an African grey parrot] on her arm to a tall wooden perch in the middle of the room. She then retrieved a green key and a small green cup from a basket on a shelf. She held up the two items to Alex's eye. “Shape,” Alex said. His voice had the digitized sound of a cartoon character.
Since parrots lack lips (another reason it was difficult for Alex to pronounce some sounds, such as ba), the words seemed to come from the air around him, as if a ventriloquist were speaking. But the words—and what can only be called the thoughts—were entirely his.”Minds of their Own. National Geographic. 2008. How can anyone deny the animal holocaust? Denial probably grows from three sources: mental deficit, ignorance, or bigotry. Maybe people really don’t notice that their dog is a someone. Maybe people really can’t imagine the pig behind the chop. Maybe people like feeling superior. ... A whole range of animal studies now suggest that the roots of cognition are deep, widespread, and highly malleable. Minds of their Own. National Geographic. 2008. The animal holocaust must be main-stream science’s greatest failure and is likely to remain so unless we scorch the globe in a nuclear conflagration. Discoveries about the similarity between humans and other species have demolished all prior claims about animals being unfeeling brutes with little if any sense of self. These claims were used repeatedly as defenses against critics’ concerns. And yet, science has said little to alert the public to the ethical implications of these discoveries, and worse, justification for vivisection is often now based on precisely these similarities – an animal model of depression, for example, is valuable they claim, because our suffering is of a like kind. People were surprised to discover that chimpanzees make tools," said Alex Kacelnik, a behavioral ecologist at Oxford University, referring to the straws and sticks chimpanzees shape to pull termites from their nests. "But people also thought, 'Well, they share our ancestry—of course they're smart.' Now we're finding these kinds of exceptional behaviors in some species of birds. But we don't have a recently shared ancestry with birds. 
Their evolutionary history is very different; our last common ancestor with all birds was a reptile that lived over 300 million years ago. "This is not trivial," Kacelnik continued. "It means that evolution can invent similar forms of advanced intelligence more than once—that it's not something reserved only for primates or mammals."Minds of their Own. National Geographic. 2008. How much like us do we need to discover animals to be before we are able to see the horrors that are occurring so vividly around us all the time? And what are the implications for those who see the animal holocaust as it is? What is the proper response? Should the response change over time? At least 2500 years ago, people were arguing that the obvious similarities between us and other animals meant that eating them was immoral. Legislation providing some protection for animals has been passing in various countries since the late 1700s. And yet, little substantive effect can be seen. What should someone do in the face of the animal holocaust in light of the long history of the failure to stop it? One way to answer this question is to ask yourself what you think you might have done if you were living in Germany during the Holocaust or in the South prior to the civil war. History tells us that most people who lived through such periods did nothing to help the victims. People simply didn’t see any victims; they saw only Jews and niggers. They denied that anything wrong was occurring. There has been quite a bit of coverage lately surrounding the two incendiary devices ignited in California, allegedly by animal rights activists targeting vivisectors at UC-Santa Cruz. This seems like a government COINTELPRO sort of thing to me, but for the sake of this essay, I’ll assume that they were genuine. I don’t see how anyone who sees the animal holocaust for what it is could reasonably argue that the bombs, as the media has called them, were unjustified, over the line, uncalled for, or disproportionate. 
Critics come in at least two flavors; there are those who are appalled because they deny that anything wrong is being done in the first place. They don’t see victims; they see only animals. To this group, no similarity between another animal and a human can ever make their suffering as significant as ours, or even measurably significant when compared with ours. Another group declares that they too see the holocaust, but that the appropriate response is to work peacefully for change, to decry any and all violence, to turn the other cheek. They argue that violence will turn public opinion against the animal rights movement, will stall progress, or that the harm to animals will be greater somehow. Neither group makes a compelling argument. The animal holocaust is real, and neither 2500 years of brilliant discourse on the subject nor 200 years of incremental legislative progress have had much effect. More of the same seems like telling the victim to keep turning the other cheek. It defies common sense to believe that more of the same is the answer. The likelihood of increasing violence seems high to me. As I’ve written before, this is a social problem of growing intensity that we all have a responsibility to address. If we can’t invent ways to deal with this problem, the future is dark indeed. Looking out over the landscape of enterprises associated with the issue, it seems to me that academia, particularly public universities, have a strong responsibility in this regard. They have the power and resources to provide a public venue for sustained and in-depth discussion of this matter. Maybe my belief in the power of education is naïve, but given the chance, with the facts before them, I believe that enough people would choose a drastic alteration in public policy regarding our relationship with other animals. And whether they would or not, open public discussion could vent much pent-up frustration. 
My belief in this likelihood is the driving force behind our efforts to establish the National Primate Research Exhibition Hall adjacent to the University of Wisconsin’s primate vivisection labs. I believe that talking and a willingness to change can solve just about any problem. But talk is such a dangerous idea to those who are vested in the system that the university has been fighting to quash this potentially national venue for three years. Their legal costs to-date must be around half a million dollars. And this isn’t an anomaly. When I first got involved, more than a decade ago, I asked the Oregon Primate Center to convene a public forum and to invite the public to a discussion about their use of monkeys. They refused. So did every other university that I approached. It turned out that I was far from being the first to call for public discussion. Animal enterprises’ response to criticism has uniformly been a hunkering down. Hunkering down and passing stricter laws to curtail criticism do not seem to me to be creative ways to deal with this escalating problem. I don’t see how anyone could pin much hope on them. Complicating the problem, no one is likely to stop hurting animals without being forced to do so. This makes it unlikely that those doing the hurting and killing will give serious consideration to actual discussion about what they do. I fear that increasing violence is inevitable given the history of industry’s obfuscation, denial, and economic interest in harming animals. There is slim chance that any genuine discussion is likely or will be allowed to take place. Responsibility for every violent act past and future intended to stem the animal holocaust is borne by everyone: the vivisectors and the butchers; activists; denialists; media; government; everyone.
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.

from collections import defaultdict
from operator import attrgetter

from flask import render_template, session
from pytz import utc
from sqlalchemy import Date, cast
from sqlalchemy.orm import contains_eager, joinedload, subqueryload, undefer

from indico.core.db import db
from indico.modules.events.contributions.models.contributions import Contribution
from indico.modules.events.models.events import Event
from indico.modules.events.models.persons import EventPersonLink
from indico.modules.events.sessions.models.blocks import SessionBlock
from indico.modules.events.sessions.models.sessions import Session
from indico.modules.events.timetable.legacy import TimetableSerializer, serialize_event_info
from indico.modules.events.timetable.models.breaks import Break
from indico.modules.events.timetable.models.entries import TimetableEntry, TimetableEntryType
from indico.util.caching import memoize_request
from indico.util.date_time import format_time, get_day_end, iterdays
from indico.util.i18n import _
from indico.web.flask.templating import get_template_module
from indico.web.forms.colors import get_colors


def _query_events(categ_ids, day_start, day_end):
    """Query (event id, timetable-entry start) pairs for the given categories.

    Events match when they either have a timetable entry starting inside
    ``[day_start, day_end]`` or simply happen inside that interval; the
    outer join keeps events without a matching timetable entry (their
    ``start_dt`` column comes back as ``None``).
    """
    event = db.aliased(Event)
    dates_overlap = lambda t: (t.start_dt >= day_start) & (t.start_dt <= day_end)
    return (db.session.query(Event.id, TimetableEntry.start_dt)
            .filter(
                Event.category_chain_overlaps(categ_ids),
                ~Event.is_deleted,
                ((Event.timetable_entries.any(dates_overlap(TimetableEntry))) |
                 (Event.query.exists().where(
                     Event.happens_between(day_start, day_end) &
                     (Event.id == event.id)))))
            .group_by(Event.id, TimetableEntry.start_dt)
            .order_by(Event.id, TimetableEntry.start_dt)
            .join(TimetableEntry,
                  (TimetableEntry.event_id == Event.id) & (dates_overlap(TimetableEntry)),
                  isouter=True))


def _query_blocks(event_ids, dates_overlap, detail_level='session'):
    """Query the non-deleted session blocks of the given events whose
    timetable entry satisfies *dates_overlap*.

    At ``contribution`` detail level the entry's children are eagerly
    loaded as well, since callers will iterate them.
    """
    options = [subqueryload('session').joinedload('blocks').joinedload('person_links')]
    if detail_level == 'contribution':
        options.append(contains_eager(SessionBlock.timetable_entry).joinedload(TimetableEntry.children))
    else:
        options.append(contains_eager(SessionBlock.timetable_entry))
    return (SessionBlock.query
            .filter(~Session.is_deleted,
                    Session.event_id.in_(event_ids),
                    dates_overlap(TimetableEntry))
            .options(*options)
            .join(TimetableEntry)
            .join(Session))


def find_latest_entry_end_dt(obj, day=None):
    """Get the latest end datetime for timetable entries within the object.

    :param obj: The :class:`Event` or :class:`SessionBlock` that will be used to
                look for timetable entries.
    :param day: The local event date to look for timetable entries.  Applicable only
                to ``Event``.
    :return: The end datetime of the timetable entry finishing the latest. ``None``
              if no entry was found.
    """
    if isinstance(obj, Event):
        if day is None:
            raise ValueError("No day specified for event.")
        if not (obj.start_dt_local.date() <= day <= obj.end_dt_local.date()):
            raise ValueError("Day out of event bounds.")
        # only top-level entries (no parent) starting on the given local day
        entries = obj.timetable_entries.filter(TimetableEntry.parent_id.is_(None),
                                               cast(TimetableEntry.start_dt.astimezone(obj.tzinfo), Date) == day).all()
    elif isinstance(obj, SessionBlock):
        if day is not None:
            raise ValueError("Day specified for session block.")
        entries = obj.timetable_entry.children
    else:
        raise ValueError(f"Invalid object type {type(obj)}")
    return max(entries, key=attrgetter('end_dt')).end_dt if entries else None


def find_next_start_dt(duration, obj, day=None, force=False):
    """Find the next most convenient start date fitting a duration within an object.

    :param duration: Duration to fit into the event/session-block.
    :param obj: The :class:`Event` or :class:`SessionBlock` the duration needs to
                fit into.
    :param day: The local event date where to fit the duration in case the object is
                an event.
    :param force: Gives earliest datetime if the duration doesn't fit.
    :return: The end datetime of the latest scheduled entry in the object if the
              duration fits then.  If it doesn't, the latest datetime that fits it.
              ``None`` if the duration cannot fit in the object, earliest datetime
              if ``force`` is ``True``.
    """
    if isinstance(obj, Event):
        if day is None:
            raise ValueError("No day specified for event.")
        if not (obj.start_dt_local.date() <= day <= obj.end_dt_local.date()):
            raise ValueError("Day out of event bounds.")
        # default to 08:00 on days other than the event's first day
        earliest_dt = obj.start_dt if obj.start_dt_local.date() == day else obj.start_dt.replace(hour=8, minute=0)
        # NOTE(review): comparing against start_dt.date() (not end_dt) to pick the
        # upper bound looks suspicious — confirm this is intentional.
        latest_dt = obj.end_dt if obj.start_dt.date() == day else get_day_end(day, tzinfo=obj.tzinfo)
    elif isinstance(obj, SessionBlock):
        if day is not None:
            raise ValueError("Day specified for session block.")
        earliest_dt = obj.timetable_entry.start_dt
        latest_dt = obj.timetable_entry.end_dt
    else:
        raise ValueError(f"Invalid object type {type(obj)}")
    max_duration = latest_dt - earliest_dt
    if duration > max_duration:
        return earliest_dt if force else None
    start_dt = find_latest_entry_end_dt(obj, day=day) or earliest_dt
    end_dt = start_dt + duration
    if end_dt > latest_dt:
        # shift back so the duration still ends within the bounds
        start_dt = latest_dt - duration
    return start_dt


def get_category_timetable(categ_ids, start_dt, end_dt, detail_level='event', tz=utc, from_categ=None, grouped=True,
                           includible=lambda item: True):
    """Retrieve time blocks that fall within a specific time interval for a given set of categories.

    :param categ_ids: iterable containing list of category IDs
    :param start_dt: start of search interval (``datetime``, expected to be in
                     display timezone)
    :param end_dt: end of search interval (``datetime``, expected to be in
                   display timezone)
    :param detail_level: the level of detail of information
                         (``event|session|contribution``)
    :param tz: the ``timezone`` information should be displayed in
    :param from_categ: ``Category`` that will be taken into account to calculate
                       visibility
    :param grouped: Whether to group results by start date
    :param includible: a callable, to allow further arbitrary custom filtering
                       (maybe from 3rd party plugins) on whether to include
                       (returns True) or not (returns False) each ``detail`` item.
                       Default always returns True.
    :returns: a dictionary containing timetable information in a structured way.
              See source code for examples.
    """
    day_start = start_dt.astimezone(utc)
    day_end = end_dt.astimezone(utc)
    dates_overlap = lambda t: (t.start_dt >= day_start) & (t.start_dt <= day_end)

    items = defaultdict(lambda: defaultdict(list))

    # first of all, query TimetableEntries/events that fall within
    # specified range of dates (and category set)
    events = _query_events(categ_ids, day_start, day_end)
    if from_categ:
        events = events.filter(Event.is_visible_in(from_categ.id))
    for eid, tt_start_dt in events:
        if tt_start_dt:
            items[eid][tt_start_dt.astimezone(tz).date()].append(tt_start_dt)
        else:
            # no timetable entry in the interval; marker handled below
            items[eid] = None

    # then, retrieve detailed information about the events
    event_ids = set(items)
    query = (Event.query
             .filter(Event.id.in_(event_ids))
             .options(subqueryload(Event.person_links).joinedload(EventPersonLink.person),
                      joinedload(Event.own_room).noload('owner'),
                      joinedload(Event.own_venue),
                      joinedload(Event.category).undefer('effective_icon_data'),
                      undefer('effective_protection_mode')))
    scheduled_events = defaultdict(list)
    ongoing_events = []
    events = []
    for e in query:
        if not includible(e):
            continue
        if grouped:
            local_start_dt = e.start_dt.astimezone(tz).date()
            local_end_dt = e.end_dt.astimezone(tz).date()
            if items[e.id] is None:
                # if there is no TimetableEntry, this means the event has no timetable on that interval
                for day in iterdays(max(start_dt.date(), local_start_dt), min(end_dt.date(), local_end_dt)):
                    # if the event starts on this date, we've got a time slot
                    if day.date() == local_start_dt:
                        scheduled_events[day.date()].append((e.start_dt, e))
                    else:
                        ongoing_events.append(e)
            else:
                for start_d, start_dts in items[e.id].items():
                    scheduled_events[start_d].append((start_dts[0], e))
        else:
            events.append(e)

    # result['events'][date(...)] -> [(datetime(....), Event(...))]
    # result[event_id]['contribs'][date(...)] -> [(TimetableEntry(...), Contribution(...))]
    # result['ongoing_events'] = [Event(...)]
    if grouped:
        result = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    else:
        result = defaultdict(lambda: defaultdict(list))
    result.update({
        'events': scheduled_events if grouped else events,
        'ongoing_events': ongoing_events
    })

    # according to detail level, ask for extra information from the DB
    if detail_level != 'event':
        query = _query_blocks(event_ids, dates_overlap, detail_level)
        if grouped:
            for b in query:
                start_date = b.timetable_entry.start_dt.astimezone(tz).date()
                result[b.session.event_id]['blocks'][start_date].append((b.timetable_entry, b))
        else:
            for b in query:
                result[b.session.event_id]['blocks'].append(b)

    if detail_level == 'contribution':
        query = (Contribution.query
                 .filter(Contribution.event_id.in_(event_ids),
                         dates_overlap(TimetableEntry),
                         ~Contribution.is_deleted)
                 .options(contains_eager(Contribution.timetable_entry),
                          joinedload(Contribution.person_links))
                 .join(TimetableEntry))
        if grouped:
            for c in query:
                start_date = c.timetable_entry.start_dt.astimezone(tz).date()
                result[c.event_id]['contribs'][start_date].append((c.timetable_entry, c))
        else:
            for c in query:
                result[c.event_id]['contributions'].append(c)

        query = (Break.query
                 .filter(TimetableEntry.event_id.in_(event_ids),
                         dates_overlap(TimetableEntry))
                 .options(contains_eager(Break.timetable_entry))
                 .join(TimetableEntry))
        if grouped:
            for b in query:
                start_date = b.timetable_entry.start_dt.astimezone(tz).date()
                result[b.timetable_entry.event_id]['breaks'][start_date].append((b.timetable_entry, b))
        else:
            for b in query:
                result[b.timetable_entry.event_id]['breaks'].append(b)
    return result


def render_entry_info_balloon(entry, editable=False, sess=None, is_session_timetable=False):
    """Render the info balloon template matching the entry's type."""
    if entry.break_:
        return render_template('events/timetable/balloons/break.html', break_=entry.break_, editable=editable,
                               can_manage_event=entry.event.can_manage(session.user), color_list=get_colors(),
                               event_locked=entry.event.is_locked,
                               is_session_timetable=is_session_timetable)
    elif entry.contribution:
        return render_template('events/timetable/balloons/contribution.html', contrib=entry.contribution,
                               editable=editable,
                               can_manage_event=entry.event.can_manage(session.user),
                               can_manage_contributions=sess.can_manage_contributions(session.user) if sess else True,
                               event_locked=entry.event.is_locked)
    elif entry.session_block:
        return render_template('events/timetable/balloons/block.html', block=entry.session_block, editable=editable,
                               can_manage_session=sess.can_manage(session.user) if sess else True,
                               can_manage_blocks=sess.can_manage_blocks(session.user) if sess else True,
                               color_list=get_colors(), event_locked=entry.event.is_locked,
                               is_session_timetable=is_session_timetable)
    else:
        raise ValueError("Invalid entry")


def render_session_timetable(session, timetable_layout=None, management=False):
    """Render the HTML timetable of a session (without its blocks)."""
    if not session.start_dt:
        # no scheduled sessions present
        return ''
    timetable_data = TimetableSerializer(session.event).serialize_session_timetable(session, without_blocks=True,
                                                                                    strip_empty_days=True)
    event_info = serialize_event_info(session.event)
    tpl = get_template_module('events/timetable/_timetable.html')
    return tpl.render_timetable(timetable_data, event_info, timetable_layout=timetable_layout, management=management)


def get_session_block_entries(event, day):
    """Return a list of event top-level session blocks for the given `day`."""
    return (event.timetable_entries
            .filter(db.cast(TimetableEntry.start_dt.astimezone(event.tzinfo), db.Date) == day.date(),
                    TimetableEntry.type == TimetableEntryType.SESSION_BLOCK)
            .all())


def shift_following_entries(entry, shift, session_=None):
    """Reschedule entries starting after the given entry by the given shift."""
    query = entry.siblings_query.filter(TimetableEntry.start_dt >= entry.end_dt)
    if session_ and not entry.parent:
        # Fix: Query.filter returns a NEW query; the previous code discarded
        # the result, so the session-block restriction was never applied.
        query = query.filter(TimetableEntry.type == TimetableEntryType.SESSION_BLOCK,
                             TimetableEntry.session_block.has(session_id=session_.id))
    entries = query.all()
    if not entries:
        return []
    for sibling in entries:
        sibling.move(sibling.start_dt + shift)


def get_timetable_offline_pdf_generator(event):
    # imported lazily: the legacy PDF interface is heavy and rarely needed
    from indico.legacy.pdfinterface.conference import TimetablePDFFormat, TimeTablePlain
    pdf_format = TimetablePDFFormat()
    return TimeTablePlain(event, session.user, sortingCrit=None, ttPDFFormat=pdf_format, pagesize='A4',
                          fontsize='normal')


def get_time_changes_notifications(changes, tzinfo, entry=None):
    """Build human-readable notifications for start/end time changes.

    :param changes: mapping of changed object -> {'start_dt'/'end_dt': (old, new)}
    :param tzinfo: timezone used to format the new times
    :param entry: if given, changes to the entry's own object and to its
                  child entries are skipped
    """
    notifications = []
    for obj, change in changes.items():
        if entry:
            if entry.object == obj:
                continue
            if not isinstance(obj, Event) and obj.timetable_entry in entry.children:
                continue
        msg = None
        if isinstance(obj, Event):
            if 'start_dt' in change:
                new_time = change['start_dt'][1]
                msg = _("Event start time changed to {}")
            elif 'end_dt' in change:
                new_time = change['end_dt'][1]
                msg = _("Event end time changed to {}")
            else:
                raise ValueError("Invalid change in event.")
        elif isinstance(obj, SessionBlock):
            if 'start_dt' in change:
                new_time = change['start_dt'][1]
                msg = _("Session block start time changed to {}")
            elif 'end_dt' in change:
                new_time = change['end_dt'][1]
                msg = _("Session block end time changed to {}")
            else:
                raise ValueError("Invalid change in session block.")
        if msg:
            notifications.append(msg.format(format_time(new_time, timezone=tzinfo)))
    return notifications


@memoize_request
def get_top_level_entries(event):
    """Return the event's timetable entries that have no parent."""
    return event.timetable_entries.filter_by(parent_id=None).all()


@memoize_request
def get_nested_entries(event):
    """Return the event's nested timetable entries grouped by parent id."""
    entries = event.timetable_entries.filter(TimetableEntry.parent_id.isnot(None)).all()
    result = defaultdict(list)
    for entry in entries:
        result[entry.parent_id].append(entry)
    return result
Robert Trump — who disappeared from the social scene after divorcing his popular wife Blaine a decade ago — is alive and well and living on Long Island. And he can’t wait until his older brother, Donald, is in the White House. Robert, who described himself as “gainfully retired,” ended his 25-year marriage to Blaine when he fell in love with his secretary, Ann Marie Pallan, and bought her a $3.7 million house in Garden City, NY. None of his friends on the Upper East Side have seen much of him since. Donald doesn’t yet need Robert on the campaign trail — but he’s willing. Of the five children of Fred and Mary Trump, Fred Jr. died of alcoholism, Elizabeth is a retired Chase Manhattan Bank exec, and their oldest sister, Maryanne Trump Barry, is a senior judge on the US Court of Appeals for the Third Circuit.
#!/usr/bin/env python # Copyright (c) 2013 Robie Basak # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
from __future__ import print_function
import sys
import unittest
import mock
from mock import Mock, patch, sentinel
import nose.tools
import glacier

# Exit status used when a retrieval job has been queued but is not yet
# complete (same value as os.EX_TEMPFAIL).
EX_TEMPFAIL = 75
# True when running under Python 2 -- decides which builtins module to patch.
PY2 = (sys.version_info[0] == 2)

def patch_builtin(name, *args, **kwargs):
    """Helper to patch builtins easily for py2 and py3"""
    target = '{b}.{n}'.format(b='__builtin__' if PY2 else 'builtins', n=name)
    return patch(target, *args, **kwargs)

class TestCase(unittest.TestCase):
    """CLI-level tests for glacier.App, with the boto connection mocked out."""

    def init_app(self, args, memory_cache=False):
        # When memory_cache is set, use a real in-memory cache so cache
        # behaviour is exercised for real; otherwise a Mock suffices.
        self.connection = Mock()
        if memory_cache:
            self.cache = glacier.Cache(0, db_path=':memory:')
        else:
            self.cache = Mock()
        self.app = glacier.App(
            args=args,
            connection=self.connection,
            cache=self.cache)

    def run_app(self, args):
        # Convenience wrapper: build the app for *args* and run it.
        self.init_app(args)
        self.app.main()

    def test_vault_list(self):
        """'vault list' prints each vault name, newline separated."""
        self.init_app(['vault', 'list'])
        mock_vault = Mock()
        mock_vault.name = sentinel.vault_name
        self.connection.list_vaults.return_value = [mock_vault]
        print_mock = Mock()
        with patch_builtin('print', print_mock):
            self.app.main()
        print_mock.assert_called_once_with(sentinel.vault_name, sep=u'\n')

    def test_vault_create(self):
        """'vault create' forwards the vault name to the connection."""
        self.run_app(['vault', 'create', 'vault_name'])
        self.connection.create_vault.assert_called_once_with('vault_name')

    def test_archive_list(self):
        """'archive list' prints the cached archive names."""
        self.init_app(['archive', 'list', 'vault_name'])
        archive_list = [sentinel.archive_one, sentinel.archive_two]
        self.cache.get_archive_list.return_value = archive_list
        print_mock = Mock()
        with patch_builtin('print', print_mock):
            self.app.main()
        print_mock.assert_called_once_with(*archive_list, sep="\n")

    def test_archive_list_force_ids(self):
        """'archive list --force-ids' prints id-qualified names, so duplicate
        archive names remain distinguishable."""
        self.init_app(
            ['archive', 'list', '--force-ids', 'vault_name'],
            memory_cache=True,
        )
        self.cache.add_archive('vault_name', 'archive_name_1', 'id_1')
        self.cache.add_archive('vault_name', 'archive_name_1', 'id_2')
        self.cache.add_archive('vault_name', 'archive_name_3', 'id_3')
        print_mock = Mock()
        with patch_builtin('print', print_mock):
            self.app.main()
        # print should have been called with a list of the items in some
        # arbitrary order. Testing this correctly involves being agnostic with
        # the order of args in *args. Does mock provide any other way of doing
        # this other than by introspecting mock_calls like this?
        nose.tools.assert_equals(print_mock.call_count, 1)
        nose.tools.assert_equals(
            sorted(print_mock.mock_calls[0][1]),
            sorted([
                u'id:id_1\tarchive_name_1',
                u'id:id_2\tarchive_name_1',
                u'id:id_3\tarchive_name_3',
            ]),
        )
        nose.tools.assert_equals(
            print_mock.mock_calls[0][2],
            {'sep': "\n"}
        )

    def test_archive_upload(self):
        """'archive upload FILE' opens the file and uploads it with the file
        name as the archive description."""
        file_obj = Mock()
        file_obj.name = 'filename'
        file_obj.mode = 'rb'
        open_mock = Mock(return_value=file_obj)
        with patch_builtin('open', open_mock):
            self.run_app(['archive', 'upload', 'vault_name', 'filename'])
        self.connection.get_vault.assert_called_with('vault_name')
        mock_vault = self.connection.get_vault.return_value
        mock_vault.create_archive_from_file.assert_called_once_with(
            file_obj=file_obj, description='filename')

    def test_archive_stdin_upload(self):
        """'archive upload -' reads from stdin (the binary buffer on py3)."""
        self.run_app(['archive', 'upload', 'vault_name', '-'])
        self.connection.get_vault.assert_called_once_with('vault_name')
        vault = self.connection.get_vault.return_value
        expected_file_obj = sys.stdin if PY2 else sys.stdin.buffer
        vault.create_archive_from_file.assert_called_once_with(
            file_obj=expected_file_obj, description='<stdin>')

    def test_archive_retrieve_no_job(self):
        """With no pending job, 'archive retrieve' queues one, prints a notice
        to stderr and exits with EX_TEMPFAIL."""
        self.init_app(['archive', 'retrieve', 'vault_name', 'archive_name'])
        mock_vault = Mock()
        mock_vault.list_jobs.return_value = []
        self.connection.get_vault.return_value = mock_vault
        mock_exit = Mock()
        mock_print = Mock()
        with patch('sys.exit', mock_exit):
            with patch_builtin('print', mock_print):
                self.app.main()
        mock_exit.assert_called_once_with(EX_TEMPFAIL)
        mock_print.assert_called_once_with(
            u"glacier: queued retrieval job for archive 'archive_name'",
            file=sys.stderr)
        self.connection.get_vault.assert_called_once_with('vault_name')
        mock_vault.retrieve_archive.assert_called_once_with(
            self.cache.get_archive_id.return_value)

    def test_archive_retrieve_with_job(self):
        """With a completed job, 'archive retrieve' writes the job output to a
        file named after the archive."""
        self.init_app(['archive', 'retrieve', 'vault_name', 'archive_name'])
        self.cache.get_archive_id.return_value = sentinel.archive_id
        mock_job = Mock(
            archive_id=sentinel.archive_id,
            completed=True,
            completion_date='1970-01-01T00:00:00Z',
            archive_size=1)
        mock_vault = Mock()
        mock_vault.list_jobs.return_value = [mock_job]
        self.connection.get_vault.return_value = mock_vault
        mock_open = mock.mock_open()
        with patch_builtin('open', mock_open):
            self.app.main()
        self.cache.get_archive_id.assert_called_once_with(
            'vault_name', 'archive_name')
        mock_job.get_output.assert_called_once_with()
        mock_job.get_output.return_value.read.assert_called_once_with()
        mock_open.assert_called_once_with('archive_name', u'wb')
        mock_open.return_value.write.assert_called_once_with(
            mock_job.get_output.return_value.read.return_value)

    def test_archive_delete(self):
        """'archive delete' resolves the archive id via the cache and deletes
        it from the vault."""
        self.run_app(['archive', 'delete', 'vault_name', 'archive_name'])
        self.cache.get_archive_id.assert_called_once_with(
            'vault_name', 'archive_name')
        self.connection.get_vault.assert_called_with('vault_name')
        mock_vault = self.connection.get_vault.return_value
        mock_vault.delete_archive.assert_called_once_with(
            self.cache.get_archive_id.return_value)
A Community Center is a common amenities center built for the benefit of the whole community, where accommodation is provided for tourists at economical rates compared to a luxurious hotel. This type of community center is beneficial to the common public of a particular community, saving a huge amount of money in the long run. REDSUN solar water heaters with ETC glass tube technology were installed by us at one of the Community Centers at Lonavala. This project includes 4000 Litres Per Day (LPD) REDSUN ETC solar water heaters, wherein 500 LPD X 8 systems are installed in cascade, providing hot water to 100 persons per day. Installation of the solar water heaters is done in cascade mode, wherein hot water from the first solar tank is fed to the second solar tank, hot water from the second solar tank is fed to the third solar tank, and finally hot water from the eighth solar tank is fed to the bathroom (hot water usage points). Usage of the REDSUN solar water heating system in this Community Center will help in saving 45,000 kWh (units) of electricity per year, indirectly saving Rs. 3,25,000 per annum. This clearly indicates that the investment made by the institute will be recovered in just 1.5 years compared to electric geysers, and within 2 years in comparison with gas geysers. REDSUN ETC Solar Water Heaters, when installed in community buildings or in residential apartments, are very economical as the return on investment is recovered within 2 years. In order to generate 45,000 kWh from an on-grid solar power plant, it would be necessary to install a 30 kW on-grid solar power plant with an estimated investment of Rs. 21 lakhs. Hence, if water is to be heated by renewable energy sources, then solar water heaters are the best option available for a country like India, blessed with abundant sunlight.
# -*- coding: utf-8 -*-
# Copyright © 2014 SEE AUTHORS FILE
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
Git worker.
"""

import git
import time
import os
import shutil
import subprocess
import uuid

from reworker.worker import Worker


class GitWorkerError(Exception):
    """
    Base exception class for GitWorker errors.
    """
    pass


class GitWorker(Worker):
    """
    Worker which provides basic functionality with Git.
    """

    #: allowed subcommands
    subcommands = ('CherryPickMerge', 'Merge')
    dynamic = []

    # Subcommand methods
    def cherry_pick_merge(self, body, corr_id, output):
        """
        Cherry pick the given commits onto a temporary branch, squash-merge
        that branch into to_branch, optionally run whitelisted scripts, and
        force-push the result.

        :param body: bus message body; parameters require commits, to_branch
                     and repo; temp_branch, run_scripts and keep_workspace
                     are optional.
        :param corr_id: correlation id for this request.
        :param output: per-job output logger.
        :return: dict with status and result data (picked commits, commit
                 sha, branch).
        :raises GitWorkerError: on missing input, git failure or a failing
                                script.
        """
        # Get needed input variables
        params = body.get('parameters', {})

        try:
            commits = params['commits']
            to_branch = params['to_branch']
            temp_branch = params.get('temp_branch', 'mergebranch')
            run_scripts = params.get('run_scripts', [])
            repo = params['repo']
            self.app_logger.info(
                'Attempting to cherry pick the following commits on %s: %s' % (
                    repo, ",".join(commits)))

            # Create a workspace
            workspace = self._create_workspace()

            # result_data is where we store the results to return to the bus
            result_data = {
                "cherry_pick": [],
            }

            # Create a git command wrapper
            gitcmd = git.cmd.Git(workspace)

            # Clone
            location_type = 'local'
            if (
                    repo.startswith('http://') or
                    repo.startswith('https://') or
                    repo.startswith('ssh://')):
                location_type = 'remote'
            output.info('Cloning %s %s' % (location_type, repo))
            gitcmd.clone(repo, workspace)

            local_repo = git.Repo(workspace)
            output.info('Checking out branch %s for work' % temp_branch)
            local_repo.git.checkout(b=temp_branch)

            for commit in commits:
                self.app_logger.info("Going to cherry pick %s now" % commit)
                local_repo.git.cherry_pick(commit)
                result_data['cherry_pick'].append(commit)
                output.info('Cherry picked %s' % commit)
                self.app_logger.info("Cherry picked %s successfully" % commit)

            # Squash-merge the temp branch into an up-to-date to_branch
            local_repo.git.fetch('origin', to_branch)
            local_repo.git.checkout(to_branch)
            local_repo.git.pull('origin', to_branch)
            local_repo.git.merge(temp_branch, squash=True)
            local_repo.git.commit(
                m="Commit for squash-merge of release: %s" % corr_id)
            result_data['commit'] = local_repo.commit().hexsha
            result_data['branch'] = to_branch

            if run_scripts:
                for script in run_scripts:
                    try:
                        # Raises KeyError if the script is not whitelisted
                        # in the worker configuration.
                        script_path = self._config['scripts'][script]
                        self.app_logger.info('Executing %s' % script)
                        self.app_logger.debug('Running: ["%s"]' % (
                            script_path))
                        script_process = subprocess.Popen(
                            [script_path],
                            shell=False,
                            cwd=workspace,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
                        # BUGFIX: wait for the script to finish before
                        # checking its exit status. Previously returncode
                        # was read after a fixed sleep, while it is None
                        # until the process has actually terminated.
                        stdout_data, stderr_data = script_process.communicate()
                        # If we get a non 0 then it's a failure.
                        if script_process.returncode != 0:
                            # stop executing and bail out
                            raise GitWorkerError(str(stdout_data))
                        result_data['commit'] = local_repo.commit().hexsha
                        self.app_logger.info('%s run finished' % script)
                        output.info('%s run finished' % script)
                    except KeyError:
                        # BUGFIX: the skip messages previously had a '%s'
                        # placeholder but no argument.
                        self.app_logger.warn(
                            '%s is not in the allowed scripts list. '
                            'Skipped.' % script)
                        output.warn(
                            '%s is not in the allowed scripts list. '
                            'Skipped.' % script)

            local_repo.git.push("origin", to_branch, force=True)

            # Remove the workspace after work is done (unless
            # keep_workspace is True)
            if not params.get('keep_workspace', False):
                self._delete_workspace(workspace)
                output.info('Cleaning up workspace.')

            self.app_logger.info('Cherry picking succeeded.')
            return {'status': 'completed', 'data': result_data}
        except KeyError as ke:
            raise GitWorkerError('Missing input %s' % ke)
        except git.GitCommandError as gce:
            raise GitWorkerError('Git error: %s' % gce)

    def merge(self, body, corr_id, output):
        """
        Merge a branch into another branch.

        :param body: bus message body; parameters require from_branch,
                     to_branch and repo; keep_workspace is optional.
        :param corr_id: correlation id for this request.
        :param output: per-job output logger.
        :return: dict with status and result data (commit sha, branches).
        :raises GitWorkerError: on missing input or git failure.
        """
        params = body.get('parameters', {})

        try:
            from_branch = params['from_branch']
            to_branch = params['to_branch']
            repo = params['repo']
            msg = 'Attempting to merge %s to %s' % (from_branch, to_branch)
            self.app_logger.info(msg)
            output.info(msg)

            # Create a workspace
            workspace = self._create_workspace()

            # Create a git command wrapper
            gitcmd = git.cmd.Git(workspace)

            # Clone
            location_type = 'local'
            if (
                    repo.startswith('http://') or
                    repo.startswith('https://') or
                    repo.startswith('ssh://')):
                location_type = 'remote'
            output.info('Cloning %s %s' % (location_type, repo))
            gitcmd.clone(repo, workspace)

            local_repo = git.Repo(workspace)
            output.info('Checking out branch %s to merge into' % to_branch)

            # Make sure we have the data from the server
            local_repo.git.fetch('origin', from_branch)
            local_repo.git.fetch('origin', to_branch)
            # Move onto the branch
            local_repo.git.checkout(to_branch)
            # Do the work
            local_repo.git.merge("origin/" + from_branch)
            output.info('Merged %s to %s successfully' % (
                from_branch, to_branch))
            self.app_logger.info("Merged %s to %s successfully" % (
                from_branch, to_branch))

            result_data = {
                'commit': local_repo.commit().hexsha,
                'from_branch': from_branch,
                'to_branch': to_branch,
            }

            local_repo.git.push("origin", to_branch, force=False)

            # Remove the workspace after work is done (unless
            # keep_workspace is True)
            if not params.get('keep_workspace', False):
                self._delete_workspace(workspace)
                output.info('Cleaning up workspace.')

            self.app_logger.info('Merge succeeded.')
            return {'status': 'completed', 'data': result_data}
        except KeyError as ke:
            raise GitWorkerError('Missing input %s' % ke)
        except git.GitCommandError as gce:
            raise GitWorkerError('Git error: %s' % gce)

    def _create_workspace(self):
        """
        Creates a unique workspace directory to clone in.
        """
        workspace = os.path.sep.join([
            self._config['workspace_dir'],
            str(uuid.uuid4())])
        self.app_logger.debug('Trying to make %s.' % workspace)
        os.makedirs(workspace)
        self.app_logger.info('Created workspace at %s' % workspace)
        return workspace

    def _delete_workspace(self, workspace):
        """
        Deletes a workspace after worker is done. Refuses to delete
        anything outside the configured workspace_dir.
        """
        self.app_logger.debug('Attempting to delete workspace %s.' % (
            workspace))
        if workspace.startswith(self._config['workspace_dir']):
            shutil.rmtree(workspace)
            self.app_logger.info('Deleted workspace at %s' % workspace)
        else:
            self.app_logger.warn(
                'Workspace %s is not inside %s. Not removing.' % (
                    workspace, self._config['workspace_dir']))

    def process(self, channel, basic_deliver, properties, body, output):
        """
        Processes GitWorker requests from the bus.

        *Keys Requires*:

            * subcommand: the subcommand to execute.
        """
        # Ack the original message
        self.ack(basic_deliver)
        corr_id = str(properties.correlation_id)

        try:
            try:
                subcommand = str(body['parameters']['subcommand'])
                if subcommand not in self.subcommands:
                    raise KeyError()
            except KeyError:
                raise GitWorkerError(
                    'No valid subcommand given. Nothing to do!')

            if subcommand == 'CherryPickMerge':
                cmd_method = self.cherry_pick_merge
            elif subcommand == 'Merge':
                cmd_method = self.merge
            else:
                self.app_logger.warn(
                    'Could not find the implementation of subcommand %s' % (
                        subcommand))
                raise GitWorkerError('No subcommand implementation')

            result = cmd_method(body, corr_id, output)

            # Send results back.
            # BUGFIX: the result used to be sent twice -- once wrapped in an
            # extra {'status': ..., 'data': ...} envelope even though the
            # subcommand methods already return that envelope. Send it once.
            self.send(
                properties.reply_to,
                corr_id,
                result,
                exchange=''
            )

            # Notify on result. Not required but nice to do.
            self.notify(
                'GitWorker Executed Successfully',
                'GitWorker successfully executed %s. See logs.' % (
                    subcommand),
                'completed',
                corr_id)

            # Send out responses
            self.app_logger.info(
                'GitWorker successfully executed %s for '
                'correlation_id %s. See logs.' % (
                    subcommand, corr_id))
        except GitWorkerError as fwe:
            # If a GitWorkerError happens send a failure, log and notify.
            self.app_logger.error('Failure: %s' % fwe)

            self.send(
                properties.reply_to,
                corr_id,
                {'status': 'failed'},
                exchange=''
            )
            self.notify(
                'GitWorker Failed',
                str(fwe),
                'failed',
                corr_id)
            output.error(str(fwe))


def main():  # pragma: no cover
    from reworker.worker import runner
    runner(GitWorker)


if __name__ == '__main__':  # pragma nocover
    main()
JIC offers 52 fantastic Hand-Dipped, 48 Soft-Serve Ice Cream flavors combined for unlimited options, and Non-Dairy and Low Fat-No Sugar Added Ice Cream. And a bit more...super Sundaes, boomin' Banana Splits, rockin' Rootbeer Floats, monstrous Milkshakes and Malts, scrumptious Smoothies, Brewster Blasts and incredible Ice Cream Sandwiches! The ice cream is the absolute best and the people that serve you are very friendly. There are so many different flavors, and the amount you get is money well spent. Service is absolutely phenomenal!! I love this place a lot! The people, the atmosphere, the ICE CREAM! It has the best variety and creamiest ice cream ever!!!! Great Ice Cream and very friendly employees. Excellent flavors and a great variety! I try something new every time I go! Best ice cream, wonderful service, price is right.
import random

from django.shortcuts import render, get_object_or_404, redirect
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.http import Http404
from django.db.models import Q
from django.contrib import messages

from .forms import VariationInventoryFormSet
from .mixins import StaffRequiredMixin, LoginRequiredMixin
from .models import Product, Variation, Category

# Create your views here.


class CategoryListView(ListView):
    """Lists every category, reusing the product list template."""
    model = Category
    queryset = Category.objects.all()
    template_name = "products/product_list.html"


class CategoryDetailView(DetailView):
    """Category page showing its own products plus default-category products."""
    model = Category

    def get_context_data(self, *args, **kwargs):
        context = super(CategoryDetailView, self).get_context_data(*args, **kwargs)
        obj = self.get_object()
        product_set = obj.product_set.all()
        default_products = obj.default_category.all()
        # Union of directly-assigned and default-category products, de-duplicated.
        products = (product_set | default_products).distinct()
        context["products"] = products
        return context


class VariationListView(StaffRequiredMixin, ListView):
    """Staff-only inventory editor for a product's variations."""
    model = Variation
    queryset = Variation.objects.all()

    def get_context_data(self, *args, **kwargs):
        context = super(VariationListView, self).get_context_data(*args, **kwargs)
        context['formset'] = VariationInventoryFormSet(queryset=self.get_queryset())
        return context

    def get_queryset(self, *args, **kwargs):
        product_pk = self.kwargs.get("pk")
        if product_pk:
            product = get_object_or_404(Product, pk=product_pk)
            return Variation.objects.filter(product=product)
        # BUGFIX: previously returned None when no pk was in the URL, which
        # crashes ListView; fall back to the full queryset instead.
        return Variation.objects.all()

    def post(self, request, *args, **kwargs):
        formset = VariationInventoryFormSet(request.POST, request.FILES)
        if formset.is_valid():
            formset.save(commit=False)
            for form in formset:
                new_item = form.save(commit=False)
                # Only persist rows that actually have a title.
                if new_item.title:
                    product_pk = self.kwargs.get("pk")
                    product = get_object_or_404(Product, pk=product_pk)
                    new_item.product = product
                    new_item.save()
            messages.success(request, "Updated successfully")
            return redirect("products")
        raise Http404


# --------------------------------------------------------------
# Class based view and function based view below


class ProductListView(ListView):
    """Product list with simple text/price search via the ?q= parameter."""
    model = Product
    queryset = Product.objects.all()

    def get_queryset(self, *args, **kwargs):
        qs = super(ProductListView, self).get_queryset(*args, **kwargs)
        query = self.request.GET.get("q")
        if query:
            qs = self.model.objects.filter(
                Q(title__icontains=query) |
                Q(description__icontains=query)
            )
            try:
                # Also match on exact price when the query parses as one.
                qs2 = self.model.objects.filter(Q(price=query))
                qs = (qs | qs2).distinct()
            except Exception:
                # Query string was not a valid price; keep the text matches.
                pass
        return qs


class ProductDetailView(DetailView):
    """Product page with up to six related products in random order."""
    model = Product

    def get_context_data(self, *args, **kwargs):
        context = super(ProductDetailView, self).get_context_data(*args, **kwargs)
        instance = self.get_object()
        context["related"] = sorted(
            Product.objects.get_related(instance)[:6],
            key=lambda x: random.random())
        return context


def product_details_view_func(request, id):
    """Function-based product detail view (get_object_or_404 raises Http404)."""
    # BUGFIX: removed a redundant second fetch of the same product wrapped
    # in a bare try/except -- get_object_or_404 already handles the miss.
    product_instance = get_object_or_404(Product, id=id)
    template = "products/product_detail.html"
    context = {
        "object": product_instance
    }
    return render(request, template, context)
# --------------------------------------------------------------
FROM: Roger Callow aka The Outlawed Canadian in an outlaw Justice System due to systematic judicial malfeasance. 17) 3 more SCofC judges no doubt are being pressured to sacrifice their reputations along with Justices Wagner, Coté, Cromwell (r.) by failing to call for disclosure in 36993 SK prior to any examination of this issue on any level. 2b)...The same three judges - Cromwell (retired Sept. 01-16)/ Coté / Wagner sat on both cases amid my most vehement opposition as known to the Justice Minister and Prime Minister. Under those circumstances, P.M. Trudeau has no choice but to expel Chief Justice Beverley McLachlin (whom sat on the first enquiry in 1997). The other 4 SCofC justices are honour-bound to turn in their resignations and refuse to sit alongside Coté and Wagner. A) As Richard Wagner j. has been confirmed as the new Chief Justice to replace Beverley McLachlin whom retired on Dec. 15-2017, this letter which would be normally addressed to the new Chief Justice is now being directed to the other judges on the SCofC panel for reasons mentioned above. B) McLachlin was also President of the Canadian Council of Judges where I have renewed my request to examine the apparent fraud of Ottawa Supreme Court judges Colin McKinnon j. and Robert Scott j. (originally appointed by the Federal Court). McKinnon's bombastic statement from 2014 is still being widely disseminated in the 'Book of Authorities' which QC, SK, NS and the SCofC refuse to analyze making a mockery of the justice system. C) An action of fraud is currently being lodged in B.C. in which I have called in Premier Horgan to expedite matters. While the McKinnon caper has not yet been included in B.C., logic tells us that Harris & Co. which served in SK and NS (Hicks, Morley et al in ON / Lavery de Billy in QC. The Legal Societies in both provinces ducked out of dealing with their perfidy although it is only ON which I am currently pursuing under an Appeal to Premier Wynne) (SEE web 2018). 
D) The proper course of the extant SCofC judges is to excuse Wagner and Coté from any discussion on the above apparent transgressions. Canada deserves to know of your outcome on this issue as to the credibility of the Justice System in general.
from os import path
from collections import deque
import string
import io

from constants import SCRIPT_PATH
import opcodes


class Script:
    '''
    A script is code to be run by each agent
    It has code
    '''

    def __init__(self, scripName):
        # BUGFIX: `ip` was an undefined name here; the instruction pointer
        # starts at the beginning of the parsed data.
        self.ip = 0
        self.data = []
        self.keys = {}
        self.keysBack = {}
        with open(path.join(SCRIPT_PATH, scripName), 'rb') as f:
            tok = ''
            lastIndex = None
            for byte in f.read():
                byte = byte.upper()
                # Skip non-letter bytes
                if byte not in string.uppercase:
                    continue
                # Add letter to tok
                tok += byte
                # Check if tok is an opcode
                if tok in opcodes.OP_CODES:
                    # Check if tok is the 'literal' opcode
                    if tok == opcodes.LITERAL_CODE:
                        # Parse literal number
                        num = ''
                        for digit in f.read():
                            # BUGFIX: the loop variable is `digit`; the
                            # original tested an undefined name `d`.
                            if digit not in (string.digits + '-bx' +
                                             string.uppercase[:6] +
                                             ' +*/()' + '<>^~&|%'):
                                break
                            num += digit
                        f.seek(-1, io.SEEK_CUR)
                        try:
                            # Add literal to data.
                            # NOTE(review): eval of script-file content is
                            # unsafe on untrusted scripts -- confirm scripts
                            # are trusted input.
                            self.data.append(int(eval(num)))
                        except Exception:
                            pass
                    else:
                        # Add opcode to data
                        self.data.append(tok)
                        if tok in opcodes.TICK_CODES:
                            # Link consecutive tick opcodes so nextOps() can
                            # jump from one tick to the next.
                            # NOTE(review): restructured so the first tick
                            # seeds lastIndex; the original ordering could
                            # never record any key. Confirm against callers.
                            index = len(self.data)
                            if lastIndex is not None:
                                self.keys[lastIndex] = index
                                self.keysBack[index] = lastIndex
                            lastIndex = index
                    # Reset tok
                    tok = ''

    def nextOps(self):
        '''
        Gets the next list of op codes to be evaluated
        '''
        nextKey = self.keys[self.ip]
        # NOTE(review): the -1 end bound excludes the element before the next
        # tick -- presumably the tick opcode itself; confirm.
        opStr = self.data[self.ip:nextKey-1]
        self.ip = nextKey
        return opStr

    def execute(self, opStr):
        '''
        Executes a list of evaluated op codes
        @return - The command to be executed in a tuple ('opcode', value)
        '''
        # Example program: JG MA 25 50 MA 2 0 -1
        opstack = deque(opStr)
        argStack = deque()
        while len(opstack) > 0:
            p = opstack.pop()
            if p in opcodes.OP_CODES:
                arity = opcodes.OP_CODES[p]
                # BUGFIX: apply when at least `arity` args are available.
                # The original used '>' which skipped application with
                # exactly `arity` args -- the worked trace below requires
                # JG to fire with exactly three args on the stack.
                if len(argStack) >= arity:
                    args = [argStack.pop() for i in range(arity)]
                    result = self.applyOpCode(p, args)
                    if result is not None:
                        argStack.append(result)
            else:
                argStack.append(p)
        # Worked trace for "JG MA 25 50 MA 2 0 -1":
        #   literals -1, 0, 2 are pushed onto argStack;
        #   MA pops 2, 0  -> 2;  literals 50, 25 pushed;
        #   MA pops 25, 50 -> 75;
        #   JG pops 75, 2, -1 -> None (sets ip to ip-1)

    def applyOpCode(self, opcode, args):
        # Dispatch for individual opcodes; implemented by subclasses/later.
        pass
Most small businesses can't afford to pay large advertising agency fees for turnkey marketing services. As a result, they end up with a patchwork marketing strategy and waste a lot of money on ineffective advertising. Bullseye Media has recently introduced a 100% Done-For-You marketing program designed specifically for companies with fewer than 50 employees and no dedicated marketing specialist on staff. A partial list of services includes: Marketing Plan creation, Marketing Calendar implementation, Search Engine Optimization (SEO), Pay Per Click (PPC) management, landing page creation and testing, website maintenance, blog set up, copywriting, design and delivery of a monthly client newsletter, advertising effectiveness tracking tools and video creation and editing.
import logging
import urllib
import xmlrpclib

from handlerbase import Abort
from handlerdl import DownloadHandler
from network.httprequest import BadHeader
from network.stringcookiejar import StringCookieJar


class ISPDownloadHandler(DownloadHandler):
    """Download handler that authenticates with a cookie obtained from the
    center (SPAT) server before downloading ISP attachments."""

    def loadCookieFromSPAT(self):
        """ Load cookies from center server.

        Returns a StringCookieJar, or None when the center server cannot
        be reached.
        """
        cookieString = None
        try:
            s = xmlrpclib.ServerProxy("http://localhost:8888")
            cookieString = s.get_cookie_str()
        except Exception as e:
            logging.error("Failed to get the cookie from center server because of: %s" % e)
            return None
        stringCookieJar = StringCookieJar()
        stringCookieJar.load_from_str(cookieString, True, True)
        # BUGFIX: the jar was never returned; the original fell through into
        # code that recursively called this method again (infinite recursion).
        return stringCookieJar

    def abort(self):
        # Expose the task's abort flag.
        return self.task.abort

    def downloadISPAtt(self):
        """Follow at most 5 redirects, then download the final URL."""
        url = self.task.url
        for i in range(5):
            # if just_header set, then dict like http headers will be return
            headers = self.httpfetch(url, just_header=True)

            # self.load does not raise a BadHeader on 404 responses, do it here
            if 'code' in headers and headers['code'] == 404:
                raise BadHeader(404)

            if 'location' in headers:
                self.logDebug("Location: " + headers['location'])
                url = urllib.unquote(headers['location'])
            else:
                break

        # download the url
        # we don't use the 'content-disposition' name specified in http header
        self.httpdownload(url)

    def preprocess(self, thread=None):
        """ Handles important things to do before starting.

        BUGFIX: `thread` now defaults to None since process() calls this
        without arguments. Also actually installs the SPAT cookie into the
        request -- the original fetched the jar but discarded it.
        """
        # load cookie from center server and set it into current request
        cj = self.loadCookieFromSPAT()
        if cj:
            self.req.setCookieJar(cj)
        else:
            raise Abort("Failed to get cookie from Center server")

        # mimic ack ISP downloading agreements page
        # set the taskfile's status to be 'starting'
        self.task.setStatus("starting")

    def process(self):
        """main function"""
        # preprocess function
        self.preprocess()

        # real download
        try:
            self.downloadISPAtt()
        except BadHeader as e:
            if e.code in (401, 403):
                # NOTE(review): `self.logging` looks suspicious -- possibly
                # meant to be a logger call; confirm against DownloadHandler.
                self.logging("Cookie expired, try to reload cookie from center server and retry again!")
                # BUGFIX: install the freshly fetched cookie before retrying;
                # the original fetched a new jar but never applied it.
                cj = self.loadCookieFromSPAT()
                if cj:
                    self.req.setCookieJar(cj)
                self.downloadISPAtt()
            else:
                raise

        # postprocess
        self.postprocess()

    def postprocess(self):
        # Re-fetch the task URL after the download completes.
        self.load(self.task.url)
As an employee of Gateways: Access to Jewish Education, I was quite moved to watch Arlene Remz, our Executive Director, receive the honor of the Community Service Award from the Synagogue Council of Massachusetts for her work in leading our organization to improve the quality of Jewish education for children with special needs. I certainly agree with his point and I want to add that by honoring Arlene Remz, Executive Director of Gateways: Access to Jewish Education, the Council has recognized our agency’s ability to also serve families across the denominational spectrum, as well as recognized the importance of including families who have children with a diverse range of learning styles and abilities. Gateways provides Jewish education to children with special needs in a variety of settings – day schools, synagogue and community programs, and specially designed programs for children who would benefit from highly structured programming, individualized attention and smaller class sizes. Our client families identify as Orthodox, Conservative, Reform, Reconstructionist and unaffiliated. The Synagogue Council of Massachusetts and Gateways: Access to Jewish Education are vibrant models of organizations identifying an important community need and working for the greater good to create services and programs for all Jewish families. For more information about Gateways: Access to Jewish Education please visit our website and become a fan on facebook.
# Originally from: http://code.activestate.com/recipes/577202/#c4 # Written by Vasilij Pupkin (2012) # Minor changes by Elmer de Looff (2012) # Licensed under the MIT License (http://opensource.org/licenses/MIT class ALIGN(object): LEFT, RIGHT = '-', '' class Column(list): def __init__(self, name, data, align=ALIGN.LEFT): list.__init__(self, data) self.name = name self.width = max(len(x) for x in self + [name]) self.format = ' %%%s%ds ' % (align, self.width) class Table(object): def __init__(self, *columns): self.columns = columns self.length = max(len(x) for x in columns) def get_row(self, i=None): for x in self.columns: if i is None: yield x.format % x.name else: yield x.format % x[i] def get_line(self): for x in self.columns: yield '-' * (x.width + 2) def join_n_wrap(self, char, elements): return ' ' + char + char.join(elements) + char def get_rows(self): yield self.join_n_wrap('+', self.get_line()) yield self.join_n_wrap('|', self.get_row(None)) yield self.join_n_wrap('+', self.get_line()) for i in range(0, self.length): yield self.join_n_wrap('|', self.get_row(i)) yield self.join_n_wrap('+', self.get_line()) def __str__(self): return '\n'.join(self.get_rows())
Are you searching for a reliable and expert service provider for your vehicle in Victoria? Your search ends at our door. As a VACC accredited service station, we provide premium quality services at competitive rates. Over the years, due to our heavy investment in new technology, tools, amenities and human resources, we have attained expertise in all areas of repairing. Our bouquet of services includes auto-electrical repairs, all mechanical repairs, log book services, 4WD services, brake and clutch repair services, suspension repair services, tyre and wheel services, car air-conditioning, safety check services, tow bars and accessories, and oil change and lubrication services. We are expert in repairing small, family, SUV and premium cars. I’ve been here twice — once through my insurance and a second time for a personal quote — and both times I’ve experienced the best customer service and great quality work.
#!/usr/bin/env python # -*- coding: utf-8 -*- import re from user import make_anonymous_user from exeptions import HttpStatusError, RegexError def make_subject_url(url): if url.endswith("/"): return url + "subject.txt" else: return url + "/subject.txt" def parse_board(string): if not isinstance(string, unicode): raise TypeError("unsupported string type:" + str(type(string))) thread_expressions = re.compile( r"^(?P<dat>\d+\.dat)<>(?P<title>.*) \((?P<n_comments>\d*)\)$") results = [] for thread_string in string.split("\n"): thread_data = thread_expressions.search(thread_string) if thread_data: results.append({ "title": thread_data.group("title"), "n_comments": int(thread_data.group("n_comments")), "dat": thread_data.group("dat"), }) elif len(thread_string) != 0: raise RegexError( "Regex unmatched in parsing the thread's data", thread_expressions) return results def retrieve_board(board_url, user=None): my_user = user if user else make_anonymous_user() subject_url = make_subject_url(board_url) response = my_user.urlopen(subject_url, gzip=False) if response.code == 200: retrieved_string = unicode(response.read(), "Shift_JIS", "ignore") print type(retrieved_string) return parse_board(retrieved_string) else: message = "HTTP status is invalid: " + str(response.code) raise HttpStatusError(message, response)
Pop some bubbly and indulge in the sweet, fruity notes of pink prosecco. Another glass, please! Wipe glass with a damp cloth.
#!/usr/bin/env python

# Copyright 2018 Informatics Matters Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from rdkit import DataStructs, rdBase
from rdkit.Chem.MolStandardize import rdMolStandardize

from pipelines_utils import parameter_utils, utils
from pipelines_utils_rdkit import rdkit_utils, mol_utils

### functions #########################################

# Single shared Uncharger instance used for neutralisation.
#lfc = rdMolStandardize.LargestFragmentChooser()
uncharger = rdMolStandardize.Uncharger()


def standardize(mol, neutralize, fragment):
    """Standardize a molecule: cleanup, optional largest-fragment
    selection and optional neutralization.

    :param mol: The molecule to standardize
    :param neutralize: Boolean for whether to neutralize the molecule
    :param fragment: The approach for choosing the largest fragment.
      Either 'hac' or 'mw'. If not specified the whole molecule is used.
    :return: The standardized molecule
    """
    mol = rdMolStandardize.Cleanup(mol)
    #mol = lfc.choose(mol)
    # We use our own largest fragment picker as the RDKit one behaves slightly differently
    if fragment:
        mol = mol_utils.fragment(mol, fragment)
    if neutralize:
        mol = uncharger.uncharge(mol)
    return mol


### start main execution #########################################

def main():
    # Command-line entry point: read molecules, standardize each one and
    # write the survivors out, keeping read/written/error counts.

    ### command line args definitions #########################################

    parser = argparse.ArgumentParser(description='RDKit Standardize')
    parser.add_argument('--fragment-method', choices=['hac', 'mw'],
                        help='Approach to find biggest fragment if more than one (hac = biggest by heavy atom count, mw = biggest by mol weight)')
    parser.add_argument('--neutralize', action='store_true', help='Neutralize the molecule')
    parameter_utils.add_default_io_args(parser)
    parser.add_argument('-q', '--quiet', action='store_true', help='Quiet mode')
    parser.add_argument('--thin', action='store_true', help='Thin output mode')

    args = parser.parse_args()
    utils.log("Standardize Args: ", args)

    # handle metadata
    source = "standardize.py"
    datasetMetaProps = {"source":source, "description": "Standardize using RDKit " + rdBase.rdkitVersion}
    clsMappings = {}
    fieldMetaProps = []

    # NOTE(review): args.input / args.informat / args.outformat / args.meta
    # are not declared above -- presumably added by
    # parameter_utils.add_default_io_args; confirm.
    input,output,suppl,writer,output_base = rdkit_utils.\
        default_open_input_output(args.input, args.informat, args.output,
                                  'standardize', args.outformat,
                                  thinOutput=False, valueClassMappings=clsMappings,
                                  datasetMetaProps=datasetMetaProps,
                                  fieldMetaProps=fieldMetaProps)

    # count = molecules read, total = written, errors = unparsable inputs
    count = 0
    total = 0
    errors = 0
    for mol in suppl:
        count += 1
        if mol is None:
            errors += 1
            continue
        m = standardize(mol, args.neutralize, args.fragment_method)
        writer.write(m)
        total += 1

    input.close()
    writer.flush()
    writer.close()
    output.close()

    if args.meta:
        utils.write_metrics(output_base, {'__InputCount__':count, '__OutputCount__':total, '__ErrorCount__':errors, 'RDKitStandardize':total})


if __name__ == "__main__":
    main()
Ritter, Jay R. The costs of going public[J]. Journal of Financial Economics, 1987, 19 (2): 268-281. Megginson, W.L. and Kathleen A. Weiss, July 1991. Venture Capitalist Certification in Initial Public Offerings, The Journal of Finance, Vol. XLVI, No. 3. Lin, Smith. Insider reputation and selling decision: the unwinding of venture capital investments during equity IPOs[J]. Journal of Corporate Finance, 1998, 23 (5): 241-263. Gompers, P.A., 1996. Grandstanding in the venture capital industry, Journal of Financial Economics 42, 133-156. 陈伟, 杨大楷. 风险投资的异质性对IPO的影响研究[J]. 山西财经大学学报, 2013 (03): 33-43.
import datetime
import json
import os
import logging

from dci_umb.sender import send

logger = logging.getLogger(__name__)


def _get_architecture(job):
    """Return the CPU architecture found in the job's tags (default x86_64)."""
    arch = "x86_64"
    available_arches = ["x86_64", "ppc64le", "aarch64", "s390x"]
    for available_arch in available_arches:
        if available_arch in job["tags"]:
            arch = available_arch
            break
    return arch


def _get_artifact(component):
    """Build the productmd-compose artifact descriptor for a compose component."""
    return {
        # composes whose URL contains "nightly" are nightly builds; everything
        # else is treated as a rel-eng compose
        "compose_type": "nightly" if "nightly" in component["url"] else "rel-eng",
        "id": component["name"],
        "type": "productmd-compose",
    }


def _build_generic_message(job, component, result, now):
    """Build the generic VirtualTopic.eng.dci.job.complete message for one result.

    :param job: DCI job dict (id, status, tags, ...)
    :param component: the compose component the job ran against
    :param result: one entry of job["results"]; only its name is used
    :param now: datetime stamped into generated_at
    :return: dict with "target" (UMB topic) and "body" (JSON string)
    """
    test_name = result["name"]
    job_id = str(job["id"])
    job_url = "https://www.distributed-ci.io/jobs/%s/jobStates" % job_id
    target = "topic://VirtualTopic.eng.dci.job.complete"
    architecture = _get_architecture(job)
    return {
        "target": target,
        "body": json.dumps(
            {
                "contact": {
                    "name": "DCI CI",
                    "team": "DCI",
                    "docs": "https://docs.distributed-ci.io/",
                    "email": "distributed-ci@redhat.com",
                    "url": "https://distributed-ci.io/",
                },
                "run": {"url": job_url, "log": job_url},
                "artifact": _get_artifact(component),
                "pipeline": {"id": job_id, "name": "job id"},
                "test": {
                    "category": "system",
                    "namespace": "dci",
                    "type": test_name,
                    "result": "passed" if job["status"] == "success" else "failed",
                },
                "system": [{"provider": "beaker", "architecture": architecture}],
                "generated_at": "%sZ" % now.isoformat(),
                "version": "0.1.0",
            }
        ),
    }


def _get_kernel_version(component):
    """Extract the kernel version from the component's "kernel:<ver>" tag.

    Returns None when the component has no tags or no kernel tag. If several
    kernel tags are present the last one wins (no break, preserved behavior).
    """
    if "tags" not in component:
        return None
    kernel_version = None
    for tag in component["tags"]:
        if "kernel:" in tag:
            kernel_version = tag.replace("kernel:", "")
    return kernel_version


def _build_cki_message(job, component, result):
    """Build the VirtualTopic.eng.dci.cki message from a cki-results entry.

    The overall job status is applied to every testcase of the result.
    """
    job_url = "https://www.distributed-ci.io/jobs/%s/jobStates" % str(job["id"])
    target = "topic://VirtualTopic.eng.dci.cki"
    architecture = _get_architecture(job)
    return {
        "target": target,
        "body": json.dumps(
            {
                "results": [
                    {
                        "test_arch": architecture,
                        "test_description": tc["classname"],
                        "test_log_url": [job_url],
                        "test_name": tc["name"],
                        "test_result": "PASS" if job["status"] == "success" else "FAIL",
                        "is_debug": False,
                    }
                    for tc in result["testcases"]
                ],
                "summarized_result": "",
                "team_email": "distributed-ci@redhat.com",
                "team_name": "DCI",
                "kernel_version": _get_kernel_version(component),
                "artifact": _get_artifact(component),
            }
        ),
    }


def build_umb_messages(event, now=None):
    """Build all UMB messages for a DCI job event.

    Only components of type "compose" are processed. For every result a
    generic message is emitted; results named "cki-results" additionally get
    a CKI-formatted message.

    :param event: dict with a "job" entry
    :param now: timestamp for generated_at; defaults to the current UTC time.
        BUGFIX: this used to be ``now=datetime.datetime.utcnow()``, which is
        evaluated once at import time, so every event reused the same stale
        timestamp. A None sentinel keeps the signature backward-compatible.
    :return: list of {"target", "body"} message dicts
    """
    if now is None:
        now = datetime.datetime.utcnow()
    logger.debug("Received event to send on UMB: %s" % event)
    messages = []
    job = event["job"]
    for component in job["components"]:
        if component["type"].lower() != "compose":
            logger.debug(
                'Ignoring event of type "%s". Only processing events of type "compose".'
                % component["type"]
            )
            continue
        for result in job["results"]:
            if "cki-results" == result["name"].lower():
                messages.append(_build_cki_message(job, component, result))
            messages.append(_build_generic_message(job, component, result, now))
    return messages


def send_event_on_umb(event):
    """Send every message built from the event over UMB; log but never raise."""
    messages = build_umb_messages(event)
    key_file = os.getenv("UMB_KEY_FILE_PATH", "/etc/pki/tls/private/umb.key")
    crt_file = os.getenv("UMB_CRT_FILE_PATH", "/etc/pki/tls/certs/umb.crt")
    ca_file = os.getenv("UMB_CA_FILE_PATH", "/etc/pki/tls/certs/RH-IT-Root-CA.crt")
    brokers = os.environ.get("UMB_BROKERS", "amqps://umb.api.redhat.com:5671").split()
    for message in messages:
        try:
            send(
                {
                    "key_file": key_file,
                    "crt_file": crt_file,
                    "ca_file": ca_file,
                    "brokers": brokers,
                    "target": message["target"],
                    "message": message["body"],
                }
            )
        except Exception as e:
            # best-effort delivery: a failed broker send must not abort the
            # remaining messages
            logger.exception(e)
A hot open-face sandwich with ham, tomatoes, goat cheese, gruyère cheese, nuts, mustard and olive oil. Open-face sandwich served with grilled vegetables, goat cheese and gruyère cheese. Topped with nuts. Roasted lamb on Focaccia bread with tomatoes, onions, lettuce, aioli mayonnaise and chimichurri sauce on the side. Toasted bread filled with ham and cheese topped with béchamel sauce. Toasted bread filled with ham and cheese, béchamel sauce topped with a sunny side up egg. A French Classic! Fresh salad served with onions, tomatoes, tuna, anchovies, olives and boiled egg. Steak topped with blue cheese served on mixed greens, sprinkled with balsamic vinegar reduction. Goat cheese on toast, bacon and roasted almonds served on a bed of spring mix.
import six

# Python 2/3 compatibility: StandardError no longer exists on Python 3,
# so fall back to Exception there.
try:
    StandardError = StandardError
except NameError:
    StandardError = Exception


class OperationError(Exception):
    # Internal error for failed low-level libpq operations.
    pass

from psycopg2cffi._impl.libpq import libpq, ffi


class Warning(StandardError):
    """DB-API 2.0 Warning exception."""
    pass


class Error(StandardError):
    """Base class of all psycopg database errors (DB-API 2.0).

    Instances may carry the server error message/SQLSTATE code, the cursor
    the error occurred on, and the owning libpq PGresult pointer used to
    expose extended diagnostics.
    """

    pgerror = None   # server error message, if any
    pgcode = None    # SQLSTATE error code, if any
    cursor = None    # cursor the error occurred on, if any
    _pgres = None    # owned PGresult pointer; freed in __del__

    @property
    def diag(self):
        # Structured error diagnostics (psycopg extension to DB-API).
        return Diagnostics(self)

    def __del__(self):
        # Release the libpq result we own, if any, to avoid leaking it.
        if self._pgres:
            libpq.PQclear(self._pgres)
            self._pgres = None

    def __reduce__(self):
        # Support pickling by stripping the unpicklable cursor and PGresult
        # references out of the instance state.
        t = super(Error, self).__reduce__()
        if not isinstance(t, tuple):
            return t
        # note: in c implementation reduce returns a 2-items tuple;
        # in python a 3-items tuple. Maybe the c exception doesn't have a dict?
        if len(t) != 3:
            return t

        d = t[2].copy()
        d.pop('cursor', None)
        d.pop('_pgres', None)
        return (t[0], t[1], d)

    def __setstate__(self, state):
        # Restore only the picklable attributes saved by __reduce__.
        self.pgerror = state.get('pgerror')
        self.pgcode = state.get('pgcode')


# DB-API 2.0 exception hierarchy.

class InterfaceError(Error):
    pass


class DatabaseError(Error):
    pass


class DataError(DatabaseError):
    pass


class OperationalError(DatabaseError):
    pass


class IntegrityError(DatabaseError):
    pass


class InternalError(DatabaseError):
    pass


class ProgrammingError(DatabaseError):
    pass


class NotSupportedError(DatabaseError):
    pass


# psycopg-specific refinements of OperationalError.

class QueryCanceledError(OperationalError):
    pass


class TransactionRollbackError(OperationalError):
    pass


class Diagnostics(object):
    """Read-only accessor for the extended error fields of an Error.

    Each property maps to one PQresultErrorField() diagnostics code of the
    PGresult attached to the exception; absent fields yield None.
    """

    def __init__(self, exc):
        self._exc = exc

    def _get_field(self, field):
        # Fetch one diagnostics field from the error's PGresult; on Python 3
        # the raw bytes are decoded to a str.
        from psycopg2cffi._impl.adapters import bytes_to_ascii
        if self._exc and self._exc._pgres:
            b = libpq.PQresultErrorField(self._exc._pgres, field)
            if b:
                b = ffi.string(b)
                if six.PY3:  # py2 tests insist on str here
                    b = bytes_to_ascii(b)
            return b

    @property
    def severity(self):
        return self._get_field(libpq.LIBPQ_DIAG_SEVERITY)

    @property
    def sqlstate(self):
        return self._get_field(libpq.LIBPQ_DIAG_SQLSTATE)

    @property
    def message_primary(self):
        return self._get_field(libpq.LIBPQ_DIAG_MESSAGE_PRIMARY)

    @property
    def message_detail(self):
        return self._get_field(libpq.LIBPQ_DIAG_MESSAGE_DETAIL)

    @property
    def message_hint(self):
        return self._get_field(libpq.LIBPQ_DIAG_MESSAGE_HINT)

    @property
    def statement_position(self):
        return self._get_field(libpq.LIBPQ_DIAG_STATEMENT_POSITION)

    @property
    def internal_position(self):
        return self._get_field(libpq.LIBPQ_DIAG_INTERNAL_POSITION)

    @property
    def internal_query(self):
        return self._get_field(libpq.LIBPQ_DIAG_INTERNAL_QUERY)

    @property
    def context(self):
        return self._get_field(libpq.LIBPQ_DIAG_CONTEXT)

    @property
    def schema_name(self):
        return self._get_field(libpq.LIBPQ_DIAG_SCHEMA_NAME)

    @property
    def table_name(self):
        return self._get_field(libpq.LIBPQ_DIAG_TABLE_NAME)

    @property
    def column_name(self):
        return self._get_field(libpq.LIBPQ_DIAG_COLUMN_NAME)

    @property
    def datatype_name(self):
        return self._get_field(libpq.LIBPQ_DIAG_DATATYPE_NAME)

    @property
    def constraint_name(self):
        return self._get_field(libpq.LIBPQ_DIAG_CONSTRAINT_NAME)

    @property
    def source_file(self):
        return self._get_field(libpq.LIBPQ_DIAG_SOURCE_FILE)

    @property
    def source_line(self):
        return self._get_field(libpq.LIBPQ_DIAG_SOURCE_LINE)

    @property
    def source_function(self):
        return self._get_field(libpq.LIBPQ_DIAG_SOURCE_FUNCTION)
If the prospect of tiling makes you feel tired, we've come up with a simple and stylish alternative. This beautiful acrylic wall panel, in a light grey stone colour, avoids the need for tricky tiling and grouting, giving you a protective watertight barrier in next to no time. With the stunning appearance of glass, but 20 times the impact strength and a quarter of the weight, it will help you transform your bathroom into one that is truly contemporary. Featuring a light stone colour we've named Stone, this acrylic wall panel has a non-stick surface, which means soap and limescale simply wash away; plus, with fewer joints, mould has nowhere to develop. Use our specialist wall fixing kit for a solid installation and finish off with our matching end caps.
# Name:         core.py
# Purpose:      Core components
# Author:       Roman Rolinsky <rolinsky@femagsoft.com>
# Created:      31.05.2007
# RCS-ID:       $Id: core.py 71860 2012-06-25 15:46:16Z ROL $

import wx
from wx.tools.XRCed import component, images, attribute, params, view
from wx.tools.XRCed.globals import TRACE,is_object,is_element,STD_NAME
import _bitmaps as bitmaps

TRACE('*** creating core components')

# Set panel images
component.Manager.panelImages['Windows'] = images.ToolPanel_Windows.GetImage()
component.Manager.panelImages['Menus'] = images.ToolPanel_Menus.GetImage()
component.Manager.panelImages['Sizers'] = images.ToolPanel_Sizers.GetImage()
component.Manager.panelImages['Panels'] = images.ToolPanel_Panels.GetImage()
component.Manager.panelImages['Gizmos'] = images.ToolPanel_Gizmos.GetImage()

### wxFrame

class Frame(component.Container):
    '''Container for wxFrame: menubar/toolbar children are not ordinary
    windows, so skip them when mapping XML child nodes to child objects.'''
    def getChildObject(self, node, obj, index):
        # Do not count toolbar and menubar
        objects = filter(is_element, node.childNodes)
        indexOffset = 0         # count non-window children
        for i,o in enumerate(objects):
            if o.getAttribute('class') == 'wxMenuBar':
                if i == index: return obj.GetMenuBar()
                elif i < index: indexOffset += 1
            elif o.getAttribute('class') == 'wxToolBar':
                if i == index: return obj.GetToolBar()
                elif i < index: indexOffset += 1
        return component.Container.getChildObject(self, node, obj, index - indexOffset)

c = Frame('wxFrame', ['frame','window','top_level'],
          ['pos', 'size', 'title', 'centered'],
          image=images.TreeFrame.GetImage())
c.isTopLevel = True
c.addStyles('wxDEFAULT_FRAME_STYLE', 'wxDEFAULT_DIALOG_STYLE', 'wxCAPTION',
            'wxSTAY_ON_TOP', 'wxSYSTEM_MENU', 'wxRESIZE_BORDER',
            'wxCLOSE_BOX', 'wxMAXIMIZE_BOX', 'wxMINIMIZE_BOX',
            'wxFRAME_NO_TASKBAR', 'wxFRAME_SHAPED', 'wxFRAME_TOOL_WINDOW',
            'wxFRAME_FLOAT_ON_PARENT', 'wxTAB_TRAVERSAL')
c.addExStyles('wxFRAME_EX_CONTEXTHELP', 'wxFRAME_EX_METAL')
c.addEvents('EVT_SIZE', 'EVT_CLOSE', 'EVT_MENU_HIGHLIGHT', 'EVT_ICONIZE',
            'EVT_MAXIMIZE', 'EVT_ACTIVATE', 'EVT_UPDATE_UI')
component.Manager.register(c)
component.Manager.setMenu(c, 'TOP_LEVEL', 'frame', 'wxFrame', 10)
component.Manager.setTool(c, 'Windows', bitmaps.wxFrame.GetBitmap(), (0,0))

### wxMDIParentFrame

class MDIParentFrame(component.Container):
    '''Like Frame, but real children live inside the MDI client window.'''
    def getChildObject(self, node, obj, index):
        # Do not count toolbar and menubar
        objects = filter(is_element, node.childNodes)
        indexOffset = 0         # count non-window children
        for i,o in enumerate(objects):
            if o.getAttribute('class') == 'wxMenuBar':
                if i == index: return obj.GetMenuBar()
                elif i < index: indexOffset += 1
            elif o.getAttribute('class') == 'wxToolBar':
                if i == index: return obj.GetToolBar()
                elif i < index: indexOffset += 1
        return obj.GetClientWindow().GetChildren()[index]

c = MDIParentFrame('wxMDIParentFrame', ['mdi_parent_frame','top_level'],
                   ['pos', 'size', 'title', 'centered'],
                   image=images.TreeFrame.GetImage())
c.isTopLevel = True
c.addStyles('wxDEFAULT_FRAME_STYLE', 'wxDEFAULT_DIALOG_STYLE', 'wxCAPTION',
            'wxSTAY_ON_TOP', 'wxSYSTEM_MENU', 'wxRESIZE_BORDER',
            'wxCLOSE_BOX', 'wxMAXIMIZE_BOX', 'wxMINIMIZE_BOX',
            'wxFRAME_NO_TASKBAR', 'wxFRAME_SHAPED', 'wxFRAME_TOOL_WINDOW',
            'wxFRAME_FLOAT_ON_PARENT', 'wxFRAME_NO_WINDOW_MENU', 'wxTAB_TRAVERSAL')
c.addExStyles('wxFRAME_EX_METAL')
c.addEvents('EVT_SIZE', 'EVT_CLOSE', 'EVT_MENU_HIGHLIGHT', 'EVT_ICONIZE',
            'EVT_MAXIMIZE', 'EVT_ACTIVATE', 'EVT_UPDATE_UI')
component.Manager.register(c)
component.Manager.setMenu(c, 'TOP_LEVEL', 'MDI parent frame', 'wxMDIParentFrame', 11)
#component.Manager.setTool(c, 'Windows', bitmaps.wxFrame.GetBitmap(), (0,0))

### wxMDIChildFrame

class MDIChildFrame(component.Container):
    '''Same menubar/toolbar skipping as Frame, for MDI children.'''
    def getChildObject(self, node, obj, index):
        # Do not count toolbar and menubar
        objects = filter(is_element, node.childNodes)
        indexOffset = 0         # count non-window children
        for i,o in enumerate(objects):
            if o.getAttribute('class') == 'wxMenuBar':
                if i == index: return obj.GetMenuBar()
                elif i < index: indexOffset += 1
            elif o.getAttribute('class') == 'wxToolBar':
                if i == index: return obj.GetToolBar()
                elif i < index: indexOffset += 1
        return component.Container.getChildObject(self, node, obj, index - indexOffset)

c = MDIChildFrame('wxMDIChildFrame', ['mdi_child_frame','window'],
                  ['pos', 'size', 'title', 'centered'],
                  image=images.TreeFrame.GetImage())
c.addStyles('wxDEFAULT_FRAME_STYLE', 'wxDEFAULT_DIALOG_STYLE', 'wxCAPTION',
            'wxSTAY_ON_TOP', 'wxSYSTEM_MENU', 'wxRESIZE_BORDER',
            'wxCLOSE_BOX', 'wxMAXIMIZE_BOX', 'wxMINIMIZE_BOX',
            'wxFRAME_NO_TASKBAR', 'wxFRAME_SHAPED', 'wxFRAME_TOOL_WINDOW',
            'wxFRAME_FLOAT_ON_PARENT', 'wxFRAME_NO_WINDOW_MENU', 'wxTAB_TRAVERSAL')
c.addExStyles('wxFRAME_EX_METAL')
c.addEvents('EVT_SIZE', 'EVT_CLOSE', 'EVT_MENU_HIGHLIGHT', 'EVT_ICONIZE',
            'EVT_MAXIMIZE', 'EVT_ACTIVATE', 'EVT_UPDATE_UI')
component.Manager.register(c)
component.Manager.setMenu(c, 'container', 'MDI child frame', 'wxMDIChildFrame', 12)
#component.Manager.setTool(c, 'Windows', bitmaps.wxFrame.GetBitmap(), (0,0))

### wxDialog

c = component.Container('wxDialog', ['frame','window','top_level'],
                        ['pos', 'size', 'title', 'centered', 'icon'],
                        image=images.TreeDialog.GetImage())
c.isTopLevel = True
c.setSpecial('icon', attribute.BitmapAttribute)
c.addStyles('wxDEFAULT_DIALOG_STYLE', 'wxCAPTION', 'wxSTAY_ON_TOP',
            'wxSYSTEM_MENU', 'wxRESIZE_BORDER', 'wxCLOSE_BOX',
            'wxMAXIMIZE_BOX', 'wxMINIMIZE_BOX', 'wxDIALOG_NO_PARENT',
            'wxFRAME_SHAPED', 'wxTAB_TRAVERSAL')
c.addExStyles('wxDIALOG_EX_CONTEXTHELP', 'wxDIALOG_EX_METAL')
c.addEvents('EVT_INIT_DIALOG', 'EVT_SIZE', 'EVT_CLOSE', 'EVT_ICONIZE',
            'EVT_MAXIMIZE', 'EVT_ACTIVATE', 'EVT_UPDATE_UI')
component.Manager.register(c)
component.Manager.setMenu(c, 'TOP_LEVEL', 'dialog', 'wxDialog', 20)
component.Manager.setTool(c, 'Windows', bitmaps.wxDialog.GetBitmap(), (0,1))

### wxPanel

c = component.Container('wxPanel', ['window', 'top_level', 'control'],
                        ['pos', 'size'],
                        image=images.TreePanel.GetImage())
c.addStyles('wxTAB_TRAVERSAL')
component.Manager.register(c)
component.Manager.setMenu(c, 'TOP_LEVEL', 'panel', 'wxPanel', 30)
component.Manager.setMenu(c, 'container', 'panel', 'wxPanel', 10)
component.Manager.setTool(c, 'Windows', bitmaps.wxPanel.GetBitmap(), (0,2))

### wxWizard

class Wizard(component.Container):
    '''Wizards cannot be previewed in place; run them modally instead.'''
    genericStyles = genericExStyles = []
    def makeTestWin(self, res, name):
        wiz = wx.wizard.PreWizard()
        res.LoadOnObject(wiz, view.frame, STD_NAME, self.klass)
        # Find and select first page
        firstPage = None
        for w in wiz.GetChildren():
            if isinstance(w, wx.wizard.WizardPage):
                firstPage = w
                break
        if firstPage:
            wiz.RunWizard(firstPage)
        else:
            wx.LogMessage('Wizard is empty')
        wiz.Destroy()
        return None, None

c = Wizard('wxWizard', ['wizard', 'top_level'],
           ['pos', 'title', 'bitmap'],
           image=images.TreeWizard.GetImage())
c.addExStyles('wxWIZARD_EX_HELPBUTTON')
c.setSpecial('bitmap', attribute.BitmapAttribute)
component.Manager.register(c)
component.Manager.setMenu(c, 'TOP_LEVEL', 'wizard', 'wxWizard', 40)
component.Manager.setTool(c, 'Windows', bitmaps.wxWizard.GetBitmap(), (1,0), (1,2))

### wxWizardPage

class WizardPage(component.Container):
    '''Preview a single page by wrapping it in a temporary wizard.'''
    def makeTestWin(self, res, name):
        # Create single-page wizard
        # BUGFIX: removed leftover debugging code (pdb.set_trace and
        # print statements) that froze the preview.
        wiz = wx.wizard.Wizard(view.frame, title='Test Wizard')
        page = wx.wizard.PrePyWizardPage()
        res.LoadOnObject(page, wiz, STD_NAME, self.klass)
        # page = res.LoadObject(wiz, STD_NAME, self.klass)
        wiz.RunWizard(page)
        wiz.Destroy()
        return None, None

c = WizardPage('wxWizardPage', ['wizard_page', 'window'],
               ['bitmap'],
               image=images.TreePanel.GetImage())
c.setSpecial('bitmap', attribute.BitmapAttribute)
component.Manager.register(c)
component.Manager.setMenu(c, 'container', 'wizard page', 'wxWizardPage')

### wxWizardPageSimple

c = component.Container('wxWizardPageSimple', ['wizard_page', 'window'],
                        ['bitmap'],
                        image=images.TreePanel.GetImage())
c.setSpecial('bitmap', attribute.BitmapAttribute)
component.Manager.register(c)
component.Manager.setMenu(c, 'container', 'simple wizard page', 'wxWizardPageSimple')

### wxPropertySheetDialog

class ParamButtons(params.ParamBinaryOr):
    '''Button flags.'''
    values = ['wxOK', 'wxCANCEL', 'wxYES', 'wxNO', 'wxHELP', 'wxNO_DEFAULT']

c = component.SmartContainer('wxPropertySheetDialog',
                             ['frame','book','window','top_level'],
                             ['pos', 'size', 'title', 'centered', 'icon', 'buttons'],
                             params={'buttons': ParamButtons},
                             implicit_klass='propertysheetpage',
                             implicit_page='PropertySheetPage',
                             implicit_attributes=['label', 'selected', 'bitmap'],
                             implicit_params={'label': params.ParamText,
                                              'selected': params.ParamBool},
                             image=images.TreeDialog.GetImage())
c.isTopLevel = True
c.setSpecial('bitmap', attribute.BitmapAttribute)
c.setSpecial('icon', attribute.BitmapAttribute)
# BUGFIX: 'wxTAB_TRAVERSAL' was listed twice; the duplicate is removed.
c.addStyles('wxDEFAULT_DIALOG_STYLE', 'wxCAPTION', 'wxFRAME_SHAPED',
            'wxTAB_TRAVERSAL', 'wxSTAY_ON_TOP',
            'wxSYSTEM_MENU', 'wxRESIZE_BORDER', 'wxCLOSE_BOX',
            'wxMAXIMIZE_BOX', 'wxMINIMIZE_BOX',
            'wxDIALOG_MODAL', 'wxDIALOG_MODELESS', 'wxDIALOG_NO_PARENT')
c.addExStyles('wxDIALOG_EX_CONTEXTHELP', 'wxDIALOG_EX_METAL')
c.addEvents('EVT_INIT_DIALOG', 'EVT_SIZE', 'EVT_CLOSE', 'EVT_ICONIZE',
            'EVT_MAXIMIZE', 'EVT_ACTIVATE', 'EVT_UPDATE_UI')
component.Manager.register(c)
# BUGFIX: menu label typo 'propery' -> 'property'
component.Manager.setMenu(c, 'TOP_LEVEL', 'property sheet dialog', 'wxPropertySheetDialog', 50)
component.Manager.setTool(c, 'Windows', bitmaps.wxPropertySheetDialog.GetBitmap(), (1,1))

### wxBoxSizer

c = component.BoxSizer('wxBoxSizer', ['sizer'],
                       ['orient'],
                       defaults={'orient': 'wxVERTICAL'},
                       images=[images.TreeSizerV.GetImage(), images.TreeSizerH.GetImage()])
component.Manager.register(c)
component.Manager.setMenu(c, 'sizer', 'box sizer', 'wxBoxSizer', 10)
component.Manager.setTool(c, 'Sizers', pos=(0,0))

### wxStaticBoxSizer

c = component.BoxSizer('wxStaticBoxSizer', ['sizer'],
                       ['label', 'orient'],
                       defaults={'orient': 'wxVERTICAL'},
                       images=[images.TreeSizerV.GetImage(), images.TreeSizerH.GetImage()])
component.Manager.register(c)
component.Manager.setMenu(c, 'sizer', 'static box sizer', 'wxStaticBoxSizer', 20)
component.Manager.setTool(c, 'Sizers', pos=(0,2))

### wxGridSizer

c = component.Sizer('wxGridSizer', ['sizer'],
                    ['cols', 'rows', 'vgap', 'hgap'],
                    defaults={'cols': '2', 'rows': '2'},
                    image=images.TreeSizerGrid.GetImage())
component.Manager.register(c)
component.Manager.setMenu(c, 'sizer', 'grid sizer', 'wxGridSizer', 30)
component.Manager.setTool(c, 'Sizers', pos=(0,1))

### wxFlexGridSizer

c = component.Sizer('wxFlexGridSizer', ['sizer'],
                    ['cols', 'rows', 'vgap', 'hgap', 'growablecols', 'growablerows'],
                    defaults={'cols': '2', 'rows': '2'},
                    image=images.TreeSizerFlexGrid.GetImage())
# growablecols/growablerows are comma-separated index lists
c.setSpecial('growablecols', attribute.MultiAttribute)
c.setParamClass('growablecols', params.ParamIntList)
c.setSpecial('growablerows', attribute.MultiAttribute)
c.setParamClass('growablerows', params.ParamIntList)
component.Manager.register(c)
component.Manager.setMenu(c, 'sizer', 'flex grid sizer', 'wxFlexGridSizer', 40)
component.Manager.setTool(c, 'Sizers', pos=(1,0))

### wxGridBagSizer

c = component.Sizer('wxGridBagSizer', ['sizer'],
                    ['vgap', 'hgap', 'growablecols', 'growablerows'],
                    image=images.TreeSizerGridBag.GetImage(),
                    implicit_attributes=['option', 'flag', 'border', 'minsize', 'ratio', 'cellpos', 'cellspan'])
c.setSpecial('growablecols', attribute.MultiAttribute)
c.setParamClass('growablecols', params.ParamIntList)
c.setSpecial('growablerows', attribute.MultiAttribute)
c.setParamClass('growablerows', params.ParamIntList)
c.setImplicitParamClass('cellpos', params.ParamPosSize)
c.setImplicitParamClass('cellspan', params.ParamPosSize)
component.Manager.register(c)
component.Manager.setMenu(c, 'sizer', 'grid bag sizer', 'wxGridBagSizer', 50)
component.Manager.setTool(c, 'Sizers', pos=(1,1))

### wxStdDialogButtonSizer

class StdDialogButtonSizer(component.Sizer):
    '''Sizer whose children are looked up by button ID rather than position.'''
    def getChildObject(self, node, obj, index):
        # This sizer orders buttons by fixed ordering, so we must
        # get the ID to find them
        try:
            n = filter(is_element, node.childNodes)[index]
            n = filter(is_element, n.childNodes)[0]
            id = n.getAttribute('name')
        except IndexError:
            return None
        items = filter(wx.SizerItem.IsWindow, obj.GetChildren())
        for item in items:
            w = item.GetWindow()
            if w.GetName() == id: return w
        return None

c = StdDialogButtonSizer('wxStdDialogButtonSizer', ['btnsizer'], [],
                         implicit_klass='button',
                         implicit_attributes=[])
component.Manager.register(c)
component.Manager.setMenu(c, 'sizer', 'dialog button sizer', 'wxStdDialogButtonSizer', 60)
#component.Manager.setTool(c, 'Sizers', pos=(0,2))

### spacer

c = component.SimpleComponent('spacer', ['spacer'],
                              ['size', 'option', 'flag', 'border'],
                              image=images.TreeSpacer.GetImage())
c.hasName = False               # spacers have no XRC name attribute
component.Manager.register(c)
component.Manager.setMenu(c, 'sizer', 'spacer', 'spacer', 70)
component.Manager.setTool(c, 'Sizers', pos=(1,2))

################################################################################
# Containers

# wxPanel is already added

### wxScrolledWindow

c = component.Container('wxScrolledWindow', ['window', 'control'],
                        ['pos', 'size'])
c.addStyles('wxHSCROLL', 'wxVSCROLL', 'wxTAB_TRAVERSAL')
c.addEvents('EVT_SCROLLWIN_TOP', 'EVT_SCROLLWIN_BOTTOM',
            'EVT_SCROLLWIN_LINEUP', 'EVT_SCROLLWIN_LINEDOWN',
            'EVT_SCROLLWIN_PAGEUP', 'EVT_SCROLLWIN_PAGEDOWN',
            'EVT_SCROLLWIN_THUMBTRACK', 'EVT_SCROLLWIN_THUMBRELEASE')
component.Manager.register(c)
component.Manager.setMenu(c, 'container', 'scrolled window', 'wxScrolledWindow', 20)
component.Manager.setTool(c, 'Panels', pos=(3,0))

### wxSplitterWindow

c = component.Container('wxSplitterWindow', ['book', 'window', 'control'],
                        ['pos', 'size', 'orientation', 'sashpos', 'minsize', 'gravity'],
                        # note: no flt[0..1], so just leaving gravity as text
                        params={'orientation': params.ParamOrientation,
                                'sashpos': params.ParamUnit,
                                'minsize': params.ParamUnit},
                        image=images.TreeSplitterWindow.GetImage())
c.addStyles('wxSP_3D', 'wxSP_3DSASH', 'wxSP_3DBORDER', 'wxSP_BORDER',
            'wxSP_FULLSASH', 'wxSP_NOBORDER', 'wxSP_PERMIT_UNSPLIT',
            'wxSP_LIVE_UPDATE', 'wxSP_NO_XP_THEME')
c.addEvents('EVT_SPLITTER_SASH_POS_CHANGING', 'EVT_SPLITTER_SASH_POS_CHANGED',
            'EVT_SPLITTER_UNSPLIT', 'EVT_SPLITTER_DCLICK')
component.Manager.register(c)
component.Manager.setMenu(c, 'container', 'splitter window', 'wxSplitterWindow', 30)
component.Manager.setTool(c, 'Panels', pos=(2,3))

### wxNotebook

c = component.SmartContainer('wxNotebook', ['book', 'window', 'control'],
                             ['pos', 'size'],
                             implicit_klass='notebookpage',
                             implicit_page='NotebookPage',
                             implicit_attributes=['label', 'selected', 'bitmap'],
                             implicit_params={'label': params.ParamText,
                                              'selected': params.ParamBool},
                             image=images.TreeNotebook.GetImage())
c.addStyles('wxBK_DEFAULT', 'wxBK_TOP', 'wxBK_LEFT', 'wxBK_RIGHT', 'wxBK_BOTTOM',
            'wxNB_FIXEDWIDTH', 'wxNB_MULTILINE', 'wxNB_NOPAGETHEME')
# accept the old wxNB_* aliases for the generic wxBK_* styles
c.addEquivStyles({'wxBK_DEFAULT': 'wxNB_DEFAULT', 'wxBK_LEFT': 'wxNB_LEFT',
                  'wxBK_RIGHT': 'wxNB_RIGHT', 'wxBK_TOP': 'wxNB_TOP',
                  'wxBK_BOTTOM': 'wxNB_BOTTOM'})
c.setSpecial('bitmap', attribute.BitmapAttribute)
c.addEvents('EVT_NOTEBOOK_PAGE_CHANGED', 'EVT_NOTEBOOK_PAGE_CHANGING')
component.Manager.register(c)
component.Manager.setMenu(c, 'container', 'notebook', 'Notebook control', 40)
component.Manager.setTool(c, 'Panels', pos=(1,0))

### wxChoicebook

c = component.SmartContainer('wxChoicebook', ['book', 'window', 'control'],
                             ['pos', 'size'],
                             implicit_klass='choicebookpage',
                             implicit_page='ChoicebookPage',
                             implicit_attributes=['label', 'selected', 'bitmap'],
                             implicit_params={'label': params.ParamText,
                                              'selected': params.ParamBool})
c.addStyles('wxBK_DEFAULT', 'wxBK_TOP', 'wxBK_LEFT', 'wxBK_RIGHT', 'wxBK_BOTTOM')
c.addEquivStyles({'wxBK_DEFAULT': 'wxCHB_DEFAULT', 'wxBK_LEFT': 'wxCHB_LEFT',
                  'wxBK_RIGHT': 'wxCHB_RIGHT', 'wxBK_TOP': 'wxCHB_TOP',
                  'wxBK_BOTTOM': 'wxCHB_BOTTOM'})
c.setSpecial('bitmap', attribute.BitmapAttribute)
c.addEvents('EVT_CHOICEBOOK_PAGE_CHANGED', 'EVT_CHOICEBOOK_PAGE_CHANGING')
component.Manager.register(c)
component.Manager.setMenu(c, 'container', 'choicebook', 'wxChoicebook', 50)
component.Manager.setTool(c, 'Panels', pos=(1,3))

### wxListbook

class ListBook(component.SmartContainer):
    def getChildObject(self, node, obj, index):
        # Listbook's first child is ListView
        return obj.GetChildren()[index+1]

c = ListBook('wxListbook', ['book', 'window', 'control'],
             ['pos', 'size'],
             implicit_klass='listbookpage',
             implicit_page='ListbookPage',
             implicit_attributes=['label', 'selected', 'bitmap'],
             implicit_params={'label': params.ParamText,
                              'selected': params.ParamBool})
c.addStyles('wxBK_DEFAULT', 'wxBK_LEFT', 'wxBK_RIGHT', 'wxBK_TOP', 'wxBK_BOTTOM')
c.addEquivStyles({'wxBK_DEFAULT': 'wxLB_DEFAULT', 'wxBK_LEFT': 'wxLB_LEFT',
                  'wxBK_RIGHT': 'wxLB_RIGHT', 'wxBK_TOP': 'wxLB_TOP',
                  'wxBK_BOTTOM': 'wxLB_BOTTOM'})
c.setSpecial('bitmap', attribute.BitmapAttribute)
c.addEvents('EVT_LISTBOOK_PAGE_CHANGED', 'EVT_LISTBOOK_PAGE_CHANGING')
component.Manager.register(c)
component.Manager.setMenu(c, 'container', 'listbook', 'wxListbook', 60)
component.Manager.setTool(c, 'Panels', pos=(0,3))

### wxTreebook

class TreeBook(component.SmartContainer):
    def getChildObject(self, node, obj, index):
        # Treebook's first child is its tree control, so pages start at 1
        return obj.GetChildren()[index+1]

c = TreeBook('wxTreebook', ['book', 'window', 'control'],
             ['pos', 'size'],
             implicit_klass='treebookpage',
             implicit_page='TreebookPage',
             implicit_attributes=['label', 'selected', 'bitmap', 'depth'],
             implicit_params={'label': params.ParamText,
                              'selected': params.ParamBool,
                              'depth': params.ParamInt})
c.addStyles('wxBK_DEFAULT', 'wxBK_LEFT', 'wxBK_RIGHT', 'wxBK_TOP', 'wxBK_BOTTOM')
c.setSpecial('bitmap', attribute.BitmapAttribute)
c.addEvents('EVT_TREEBOOK_PAGE_CHANGED', 'EVT_TREEBOOK_PAGE_CHANGING',
            'EVT_TREEBOOK_NODE_COLLAPSED', 'EVT_TREEBOOK_NODE_EXPANDED')
component.Manager.register(c)
component.Manager.setMenu(c, 'container', 'treebook', 'wxTreebook', 70)
component.Manager.setTool(c, 'Panels', pos=(1,1), span=(1,2))

### wxCollapsiblePane

c = component.SmartContainer('wxCollapsiblePane', ['book', 'window', 'control'],
                             ['pos', 'size', 'label', 'collapsed'],
                             implicit_klass='panewindow',
                             implicit_page='',
                             implicit_attributes=[])
c.addStyles('wxCP_NO_TLW_RESIZE', 'wxCP_DEFAULT_STYLE')
c.setParamClass('collapsed', params.ParamBool)
c.addEvents('EVT_COMMAND_COLLPANE_CHANGED')
component.Manager.register(c)
component.Manager.setMenu(c, 'container', 'collapsible pane', 'wxCollapsiblePane', 71)

################################################################################
# Menus

### wxMenuBar

class MenuBar(component.SimpleContainer):
    '''Menubars are previewed attached to a temporary frame.'''
    isTestable = True
    # Menubar should be shown in a normal frame
    def makeTestWin(self, res, name):
        frame = wx.Frame(None, -1, '%s: %s' % (self.klass, name), name=STD_NAME)
        object = res.LoadMenuBarOnFrame(frame, STD_NAME)
        return None, frame
    def getRect(self, obj):
        # a menubar has no client rectangle to highlight
        return None

c = MenuBar('wxMenuBar', ['menubar', 'top_level'], [],
            image=images.TreeMenuBar.GetImage())
c.addStyles('wxMB_DOCKABLE')
c.addEvents('EVT_MENU', 'EVT_MENU_OPEN', 'EVT_MENU_CLOSE', 'EVT_MENU_HIGHLIGHT_ALL')
component.Manager.register(c)
component.Manager.setMenu(c, 'TOP_LEVEL', 'menu bar', 'wxMenuBar', 40)
component.Manager.setMenu(c, 'bar', 'menu bar', 'wxMenuBar', 10)
component.Manager.setTool(c, 'Menus', pos=(1,0))

### wxMenu

c = component.SimpleContainer('wxMenu', ['menu', 'top_level'],
                              ['label', 'help', 'enabled'],
                              image=images.TreeMenu.GetImage())
#c.setSpecial('bitmap', attribute.BitmapAttribute)
c.addStyles('wxMENU_TEAROFF')
c.addEvents('EVT_MENU', 'EVT_MENU_OPEN', 'EVT_MENU_CLOSE', 'EVT_MENU_HIGHLIGHT_ALL')
component.Manager.register(c)
component.Manager.setMenu(c, 'TOP_LEVEL', 'menu', 'wxMenu', 50)
component.Manager.setMenu(c, 'ROOT', 'menu', 'wxMenu', 20)
component.Manager.setTool(c, 'Menus', pos=(1,1), span=(2,1))

### wxMenuItem

c = component.SimpleComponent('wxMenuItem', ['menu_item'],
                              ['label', 'bitmap', 'accel', 'help',
                               'checkable', 'radio', 'enabled', 'checked'],
                              image=images.TreeMenuItem.GetImage())
c.setSpecial('bitmap', attribute.BitmapAttribute)
c.addEvents('EVT_MENU', 'EVT_MENU_HIGHLIGHT')
component.Manager.register(c)
component.Manager.setMenu(c, 'ROOT', 'menu item', 'wxMenuItem', 10)
component.Manager.setTool(c, 'Menus', pos=(1,2))

### wxToolBar

class ToolBar(component.SimpleContainer):
    '''Toolbars are previewed attached to a temporary frame.'''
    isTestable = True
    # Toolbar should be shown in a normal frame
    def makeTestWin(self, res, name):
        frame = wx.Frame(None, -1, '%s: %s' % (self.klass, name), name=STD_NAME)
        object = res.LoadToolBar(frame, STD_NAME)
        return None, frame
    def getRect(self, obj):
        # a toolbar has no client rectangle to highlight
        return None

c = ToolBar('wxToolBar', ['toolbar', 'top_level', 'control'],
            ['bitmapsize', 'margins', 'packing', 'separation',
             'dontattachtoframe', 'pos', 'size'],
            image=images.TreeToolBar.GetImage())
c.addStyles('wxTB_FLAT', 'wxTB_DOCKABLE', 'wxTB_VERTICAL', 'wxTB_HORIZONTAL',
            'wxTB_3DBUTTONS','wxTB_TEXT', 'wxTB_NOICONS', 'wxTB_NODIVIDER',
            'wxTB_NOALIGN', 'wxTB_HORZ_LAYOUT', 'wxTB_HORZ_TEXT',
            'wxTB_TOP', 'wxTB_LEFT', 'wxTB_RIGHT', 'wxTB_BOTTOM')
c.setParamClass('dontattachtoframe', params.ParamBool)
c.setParamClass('bitmapsize', params.ParamPosSize)
c.setParamClass('margins', params.ParamPosSize)
c.setParamClass('packing', params.ParamUnit)
c.setParamClass('separation', params.ParamUnit)
c.renameDict = {'dontattachtoframe': "don't attach"}
c.addEvents('EVT_TOOL', 'EVT_TOOL_ENTER', 'EVT_TOOL_RCLICKED')
component.Manager.register(c)
component.Manager.setMenu(c, 'TOP_LEVEL', 'tool bar', 'wxToolBar', 50)
component.Manager.setMenu(c, 'bar', 'tool bar', 'wxToolBar', 20)
component.Manager.setTool(c, 'Menus', pos=(0,0))

### wxTool

c = component.SimpleComponent('tool', ['tool'],
                              ['bitmap', 'bitmap2', 'radio', 'toggle',
                               'tooltip', 'longhelp', 'label'],
                              image=images.TreeTool.GetImage())
component.Manager.register(c)
c.setSpecial('bitmap', attribute.BitmapAttribute)
c.setSpecial('bitmap2', attribute.BitmapAttribute)
c.setParamClass('bitmap2', params.ParamBitmap)
c.setParamClass('toggle', params.ParamBool)
c.addEvents('EVT_TOOL', 'EVT_TOOL_ENTER', 'EVT_TOOL_RCLICKED')
component.Manager.setMenu(c, 'ROOT', 'tool', 'wxTool', 10)
component.Manager.setTool(c, 'Menus', pos=(0,1))

### wxSeparator

c = component.SimpleComponent('separator', ['separator'], [],
                              image=images.TreeSeparator.GetImage())
c.hasName = False               # separators have no XRC name attribute
component.Manager.register(c)
component.Manager.setMenu(c, 'ROOT', 'separator', 'separator', 20)
component.Manager.setTool(c, 'Menus', pos=(0,2))

### wxBreak

c = component.SimpleComponent('break', ['break'], [],
                              image=images.TreeSeparator.GetImage())
c.hasName = False               # breaks have no XRC name attribute
component.Manager.register(c)
component.Manager.setMenu(c, 'ROOT', 'break', 'break', 21)

### wxStatusBar

c = component.SimpleComponent('wxStatusBar', ['statusbar'],
                              ['fields', 'widths', 'styles'])
c.addStyles('wxST_SIZEGRIP')
c.setParamClass('fields', params.ParamIntP)
component.Manager.register(c)
component.Manager.setMenu(c, 'bar', 'status bar', 'wxStatusBar', 30)
component.Manager.setTool(c, 'Menus', pos=(2,0))

################################################################################

### wxBitmap

c = component.SimpleComponent('wxBitmap', ['top_level'], ['object'])
c.renameDict = {'object': ''}   # hide the attribute name in the UI
c.setSpecial('object', attribute.BitmapAttribute)
c.setParamClass('object', params.ParamBitmap)
component.Manager.register(c)
component.Manager.setMenu(c, 'TOP_LEVEL', 'bitmap', 'wxBitmap', 60)

### wxIcon

c = component.SimpleComponent('wxIcon', ['top_level'], ['object'])
c.renameDict = {'object': ''}   # hide the attribute name in the UI
c.setSpecial('object', attribute.BitmapAttribute)
c.setParamClass('object', params.ParamBitmap)
component.Manager.register(c)
component.Manager.setMenu(c, 'TOP_LEVEL', 'icon', 'wxIcon', 70)

### wxXXX

#c = component.Component('wxXXX', ['control','tool'],
#              ['pos', 'size', ...])
#c.addStyles(...)
#component.Manager.register(c)
#component.Manager.setMenu(c, 'control', 'XXX', 'wxXXX', NN)
I've been posting on the Catholic board; thought I'd let you guys know as well. FI's grandma passed away last night. We'd been expecting it for most of March. We're not sure when the funeral is, but I suspect sometime next week. We now have zero grandparents between us.
#!/usr/bin/python
# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
module: netapp_e_facts
version_added: '2.2'
short_description: Get facts about NetApp E-Series arrays
options:
  api_username:
    required: true
    description:
    - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
  api_password:
    required: true
    description:
    - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
  api_url:
    required: true
    description:
    - The url to the SANtricity WebServices Proxy or embedded REST API.
    example:
    - https://prod-1.wahoo.acme.com/devmgr/v2
  validate_certs:
    required: false
    default: true
    description:
    - Should https certificates be validated?
  ssid:
    required: true
    description:
    - The ID of the array to manage. This value must be unique for each array.
description:
    - Return various information about NetApp E-Series storage arrays (eg, configuration, disks)
author: Kevin Hulquest (@hulquest)
'''

EXAMPLES = """
---
    - name: Get array facts
      netapp_e_facts:
        # BUG FIX: the option is named `ssid` (see DOCUMENTATION and the
        # argument spec below); the example previously used `array_id`,
        # which would fail with "missing required argument: ssid".
        ssid: "{{ netapp_array_id }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        validate_certs: "{{ netapp_api_validate_certs }}"
"""

RETURN = """
msg: Gathered facts for <StorageArrayId>.
"""
import json

from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule, get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError


def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True,
            ignore_errors=False):
    """Issue an HTTP(S) request and return ``(status_code, parsed_json)``.

    Thin wrapper around ``ansible.module_utils.urls.open_url`` that parses
    the response body as JSON. On an HTTP error status the error body is
    still read and parsed. Raises ``Exception(status, data)`` for status
    codes >= 400 unless *ignore_errors* is true.
    """
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError:
        # The error response may still carry a useful JSON body; read it below.
        err = get_exception()
        r = err.fp

    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            data = None
    # Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt;
    # narrowed to Exception. NOTE(review): when ignore_errors is true and the
    # body is unreadable/invalid JSON, `data` keeps the *request* payload
    # passed in -- preserved pre-existing behavior, but worth confirming.
    except Exception:
        if ignore_errors:
            pass
        else:
            raise

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data


def main():
    """Gather facts for one E-Series array and exit via module.exit_json."""
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(
        api_username=dict(type='str', required=True),
        api_password=dict(type='str', required=True, no_log=True),
        api_url=dict(type='str', required=True),
        ssid=dict(required=True))

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    p = module.params

    ssid = p['ssid']
    validate_certs = p['validate_certs']

    api_usr = p['api_username']
    api_pwd = p['api_password']
    api_url = p['api_url']

    facts = dict(ssid=ssid)

    # Fetch the full object graph for this array in one call; all facts
    # below are carved out of this single response.
    try:
        (rc, resp) = request(api_url + "/storage-systems/%s/graph" % ssid,
                             headers=dict(Accept="application/json"),
                             url_username=api_usr, url_password=api_pwd,
                             validate_certs=validate_certs)
    # Was a bare `except:`; narrowed to Exception so fatal signals propagate.
    except Exception:
        error = get_exception()
        module.fail_json(
            msg="Failed to obtain facts from storage array with id [%s]. Error [%s]" % (ssid, str(error)))

    facts['snapshot_images'] = [
        dict(
            id=d['id'],
            status=d['status'],
            pit_capacity=d['pitCapacity'],
            creation_method=d['creationMethod'],
            # Key name (sic: "reposity") kept for backward compatibility
            # with existing consumers of this fact.
            reposity_cap_utilization=d['repositoryCapacityUtilization'],
            active_cow=d['activeCOW'],
            rollback_source=d['isRollbackSource']
        ) for d in resp['highLevelVolBundle']['pit']]

    facts['netapp_disks'] = [
        dict(
            id=d['id'],
            available=d['available'],
            media_type=d['driveMediaType'],
            status=d['status'],
            usable_bytes=d['usableCapacity'],
            tray_ref=d['physicalLocation']['trayRef'],
            product_id=d['productID'],
            firmware_version=d['firmwareVersion'],
            serial_number=d['serialNumber'].lstrip()
        ) for d in resp['drive']]

    facts['netapp_storage_pools'] = [
        dict(
            id=sp['id'],
            name=sp['name'],
            available_capacity=sp['freeSpace'],
            total_capacity=sp['totalRaidedSpace'],
            used_capacity=sp['usedSpace']
        ) for sp in resp['volumeGroup']]

    all_volumes = list(resp['volume'])
    # all_volumes.extend(resp['thinVolume'])

    # TODO: exclude thin-volume repo volumes (how to ID?)
    facts['netapp_volumes'] = [
        dict(
            id=v['id'],
            name=v['name'],
            parent_storage_pool_id=v['volumeGroupRef'],
            capacity=v['capacity'],
            is_thin_provisioned=v['thinProvisioned']
        ) for v in all_volumes]

    # Merge always-on capabilities with enabled premium features, dedupe,
    # and sort for a stable fact value.
    features = list(resp['sa']['capabilities'])
    features.extend([f['capability'] for f in resp['sa']['premiumFeatures'] if f['isEnabled']])
    features = list(set(features))  # ensure unique
    features.sort()
    facts['netapp_enabled_features'] = features

    # TODO: include other details about the storage pool (size, type, id, etc)
    result = dict(ansible_facts=facts, changed=False)
    module.exit_json(msg="Gathered facts for %s." % ssid, **result)


if __name__ == "__main__":
    main()
Before True Father (Dr. Sun Myung Moon) made his first appearance to me in a dream, Jesus appeared to me one evening, shortly after I accepted the Divine Principle, I was lying on my bed when Jesus appeared at my bedside. He told me that I must go through the crucifixion with him. At that point I was suddenly on the cross. I could see and feel the nails pierce my feet and hands. I could feel the crown of thorns on my head. I lay in my bed, writhing in the total anguish and pain of the crucifixion; I was there with Jesus on the cross. After it was over, I knew the deeper pain was in Jesus' heart, and I knew that if I continued to go this way I would experience an ongoing crucifixion of my self. How sober I felt! Another time Jesus appeared in a dream, letting me know how much he loved me, how he was a man of flesh as any other man, and that as much as he had tried and desired, he could not complete his work. For this he was most sad and, in a way, broken. I shall never forget his personal love and kindness in this dream. Jesus appeared to me again for a last time. I was in a place not unlike heaven in the spirit world. Jesus appeared in the distance and began approaching me. At one point he stopped and gave me an ancient salutation. As he did so, a voice spoke and all around me the atmosphere rumbled with the sound. It was like the voice of God and it said, "This is Jesus, the Christ." Another man dressed in Oriental garb appeared beside Jesus - it was Father. They then became one and Father went inside of Jesus, and Jesus' face became magnificently radiant as he continued to walk towards me. He took both of my hands in his, and I felt tears running down my cheeks. Satan appeared in the dream, accusing me of my past sins. Jesus only became more compassionate. And I spoke, saying, "I have waited all of my life to meet Jesus." Jesus spoke to me from his mind, "I understand." As I woke from the dream, Jesus was standing beside my bed. 
Then only his face and piercing blue eyes hung in the air against the background of the darkened room. Finally, his face and then his eyes gradually faded out of sight. From then on True Father began to appear. And since I was alone, he came to guide me in the big and little things of life. There was no way I could doubt, no way I could turn away. I quickly outlined the Principle and witnessed wherever I went. I never allowed myself to eat a meal without witnessing. I fasted much and prayed a lot. The real work had just begun. There is so much to be told from those early days. Witnessing was our life's major focus. We ate, drank and slept witnessing. Through this experience God raised me to understand His heart and the principle as a reality. Through witnessing we came to experience the reality of central biblical characters' lives. Witnessing became a joy and the highest reason to live. From the time I got up until I retired at night, I thought of little else. Because I witnessed so much, I received criticism from fellow soldiers and officers, especially from the sergeant in charge of my barracks. After an unceasing siege of criticisms and slanders, he came to me one evening after work to "humbly" ask if he could talk to me. We went into his room. He began speaking almost before I could sit down. "Burley, I owe you an apology." "Last night I had a dream about you. I was shown the fall of man and what you are teaching is true. God told me to stop persecuting you because you are His man." I didn't say much. He now wanted to hear all of "my teachings", and finally I did teach him, though he never accepted it. But Heaven was ever giving uplifting testimonies from other people who through their witness testified, in spite of themselves, to our work. Through intense witnessing I paid a lot of indemnity quickly. Within two months I had two spiritual sons.
Through an experience with one of them I was to relate again the help of Heaven and how spirit world is ever watching to help. Peter, my second spiritual son, and I drove down into Lawton one night to witness. The place was deserted. We headed back to the post, when all of a sudden I knew I had to return to Lawton. I told Peter to immediately turn the car around, that there was someone we had to meet. He was puzzled and a bit miffed at me. Still, he obeyed. With a little anger in his voice he said, "Where do you want me to go?" Without thinking I said, "City Hall." When we arrived there were several people on the lawn. Peter said, "Which one?" I pointed to an American Indian woman sitting on a park bench, "That one." I walked directly up to her and asked her if I might speak to her. She said, "Yes, I have been waiting for you." Peter nearly fainted. I asked, "How did you know?" She said, "Jesus came to me in prayer this morning and told me to go to the park bench in Lawton at the City Hall. There you will meet two young men who have a great message for you." I was so humbled. Jesus had not only appeared to me in dreams and at my bedside, he was personally opening the way for me to witness to others. Again, though the woman never accepted the Principle, I did have a chance to sing and speak at her church and made overtures to teach her. Although there was no opening, I had gained badly needed experience. I would like to close with this one final spiritual experience. I tell this experience because it testifies to the authenticity of Divine Principle and the fulfillment of its personal and universal promises. It was given not only for myself, but to be shared, that others who are walking this path can find hope and conviction to go on. Father was in Danbury the night that I had the following dream. I was riding with Father in the back seat of a gray limousine. We were on our way to a coliseum where Father was scheduled to speak.
Father turned to me and in a quiet parental voice, told me that he wanted me to speak in his place. "O.K. Father, I'll do it." When we arrived at the site he said, "Do a good job, Philip. The people are all in there, waiting to see you." "I'll do my best Father, for you." As I walked onto the arena area, I could see the place was packed. 60,000 people filled every seat. I could see our members sprinkled among the non-members. At one end of the place there was a tall stage especially erected for the talk. It was built of wood, painted light blue and had three levels to it, each being smaller than the one below, until at the top, there was only room for one person to stand. There was a ladder painted white at the back of the stage by which I was to ascend to the top. I bowed my head to say a prayer before climbing up. As I looked down, I suddenly became aware of the clothes I was wearing. I wore a dark blue jacket, a white shirt, red tie, white pants and white shoes. Out of my mouth came the word: "America". I knew I represented America. I climbed to the top and people began shouting my name and applauding wildly. This continued for some time until I put my hands up and began to speak. At that moment I woke up. This dream took place just before my 21st spiritual birthday. It was the end of a 21 year course. I knew that on True Parents' foundation, the merit of their ancestors and their work, including bringing and giving the Blessing, I had passed through an historical course for America. Even though I made mistakes many times, because I prayed in Their Name and did work in the Parents' Name, God recognized me and blessed me with that dream. This is the promise to all those who never say die and remain faithful even when everything else says "Leave! Stop! Give up!" As long as our hearts remain linked and we keep trying, God will never give up on us.
Then Heavenly Father will take our imperfect faith and offerings and add them to True Parents' great faith and great offerings, and combine this with His own merciful love and forgiveness to bring us home.
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Chromium Mac implementation of the Port interface."""

import logging

from blinkpy.web_tests.port import base

_log = logging.getLogger(__name__)


class MacPort(base.Port):
    """web_tests Port for macOS; resolves version-specific baseline fallback."""

    SUPPORTED_VERSIONS = ('mac10.10', 'mac10.11', 'mac10.12', 'mac10.13',
                          'mac10.14', 'mac10.15', 'retina')

    port_name = 'mac'

    # FIXME: We treat Retina (High-DPI) devices as if they are running a
    # different operating system version. This is lame and should be fixed.
    # Note that the retina versions fallback to the non-retina versions and so
    # no baselines are shared between retina versions; this keeps the fallback
    # graph as a tree and maximizes the number of baselines we can share that
    # way. We also currently only support Retina on 10.13.
    #
    # NOTE: construction order below matters -- later entries build on
    # earlier ones ('mac10.12' extends 'mac10.13', and so on down the chain).
    FALLBACK_PATHS = {}
    FALLBACK_PATHS['mac10.15'] = ['mac']
    FALLBACK_PATHS['mac10.14'] = ['mac']
    FALLBACK_PATHS['mac10.13'] = ['mac']
    FALLBACK_PATHS['mac10.12'] = ['mac-mac10.12'] + FALLBACK_PATHS['mac10.13']
    FALLBACK_PATHS['mac10.11'] = ['mac-mac10.11'] + FALLBACK_PATHS['mac10.12']
    FALLBACK_PATHS['mac10.10'] = ['mac-mac10.10'] + FALLBACK_PATHS['mac10.11']
    FALLBACK_PATHS['retina'] = ['mac-retina'] + FALLBACK_PATHS['mac10.13']

    CONTENT_SHELL_NAME = 'Content Shell'

    BUILD_REQUIREMENTS_URL = 'https://chromium.googlesource.com/chromium/src/+/master/docs/mac_build_instructions.md'

    @classmethod
    def determine_full_port_name(cls, host, options, port_name):
        """Append the detected OS version (or 'retina' for High-DPI) to a bare 'mac' port name."""
        if port_name.endswith('mac'):
            version = host.platform.os_version
            if host.platform.is_highdpi():
                version = 'retina'
            return port_name + '-' + version
        # Already fully qualified (e.g. 'mac-mac10.13'); return unchanged.
        return port_name

    def __init__(self, host, port_name, **kwargs):
        super(MacPort, self).__init__(host, port_name, **kwargs)
        # Extract the version suffix following 'mac-' in the full port name,
        # e.g. 'mac-mac10.13' -> 'mac10.13', 'mac-retina' -> 'retina'.
        self._version = port_name[port_name.index('mac-') + len('mac-'):]
        assert self._version in self.SUPPORTED_VERSIONS

    def check_build(self, needs_http, printer):
        """Run the base build checks; on failure, point the user at the Mac build docs."""
        result = super(MacPort, self).check_build(needs_http, printer)
        if result:
            _log.error('For complete Mac build requirements, please see:')
            _log.error('')
            _log.error('    https://chromium.googlesource.com/chromium/src/+/master/docs/mac_build_instructions.md')
        return result

    def operating_system(self):
        return 'mac'

    #
    # PROTECTED METHODS
    #

    def path_to_apache(self):
        # Apache is vendored inside the Chromium checkout rather than taken
        # from the host system.
        return self._path_from_chromium_base(
            'third_party', 'apache-mac', 'bin', 'httpd')

    def path_to_apache_config_file(self):
        config_file_basename = 'apache2-httpd-%s-php7.conf' % (self._apache_version(),)
        return self._filesystem.join(self.apache_config_directory(),
                                     config_file_basename)

    def _path_to_driver(self, target=None):
        # The driver is a macOS .app bundle; the executable lives under
        # Contents/MacOS inside it.
        return self._build_path_with_target(target, self.driver_name() + '.app',
                                            'Contents', 'MacOS', self.driver_name())
Academic freedom protects researchers so that they might discover the truth and tell it to the world. It protects teachers so that they might find and use effective ways of instructing their students. And it protects professors critical of goings on at their universities so that they might help their universities to remain sound institutions of higher learning. The discovery of truth, the dissemination of knowledge, and the care of the university are the central elements in what appears currently to be the most widespread understanding of academic freedom. According to this understanding, because we value truth, knowledge, and the university’s mission to promote both, we should value academic freedom. For my part, though, I prefer a different understanding of the nature and value of academic freedom, one that begins from a particular conception of the nature and value of the university itself. Although, on this other understanding, academic freedom continues to protect truth, knowledge, and the care of the university, none of the three is the root value that academic freedom serves. The understanding I favour conceives of the university as a community in which individuals enjoy, or aspire to enjoy, full intellectual autonomy. They enjoy, or aspire to enjoy, intellectual autonomy for themselves, but they are also committed to ensuring that the other members of the community can enjoy it along with them. The purpose of academic freedom, then, is to promote and maintain a community in which people enjoy full intellectual autonomy. We enjoy intellectual autonomy when we believe what we believe and value what we value for our own considered reasons. We are less than fully autonomous intellectually when our reasons for believing and valuing are opaque to us or are merely the causes of our mental states. The reasons why we believe or value as we do are merely causes when they consist in the pressures of punishment or reward. 
Suppose, for instance, that we believe that species evolve by means of natural selection. If we believe that they do because we don’t wish to appear ignorant or stupid, or because we crave acceptance by our peers, then we believe they do in indifference to whether in fact they do; we don’t actually care to understand the origin of species; we care, rather, not to appear ignorant or stupid. To believe something in indifference to its truth is to lack intellectual autonomy. Academic freedom prevents those who think or value differently from us from shutting us up or denying us resources. Academic freedom, then, functions to limit the pressures on our believing and valuing minds, save the pressures of evidence and argument. Since evidence and argument bear on the truth of belief and the soundness of values, those who value intellectual autonomy are keen to collect evidence and to follow the arguments. But they wish to allow only evidence and argument to influence their cognitive and affective minds. A university, one might hope, is a place at which people who value intellectual autonomy congregate so that they may pursue enquiry and study together. They wish to pursue enquiry and study together first of all because it’s pleasant and stimulating to enquire into the world alongside others, especially others who share one’s love of intellectual autonomy. But people congregate in universities also because they appreciate the benefits of constructive criticism. They recognize that by expressing one’s thoughts to others, one comes to understand those thoughts better, both their weaknesses and their strengths. They desire to believe truly and to value soundly, and see criticism as useful in attaining what they desire. 
A university in which academic freedom is valued as essential for intellectual autonomy will be a freer place, certainly, than a university in which academic freedom is valued solely for its role in discovering truth, disseminating knowledge, and caring for the university. This is because while academic freedom is essential to intellectual autonomy, it is merely useful to discovery, dissemination, and care. Indeed, as many have argued, the interests of discovery, dissemination, and care can sometimes best be furthered by limiting the freedom of members of the university community. Fruitless research, they note, does not help in the discovery of truth, while error and falsehood impede the dissemination of knowledge. Bad teaching wastes students’ time and money. As for the care of the university, when professors say stupid things or reveal to the world the woes besetting their institutions, they do more harm than good to their universities. Those who would limit the freedom of members of the university community in the interests of truth, knowledge, and the university think there is a sound principle by which they can draw limits around freedom without violating it. Academics, they say, are experts and professionals; the principle is that as experts and professionals, academics may properly be held accountable to the expert and professional standards relevant to their endeavours. They propose that the state of each discipline implies norms that one cannot violate without ceasing to be an expert in that discipline. A biologist committed to intelligent design, then, has given up real biology and, thereby, the academic freedom university biologists enjoy to pursue truth and to disseminate knowledge. Likewise, a teacher who violates in his classroom what his peers recognize as best practices should face sanctions if he doesn’t reform his ways. 
An engineering professor who says publicly that few women study engineering because women are not as good as men in math is not speaking as an engineer but as an unaccredited cognitive psychologist; because she is not speaking about engineering, she may be directed by her dean to speak only the explanation approved by the faculty of engineering or keep quiet. If, though, we value academic freedom as essential to a university community centred on intellectual autonomy, we cannot cite expert or professional standards or norms in responding to the ID biologists, unconventional graders, and offending engineers in our midst. At a university given to promoting intellectual autonomy, all these types and more would be enabled by academic freedom to continue as they wish. Of course, a university is a sort of business, trading in money, power, and status. It collects money from students, governments, industry, and alumni, and pays professors to pursue research and to teach. It rewards students with degrees and professors with acknowledgements and promotions. How can it do all that properly when wide academic freedom would remove accountability from professors? How in a university marked by wide academic freedom is order and discipline to be maintained? I’ve compared and contrasted two accounts of the nature and purpose of academic freedom, and I declared that I prefer the one according to which academic freedom removes the pressures that can prevent us from believing and valuing for our own good reasons. I’ve expressed my contention that in a university organized around intellectual autonomy, critical discussion rather than oversight and control will do all that’s needed to be done to ensure good research and teaching. I’ve said nothing, though, that might answer the question whether our culture is one in which universities dedicated to fostering intellectual autonomy might find public support. 
Mark Mercer is a professor and chair of philosophy at Saint Mary’s University and also a member of the Board of Directors of SAFS.
class StringStream:
    """A minimal pass-by-reference text cursor.

    Wraps a string together with a read position so the same buffer can be
    handed between parsing functions without repeated copying/allocation.
    Written here instead of using a library to avoid adding a dependency
    to the project.
    """

    def __init__(self, raw_text, debug=False):
        # `debug` is accepted for interface compatibility but unused.
        self.raw_text = raw_text
        self.index = 0
        self.len = len(raw_text)

    def read(self, count):
        """Return up to `count` characters starting at the current position.

        The cursor always advances by `count`, even past the end of the
        buffer; the returned chunk is clamped to the available text.
        """
        end = self.index + count
        chunk = self.raw_text[self.index:end]  # slicing clamps at the end
        self.index = end
        return chunk

    def seek(self, offset):
        """Move the cursor forward by `offset` characters (relative seek)."""
        self.index += offset

    def advance_past_chars(self, chars):
        """Consume characters until one in `chars` is hit or input ends.

        Args:
            chars (list): characters that terminate the scan

        Returns:
            The consumed text, excluding the terminating character. (When
            the end of the buffer is reached without a match, the final
            character is likewise excluded.)
        """
        begin = self.index
        while True:
            ch = self.raw_text[self.index]
            self.index += 1
            if ch in chars or self.index == self.len:
                break
        return self.raw_text[begin:self.index - 1]

    def advance_past_string_with_gdb_escapes(self, chars_to_remove_gdb_escape=None):
        """Consume a gdb-escaped string up to its closing unescaped quote.

        Backslash escapes are unwrapped: the backslash is dropped and the
        escaped character is kept. The closing '"' is consumed from the
        stream but not included in the return value.
        """
        if chars_to_remove_gdb_escape is None:
            chars_to_remove_gdb_escape = ['"']  # kept for API compatibility; unused
        pieces = []
        while True:
            ch = self.raw_text[self.index]
            self.index += 1
            if ch == '"':
                # Unescaped closing quote: stop without including it.
                break
            if ch == "\\":
                # gdb escaped the next character for us; keep only the
                # character itself, not the backslash.
                pieces.append(self.raw_text[self.index])
                self.index += 1
            else:
                pieces.append(ch)
        return "".join(pieces)
Transition from day to night with ease and grace wearing the Rebecca Taylor™ Long Sleeve Giverney Fleur Top. Ruched design at sleeve cuff. Allover leaf print with metallic star print on top.
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'src/clubs_list.ui'
#
# Created by: PyQt5 UI code generator 5.7.1
#
# WARNING! All changes made in this file will be lost!
# (Regenerate with pyuic5 from src/clubs_list.ui instead of editing by hand.)

from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_dialog_clubs(object):
    # Auto-generated UI for the modal "club list" dialog: a search line +
    # button across the top, a table of clubs, and a column of action
    # buttons (new/edit/delete/close) on the right.

    def setupUi(self, dialog_clubs):
        dialog_clubs.setObjectName("dialog_clubs")
        dialog_clubs.resize(439, 486)
        dialog_clubs.setStyleSheet("")
        dialog_clubs.setModal(True)
        self.gridLayoutWidget = QtWidgets.QWidget(dialog_clubs)
        self.gridLayoutWidget.setGeometry(QtCore.QRect(9, 9, 421, 471))
        self.gridLayoutWidget.setObjectName("gridLayoutWidget")
        self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)
        self.gridLayout.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setSpacing(10)
        self.gridLayout.setObjectName("gridLayout")
        # Search row (grid row 0): line edit + "Buscar" button.
        self.line_search_clubes = QtWidgets.QLineEdit(self.gridLayoutWidget)
        self.line_search_clubes.setObjectName("line_search_clubes")
        self.gridLayout.addWidget(self.line_search_clubes, 0, 1, 1, 1)
        self.btn_search_clubes = QtWidgets.QPushButton(self.gridLayoutWidget)
        self.btn_search_clubes.setAutoFillBackground(False)
        self.btn_search_clubes.setStyleSheet("")
        self.btn_search_clubes.setDefault(False)
        self.btn_search_clubes.setFlat(False)
        self.btn_search_clubes.setObjectName("btn_search_clubes")
        self.gridLayout.addWidget(self.btn_search_clubes, 0, 2, 1, 1)
        # Right-hand column of action buttons (grid row 1, column 2).
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.btn_new_club = QtWidgets.QPushButton(self.gridLayoutWidget)
        self.btn_new_club.setAutoFillBackground(False)
        self.btn_new_club.setStyleSheet("")
        self.btn_new_club.setDefault(False)
        self.btn_new_club.setFlat(False)
        self.btn_new_club.setObjectName("btn_new_club")
        self.verticalLayout.addWidget(self.btn_new_club)
        self.btn_edit_club = QtWidgets.QPushButton(self.gridLayoutWidget)
        self.btn_edit_club.setAutoFillBackground(False)
        self.btn_edit_club.setStyleSheet("")
        self.btn_edit_club.setFlat(False)
        self.btn_edit_club.setObjectName("btn_edit_club")
        self.verticalLayout.addWidget(self.btn_edit_club)
        self.btn_delete_club = QtWidgets.QPushButton(self.gridLayoutWidget)
        self.btn_delete_club.setAutoFillBackground(False)
        self.btn_delete_club.setStyleSheet("")
        self.btn_delete_club.setFlat(False)
        self.btn_delete_club.setObjectName("btn_delete_club")
        self.verticalLayout.addWidget(self.btn_delete_club)
        self.btn_close_club = QtWidgets.QPushButton(self.gridLayoutWidget)
        self.btn_close_club.setAutoFillBackground(False)
        self.btn_close_club.setStyleSheet("")
        self.btn_close_club.setFlat(False)
        self.btn_close_club.setObjectName("btn_close_club")
        self.verticalLayout.addWidget(self.btn_close_club)
        # Spacer pushes the buttons to the top of the column.
        spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.verticalLayout.addItem(spacerItem)
        self.gridLayout.addLayout(self.verticalLayout, 1, 2, 1, 1)
        # Main table: single full-row selection of clubs (grid row 1, column 1).
        self.table_list_clubes = QtWidgets.QTableView(self.gridLayoutWidget)
        self.table_list_clubes.setEditTriggers(QtWidgets.QAbstractItemView.AnyKeyPressed|QtWidgets.QAbstractItemView.EditKeyPressed)
        self.table_list_clubes.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
        self.table_list_clubes.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
        self.table_list_clubes.setObjectName("table_list_clubes")
        self.gridLayout.addWidget(self.table_list_clubes, 1, 1, 1, 1)

        self.retranslateUi(dialog_clubs)
        QtCore.QMetaObject.connectSlotsByName(dialog_clubs)

    def retranslateUi(self, dialog_clubs):
        # Sets all user-visible (Spanish) strings; called again on locale change.
        _translate = QtCore.QCoreApplication.translate
        dialog_clubs.setWindowTitle(_translate("dialog_clubs", "Listado de clubes"))
        self.btn_search_clubes.setText(_translate("dialog_clubs", "Buscar"))
        self.btn_new_club.setText(_translate("dialog_clubs", "Nuevo"))
        self.btn_edit_club.setText(_translate("dialog_clubs", "Editar"))
        self.btn_delete_club.setText(_translate("dialog_clubs", "Eliminar"))
        self.btn_close_club.setText(_translate("dialog_clubs", "Cerrar"))

from . import resources_rc
Die Vine Intervention podcast. Is sherry just for tannies? John Fraser and Michael Olivier were joined by David Bullard and Malcolm MacDonald to taste three sherries from Monis. Next Post Why do ZA Supermarkets have barmy booze bans?
import datetime
from math import floor, log


def format_timedelta(value, time_format='{days} days {hours} hours {minutes} minutes'):
    """Render *value* (a datetime.timedelta or a number of seconds) with
    *time_format*.

    The format string may reference: seconds, minutes, hours, days, years
    (each reduced modulo the next-larger unit), their zero-padded variants
    seconds2/minutes2/hours2, and the cumulative *_total fields.
    """
    if hasattr(value, 'seconds'):
        # timedelta: fold days into seconds; microseconds are ignored,
        # matching the int() truncation applied to plain-number inputs.
        seconds = value.seconds + value.days * 24 * 3600
    else:
        seconds = int(value)

    seconds_total = seconds

    minutes = int(floor(seconds / 60))
    minutes_total = minutes
    seconds -= minutes * 60

    hours = int(floor(minutes / 60))
    hours_total = hours
    minutes -= hours * 60

    days = int(floor(hours / 24))
    days_total = days
    hours -= days * 24

    # NOTE: years use a flat 365-day year (leap days are not considered).
    years = int(floor(days / 365))
    years_total = years
    days -= years * 365

    return time_format.format(**{
        'seconds': seconds,
        'seconds2': str(seconds).zfill(2),
        'minutes': minutes,
        'minutes2': str(minutes).zfill(2),
        'hours': hours,
        'hours2': str(hours).zfill(2),
        'days': days,
        'years': years,
        'seconds_total': seconds_total,
        'minutes_total': minutes_total,
        'hours_total': hours_total,
        'days_total': days_total,
        'years_total': years_total,
    })


def format_timedelta_secs(secs, time_format='{days} days {hours} hours {minutes} minutes'):
    """Convenience wrapper: format a plain number of seconds."""
    return format_timedelta(datetime.timedelta(seconds=secs), time_format=time_format)


def pretty_size(n, b=1024, u='B', pre=[''] + [p + 'i' for p in 'KMGTPEZY']):
    """Format a byte count using binary (IEC) prefixes, e.g. "1.00 KiB".

    *pre* is a read-only default and is never mutated.
    """
    # BUG FIX: was `pow, n = min(...), n` -- a pointless tuple assignment
    # that also shadowed the builtin pow().
    exp = min(int(log(max(n, 1), b)), len(pre) - 1)
    return "%.2f %s%s" % (n / b ** float(exp), pre[exp], u)


def pretty_size_dec(value):
    """Format a count using decimal prefixes ("Thousand", "Million", ...)."""
    return pretty_size(value, b=1000, u='', pre=['', 'Thousand', 'Million', 'Billion'])


def format_ago(current_time, ago):
    """Return "<delta> ago" for the seconds between the two timestamps,
    or 'none so far' when *ago* is falsy.

    BUG FIX: the original ended with a trailing comma, so it returned a
    1-tuple instead of a string.
    """
    return (format_timedelta_secs(current_time - ago) + ' ago') if ago else 'none so far'
Good soap, smells ok, good slip, holds foam perfectly. Not perfect on very dirty windows, but still performs as expected. Having used this over the past couple of weeks with the bad weather, this product has failed to perform next to the more popular dish soaps. Perhaps it's me not using the product correctly, but using different variations I've not managed to find a winning way when it comes to heavy weathering on glass. Otherwise, it does great on maintenance cleans. Given the price, I will be giving this a miss from now on. Have been using this product for just over two weeks, previously used Fairy Platinum. It works really well on the glass, giving just enough slip, it also leaves a great shine to both the glass and UPVC. Also it doesn't leave excessive amounts of soap on your cloths like other products. Top marks.
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file './pyqtgraph/flowchart/FlowchartCtrlTemplate.ui'
#
# Created: Mon Dec 23 10:10:51 2013
#      by: pyside-uic 0.2.14 running on PySide 1.1.2
#
# WARNING! All changes made in this file will be lost!

from PySide import QtCore, QtGui


class Ui_Form(object):
    """UI scaffold for the flowchart control panel (pyside-uic generated)."""

    def setupUi(self, Form):
        Form.setObjectName("Form")
        Form.resize(217, 499)

        # One grid layout holds the button row, file-name label and tree.
        self.gridLayout = QtGui.QGridLayout(Form)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setVerticalSpacing(0)
        self.gridLayout.setObjectName("gridLayout")

        self.loadBtn = QtGui.QPushButton(Form)
        self.loadBtn.setObjectName("loadBtn")
        self.gridLayout.addWidget(self.loadBtn, 1, 0, 1, 1)

        self.saveBtn = FeedbackButton(Form)
        self.saveBtn.setObjectName("saveBtn")
        self.gridLayout.addWidget(self.saveBtn, 1, 1, 1, 2)

        self.saveAsBtn = FeedbackButton(Form)
        self.saveAsBtn.setObjectName("saveAsBtn")
        self.gridLayout.addWidget(self.saveAsBtn, 1, 3, 1, 1)

        self.reloadBtn = FeedbackButton(Form)
        self.reloadBtn.setCheckable(False)
        self.reloadBtn.setFlat(False)
        self.reloadBtn.setObjectName("reloadBtn")
        self.gridLayout.addWidget(self.reloadBtn, 4, 0, 1, 2)

        self.showChartBtn = QtGui.QPushButton(Form)
        self.showChartBtn.setCheckable(True)
        self.showChartBtn.setObjectName("showChartBtn")
        self.gridLayout.addWidget(self.showChartBtn, 4, 2, 1, 2)

        self.ctrlList = TreeWidget(Form)
        self.ctrlList.setObjectName("ctrlList")
        self.ctrlList.headerItem().setText(0, "1")
        self.ctrlList.header().setVisible(False)
        self.ctrlList.header().setStretchLastSection(False)
        self.gridLayout.addWidget(self.ctrlList, 3, 0, 1, 4)

        # Bold, centered label showing the currently loaded file name.
        self.fileNameLabel = QtGui.QLabel(Form)
        bold_font = QtGui.QFont()
        bold_font.setWeight(75)
        bold_font.setBold(True)
        self.fileNameLabel.setFont(bold_font)
        self.fileNameLabel.setText("")
        self.fileNameLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.fileNameLabel.setObjectName("fileNameLabel")
        self.gridLayout.addWidget(self.fileNameLabel, 0, 1, 1, 1)

        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        # Bind the translation helpers once instead of repeating the
        # fully-qualified calls on every line.
        translate = QtGui.QApplication.translate
        utf8 = QtGui.QApplication.UnicodeUTF8
        Form.setWindowTitle(translate("Form", "Form", None, utf8))
        self.loadBtn.setText(translate("Form", "Load..", None, utf8))
        self.saveBtn.setText(translate("Form", "Save", None, utf8))
        self.saveAsBtn.setText(translate("Form", "As..", None, utf8))
        self.reloadBtn.setText(translate("Form", "Reload Libs", None, utf8))
        self.showChartBtn.setText(translate("Form", "Flowchart", None, utf8))


from ..widgets.TreeWidget import TreeWidget
from ..widgets.FeedbackButton import FeedbackButton
One of the most popular safety surfaces for playgrounds is rubber wet pour surfacing in Caerphilly CF81 9 and this can be installed at great prices by our team. Wetpour is a soft safety surface which may be installed under children's play equipment as a way to prevent them from being harmed if they fall off. EPDM rubber crumb can be laid in a number of different colours making a vibrant play area for children to enjoy. We are able to alter our specifications to suit individual needs. If you're on a lower budget we can offer a plain black wet pour surface with coloured graphics installed to brighten the area. EPDM rubber is installed in a number of depths to meet safety requirements. Wetpour is a impact absorbing surface which meets up with BS EN 1177 safety standards. You can find out more about these safety requirements here https://www.playground-surfaces.com/en1177/caerphilly/ Our team can carry out a number of services to ensure the surface meets up with these standards and to make certain children using the area are safe. When installing wetpour in Caerphilly CF81 9 it's will have a two layer system, the very first tier will be a shock-pad. The shock pad can decrease the possible injury that falling over may cause young children when making use of the flooring. The next coating is the EPDM rubber crumb wearing course that is set up on the top of the impact mat. The top tier can be chosen to be a great deal of various colourings to offer your establishment the personality. Wetpour can be installed using images that brighten up the facility near me while additionally educating them innovative skills. Each client has full control over the appearance the surface has. The thickness of wetpour will vary depending on the Critical Fall Height which is required. The CFH is determined by the height of play equipment above the surfacing and the existing surface which the wetpour safety surfaces will be installed on top of. 
CFH can also be considered when installing other surfaces like rubber mulch. To learn more about rubber mulch please click here https://www.playground-surfaces.com/surfaces/mulch/caerphilly/ If you do not know how to work out the CFH, please let us know the equipment height and current surfacing and we will decide on the depth of wetpour which is necessary to keep the children safe. EPDM can also be used for the daily mile. Wetpour is perfect for the daily mile since it offers a smooth surface for kids to run on. To learn more about the daily mile please click here https://www.playground-surfaces.com/facility/daily-mile/caerphilly/ If you've got this surface installed for the golden mile, it is important that you carry out regular maintenance to ensure the surface is safe for the kids. The surface is designed to always be fully porous, and therefore fluids may move through the pores inside the surfacing to avoid any floods. Servicing helps maintain these kind of properties in balance, since debris and also contaminants obstruct the tiny holes within the surface stopping water from being able to slide through. Should they be clogged up, it will retain water making it unusable. It could potentially be harmful since the surface being slippery would result in damages. However if the surface is simply too harmed for servicing, there is a fixing services. Our professional experts can carry out wetpour surface repairs, should damage occur to the area. If you'd like to find out more about our EPDM rubber flooring maintenance and repairs in Caerphilly, please fill out the contact form available. If you would like to install rubber wet pour surfacing in Caerphilly CF81 9 for your school playground, garden or pathway please fill in the contact form provided on this page. One of our local expert team closest to you will get back to you straight away with a helpful response and a free quotation if required.
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Helpers for flattening 19C MARC-XML records into Solr documents."""

import pymarc, os, re
from collections import defaultdict

from settings import DATAROOT

# Any four-digit year in the 1000-1999 range.
YEAR_P = re.compile(r"(1[0-9]{3})")


def find_a_year(args):
    """Return the first 1xxx year found in any string of *args*, or u""."""
    for term in args:
        d = YEAR_P.search(term)
        if d is not None:
            return d.groups()[0]
    return u""


def generate_marcfiles(reverse_order=False):
    """Yield (path, [pymarc records]) for each 19C_0* file under DATAROOT."""
    docfiles = sorted([x for x in os.listdir(DATAROOT) if x.startswith("19C_0")])
    if reverse_order:
        docfiles.reverse()
    for docfile in docfiles:
        docfilepath = os.path.join(DATAROOT, docfile)
        yield (docfilepath, pymarc.parse_xml_to_array(docfilepath))


def get_language(marcdoc):
    """Return the language code (008/35-37) of *marcdoc*, or "" if absent."""
    # `in` test replaces the Python-2-only dict.has_key().
    oh_eights = [x for x in marcdoc.as_dict()["fields"] if "008" in x]
    if len(oh_eights) == 1:
        # BUG FIX: previously referenced the undefined name `oh_eight`,
        # raising NameError whenever exactly one 008 field was present.
        return oh_eights[0]['008'][35:38]
    elif len(oh_eights) > 1:
        raise Exception("More than one 008 field found. Bigger problem likely")
    else:
        return ""


def get_subfield(field, marccode, subfield):
    """Yield every value of *subfield* within field[marccode]."""
    subfields = [x for x in field[marccode].get('subfields', [{}])
                 if subfield in x]
    for subf in subfields:
        yield subf[subfield]


def collate(record):
    """Group a record's fields by tag: tag -> list of fields."""
    collated = defaultdict(list)
    for field in record.get_fields():
        collated[field.tag].append(field)
    return collated


def _normalise_name(args):
    """Reduce ([name], [date], [relator]) subfield lists to plain strings."""
    name, date, relator = args
    # If no relator, assume author?
    # Spaces to "_"
    # eg "SMITH, John", "1948-", "" ---> "author/SMITH,_John__1948-"
    if not relator:
        relator = u"author"
    else:
        relator = relator[0].lower()
    if not name:
        name = u""
    else:
        name = name[0]
    if not date:
        date = u""
    else:
        date = date[0]
    return (name, date, relator)


def flatten_name(args):
    """Human-readable "relator/name date" string."""
    name, date, relator = _normalise_name(args)
    fname = u"{0}/{1}".format(relator, name)
    if date:
        fname = u"{0}/{1} {2}".format(relator, name, date)
    return fname


def flatten_name_for_facet(args):
    """Facet-safe "relator/name__date" string; spaces become underscores."""
    name, date, relator = _normalise_name(args)
    fname = u"{0}/{1}".format(relator, name)
    if date:
        fname = u"{0}/{1}__{2}".format(relator, name, date)
    return re.sub(u" ", u"_", fname)


def get_sysnum(collated_record):
    """System number from the single 001 control field, or ""."""
    if len(collated_record["001"]) == 1:
        return collated_record["001"][0].value()
    else:
        return ""


def get_lang(collated_record):
    """Language code (008/35-37) from a collated record, or ""."""
    if len(collated_record["008"]) == 1:
        return collated_record["008"][0].value()[35:38]
    else:
        return ""


def _gather_names(namefield):
    """Extract the ($a name, $d date, $e relator) subfield lists."""
    name = namefield.get_subfields("a")
    date = namefield.get_subfields("d")
    relator = namefield.get_subfields("e")
    return (name, date, relator)


def get_raw_names(collated_record):
    # personal 100 - $a name $d date $e relator
    # Corp 110 - $a name $b subgroup
    # alt name 700 - $a name $t title of previous/related work (ADD later maybe?)
    names = {'100': [], '110': []}
    for nametype in names:
        for namefield in collated_record.get(nametype, []):
            names[nametype].append(_gather_names(namefield))
    return names


def get_names(collated_record, facet=False):
    """Return flattened personal (100) and corporate (110) names.

    List comprehensions replace map() so the result is a real list on
    both Python 2 and Python 3.
    """
    names = get_raw_names(collated_record)
    flatten = flatten_name_for_facet if facet else flatten_name
    return {'personal': [flatten(n) for n in names['100']],
            'corporate': [flatten(n) for n in names['110']]}


def get_titles(collated_record):
    # A title can hide in 245 $a + $b, 240 and 130 on occasion.
    # ~99.9% of records had a main title in 245
    # and 240 + 130 coverage was below 15% so skipping for now
    # Output is still as a list, in case this changes
    if collated_record.get('245', u""):
        # collated_record is a defaultdict(list), so absent 240/130 tags
        # simply contribute empty lists here.
        maintitles = [x.value() for x in collated_record['245'] +
                      collated_record['240'] + collated_record['130']]
        return maintitles
    else:
        return u""


def get_pub_detail(collated_record):
    # 260 $a Place of Publication/Distribution
    #     $b Name of Publisher/Distrib
    #     $c date of Pub
    #     $e Place of Manuf
    #     $f manufacturer
    #     $g Manuf date
    # Near 95% coverage in the dataset
    if collated_record.get("260", u""):
        # Typically all contained in a single field.
        pubfield = collated_record['260'][0]
        # Default to manufacture ($e/$f/$g); prefer publication ($a/$b/$c).
        pdtype = u"m"
        place = pubfield.get_subfields("e")
        date = pubfield.get_subfields("f")
        maker = pubfield.get_subfields("g")
        if pubfield.get_subfields("a"):
            pdtype = u"p"
            place = pubfield.get_subfields("a")
            date = pubfield.get_subfields("c")
            maker = pubfield.get_subfields("b")

        def present_value(items):
            # Render "type/value" only when exactly one subfield was found.
            if len(items[0]) == 1:
                return u"{0}/{1}".format(items[1], items[0][0])
            return u""

        return [present_value(pair)
                for pair in [(place, pdtype), (maker, pdtype), (date, pdtype)]]
    return [u"", u"", u""]


def get_phys_desc(collated_record):
    # $a - Extent (R)
    # $b - Other physical details (NR)
    # $c - Dimensions (R)
    # $e - Accompanying material (NR)
    # $f - Type of unit (R)
    # $g - Size of unit (R)
    # $3 - Materials specified (NR)
    # $6 - Linkage (NR)
    # $8 - Field link and sequence number (R)
    # Lump it all in there?
    def iter_subf(fields):
        for x in fields:
            for y in x.get_subfields("a", "b", "c", "e", "f", "g", "3", "6"):
                yield y

    if collated_record.get("300"):
        return [y for y in iter_subf(collated_record["300"])]
    return []


def get_general_note(collated_record):
    """All 500 (general note) field values."""
    if collated_record.get("500"):
        return [x.value() for x in collated_record['500']]
    return []


def get_domids(collated_record):
    """Digital object ids ($j starting "lsid") from SFX 852 fields."""
    if collated_record.get("852"):
        sfx = [x for x in collated_record["852"]
               if x.get_subfields("c") == [u"SFX"]]
        if sfx:
            domids = [x.get_subfields("j")[0] for x in sfx
                      if x.get_subfields("j") and
                      x.get_subfields("j")[0].startswith("lsid")]
            return domids
    return []


def get_shelfmarks(collated_record):
    # ignore SFX + lsid shelfmarks, as these are harvested by the get_domids part
    marks = []
    if collated_record.get("852"):
        for sm in collated_record['852']:
            jays = sm.get_subfields("j")
            # BUG FIX: an SFX 852 field with no $j subfield previously
            # raised IndexError on `get_subfields("j")[0]`; such fields
            # are now treated as ordinary shelfmarks.
            is_sfx_lsid = (sm.get_subfields("c") == [u"SFX"] and
                           bool(jays) and jays[0].startswith("lsid"))
            if not is_sfx_lsid:
                marks.append(sm.value())
    return marks


def get_solr_doc(collated_record):
    """Assemble the Solr document dict for one collated MARC record."""
    names = get_names(collated_record)
    pubplace, maker, pubdate = get_pub_detail(collated_record)
    domids = get_domids(collated_record)
    digital = bool(domids)
    year = find_a_year([pubdate, maker, pubplace])
    doc = {'id': get_sysnum(collated_record),
           'title': get_titles(collated_record),
           'personal': names['personal'],
           'corporate': names['corporate'],
           'place': pubplace,
           'maker': maker,
           'date': pubdate,
           'year': year,
           'physdesc': get_phys_desc(collated_record),
           'general': get_general_note(collated_record),
           'domids': domids,
           'shelfmarks': get_shelfmarks(collated_record),
           'lang': get_lang(collated_record),
           'digital': digital}
    return doc
Re: Are diesel bucket vans that rare?? The main issue is fitting the diesel engine inside the engine compartment of the van itself. With the DEF systems on them the engine mass is too much for the small bay and access would be impossible with maintenance and repairs requiring massive and expensive work. The import vans IE: Mercedes Benz, Isuzu and others aren't a popular enough option for the bucket manufacturing companies to use but that should be changing now with these new 4 cylinder motors that Ford and the other Big 3 vehicle manufacturers are introducing. It should be an option before long so hang in there!!
"""Simple tree-like structure""" import copy class DictIter(object): """Iterator through the tree""" __slots__ = 'data', 'keys', 'index' def __init__(self, data): self.data = data self.keys = sorted(data.keys()) self.index = -1 # ready to iterate on the next() call def __next__(self): """ Return the next item in the container Once we go off the list we stay off even if the list changes """ self.index += 1 if self.index >= len(self.keys): raise StopIteration return self.data[self.keys[self.index]] class RBDict(object): """Sorted dictionary""" __slots__ = 'data', 'changed' def __init__(self, initial=None, changes=False): self.data = {} if changes: self.changed = {} else: self.changed = None if initial: for key, value in initial.items(): self[key] = value def remember_changes(self, remember_changes=True): """Start or stop remembering changes on this Set""" self.changed = {} if remember_changes else None def changes(self): """Get the list of changes with old and new value, clear the changes""" if self.changes is None: raise AttributeError("No change recoding supported on this Set") res = [( self.changed[chkey], self.data[chkey] if chkey in self.data else None ) for chkey in sorted(self.changed.keys())] self.changed.clear() return res def has_changes(self): """Return if there are changes in this rbtree""" return self.changed is not None and len(self.changed) > 0 def restore(self, key): """Try to restore the old changed record""" if self.changed is not None and key in self.changed: self.data[key] = self.changed[key] del self.changed[key] def __getitem__(self, key): return self.data[key] def __setitem__(self, key, value): if self.changed is not None and key not in self.changed: if key in self.data: raise ValueError("Remove an item before storing a changed one") self.changed[key] = None self.data[key] = value def __delitem__(self, key): if self.changed is not None and key not in self.changed: if key in self.data: self.changed[key] = copy.copy(self.data[key]) del 
self.data[key] def get(self, key, default=None): """Get a key from the dictionary with a default""" if key in self.data: return self.data[key] return default def __iter__(self): return DictIter(self.data) def __len__(self): return len(self.data) def keys(self): """Return all keys""" return sorted(self.data.keys()) def values(self): """Return all values""" return [self.data[k] for k in self.keys()] def items(self): """Return all items""" return [(k, self.data[k]) for k in self.keys()] def __contains__(self, key): return key in self.data def clear(self): """delete all entries""" self.data.clear() if self.changed is not None: self.changed.clear() def copy(self): """return shallow copy""" # there may be a more efficient way of doing this return RBDict(self) def update(self, other): """Add all items from the supplied mapping to this one. Will overwrite old entries with new ones.""" for key in other.keys(): self[key] = other[key] def __repr__(self): ls = ['{'] for k, v in self.items(): if len(ls) > 1: ls.append(', ') ls.append(k) ls.append('=') ls.append(str(v)) ls.append('}') return ''.join(ls)
A well-celebrated festival for brothers, Bhai Dooj holds the magic of bonding the brothers and sisters on this very eminent day in order to celebrate it in utmost vigor. While brothers make sure to get their adorable sisters a gift of the year, sisters can too put their best foot forward with this endless variety that caters to pamper their brothers in a unique way. This is indeed one of the best quotes on siblings that would never go wrong on Bhai Dooj to celebrate the strong bond you share with your brother. As you grow older you realize that no matter how many fights you have had with your brother, he will always be there to stand by your side, cheer you up in your sorrows and be your superhero during the tough times. Right? So, on the auspicious occasion of Bhai Dooj, don’t miss the opportunity to thank your brother for always being with you. The bond of love, care, and affection that you share with your brother cannot be expressed in words. Right? But, you can surely present him meaningful Bhai Dooj gift to make him feel special. So, if you are looking for some amazing and unique Bhai Dooj gift ideas for your brother then, you are in the right place. We, at FlowerAura, offer you a delightful range of Bhai Dooj gifts for Brother that are sure to convey your emotions in the most amazing manner. We have an exclusive collection of Bhai Dooj gifts for brother online which includes mugs, cushions, greetings, personalised keychains, flowers, chocolates, cakes, grooming kits, wallets, cologne, and many more. The best thing is that now you don’t have to go outside to buy a Bhaubeej gift for brother as you can order one online from us comfortably seated anywhere. All you need to do is to visit FlowerAura.com, select a Bhai Phota gift for Brother, add your address details, and place your order. Your ordered Bhai Phota gifts for brother will be delivered to your doorstep by our delivery team via an express delivery service. 
So, go ahead to show your adoration for your doting and beloved brother with our wide range of alluring and attractive Bhai Dooj 2018 gifts for brother. This joyous festival of Bhai Dooj strengthens the bond between a brother in a sister. So, if you want to build your bond stronger with your brother, don’t forget to surprise him with the best Bhai Dooj gift from FlowerAura.We have a wide variety of mouth-watering sweets like Kaju Katli, Rasagulla, gulab jamun, Soan Papdi etc. You can also surprise your brother with our collection of scrumptious cakes like chocolate, vanilla, red velvet, butterscotch, strawberry, black forest, truffle which are available in amazing combos. Also because chocolates make the best gift for your brother on Bhaubeej, We offer you a delectable range of chocolates like Ferrero Rocher, Cadbury Temptations, Dairy Milk, Bournville, Cadbury celebrations and many more. Apart from sweets and personalized gifts, we also have amazing greeting cards that would help you to express your emotions towards your brother in a wonderful manner. So, log in to our website, check our amazing collection of Bhai Phota gifts, and grab the best gift for your brother on Bhai Dooj. With the aid of our express delivery service, now you can also send Bhai Dooj gifts for brother In India. Yes, if your brother lives miles away from you and cannot make it to your place on the festival of Bhai Dooj then, don’t be upset because you can send him your tokens of love from our online gift portal. You can send a Bhai Dooj pooja thali or chocolates or sweets or other creative gifts and surprise him in a mesmerizing way. You can also send Bhai Dooj greetings to brother along with some delicious sweets or chocolates and celebrate the love for each other. With the aid of our same-day delivery service, you can send Bhaubeej gifts for brothers on the same day of your order placement. 
You can also surprise your beloved brother by sending gifts at midnight via our midnight delivery services. You can also avail our next day delivery option if you are not available to receive your orders on the same day. And, guess what? With us, you can enjoy free shipping too. So, on this Bhai Dooj, gift your brother an everlasting memory that he will always cherish.
# Copyright 2013 Dan Smith <dsmith@danplanet.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from chirp.drivers import icf
from chirp import chirp_common, errors, directory, bitwise

# Memory layout of the IC-208H clone image, parsed by chirp.bitwise.
MEM_FORMAT = """
struct memory {
  u24 freq;
  u16 offset;
  u8  power:2,
      rtone:6;
  u8  duplex:2,
      ctone:6;
  u8  unknown1:1,
      dtcs:7;
  u8  tuning_step:4,
      unknown2:4;
  u8  unknown3;
  u8  alt_mult:1,
      unknown4:1,
      is_fm:1,
      is_wide:1,
      unknown5:2,
      tmode:2;
  u16 dtcs_polarity:2,
      usealpha:1,
      empty:1,
      name1:6,
      name2:6;
  u24 name3:6,
      name4:6,
      name5:6,
      name6:6;
};

struct memory memory[510];

struct {
  u8 unknown1:1,
     empty:1,
     pskip:1,
     skip:1,
     bank:4;
} flags[512];

struct memory call[2];
"""

MODES = ["AM", "FM", "NFM", "NAM"]
TMODES = ["", "Tone", "TSQL", "DTCS"]
DUPLEX = ["", "", "-", "+"]
DTCS_POL = ["NN", "NR", "RN", "RR"]
STEPS = [5.0, 10.0, 12.5, 15, 20.0, 25.0, 30.0, 50.0, 100.0, 200.0]
POWER = [chirp_common.PowerLevel("High", watts=50),
         chirp_common.PowerLevel("Low", watts=5),
         chirp_common.PowerLevel("Mid", watts=15),
         ]

# Scan-edge special channels: 1A/1B .. 5A/5B.
IC208_SPECIAL = []
for i in range(1, 6):
    IC208_SPECIAL.append("%iA" % i)
    IC208_SPECIAL.append("%iB" % i)

# Radio character table: 6-bit code -> displayable character.
# BUG FIX: zip() returns an iterator on Python 3, which cannot be
# concatenated with "+"; wrap each zip() in list() so this table builds
# on both Python 2 and Python 3.
CHARSET = dict(list(zip([0x00, 0x08, 0x09, 0x0a, 0x0b, 0x0d, 0x0f],
                        " ()*+-/")) +
               list(zip(range(0x10, 0x1a), "0123456789")) +
               [(0x1c, '|'), (0x1d, '=')] +
               list(zip(range(0x21, 0x3b), "ABCDEFGHIJKLMNOPQRSTUVWXYZ")))
CHARSET_REV = dict(zip(CHARSET.values(), CHARSET.keys()))


def get_name(_mem):
    """Decode the name from @_mem"""
    def _get_char(val):
        try:
            return CHARSET[int(val)]
        except KeyError:
            return "*"

    name_bytes = [_mem.name1, _mem.name2, _mem.name3,
                  _mem.name4, _mem.name5, _mem.name6]
    name = ""
    for val in name_bytes:
        name += _get_char(val)
    return name.rstrip()


def set_name(_mem, name):
    """Encode @name in @_mem"""
    def _get_index(char):
        try:
            return CHARSET_REV[char]
        except KeyError:
            return CHARSET_REV["*"]

    name = name.ljust(6)[:6]
    # The alpha tag is only shown when usealpha is set.
    _mem.usealpha = bool(name.strip())
    # The element override calling convention makes this harder to automate.
    # It's just six, so do it manually
    _mem.name1 = _get_index(name[0])
    _mem.name2 = _get_index(name[1])
    _mem.name3 = _get_index(name[2])
    _mem.name4 = _get_index(name[3])
    _mem.name5 = _get_index(name[4])
    _mem.name6 = _get_index(name[5])


@directory.register
class IC208Radio(icf.IcomCloneModeRadio):
    """Icom IC-208H"""
    VENDOR = "Icom"
    MODEL = "IC-208H"

    _model = "\x26\x32\x00\x01"
    _memsize = 0x2600
    _endframe = "Icom Inc\x2e30"
    _can_hispeed = True
    _memories = []
    _ranges = [(0x0000, 0x2600, 32)]

    def get_features(self):
        """Describe this radio's capabilities to the chirp core."""
        rf = chirp_common.RadioFeatures()
        rf.memory_bounds = (1, 500)
        rf.has_bank = True
        rf.valid_tuning_steps = list(STEPS)
        rf.valid_tmodes = list(TMODES)
        rf.valid_modes = list(MODES)
        rf.valid_duplexes = list(DUPLEX)
        rf.valid_power_levels = list(POWER)
        rf.valid_skips = ["", "S", "P"]
        rf.valid_bands = [(118000000, 174000000),
                          (230000000, 550000000),
                          (810000000, 999995000)]
        rf.valid_special_chans = ["C1", "C2"] + sorted(IC208_SPECIAL)
        rf.valid_characters = "".join(CHARSET.values())
        return rf

    def get_raw_memory(self, number):
        _mem, _flg, index = self._get_memory(number)
        return repr(_mem) + repr(_flg)

    def process_mmap(self):
        self._memobj = bitwise.parse(MEM_FORMAT, self._mmap)

    def _get_bank(self, loc):
        # Bank values of 0x0A and above mean "no bank".
        _flg = self._memobj.flags[loc-1]
        if _flg.bank >= 0x0A:
            return None
        else:
            return _flg.bank

    def _set_bank(self, loc, bank):
        _flg = self._memobj.flags[loc-1]
        if bank is None:
            _flg.bank = 0x0A
        else:
            _flg.bank = bank

    def _get_memory(self, number):
        """Map a memory number/name to (raw memory, flags, display index).

        Call channels C1/C2 are stored in `call[]` with flags 510/511 and
        are exposed with negative display indexes -10/-9.
        """
        if isinstance(number, str):
            if "A" in number or "B" in number:
                # Scan-edge specials live at the top of the memory array.
                index = 501 + IC208_SPECIAL.index(number)
                _mem = self._memobj.memory[index - 1]
                _flg = self._memobj.flags[index - 1]
            else:
                # "C1"/"C2" call channels.
                index = int(number[1]) - 1
                _mem = self._memobj.call[index]
                _flg = self._memobj.flags[510 + index]
                index = index + -10
        elif number <= 0:
            # Negative display index back to a call channel slot.
            index = 10 - abs(number)
            _mem = self._memobj.call[index]
            _flg = self._memobj.flags[index + 510]
        else:
            index = number
            _mem = self._memobj.memory[number - 1]
            _flg = self._memobj.flags[number - 1]
        return _mem, _flg, index

    def get_memory(self, number):
        """Build a chirp_common.Memory from the raw image."""
        _mem, _flg, index = self._get_memory(number)

        mem = chirp_common.Memory()
        mem.number = index
        if isinstance(number, str):
            mem.extd_number = number
        else:
            mem.skip = _flg.pskip and "P" or _flg.skip and "S" or ""

        if _flg.empty:
            mem.empty = True
            return mem

        # Frequencies are stored in units of 5 kHz, or 6.25 kHz when the
        # alt_mult flag is set.
        mult = _mem.alt_mult and 6250 or 5000
        mem.freq = int(_mem.freq) * mult
        mem.offset = int(_mem.offset) * 5000
        mem.rtone = chirp_common.TONES[_mem.rtone]
        mem.ctone = chirp_common.TONES[_mem.ctone]
        mem.dtcs = chirp_common.DTCS_CODES[_mem.dtcs]
        mem.dtcs_polarity = DTCS_POL[_mem.dtcs_polarity]
        mem.duplex = DUPLEX[_mem.duplex]
        mem.tmode = TMODES[_mem.tmode]
        mem.mode = ((not _mem.is_wide and "N" or "") +
                    (_mem.is_fm and "FM" or "AM"))
        mem.tuning_step = STEPS[_mem.tuning_step]
        mem.name = get_name(_mem)
        mem.power = POWER[_mem.power]
        return mem

    def set_memory(self, mem):
        """Write a chirp_common.Memory back into the raw image."""
        _mem, _flg, index = self._get_memory(mem.number)

        if mem.empty:
            _flg.empty = True
            self._set_bank(mem.number, None)
            return

        if _flg.empty:
            # Zero out a previously-empty slot before populating it.
            _mem.set_raw("\x00" * 16)
        _flg.empty = False

        _mem.alt_mult = chirp_common.is_fractional_step(mem.freq)
        _mem.freq = mem.freq / (_mem.alt_mult and 6250 or 5000)
        _mem.offset = mem.offset / 5000
        _mem.rtone = chirp_common.TONES.index(mem.rtone)
        _mem.ctone = chirp_common.TONES.index(mem.ctone)
        _mem.dtcs = chirp_common.DTCS_CODES.index(mem.dtcs)
        _mem.dtcs_polarity = DTCS_POL.index(mem.dtcs_polarity)
        _mem.duplex = DUPLEX.index(mem.duplex)
        _mem.tmode = TMODES.index(mem.tmode)
        _mem.is_fm = "FM" in mem.mode
        _mem.is_wide = mem.mode[0] != "N"
        _mem.tuning_step = STEPS.index(mem.tuning_step)
        set_name(_mem, mem.name)

        try:
            _mem.power = POWER.index(mem.power)
        except Exception:
            # Best effort: leave power untouched for unknown levels.
            pass

        if not isinstance(mem.number, str):
            _flg.skip = mem.skip == "S"
            _flg.pskip = mem.skip == "P"
We want you to LOVE your bulldog boutique purchase. If you are not 100% happy with your item(s), we accept returns within 14 days of your receipt of the item(s). Item(s) must be in original condition with all tags attached. Items(s) must be unworn, unwashed and free from alterations and/or damages. We will refund to your original form of payment within 2 business days of our receipt of your returned item(s). Please allow up to 10 days to see your refund displayed on your original form of payment. bulldog is not responsible for return shipping charges for orders under $100. Sale merchandise is final sale and cannot be returned. Online class purchases are not returnable. All returns must be authorized. To process a return please call 866-523-2997 or email BDYoga@pickSP.com, both of which will connect you directly to our fulfillment center, Sure-Pak, LLC.
#!/usr/bin/env python
"""
This is a script for starting postgres for development purposes in an SMC project.
"""

import os, sys, time, util

# Run from (and import relative to) the directory containing this script.
path = os.path.split(os.path.realpath(__file__))[0]
os.chdir(path)
sys.path.insert(0, path)

PG_DATA = os.path.join(path, "postgres_data")

if not os.path.exists(PG_DATA):
    # First run: initialize the cluster and configure socket-only access.
    util.cmd("pg_ctl init -D '%s'" % PG_DATA)

    # Lock down authentication so it is ONLY via unix socket.
    # FIX: use `with` blocks for all file handles below so they are
    # closed (and flushed) deterministically instead of relying on
    # CPython refcounting -- critical for the conf file that postgres
    # reads immediately afterwards.
    with open(os.path.join(PG_DATA, 'pg_hba.conf'), 'w') as hba:
        hba.write("""
# This is safe since we only enable a socket protected by filesystem permissions:
local all all trust

# You can uncomment this and comment out the above if you want to test password auth.
#local all all md5
""")

    # Make it so the socket is in this subdirectory, so that it is
    # protected by UNIX permissions. This approach avoids any need
    # for accounts/passwords for development and the Docker image.
    conf = os.path.join(PG_DATA, 'postgresql.conf')
    with open(conf) as conf_file:
        s = conf_file.read()
    s += '\n'

    # Move the default directory where the socket is from /tmp to right here.
    socket_dir = os.path.join(PG_DATA, 'socket')
    s += "unix_socket_directories = '%s'\nlisten_addresses=''\n" % socket_dir
    os.makedirs(socket_dir)
    util.cmd("chmod og-rwx '%s'" % PG_DATA)  # just in case -- be paranoid...

    with open(conf, 'w') as conf_file:
        conf_file.write(s)

    # Create script so that clients will know where socket dir is.
    with open("postgres-env", 'w') as env_file:
        env_file.write("""#!/bin/sh
export PGUSER='smc'
export PGHOST='%s'
""" % socket_dir)
    util.cmd('chmod +x postgres-env')

    # Start database running in background as daemon
    util.cmd("postgres -D '%s' >%s/postgres.log 2>&1 &" % (PG_DATA, PG_DATA))
    time.sleep(5)

    # Create the smc user with no password (not needed since we are using
    # local file permissions)
    util.cmd("unset PGUSER; unset PGHOST; createuser -h '%s' -sE smc" % socket_dir)

    # Stop database daemon
    with open(os.path.join(PG_DATA, 'postmaster.pid')) as pid_file:
        postmaster_pid = pid_file.read().split()[0]
    util.cmd("kill %s" % postmaster_pid)

    # Let it die and remove lock file.
    time.sleep(3)

# Run postgres in the foreground on the (now configured) data directory.
util.cmd("postgres -D '%s'" % PG_DATA)
SECTION 200. (a) There shall be within the department and reporting to the secretary a control board. (b) The control board shall consist of 5 members to be appointed by the governor, 3 of whom shall be members of the board, 1 of whom shall have experience in transportation finance and 1 of whom shall have experience in mass transit operation. Each member shall serve for the entire time that the control board exists unless removed with or without cause by the governor. Any vacancy in the control board shall be filled in the same manner as the original appointment of the member being replaced. The secretary shall designate the chair of the control board, and may fix annual time and expense reimbursements of not more than $6,000 to be paid to members of the control board from funds of the department. Directors serving on the control board may receive the time and expense reimbursements in addition to any payments provided to directors under subsection (b) of section 2 of chapter 6C of the General Laws. The control board shall meet as regularly as necessary to ensure the stability of authority operations and finances but not less than 3 times per month. (c) Notwithstanding sections 3 and 7 of chapter 161A of the General Laws or any other general or special law to the contrary, the control board shall be afforded all powers, responsibilities and obligations relative to the authority that are vested in the board, except as otherwise provided in this act, and those powers, responsibilities and obligations set forth in this act. (d) Meetings of the control board shall be subject to sections 18 to 25, inclusive, of chapter 30A of the General Laws. Records of the control board shall be subject to section 10 of chapter 66 of the General Laws. (e) The control board may employ, retain and supervise managerial, professional and clerical staff as may be necessary to carry out its responsibilities. 
The control board may set the compensation, terms and conditions of employment of its staff. Staff hired under this subsection shall be employees of the department except employees that the control board formally designates as independent contractors; provided, however, that the independent contractors shall be treated as executives under section 26 of said chapter 161A. (f) The control board shall initiate and assure the implementation of appropriate measures to secure the fiscal, operational and managerial stability of the authority and shall continue in existence until June 30, 2018 and thereafter for an additional period of 2 years upon the recommendation by the control board to the governor in writing based upon specific findings that such 2-year period is in the best interest of the public and necessary to achieve operational stability and establish performance metrics for the authority; provided, however, that the recommendation shall be approved in writing by the governor not later than June 30, 2018; and provided further, that the control board shall in no event continue beyond June 30, 2020. The recommendations and findings by the control board, if any, and the governor's approval, if any, shall be filed with the clerks of the house of representatives and senate, the chairs of the house and senate committees on ways and means and the chairs of the joint committee on transportation. (g) Following the dissolution of the control board, the board shall resume governance of the authority.
# -*- coding: utf-8 -*-
"""Public section, including homepage and signup."""
import ujson
import urllib
import datetime

# Python 3 moved urllib2 into urllib.request and urlencode into urllib.parse;
# mirror the existing urllib2 fallback for urlencode as well (the original
# called the Python-2-only urllib.urlencode, which breaks under Python 3).
try:
    import urllib.request as urllib2
    from urllib.parse import urlencode
except ImportError:
    import urllib2
    from urllib import urlencode

from flask import (
    Blueprint, request, render_template, flash, current_app, abort
)
from flask.ext.login import current_user, login_user, logout_user

from feedback.extensions import login_manager
from feedback.user.models import User

blueprint = Blueprint('public', __name__, static_folder="../static")

# Retained for backward compatibility with any importers; views no longer use
# it because a module-level value freezes the date at process start.
today = datetime.date.today()


def _today_str():
    """Return today's date formatted for display, e.g. 'January 01, 2016'.

    BUG FIX: the original computed ``today`` once at import time, so a
    long-running server displayed a stale date.  Compute it per request.
    """
    return datetime.date.today().strftime('%B %d, %Y')


@login_manager.user_loader
def load_user(id):
    """flask-login user loader: the session id is the user's email."""
    return User.query.filter_by(email=id).first()


@blueprint.route('/login', methods=['GET'])
def login():
    """Render the login page."""
    return render_template("user/login.html",
                           current_user=current_user,
                           date=_today_str(),)


@blueprint.route('/logout', methods=['GET', 'POST'])
def logout():
    """Log the current user out.

    Persona-initiated logouts (``?persona=...``) get a bare 'OK' AJAX
    response; browser logouts get a flash message and the logout page.
    """
    logout_user()
    if request.args.get('persona', None):
        return 'OK'
    else:
        flash('You are logged out.', 'info')
        return render_template('user/logout.html', date=_today_str(),)


@blueprint.route('/auth', methods=['POST'])
def auth():
    '''
    Endpoint from AJAX request for authentication from persona

    Verifies the browser-supplied assertion with the Mozilla Persona
    verifier.  Known users are logged in; unknown users whose email domain
    is in CITY_DOMAINS are created and logged in; everyone else gets 403.
    '''
    data = urlencode({
        'assertion': request.form.get('assertion'),
        'audience': current_app.config.get('BROWSERID_URL')
    }).encode('utf-8')  # urlopen requires bytes on Python 3; no-op on Python 2
    req = urllib2.Request('https://verifier.login.persona.org/verify', data)

    response = ujson.loads(urllib2.urlopen(req).read())

    current_app.logger.debug(
        'LOGIN: status from persona: {}'.format(response))

    if response.get('status') != 'okay':
        current_app.logger.debug(
            'REJECTEDUSER: User login rejected from persona. Messages: {}'.format(response))
        abort(403)

    next_url = request.args.get('next', None)
    email = response.get('email')
    user = User.query.filter(User.email == email).first()
    domain = email.split('@')[1] if len(email.split('@')) > 1 else None

    if user:
        login_user(user)
        flash('Logged in successfully!', 'alert-success')
        current_app.logger.debug(
            'LOGIN: User {} logged in successfully'.format(user.email))
        return next_url if next_url else '/'
    elif domain in current_app.config.get('CITY_DOMAINS'):
        user = User.create(email=email)
        login_user(user)
        current_app.logger.debug(
            'NEWUSER: New User {} successfully created'.format(user.email))
        return '/'
    else:
        current_app.logger.debug(
            'NOTINDB: User {} not in DB -- aborting!'.format(email))
        abort(403)
The very first thing you should learn about intervals is what's called enharmonics. Enharmonics are the exact same pitch, just spelt differently. For example, "C"-sharp is the same key on the piano as "D"-flat. Why are they spelled differently if they sound the same? The answer will eventually become obvious as you learn. Keeping in mind the differences with "E" and "B" from the previous paragraph, "E"-sharp is the enharmonic of "F". The note "C"-flat is the enharmonic of "B". The next step is to figure out what quality the interval is. There are five types of qualities: major, minor, augmented, diminished, and perfect. The chart below refers to figures 1-12 to show how many half steps there are in the major, minor, and perfect intervals. If a perfect or major interval is made one half step larger (without changing its interval number) it becomes augmented. If a perfect or minor interval is made one half step smaller (without changing its interval number) it becomes diminished. An augmented fourth (and its enharmonic - a diminished fifth) is called a tritone. Enharmonic intervals are intervals with the same sound that are spelled differently. If you were to play the above example on a piano, you would find that you would be playing the exact same notes each measure. They all sound the same, but are spelled differently. The reason for spelling them differently becomes obvious when you learn more about theory. Care must be taken in spelling intervals. If a specific interval is requested, the enharmonic-equivalent spelling is not correct. Thus, if a major third above E-flat is called for, a diminished fourth above D-sharp is not correct, even though the sound is the same. The inversion of an interval means that the lower tone of an interval becomes the higher tone, or the higher tone becomes the lower tone. Intervals greater than an octave are called compound intervals. These intervals are named in a similar manner to the intervals within an octave (simple intervals).
The compound intervals are often labeled as their simple equivalents — as if an octave were removed from the interval. The compound names are used only if it is important to stress the exact interval size.
from .django_settings import *

from wooey.version import DJANGO_VERSION, DJ110

from django.utils.translation import ugettext_lazy as _

INSTALLED_APPS += (
    # 'corsheaders',
    'wooey',
)

# Django 1.10 renamed MIDDLEWARE_CLASSES to MIDDLEWARE.  Append the project's
# exception middleware to whichever list this Django version uses and keep a
# reference to it in MIDDLEWARE_OBJ for the rebuild step below.
if DJANGO_VERSION < DJ110:
    MIDDLEWARE_CLASSES = list(MIDDLEWARE_CLASSES)
    MIDDLEWARE_CLASSES.append('{{ project_name }}.middleware.ProcessExceptionMiddleware')
    MIDDLEWARE_OBJ = MIDDLEWARE_CLASSES
else:
    # Using Django 1.10 +
    MIDDLEWARE = list(MIDDLEWARE)
    MIDDLEWARE.append('{{ project_name }}.middleware.ProcessExceptionMiddleware')
    MIDDLEWARE_OBJ = MIDDLEWARE

LANGUAGES = [
    ('de', _('German')),
    ('en', _('English')),
    ('fr', _('French')),
    ('ja', _('Japanese')),
    ('nl', _('Dutch')),
    ('zh-hans', _('Simplified Chinese')),
    ('ko', _('Korean')),
    ('es', _('Spanish'))
]

# Rebuild the middleware list so LocaleMiddleware sits directly after
# SessionMiddleware (the position Django's locale detection requires).
# BUG FIX: the original appended ProcessExceptionMiddleware a second time
# after this loop even though it was already added above, so the middleware
# appeared twice in the final list and ran twice per request.
NEW_MIDDLEWARE = []
for i in MIDDLEWARE_OBJ:
    NEW_MIDDLEWARE.append(i)
    if i == 'django.contrib.sessions.middleware.SessionMiddleware':
        NEW_MIDDLEWARE.append('django.middleware.locale.LocaleMiddleware')

if DJANGO_VERSION < DJ110:
    MIDDLEWARE_CLASSES = NEW_MIDDLEWARE
else:
    MIDDLEWARE = NEW_MIDDLEWARE

PROJECT_NAME = "{{ project_name }}"
WOOEY_CELERY_APP_NAME = 'wooey.celery'
WOOEY_CELERY_TASKS = 'wooey.tasks'
(VMS) lenses containing high grade zinc, lead, and silver. to any price recovery that may occur. Low corporate overheads. Track record in capital-efficient funding. Argent Minerals Limited is an ASX listed public company focused on creating shareholder wealth through the discovery, extraction and marketing of precious and base metals. A key goal of the Company is to become a leading Australian polymetallic producer, mining 1.5 million tonnes per annum with a mine life of the order of 20 years, the success of which will also result in a significant contribution to federal, state, regional and local economies by way of employment, taxes, and royalties. The Company’s project assets are situated in the Lachlan Orogen in New South Wales, Australia, a richly mineralised geological terrane extending from northern NSW through Victoria and into Tasmania. Argent Minerals’ three projects, in each of which the Company owns a controlling interest, are strategically positioned within a compelling neighbourhood that is home to Australia’s first discovery of gold, and today hosts world class deposits including one of the largest underground copper-gold mines in the southern hemisphere, Newcrest’s Cadia Valley Operations.
#! /usr/bin/env python
# Scan a VASP LOCPOT potential grid in fixed-size cubes and classify each
# cube as vacuum or non-vacuum from the variance of the potential within it.
# NOTE: Python 2 script (print statements, itertools.izip import).

import macrodensity as md
import math
import numpy as np
import matplotlib.pyplot as plt
import csv
from itertools import izip

#------------------------------------------------------------------
# Get the potential
# This section should not be altered
#------------------------------------------------------------------
# vasp_pot: flattened potential values; NGX/NGY/NGZ: FFT grid dimensions;
# Lattice: lattice matrix from the LOCPOT header — TODO confirm against the
# macrodensity read_vasp_density docs.
vasp_pot, NGX, NGY, NGZ, Lattice = md.read_vasp_density('LOCPOT.slab')
vector_a,vector_b,vector_c,av,bv,cv = md.matrix_2_abc(Lattice)
# Real-space spacing of the grid along each lattice vector.
resolution_x = vector_a/NGX
resolution_y = vector_b/NGY
resolution_z = vector_c/NGZ
grid_pot, electrons = md.density_2_grid(vasp_pot,NGX,NGY,NGZ)
# Cubes whose potential variance is <= this threshold are counted as vacuum.
cutoff_varience = 1E-4
# Whimsical scaling constant used only for the run-time "beer" estimate below.
hanksConstant = 4.89E-7
## Get the gradiens (Field), if required.
## Comment out if not required, due to compuational expense.
#grad_x,grad_y,grad_z = np.gradient(grid_pot[:,:,:],resolution_x,resolution_y,resolution_z)
#------------------------------------------------------------------

##------------------------------------------------------------------
## Get the equation for the plane
## This is the section for plotting on a user defined plane;
## uncomment commands if this is the option that you want.
##------------------------------------------------------------------
## Input section (define the plane with 3 points)
#a_point = [0, 0, 0]
#b_point = [1, 0, 1]
#c_point = [0, 1, 0]
## Convert the fractional points to grid points on the density surface
#a = pot.numbers_2_grid(a_point,NGX,NGY,NGZ)
#b = pot.numbers_2_grid(b_point,NGX,NGY,NGZ)
#c = pot.numbers_2_grid(c_point,NGX,NGY,NGZ)
#plane_coeff = pot.points_2_plane(a,b,c)
## Get the gradients
#XY = np.multiply(grad_x,grad_y)
#grad_mag = np.multiply(XY,grad_z)
## Create the plane
#xx,yy,grd = pot.create_plotting_mesh(NGX,NGY,NGZ,plane_coeff,grad_x)
## Plot the surface
#plt.contourf(xx,yy,grd,V)
#plt.show()
##------------------------------------------------------------------
##------------------------------------------------------------------

##------------------------------------------------------------------
## Plotting a planar average (Field/potential) throughout the material
##------------------------------------------------------------------
## FIELDS
#planar = pot.planar_average(grad_x,NGX,NGY,NGZ)
## POTENTIAL
#planar = pot.planar_average(grid_pot,NGX,NGY,NGZ)
## MACROSCOPIC AVERAGE
#macro = pot.macroscopic_average(planar,4.80,resolution_z)
#plt.plot(planar)
#plt.plot(macro)
#plt.savefig('Planar.eps')
#plt.show()
##------------------------------------------------------------------
##------------------------------------------------------------------

##------------------------------------------------------------------
# Getting the average potential in a single cube of arbitrary size
##------------------------------------------------------------------
## cube defines the size of the cube in units of mesh points (NGX/Y/Z)
cube = [2,2,2]
## origin defines the bottom left point of the cube the "0,0,0" point in fractional coordinates
origin = [0,0,0]
## Uncomment the lines below to do the business
vacuum = []
non_vacuum = []
# Joke run-time estimate, proportional to the total number of grid points.
beers = hanksConstant*NGX*NGY*NGZ
pinners = beers*1.5
print "You have time for %.1f oz of beer "%beers
print "... or %.1f oz of Pinner (TM)."%pinners
# Walk the grid in cube-sized strides.  md.voulme_average (sic — the library's
# function name is misspelled; do not "fix" the call) presumably returns the
# mean potential and its variance within the cube — verify against macrodensity.
for i in range(0,NGX,cube[0]):
    # Progress indicator: fraction of the x-axis scanned so far.
    print float(i)/NGX
    for j in range(0,NGY,cube[1]):
        for k in range(0,NGZ,cube[2]):
            # Fractional coordinates of the cube's bottom-left corner.
            origin = [float(i)/NGX,float(j)/NGY,float(k)/NGZ]
            volume_average, cube_var = md.voulme_average(origin, cube, grid_pot, NGX, NGY, NGZ)
            if cube_var <= cutoff_varience:
                vacuum.append(origin)
            else:
                non_vacuum.append(origin)
print "Number of vacuum cubes: ", len(vacuum)
print "Number of non-vacuum cubes: ", len(non_vacuum)
print "Percentage of vacuum cubes: ",(float(len(vacuum))/(float(len(vacuum))+float(len(non_vacuum)))*100.)
print "Percentage of non-vacuum cubes: ",(float(len(non_vacuum))/(float(len(vacuum))+float(len(non_vacuum)))*100.)
WordPlay T. Jay is a Hip Hop artist and producer from Little Rock, AR. His rise began with a feature on MTV's RapFix Live and that turned into making Music for the Underdog. T. Jay wants to inspire his listeners to get out of their heads and make the most of their gifts. He mixes a wide range of production with motivating lyrics to share his story. Currently, he is releasing music in a series called Orientation Day. The goal is to motivate the underdogs to make changes.
# -*- coding: utf-8 -*- ''' This module implements objects relating to analog signals, :class:`BaseAnalogSignal` and its child :class:`AnalogSignal`. :class:`AnalogSignalArray` is derived from :class:`BaseAnalogSignal` but is defined in :module:`neo.core.analogsignalarray`. :class:`IrregularlySampledSignal` is not derived from :class:`BaseAnalogSignal` and is defined in :module:`neo.core.irregularlysampledsignal`. :class:`BaseAnalogSignal` inherits from :class:`quantites.Quantity`, which inherits from :class:`numpy.array`. Inheritance from :class:`numpy.array` is explained here: http://docs.scipy.org/doc/numpy/user/basics.subclassing.html In brief: * Initialization of a new object from constructor happens in :meth:`__new__`. This is where user-specified attributes are set. * :meth:`__array_finalize__` is called for all new objects, including those created by slicing. This is where attributes are copied over from the old object. ''' # needed for python 3 compatibility from __future__ import absolute_import, division, print_function import numpy as np import quantities as pq from neo.core.baseneo import BaseNeo def _get_sampling_rate(sampling_rate, sampling_period): ''' Gets the sampling_rate from either the sampling_period or the sampling_rate, or makes sure they match if both are specified ''' if sampling_period is None: if sampling_rate is None: raise ValueError("You must provide either the sampling rate or " + "sampling period") elif sampling_rate is None: sampling_rate = 1.0 / sampling_period elif sampling_period != 1.0 / sampling_rate: raise ValueError('The sampling_rate has to be 1/sampling_period') if not hasattr(sampling_rate, 'units'): raise TypeError("Sampling rate/sampling period must have units") return sampling_rate def _new_BaseAnalogSignal(cls, signal, units=None, dtype=None, copy=True, t_start=0*pq.s, sampling_rate=None, sampling_period=None, name=None, file_origin=None, description=None, channel_index=None, annotations=None): ''' A function to map 
BaseAnalogSignal.__new__ to function that does not do the unit checking. This is needed for pickle to work. ''' return cls(signal=signal, units=units, dtype=dtype, copy=copy, t_start=t_start, sampling_rate=sampling_rate, sampling_period=sampling_period, name=name, file_origin=file_origin, description=description, channel_index=channel_index, **annotations) class BaseAnalogSignal(BaseNeo, pq.Quantity): ''' Base class for AnalogSignal and AnalogSignalArray ''' _single_parent_objects = ('Segment', 'RecordingChannel') _quantity_attr = 'signal' _necessary_attrs = (('signal', pq.Quantity, 1), ('sampling_rate', pq.Quantity, 0), ('t_start', pq.Quantity, 0)) _recommended_attrs = ((('channel_index', int),) + BaseNeo._recommended_attrs) def __new__(cls, signal, units=None, dtype=None, copy=True, t_start=0 * pq.s, sampling_rate=None, sampling_period=None, name=None, file_origin=None, description=None, channel_index=None, **annotations): ''' Constructs new :class:`BaseAnalogSignal` from data. This is called whenever a new class:`BaseAnalogSignal` is created from the constructor, but not when slicing. __array_finalize__ is called on the new object. ''' if units is None: if hasattr(signal, "units"): units = signal.units else: raise ValueError("Units must be specified") elif isinstance(signal, pq.Quantity): # could improve this test, what if units is a string? 
if units != signal.units: signal = signal.rescale(units) obj = pq.Quantity.__new__(cls, signal, units=units, dtype=dtype, copy=copy) if t_start is None: raise ValueError('t_start cannot be None') obj._t_start = t_start obj._sampling_rate = _get_sampling_rate(sampling_rate, sampling_period) obj.channel_index = channel_index obj.segment = None obj.recordingchannel = None return obj def __init__(self, signal, units=None, dtype=None, copy=True, t_start=0 * pq.s, sampling_rate=None, sampling_period=None, name=None, file_origin=None, description=None, channel_index=None, **annotations): ''' Initializes a newly constructed :class:`BaseAnalogSignal` instance. ''' # This method is only called when constructing a new BaseAnalogSignal, # not when slicing or viewing. We use the same call signature # as __new__ for documentation purposes. Anything not in the call # signature is stored in annotations. # Calls parent __init__, which grabs universally recommended # attributes and sets up self.annotations BaseNeo.__init__(self, name=name, file_origin=file_origin, description=description, **annotations) def __reduce__(self): ''' Map the __new__ function onto _new_BaseAnalogSignal, so that pickle works ''' return _new_BaseAnalogSignal, (self.__class__, np.array(self), self.units, self.dtype, True, self.t_start, self.sampling_rate, self.sampling_period, self.name, self.file_origin, self.description, self.channel_index, self.annotations) def __array_finalize__(self, obj): ''' This is called every time a new :class:`BaseAnalogSignal` is created. It is the appropriate place to set default values for attributes for :class:`BaseAnalogSignal` constructed by slicing or viewing. User-specified values are only relevant for construction from constructor, and these are set in __new__. Then they are just copied over here. 
''' super(BaseAnalogSignal, self).__array_finalize__(obj) self._t_start = getattr(obj, '_t_start', 0 * pq.s) self._sampling_rate = getattr(obj, '_sampling_rate', None) # The additional arguments self.annotations = getattr(obj, 'annotations', None) # Globally recommended attributes self.name = getattr(obj, 'name', None) self.file_origin = getattr(obj, 'file_origin', None) self.description = getattr(obj, 'description', None) self.channel_index = getattr(obj, 'channel_index', None) def __repr__(self): ''' Returns a string representing the :class:`BaseAnalogSignal`. ''' return ('<%s(%s, [%s, %s], sampling rate: %s)>' % (self.__class__.__name__, super(BaseAnalogSignal, self).__repr__(), self.t_start, self.t_stop, self.sampling_rate)) def __getslice__(self, i, j): ''' Get a slice from :attr:`i` to :attr:`j`. Doesn't get called in Python 3, :meth:`__getitem__` is called instead ''' obj = super(BaseAnalogSignal, self).__getslice__(i, j) obj.t_start = self.t_start + i * self.sampling_period return obj def __getitem__(self, i): ''' Get the item or slice :attr:`i`. ''' obj = super(BaseAnalogSignal, self).__getitem__(i) if isinstance(obj, BaseAnalogSignal): # update t_start and sampling_rate slice_start = None slice_step = None if isinstance(i, slice): slice_start = i.start slice_step = i.step elif isinstance(i, tuple) and len(i) == 2: slice_start = i[0].start slice_step = i[0].step if slice_start: obj.t_start = self.t_start + slice_start * self.sampling_period if slice_step: obj.sampling_period *= slice_step return obj # sampling_rate attribute is handled as a property so type checking can # be done @property def sampling_rate(self): ''' Number of samples per unit time. 
(1/:attr:`sampling_period`) ''' return self._sampling_rate @sampling_rate.setter def sampling_rate(self, rate): ''' Setter for :attr:`sampling_rate` ''' if rate is None: raise ValueError('sampling_rate cannot be None') elif not hasattr(rate, 'units'): raise ValueError('sampling_rate must have units') self._sampling_rate = rate # sampling_period attribute is handled as a property on underlying rate @property def sampling_period(self): ''' Interval between two samples. (1/:attr:`sampling_rate`) ''' return 1. / self.sampling_rate @sampling_period.setter def sampling_period(self, period): ''' Setter for :attr:`sampling_period` ''' if period is None: raise ValueError('sampling_period cannot be None') elif not hasattr(period, 'units'): raise ValueError('sampling_period must have units') self.sampling_rate = 1. / period # t_start attribute is handled as a property so type checking can be done @property def t_start(self): ''' Time when signal begins. ''' return self._t_start @t_start.setter def t_start(self, start): ''' Setter for :attr:`t_start` ''' if start is None: raise ValueError('t_start cannot be None') self._t_start = start @property def duration(self): ''' Signal duration (:attr:`size` * :attr:`sampling_period`) ''' return self.shape[0] / self.sampling_rate @property def t_stop(self): ''' Time when signal ends. 
(:attr:`t_start` + :attr:`duration`) ''' return self.t_start + self.duration @property def times(self): ''' The time points of each sample of the signal (:attr:`t_start` + arange(:attr:`shape`)/:attr:`sampling_rate`) ''' return self.t_start + np.arange(self.shape[0]) / self.sampling_rate def rescale(self, units): ''' Return a copy of the AnalogSignal(Array) converted to the specified units ''' to_dims = pq.quantity.validate_dimensionality(units) if self.dimensionality == to_dims: to_u = self.units signal = np.array(self) else: to_u = pq.Quantity(1.0, to_dims) from_u = pq.Quantity(1.0, self.dimensionality) try: cf = pq.quantity.get_conversion_factor(from_u, to_u) except AssertionError: raise ValueError('Unable to convert between units of "%s" \ and "%s"' % (from_u._dimensionality, to_u._dimensionality)) signal = cf * self.magnitude new = self.__class__(signal=signal, units=to_u, sampling_rate=self.sampling_rate) new._copy_data_complement(self) new.annotations.update(self.annotations) return new def duplicate_with_new_array(self, signal): ''' Create a new :class:`BaseAnalogSignal` with the same metadata but different data ''' #signal is the new signal new = self.__class__(signal=signal, units=self.units, sampling_rate=self.sampling_rate) new._copy_data_complement(self) new.annotations.update(self.annotations) return new def __eq__(self, other): ''' Equality test (==) ''' if (self.t_start != other.t_start or self.sampling_rate != other.sampling_rate): return False return super(BaseAnalogSignal, self).__eq__(other) def __ne__(self, other): ''' Non-equality test (!=) ''' return not self.__eq__(other) def _check_consistency(self, other): ''' Check if the attributes of another :class:`BaseAnalogSignal` are compatible with this one. ''' if isinstance(other, BaseAnalogSignal): for attr in "t_start", "sampling_rate": if getattr(self, attr) != getattr(other, attr): raise ValueError("Inconsistent values of %s" % attr) # how to handle name and annotations? 
def _copy_data_complement(self, other): ''' Copy the metadata from another :class:`BaseAnalogSignal`. ''' for attr in ("t_start", "sampling_rate", "name", "file_origin", "description", "channel_index", "annotations"): setattr(self, attr, getattr(other, attr, None)) def _apply_operator(self, other, op, *args): ''' Handle copying metadata to the new :class:`BaseAnalogSignal` after a mathematical operation. ''' self._check_consistency(other) f = getattr(super(BaseAnalogSignal, self), op) new_signal = f(other, *args) new_signal._copy_data_complement(self) return new_signal def __add__(self, other, *args): ''' Addition (+) ''' return self._apply_operator(other, "__add__", *args) def __sub__(self, other, *args): ''' Subtraction (-) ''' return self._apply_operator(other, "__sub__", *args) def __mul__(self, other, *args): ''' Multiplication (*) ''' return self._apply_operator(other, "__mul__", *args) def __truediv__(self, other, *args): ''' Float division (/) ''' return self._apply_operator(other, "__truediv__", *args) def __div__(self, other, *args): ''' Integer division (//) ''' return self._apply_operator(other, "__div__", *args) __radd__ = __add__ __rmul__ = __sub__ def __rsub__(self, other, *args): ''' Backwards subtraction (other-self) ''' return self.__mul__(-1, *args) + other def _repr_pretty_(self, pp, cycle): ''' Handle pretty-printing the :class:`BaseAnalogSignal`. ''' pp.text(" ".join([self.__class__.__name__, "in", str(self.units), "with", "x".join(map(str, self.shape)), str(self.dtype), "values", ])) if self._has_repr_pretty_attrs_(): pp.breakable() self._repr_pretty_attrs_(pp, cycle) def _pp(line): pp.breakable() with pp.group(indent=1): pp.text(line) if hasattr(self, "channel_index"): _pp("channel index: {0}".format(self.channel_index)) for line in ["sampling rate: {0}".format(self.sampling_rate), "time: {0} to {1}".format(self.t_start, self.t_stop) ]: _pp(line) class AnalogSignal(BaseAnalogSignal): ''' A continuous analog signal. 
A representation of a continuous, analog signal acquired at time :attr:`t_start` at a certain sampling rate. Inherits from :class:`quantities.Quantity`, which in turn inherits from :class:`numpy.ndarray`. *Usage*:: >>> from neo.core import AnalogSignal >>> from quantities import kHz, ms, nA, s, uV >>> import numpy as np >>> >>> sig0 = AnalogSignal([1, 2, 3], sampling_rate=0.42*kHz, ... units='mV') >>> sig1 = AnalogSignal([4, 5, 6]*nA, sampling_period=42*ms) >>> sig2 = AnalogSignal(np.array([1.0, 2.0, 3.0]), t_start=42*ms, ... sampling_rate=0.42*kHz, units=uV) >>> sig3 = AnalogSignal([1], units='V', day='Monday', ... sampling_period=1*s) >>> >>> sig3 <AnalogSignal(array([1]) * V, [0.0 s, 1.0 s], sampling rate: 1.0 1/s)> >>> sig3.annotations['day'] 'Monday' >>> sig3[0] array(1) * V >>> sig3[::2] <AnalogSignal(array([1]) * V, [0.0 s, 2.0 s], sampling rate: 0.5 1/s)> *Required attributes/properties*: :signal: (quantity array 1D, numpy array 1D, or list) The data itself. :units: (quantity units) Required if the signal is a list or NumPy array, not if it is a :class:`Quantity` :sampling_rate: *or* :sampling_period: (quantity scalar) Number of samples per unit time or interval between two samples. If both are specified, they are checked for consistency. *Recommended attributes/properties*: :name: (str) A label for the dataset. :description: (str) Text description. :file_origin: (str) Filesystem path or URL of the original data file. :t_start: (quantity scalar) Time when signal begins. Default: 0.0 seconds :channel_index: (int) You can use this to order :class:`AnalogSignal` objects in an way you want. :class:`AnalogSignalArray` and :class:`Unit` objects can be given indexes as well so related objects can be linked together. *Optional attributes/properties*: :dtype: (numpy dtype or str) Override the dtype of the signal array. :copy: (bool) True by default. Note: Any other additional arguments are assumed to be user-specific metadata and stored in :attr:`annotations`. 
*Properties available on this object*: :sampling_rate: (quantity scalar) Number of samples per unit time. (1/:attr:`sampling_period`) :sampling_period: (quantity scalar) Interval between two samples. (1/:attr:`sampling_rate`) :duration: (quantity scalar) Signal duration, read-only. (:attr:`size` * :attr:`sampling_period`) :t_stop: (quantity scalar) Time when signal ends, read-only. (:attr:`t_start` + :attr:`duration`) :times: (quantity 1D) The time points of each sample of the signal, read-only. (:attr:`t_start` + arange(:attr:`shape`)/:attr:`sampling_rate`) *Slicing*: :class:`AnalogSignal` objects can be sliced. When this occurs, a new :class:`AnalogSignal` (actually a view) is returned, with the same metadata, except that :attr:`sampling_period` is changed if the step size is greater than 1, and :attr:`t_start` is changed if the start index is greater than 0. Getting a single item returns a :class:`~quantity.Quantity` scalar. *Operations available on this object*: == != + * / ''' def __new__(cls, signal, units=None, dtype=None, copy=True, t_start=0*pq.s, sampling_rate=None, sampling_period=None, name=None, file_origin=None, description=None, channel_index=None, **annotations): ''' Constructs new :class:`AnalogSignal` from data. This is called whenever a new class:`AnalogSignal` is created from the constructor, but not when slicing. ''' obj = BaseAnalogSignal.__new__(cls, signal, units, dtype, copy, t_start, sampling_rate, sampling_period, name, file_origin, description, channel_index, **annotations) return obj def merge(self, other): ''' Merging is not supported in :class:`AnalogSignal`. ''' raise NotImplementedError('Cannot merge AnalogSignal objects')
Corrupted External Hard Drive - Can I work just off the renders of the project? Not very hopeful about this, but hopefully someone has a work around? Cutting a personal short film on Premiere Pro CC. I have the project files and renders on my computer's hard drive, but usually keep all the RAW rushes on a hard drive and have the project looking at them. I had picture locked and conformed from the proxies, overlaid some titles, put on a rough grade and exported a bounce at about 5mbps to pop on vimeo as a reference for people to see. If I boot up the premiere pro project, everything is going to load, but all the clips will be offline. Is there ANYTHING I can do to just grab one pro-res version of the cut. Do the render folders on my computer hold the key? I'm picture locked, so just need it to be able to make a version to get it graded and my bacon is saved. I am also looking into getting the hard drive fixed/salvaged in the meantime. But it will cost more than the whole project's budget. Making me queezy. Cheers all. All help appreciated! Will credit you in the credits!!!!! Re: Corrupted External Hard Drive - Can I work just off the renders of the project? yes you can gather the bits from your render files but you won't have audio. as a test i put the pieces back together from a commercial done weeks ago. Oh great - thanks for this. The renders I have found seem to be at 1280x720 and have the LUT baked into them. Since this was the last thing I rendered out from the project am I guessing that this is all there is in the way of evidence of the project?
from .torrent import Torrent


class InvalidTorrentGroupException(Exception):
    """Raised when JSON data passed to a TorrentGroup has a different group id."""


class TorrentGroup(object):
    """
    Represents a Torrent Group (usually an album).

    Note that TorrentGroup.torrents may not be comprehensive if you haven't
    called TorrentGroup.update_group_data()...it may have only been populated
    with filtered search results. Check TorrentGroup.has_complete_torrent_list
    (boolean) to be sure.
    """
    def __init__(self, id, parent_api):
        """
        :param id: numeric id of the group on the tracker
        :param parent_api: API object used for requests and shared caches
        """
        self.id = id
        self.parent_api = parent_api
        self.name = None
        self.wiki_body = None
        self.wiki_image = None
        self.year = None
        self.record_label = None
        self.catalogue_number = None
        self.tags = []
        self.release_type = None
        self.vanity_house = None
        self.has_bookmarked = None
        self.category = None
        self.time = None
        self.music_info = None
        self.torrents = []
        # True only once a full 'torrentgroup' or 'artist' payload has been
        # consumed; search results may carry a filtered subset of torrents.
        self.has_complete_torrent_list = False
        # Register in the shared cache so repeat lookups reuse this instance.
        self.parent_api.cached_torrent_groups[self.id] = self

    def update_group_data(self):
        """Fetch this group's full record from the API and apply it."""
        response = self.parent_api.request(action='torrentgroup', id=self.id)
        self.set_group_data(response)

    def set_group_data(self, torrent_group_json_response):
        """
        Takes parsed JSON response from 'torrentgroup' action on api, and
        updates relevant information.

        To avoid problems, only pass in data from an API call that used this
        torrentgroup's ID as an argument.

        :raises InvalidTorrentGroupException: if the payload's group id does
            not match this group's id.
        """
        group = torrent_group_json_response['group']
        if self.id != group['id']:
            # Fixed: the message previously blamed the 'artist' API call and
            # formatted the non-existent group['groupId'] key, which raised
            # KeyError on this error path instead of the intended exception.
            raise InvalidTorrentGroupException(
                "Tried to update a TorrentGroup's information from a"
                " 'torrentgroup' API call with a different id."
                + " Should be %s, got %s" % (self.id, group['id']))
        self.name = group['name']
        self.year = group['year']
        self.wiki_body = group['wikiBody']
        self.wiki_image = group['wikiImage']
        self.record_label = group['recordLabel']
        self.catalogue_number = group['catalogueNumber']
        self.release_type = group['releaseType']
        self.category = self.parent_api.get_category(group['categoryId'],
                                                     group['categoryName'])
        self.time = group['time']
        self.vanity_house = group['vanityHouse']
        self.music_info = group['musicInfo']
        self.music_info['artists'] = [
            self.parent_api.get_artist(artist['id'], artist['name'])
            for artist in self.music_info['artists']
        ]
        self.music_info['with'] = [
            self.parent_api.get_artist(artist['id'], artist['name'])
            for artist in self.music_info['with']
        ]
        if 'torrents' in torrent_group_json_response:
            self.torrents = []
            for torrent_dict in torrent_group_json_response['torrents']:
                torrent_dict['groupId'] = self.id
                torrent = self.parent_api.get_torrent(torrent_dict['id'])
                torrent.set_torrent_group_data(torrent_dict)
                self.torrents.append(torrent)
            self.has_complete_torrent_list = True
        elif 'torrent' in torrent_group_json_response:
            # Fixed: the single-torrent variant now tags the dict with this
            # group's id and applies the torrent data, mirroring the list
            # branch above (previously the payload was silently dropped).
            torrent_dict = torrent_group_json_response['torrent']
            torrent_dict['groupId'] = self.id
            torrent = self.parent_api.get_torrent(torrent_dict['id'])
            torrent.set_torrent_group_data(torrent_dict)
            self.torrents.append(torrent)

    def set_artist_group_data(self, artist_group_json_response):
        """
        Takes torrentgroup section from parsed JSON response from 'artist'
        action on api, and updates relevant information.

        :raises InvalidTorrentGroupException: if the payload's groupId does
            not match this group's id.
        """
        if self.id != artist_group_json_response['groupId']:
            raise InvalidTorrentGroupException(
                "Tried to update a TorrentGroup's information from an"
                " 'artist' API call with a different id."
                + " Should be %s, got %s"
                % (self.id, artist_group_json_response['groupId']))
        self.name = artist_group_json_response['groupName']
        self.year = artist_group_json_response['groupYear']
        self.record_label = artist_group_json_response['groupRecordLabel']
        self.catalogue_number = artist_group_json_response['groupCatalogueNumber']
        self.tags = []
        for tag_name in artist_group_json_response['tags']:
            tag = self.parent_api.get_tag(tag_name)
            self.tags.append(tag)
        self.release_type = artist_group_json_response['releaseType']
        self.has_bookmarked = artist_group_json_response['hasBookmarked']
        self.torrents = []
        # NOTE: the 'artist' payload uses the singular key 'torrent' for its
        # list of torrent dicts.
        for torrent_dict in artist_group_json_response['torrent']:
            torrent = self.parent_api.get_torrent(torrent_dict['id'])
            torrent.set_torrent_artist_data(torrent_dict)
            self.torrents.append(torrent)
        self.has_complete_torrent_list = True

    def set_torrent_search_data(self, search_json_response):
        """
        Takes one result from the 'browse' (search) action on the api and
        updates relevant information. Torrents found this way are appended to
        self.torrents rather than replacing it, and the torrent list is NOT
        marked complete (search results may be filtered).

        :raises InvalidTorrentGroupException: if the payload's groupId does
            not match this group's id.
        """
        if self.id != search_json_response['groupId']:
            raise InvalidTorrentGroupException(
                "Tried to update a TorrentGroup's information from a"
                " 'browse'/search API call with a different id."
                + " Should be %s, got %s"
                % (self.id, search_json_response['groupId']))
        self.name = search_json_response['groupName']
        # purposefully ignoring search_json_response['artist']...the other
        # data updates don't include it, would just get confusing
        self.tags = []
        for tag_name in search_json_response['tags']:
            tag = self.parent_api.get_tag(tag_name)
            self.tags.append(tag)
        # some of the below keys aren't in things like comics...should
        # probably watch out for this elsewhere
        if 'bookmarked' in search_json_response:
            self.has_bookmarked = search_json_response['bookmarked']
        if 'vanityHouse' in search_json_response:
            self.vanity_house = search_json_response['vanityHouse']
        if 'groupYear' in search_json_response:
            self.year = search_json_response['groupYear']
        if 'releaseType' in search_json_response:
            self.release_type = search_json_response['releaseType']
        self.time = search_json_response['groupTime']
        if 'torrentId' in search_json_response:
            # Single-torrent search results present a bare 'torrentId';
            # normalize to the list form handled below.
            search_json_response['torrents'] = [
                {'torrentId': search_json_response['torrentId']}]
        new_torrents = []
        for torrent_dict in search_json_response['torrents']:
            torrent_dict['groupId'] = self.id
            torrent = self.parent_api.get_torrent(torrent_dict['torrentId'])
            new_torrents.append(torrent)
            # torrent information gets populated in API search call, no need
            # to duplicate that here
        self.torrents = self.torrents + new_torrents

    def __repr__(self):
        return "TorrentGroup: %s - ID: %s" % (self.name, self.id)
EYELET OUTLET-12mm Bling Self Adhesive Jewels. Color: Clear. This package contains 40 sticky back jewels in 12mm size on one backing sheet. These stick on bling jewels have detail around the edge and a raised center. A great addition to your paper crafts and scrapbooking projects.
# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib from eventlet import greenthread from oslo_concurrency import lockutils from oslo_config import cfg from oslo_db import api as oslo_db_api from oslo_db import exception as os_db_exception from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import importutils from sqlalchemy import exc as sql_exc from sqlalchemy.orm import exc as sa_exc from neutron.agent import securitygroups_rpc as sg_rpc from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron.api.rpc.handlers import dhcp_rpc from neutron.api.rpc.handlers import dvr_rpc from neutron.api.rpc.handlers import metadata_rpc from neutron.api.rpc.handlers import securitygroups_rpc from neutron.api.v2 import attributes from neutron.callbacks import events from neutron.callbacks import exceptions from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import constants as const from neutron.common import exceptions as exc from neutron.common import ipv6_utils from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.common import utils from neutron.db import agents_db from neutron.db import agentschedulers_db from neutron.db import allowedaddresspairs_db as addr_pair_db from neutron.db import api as db_api from neutron.db import db_base_plugin_v2 from 
neutron.db import dvr_mac_db from neutron.db import external_net_db from neutron.db import extradhcpopt_db from neutron.db import models_v2 from neutron.db import netmtu_db from neutron.db import quota_db # noqa from neutron.db import securitygroups_rpc_base as sg_db_rpc from neutron.db import vlantransparent_db from neutron.extensions import allowedaddresspairs as addr_pair from neutron.extensions import extra_dhcp_opt as edo_ext from neutron.extensions import portbindings from neutron.extensions import portsecurity as psec from neutron.extensions import providernet as provider from neutron.extensions import securitygroup as ext_sg from neutron.extensions import vlantransparent from neutron.i18n import _LE, _LI, _LW from neutron import manager from neutron.openstack.common import uuidutils from neutron.plugins.common import constants as service_constants from neutron.plugins.ml2.common import exceptions as ml2_exc from neutron.plugins.ml2 import config # noqa from neutron.plugins.ml2 import db from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2 import driver_context from neutron.plugins.ml2 import managers from neutron.plugins.ml2 import models from neutron.plugins.ml2 import rpc LOG = log.getLogger(__name__) MAX_BIND_TRIES = 10 # REVISIT(rkukura): Move this and other network_type constants to # providernet.py? TYPE_MULTI_SEGMENT = 'multi-segment' class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, dvr_mac_db.DVRDbMixin, external_net_db.External_net_db_mixin, sg_db_rpc.SecurityGroupServerRpcMixin, agentschedulers_db.DhcpAgentSchedulerDbMixin, addr_pair_db.AllowedAddressPairsMixin, vlantransparent_db.Vlantransparent_db_mixin, extradhcpopt_db.ExtraDhcpOptMixin, netmtu_db.Netmtu_db_mixin): """Implement the Neutron L2 abstractions using modules. Ml2Plugin is a Neutron plugin based on separately extensible sets of network types and mechanisms for connecting to networks of those types. 
The network types and mechanisms are implemented as drivers loaded via Python entry points. Networks can be made up of multiple segments (not yet fully implemented). """ # This attribute specifies whether the plugin supports or not # bulk/pagination/sorting operations. Name mangling is used in # order to ensure it is qualified by class __native_bulk_support = True __native_pagination_support = True __native_sorting_support = True # List of supported extensions _supported_extension_aliases = ["provider", "external-net", "binding", "quotas", "security-group", "agent", "dhcp_agent_scheduler", "multi-provider", "allowed-address-pairs", "extra_dhcp_opt", "subnet_allocation", "net-mtu", "vlan-transparent"] @property def supported_extension_aliases(self): if not hasattr(self, '_aliases'): aliases = self._supported_extension_aliases[:] aliases += self.extension_manager.extension_aliases() sg_rpc.disable_security_group_extension_by_config(aliases) vlantransparent.disable_extension_by_config(aliases) self._aliases = aliases return self._aliases def __init__(self): # First load drivers, then initialize DB, then initialize drivers self.type_manager = managers.TypeManager() self.extension_manager = managers.ExtensionManager() self.mechanism_manager = managers.MechanismManager() super(Ml2Plugin, self).__init__() self.type_manager.initialize() self.extension_manager.initialize() self.mechanism_manager.initialize() self._setup_rpc() # REVISIT(rkukura): Use stevedore for these? 
self.network_scheduler = importutils.import_object( cfg.CONF.network_scheduler_driver ) self.start_periodic_dhcp_agent_status_check() LOG.info(_LI("Modular L2 Plugin initialization complete")) def _setup_rpc(self): self.notifier = rpc.AgentNotifierApi(topics.AGENT) self.agent_notifiers[const.AGENT_TYPE_DHCP] = ( dhcp_rpc_agent_api.DhcpAgentNotifyAPI() ) def start_rpc_listeners(self): self.endpoints = [rpc.RpcCallbacks(self.notifier, self.type_manager), securitygroups_rpc.SecurityGroupServerRpcCallback(), dvr_rpc.DVRServerRpcCallback(), dhcp_rpc.DhcpRpcCallback(), agents_db.AgentExtRpcCallback(), metadata_rpc.MetadataRpcCallback()] self.topic = topics.PLUGIN self.conn = n_rpc.create_connection(new=True) self.conn.create_consumer(self.topic, self.endpoints, fanout=False) return self.conn.consume_in_threads() def _filter_nets_provider(self, context, networks, filters): return [network for network in networks if self.type_manager.network_matches_filters(network, filters) ] def _check_mac_update_allowed(self, orig_port, port, binding): unplugged_types = (portbindings.VIF_TYPE_BINDING_FAILED, portbindings.VIF_TYPE_UNBOUND) new_mac = port.get('mac_address') mac_change = (new_mac is not None and orig_port['mac_address'] != new_mac) if (mac_change and binding.vif_type not in unplugged_types): raise exc.PortBound(port_id=orig_port['id'], vif_type=binding.vif_type, old_mac=orig_port['mac_address'], new_mac=port['mac_address']) return mac_change def _process_port_binding(self, mech_context, attrs): session = mech_context._plugin_context.session binding = mech_context._binding port = mech_context.current port_id = port['id'] changes = False host = attrs and attrs.get(portbindings.HOST_ID) original_host = binding.host if (attributes.is_attr_set(host) and original_host != host): binding.host = host changes = True vnic_type = attrs and attrs.get(portbindings.VNIC_TYPE) if (attributes.is_attr_set(vnic_type) and binding.vnic_type != vnic_type): binding.vnic_type = vnic_type changes 
= True # treat None as clear of profile. profile = None if attrs and portbindings.PROFILE in attrs: profile = attrs.get(portbindings.PROFILE) or {} if profile not in (None, attributes.ATTR_NOT_SPECIFIED, self._get_profile(binding)): binding.profile = jsonutils.dumps(profile) if len(binding.profile) > models.BINDING_PROFILE_LEN: msg = _("binding:profile value too large") raise exc.InvalidInput(error_message=msg) changes = True # Unbind the port if needed. if changes: binding.vif_type = portbindings.VIF_TYPE_UNBOUND binding.vif_details = '' db.clear_binding_levels(session, port_id, original_host) mech_context._clear_binding_levels() if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: binding.vif_type = portbindings.VIF_TYPE_DISTRIBUTED binding.vif_details = '' db.clear_binding_levels(session, port_id, original_host) mech_context._clear_binding_levels() binding.host = '' self._update_port_dict_binding(port, binding) return changes def _bind_port_if_needed(self, context, allow_notify=False, need_notify=False): plugin_context = context._plugin_context port_id = context._port['id'] # Since the mechanism driver bind_port() calls must be made # outside a DB transaction locking the port state, it is # possible (but unlikely) that the port's state could change # concurrently while these calls are being made. If another # thread or process succeeds in binding the port before this # thread commits its results, the already committed results are # used. If attributes such as binding:host_id, # binding:profile, or binding:vnic_type are updated # concurrently, this loop retries binding using the new # values. count = 0 while True: # First, determine whether it is necessary and possible to # bind the port. binding = context._binding if (binding.vif_type != portbindings.VIF_TYPE_UNBOUND or not binding.host): # We either don't need to bind the port, or can't, so # notify if needed and return. 
if allow_notify and need_notify: self._notify_port_updated(context) return context # Limit binding attempts to avoid any possibility of # infinite looping and to ensure an error is logged # instead. This does not need to be tunable because no # more than a couple attempts should ever be required in # normal operation. Log at info level if not 1st attempt. count += 1 if count > MAX_BIND_TRIES: LOG.error(_LE("Failed to commit binding results for %(port)s " "after %(max)s tries"), {'port': port_id, 'max': MAX_BIND_TRIES}) return context if count > 1: greenthread.sleep(0) # yield LOG.info(_LI("Attempt %(count)s to bind port %(port)s"), {'count': count, 'port': port_id}) # The port isn't already bound and the necessary # information is available, so attempt to bind the port. bind_context = self._bind_port(context) # Now try to commit result of attempting to bind the port. new_context, did_commit = self._commit_port_binding( plugin_context, port_id, binding, bind_context) if not new_context: # The port has been deleted concurrently, so just # return the unbound result from the initial # transaction that completed before the deletion. LOG.debug("Port %s has been deleted concurrently", port_id) return context # Need to notify if we succeed and our results were # committed. if did_commit and (new_context._binding.vif_type != portbindings.VIF_TYPE_BINDING_FAILED): need_notify = True context = new_context def _bind_port(self, orig_context): # Construct a new PortContext from the one from the previous # transaction. 
port = orig_context._port orig_binding = orig_context._binding new_binding = models.PortBinding( host=orig_binding.host, vnic_type=orig_binding.vnic_type, profile=orig_binding.profile, vif_type=portbindings.VIF_TYPE_UNBOUND, vif_details='' ) self._update_port_dict_binding(port, new_binding) new_context = driver_context.PortContext( self, orig_context._plugin_context, port, orig_context._network_context._network, new_binding, None) # Attempt to bind the port and return the context with the # result. self.mechanism_manager.bind_port(new_context) return new_context def _commit_port_binding(self, plugin_context, port_id, orig_binding, new_context): session = plugin_context.session new_binding = new_context._binding # After we've attempted to bind the port, we begin a # transaction, get the current port state, and decide whether # to commit the binding results. # # REVISIT: Serialize this operation with a semaphore to # prevent deadlock waiting to acquire a DB lock held by # another thread in the same process, leading to 'lock wait # timeout' errors. with contextlib.nested(lockutils.lock('db-access'), session.begin(subtransactions=True)): # Get the current port state and build a new PortContext # reflecting this state as original state for subsequent # mechanism driver update_port_*commit() calls. port_db, cur_binding = db.get_locked_port_and_binding(session, port_id) if not port_db: # The port has been deleted concurrently. 
return (None, None) oport = self._make_port_dict(port_db) port = self._make_port_dict(port_db) network = new_context.network.current if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: # REVISIT(rkukura): The PortBinding instance from the # ml2_port_bindings table, returned as cur_binding # from db.get_locked_port_and_binding() above, is # currently not used for DVR distributed ports, and is # replaced here with the DVRPortBinding instance from # the ml2_dvr_port_bindings table specific to the host # on which the distributed port is being bound. It # would be possible to optimize this code to avoid # fetching the PortBinding instance in the DVR case, # and even to avoid creating the unused entry in the # ml2_port_bindings table. But the upcoming resolution # for bug 1367391 will eliminate the # ml2_dvr_port_bindings table, use the # ml2_port_bindings table to store non-host-specific # fields for both distributed and non-distributed # ports, and introduce a new ml2_port_binding_hosts # table for the fields that need to be host-specific # in the distributed case. Since the PortBinding # instance will then be needed, it does not make sense # to optimize this code to avoid fetching it. cur_binding = db.get_dvr_port_binding_by_host( session, port_id, orig_binding.host) cur_context = driver_context.PortContext( self, plugin_context, port, network, cur_binding, None, original_port=oport) # Commit our binding results only if port has not been # successfully bound concurrently by another thread or # process and no binding inputs have been changed. commit = ((cur_binding.vif_type in [portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_BINDING_FAILED]) and orig_binding.host == cur_binding.host and orig_binding.vnic_type == cur_binding.vnic_type and orig_binding.profile == cur_binding.profile) if commit: # Update the port's binding state with our binding # results. 
cur_binding.vif_type = new_binding.vif_type cur_binding.vif_details = new_binding.vif_details db.clear_binding_levels(session, port_id, cur_binding.host) db.set_binding_levels(session, new_context._binding_levels) cur_context._binding_levels = new_context._binding_levels # Update PortContext's port dictionary to reflect the # updated binding state. self._update_port_dict_binding(port, cur_binding) # Update the port status if requested by the bound driver. if (new_context._binding_levels and new_context._new_port_status): port_db.status = new_context._new_port_status port['status'] = new_context._new_port_status # Call the mechanism driver precommit methods, commit # the results, and call the postcommit methods. self.mechanism_manager.update_port_precommit(cur_context) if commit: self.mechanism_manager.update_port_postcommit(cur_context) # Continue, using the port state as of the transaction that # just finished, whether that transaction committed new # results or discovered concurrent port state changes. 
        # Tail of _commit_port_binding: hand back the post-transaction
        # context and whether this thread's binding results were committed.
        return (cur_context, commit)

    def _update_port_dict_binding(self, port, binding):
        # Copy the binding record's fields into the port API dict so callers
        # see binding:host_id, binding:vnic_type, binding:profile,
        # binding:vif_type and binding:vif_details.
        port[portbindings.HOST_ID] = binding.host
        port[portbindings.VNIC_TYPE] = binding.vnic_type
        port[portbindings.PROFILE] = self._get_profile(binding)
        port[portbindings.VIF_TYPE] = binding.vif_type
        port[portbindings.VIF_DETAILS] = self._get_vif_details(binding)

    def _get_vif_details(self, binding):
        # Deserialize the JSON-encoded vif_details column; a corrupt value is
        # logged and treated as empty rather than propagating the error.
        if binding.vif_details:
            try:
                return jsonutils.loads(binding.vif_details)
            except Exception:
                LOG.error(_LE("Serialized vif_details DB value '%(value)s' "
                              "for port %(port)s is invalid"),
                          {'value': binding.vif_details,
                           'port': binding.port_id})
        return {}

    def _get_profile(self, binding):
        # Deserialize the JSON-encoded binding:profile column; a corrupt
        # value is logged and treated as empty rather than propagating.
        if binding.profile:
            try:
                return jsonutils.loads(binding.profile)
            except Exception:
                LOG.error(_LE("Serialized profile DB value '%(value)s' for "
                              "port %(port)s is invalid"),
                          {'value': binding.profile,
                           'port': binding.port_id})
        return {}

    def _ml2_extend_port_dict_binding(self, port_res, port_db):
        # None when called during unit tests for other plugins.
        if port_db.port_binding:
            self._update_port_dict_binding(port_res, port_db.port_binding)

    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
        attributes.PORTS, ['_ml2_extend_port_dict_binding'])

    # Register extend dict methods for network and port resources.
    # Each mechanism driver that supports extend attribute for the resources
    # can add those attribute to the result.
    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
        attributes.NETWORKS, ['_ml2_md_extend_network_dict'])
    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
        attributes.PORTS, ['_ml2_md_extend_port_dict'])
    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
        attributes.SUBNETS, ['_ml2_md_extend_subnet_dict'])

    def _ml2_md_extend_network_dict(self, result, netdb):
        # Let every loaded extension driver add its attributes to the
        # network API dict, inside its own subtransaction.
        session = db_api.get_session()
        with session.begin(subtransactions=True):
            self.extension_manager.extend_network_dict(session, netdb, result)

    def _ml2_md_extend_port_dict(self, result, portdb):
        # Same as _ml2_md_extend_network_dict, for port resources.
        session = db_api.get_session()
        with session.begin(subtransactions=True):
            self.extension_manager.extend_port_dict(session, portdb, result)

    def _ml2_md_extend_subnet_dict(self, result, subnetdb):
        # Same as _ml2_md_extend_network_dict, for subnet resources.
        session = db_api.get_session()
        with session.begin(subtransactions=True):
            self.extension_manager.extend_subnet_dict(
                session, subnetdb, result)

    # Note - The following hook methods have "ml2" in their names so
    # that they are not called twice during unit tests due to global
    # registration of hooks in portbindings_db.py used by other
    # plugins.
    def _ml2_port_model_hook(self, context, original_model, query):
        # Join ports to their ML2 binding row so binding:host_id can be
        # filtered at the query level (see the result filter hook below).
        query = query.outerjoin(models.PortBinding,
                                (original_model.id ==
                                 models.PortBinding.port_id))
        return query

    def _ml2_port_result_filter_hook(self, query, filters):
        # Apply a binding:host_id filter, if one was requested, against the
        # joined PortBinding table; otherwise return the query unchanged.
        values = filters and filters.get(portbindings.HOST_ID, [])
        if not values:
            return query
        return query.filter(models.PortBinding.host.in_(values))

    db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook(
        models_v2.Port,
        "ml2_port_bindings",
        '_ml2_port_model_hook',
        None,
        '_ml2_port_result_filter_hook')

    def _detect_faked_port(self, mech_context):
        # Returns the port's host when its device_owner is 'virl:coreos',
        # else None. Host/owner fall back to the original (pre-update) port
        # when missing on the current one. NOTE(review): 'virl:coreos' is a
        # VIRL-specific owner value; this looks like a local fork addition.
        port = mech_context._port
        host = port.get('binding:host_id')
        owner = port.get('device_owner')
        port = mech_context._original_port
        if port:
            if not host:
                host = port.get('binding:host_id')
            if not owner:
                owner = port.get('device_owner')
        return host if owner == 'virl:coreos' else None

    def _notify_port_updated(self, mech_context):
        port = mech_context._port
        segment = mech_context.bottom_bound_segment
        faked = self._detect_faked_port(mech_context)
        if faked:
            # 'Faked' (virl:coreos) ports are notified without segment
            # details; only the host is passed through to the notifier.
            self.notifier.port_update(mech_context._plugin_context, port,
                                      None, None, None, faked)
            return
        if not segment:
            # REVISIT(rkukura): This should notify agent to unplug port
            network = mech_context.network.current
            LOG.debug("In _notify_port_updated(), no bound segment for "
                      "port %(port_id)s on network %(network_id)s",
                      {'port_id': port['id'],
                       'network_id': network['id']})
            return
        self.notifier.port_update(mech_context._plugin_context, port,
                                  segment[api.NETWORK_TYPE],
                                  segment[api.SEGMENTATION_ID],
                                  segment[api.PHYSICAL_NETWORK])

    def _delete_objects(self, context, resource, objects):
        # Best-effort cleanup used when a bulk create's postcommit fails:
        # delete each created resource, logging (not raising) per-object
        # failures so the remaining deletions are still attempted.
        delete_op = getattr(self, 'delete_%s' % resource)
        for obj in objects:
            try:
                delete_op(context, obj['result']['id'])
            except KeyError:
                LOG.exception(_LE("Could not find %s to delete."),
                              resource)
            except Exception:
                LOG.exception(_LE("Could not delete %(res)s %(id)s."),
                              {'res': resource,
                               'id': obj['result']['id']})

    @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES,
retry_on_request=True) def _create_bulk_ml2(self, resource, context, request_items): objects = [] collection = "%ss" % resource items = request_items[collection] try: with context.session.begin(subtransactions=True): obj_creator = getattr(self, '_create_%s_db' % resource) for item in items: attrs = item[resource] result, mech_context = obj_creator(context, item) objects.append({'mech_context': mech_context, 'result': result, 'attributes': attrs}) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE("An exception occurred while creating " "the %(resource)s:%(item)s"), {'resource': resource, 'item': item}) try: postcommit_op = getattr(self.mechanism_manager, 'create_%s_postcommit' % resource) for obj in objects: postcommit_op(obj['mech_context']) return objects except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): resource_ids = [res['result']['id'] for res in objects] LOG.exception(_LE("mechanism_manager.create_%(res)s" "_postcommit failed for %(res)s: " "'%(failed_id)s'. 
Deleting " "%(res)ss %(resource_ids)s"), {'res': resource, 'failed_id': obj['result']['id'], 'resource_ids': ', '.join(resource_ids)}) self._delete_objects(context, resource, objects) def _create_network_db(self, context, network): net_data = network[attributes.NETWORK] tenant_id = self._get_tenant_id_for_create(context, net_data) session = context.session with session.begin(subtransactions=True): self._ensure_default_security_group(context, tenant_id) result = super(Ml2Plugin, self).create_network(context, network) self.extension_manager.process_create_network(context, net_data, result) self._process_l3_create(context, result, net_data) net_data['id'] = result['id'] self.type_manager.create_network_segments(context, net_data, tenant_id) self.type_manager.extend_network_dict_provider(context, result) mech_context = driver_context.NetworkContext(self, context, result) self.mechanism_manager.create_network_precommit(mech_context) if net_data.get(api.MTU, 0) > 0: res = super(Ml2Plugin, self).update_network(context, result['id'], {'network': {api.MTU: net_data[api.MTU]}}) result[api.MTU] = res.get(api.MTU, 0) return result, mech_context @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES, retry_on_request=True) def _create_network_with_retries(self, context, network): return self._create_network_db(context, network) def create_network(self, context, network): result, mech_context = self._create_network_with_retries(context, network) try: self.mechanism_manager.create_network_postcommit(mech_context) except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): LOG.error(_LE("mechanism_manager.create_network_postcommit " "failed, deleting network '%s'"), result['id']) self.delete_network(context, result['id']) return result def create_network_bulk(self, context, networks): objects = self._create_bulk_ml2(attributes.NETWORK, context, networks) return [obj['result'] for obj in objects] def update_network(self, context, id, network): net_data = 
network[attributes.NETWORK] provider._raise_if_updates_provider_attributes(net_data) session = context.session with session.begin(subtransactions=True): original_network = super(Ml2Plugin, self).get_network(context, id) updated_network = super(Ml2Plugin, self).update_network(context, id, network) self.extension_manager.process_update_network(context, net_data, updated_network) self._process_l3_update(context, updated_network, net_data) self.type_manager.extend_network_dict_provider(context, updated_network) mech_context = driver_context.NetworkContext( self, context, updated_network, original_network=original_network) self.mechanism_manager.update_network_precommit(mech_context) # TODO(apech) - handle errors raised by update_network, potentially # by re-calling update_network with the previous attributes. For # now the error is propogated to the caller, which is expected to # either undo/retry the operation or delete the resource. self.mechanism_manager.update_network_postcommit(mech_context) return updated_network def get_network(self, context, id, fields=None): session = context.session with session.begin(subtransactions=True): result = super(Ml2Plugin, self).get_network(context, id, None) self.type_manager.extend_network_dict_provider(context, result) return self._fields(result, fields) def get_networks(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): session = context.session with session.begin(subtransactions=True): nets = super(Ml2Plugin, self).get_networks(context, filters, None, sorts, limit, marker, page_reverse) for net in nets: self.type_manager.extend_network_dict_provider(context, net) nets = self._filter_nets_provider(context, nets, filters) nets = self._filter_nets_l3(context, nets, filters) return [self._fields(net, fields) for net in nets] def _delete_ports(self, context, ports): for port in ports: try: self.delete_port(context, port.id) except (exc.PortNotFound, sa_exc.ObjectDeletedError): 
context.session.expunge(port) # concurrent port deletion can be performed by # release_dhcp_port caused by concurrent subnet_delete LOG.info(_LI("Port %s was deleted concurrently"), port.id) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Exception auto-deleting port %s"), port.id) def _delete_subnets(self, context, subnets): for subnet in subnets: try: self.delete_subnet(context, subnet.id) except (exc.SubnetNotFound, sa_exc.ObjectDeletedError): context.session.expunge(subnet) LOG.info(_LI("Subnet %s was deleted concurrently"), subnet.id) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Exception auto-deleting subnet %s"), subnet.id) def delete_network(self, context, id): # REVISIT(rkukura) The super(Ml2Plugin, self).delete_network() # function is not used because it auto-deletes ports and # subnets from the DB without invoking the derived class's # delete_port() or delete_subnet(), preventing mechanism # drivers from being called. This approach should be revisited # when the API layer is reworked during icehouse. LOG.debug("Deleting network %s", id) session = context.session while True: try: # REVISIT: Serialize this operation with a semaphore # to prevent deadlock waiting to acquire a DB lock # held by another thread in the same process, leading # to 'lock wait timeout' errors. # # Process L3 first, since, depending on the L3 plugin, it may # involve locking the db-access semaphore, sending RPC # notifications, and/or calling delete_port on this plugin. # Additionally, a rollback may not be enough to undo the # deletion of a floating IP with certain L3 backends. self._process_l3_delete(context, id) # Using query().with_lockmode isn't necessary. Foreign-key # constraints prevent deletion if concurrent creation happens. with contextlib.nested(lockutils.lock('db-access'), session.begin(subtransactions=True)): # Get ports to auto-delete. ports = (session.query(models_v2.Port). enable_eagerloads(False). 
filter_by(network_id=id).all()) LOG.debug("Ports to auto-delete: %s", ports) only_auto_del = all(p.device_owner in db_base_plugin_v2. AUTO_DELETE_PORT_OWNERS for p in ports) if not only_auto_del: LOG.debug("Tenant-owned ports exist") raise exc.NetworkInUse(net_id=id) # Get subnets to auto-delete. subnets = (session.query(models_v2.Subnet). enable_eagerloads(False). filter_by(network_id=id).all()) LOG.debug("Subnets to auto-delete: %s", subnets) if not (ports or subnets): network = self.get_network(context, id) mech_context = driver_context.NetworkContext(self, context, network) self.mechanism_manager.delete_network_precommit( mech_context) self.type_manager.release_network_segments(session, id) record = self._get_network(context, id) LOG.debug("Deleting network record %s", record) session.delete(record) # The segment records are deleted via cascade from the # network record, so explicit removal is not necessary. LOG.debug("Committing transaction") break except os_db_exception.DBError as e: with excutils.save_and_reraise_exception() as ctxt: if isinstance(e.inner_exception, sql_exc.IntegrityError): ctxt.reraise = False LOG.warning(_LW("A concurrent port creation has " "occurred")) continue self._delete_ports(context, ports) self._delete_subnets(context, subnets) try: self.mechanism_manager.delete_network_postcommit(mech_context) except ml2_exc.MechanismDriverError: # TODO(apech) - One or more mechanism driver failed to # delete the network. Ideally we'd notify the caller of # the fact that an error occurred. 
LOG.error(_LE("mechanism_manager.delete_network_postcommit" " failed")) self.notifier.network_delete(context, id) def _create_subnet_db(self, context, subnet): session = context.session with session.begin(subtransactions=True): result = super(Ml2Plugin, self).create_subnet(context, subnet) self.extension_manager.process_create_subnet( context, subnet[attributes.SUBNET], result) mech_context = driver_context.SubnetContext(self, context, result) self.mechanism_manager.create_subnet_precommit(mech_context) return result, mech_context def create_subnet(self, context, subnet): result, mech_context = self._create_subnet_db(context, subnet) try: self.mechanism_manager.create_subnet_postcommit(mech_context) except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): LOG.error(_LE("mechanism_manager.create_subnet_postcommit " "failed, deleting subnet '%s'"), result['id']) self.delete_subnet(context, result['id']) return result def create_subnet_bulk(self, context, subnets): objects = self._create_bulk_ml2(attributes.SUBNET, context, subnets) return [obj['result'] for obj in objects] def update_subnet(self, context, id, subnet): session = context.session with session.begin(subtransactions=True): original_subnet = super(Ml2Plugin, self).get_subnet(context, id) updated_subnet = super(Ml2Plugin, self).update_subnet( context, id, subnet) self.extension_manager.process_update_subnet( context, subnet[attributes.SUBNET], updated_subnet) mech_context = driver_context.SubnetContext( self, context, updated_subnet, original_subnet=original_subnet) self.mechanism_manager.update_subnet_precommit(mech_context) # TODO(apech) - handle errors raised by update_subnet, potentially # by re-calling update_subnet with the previous attributes. For # now the error is propogated to the caller, which is expected to # either undo/retry the operation or delete the resource. 
self.mechanism_manager.update_subnet_postcommit(mech_context) return updated_subnet def delete_subnet(self, context, id): # REVISIT(rkukura) The super(Ml2Plugin, self).delete_subnet() # function is not used because it deallocates the subnet's addresses # from ports in the DB without invoking the derived class's # update_port(), preventing mechanism drivers from being called. # This approach should be revisited when the API layer is reworked # during icehouse. LOG.debug("Deleting subnet %s", id) session = context.session while True: # REVISIT: Serialize this operation with a semaphore to # prevent deadlock waiting to acquire a DB lock held by # another thread in the same process, leading to 'lock # wait timeout' errors. with contextlib.nested(lockutils.lock('db-access'), session.begin(subtransactions=True)): record = self._get_subnet(context, id) subnet = self._make_subnet_dict(record, None) qry_allocated = (session.query(models_v2.IPAllocation). filter_by(subnet_id=id). join(models_v2.Port)) is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet) # Remove network owned ports, and delete IP allocations # for IPv6 addresses which were automatically generated # via SLAAC if is_auto_addr_subnet: self._subnet_check_ip_allocations_internal_router_ports( context, id) else: qry_allocated = ( qry_allocated.filter(models_v2.Port.device_owner. in_(db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS))) allocated = qry_allocated.all() # Delete all the IPAllocation that can be auto-deleted if allocated: map(session.delete, allocated) LOG.debug("Ports to auto-deallocate: %s", allocated) # Check if there are more IP allocations, unless # is_auto_address_subnet is True. In that case the check is # unnecessary. 
This additional check not only would be wasteful # for this class of subnet, but is also error-prone since when # the isolation level is set to READ COMMITTED allocations made # concurrently will be returned by this query if not is_auto_addr_subnet: if self._subnet_check_ip_allocations(context, id): LOG.debug("Found IP allocations on subnet %s, " "cannot delete", id) raise exc.SubnetInUse(subnet_id=id) # If allocated is None, then all the IPAllocation were # correctly deleted during the previous pass. if not allocated: mech_context = driver_context.SubnetContext(self, context, subnet) self.mechanism_manager.delete_subnet_precommit( mech_context) LOG.debug("Deleting subnet record") session.delete(record) LOG.debug("Committing transaction") break for a in allocated: if a.port_id: # calling update_port() for each allocation to remove the # IP from the port and call the MechanismDrivers data = {attributes.PORT: {'fixed_ips': [{'subnet_id': ip.subnet_id, 'ip_address': ip.ip_address} for ip in a.ports.fixed_ips if ip.subnet_id != id]}} try: self.update_port(context, a.port_id, data) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Exception deleting fixed_ip " "from port %s"), a.port_id) try: self.mechanism_manager.delete_subnet_postcommit(mech_context) except ml2_exc.MechanismDriverError: # TODO(apech) - One or more mechanism driver failed to # delete the subnet. Ideally we'd notify the caller of # the fact that an error occurred. LOG.error(_LE("mechanism_manager.delete_subnet_postcommit failed")) # TODO(yalei) - will be simplified after security group and address pair be # converted to ext driver too. 
    def _portsec_ext_port_create_processing(self, context, port_data, port):
        """Enforce port-security consistency rules at port creation.

        ``port_data`` is the already-persisted port dict, ``port`` the
        original API request body.  Port security defaults to enabled when
        the attribute is absent (None).  Raises when the request combines
        disabled port security with allowed address pairs, or attaches
        security groups while port security is off.
        """
        attrs = port[attributes.PORT]
        # Absent (None) is treated the same as explicitly enabled.
        port_security = ((port_data.get(psec.PORTSECURITY) is None) or
                         port_data[psec.PORTSECURITY])

        # allowed address pair checks
        if attributes.is_attr_set(attrs.get(addr_pair.ADDRESS_PAIRS)):
            if not port_security:
                raise addr_pair.AddressPairAndPortSecurityRequired()
        else:
            # remove ATTR_NOT_SPECIFIED
            attrs[addr_pair.ADDRESS_PAIRS] = []

        if port_security:
            self._ensure_default_security_group_on_port(context, port)
        elif attributes.is_attr_set(attrs.get(ext_sg.SECURITYGROUPS)):
            # Security groups require port security to be enabled.
            raise psec.PortSecurityAndIPRequiredForSecurityGroups()

    def _create_port_db(self, context, port):
        """Persist a new port and run every pre-commit processing step.

        Inside a single subtransaction this creates the port row, runs
        extension / port-security / security-group / address-pair /
        extra-DHCP-opt processing, records an (initially unbound) port
        binding and calls the mechanism drivers' create_port_precommit.
        Returns ``(result, mech_context)`` so the caller can invoke the
        post-commit driver hooks after the transaction has closed.
        """
        attrs = port[attributes.PORT]
        if not attrs.get('status'):
            attrs['status'] = const.PORT_STATUS_DOWN

        session = context.session
        with session.begin(subtransactions=True):
            dhcp_opts = attrs.get(edo_ext.EXTRADHCPOPTS, [])
            result = super(Ml2Plugin, self).create_port(context, port)
            self.extension_manager.process_create_port(context, attrs, result)
            self._portsec_ext_port_create_processing(context, result, port)
            # sgids must be got after portsec checked with security group
            sgids = self._get_security_groups_on_port(context, port)
            self._process_port_create_security_group(context, result, sgids)
            network = self.get_network(context, result['network_id'])
            binding = db.add_port_binding(session, result['id'])
            mech_context = driver_context.PortContext(self, context, result,
                                                      network, binding, None)
            self._process_port_binding(mech_context, attrs)

            result[addr_pair.ADDRESS_PAIRS] = (
                self._process_create_allowed_address_pairs(
                    context, result,
                    attrs.get(addr_pair.ADDRESS_PAIRS)))
            self._process_port_create_extra_dhcp_opts(context, result,
                                                      dhcp_opts)
            self.mechanism_manager.create_port_precommit(mech_context)

        return result, mech_context

    # Retried on request by oslo.db when the DB layer signals a retriable
    # failure; the DB work itself is isolated in _create_port_db above.
    @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES,
                               retry_on_request=True)
    def create_port(self, context, port):
        result, mech_context = self._create_port_db(context, port)
        # notify any plugin
that is interested in port create events kwargs = {'context': context, 'port': result} registry.notify(resources.PORT, events.AFTER_CREATE, self, **kwargs) try: self.mechanism_manager.create_port_postcommit(mech_context) except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): LOG.error(_LE("mechanism_manager.create_port_postcommit " "failed, deleting port '%s'"), result['id']) self.delete_port(context, result['id']) # REVISIT(rkukura): Is there any point in calling this before # a binding has been successfully established? self.notify_security_groups_member_updated(context, result) try: bound_context = self._bind_port_if_needed(mech_context) except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): LOG.error(_LE("_bind_port_if_needed " "failed, deleting port '%s'"), result['id']) self.delete_port(context, result['id']) return bound_context._port def create_port_bulk(self, context, ports): # Create ports which request fixed_ips first, to avoid conflicts # with automatically assigned addresses from the pool fixed_ports = list() blank_ports = list() fixed_indices = list() for index, port in enumerate(ports['ports']): fixed = port['port'].get('fixed_ips') if fixed in (None, attributes.ATTR_NOT_SPECIFIED): fixed = None else: for obj in fixed: if obj.get('ip_address'): break else: fixed = None if fixed: fixed_ports.append(port) fixed_indices.append(index) else: blank_ports.append(port) if fixed_ports and blank_ports: ports['ports'] = fixed_ports + blank_ports else: fixed_indices = None objects = self._create_bulk_ml2(attributes.PORT, context, ports) # Recreate the original order of created objects if fixed_indices: reordered = [None] * len(objects) fixed_iter = iter(fixed_indices) fixed = next(fixed_iter) blank = 0 for obj in objects: # Fill in fixed ports while indices are not exhausted if fixed is not None: reordered[fixed] = obj try: fixed = next(fixed_iter) except StopIteration: fixed = None continue # Fill in blank 
spots for the rest while reordered[blank] is not None: blank += 1 reordered[blank] = obj blank += 1 objects = reordered # REVISIT(rkukura): Is there any point in calling this before # a binding has been successfully established? results = [obj['result'] for obj in objects] self.notify_security_groups_member_updated_bulk(context, results) for obj in objects: attrs = obj['attributes'] if attrs and attrs.get(portbindings.HOST_ID): kwargs = {'context': context, 'port': obj['result']} registry.notify( resources.PORT, events.AFTER_CREATE, self, **kwargs) try: for obj in objects: obj['bound_context'] = self._bind_port_if_needed( obj['mech_context']) return [obj['bound_context']._port for obj in objects] except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): resource_ids = [res['result']['id'] for res in objects] LOG.error(_LE("_bind_port_if_needed failed. " "Deleting all ports from create bulk '%s'"), resource_ids) self._delete_objects(context, attributes.PORT, objects) # TODO(yalei) - will be simplified after security group and address pair be # converted to ext driver too. def _portsec_ext_port_update_processing(self, updated_port, context, port, id): port_security = ((updated_port.get(psec.PORTSECURITY) is None) or updated_port[psec.PORTSECURITY]) if port_security: return # check the address-pairs if self._check_update_has_allowed_address_pairs(port): # has address pairs in request raise addr_pair.AddressPairAndPortSecurityRequired() elif (not self._check_update_deletes_allowed_address_pairs(port)): # not a request for deleting the address-pairs updated_port[addr_pair.ADDRESS_PAIRS] = ( self.get_allowed_address_pairs(context, id)) # check if address pairs has been in db, if address pairs could # be put in extension driver, we can refine here. 
if updated_port[addr_pair.ADDRESS_PAIRS]: raise addr_pair.AddressPairAndPortSecurityRequired() # checks if security groups were updated adding/modifying # security groups, port security is set if self._check_update_has_security_groups(port): raise psec.PortSecurityAndIPRequiredForSecurityGroups() elif (not self._check_update_deletes_security_groups(port)): # Update did not have security groups passed in. Check # that port does not have any security groups already on it. filters = {'port_id': [id]} security_groups = ( super(Ml2Plugin, self)._get_port_security_group_bindings( context, filters) ) if security_groups: raise psec.PortSecurityPortHasSecurityGroup() def update_port(self, context, id, port): attrs = port[attributes.PORT] need_port_update_notify = False session = context.session # REVISIT: Serialize this operation with a semaphore to # prevent deadlock waiting to acquire a DB lock held by # another thread in the same process, leading to 'lock wait # timeout' errors. with contextlib.nested(lockutils.lock('db-access'), session.begin(subtransactions=True)): port_db, binding = db.get_locked_port_and_binding(session, id) if not port_db: raise exc.PortNotFound(port_id=id) mac_address_updated = self._check_mac_update_allowed( port_db, attrs, binding) need_port_update_notify |= mac_address_updated original_port = self._make_port_dict(port_db) updated_port = super(Ml2Plugin, self).update_port(context, id, port) self.extension_manager.process_update_port(context, attrs, updated_port) self._portsec_ext_port_update_processing(updated_port, context, port, id) if original_port['device_id'] != updated_port['device_id']: need_port_update_notify = True if (psec.PORTSECURITY in attrs) and ( original_port[psec.PORTSECURITY] != updated_port[psec.PORTSECURITY]): need_port_update_notify = True if addr_pair.ADDRESS_PAIRS in attrs: need_port_update_notify |= ( self.update_address_pairs_on_port(context, id, port, original_port, updated_port)) need_port_update_notify |= 
self.update_security_group_on_port( context, id, port, original_port, updated_port) network = self.get_network(context, original_port['network_id']) need_port_update_notify |= self._update_extra_dhcp_opts_on_port( context, id, port, updated_port) levels = db.get_binding_levels(session, id, binding.host) mech_context = driver_context.PortContext( self, context, updated_port, network, binding, levels, original_port=original_port) need_port_update_notify |= self._process_port_binding( mech_context, attrs) self.mechanism_manager.update_port_precommit(mech_context) # Notifications must be sent after the above transaction is complete kwargs = { 'context': context, 'port': updated_port, 'mac_address_updated': mac_address_updated, 'original_port': original_port, } registry.notify(resources.PORT, events.AFTER_UPDATE, self, **kwargs) # TODO(apech) - handle errors raised by update_port, potentially # by re-calling update_port with the previous attributes. For # now the error is propogated to the caller, which is expected to # either undo/retry the operation or delete the resource. 
self.mechanism_manager.update_port_postcommit(mech_context) need_port_update_notify |= self.is_security_group_member_updated( context, original_port, updated_port) if original_port['admin_state_up'] != updated_port['admin_state_up']: need_port_update_notify = True bound_context = self._bind_port_if_needed( mech_context, allow_notify=True, need_notify=need_port_update_notify) return bound_context._port def _process_dvr_port_binding(self, mech_context, context, attrs): session = mech_context._plugin_context.session binding = mech_context._binding port = mech_context.current port_id = port['id'] if binding.vif_type != portbindings.VIF_TYPE_UNBOUND: binding.vif_details = '' binding.vif_type = portbindings.VIF_TYPE_UNBOUND if binding.host: db.clear_binding_levels(session, port_id, binding.host) binding.host = '' self._update_port_dict_binding(port, binding) binding.host = attrs and attrs.get(portbindings.HOST_ID) binding.router_id = attrs and attrs.get('device_id') def update_dvr_port_binding(self, context, id, port): attrs = port[attributes.PORT] host = attrs and attrs.get(portbindings.HOST_ID) host_set = attributes.is_attr_set(host) if not host_set: LOG.error(_LE("No Host supplied to bind DVR Port %s"), id) return session = context.session binding = db.get_dvr_port_binding_by_host(session, id, host) device_id = attrs and attrs.get('device_id') router_id = binding and binding.get('router_id') update_required = (not binding or binding.vif_type == portbindings.VIF_TYPE_BINDING_FAILED or router_id != device_id) if update_required: try: with session.begin(subtransactions=True): orig_port = self.get_port(context, id) if not binding: binding = db.ensure_dvr_port_binding( session, id, host, router_id=device_id) network = self.get_network(context, orig_port['network_id']) levels = db.get_binding_levels(session, id, host) mech_context = driver_context.PortContext(self, context, orig_port, network, binding, levels, original_port=orig_port) 
                    self._process_dvr_port_binding(mech_context,
                                                   context, attrs)
            # The port (or a row it references) may be deleted by a
            # concurrent operation while we are rebinding; treat as a no-op.
            except (os_db_exception.DBReferenceError, exc.PortNotFound):
                LOG.debug("DVR Port %s has been deleted concurrently", id)
                return
            self._bind_port_if_needed(mech_context)

    def _pre_delete_port(self, context, port_id, port_check):
        """Do some preliminary operations before deleting the port."""
        LOG.debug("Deleting port %s", port_id)
        try:
            # notify interested parties of imminent port deletion;
            # a failure here prevents the operation from happening
            kwargs = {
                'context': context,
                'port_id': port_id,
                'port_check': port_check
            }
            registry.notify(
                resources.PORT, events.BEFORE_DELETE, self, **kwargs)
        except exceptions.CallbackFailure as e:
            # NOTE(armax): preserve old check's behavior
            if len(e.errors) == 1:
                raise e.errors[0].error
            raise exc.ServicePortInUse(port_id=port_id, reason=e)

    # Deadlocks between the lock below and per-row DB locks are resolved
    # by retrying the whole operation.
    @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES,
                               retry_on_deadlock=True)
    def delete_port(self, context, id, l3_port_check=True):
        # Give registered callbacks a chance to veto the deletion first.
        self._pre_delete_port(context, id, l3_port_check)
        # TODO(armax): get rid of the l3 dependency in the with block
        removed_routers = []
        router_ids = []
        l3plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        is_dvr_enabled = utils.is_extension_supported(
            l3plugin, const.L3_DISTRIBUTED_EXT_ALIAS)

        session = context.session
        # REVISIT: Serialize this operation with a semaphore to
        # prevent deadlock waiting to acquire a DB lock held by
        # another thread in the same process, leading to 'lock wait
        # timeout' errors.
with contextlib.nested(lockutils.lock('db-access'), session.begin(subtransactions=True)): port_db, binding = db.get_locked_port_and_binding(session, id) if not port_db: LOG.debug("The port '%s' was deleted", id) return port = self._make_port_dict(port_db) network = self.get_network(context, port['network_id']) bound_mech_contexts = [] device_owner = port['device_owner'] if device_owner == const.DEVICE_OWNER_DVR_INTERFACE: bindings = db.get_dvr_port_bindings(context.session, id) for bind in bindings: levels = db.get_binding_levels(context.session, id, bind.host) mech_context = driver_context.PortContext( self, context, port, network, bind, levels) self.mechanism_manager.delete_port_precommit(mech_context) bound_mech_contexts.append(mech_context) else: levels = db.get_binding_levels(context.session, id, binding.host) mech_context = driver_context.PortContext( self, context, port, network, binding, levels) if is_dvr_enabled and utils.is_dvr_serviced(device_owner): removed_routers = l3plugin.dvr_deletens_if_no_port( context, id) self.mechanism_manager.delete_port_precommit(mech_context) bound_mech_contexts.append(mech_context) if l3plugin: router_ids = l3plugin.disassociate_floatingips( context, id, do_notify=False) LOG.debug("Calling delete_port for %(port_id)s owned by %(owner)s", {"port_id": id, "owner": device_owner}) super(Ml2Plugin, self).delete_port(context, id) self._post_delete_port( context, port, router_ids, removed_routers, bound_mech_contexts) def _post_delete_port( self, context, port, router_ids, removed_routers, bound_mech_contexts): kwargs = { 'context': context, 'port': port, 'router_ids': router_ids, 'removed_routers': removed_routers } registry.notify(resources.PORT, events.AFTER_DELETE, self, **kwargs) try: # Note that DVR Interface ports will have bindings on # multiple hosts, and so will have multiple mech_contexts, # while other ports typically have just one. 
for mech_context in bound_mech_contexts: self.mechanism_manager.delete_port_postcommit(mech_context) except ml2_exc.MechanismDriverError: # TODO(apech) - One or more mechanism driver failed to # delete the port. Ideally we'd notify the caller of the # fact that an error occurred. LOG.error(_LE("mechanism_manager.delete_port_postcommit failed for" " port %s"), port['id']) self.notifier.port_delete(context, port['id']) self.notify_security_groups_member_updated(context, port) def get_bound_port_context(self, plugin_context, port_id, host=None, cached_networks=None): session = plugin_context.session with session.begin(subtransactions=True): try: port_db = (session.query(models_v2.Port). enable_eagerloads(False). filter(models_v2.Port.id.startswith(port_id)). one()) except sa_exc.NoResultFound: LOG.debug("No ports have port_id starting with %s", port_id) return except sa_exc.MultipleResultsFound: LOG.error(_LE("Multiple ports have port_id starting with %s"), port_id) return port = self._make_port_dict(port_db) network = (cached_networks or {}).get(port['network_id']) if not network: network = self.get_network(plugin_context, port['network_id']) if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: binding = db.get_dvr_port_binding_by_host( session, port['id'], host) if not binding: LOG.error(_LE("Binding info for DVR port %s not found"), port_id) return None levels = db.get_binding_levels(session, port_db.id, host) port_context = driver_context.PortContext( self, plugin_context, port, network, binding, levels) else: # since eager loads are disabled in port_db query # related attribute port_binding could disappear in # concurrent port deletion. # It's not an error condition. 
binding = port_db.port_binding if not binding: LOG.info(_LI("Binding info for port %s was not found, " "it might have been deleted already."), port_id) return levels = db.get_binding_levels(session, port_db.id, port_db.port_binding.host) port_context = driver_context.PortContext( self, plugin_context, port, network, binding, levels) return self._bind_port_if_needed(port_context) def update_port_status(self, context, port_id, status, host=None): """ Returns port_id (non-truncated uuid) if the port exists. Otherwise returns None. """ updated = False session = context.session # REVISIT: Serialize this operation with a semaphore to # prevent deadlock waiting to acquire a DB lock held by # another thread in the same process, leading to 'lock wait # timeout' errors. with contextlib.nested(lockutils.lock('db-access'), session.begin(subtransactions=True)): port = db.get_port(session, port_id) if not port: LOG.warning(_LW("Port %(port)s updated up by agent not found"), {'port': port_id}) return None if (port.status != status and port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE): original_port = self._make_port_dict(port) port.status = status updated_port = self._make_port_dict(port) network = self.get_network(context, original_port['network_id']) levels = db.get_binding_levels(session, port.id, port.port_binding.host) mech_context = driver_context.PortContext( self, context, updated_port, network, port.port_binding, levels, original_port=original_port) self.mechanism_manager.update_port_precommit(mech_context) updated = True elif port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: binding = db.get_dvr_port_binding_by_host( session, port['id'], host) if not binding: return binding['status'] = status binding.update(binding) updated = True if (updated and port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE): with contextlib.nested(lockutils.lock('db-access'), session.begin(subtransactions=True)): port = db.get_port(session, port_id) if not port: 
                    LOG.warning(_LW("Port %s not found during update"),
                                port_id)
                    return
                original_port = self._make_port_dict(port)
                network = self.get_network(context,
                                           original_port['network_id'])
                # Recompute the aggregate status across all DVR host
                # bindings for this port.
                port.status = db.generate_dvr_port_status(session, port['id'])
                updated_port = self._make_port_dict(port)
                levels = db.get_binding_levels(session, port_id, host)
                mech_context = (driver_context.PortContext(
                    self, context, updated_port, network,
                    binding, levels, original_port=original_port))
                self.mechanism_manager.update_port_precommit(mech_context)

        if updated:
            self.mechanism_manager.update_port_postcommit(mech_context)

        if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
            db.delete_dvr_port_binding_if_stale(session, binding)

        return port['id']

    def port_bound_to_host(self, context, port_id, host):
        """Return True if the port is bound to the given host.

        DVR interface ports can have one binding per host, so every
        binding is checked; other ports have a single binding host.
        """
        port = db.get_port(context.session, port_id)
        if not port:
            LOG.debug("No Port match for: %s", port_id)
            return False
        if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
            bindings = db.get_dvr_port_bindings(context.session, port_id)
            for b in bindings:
                if b.host == host:
                    return True
            LOG.debug("No binding found for DVR port %s", port['id'])
            return False
        else:
            port_host = db.get_port_binding_host(context.session, port_id)
            return (port_host == host)

    def get_ports_from_devices(self, devices):
        """Return port dicts (with security groups) for agent device names.

        Each returned port carries a 'device' key mapping it back to the
        device string the caller asked about.  Device ids may be truncated
        port ids, hence the startswith() match below.
        """
        port_ids_to_devices = dict((self._device_to_port_id(device), device)
                                   for device in devices)
        port_ids = port_ids_to_devices.keys()
        ports = db.get_ports_and_sgs(port_ids)
        for port in ports:
            # map back to original requested id
            port_id = next((port_id for port_id in port_ids
                            if port['id'].startswith(port_id)), None)
            port['device'] = port_ids_to_devices.get(port_id)

        return ports

    def _device_to_port_id(self, device):
        """Translate an agent device name into a (possibly partial) port id.

        Strips the tap-device prefix when present; otherwise, if the
        string is not uuid-like, tries to resolve it as a MAC address.
        Falls back to returning the input unchanged.
        """
        # REVISIT(rkukura): Consider calling into MechanismDrivers to
        # process device names, or having MechanismDrivers supply list
        # of device prefixes to strip.
        if device.startswith(const.TAP_DEVICE_PREFIX):
            return device[len(const.TAP_DEVICE_PREFIX):]
        else:
            # REVISIT(irenab): Consider calling into bound MD to
            # handle the get_device_details RPC, then remove the 'else' clause
            if not uuidutils.is_uuid_like(device):
                port = db.get_port_from_device_mac(device)
                if port:
                    return port.id
        return device
It’s TechKnowledgy time again! Welcome to the 6th annual Virtual TechKnowledgy Conference! The Virginia Department of Education’s Training and Technical Assistance Centers are committed to helping professionals and families learn about new AT solutions by once again hosting the Virtual TechKnowledgy Conference. Explore the Conference Program to read full descriptions of the live webinars offered in November. February and April webinar descriptions will be posted at a later date. Don’t forget to visit the recently updated Virtual Exhibits Hall to learn about new AT products and resources offered by AT vendors.
import os
import sys
import tempfile
import shutil
import unittest

import pkg_resources
from setuptools.command import egg_info
from setuptools import svn_utils

ENTRIES_V10 = pkg_resources.resource_string(__name__, 'entries-v10')
"An entries file generated with svn 1.6.17 against the legacy Setuptools repo"


class TestEggInfo(unittest.TestCase):
    """Tests for egg_info's SVN revision discovery.

    Each test runs inside a freshly created temporary directory containing
    an empty ``.svn`` directory, so a fake SVN ``entries`` file can be
    written without touching a real working copy.
    """

    def setUp(self):
        # Isolated fake checkout; chdir so egg_info sees it as the cwd.
        self.test_dir = tempfile.mkdtemp()
        os.mkdir(os.path.join(self.test_dir, '.svn'))

        self.old_cwd = os.getcwd()
        os.chdir(self.test_dir)

    def tearDown(self):
        os.chdir(self.old_cwd)
        shutil.rmtree(self.test_dir)

    def _write_entries(self, entries):
        """Write raw bytes as the fake checkout's .svn/entries file."""
        fn = os.path.join(self.test_dir, '.svn', 'entries')
        # Fix: use a context manager so the handle is closed even if the
        # write raises (the original opened and closed the file manually).
        with open(fn, 'wb') as entries_f:
            entries_f.write(entries)

    def test_version_10_format(self):
        """get_svn_revision() parses a v10 entries file via svnversion."""
        #keeping this set for 1.6 is a good check on the get_svn_revision
        #to ensure I return using svnversion what would had been returned
        version_str = svn_utils.SvnInfo.get_svn_version()
        version = [int(x) for x in version_str.split('.')[:2]]
        if version != [1, 6]:
            # Only SVN 1.6 produces this on-disk format; skip otherwise.
            # (skipTest is absent on very old unittest versions.)
            if hasattr(self, 'skipTest'):
                self.skipTest('')
            else:
                sys.stderr.write('\n Skipping due to SVN Version\n')
                return

        self._write_entries(ENTRIES_V10)
        rev = egg_info.egg_info.get_svn_revision()
        self.assertEqual(rev, '89000')

    def test_version_10_format_legacy_parser(self):
        """Same as above, but with the svn binaries hidden from PATH so the
        pure-Python legacy entries parser is exercised instead."""
        path_variable = None
        for env in os.environ:
            if env.lower() == 'path':
                path_variable = env

        if path_variable is None:
            self.skipTest('Cannot figure out how to modify path')

        # Blank out PATH so subprocess lookups of `svn` fail, then restore.
        old_path = os.environ[path_variable]
        os.environ[path_variable] = ''
        try:
            self._write_entries(ENTRIES_V10)
            rev = egg_info.egg_info.get_svn_revision()
        finally:
            os.environ[path_variable] = old_path

        self.assertEqual(rev, '89000')


def test_suite():
    """Return the unittest suite for this module (setup.py test hook)."""
    return unittest.defaultTestLoader.loadTestsFromName(__name__)
Register by March 31 & Save! With Chinese students from Guangdong Province, China, campers will explore Chinese and American cultures. Afternoons will include a variety of recreational opportunities, including swimming and hiking in Cuyahoga Valley National Park. This camp is a hands-on learning experience with instruction from teachers from The Lippman School. *Cost includes daily lunches. ​Camp ends daily at 4 p.m. Interested in learning more about our camp and youth programs? Contact Ashley Garson, our Camp & Youth Director!
#!/usr/bin/env python3
"""Command-line entry point for tyled, a lightweight image tiler."""
import argparse
import re
import logging

from PIL import Image

from tyled.effects import apply_effects, apply_filters
from tyled.patterns import apply_pattern


def main(args):
    """Build the output image from tiles (files or generated colours).

    Tiles come from exactly one of: --tiles (image files), --xcolours
    (an Xresources file), or --colours (a literal colour list); raises
    ValueError when none is given.
    """
    out = Image.new('RGBA', (args.width, args.height), args.background)
    tiles = []
    if args.tiles:
        for tile in args.tiles.split(','):
            tile = Image.open(tile).convert('RGBA')
            logging.debug('Opened tile {0}'.format(tile))
            check_tile(tile)
            tiles.append(tile)
    elif args.xcolours:
        colours = parse_xresources(args.xcolours)
        tiles = generate_tiles(colours, args.size)
    elif args.colours:
        colours = args.colours.split(',')
        tiles = generate_tiles(colours, args.size)
    else:
        raise ValueError('No list of tiles or colour information have been inputted')

    if args.tile_filters:
        tiles = apply_filters(tiles, args.tile_filters)

    out = apply_pattern(out, tiles, args.pattern)

    if args.out_filters:
        out = apply_filters(out, args.out_filters)
    if args.effects:
        out = apply_effects(out, args.effects)

    out.save(args.out)
    if args.show:
        out.show()


def generate_tiles(colours, size):
    """Create one solid-colour RGBA tile per colour.

    `size` is a 'WxH' string, e.g. '10x10'.
    """
    size = tuple([int(x) for x in size.lower().split('x')])
    tiles = []
    for colour in colours:
        tiles.append(Image.new('RGBA', size, colour))
        logging.debug('Generated tile with colour {0}'.format(colour))
    return tiles


def parse_xresources(filename):
    """Extract '#rrggbb' colour values from an Xresources file.

    Matches colorN / foreground / background entries; '!' lines are
    comments and are skipped.
    """
    colours = []
    # Fix: raw string literal -- '\s' and '\d' in a plain string are
    # invalid escapes (SyntaxWarning on modern Python).
    colour_re = re.compile(r'.*?(color[^:]+|foreground|background):\s*(#[\da-z]{6})')
    with open(filename, 'r') as xc:
        for line in xc.readlines():
            if line.startswith('!'):
                continue
            match = colour_re.search(line.lower())
            if match:
                _, colour = match.groups()
                logging.debug('Found colour {0} in file {1}'.format(colour, filename))
                colours.append(colour)
    return colours


def check_tile(tile):
    """Shrink oversized tiles in place to at most 40x40."""
    if tile.size[0] > 40:
        # Fix: logging.warn is a deprecated alias (removed in Python 3.13);
        # use logging.warning.
        logging.warning('Tile image is larger than 40x40, making it into a thumbnail')
        tile.thumbnail((40, 40))


def init():
    """Parse command-line arguments, validate them, and run main()."""
    parser = argparse.ArgumentParser(description='A lightweight image tiler written in Python.',
            conflict_handler='resolve')
    parser.add_argument('-t', '--tiles', type=str, help='A comma separated list '
            'of tile images')
    parser.add_argument('-o', '--out', type=str,
            help='The name of the image used as output', required=True)
    parser.add_argument('-bg', '--background', type=str, default='#000000',
            help='The background colour that will be displayed where the tile has alpha')
    # -h is reclaimed from argparse's help flag via conflict_handler='resolve'.
    parser.add_argument('-w', '--width', type=int, required=True)
    parser.add_argument('-h', '--height', type=int, required=True)
    parser.add_argument('-of', '--out-filters', type=str, help='A comma '
            'separated list of filters to be applied to the output image. Args are colon '
            'separated and dictate how many times to apply the filter')
    parser.add_argument('-tf', '--tile-filters', type=str, help='A comma '
            'separated list of filters to be applied to the tile image. Args are colon '
            'separated and dictate how many times to apply the filter')
    parser.add_argument('-e', '--effects', type=str, help='A comma '
            'separated list of effects to be applied to the output image. Args are'
            'colon separated e.g. effect_foo:1:2:3')
    parser.add_argument('-sh', '--show', action='store_true',
            help='Show the image upon completion')
    parser.add_argument('-xc', '--xcolours', type=str, help='The path to the '
            'file which contains the xcolours to be used')
    parser.add_argument('-p', '--pattern', type=str, help='The pattern that '
            'the tile should be arranged in', default='grid')
    parser.add_argument('-c', '--colours', type=str, help='The colours that '
            'should be used for generating tiles.')
    parser.add_argument('-s', '--size', type=str, help='The size of the tiles that will be '
            'generated if colours are passed.', default='10x10')
    parser.add_argument('-v', '--verbose', action='store_true')
    args = parser.parse_args()

    # Mutually exclusive colour sources.
    if args.xcolours and args.tiles:
        raise ValueError('Xcolours and tile image can\'t both be set')
    if args.xcolours and args.colours:
        raise ValueError('Xcolours and colours can\'t both be set')

    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.WARN)

    main(args)


if __name__ == '__main__':
    init()
Phylogenetic relationships of the genus Eriobotrya Lindl. were examined based on the nrDNA Adh sequences. A phylogenetic tree of 14 loquat accessions (species, varieties and forma) was generated by using Photinieae serrulaia L. as an outgroup and Rhaphiolepis indica (L.) Lindl. as an ingroup, which represent the two closest genera of Eriobotyra. The results showed that these loquat accessions were divided into two main clades in the consensus tree. Clade I included E. seguinii Card and group A formed by E. henryi Nakai, E.bengalensis Hook.f., and forma angustifolia Vidal. Clade II is composed of the other taxa which included three groups. E. cavaleriei Rehd and E. fragrans Champ formed group B; group C consisted of E. prinoides Rehd. & Wils. var. dadunensis H.Z.Zhang, and E. japonica Lindl.; and group D included E. deflexa Nakai and E. deflexa Nakai Var.buisanensis NaKai. Since E. deflexa Nakai, E. deflexa Nakai Var.buisanensis NaKai and E. kwangsiensis Chun were closer in the phylogenetic tree, while E. prinoides Rehd. & Wils. var. dadunensis H.Z.Zhang, E. japonica Lindl, E. prinoides Rehd & Wils and E.elliptica Lindl. were close to each other, they may be located at a similar place of the phylogenetic stage. However, E. malipoensis Kuan needs further study of its phylogenetic relationship, for it was separated from the others. Results further support the theory that E. cavaleriei Rehd could be a variety of E. fragrans Champ.
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_lite_support.metadata.metadata."""

import enum
import os

from absl.testing import parameterized
import six
import tensorflow as tf

import flatbuffers
from tensorflow.python.platform import resource_loader

from tensorflow_lite_support.metadata import metadata_schema_py_generated as _metadata_fb
from tensorflow_lite_support.metadata import schema_py_generated as _schema_fb
from tensorflow_lite_support.metadata.python import metadata as _metadata


class Tokenizer(enum.Enum):
  """Tokenizer process-unit kinds exercised by the parameterized tests."""
  BERT_TOKENIZER = 0
  SENTENCE_PIECE = 1


class TensorType(enum.Enum):
  """Whether a parameterized test targets an input or an output tensor."""
  INPUT = 0
  OUTPUT = 1


def _read_file(file_name, mode="rb"):
  """Return the full contents of file_name (binary by default)."""
  with open(file_name, mode) as f:
    return f.read()


class MetadataTest(tf.test.TestCase, parameterized.TestCase):
  """Shared fixtures: a dummy TFLite model, metadata files and temp files."""

  def setUp(self):
    super(MetadataTest, self).setUp()
    self._invalid_model_buf = None
    self._invalid_file = "not_existed_file"
    self._model_buf = self._create_model_buf()
    self._model_file = self.create_tempfile().full_path
    with open(self._model_file, "wb") as f:
      f.write(self._model_buf)
    self._metadata_file = self._create_metadata_file()
    self._metadata_file_with_version = self._create_metadata_file_with_version(
        self._metadata_file, "1.0.0")
    self._file1 = self.create_tempfile("file1").full_path
    self._file2 = self.create_tempfile("file2").full_path
    self._file2_content = b"file2_content"
    with open(self._file2, "wb") as f:
      f.write(self._file2_content)
    self._file3 = self.create_tempfile("file3").full_path

  def _create_model_buf(self):
    # Create a model with two inputs and one output, which matches the metadata
    # created by _create_metadata_file().
    metadata_field = _schema_fb.MetadataT()
    subgraph = _schema_fb.SubGraphT()
    subgraph.inputs = [0, 1]
    subgraph.outputs = [2]

    metadata_field.name = "meta"
    buffer_field = _schema_fb.BufferT()
    model = _schema_fb.ModelT()
    model.subgraphs = [subgraph]
    # Creates the metadata and buffer fields for testing purposes.
    model.metadata = [metadata_field, metadata_field]
    model.buffers = [buffer_field, buffer_field, buffer_field]
    model_builder = flatbuffers.Builder(0)
    model_builder.Finish(
        model.Pack(model_builder),
        _metadata.MetadataPopulator.TFLITE_FILE_IDENTIFIER)
    return model_builder.Output()

  def _create_metadata_file(self):
    """Build a metadata flatbuffer (2 inputs, 1 output, 2 associated files)
    and write it to a temp file; also records self.expected_recorded_files."""
    associated_file1 = _metadata_fb.AssociatedFileT()
    associated_file1.name = b"file1"
    associated_file2 = _metadata_fb.AssociatedFileT()
    associated_file2.name = b"file2"
    self.expected_recorded_files = [
        six.ensure_str(associated_file1.name),
        six.ensure_str(associated_file2.name)
    ]

    input_meta = _metadata_fb.TensorMetadataT()
    output_meta = _metadata_fb.TensorMetadataT()
    output_meta.associatedFiles = [associated_file2]
    subgraph = _metadata_fb.SubGraphMetadataT()
    # Create a model with two inputs and one output.
    subgraph.inputTensorMetadata = [input_meta, input_meta]
    subgraph.outputTensorMetadata = [output_meta]

    model_meta = _metadata_fb.ModelMetadataT()
    model_meta.name = "Mobilenet_quantized"
    model_meta.associatedFiles = [associated_file1]
    model_meta.subgraphMetadata = [subgraph]
    b = flatbuffers.Builder(0)
    b.Finish(
        model_meta.Pack(b),
        _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)

    metadata_file = self.create_tempfile().full_path
    with open(metadata_file, "wb") as f:
      f.write(b.Output())
    return metadata_file

  def _create_model_buffer_with_wrong_identifier(self):
    wrong_identifier = b"widn"
    model = _schema_fb.ModelT()
    model_builder = flatbuffers.Builder(0)
    model_builder.Finish(model.Pack(model_builder), wrong_identifier)
    return model_builder.Output()

  def _create_metadata_buffer_with_wrong_identifier(self):
    # Creates a metadata with wrong identifier
    wrong_identifier = b"widn"
    metadata = _metadata_fb.ModelMetadataT()
    metadata_builder = flatbuffers.Builder(0)
    metadata_builder.Finish(metadata.Pack(metadata_builder), wrong_identifier)
    return metadata_builder.Output()

  def _populate_metadata_with_identifier(self, model_buf, metadata_buf,
                                         identifier):
    # For testing purposes only. MetadataPopulator cannot populate metadata with
    # wrong identifiers.
    model = _schema_fb.ModelT.InitFromObj(
        _schema_fb.Model.GetRootAsModel(model_buf, 0))
    buffer_field = _schema_fb.BufferT()
    buffer_field.data = metadata_buf
    model.buffers = [buffer_field]
    # Creates a new metadata field.
    metadata_field = _schema_fb.MetadataT()
    metadata_field.name = _metadata.MetadataPopulator.METADATA_FIELD_NAME
    metadata_field.buffer = len(model.buffers) - 1
    model.metadata = [metadata_field]
    b = flatbuffers.Builder(0)
    b.Finish(model.Pack(b), identifier)
    return b.Output()

  def _create_metadata_file_with_version(self, metadata_file, min_version):
    # Creates a new metadata file with the specified min_version for testing
    # purposes.
    metadata_buf = bytearray(_read_file(metadata_file))

    metadata = _metadata_fb.ModelMetadataT.InitFromObj(
        _metadata_fb.ModelMetadata.GetRootAsModelMetadata(metadata_buf, 0))
    metadata.minParserVersion = min_version

    b = flatbuffers.Builder(0)
    b.Finish(
        metadata.Pack(b), _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)

    metadata_file_with_version = self.create_tempfile().full_path
    with open(metadata_file_with_version, "wb") as f:
      f.write(b.Output())
    return metadata_file_with_version


class MetadataPopulatorTest(MetadataTest):
  """Tests for _metadata.MetadataPopulator (packing metadata and files)."""

  def _create_bert_tokenizer(self):
    vocab_file_name = "bert_vocab"
    vocab = _metadata_fb.AssociatedFileT()
    vocab.name = vocab_file_name
    vocab.type = _metadata_fb.AssociatedFileType.VOCABULARY

    tokenizer = _metadata_fb.ProcessUnitT()
    tokenizer.optionsType = _metadata_fb.ProcessUnitOptions.BertTokenizerOptions
    tokenizer.options = _metadata_fb.BertTokenizerOptionsT()
    tokenizer.options.vocabFile = [vocab]
    return tokenizer, [vocab_file_name]

  def _create_sentence_piece_tokenizer(self):
    sp_model_name = "sp_model"
    vocab_file_name = "sp_vocab"
    sp_model = _metadata_fb.AssociatedFileT()
    sp_model.name = sp_model_name
    vocab = _metadata_fb.AssociatedFileT()
    vocab.name = vocab_file_name
    vocab.type = _metadata_fb.AssociatedFileType.VOCABULARY

    tokenizer = _metadata_fb.ProcessUnitT()
    tokenizer.optionsType = (
        _metadata_fb.ProcessUnitOptions.SentencePieceTokenizerOptions)
    tokenizer.options = _metadata_fb.SentencePieceTokenizerOptionsT()
    tokenizer.options.sentencePieceModel = [sp_model]
    tokenizer.options.vocabFile = [vocab]
    return tokenizer, [sp_model_name, vocab_file_name]

  def _create_tokenizer(self, tokenizer_type):
    """Dispatch to the tokenizer factory matching tokenizer_type."""
    if tokenizer_type is Tokenizer.BERT_TOKENIZER:
      return self._create_bert_tokenizer()
    elif tokenizer_type is Tokenizer.SENTENCE_PIECE:
      return self._create_sentence_piece_tokenizer()
    else:
      raise ValueError(
          "The tokenizer type, {0}, is unsupported.".format(tokenizer_type))

  def _create_tempfiles(self, file_names):
    tempfiles = []
    for name in file_names:
      tempfiles.append(self.create_tempfile(name).full_path)
    return tempfiles

  def _create_model_meta_with_subgraph_meta(self, subgraph_meta):
    """Wrap a single SubGraphMetadataT into a serialized ModelMetadata."""
    model_meta = _metadata_fb.ModelMetadataT()
    model_meta.subgraphMetadata = [subgraph_meta]
    b = flatbuffers.Builder(0)
    b.Finish(
        model_meta.Pack(b),
        _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
    return b.Output()

  def testToValidModelFile(self):
    populator = _metadata.MetadataPopulator.with_model_file(self._model_file)
    self.assertIsInstance(populator, _metadata.MetadataPopulator)

  def testToInvalidModelFile(self):
    with self.assertRaises(IOError) as error:
      _metadata.MetadataPopulator.with_model_file(self._invalid_file)
    self.assertEqual("File, '{0}', does not exist.".format(self._invalid_file),
                     str(error.exception))

  def testToValidModelBuffer(self):
    populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
    self.assertIsInstance(populator, _metadata.MetadataPopulator)

  def testToInvalidModelBuffer(self):
    with self.assertRaises(ValueError) as error:
      _metadata.MetadataPopulator.with_model_buffer(self._invalid_model_buf)
    self.assertEqual("model_buf cannot be empty.", str(error.exception))

  def testToModelBufferWithWrongIdentifier(self):
    model_buf = self._create_model_buffer_with_wrong_identifier()
    with self.assertRaises(ValueError) as error:
      _metadata.MetadataPopulator.with_model_buffer(model_buf)
    self.assertEqual(
        "The model provided does not have the expected identifier, and "
        "may not be a valid TFLite model.", str(error.exception))

  def testSinglePopulateAssociatedFile(self):
    populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
    populator.load_associated_files([self._file1])
    populator.populate()

    packed_files = populator.get_packed_associated_file_list()
    expected_packed_files = [os.path.basename(self._file1)]
    self.assertEqual(set(packed_files), set(expected_packed_files))

  def testRepeatedPopulateAssociatedFile(self):
    populator = _metadata.MetadataPopulator.with_model_file(self._model_file)
    populator.load_associated_files([self._file1, self._file2])
    # Loads file2 multiple times.
    populator.load_associated_files([self._file2])
    populator.populate()

    packed_files = populator.get_packed_associated_file_list()
    expected_packed_files = [
        os.path.basename(self._file1),
        os.path.basename(self._file2)
    ]
    self.assertLen(packed_files, 2)
    self.assertEqual(set(packed_files), set(expected_packed_files))

    # Check if the model buffer read from file is the same as that read from
    # get_model_buffer().
    model_buf_from_file = _read_file(self._model_file)
    model_buf_from_getter = populator.get_model_buffer()
    self.assertEqual(model_buf_from_file, model_buf_from_getter)

  def testPopulateInvalidAssociatedFile(self):
    populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
    with self.assertRaises(IOError) as error:
      populator.load_associated_files([self._invalid_file])
    self.assertEqual("File, '{0}', does not exist.".format(self._invalid_file),
                     str(error.exception))

  def testPopulatePackedAssociatedFile(self):
    populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
    populator.load_associated_files([self._file1])
    populator.populate()
    with self.assertRaises(ValueError) as error:
      populator.load_associated_files([self._file1])
      populator.populate()
    self.assertEqual(
        "File, '{0}', has already been packed.".format(
            os.path.basename(self._file1)), str(error.exception))

  def testLoadAssociatedFileBuffers(self):
    populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
    file_buffer = _read_file(self._file1)
    populator.load_associated_file_buffers({self._file1: file_buffer})
    populator.populate()

    packed_files = populator.get_packed_associated_file_list()
    expected_packed_files = [os.path.basename(self._file1)]
    self.assertEqual(set(packed_files), set(expected_packed_files))

  def testRepeatedLoadAssociatedFileBuffers(self):
    file_buffer1 = _read_file(self._file1)
    file_buffer2 = _read_file(self._file2)
    populator = _metadata.MetadataPopulator.with_model_file(self._model_file)

    populator.load_associated_file_buffers({
        self._file1: file_buffer1,
        self._file2: file_buffer2
    })
    # Loads file2 multiple times.
    populator.load_associated_file_buffers({self._file2: file_buffer2})
    populator.populate()

    packed_files = populator.get_packed_associated_file_list()
    expected_packed_files = [
        os.path.basename(self._file1),
        os.path.basename(self._file2)
    ]
    self.assertEqual(set(packed_files), set(expected_packed_files))

    # Check if the model buffer read from file is the same as that read from
    # get_model_buffer().
    model_buf_from_file = _read_file(self._model_file)
    model_buf_from_getter = populator.get_model_buffer()
    self.assertEqual(model_buf_from_file, model_buf_from_getter)

  def testLoadPackedAssociatedFileBuffersFails(self):
    populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
    file_buffer = _read_file(self._file1)
    populator.load_associated_file_buffers({self._file1: file_buffer})
    populator.populate()

    # Load file1 again should fail.
    with self.assertRaises(ValueError) as error:
      populator.load_associated_file_buffers({self._file1: file_buffer})
      populator.populate()
    self.assertEqual(
        "File, '{0}', has already been packed.".format(
            os.path.basename(self._file1)), str(error.exception))

  def testGetPackedAssociatedFileList(self):
    populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
    packed_files = populator.get_packed_associated_file_list()
    self.assertEqual(packed_files, [])

  def testPopulateMetadataFileToEmptyModelFile(self):
    populator = _metadata.MetadataPopulator.with_model_file(self._model_file)
    populator.load_metadata_file(self._metadata_file)
    populator.load_associated_files([self._file1, self._file2])
    populator.populate()

    model_buf_from_file = _read_file(self._model_file)
    model = _schema_fb.Model.GetRootAsModel(model_buf_from_file, 0)
    # self._model_file already has two elements in the metadata field, so the
    # populated TFLite metadata will be the third element.
    metadata_field = model.Metadata(2)
    self.assertEqual(
        six.ensure_str(metadata_field.Name()),
        six.ensure_str(_metadata.MetadataPopulator.METADATA_FIELD_NAME))

    buffer_index = metadata_field.Buffer()
    buffer_data = model.Buffers(buffer_index)
    metadata_buf_np = buffer_data.DataAsNumpy()
    metadata_buf = metadata_buf_np.tobytes()
    expected_metadata_buf = bytearray(
        _read_file(self._metadata_file_with_version))
    self.assertEqual(metadata_buf, expected_metadata_buf)

    recorded_files = populator.get_recorded_associated_file_list()
    self.assertEqual(set(recorded_files), set(self.expected_recorded_files))

    # Up to now, we've proved the correctness of the model buffer that read from
    # file. Then we'll test if get_model_buffer() gives the same model buffer.
    model_buf_from_getter = populator.get_model_buffer()
    self.assertEqual(model_buf_from_file, model_buf_from_getter)

  def testPopulateMetadataFileWithoutAssociatedFiles(self):
    populator = _metadata.MetadataPopulator.with_model_file(self._model_file)
    populator.load_metadata_file(self._metadata_file)
    populator.load_associated_files([self._file1])
    # Suppose to populate self._file2, because it is recorded in the metadta.
    with self.assertRaises(ValueError) as error:
      populator.populate()
    self.assertEqual(("File, '{0}', is recorded in the metadata, but has "
                      "not been loaded into the populator.").format(
                          os.path.basename(self._file2)), str(error.exception))

  def testPopulateMetadataBufferWithWrongIdentifier(self):
    metadata_buf = self._create_metadata_buffer_with_wrong_identifier()
    populator = _metadata.MetadataPopulator.with_model_file(self._model_file)
    with self.assertRaises(ValueError) as error:
      populator.load_metadata_buffer(metadata_buf)
    self.assertEqual(
        "The metadata buffer does not have the expected identifier, and may not"
        " be a valid TFLite Metadata.", str(error.exception))

  def _assert_golden_metadata(self, model_file):
    """Assert model_file's third metadata entry equals the golden metadata."""
    model_buf_from_file = _read_file(model_file)
    model = _schema_fb.Model.GetRootAsModel(model_buf_from_file, 0)
    # There are two elements in model.Metadata array before the population.
    # Metadata should be packed to the third element in the array.
    metadata_field = model.Metadata(2)
    self.assertEqual(
        six.ensure_str(metadata_field.Name()),
        six.ensure_str(_metadata.MetadataPopulator.METADATA_FIELD_NAME))

    buffer_index = metadata_field.Buffer()
    buffer_data = model.Buffers(buffer_index)
    metadata_buf_np = buffer_data.DataAsNumpy()
    metadata_buf = metadata_buf_np.tobytes()
    expected_metadata_buf = bytearray(
        _read_file(self._metadata_file_with_version))
    self.assertEqual(metadata_buf, expected_metadata_buf)

  def testPopulateMetadataFileToModelWithMetadataAndAssociatedFiles(self):
    # First, creates a dummy metadata different from self._metadata_file. It
    # needs to have the same input/output tensor numbers as self._model_file.
    # Populates it and the associated files into the model.
    input_meta = _metadata_fb.TensorMetadataT()
    output_meta = _metadata_fb.TensorMetadataT()
    subgraph = _metadata_fb.SubGraphMetadataT()
    # Create a model with two inputs and one output.
    subgraph.inputTensorMetadata = [input_meta, input_meta]
    subgraph.outputTensorMetadata = [output_meta]
    model_meta = _metadata_fb.ModelMetadataT()
    model_meta.subgraphMetadata = [subgraph]
    b = flatbuffers.Builder(0)
    b.Finish(
        model_meta.Pack(b),
        _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
    metadata_buf = b.Output()

    # Populate the metadata.
    populator1 = _metadata.MetadataPopulator.with_model_file(self._model_file)
    populator1.load_metadata_buffer(metadata_buf)
    populator1.load_associated_files([self._file1, self._file2])
    populator1.populate()

    # Then, populate the metadata again.
    populator2 = _metadata.MetadataPopulator.with_model_file(self._model_file)
    populator2.load_metadata_file(self._metadata_file)
    populator2.populate()

    # Test if the metadata is populated correctly.
    self._assert_golden_metadata(self._model_file)

  def testPopulateMetadataFileToModelFileWithMetadataAndBufFields(self):
    populator = _metadata.MetadataPopulator.with_model_file(self._model_file)
    populator.load_metadata_file(self._metadata_file)
    populator.load_associated_files([self._file1, self._file2])
    populator.populate()

    # Tests if the metadata is populated correctly.
    self._assert_golden_metadata(self._model_file)

    recorded_files = populator.get_recorded_associated_file_list()
    self.assertEqual(set(recorded_files), set(self.expected_recorded_files))

    # Up to now, we've proved the correctness of the model buffer that read from
    # file. Then we'll test if get_model_buffer() gives the same model buffer.
    model_buf_from_file = _read_file(self._model_file)
    model_buf_from_getter = populator.get_model_buffer()
    self.assertEqual(model_buf_from_file, model_buf_from_getter)

  def testPopulateInvalidMetadataFile(self):
    populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
    with self.assertRaises(IOError) as error:
      populator.load_metadata_file(self._invalid_file)
    self.assertEqual("File, '{0}', does not exist.".format(self._invalid_file),
                     str(error.exception))

  def testPopulateInvalidMetadataBuffer(self):
    populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
    with self.assertRaises(ValueError) as error:
      populator.load_metadata_buffer([])
    self.assertEqual("The metadata to be populated is empty.",
                     str(error.exception))

  def testGetModelBufferBeforePopulatingData(self):
    populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
    model_buf = populator.get_model_buffer()
    expected_model_buf = self._model_buf
    self.assertEqual(model_buf, expected_model_buf)

  def testLoadMetadataBufferWithNoSubgraphMetadataThrowsException(self):
    # Create a dummy metadata without Subgraph.
    model_meta = _metadata_fb.ModelMetadataT()
    builder = flatbuffers.Builder(0)
    builder.Finish(
        model_meta.Pack(builder),
        _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
    meta_buf = builder.Output()

    populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
    with self.assertRaises(ValueError) as error:
      populator.load_metadata_buffer(meta_buf)
    self.assertEqual(
        "The number of SubgraphMetadata should be exactly one, but got 0.",
        str(error.exception))

  def testLoadMetadataBufferWithWrongInputMetaNumberThrowsException(self):
    # Create a dummy metadata with no input tensor metadata, while the expected
    # number is 2.
    output_meta = _metadata_fb.TensorMetadataT()
    subgprah_meta = _metadata_fb.SubGraphMetadataT()
    subgprah_meta.outputTensorMetadata = [output_meta]
    model_meta = _metadata_fb.ModelMetadataT()
    model_meta.subgraphMetadata = [subgprah_meta]
    builder = flatbuffers.Builder(0)
    builder.Finish(
        model_meta.Pack(builder),
        _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
    meta_buf = builder.Output()

    populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
    with self.assertRaises(ValueError) as error:
      populator.load_metadata_buffer(meta_buf)
    self.assertEqual(
        ("The number of input tensors (2) should match the number of "
         "input tensor metadata (0)"), str(error.exception))

  def testLoadMetadataBufferWithWrongOutputMetaNumberThrowsException(self):
    # Create a dummy metadata with no output tensor metadata, while the expected
    # number is 1.
    input_meta = _metadata_fb.TensorMetadataT()
    subgprah_meta = _metadata_fb.SubGraphMetadataT()
    subgprah_meta.inputTensorMetadata = [input_meta, input_meta]
    model_meta = _metadata_fb.ModelMetadataT()
    model_meta.subgraphMetadata = [subgprah_meta]
    builder = flatbuffers.Builder(0)
    builder.Finish(
        model_meta.Pack(builder),
        _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
    meta_buf = builder.Output()

    populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
    with self.assertRaises(ValueError) as error:
      populator.load_metadata_buffer(meta_buf)
    self.assertEqual(
        ("The number of output tensors (1) should match the number of "
         "output tensor metadata (0)"), str(error.exception))

  def testLoadMetadataAndAssociatedFilesShouldSucceeds(self):
    # Create a src model with metadata and two associated files.
    src_model_buf = self._create_model_buf()
    populator_src = _metadata.MetadataPopulator.with_model_buffer(src_model_buf)
    populator_src.load_metadata_file(self._metadata_file)
    populator_src.load_associated_files([self._file1, self._file2])
    populator_src.populate()

    # Create a model to be populated with the metadata and files from
    # src_model_buf.
    dst_model_buf = self._create_model_buf()
    populator_dst = _metadata.MetadataPopulator.with_model_buffer(dst_model_buf)
    populator_dst.load_metadata_and_associated_files(
        populator_src.get_model_buffer())
    populator_dst.populate()

    # Tests if the metadata and associated files are populated correctly.
    dst_model_file = self.create_tempfile().full_path
    with open(dst_model_file, "wb") as f:
      f.write(populator_dst.get_model_buffer())
    self._assert_golden_metadata(dst_model_file)

    recorded_files = populator_dst.get_recorded_associated_file_list()
    self.assertEqual(set(recorded_files), set(self.expected_recorded_files))

  @parameterized.named_parameters(
      {
          "testcase_name": "InputTensorWithBert",
          "tensor_type": TensorType.INPUT,
          "tokenizer_type": Tokenizer.BERT_TOKENIZER
      }, {
          "testcase_name": "OutputTensorWithBert",
          "tensor_type": TensorType.OUTPUT,
          "tokenizer_type": Tokenizer.BERT_TOKENIZER
      }, {
          "testcase_name": "InputTensorWithSentencePiece",
          "tensor_type": TensorType.INPUT,
          "tokenizer_type": Tokenizer.SENTENCE_PIECE
      }, {
          "testcase_name": "OutputTensorWithSentencePiece",
          "tensor_type": TensorType.OUTPUT,
          "tokenizer_type": Tokenizer.SENTENCE_PIECE
      })
  def testGetRecordedAssociatedFileListWithSubgraphTensor(
      self, tensor_type, tokenizer_type):
    # Creates a metadata with the tokenizer in the tensor process units.
    tokenizer, expected_files = self._create_tokenizer(tokenizer_type)

    # Create the tensor with process units.
    tensor = _metadata_fb.TensorMetadataT()
    tensor.processUnits = [tokenizer]

    # Create the subgrah with the tensor.
    subgraph = _metadata_fb.SubGraphMetadataT()
    dummy_tensor_meta = _metadata_fb.TensorMetadataT()
    subgraph.outputTensorMetadata = [dummy_tensor_meta]
    if tensor_type is TensorType.INPUT:
      subgraph.inputTensorMetadata = [tensor, dummy_tensor_meta]
      subgraph.outputTensorMetadata = [dummy_tensor_meta]
    elif tensor_type is TensorType.OUTPUT:
      subgraph.inputTensorMetadata = [dummy_tensor_meta, dummy_tensor_meta]
      subgraph.outputTensorMetadata = [tensor]
    else:
      raise ValueError(
          "The tensor type, {0}, is unsupported.".format(tensor_type))

    # Create a model metadata with the subgraph metadata
    meta_buffer = self._create_model_meta_with_subgraph_meta(subgraph)

    # Creates the tempfiles.
    tempfiles = self._create_tempfiles(expected_files)

    # Creates the MetadataPopulator object.
    populator = _metadata.MetadataPopulator.with_model_file(self._model_file)
    populator.load_metadata_buffer(meta_buffer)
    populator.load_associated_files(tempfiles)
    populator.populate()

    recorded_files = populator.get_recorded_associated_file_list()
    self.assertEqual(set(recorded_files), set(expected_files))

  @parameterized.named_parameters(
      {
          "testcase_name": "InputTensorWithBert",
          "tensor_type": TensorType.INPUT,
          "tokenizer_type": Tokenizer.BERT_TOKENIZER
      }, {
          "testcase_name": "OutputTensorWithBert",
          "tensor_type": TensorType.OUTPUT,
          "tokenizer_type": Tokenizer.BERT_TOKENIZER
      }, {
          "testcase_name": "InputTensorWithSentencePiece",
          "tensor_type": TensorType.INPUT,
          "tokenizer_type": Tokenizer.SENTENCE_PIECE
      }, {
          "testcase_name": "OutputTensorWithSentencePiece",
          "tensor_type": TensorType.OUTPUT,
          "tokenizer_type": Tokenizer.SENTENCE_PIECE
      })
  def testGetRecordedAssociatedFileListWithSubgraphProcessUnits(
      self, tensor_type, tokenizer_type):
    # Creates a metadata with the tokenizer in the subgraph process units.
    tokenizer, expected_files = self._create_tokenizer(tokenizer_type)

    # Create the subgraph with process units.
    subgraph = _metadata_fb.SubGraphMetadataT()
    if tensor_type is TensorType.INPUT:
      subgraph.inputProcessUnits = [tokenizer]
    elif tensor_type is TensorType.OUTPUT:
      subgraph.outputProcessUnits = [tokenizer]
    else:
      raise ValueError(
          "The tensor type, {0}, is unsupported.".format(tensor_type))

    # Creates the input and output tensor meta to match self._model_file.
    dummy_tensor_meta = _metadata_fb.TensorMetadataT()
    subgraph.inputTensorMetadata = [dummy_tensor_meta, dummy_tensor_meta]
    subgraph.outputTensorMetadata = [dummy_tensor_meta]

    # Create a model metadata with the subgraph metadata
    meta_buffer = self._create_model_meta_with_subgraph_meta(subgraph)

    # Creates the tempfiles.
    tempfiles = self._create_tempfiles(expected_files)

    # Creates the MetadataPopulator object.
    populator = _metadata.MetadataPopulator.with_model_file(self._model_file)
    populator.load_metadata_buffer(meta_buffer)
    populator.load_associated_files(tempfiles)
    populator.populate()

    recorded_files = populator.get_recorded_associated_file_list()
    self.assertEqual(set(recorded_files), set(expected_files))

  def testPopulatedFullPathAssociatedFileShouldSucceed(self):
    # Create AssociatedFileT using the full path file name.
    associated_file = _metadata_fb.AssociatedFileT()
    associated_file.name = self._file1

    # Create model metadata with the associated file.
    subgraph = _metadata_fb.SubGraphMetadataT()
    subgraph.associatedFiles = [associated_file]
    # Creates the input and output tensor metadata to match self._model_file.
    dummy_tensor = _metadata_fb.TensorMetadataT()
    subgraph.inputTensorMetadata = [dummy_tensor, dummy_tensor]
    subgraph.outputTensorMetadata = [dummy_tensor]
    md_buffer = self._create_model_meta_with_subgraph_meta(subgraph)

    # Populate the metadata to a model.
    populator = _metadata.MetadataPopulator.with_model_file(self._model_file)
    populator.load_metadata_buffer(md_buffer)
    populator.load_associated_files([self._file1])
    populator.populate()

    # The recorded file name in metadata should only contain file basename; file
    # directory should not be included.
    recorded_files = populator.get_recorded_associated_file_list()
    self.assertEqual(set(recorded_files), set([os.path.basename(self._file1)]))


class MetadataDisplayerTest(MetadataTest):
  """Tests for _metadata.MetadataDisplayer (reading metadata back out)."""

  def setUp(self):
    super(MetadataDisplayerTest, self).setUp()
    self._model_with_meta_file = (
        self._create_model_with_metadata_and_associated_files())

  def _create_model_with_metadata_and_associated_files(self):
    model_buf = self._create_model_buf()
    model_file = self.create_tempfile().full_path
    with open(model_file, "wb") as f:
      f.write(model_buf)

    populator = _metadata.MetadataPopulator.with_model_file(model_file)
    populator.load_metadata_file(self._metadata_file)
    populator.load_associated_files([self._file1, self._file2])
    populator.populate()
    return model_file

  def testLoadModelBufferMetadataBufferWithWrongIdentifierThrowsException(self):
    model_buf = self._create_model_buffer_with_wrong_identifier()
    metadata_buf = self._create_metadata_buffer_with_wrong_identifier()
    model_buf = self._populate_metadata_with_identifier(
        model_buf, metadata_buf,
        _metadata.MetadataPopulator.TFLITE_FILE_IDENTIFIER)
    with self.assertRaises(ValueError) as error:
      _metadata.MetadataDisplayer.with_model_buffer(model_buf)
    self.assertEqual(
        "The metadata buffer does not have the expected identifier, and may not"
        " be a valid TFLite Metadata.", str(error.exception))

  def testLoadModelBufferModelBufferWithWrongIdentifierThrowsException(self):
    model_buf = self._create_model_buffer_with_wrong_identifier()
    metadata_file = self._create_metadata_file()
    wrong_identifier = b"widn"
    metadata_buf = bytearray(_read_file(metadata_file))
    model_buf = self._populate_metadata_with_identifier(model_buf, metadata_buf,
                                                        wrong_identifier)
    # NOTE(review): this method is truncated in the source chunk; the
    # remainder of its body lies outside the visible range.
    with
self.assertRaises(ValueError) as error: _metadata.MetadataDisplayer.with_model_buffer(model_buf) self.assertEqual( "The model provided does not have the expected identifier, and " "may not be a valid TFLite model.", str(error.exception)) def testLoadModelFileInvalidModelFileThrowsException(self): with self.assertRaises(IOError) as error: _metadata.MetadataDisplayer.with_model_file(self._invalid_file) self.assertEqual("File, '{0}', does not exist.".format(self._invalid_file), str(error.exception)) def testLoadModelFileModelWithoutMetadataThrowsException(self): with self.assertRaises(ValueError) as error: _metadata.MetadataDisplayer.with_model_file(self._model_file) self.assertEqual("The model does not have metadata.", str(error.exception)) def testLoadModelFileModelWithMetadata(self): displayer = _metadata.MetadataDisplayer.with_model_file( self._model_with_meta_file) self.assertIsInstance(displayer, _metadata.MetadataDisplayer) def testLoadModelBufferInvalidModelBufferThrowsException(self): with self.assertRaises(ValueError) as error: _metadata.MetadataDisplayer.with_model_buffer(_read_file(self._file1)) self.assertEqual("model_buffer cannot be empty.", str(error.exception)) def testLoadModelBufferModelWithOutMetadataThrowsException(self): with self.assertRaises(ValueError) as error: _metadata.MetadataDisplayer.with_model_buffer(self._create_model_buf()) self.assertEqual("The model does not have metadata.", str(error.exception)) def testLoadModelBufferModelWithMetadata(self): displayer = _metadata.MetadataDisplayer.with_model_buffer( _read_file(self._model_with_meta_file)) self.assertIsInstance(displayer, _metadata.MetadataDisplayer) def testGetAssociatedFileBufferShouldSucceed(self): # _model_with_meta_file contains file1 and file2. 
displayer = _metadata.MetadataDisplayer.with_model_file( self._model_with_meta_file) actual_content = displayer.get_associated_file_buffer("file2") self.assertEqual(actual_content, self._file2_content) def testGetAssociatedFileBufferFailsWithNonExistentFile(self): # _model_with_meta_file contains file1 and file2. displayer = _metadata.MetadataDisplayer.with_model_file( self._model_with_meta_file) non_existent_file = "non_existent_file" with self.assertRaises(ValueError) as error: displayer.get_associated_file_buffer(non_existent_file) self.assertEqual( "The file, {}, does not exist in the model.".format(non_existent_file), str(error.exception)) def testGetMetadataBufferShouldSucceed(self): displayer = _metadata.MetadataDisplayer.with_model_file( self._model_with_meta_file) actual_buffer = displayer.get_metadata_buffer() actual_json = _metadata.convert_to_json(actual_buffer) # Verifies the generated json file. golden_json_file_path = resource_loader.get_path_to_datafile( "testdata/golden_json.json") with open(golden_json_file_path, "r") as f: expected = f.read() self.assertEqual(actual_json, expected) def testGetMetadataJsonModelWithMetadata(self): displayer = _metadata.MetadataDisplayer.with_model_file( self._model_with_meta_file) actual = displayer.get_metadata_json() # Verifies the generated json file. 
golden_json_file_path = resource_loader.get_path_to_datafile( "testdata/golden_json.json") expected = _read_file(golden_json_file_path, "r") self.assertEqual(actual, expected) def testGetPackedAssociatedFileListModelWithMetadata(self): displayer = _metadata.MetadataDisplayer.with_model_file( self._model_with_meta_file) packed_files = displayer.get_packed_associated_file_list() expected_packed_files = [ os.path.basename(self._file1), os.path.basename(self._file2) ] self.assertLen( packed_files, 2, "The following two associated files packed to the model: {0}; {1}" .format(expected_packed_files[0], expected_packed_files[1])) self.assertEqual(set(packed_files), set(expected_packed_files)) class MetadataUtilTest(MetadataTest): def test_convert_to_json_should_succeed(self): metadata_buf = _read_file(self._metadata_file_with_version) metadata_json = _metadata.convert_to_json(metadata_buf) # Verifies the generated json file. golden_json_file_path = resource_loader.get_path_to_datafile( "testdata/golden_json.json") expected = _read_file(golden_json_file_path, "r") self.assertEqual(metadata_json, expected) if __name__ == "__main__": tf.test.main()
Help is at hand for the Amazon rainforest and Brazil's poverty-stricken rural people - courtesy of the country's famous native nut. Brazil nuts are a valuable food source with a huge market in Europe and North America: up to 7,000 tonnes of unshelled nuts and 20,000 tonnes of shelled nuts are shipped every year. And because the trees that supply the nuts grow wild, they offer a way for communities to make a living from the forest without destroying it - something that is now being put to use in the country. "This is a real financial resource for communities," Dr Rafael Salomao, who works at the Museu Goeldi, one of the most important centres for the study of the Amazon, told BBC World Service's One Planet programme. "A tree which is over 400 years old can provide for generations and generations." Brazil nuts are considered to be one of the most valuable products that can be harvested from undisturbed rainforest. The nuts, known to Brazilians as Castanha do Para, grow uniquely in the Amazon basin. They are hazardous to collect: each hard outer shell weighs over 1kg. However, they offer an alternative to the way that many areas of Brazil are trying to develop - by clearing the forest to create areas suitable for either grazing cattle or growing products such as soya. For many years, this meant the destruction of Brazil nut trees, even after they became officially protected. "Sadly, today, we have cemeteries of Brazil nut trees," said Dr Salomao. "It's because of the arrival of agriculture. We call it the 'agricultural frontier', which goes along with cattle ranching. "When this arrives, they destroy the forest. First, they exploit the valuable wood, and then the cattle ranchers come and turn it into pasture. "Having said that, they keep the Brazil nut trees as well as the rubber trees, as these are legally protected. But they burn the forest to clear the land and the Brazil nut tree is very sensitive to fire. 
After three years of repeating this process, the trees are dead." What is worse for Brazil nut collectors is that once the trees have been destroyed, there is little chance of getting them back. Attempts to replace them have been largely unsuccessful. Saplings will not grow in shade and take up to 15 years to begin producing nuts. "Brazil nut trees do not have an easy natural regeneration," said Hans Muller, who works at Belem's Embrapa Institute, specialising in agricultural research in the Amazon. "When you destroy one, it's a real loss. "Unfortunately, when they have talked about sanctuaries - places where you can't touch any plants at all - well, they don't exist." However, at the end of 2006, the governor of the large state of Para announced a protected reserve of 16.4 million hectares of forest, with the aim of creating a huge conservation corridor through northern Amazon. And in the state over Para's northern border, Amapa, small communities are taking to the challenge of using the Brazil nut to generate income from the rainforest without destroying it. "We had this idea, we've a vast resource of Brazil nuts and we needed to create organisations in the region in order to strengthen local production," said Ajama da Silva Mendes, from the Amapa state department of industry, commerce and minerals. "So the government gave some incentives to create co-ops, together with the communities. "Now we can see the promise of bigger production and better living conditions for rural workers." Brazil nut gatherers and their families are now able to maintain a decent livelihood. And small-scale factories have been set up producing Brazil nut biscuits and oil, broadening the range of products available for export, meaning there is a better way for people to get a fair price for their valuable resource. But there are further problems. Subsidised production in Bolivia is challenging Brazil's dominance in the market. 
And when, in 2004, the European Union found that Brazil nuts with shells on had traces of aflatoxins, which can cause liver cancer, strong regulations were put in place regarding the nuts. While the American limit on aflatoxin levels in Brazil nuts is 15 parts per billion, the European limit is just four parts per billion. This has hit Brazil nut exporters hard. "In case the product doesn't meet the EU's laws, all the costs which have arisen have to be paid by the exporter," said Benedito Mutra Emfilio, chair of the Association of Brazil Nut Exporters. "This is impossible."
from distutils.command.config import config  # NOTE(review): unused import — candidate for removal
from pybulletgym.envs.scene_abstract import SingleRobotEmptyScene
from pybulletgym.envs.scene_stadium import SinglePlayerStadiumScene
from gym_mujoco_xml_env import PybulletMujocoXmlEnv
import gym, gym.spaces, gym.utils, gym.utils.seeding
import numpy as np
import os, sys


class PybulletForwardWalkersBase(PybulletMujocoXmlEnv):
    """Shared base for forward-locomotion walker environments.

    Subclasses provide `foot_list` and `alive_bonus(z, pitch)`; this class
    implements the scene setup, action application, observation construction
    and the shaped reward used by `_step`.
    """

    def __init__(self, fn, robot_name, action_dim, obs_dim, power):
        PybulletMujocoXmlEnv.__init__(self, fn, robot_name, action_dim, obs_dim)
        self.power = power  # global torque scale multiplied by each joint's power_coef
        self.camera_x = 0
        self.walk_target_x = 1e3  # kilometer away
        self.walk_target_y = 0

    def create_single_player_scene(self):
        self.stadium_scene = SinglePlayerStadiumScene(gravity=9.8, timestep=0.0165/4, frame_skip=4)
        return self.stadium_scene

    def robot_specific_reset(self):
        # Perturb every joint slightly so episodes do not all start identically.
        for j in self.ordered_joints:
            j.reset_current_position(self.np_random.uniform(low=-0.1, high=0.1), 0)
        self.parts, self.jdict, self.ordered_joints, self.robot_body = self.addToScene(self.stadium_scene.ground_plane_mjcf)
        self.feet = [self.parts[f] for f in self.foot_list]
        self.feet_contact = np.array([0.0 for f in self.foot_list], dtype=np.float32)
        self.ground_ids = set([(self.parts[f].bodies[self.parts[f].bodyIndex], self.parts[f].bodyPartIndex) for f in self.foot_ground_object_names])
        self.scene.actor_introduce(self)
        self.initial_z = None  # captured lazily on the first calc_state() call

    def move_robot(self, init_x, init_y, init_z):
        "Used by multiplayer stadium to move sideways, to another running lane."
        self.cpp_robot.query_position()
        pose = self.cpp_robot.root_part.pose()
        pose.move_xyz(init_x, init_y, init_z)  # Works because robot loads around (0,0,0), and some robots have z != 0 that is left intact
        self.cpp_robot.set_pose(pose)

    def apply_action(self, a):
        """Apply clipped torques a (one entry per ordered joint)."""
        assert( np.isfinite(a).all() )
        for n,j in enumerate(self.ordered_joints):
            j.set_motor_torque( self.power*j.power_coef*float(np.clip(a[n], -1, +1)) )

    def calc_state(self):
        """Build the observation vector; also updates pose/target bookkeeping."""
        j = np.array([j.current_relative_position() for j in self.ordered_joints], dtype=np.float32).flatten()
        # even elements [0::2] position, scaled to -1..+1 between limits
        # odd elements [1::2] angular speed, scaled to show -1..+1
        self.joint_speeds = j[1::2]
        self.joints_at_limit = np.count_nonzero(np.abs(j[0::2]) > 0.99)
        body_pose = self.robot_body.pose()
        parts_xyz = np.array( [p.pose().xyz() for p in self.parts.values()] ).flatten()
        self.body_xyz = body_pose.xyz()
        #self.body_xyz = (parts_xyz[0::3].mean(), parts_xyz[1::3].mean(), body_pose.xyz()[2])  # torso z is more informative than mean z
        self.body_rpy = body_pose.rpy()
        z = self.body_xyz[2]
        r, p, yaw = self.body_rpy
        (qx, qy, qz, qw) = body_pose.orientation()
        # Fixed: compare against None with `is`, not `==` (PEP 8 E711).
        if self.initial_z is None:
            self.initial_z = z
        self.walk_target_theta = np.arctan2( self.walk_target_y - self.body_xyz[1], self.walk_target_x - self.body_xyz[0] )
        self.walk_target_dist = np.linalg.norm( [self.walk_target_y - self.body_xyz[1], self.walk_target_x - self.body_xyz[0]] )
        angle_to_target = self.walk_target_theta - yaw
        # rot_speed = np.array(
        #     [[np.cos(-yaw), -np.sin(-yaw), 0],
        #      [np.sin(-yaw), np.cos(-yaw), 0],
        #      [ 0, 0, 1]]
        # )
        # vx, vy, vz = np.dot(rot_speed, self.robot_body.speed())  # rotate speed back to body point of view
        (vx, vy, vz) = self.robot_body.speed()
        more = np.array([
            z-self.initial_z,
            # np.sin(angle_to_target), np.cos(angle_to_target),
            0.1*vx, 0.1*vy, 0.1*vz,  # 0.3 is just scaling typical speed into -1..+1, no physical sense here
            # r, p
            qx,qy,qz,qw #TODO: Update this for flagrun after pull-requesting
            ], dtype=np.float32)
        # # 8 + 34 + 2
        return np.clip( np.concatenate([more] + [j] + [self.feet_contact]), -5, +5)

    def calc_potential(self):
        # progress in potential field is speed*dt, typical speed is about 2-3 meter per second, this potential will change 2-3 per frame (not per second),
        # all rewards have rew/frame units and close to 1.0
        return - self.walk_target_dist / self.scene.dt

    electricity_cost = -2.0  # cost for using motors -- this parameter should be carefully tuned against reward for making progress, other values less important
    stall_torque_cost = -0.1  # cost for running electric current through a motor even at zero rotational speed, small
    foot_collision_cost = -1.0  # touches another leg, or other objects, that cost makes robot avoid smashing feet into itself
    foot_ground_object_names = set(["floor"])  # to distinguish ground and other objects
    joints_at_limit_cost = -0.1  # discourage stuck joints

    def _step(self, a):
        if not self.scene.multiplayer:  # if multiplayer, action first applied to all robots, then global step() called, then _step() for all robots with the same actions
            self.apply_action(a)
            self.scene.global_step()

        state = self.calc_state()  # also calculates self.joints_at_limit

        alive = float(self.alive_bonus(state[0]+self.initial_z, self.body_rpy[1]))  # state[0] is body height above ground, body_rpy[1] is pitch
        done = alive < 0
        if not np.isfinite(state).all():
            print("~INF~", state)
            done = True

        potential_old = self.potential
        self.potential = self.calc_potential()
        progress = float(self.potential - potential_old)

        feet_collision_cost = 0.0
        for i,f in enumerate(self.feet):
            contact_ids = set((x[2], x[4]) for x in f.contact_list())
            #print("CONTACT OF '%s' WITH %s" % (f.name, ",".join(contact_names)) )
            self.feet_contact[i] = 1.0 if (self.ground_ids & contact_ids) else 0.0
            if contact_ids - self.ground_ids:
                # Contact with something other than ground: penalize.
                feet_collision_cost += self.foot_collision_cost

        electricity_cost = self.electricity_cost * float(np.abs(a*self.joint_speeds).mean())  # let's assume we have DC motor with controller, and reverse current braking
        electricity_cost += self.stall_torque_cost * float(np.square(a).mean())

        joints_at_limit_cost = float(self.joints_at_limit_cost * self.joints_at_limit)

        self.rewards = [
            alive,
            progress,
            electricity_cost,
            joints_at_limit_cost,
            feet_collision_cost
            ]
        self.HUD(state, a, done)
        return state, sum(self.rewards), bool(done), {}

    def camera_adjust(self):
        # Low-pass filter the camera x so it trails the robot smoothly.
        x, y, z = self.body_xyz
        self.camera_x = 0.98*self.camera_x + (1-0.98)*x
        self.camera.move_and_look_at(self.camera_x, y-2.0, 1.4, x, y, 1.0)


class PybulletHopper(PybulletForwardWalkersBase):
    foot_list = ["foot"]

    def __init__(self):
        PybulletForwardWalkersBase.__init__(self, "hopper.xml", "torso", action_dim=3, obs_dim=15, power=0.75)

    def alive_bonus(self, z, pitch):
        return +1 if z > 0.8 and abs(pitch) < 1.0 else -1


class PybulletWalker2d(PybulletForwardWalkersBase):
    foot_list = ["foot", "foot_left"]

    def __init__(self):
        PybulletForwardWalkersBase.__init__(self, "walker2d.xml", "torso", action_dim=6, obs_dim=22, power=0.40)

    def alive_bonus(self, z, pitch):
        return +1 if z > 0.8 and abs(pitch) < 1.0 else -1

    def robot_specific_reset(self):
        PybulletForwardWalkersBase.robot_specific_reset(self)
        for n in ["foot_joint", "foot_left_joint"]:
            self.jdict[n].power_coef = 30.0


class PybulletHalfCheetah(PybulletForwardWalkersBase):
    foot_list = ["ffoot", "fshin", "fthigh", "bfoot", "bshin", "bthigh"]  # track these contacts with ground

    def __init__(self):
        PybulletForwardWalkersBase.__init__(self, "half_cheetah.xml", "torso", action_dim=6, obs_dim=26, power=0.90)

    def alive_bonus(self, z, pitch):
        # Use contact other than feet to terminate episode: due to a lot of strange walks using knees
        return +1 if np.abs(pitch) < 1.0 and not self.feet_contact[1] and not self.feet_contact[2] and not self.feet_contact[4] and not self.feet_contact[5] else -1

    def robot_specific_reset(self):
        PybulletForwardWalkersBase.robot_specific_reset(self)
        self.jdict["bthigh"].power_coef = 120.0
        self.jdict["bshin"].power_coef = 90.0
        self.jdict["bfoot"].power_coef = 60.0
        self.jdict["fthigh"].power_coef = 140.0
        self.jdict["fshin"].power_coef = 60.0
        self.jdict["ffoot"].power_coef = 30.0


class PybulletAnt(PybulletForwardWalkersBase):
    foot_list = ['front_left_foot', 'front_right_foot', 'left_back_foot', 'right_back_foot']

    def __init__(self):
        PybulletForwardWalkersBase.__init__(self, "ant.xml", "torso", action_dim=8, obs_dim=28, power=2.5)

    def alive_bonus(self, z, pitch):
        return +1 if z > 0.26 else -1  # 0.25 is central sphere rad, die if it scrapes the ground


## 3d Humanoid ##

class PybulletHumanoid(PybulletForwardWalkersBase):
    self_collision = True
    foot_list = ["right_foot", "left_foot"]  # "left_hand", "right_hand"

    def __init__(self):
        PybulletForwardWalkersBase.__init__(self, 'humanoid_symmetric.xml', 'torso', action_dim=17, obs_dim=44, power=0.082)
        # 17 joints, 4 of them important for walking (hip, knee), others may as well be turned off, 17/4 = 4.25
        self.electricity_cost = 4.25*PybulletForwardWalkersBase.electricity_cost
        self.stall_torque_cost = 4.25*PybulletForwardWalkersBase.stall_torque_cost

    def robot_specific_reset(self):
        PybulletForwardWalkersBase.robot_specific_reset(self)
        self.motor_names = ["abdomen_z", "abdomen_y", "abdomen_x"]
        self.motor_power = [100, 100, 100]
        self.motor_names += ["right_hip_x", "right_hip_z", "right_hip_y", "right_knee"]
        self.motor_power += [100, 100, 300, 200]
        self.motor_names += ["left_hip_x", "left_hip_z", "left_hip_y", "left_knee"]
        self.motor_power += [100, 100, 300, 200]
        self.motor_names += ["right_shoulder1", "right_shoulder2", "right_elbow"]
        self.motor_power += [75, 75, 75]
        self.motor_names += ["left_shoulder1", "left_shoulder2", "left_elbow"]
        self.motor_power += [75, 75, 75]
        self.motors = [self.jdict[n] for n in self.motor_names]
        # if self.random_yaw:  # TODO: Make leaning work as soon as the rest works
        #     cpose = cpp_household.Pose()
        #     yaw = self.np_random.uniform(low=-3.14, high=3.14)
        #     if self.random_lean and self.np_random.randint(2)==0:
        #         cpose.set_xyz(0, 0, 1.4)
        #         if self.np_random.randint(2)==0:
        #             pitch = np.pi/2
        #             cpose.set_xyz(0, 0, 0.45)
        #         else:
        #             pitch = np.pi*3/2
        #             cpose.set_xyz(0, 0, 0.25)
        #         roll = 0
        #         cpose.set_rpy(roll, pitch, yaw)
        #     else:
        #         cpose.set_xyz(0, 0, 1.4)
        #         cpose.set_rpy(0, 0, yaw)  # just face random direction, but stay straight otherwise
        #     self.cpp_robot.set_pose_and_speed(cpose, 0,0,0)
        self.initial_z = 0.8

    random_yaw = False
    random_lean = False

    def apply_action(self, a):
        # Overrides the base to use per-motor power scaling; note torques are
        # intentionally NOT clipped here (see commented alternative below).
        assert( np.isfinite(a).all() )
        force_gain = 1
        for i, m, power in zip(range(17), self.motors, self.motor_power):
            m.set_motor_torque( float(force_gain * power*self.power*a[i]) )
            #m.set_motor_torque(float(force_gain * power * self.power * np.clip(a[i], -1, +1)))

    def alive_bonus(self, z, pitch):
        return +2 if z > 0.78 else -1  # 2 here because 17 joints produce a lot of electricity cost just from policy noise, living must be better than dying
Definition - What does Beehive Forum mean? Beehive Forum also has certain proprietary features, including a relationship system (similar to Facebook's, though distinct) through which users can express their opinions about other posters. There is also a polling system built into the platform. Reviews of Beehive Forum have called it a good resource for DIY forum hosting, and reviewers have noted the full range of features built into this platform. It is a common choice for setting up forums in a straightforward way with limited hosting resources.
#!/usr/bin/env python # coding: utf-8 import os import sys import traceback import webbrowser import pyqrcode import requests import mimetypes import json import xml.dom.minidom import urllib import time import re import random from traceback import format_exc from requests.exceptions import ConnectionError, ReadTimeout import HTMLParser UNKONWN = 'unkonwn' SUCCESS = '200' SCANED = '201' TIMEOUT = '408' def show_image(file_path): """ 跨平台显示图片文件 :param file_path: 图片文件路径 """ if sys.version_info >= (3, 3): from shlex import quote else: from pipes import quote if sys.platform == "darwin": command = "open -a /Applications/Preview.app %s&" % quote(file_path) os.system(command) else: webbrowser.open(os.path.join(os.getcwd(),'temp',file_path)) class SafeSession(requests.Session): def request(self, method, url, params=None, data=None, headers=None, cookies=None, files=None, auth=None, timeout=None, allow_redirects=True, proxies=None, hooks=None, stream=None, verify=None, cert=None, json=None): for i in range(3): try: return super(SafeSession, self).request(method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json) except Exception as e: print e.message, traceback.format_exc() continue class WXBot: """WXBot功能类""" def __init__(self): self.DEBUG = False self.uuid = '' self.base_uri = '' self.base_host = '' self.redirect_uri = '' self.uin = '' self.sid = '' self.skey = '' self.pass_ticket = '' self.device_id = 'e' + repr(random.random())[2:17] self.base_request = {} self.sync_key_str = '' self.sync_key = [] self.sync_host = '' #文件缓存目录 self.temp_pwd = os.path.join(os.getcwd(),'temp') if os.path.exists(self.temp_pwd) == False: os.makedirs(self.temp_pwd) self.session = SafeSession() self.session.headers.update({'User-Agent': 'Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5'}) self.conf = {'qr': 'png'} self.my_account = {} # 当前账户 # 所有相关账号: 联系人, 公众号, 群组, 特殊账号 self.member_list = [] # 所有群组的成员, 
{'group_id1': [member1, member2, ...], ...} self.group_members = {} # 所有账户, {'group_member':{'id':{'type':'group_member', 'info':{}}, ...}, 'normal_member':{'id':{}, ...}} self.account_info = {'group_member': {}, 'normal_member': {}} self.contact_list = [] # 联系人列表 self.public_list = [] # 公众账号列表 self.group_list = [] # 群聊列表 self.special_list = [] # 特殊账号列表 self.encry_chat_room_id_list = [] # 存储群聊的EncryChatRoomId,获取群内成员头像时需要用到 self.file_index = 0 @staticmethod def to_unicode(string, encoding='utf-8'): """ 将字符串转换为Unicode :param string: 待转换字符串 :param encoding: 字符串解码方式 :return: 转换后的Unicode字符串 """ if isinstance(string, str): return string.decode(encoding) elif isinstance(string, unicode): return string else: raise Exception('Unknown Type') def get_contact(self): """获取当前账户的所有相关账号(包括联系人、公众号、群聊、特殊账号)""" url = self.base_uri + '/webwxgetcontact?pass_ticket=%s&skey=%s&r=%s' \ % (self.pass_ticket, self.skey, int(time.time())) r = self.session.post(url, data='{}') r.encoding = 'utf-8' if self.DEBUG: with open(os.path.join(self.temp_pwd,'contacts.json'), 'w') as f: f.write(r.text.encode('utf-8')) dic = json.loads(r.text) self.member_list = dic['MemberList'] special_users = ['newsapp', 'fmessage', 'filehelper', 'weibo', 'qqmail', 'fmessage', 'tmessage', 'qmessage', 'qqsync', 'floatbottle', 'lbsapp', 'shakeapp', 'medianote', 'qqfriend', 'readerapp', 'blogapp', 'facebookapp', 'masssendapp', 'meishiapp', 'feedsapp', 'voip', 'blogappweixin', 'weixin', 'brandsessionholder', 'weixinreminder', 'wxid_novlwrv3lqwv11', 'gh_22b87fa7cb3c', 'officialaccounts', 'notification_messages', 'wxid_novlwrv3lqwv11', 'gh_22b87fa7cb3c', 'wxitil', 'userexperience_alarm', 'notification_messages'] self.contact_list = [] self.public_list = [] self.special_list = [] self.group_list = [] for contact in self.member_list: if contact['VerifyFlag'] & 8 != 0: # 公众号 self.public_list.append(contact) self.account_info['normal_member'][contact['UserName']] = {'type': 'public', 'info': contact} elif contact['UserName'] in 
special_users: # 特殊账户 self.special_list.append(contact) self.account_info['normal_member'][contact['UserName']] = {'type': 'special', 'info': contact} elif contact['UserName'].find('@@') != -1: # 群聊 self.group_list.append(contact) self.account_info['normal_member'][contact['UserName']] = {'type': 'group', 'info': contact} elif contact['UserName'] == self.my_account['UserName']: # 自己 self.account_info['normal_member'][contact['UserName']] = {'type': 'self', 'info': contact} else: self.contact_list.append(contact) self.account_info['normal_member'][contact['UserName']] = {'type': 'contact', 'info': contact} self.batch_get_group_members() for group in self.group_members: for member in self.group_members[group]: if member['UserName'] not in self.account_info: self.account_info['group_member'][member['UserName']] = \ {'type': 'group_member', 'info': member, 'group': group} if self.DEBUG: with open(os.path.join(self.temp_pwd,'contact_list.json'), 'w') as f: f.write(json.dumps(self.contact_list)) with open(os.path.join(self.temp_pwd,'special_list.json'), 'w') as f: f.write(json.dumps(self.special_list)) with open(os.path.join(self.temp_pwd,'group_list.json'), 'w') as f: f.write(json.dumps(self.group_list)) with open(os.path.join(self.temp_pwd,'public_list.json'), 'w') as f: f.write(json.dumps(self.public_list)) with open(os.path.join(self.temp_pwd,'member_list.json'), 'w') as f: f.write(json.dumps(self.member_list)) with open(os.path.join(self.temp_pwd,'group_users.json'), 'w') as f: f.write(json.dumps(self.group_members)) with open(os.path.join(self.temp_pwd,'account_info.json'), 'w') as f: f.write(json.dumps(self.account_info)) return True def batch_get_group_members(self): """批量获取所有群聊成员信息""" url = self.base_uri + '/webwxbatchgetcontact?type=ex&r=%s&pass_ticket=%s' % (int(time.time()), self.pass_ticket) params = { 'BaseRequest': self.base_request, "Count": len(self.group_list), "List": [{"UserName": group['UserName'], "EncryChatRoomId": ""} for group in self.group_list] 
        }
        # (continuation of the batch group-member fetch started above this chunk)
        r = self.session.post(url, data=json.dumps(params))
        r.encoding = 'utf-8'
        dic = json.loads(r.text)
        group_members = {}
        encry_chat_room_id = {}
        for group in dic['ContactList']:
            gid = group['UserName']
            members = group['MemberList']
            group_members[gid] = members
            encry_chat_room_id[gid] = group['EncryChatRoomId']
        # cache member lists and encrypted chat-room ids keyed by group id
        self.group_members = group_members
        self.encry_chat_room_id_list = encry_chat_room_id

    def get_group_member_name(self, gid, uid):
        """
        Get the name info of a given member of a group chat.
        :param gid: group id
        :param uid: member id
        :return: name dict, e.g.
            {"display_name": "test_user", "nickname": "test", "remark_name": "for_test"}
            or None when the group or member is unknown
        """
        if gid not in self.group_members:
            return None
        group = self.group_members[gid]
        for member in group:
            if member['UserName'] == uid:
                names = {}
                # only include non-empty name fields
                if 'RemarkName' in member and member['RemarkName']:
                    names['remark_name'] = member['RemarkName']
                if 'NickName' in member and member['NickName']:
                    names['nickname'] = member['NickName']
                if 'DisplayName' in member and member['DisplayName']:
                    names['display_name'] = member['DisplayName']
                return names
        return None

    def get_contact_info(self, uid):
        # raw cached info for a normal contact, or None if unknown
        return self.account_info['normal_member'].get(uid)

    def get_group_member_info(self, uid):
        # raw cached info for a group member, or None if unknown
        return self.account_info['group_member'].get(uid)

    def get_contact_name(self, uid):
        """Return the non-empty name fields of a contact, or None if nothing is set."""
        info = self.get_contact_info(uid)
        if info is None:
            return None
        info = info['info']
        name = {}
        if 'RemarkName' in info and info['RemarkName']:
            name['remark_name'] = info['RemarkName']
        if 'NickName' in info and info['NickName']:
            name['nickname'] = info['NickName']
        if 'DisplayName' in info and info['DisplayName']:
            name['display_name'] = info['DisplayName']
        if len(name) == 0:
            return None
        else:
            return name

    @staticmethod
    def get_contact_prefer_name(name):
        # preference order for contacts: remark > nickname > display name
        if name is None:
            return None
        if 'remark_name' in name:
            return name['remark_name']
        if 'nickname' in name:
            return name['nickname']
        if 'display_name' in name:
            return name['display_name']
        return None

    @staticmethod
    def get_group_member_prefer_name(name):
        # preference order for group members: remark > display name > nickname
        if name is None:
            return None
        if 'remark_name' in name:
            return name['remark_name']
        if 'display_name' in name:
            return name['display_name']
        if 'nickname' in name:
            return name['nickname']
        return None

    def get_user_type(self, wx_user_id):
        """
        Classify the relationship of a given account to the logged-in user.
        :param wx_user_id: account id
        :return: one of 'contact', 'public', 'special', 'group', 'group_member', 'unknown'
        """
        for account in self.contact_list:
            if wx_user_id == account['UserName']:
                return 'contact'
        for account in self.public_list:
            if wx_user_id == account['UserName']:
                return 'public'
        for account in self.special_list:
            if wx_user_id == account['UserName']:
                return 'special'
        for account in self.group_list:
            if wx_user_id == account['UserName']:
                return 'group'
        for group in self.group_members:
            for member in self.group_members[group]:
                if member['UserName'] == wx_user_id:
                    return 'group_member'
        return 'unknown'

    def is_contact(self, uid):
        for account in self.contact_list:
            if uid == account['UserName']:
                return True
        return False

    def is_public(self, uid):
        for account in self.public_list:
            if uid == account['UserName']:
                return True
        return False

    def is_special(self, uid):
        for account in self.special_list:
            if uid == account['UserName']:
                return True
        return False

    def handle_msg_all(self, msg):
        """
        Handle every message; subclass and override this method.
        msg:
            msg_id -> message id
            msg_type_id -> message type id
            user -> id of the sending account
            content -> message content
        :param msg: the received message
        """
        pass

    @staticmethod
    def proc_at_info(msg):
        # Split a group text message on U+2005 (the separator WeChat inserts
        # after @-mentions) and extract the mentioned names.
        # NOTE(review): returns a 2-tuple here but a 3-tuple below; callers
        # index [0]..[2] -- confirm empty input cannot reach those callers.
        if not msg:
            return '', []
        segs = msg.split(u'\u2005')
        str_msg_all = ''
        str_msg = ''
        infos = []
        if len(segs) > 1:
            for i in range(0, len(segs) - 1):
                segs[i] += u'\u2005'
                pm = re.search(u'@.*\u2005', segs[i]).group()
                if pm:
                    name = pm[1:-1]  # strip leading '@' and trailing U+2005
                    string = segs[i].replace(pm, '')
                    str_msg_all += string + '@' + name + ' '
                    str_msg += string
                    if string:
                        infos.append({'type': 'str', 'value': string})
                    infos.append({'type': 'at', 'value': name})
                else:
                    infos.append({'type': 'str', 'value': segs[i]})
                    str_msg_all += segs[i]
                    str_msg += segs[i]
            str_msg_all += segs[-1]
            str_msg += segs[-1]
            infos.append({'type': 'str', 'value': segs[-1]})
        else:
            infos.append({'type': 'str', 'value': segs[-1]})
            str_msg_all = msg
            str_msg = msg
        return str_msg_all.replace(u'\u2005', ''), str_msg.replace(u'\u2005', ''), infos

    def extract_msg_content(self, msg_type_id, msg):
        """
        content_type_id:
            0 -> Text
            1 -> Location
            3 -> Image
            4 -> Voice
            5 -> Recommend
            6 -> Animation
            7 -> Share
            8 -> Video
            9 -> VideoCall
            10 -> Redraw
            11 -> Empty
            99 -> Unknown
        :param msg_type_id: message type id
        :param msg: raw message structure
        :return: parsed message dict
        """
        mtype = msg['MsgType']
        content = HTMLParser.HTMLParser().unescape(msg['Content'])
        msg_id = msg['MsgId']

        msg_content = {}
        if msg_type_id == 0:
            return {'type': 11, 'data': ''}
        elif msg_type_id == 2:  # File Helper
            return {'type': 0, 'data': content.replace('<br/>', '\n')}
        elif msg_type_id == 3:  # group chat: content is '<sender_uid>:<br/>...'
            sp = content.find('<br/>')
            uid = content[:sp]
            content = content[sp:]
            content = content.replace('<br/>', '')
            uid = uid[:-1]  # drop the trailing ':'
            name = self.get_contact_prefer_name(self.get_contact_name(uid))
            if not name:
                name = self.get_group_member_prefer_name(self.get_group_member_name(msg['FromUserName'], uid))
            if not name:
                name = 'unknown'
            msg_content['user'] = {'id': uid, 'name': name}
        else:  # Self, Contact, Special, Public, Unknown
            pass

        msg_prefix = (msg_content['user']['name'] + ':') if 'user' in msg_content else ''

        if mtype == 1:
            if content.find('http://weixin.qq.com/cgi-bin/redirectforward?args=') != -1:
                # location message: fetch the redirect page and pull its title
                r = self.session.get(content)
                r.encoding = 'gbk'
                data = r.text
                pos = self.search_content('title', data, 'xml')
                msg_content['type'] = 1
                msg_content['data'] = pos
                msg_content['detail'] = data
                if self.DEBUG:
                    print '    %s[Location] %s ' % (msg_prefix, pos)
            else:
                msg_content['type'] = 0
                if msg_type_id == 3 or (msg_type_id == 1 and msg['ToUserName'][:2] == '@@'):  # Group text message
                    msg_infos = self.proc_at_info(content)
                    str_msg_all = msg_infos[0]
                    str_msg = msg_infos[1]
                    detail = msg_infos[2]
                    msg_content['data'] = str_msg_all
                    msg_content['detail'] = detail
                    msg_content['desc'] = str_msg
                else:
                    msg_content['data'] = content
                if self.DEBUG:
                    try:
                        print '    %s[Text] %s' % (msg_prefix, msg_content['data'])
                    except UnicodeEncodeError:
                        print '    %s[Text] (illegal text).' % msg_prefix
        elif mtype == 3:
            msg_content['type'] = 3
            msg_content['data'] = self.get_msg_img_url(msg_id)
            msg_content['img'] = self.session.get(msg_content['data']).content.encode('hex')
            if self.DEBUG:
                image = self.get_msg_img(msg_id)
                print '    %s[Image] %s' % (msg_prefix, image)
        elif mtype == 34:
            msg_content['type'] = 4
            msg_content['data'] = self.get_voice_url(msg_id)
            msg_content['voice'] = self.session.get(msg_content['data']).content.encode('hex')
            if self.DEBUG:
                voice = self.get_voice(msg_id)
                print '    %s[Voice] %s' % (msg_prefix, voice)
        elif mtype == 37:
            msg_content['type'] = 37
            msg_content['data'] = msg['RecommendInfo']
            if self.DEBUG:
                print '    %s[useradd] %s' % (msg_prefix, msg['RecommendInfo']['NickName'])
        elif mtype == 42:
            msg_content['type'] = 5
            info = msg['RecommendInfo']
            msg_content['data'] = {'nickname': info['NickName'],
                                   'alias': info['Alias'],
                                   'province': info['Province'],
                                   'city': info['City'],
                                   'gender': ['unknown', 'male', 'female'][info['Sex']]}
            if self.DEBUG:
                print '    %s[Recommend]' % msg_prefix
                print '    -----------------------------'
                print '    | NickName: %s' % info['NickName']
                print '    | Alias: %s' % info['Alias']
                print '    | Local: %s %s' % (info['Province'], info['City'])
                print '    | Gender: %s' % ['unknown', 'male', 'female'][info['Sex']]
                print '    -----------------------------'
        elif mtype == 47:
            msg_content['type'] = 6
            msg_content['data'] = self.search_content('cdnurl', content)
            if self.DEBUG:
                print '    %s[Animation] %s' % (msg_prefix, msg_content['data'])
        elif mtype == 49:
            msg_content['type'] = 7
            if msg['AppMsgType'] == 3:
                app_msg_type = 'music'
            elif msg['AppMsgType'] == 5:
                app_msg_type = 'link'
            elif msg['AppMsgType'] == 7:
                app_msg_type = 'weibo'
            else:
                app_msg_type = 'unknown'
            msg_content['data'] = {'type': app_msg_type,
                                   'title': msg['FileName'],
                                   'desc': self.search_content('des', content, 'xml'),
                                   'url': msg['Url'],
                                   'from': self.search_content('appname', content, 'xml'),
                                   # some public accounts send 3-4 links and one big image at once;
                                   # Url only carries the first link, Content carries all of them
                                   'content': msg.get('Content')
                                   }
            if self.DEBUG:
                print '    %s[Share] %s' % (msg_prefix, app_msg_type)
                print '    --------------------------'
                print '    | title: %s' % msg['FileName']
                print '    | desc: %s' % self.search_content('des', content, 'xml')
                print '    | link: %s' % msg['Url']
                print '    | from: %s' % self.search_content('appname', content, 'xml')
                print '    | content: %s' % (msg.get('content')[:20] if msg.get('content') else "unknown")
                print '    --------------------------'
        elif mtype == 62:
            msg_content['type'] = 8
            msg_content['data'] = content
            if self.DEBUG:
                print '    %s[Video] Please check on mobiles' % msg_prefix
        elif mtype == 53:
            msg_content['type'] = 9
            msg_content['data'] = content
            if self.DEBUG:
                print '    %s[Video Call]' % msg_prefix
        elif mtype == 10002:
            msg_content['type'] = 10
            msg_content['data'] = content
            if self.DEBUG:
                print '    %s[Redraw]' % msg_prefix
        elif mtype == 10000:  # unknown, maybe red packet, or group invite
            msg_content['type'] = 12
            msg_content['data'] = msg['Content']
            if self.DEBUG:
                print '    [Unknown]'
        else:
            msg_content['type'] = 99
            msg_content['data'] = content
            if self.DEBUG:
                print '    %s[Unknown]' % msg_prefix
        return msg_content

    def handle_msg(self, r):
        """
        Inner dispatcher for raw web-WeChat messages.
        msg_type_id:
            0 -> Init
            1 -> Self
            2 -> FileHelper
            3 -> Group
            4 -> Contact
            5 -> Public
            6 -> Special
            99 -> Unknown
        :param r: raw WeChat sync response containing 'AddMsgList'
        """
        for msg in r['AddMsgList']:
            user = {'id': msg['FromUserName'], 'name': 'unknown'}
            if msg['MsgType'] == 51:  # init message
                msg_type_id = 0
                user['name'] = 'system'
            elif msg['MsgType'] == 37:  # friend request
                msg_type_id = 37
                pass
                # content = msg['Content']
                # username = content[content.index('fromusername='): content.index('encryptusername')]
                # username = username[username.index('"') + 1: username.rindex('"')]
                # print u'[Friend Request]'
                # print u'    Nickname:' + msg['RecommendInfo']['NickName']
                # print u'    extra message:' + msg['RecommendInfo']['Content']
                # # print u'Ticket:'+msg['RecommendInfo']['Ticket']  # Ticket is needed when accepting the friend
                # print u'    WeChat ID:' + username  # if no WeChat ID is set, Tencent auto-generates
                #                                     # one, but the user cannot be found via search
            elif msg['FromUserName'] == self.my_account['UserName']:  # Self
                msg_type_id = 1
                user['name'] = 'self'
            elif msg['ToUserName'] == 'filehelper':  # File Helper
                msg_type_id = 2
                user['name'] = 'file_helper'
            elif msg['FromUserName'][:2] == '@@':  # Group
                msg_type_id = 3
                user['name'] = self.get_contact_prefer_name(self.get_contact_name(user['id']))
            elif self.is_contact(msg['FromUserName']):  # Contact
                msg_type_id = 4
                user['name'] = self.get_contact_prefer_name(self.get_contact_name(user['id']))
            elif self.is_public(msg['FromUserName']):  # Public
                msg_type_id = 5
                user['name'] = self.get_contact_prefer_name(self.get_contact_name(user['id']))
            elif self.is_special(msg['FromUserName']):  # Special
                msg_type_id = 6
                user['name'] = self.get_contact_prefer_name(self.get_contact_name(user['id']))
            else:
                msg_type_id = 99
                user['name'] = 'unknown'
            if not user['name']:
                user['name'] = 'unknown'
            user['name'] = HTMLParser.HTMLParser().unescape(user['name'])

            # NOTE(review): CreateTime stays unbound if the key is missing
            # and no earlier iteration set it -- confirm all messages carry it
            if msg.has_key('CreateTime'):
                CreateTime = msg['CreateTime']

            if self.DEBUG and msg_type_id != 0:
                print u'[MSG] %s:' % user['name']
            content = self.extract_msg_content(msg_type_id, msg)
            message = {'msg_type_id': msg_type_id,
                       'msg_id': msg['MsgId'],
                       'content': content,
                       'to_user_id': msg['ToUserName'],
                       'user': user,
                       'time': CreateTime}
            self.handle_msg_all(message)

    def schedule(self):
        """
        Hook for periodic tasks; override in subclasses if needed.
        Called between message-processing iterations -- do not block for long.
        """
        pass

    def proc_msg(self):
        # main long-poll loop: sync_check, then pull and dispatch messages
        self.test_sync_check()
        while True:
            check_time = time.time()
            try:
                [retcode, selector] = self.sync_check()
                # print '[DEBUG] sync_check:', retcode, selector
                if retcode == '1100':  # logged out from the WeChat client
                    break
                elif retcode == '1101':  # logged in to web WeChat on another device
                    break
                elif retcode == '0':
                    if selector == '2':  # new message
                        r = self.sync()
                        if r is not None:
                            self.handle_msg(r)
                    elif selector == '3':  # unknown
                        r = self.sync()
                        if r is not None:
                            self.handle_msg(r)
                    elif selector == '4':  # contact list updated
                        r = self.sync()
                        if r is not None:
                            self.get_contact()
                    elif selector == '6':  # possibly a red packet
                        r = self.sync()
                        if r is not None:
                            self.handle_msg(r)
                    elif selector == '7':  # WeChat was operated on the phone
                        r = self.sync()
                        if r is not None:
                            self.handle_msg(r)
                    elif selector == '0':  # no event
                        pass
                    else:
                        print '[DEBUG] sync_check:', retcode, selector
                        r = self.sync()
                        if r is not None:
                            self.handle_msg(r)
                else:
                    print '[DEBUG] sync_check:', retcode, selector
                    time.sleep(10)
                self.schedule()
            except:
                print '[ERROR] Except in proc_msg'
                print format_exc()
            # pace the loop to roughly one iteration per second
            check_time = time.time() - check_time
            if check_time < 0.8:
                time.sleep(1 - check_time)

    def apply_useradd_requests(self, RecommendInfo):
        # accept a pending friend request (Opcode 3 = verify)
        url = self.base_uri + '/webwxverifyuser?r=' + str(int(time.time())) + '&lang=zh_CN'
        params = {
            "BaseRequest": self.base_request,
            "Opcode": 3,
            "VerifyUserListSize": 1,
            "VerifyUserList": [
                {
                    "Value": RecommendInfo['UserName'],
                    "VerifyUserTicket": RecommendInfo['Ticket']
                }
            ],
            "VerifyContent": "",
            "SceneListCount": 1,
            "SceneList": [
                33
            ],
            "skey": self.skey
        }
        headers = {'content-type': 'application/json; charset=UTF-8'}
        data = json.dumps(params, ensure_ascii=False).encode('utf8')
        try:
            r = self.session.post(url, data=data, headers=headers)
        except (ConnectionError, ReadTimeout):
            return False
        dic = r.json()
        return dic['BaseResponse']['Ret'] == 0

    def add_groupuser_to_friend_by_uid(self, uid, VerifyContent):
        """
        Proactively greet a group member and submit a friend request.
        uid - uid of the group member
        VerifyContent - greeting text sent with the request
        Use this interface with extreme caution! You risk getting the
        account banned -- at your own peril!
        """
        if self.is_contact(uid):
            return True
        url = self.base_uri + '/webwxverifyuser?r=' + str(int(time.time())) + '&lang=zh_CN'
        params = {
            "BaseRequest": self.base_request,
            "Opcode": 2,
            "VerifyUserListSize": 1,
            "VerifyUserList": [
                {
                    "Value": uid,
                    "VerifyUserTicket": ""
                }
            ],
            "VerifyContent": VerifyContent,
            "SceneListCount": 1,
            "SceneList": [
                33
            ],
            "skey": self.skey
        }
        headers = {'content-type': 'application/json; charset=UTF-8'}
        data = json.dumps(params, ensure_ascii=False).encode('utf8')
        try:
            r = self.session.post(url, data=data, headers=headers)
        except (ConnectionError, ReadTimeout):
            return False
        dic = r.json()
        return dic['BaseResponse']['Ret'] == 0

    def add_friend_to_group(self, uid, group_name):
        """
        Add a friend to a group chat.
        """
        gid = ''
        # resolve the group id from its name; groups not saved to
        # the contact list cannot be targeted
        for group in self.group_list:
            if group['NickName'] == group_name:
                gid = group['UserName']
        if gid == '':
            return False
        # check by group id whether uid is already a member
        for user in self.group_members[gid]:
            if user['UserName'] == uid:
                # already in the group, nothing to add
                return True
        url = self.base_uri + '/webwxupdatechatroom?fun=addmember&pass_ticket=%s' % self.pass_ticket
        params = {
            "AddMemberList": uid,
            "ChatRoomName": gid,
            "BaseRequest": self.base_request
        }
        headers = {'content-type': 'application/json; charset=UTF-8'}
        data = json.dumps(params, ensure_ascii=False).encode('utf8')
        try:
            r = self.session.post(url, data=data, headers=headers)
        except (ConnectionError, ReadTimeout):
            return False
        dic = r.json()
        return dic['BaseResponse']['Ret'] == 0

    def delete_user_from_group(self, uname, gid):
        """
        Remove a member from a group; only the group admin has permission.
        """
        uid = ""
        for user in self.group_members[gid]:
            if user['NickName'] == uname:
                uid = user['UserName']
        if uid == "":
            return False
        url = self.base_uri + '/webwxupdatechatroom?fun=delmember&pass_ticket=%s' % self.pass_ticket
        params = {
            "DelMemberList": uid,
            "ChatRoomName": gid,
            "BaseRequest": self.base_request
        }
        headers = {'content-type': 'application/json; charset=UTF-8'}
        data = json.dumps(params, ensure_ascii=False).encode('utf8')
        try:
            r = self.session.post(url, data=data, headers=headers)
        except (ConnectionError, ReadTimeout):
            return False
        dic = r.json()
        return dic['BaseResponse']['Ret'] == 0

    def set_group_name(self, gid, gname):
        """
        Set the display name (topic) of a group chat.
        """
        url = self.base_uri + '/webwxupdatechatroom?fun=modtopic&pass_ticket=%s' % self.pass_ticket
        params = {
            "NewTopic": gname,
            "ChatRoomName": gid,
            "BaseRequest": self.base_request
        }
        headers = {'content-type': 'application/json; charset=UTF-8'}
        data = json.dumps(params, ensure_ascii=False).encode('utf8')
        try:
            r = self.session.post(url, data=data, headers=headers)
        except (ConnectionError, ReadTimeout):
            return False
        dic = r.json()
        return dic['BaseResponse']['Ret'] == 0

    def send_msg_by_uid(self, word, dst='filehelper'):
        # send a plain text message to the account id `dst`
        url = self.base_uri + '/webwxsendmsg?pass_ticket=%s' % self.pass_ticket
        msg_id = str(int(time.time() * 1000)) + str(random.random())[:5].replace('.', '')
        word = self.to_unicode(word)
        params = {
            'BaseRequest': self.base_request,
            'Msg': {
                "Type": 1,
                "Content": word,
                "FromUserName": self.my_account['UserName'],
                "ToUserName": dst,
                "LocalID": msg_id,
                "ClientMsgId": msg_id
            }
        }
        headers = {'content-type': 'application/json; charset=UTF-8'}
        data = json.dumps(params, ensure_ascii=False).encode('utf8')
        try:
            r = self.session.post(url, data=data, headers=headers)
        except (ConnectionError, ReadTimeout):
            return False
        dic = r.json()
        return dic['BaseResponse']['Ret'] == 0

    def upload_media(self, fpath, is_img=False):
        # upload a file to the WeChat media servers; returns MediaId or None
        if not os.path.exists(fpath):
            print '[ERROR] File not exists.'
            return None
        url_1 = 'https://file.' + self.base_host + '/cgi-bin/mmwebwx-bin/webwxuploadmedia?f=json'
        url_2 = 'https://file2.' + self.base_host + '/cgi-bin/mmwebwx-bin/webwxuploadmedia?f=json'
        flen = str(os.path.getsize(fpath))
        ftype = mimetypes.guess_type(fpath)[0] or 'application/octet-stream'
        files = {
            'id': (None, 'WU_FILE_%s' % str(self.file_index)),
            'name': (None, os.path.basename(fpath)),
            'type': (None, ftype),
            'lastModifiedDate': (None, time.strftime('%m/%d/%Y, %H:%M:%S GMT+0800 (CST)')),
            'size': (None, flen),
            'mediatype': (None, 'pic' if is_img else 'doc'),
            'uploadmediarequest': (None, json.dumps({
                'BaseRequest': self.base_request,
                'ClientMediaId': int(time.time()),
                'TotalLen': flen,
                'StartPos': 0,
                'DataLen': flen,
                'MediaType': 4,
            })),
            'webwx_data_ticket': (None, self.session.cookies['webwx_data_ticket']),
            'pass_ticket': (None, self.pass_ticket),
            'filename': (os.path.basename(fpath), open(fpath, 'rb'), ftype.split('/')[1]),
        }
        self.file_index += 1
        try:
            r = self.session.post(url_1, files=files)
            if json.loads(r.text)['BaseResponse']['Ret'] != 0:
                # a nonzero Ret means the upload failed; retry on the second file server
                r = self.session.post(url_2, files=files)
                if json.loads(r.text)['BaseResponse']['Ret'] != 0:
                    print '[ERROR] Upload media failure.'
                    return None
            mid = json.loads(r.text)['MediaId']
            return mid
        except Exception, e:
            return None

    def send_file_msg_by_uid(self, fpath, uid):
        # upload a file, then send it as an app message to account `uid`
        mid = self.upload_media(fpath)
        if mid is None or not mid:
            return False
        url = self.base_uri + '/webwxsendappmsg?fun=async&f=json&pass_ticket=' + self.pass_ticket
        msg_id = str(int(time.time() * 1000)) + str(random.random())[:5].replace('.', '')
        data = {
            'BaseRequest': self.base_request,
            'Msg': {
                'Type': 6,
                'Content': (
                    "<appmsg appid='wxeb7ec651dd0aefa9' sdkver=''><title>%s</title><des></des><action></action><type>6</type><content></content><url></url><lowurl></lowurl><appattach><totallen>%s</totallen><attachid>%s</attachid><fileext>%s</fileext></appattach><extinfo></extinfo></appmsg>" % (
                        os.path.basename(fpath).encode('utf-8'), str(os.path.getsize(fpath)), mid,
                        fpath.split('.')[-1])).encode('utf8'),
                'FromUserName': self.my_account['UserName'],
                'ToUserName': uid,
                'LocalID': msg_id,
                'ClientMsgId': msg_id,
            },
        }
        try:
            r = self.session.post(url, data=json.dumps(data))
            res = json.loads(r.text)
            if res['BaseResponse']['Ret'] == 0:
                return True
            else:
                return False
        except Exception, e:
            return False

    def send_img_msg_by_uid(self, fpath, uid):
        # upload an image, then send it as an image (or emoticon) message
        mid = self.upload_media(fpath, is_img=True)
        if mid is None:
            return False
        url = self.base_uri + '/webwxsendmsgimg?fun=async&f=json'
        data = {
            'BaseRequest': self.base_request,
            'Msg': {
                'Type': 3,
                'MediaId': mid,
                'FromUserName': self.my_account['UserName'],
                'ToUserName': uid,
                'LocalID': str(time.time() * 1e7),
                'ClientMsgId': str(time.time() * 1e7),
            },
        }
        if fpath[-4:] == '.gif':
            # animated gifs go through the emoticon endpoint instead
            url = self.base_uri + '/webwxsendemoticon?fun=sys'
            data['Msg']['Type'] = 47
            data['Msg']['EmojiFlag'] = 2
        try:
            r = self.session.post(url, data=json.dumps(data))
            res = json.loads(r.text)
            if res['BaseResponse']['Ret'] == 0:
                return True
            else:
                return False
        except Exception, e:
            return False

    def get_user_id(self, name):
        """Look up a contact or group id by remark/nick/display name.
        Returns None for an empty name, '' when nothing matches."""
        if name == '':
            return None
        name = self.to_unicode(name)
        for contact in self.contact_list:
            if 'RemarkName' in contact and contact['RemarkName'] == name:
                return contact['UserName']
            elif 'NickName' in contact and contact['NickName'] == name:
                return contact['UserName']
            elif 'DisplayName' in contact and contact['DisplayName'] == name:
                return contact['UserName']
        for group in self.group_list:
            if 'RemarkName' in group and group['RemarkName'] == name:
                return group['UserName']
            if 'NickName' in group and group['NickName'] == name:
                return group['UserName']
            if 'DisplayName' in group and group['DisplayName'] == name:
                return group['UserName']
        return ''

    def send_msg(self, name, word, isfile=False):
        # send `word` (or each line of the file at path `word` when isfile)
        # to the account whose name resolves via get_user_id
        uid = self.get_user_id(name)
        if uid is not None:
            if isfile:
                with open(word, 'r') as f:
                    result = True
                    for line in f.readlines():
                        line = line.replace('\n', '')
                        print '-> ' + name + ': ' + line
                        if self.send_msg_by_uid(line, uid):
                            pass
                        else:
                            result = False
                        time.sleep(1)
                    return result
            else:
                word = self.to_unicode(word)
                if self.send_msg_by_uid(word, uid):
                    return True
                else:
                    return False
        else:
            if self.DEBUG:
                print '[ERROR] This user does not exist .'
            # NOTE(review): returns True even though the user was not found -- confirm intended
            return True

    @staticmethod
    def search_content(key, content, fmat='attr'):
        # extract key="value" (attr) or <key>value</key> (xml) from content
        if fmat == 'attr':
            pm = re.search(key + '\s?=\s?"([^"<]+)"', content)
            if pm:
                return pm.group(1)
        elif fmat == 'xml':
            pm = re.search('<{0}>([^<]+)</{0}>'.format(key), content)
            if pm:
                return pm.group(1)
        return 'unknown'

    def run(self):
        # full life cycle: QR login -> init -> contact fetch -> message loop
        self.get_uuid()
        self.gen_qr_code(os.path.join(self.temp_pwd, 'wxqr.png'))
        print '[INFO] Please use WeChat to scan the QR code .'

        result = self.wait4login()
        if result != SUCCESS:
            print '[ERROR] Web WeChat login failed. failed code=%s' % (result,)
            return

        if self.login():
            print '[INFO] Web WeChat login succeed .'
        else:
            print '[ERROR] Web WeChat login failed .'
            return

        if self.init():
            print '[INFO] Web WeChat init succeed .'
        else:
            print '[INFO] Web WeChat init failed'
            return
        self.status_notify()
        self.get_contact()
        print '[INFO] Get %d contacts' % len(self.contact_list)
        print '[INFO] Start to process messages .'
        self.proc_msg()

    def get_uuid(self):
        # request a login uuid used to build the QR code
        url = 'https://login.weixin.qq.com/jslogin'
        params = {
            'appid': 'wx782c26e4c19acffb',
            'fun': 'new',
            'lang': 'zh_CN',
            '_': int(time.time()) * 1000 + random.randint(1, 999),
        }
        r = self.session.get(url, params=params)
        r.encoding = 'utf-8'
        data = r.text
        regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)"'
        pm = re.search(regx, data)
        if pm:
            code = pm.group(1)
            self.uuid = pm.group(2)
            return code == '200'
        return False

    def gen_qr_code(self, qr_file_path):
        # render the login QR code as a PNG file or directly on the terminal
        string = 'https://login.weixin.qq.com/l/' + self.uuid
        qr = pyqrcode.create(string)
        if self.conf['qr'] == 'png':
            qr.png(qr_file_path, scale=8)
            show_image(qr_file_path)
            # img = Image.open(qr_file_path)
            # img.show()
        elif self.conf['qr'] == 'tty':
            print(qr.terminal(quiet_zone=1))

    def do_request(self, url):
        # GET the url and extract the window.code status value
        r = self.session.get(url)
        r.encoding = 'utf-8'
        data = r.text
        param = re.search(r'window.code=(\d+);', data)
        code = param.group(1)
        return code, data

    def wait4login(self):
        """
        http comet:
            tip=1, waiting for the user to scan the QR code
                   201: scanned
                   408: timeout
            tip=0, waiting for the user to confirm login
                   200: confirmed
        """
        LOGIN_TEMPLATE = 'https://login.weixin.qq.com/cgi-bin/mmwebwx-bin/login?tip=%s&uuid=%s&_=%s'
        tip = 1

        try_later_secs = 1
        MAX_RETRY_TIMES = 10

        code = UNKONWN

        retry_time = MAX_RETRY_TIMES
        while retry_time > 0:
            url = LOGIN_TEMPLATE % (tip, self.uuid, int(time.time()))
            code, data = self.do_request(url)

            if code == SCANED:
                print '[INFO] Please confirm to login .'
                tip = 0
            elif code == SUCCESS:  # login confirmed
                # capture the redirect uri and derive base_uri / base_host from it
                param = re.search(r'window.redirect_uri="(\S+?)";', data)
                redirect_uri = param.group(1) + '&fun=new'
                self.redirect_uri = redirect_uri
                self.base_uri = redirect_uri[:redirect_uri.rfind('/')]
                temp_host = self.base_uri[8:]
                self.base_host = temp_host[:temp_host.find("/")]
                return code
            elif code == TIMEOUT:
                print '[ERROR] WeChat login timeout. retry in %s secs later...' % (try_later_secs,)
                tip = 1  # reset
                retry_time -= 1
                time.sleep(try_later_secs)
            else:
                print ('[ERROR] WeChat login exception return_code=%s. retry in %s secs later...' %
                       (code, try_later_secs))
                tip = 1
                retry_time -= 1
                time.sleep(try_later_secs)

        return code

    def login(self):
        # exchange the redirect uri for session credentials (skey/sid/uin/pass_ticket)
        if len(self.redirect_uri) < 4:
            print '[ERROR] Login failed due to network problem, please try again.'
            return False
        r = self.session.get(self.redirect_uri)
        r.encoding = 'utf-8'
        data = r.text
        doc = xml.dom.minidom.parseString(data)
        root = doc.documentElement

        for node in root.childNodes:
            if node.nodeName == 'skey':
                self.skey = node.childNodes[0].data
            elif node.nodeName == 'wxsid':
                self.sid = node.childNodes[0].data
            elif node.nodeName == 'wxuin':
                self.uin = node.childNodes[0].data
            elif node.nodeName == 'pass_ticket':
                self.pass_ticket = node.childNodes[0].data

        # all four credentials must be present
        if '' in (self.skey, self.sid, self.uin, self.pass_ticket):
            return False

        self.base_request = {
            'Uin': self.uin,
            'Sid': self.sid,
            'Skey': self.skey,
            'DeviceID': self.device_id,
        }
        return True

    def init(self):
        # initialize the session: fetch our own account info and the first SyncKey
        url = self.base_uri + '/webwxinit?r=%i&lang=en_US&pass_ticket=%s' % (int(time.time()), self.pass_ticket)
        params = {
            'BaseRequest': self.base_request
        }
        r = self.session.post(url, data=json.dumps(params))
        r.encoding = 'utf-8'
        dic = json.loads(r.text)
        self.sync_key = dic['SyncKey']
        self.my_account = dic['User']
        self.sync_key_str = '|'.join([str(keyVal['Key']) + '_' + str(keyVal['Val'])
                                      for keyVal in self.sync_key['List']])
        return dic['BaseResponse']['Ret'] == 0

    def status_notify(self):
        # tell the server this client is now the active one
        url = self.base_uri + '/webwxstatusnotify?lang=zh_CN&pass_ticket=%s' % self.pass_ticket
        self.base_request['Uin'] = int(self.base_request['Uin'])
        params = {
            'BaseRequest': self.base_request,
            "Code": 3,
            "FromUserName": self.my_account['UserName'],
            "ToUserName": self.my_account['UserName'],
            "ClientMsgId": int(time.time())
        }
        r = self.session.post(url, data=json.dumps(params))
        r.encoding = 'utf-8'
        dic = json.loads(r.text)
        return dic['BaseResponse']['Ret'] == 0

    def test_sync_check(self):
        # probe the webpush hosts until one answers sync_check with retcode '0'
        for host1 in ['webpush.', 'webpush2.']:
            self.sync_host = host1 + self.base_host
            try:
                retcode = self.sync_check()[0]
            except:
                retcode = -1
            if retcode == '0':
                return True
        return False

    def sync_check(self):
        # long-poll the sync host; returns [retcode, selector] or [-1, -1] on error
        params = {
            'r': int(time.time()),
            'sid': self.sid,
            'uin': self.uin,
            'skey': self.skey,
            'deviceid': self.device_id,
            'synckey': self.sync_key_str,
            '_': int(time.time()),
        }
        url = 'https://' + self.sync_host + '/cgi-bin/mmwebwx-bin/synccheck?' + urllib.urlencode(params)
        try:
            r = self.session.get(url, timeout=60)
            r.encoding = 'utf-8'
            data = r.text
            pm = re.search(r'window.synccheck=\{retcode:"(\d+)",selector:"(\d+)"\}', data)
            retcode = pm.group(1)
            selector = pm.group(2)
            return [retcode, selector]
        except:
            return [-1, -1]

    def sync(self):
        # fetch pending messages and rotate the SyncKey; None on error
        url = self.base_uri + '/webwxsync?sid=%s&skey=%s&lang=en_US&pass_ticket=%s' \
              % (self.sid, self.skey, self.pass_ticket)
        params = {
            'BaseRequest': self.base_request,
            'SyncKey': self.sync_key,
            'rr': ~int(time.time())
        }
        try:
            r = self.session.post(url, data=json.dumps(params), timeout=60)
            r.encoding = 'utf-8'
            dic = json.loads(r.text)
            if dic['BaseResponse']['Ret'] == 0:
                self.sync_key = dic['SyncKey']
                self.sync_key_str = '|'.join([str(keyVal['Key']) + '_' + str(keyVal['Val'])
                                              for keyVal in self.sync_key['List']])
            return dic
        except:
            return None

    def get_icon(self, uid, gid=None):
        """
        Fetch the avatar of a contact or of a group member and save it locally.
        :param uid: contact id
        :param gid: group id; if not None fetch the avatar of a member of that
                    group, otherwise fetch the contact's own avatar
        """
        if gid is None:
            url = self.base_uri + '/webwxgeticon?username=%s&skey=%s' % (uid, self.skey)
        else:
            url = self.base_uri + '/webwxgeticon?username=%s&skey=%s&chatroomid=%s' % (
                uid, self.skey, self.encry_chat_room_id_list[gid])
        r = self.session.get(url)
        data = r.content
        fn = 'icon_' + uid + '.jpg'
        with open(os.path.join(self.temp_pwd, fn), 'wb') as f:
            f.write(data)
        return fn

    def get_head_img(self, uid):
        """
        Fetch a group's head image and save it locally.
        :param uid: group uid
        """
        url = self.base_uri + '/webwxgetheadimg?username=%s&skey=%s' % (uid, self.skey)
        r = self.session.get(url)
        data = r.content
        fn = 'head_' + uid + '.jpg'
        with open(os.path.join(self.temp_pwd, fn), 'wb') as f:
            f.write(data)
        return fn

    def get_msg_img_url(self, msgid):
        # direct download url for the image attached to message `msgid`
        return self.base_uri + '/webwxgetmsgimg?MsgID=%s&skey=%s' % (msgid, self.skey)

    def get_msg_img(self, msgid):
        """
        Download the image of an image message to a local file.
        :param msgid: message id
        :return: file name of the saved local image
        """
        url = self.base_uri + '/webwxgetmsgimg?MsgID=%s&skey=%s' % (msgid, self.skey)
        r = self.session.get(url)
        data = r.content
        fn = 'img_' + msgid + '.jpg'
        with open(os.path.join(self.temp_pwd, fn), 'wb') as f:
            f.write(data)
        return fn

    def get_voice_url(self, msgid):
        # direct download url for the audio attached to voice message `msgid`
        return self.base_uri + '/webwxgetvoice?msgid=%s&skey=%s' % (msgid, self.skey)

    def get_voice(self, msgid):
        """
        Download the audio of a voice message to a local file.
        :param msgid: voice message id
        :return: file name of the saved local audio file
        """
        url = self.base_uri + '/webwxgetvoice?msgid=%s&skey=%s' % (msgid, self.skey)
        r = self.session.get(url)
        data = r.content
        fn = 'voice_' + msgid + '.mp3'
        with open(os.path.join(self.temp_pwd, fn), 'wb') as f:
            f.write(data)
        return fn

    def set_remarkname(self, uid, remarkname):
        # set the remark name of a contact; returns the server ErrMsg or None
        url = self.base_uri + '/webwxoplog?lang=zh_CN&pass_ticket=%s' \
              % (self.pass_ticket)
        remarkname = self.to_unicode(remarkname)
        params = {
            'BaseRequest': self.base_request,
            'CmdId': 2,
            'RemarkName': remarkname,
            'UserName': uid
        }
        try:
            r = self.session.post(url, data=json.dumps(params), timeout=60)
            r.encoding = 'utf-8'
            dic = json.loads(r.text)
            return dic['BaseResponse']['ErrMsg']
        except:
            return None
A couple of days ago I mentioned in this thread (http://forum.kitz.co.uk/index.php/topic,21193.msg367603.html#msg367603), that I'd been playing with a custom html interface for the dslstats inbuilt webserver display. The custom interface displays all graphs but just outputs them in to more logical groups, that can be tabbed through. I've had a request if I could make these files publicly available for use, so I've quickly packaged them together in a zip file attached below. All the live stats work and update every minute. Working example can be viewed here (https://kitz.co.uk/linestats/index.htm). I'm using WinSCP (https://winscp.net/eng/index.php) to automatically FTP graphs from DSLstats (http://dslstats.me.uk/) to a remote website. Please note: The history section requires PHP to run and the output graphs work perfectly well by synchronising with local machine folders, but in time it would require a more robust FTP script to handle file management of older files. As there has been little interest and zilch feedback in this section I stopped development after I got it to do what I needed (historic SNRM & errors). My original aim was that it would only upload latest graphs in the relevant local folders and delete older files on the remote server to keep hosting bandwidth to a minimum. The relevant files are included in the attached zip file if you want to make use of them yourself. By default history section expects files to be stored in categories eg SNRM/CRC etc . This is because it's the way I have my system set up to store on my local machine and it was more logical (for me) to replicate this method on the remote server. If you prefer to have snapshots in daily folders, then Jelv has written a cron script which creates symlinks to files in daily folders. His script can be found in this post here (https://forum.kitz.co.uk/index.php/topic,21237.msg388420.html#msg388420). Cosmetic changes to slightly improve layout. 
g3uiss (https://forum.kitz.co.uk/index.php?action=profile;u=7826) has developed an asp version of the history page for anyone running a windows server and is kindly making his aspnet scripts available for download. These files are attached below as g3uiss_asp_version. You will still need the contents of DSLstats adv_web_interface, just replace the php file with the asp ones. Thank you – looks great and far better than anything I could have done. Many thanks Kitz, a vast improvement on my efforts. Many thanks to kitz for the development work on this project. 1) Will this work in a RPi program environment. 2) What do I need to select in the DSLStats Webserver window to correctly activate the process. 3) Extract from README file. Q.) I don't have a webserver folder even after enabling Webserver, assuming I have done this correctly ! While these questions probably appear very basic to the more informed forum members I would suspect that many of the less informed members, myself included may be potentially confused. Have now discovered (from the DSLStats Event Log) that the "webserver" folder is created in /home/pi/.dslstats in a RPi program environment, in my case /home/pi/.dslstats/snapshots/webserver as I added a "snapshots" path, was looking in the /home/pi/dslstats (not .dslstats) folder where the program files reside. With the kitz files now copied to there still none the wiser how to proceed further ? Pi’s ip address). That works fine for both of mine. Loving it although I think I need a new Pi to sick my fingers in DSLStats murders my CPU right now. I've tried this out on my Linux system, and it works perfectly. It's a very nice enhancement. Just so you know, I'm currently working on the webserver, but what I'm doing won't conflict with this. I'm adding the options for the user to choose where to save the webserver files, and to save them even when the webserver itself isn't running. 
I think I need a new Pi to sick my fingers in DSLStats murders my CPU right now. To reduce the CPU usage you can disable monitoring "SNR per tone" and "Bitloading". With this options disabled dslstats cpu time gets reduced to a quarter on my Rpi 1 B+. Thanks for the info will have a fiddle but SNRM graph I need as well as bitloading however its working great now. Best of all if anything breaks I scripted the whole install setup for xrdp custom with fuse and xfce4. Then all I have to do is unzip some files for dslstats, set it up config wise then change raspi-config to auto boot GUI + login, boom dslstats runs automatically and rdp is blocked from use. Also i thought with @kitz sharing his stuff i thought i'd share my script for setup DSL stats with RDP via xrdp. Then change password from raspberry to whatever with option (1), followed by going to Advance Options (7) and expanding the volume (A1), the reboot. Then select Boot Options (3) , then Desktop / CLI (B1) , then select Desktop (B3). then exit it the raspi-config and reboot again. You can then dial into RDP and copy and paste files via clip board from your host computer. Once you have finished you can then block rdp but simply going back into raspi-config and changing the Boot option for Desktop to Desktop Auto Login (B4). This will then kick the RDP out when you try to RDP in. If you need to RDP back in just change the Boot option back to Desktop option B3 and reboot once more. Apologies in advance for my lack of knowledge on this subject, I still can't produce the "kitz" interface on my RPi 1B setup running DSLStats, I think the issue is to do with file locations on my RPi ? Note: I previously added the "snapshots" folder path in the "snapshots directory" option of DSLStats configuration just for clarity. As per the instructions in the kitz "readme.txt" file I have copied the 8 new files to the "webserver" folder. In my case the "stats.htm" file resides in the /home/pi/.dslstats folder. 
Opening this with the RPi, Raspbian Jessie default browser, Chromium, produces the new kitz interface but with no data. Opening the *.html files resident in the /home/pi/.dslstats/snapshots/webserver folder with RPi's Chromium browser just produce graph snapshots, no kitz interface. As I said, I believe the issue is with file locations, any suggestions welcome. What happens if you move the stats.htm file into the /home/Pi/.dlstats/snapshots/webserver directory? Thanks to both for your interest. Yes, as suspected, the issue was with file locations, deleted the "snapshots" folder and changed the snapshots storage path back to default /home/pi/.dslstats. The kitz webserver interface now opens in Chromium browser and displays the relevent data. My RPi Mk.1B really struggles with the webserver interface as would be expected, CPU running at 100% usage most of the time, have not tried running on my RPi Zero W's as yet, would likely cope slightly better. Have changed to running DSLStats on a RPi 3B with Wi-Fi link which is a much faster experience, have ordered a just released RPi 3B+ just to compare the performance and for future project reference. Many thanks to kitz for the hard work on the project and providing public release also roseway for his continuing development of DSLStats. Anyone any ideas on creating a desktop shortcut on RPi 3B running Raspbian Jessie to fire up the kitz webserver interface ? I can double click the /home/pi/.dslstats/webserver/stats.htm file, or indeed any of the *.htm files which will start the webserver application within Chromium browser but can't get the correct syntax to achieve this from a desktop shortcut, with any of the *.htm files nominated as "exec" within the desktop shortcut nothing happens ! I'am sure there must be a way of nominating a browser for file execution from within a DT shortcut ? A quick addition to Kitz work, add your TBB image like so. 
I've added mine in connection.htm, Open it in a text editor or whatever you prefer.... leave for the moment. Now web browse to your TBB monitor page on the TBB site. Under your monitor image, click on 'Share Live Graph', Copy the link under Large Graph. Below <div class="graphs"> add the link. It should end up like this. Note that in the example above there is a hspace="20" towards the end of the TBB line, this is to bring it into line with the other images. Tweaked mine slightly to reduce it to just the <img alt= part, but great addition. Thanks, marjohn56. I've had a request if I could make these files publicly available for use, so I've quickly packaged them together in a zip file. Please note, it's nothing particularly fancy and the history section isn't yet complete as I'd need to install PHP on my local machine and this section isnt yet working. The relevant files are all included in the attached rar file if you want to make use of them yourself. Noted in one of your later links that you have made some further progress in the "history" tab but the original files posted on 15/03/18 have not yet been updated, would it be possible to update, I have very limited knowledge of the system but presume that only an updated "history.htm" file would be required. Fully understand if you prefer to complete the project, time permitting, before re-posting or indeed just don't have the time at present to carry out further development. Your efforts on this project to produce some form of local MDWS substitute are very much appreciated certainly by the less knowledgable forum members like myself who don't have the necessary technical ability. right now me martin and skyeci are using these files to upload to my webserver once a minute, I had planned to request a couple of things from you related to this such as using the built in ftp uploader to upload these web files, and to be able to set the interval so e.g. every 5 mins for the upload. 
The graph generation server side for a proper MDWS replacement is probably a while off so these graphs will work well in the mean time. When I have fully integrated kitz code to my graphs and settled on it, I will add the link to my graphs in my signature so they're public. If Martin is ok with it on the domain I can probably actually host anyone's graphs just making an ftp account, providing in PM and they upload, then the graphs are public not just for private use. Many thanks for the update, I eagerly await further developments. I currently use "broadstairs" web, php based system which he has kindly made available on request to forum members, running on my Win. 8.1 desktop PC via Apache server, manually uploading my 3 RPi's (different lines) historic graph and data files on a 2 day basis, works very well but will still be keen to review any future alternative systems. yes he provided to me as well and I have yet to have a look at it, so there are 2 different systems right now available which is great, the bit I am trying to bring together is the public access again for shared knowledge, php is not a problem on my server. Sorry didn't see this earlier - my hands have been too bad to type. Yes I mentioned in another thread that I'd been doing more work on it, but as no-one else seemed interested in running from a remote site I stopped any further development and this was about as far as I got. I'm just about to release a test version of DSLstats which adds the capability to upload all the webserver files (including this extension if it's there) to a remote location of the user's choice. At present it uploads after every sample, but if different timing is needed that can certainly be arranged. Thanks for the update, fully appreciate your situation with typing difficulty at present, hope this improves soon. As you can see earlier in the thread, Chrysalis did provide an update on his, Martin and skyeci's project progress.
Thanks to you also for the DSLStats update and for your continuing work on the program. I have merged stuart's code onto kitz's code to get a solution that takes advantage of both, but it's not my work to distribute, but I will post a link to my stats after I or stuart enhance the security of the code, e.g. viewing historical eventlog reveals the broadband ip so I need to remove that facility before I post the link. Then people can see where we're at, but it's basically the same as what kitz posted since it's using her templates with the addition of a more functional history page. On my setup the graphs are being automatically synced every minute using winscp cli via a batch script on task scheduler and also a second batch script to upload the snapshots just after midnight. Eric after my request kindly added native support to dslstats though so winscp or any other ftp client won't be needed in future. I have also almost completed a linux script to mass rename files to support the history code, so e.g. if you have snapshots in date order, it will rename them en masse to name order so they work on the history page. Mass rename is done, check your history page skyeci, I moved the graphs there. sweet. got all my old stats back up on line. happy days. Just to pick up on the security bit and IP addresses. I originally created my scripts to be run locally where there would be no security issue however if people want a version which does not display the IP address when viewing the event log then I'll take a look to see how I can achieve this easily. Currently the script simply loads the file contents and displays it, so in order to achieve this it will need some re-writing and expansion of the code to check the data. yeah, I planned to simply just remove the eventlog option so it doesn't appear in the file list, I don't know why that would be needed for viewing remote stats.
I was going to add a debug flag at start of the script, so if its set to 1, it effectively runs as is now, and if 0, it hides eventlog, and also hides the displayed path to files. Yes I'll make some changes later over the weekend. I have today sent chrysalis the updated code for displaying the archived images and text files from DSLStats for him to test, this has an option to remove the event log from display as it can contain your external IP address. If anyone else would like a copy of either script to test please PM me. Edit: Should have mentioned - please supply an email if you want the scripts. I have been working on my scripts some more and now have a version of the script which display graphs built from the stats.log file created by DSLStats which is customised in a similar fashion to Kitz' original. Only the Historical statistics are available right now. This is in part thanks to skyECI who started the idea. If you want to run this script locally you will need a web server running which has PHP available (the cut down server built into DSLStats does not support this understandably). PM me if you would like a copy. Also if you would like to see more data options please tell me your suggestions. I have a version with very limited data available on my web sit at https://www.stella-maris.org.uk/Stats.php (https://www.stella-maris.org.uk/Stats.php) but should give some idea of its functionality. The graph will resize automatically if you resize the window. Also if you mouseover the graph some small icons appear top right which allow you to manipulate the graph by zooming and panning etc. I also have my old snapshots converted over and Stuarts new zooming chart tool is there too. Any chance of a zip file of your linestats pages, please. I am using your original version with a minor tweak and would like to upgrade to this version. I have made some more changes, mainly cosmetic, to my version of the DSLgraphs and Stats script. 
they can be view at https://www.stella-maris.org.uk/DSLgraphs.php (https://www.stella-maris.org.uk/DSLgraphs.php) and https://www.stella-maris.org.uk/Stats.php (https://www.stella-maris.org.uk/Stats.php). I would really appreciate some feedback on them, both positive and negative. BTW they do link to each other from the top menu. Currently they do not display the live graphs from DSLStats but that could possibly be added if needed but these are available from the DSLStats built in web server anyway. Looks very good Stuart, sorry I havent responded the past few days. Updated the attached file in first post. There's several changes which are mostly cosmetic to layout. All files have been updated with the newer menu which is IMHO more logical than the previous one. I've been using the new layout for quite a while so some of you will have already seen it, but if not it can be viewed here (https://kitz.co.uk/linestats/index.htm). Re history - I've not included that file as although the php is working on my version, I still need to develop a script which will only upload selected history files to the remote server automatically. If anyone's DSLstats folder is like mine you'll have years worth of files and I'd hate anyone to upload a zillion image files and use up a ton of bandwidth. My live graphs are here (http://stats.browni.co.uk) including your latest update. I have made some more changes, mainly cosmetic, to my version of the DSLgraphs and Stats script. they can be view at https://www.stella-maris.org.uk/DSLgraphs.php (https://www.stella-maris.org.uk/DSLgraphs.php) and https://www.stella-maris.org.uk/Stats.php (https://www.stella-maris.org.uk/Stats.php). I would really appreciate some feedback on them, both positive and negative. BTW they do link to each other from the top menu. Well done :) Just noticed though that you appear to be at the stage where I got to for DSLstat graphs in that you haven't yet managed to auto FTP update for history. 
That was the point at which I stopped as I really thought no-one else was interested in running from a remote website and had no feedback at all. Slight difference between ours is that mine runs on graph folders rather than date only because that's the way I stored them locally. Currently they do not display the live graphs from DSLStats but that could possibly be added if needed but these are available from the DSLStats built in web server anyway. I've already done that and FTPing current stats for running remotely. Hate to sound like I'm doing the sour grape thing (and yeah I admit it is), but I have had absolutely zero interest in what I had done. Not a single comment in any of the various threads where I said I'd got something working for running on a remote website since March. Which is why I thought I was wasting my time developing an FTP version of historic and stopped as soon as I'd done only what I wanted (historic CRC, Err/Secs and SNRM) for my own line. I've had to change it to custom to get rid of the surplus tones, so thanks for pointing it out. Nah, leave your name for now, my name is in the web address! The preset tone ranges are only approximations for practical reasons (programming convenience really). They actually define a number of pixels horizontally, so the optimum settings will depend on the size of the DSLstats window. I have started my own thread for my scripts. It can be viewed at https://forum.kitz.co.uk/index.php/topic,21488.0.html (https://forum.kitz.co.uk/index.php/topic,21488.0.html). I've done this for two reasons, first to separate them from here and stop distracting folks from Kitz' script and second to make life easier for me to track stuff going on with them. So please from now on post in my thread when discussing my scripts. I been real busy but disabled the IP leak part of the history page, page is now in my sig. My stats show dslstats graphs, history graphs, and tbb graphs. 
This is using kitz's code except for the history page which is Stuart's code. They do not include Stuart's newer stuff yet, although I think Ned is using his new scripts now. I have snapshots including historical stuff thanks to Chris. tbb, Stuart's new zoom tool/script and live data . Interesting stuff – thanks for showing it. Time has been against me lately so I’ve only managed to get the webserver files uploading from my pi3 to a VPS using rsync. When I get some time I will look into the history part. Thanks to Kitz for the original files. nice. One of my other lines is on a pi3. Can you share how you have done it from the pi3. I did it the lazy way which will be frowned upon by some, but I’m the only one with access to the pi and then only from my home network. From the terminal ran sshpass -p ssh_password rsync -aP /home/pi/.dslstats/Webserver/ user@domain.net:/path/to/dslstatsfolder just to be able to accept the certificate on first use. Then set up a task to run the command every minute using this (https://www.raspberrypi.org/documentation/linux/usage/cron.md). The information I used started from reading here (https://unix.stackexchange.com/questions/111526/rsync-without-prompt-for-password?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa), there are other ways. when the new dslstats is released Ned, you can just add sftp account to it and upload to the server as is now. Here's my pi3 setup with fully automated DSL stats uploads to our TR server and it uses Stuart's new scripts for viewing data, see dsl stats files & historical tabs. Really good bit of software, with the associated web interface it makes it really easy to monitor both at home and away. Thanks for spending your time on developing it as it has really opened my eyes up to my connection(s) and I've learnt a lot! Why is there no G.INP ? And no G.Vector either? That is not good. looks like both are on eci cab. not going to happen any time soon.. ECI Cab.
that's not happening this side of 2030. Just to clarify - the IP leak was not in my history page. Again for clarification. I have history graphs which automatically upload every 2hrs. My code is different to Stuarts and works using the graph type rather than dates for those who have DSLstats to save folders in that way. The reason the thread got split up & Stuart started his own is that I don't think people realised that I have had both live and history graphs working for several months now. The only thing that requires more work on mine is further development of the FTP script to ensure that the remote files on the webserver don't get too large and will automatically delete files after 'x' time. Currently my ftp scripts automatically mirror and synchronises to the local machine. I've used synchronize as that requires very little bandwidth and ensures that only the newest files are uploaded. I didn't bother developing this script further as absolutely no-one showed any interest in what I'd done with the history files and since I only wanted to view history for SNR and Errors on my line then I saw no point putting more time into it. It should however be really easy for me to add other folders if I wanted. Just noticed though that it's using v1 of the web interface. I updated the headers to a more logical layout a while ago and made the page layout a bit better looking by amending the CSS & html. I shall see if I can do anything with my CSS to make it more compartmentalised for those who may wish to add on what Stuart is doing with the zoom tool graph etc. I would really be interested in you history graphs even if just ES. I can’t run PHP. On my hosted server being a Windows 2016 which for some reason has PHP disabled. If anyone is interested I will update the CSS to try made it more modularised so it can fit with what Stuart is doing and I'll also upload my php code. kitz are the files you shared up to date? 
the ones I had access to the history page was non functional and marked as something like coming soon hence stuart developing a solution. Also interesting you commented about a v1 layout as well. By 31st of March I had a working version which was outputting history graphs fine but it was a bit messy, but had not developed a robust FTP script to automatically update the history files. - See here (https://forum.kitz.co.uk/index.php/topic,21305.msg369052.html#msg369052). My post on 1st April explained the difficulty with the FTP script I was having in that I had masses of history files on my local PC which could be problematic for web storage. At that I asked Stuart what he was doing re the uploads, but then realised that he didn't have a working ftp script for history either. Live stats have never been a problem and the ftp script for that has been working fine. - See here (https://forum.kitz.co.uk/index.php/topic,21305.msg369153.html#msg369153). On 12th April (https://forum.kitz.co.uk/index.php/topic,21305.15.html) - I mentioned I had a working version which was outputting history to a remote website rather than installing WAMP. Again I mentioned history file size problem for the FTP script. Atm I've only got historic data for SNRM & Err Secs purely because that's what I would look at first when it came to any historic data. I've mentioned time and time again that the problem was not with the php code but rather a WinSCP script that was needed that would perhaps only upload the last months historic data because I had several years of DSLstat data. In the end I took the easy option and just cleared out my local files of old data... and since then am using the FTP script to sync to local folders. I'm not quite sure how I could have made it any clearer that my problems were developing a robust FTP script and NOT the php side of things. 
I believe Stuart is also having problems in this area and he has to manually manage the history folders so we both got stuck at the same point. Can you perhaps see now why I was getting frustrated to the point whereby I stopped mentioning that I had a working version and doing any further development, because as far as I'm concerned mine is currently doing all that it needs for me personally. I have passed the link out enough. I think b*cat will be able to confirm he saw it was working ages ago, but his comment was about the DSLstats graph size and nothing to do with my side of things. Also interesting you commented about a v1 layout as well. I changed the layout to make the headers more logical and there were also a few cosmetic changes in layout so that things just looked a bit nicer. They were purely CSS type changes and have been freely available for download since 5th May. I edited the CSS again last night so there is another version that I will package and re-upload when I get chance. The only changes I have made recently are CSS type things to tidy up html layout. The reason I mentioned it is I noticed ned was using the very first version and not the 5th may version. PS. If some of the pages on my linestats page look weird you may need to refresh cache. (Ctrl & F5). I uploaded a new .css file last night and chrome in particular tends to cache old css files. . . . I think b*cat will be able to confirm he saw it was working ages ago, but his comment was about the DSLstats graph size and nothing to do with my side of things. Yes, that I can confirm. It was working nicely and provided all that I would have expected. Its all good :) We have different options on the table. i think i must be running a slightly older version of the web interface, as mine doesn't have the footer. i downloaded the files in post 1 only a few days ago.... are those the most recent version? Thanks. I think I’ve got the hosting company to allow the PHP to be implemented. 
It looks to be working fine, on both my VDSL and ADSL. Lines, but I will let it run and post a link over weekend. I changed the .css early hours 7th June which is why you won't have that yet and why it's only on my own version atm. I'm using WinSCP and some scripts to automatically FTP the live stats every min and synchronise the history graphs every 2 hours. I think the latest edition of DSLstats now may contain ftp info for the live stats, but admittedly I haven't tried that yet as my ftp script for livestats works fine and uses very little bandwidth. A bit harder was trying to get history files to synchronise and only update the latest files. I'm very conscious that my script is designed to run on a remote web-server and therefore was aware that some sites will have bandwidth restrictions. This has always been what caused my own scripts to come to standstill as ideally I would have liked it to automatically remove old remote files after 'x' period. It was also a bit more complicated as my DSLstat graphs are stored using the graph type name rather than date which would have been easier. Anyhow I do have a working synchronise ftp script for history - I cheated and cleared out the old files on my local machine which went back a few years. Until someone can devise a more robust FTP script to deal with automatically clearing out the remote webfiles every so often you may have to just keep an eye on remote storage. I really need to do a help file which explains WinSCP & the ftp scripts I've written so they can be adapted for various scenarios which is something I saw no point in doing as no-one else appeared to be interested. However, it is worth pointing out that you would likely need to use similar scripts if you want to use Stuarts history pages on a remote server too.... or else you are going to run the webpages locally or do it all manually... as this is something beyond what DSLstats inbuilt ftp server does. 
I'm afraid I can't just pass out the ftp scripts in their current form as they contain my server info. The latest version of dsl stats does indeed upload automatically. I was originally using WinSCP for datastore, webserver and snap shot files but now all 3 are being done via dsl stats real time. Thanks. I can synchronise the files I have as I seem to have a mixture of versions. Seems ok here in FF. It may be your browser caching the old css file rather than the new one? Chrome has a habit of doing that. Thanks for that confirmation. :) I'd used WinSCP as it was the only way of doing so when I first wrote my code which was prior to the latest DSLstats release. I think you will need WinSCP though for any of the history files and folders, so I will continue using it for now and attempt to explain for those who want to use it with the history folders. You should in practice just be able to drop the new version straight in over the top. Not sure about anything to do with Stuarts though as I don't have those. In the long term it would be an idea to merge Stuart's files and mine, which is why I rewrote my CSS the other night. I will attach the files to the first post in the thread. Might be a silly question, but I get the following on the history page. EDIT - I think it's a PHP issue with my PN webspace. Good news: it's working, even with SSL. Bad news: I could not get @Kitz history working, and I could not get @Broadstairs live data working so I ended up cobbling them both together. However there is a small problem . . . the one I seem to be regularly pointing out to Kitizens . . . your Bit Loading, Hlog and QLN plots need to be adjusted to show the entire 4096 sub-carriers of a profile 17a, G.993.2, service. You should go to the Bit Loading, Hlog & QLN screens and left-click on the "Change tone range" button. In the Pop-Up that then appears, left-click on "Custom" and then adjust the slider so that the plot is just sufficiently wide to show the 4096 sub-carriers.
You should go to the Bit Loading, Hlog & QLN screens .... then adjust the slider so that the plot is just sufficiently wide to show the 4096 sub-carriers. I wonder why it doesn't default to better values? 4096 must be the correct figure for the majority of users. I don't mind your suggesting it, because I've already noted the number of times this comes up. Unfortunately it's not as simple as just setting a default number of tones, because of the way the per-tone graphs are constructed. What I really need to do is dimension the graphs automatically according to the highest used tone, and I'll try to do that for the next release. Thanks B*cat, I've made those suggested changes. The history graphs do show the full range though. unfortunately you need to do this every time you start DSLstats, as it doesn't seem to remember your settings when closing the app. well i do anyway, not sure if others have the same. If I change the tone range on the hlog it also changes bitloading and QLN and saves this change permanently. Try changing the tone range, exiting DslStats via the red x in the top right and selecting yes when asked if you want to close the program. Launch DslStats and check if the tone range saved. That is not my experience, it behaves like any other configurable option. take a look at what mine looks like at the moment. this is what happens when dslstats closes and re-opens. before it closed it was displaying correctly. this happens every time for me, not just a 1 time occurrence. just to note there's only a 32 bit Windows version. Strange that your tone range doesn't stick. I've run the Windows, Linux and Pi versions and they all behaved as they should, the tone range only needing set once on 1 graph. when i looked at the downloads page i saw 64-bit and didn't check if it was windows or not. but yes, every time dslstats starts i have to go to that graph settings and set it again otherwise it does what you can see currently. How to do this?
Please explain step by step instruction, sorry as I am thick, haven't got any clue how to do this? What is the host name, username and password? Does host name mean I have to built my own website? username and password where I can find it from? Sorry for being numb. Yes, you need webspace to be able to host the files for the webserver. You may get free webspace from your ISP, if you do you can SFTP the files to there both from DSLStats and the Custom Interface files. I would start by finding out if you have webspace. Thanks but don't think Plusnet don't have free webspace? I got my webspace but nothing is loading dslstats? Have you put the interface in the webspace, and uploading the files to there? If you are not doing that it will not work. Pretty sure wiz don't allow PHP. Max how would you get your files on the site? You really should stick to the DslStats webserver max. The DslStats webserver should allow you to view your live stats. Setup snapshots and then you can view your history when you get home. You will not be able to get the history part working on kitz script it's obviously way too technical for you. All you have managed so far is a blank hosting page saying welcome, with a Google maps image probably with your home address. You shouldn't have been advised to get webspace if it's something you have never used before. It is not in the same category of "technical knowledge needed" as entering your mdws username/key. Running the v2 release of kitz custom interface on my on line web server, all working fine with the exception of history, the "history" tab just produces a blank browser page. The web server does support php, I am already running broadstairs, php based custom interface there without any issues. All the kitz utility program files are in the server "webserver" folder which is also the destination for the stats webserver files as uploaded via DSLStats from my RPI 3B via inbuilt routine. 
Noted that the kitz interface stats image & text files must be stored in name as opposed to date format, have renamed to conform and tried storage in name based folders or directly in the webserver folder, no joy, the history browser page does not even get to the stage of reporting "file not found" or indeed produce any form of header or dialogue, just a blank page ! I am obviously doing something wrong as quite a few forum members have the kitz interface or a combination of kitz & broadstairs interface running and available on their profiles, any suggestions as to what is wrong appreciated. @tiffy history.php requires you have PHP installed as well as Apache and an FTP server. From what I have read. Yes, appreciate that the history program file is php based, however, as stated, my webserver does support this and broadstairs, php based interface is already running there without any issues. Have also tried running locally on Windows using xampp (with Apache) which again runs broadstairs interface without any problems, same issue there with the kitz history page. Many thanks and sorry for late reply, I had to go out and mobile died. Many thanks to John (d2d4j) for a very prompt solution to my issue which I believed involved a slight tweak of the history.php file which he edited on the server, having access to this as his company provides my web hosting. Strangely, there was a re-occurrence of the issue a few hours after John's fix was applied, he very quickly diagnosed and attributed this to the history.php file being overwritten by an old revision from my RPi 3B source which I certainly had not manually applied. Had been aware at an earlier stage that name changes (Kitz to my ID) edited in the *.htm or history.php files on the server only would at some stage change back to original, carrying out the edits to the files in the RPi webserver folder gave a permanent result, concluded that all these files must be overwritten on the server during the DSLStats auto upload sequence.
With no other connection other than becoming a recent customer with John's web hosting company I can only repeat that his service and support with any of my issues has been exceptional and with his patronage of this forum/knowledge of *DSL related matters it makes it so much easier explaining any issues that arise, would thoroughly recommend. Very quick comment from me. I think the problem may be that I used php short tags in some of my code... which won't work unless the server has specifically got php short tags enabled. I'd used short tags where I quickly swap out from php to use html forms. Thanks for the explanation of "short tags", John (d2d4j) had mentioned this was causing the issue on his system with respect to running the history.php file, I did not really understand the logistics which I'am sure John would have explained had I asked. Your custom interface is now working very well on my system, many thanks for the development and free supply of this utility, much appreciated. Kitz's history.php code relies on "Save snapshots in categories" being set. I had "Save snapshots in daily folders" set - it seems more logical to me and allows easier housekeeping to delete older data no longer required. I know at least one other person was running a script to copy the image files from the daily snapshot folders to the category folders so that history.php worked. I am now using a script which rather than copying the files creates symlinks in the category folders which link to the files in the daily folders. Also if daily snapshot folders or files within them are deleted the symlinks pointing to them are automatically removed. This script can be run by cron (either at regular intervals or set to run a short time after the auto snapshots timing) or run manually after ad-hoc snapshots. It creates links for all possible snapshot graphs depending on what it finds. The only thing that needs amending is to set DSLstatspath - note the trailing / is required. 
I've added a note to the first post in the thread linking people to your post should they wish to use your script to store in daily folders. Many thanks for making your code publically available, as someone running a very similar setup to yours, ie. kitz & broadstairs stats display interfaces running together on a web hosting site and to date running very simple but effective cron job routines engineered by the site manager, John (d2d4j) to enable the kitz interface history function to work, your further development is very much appreciated. Your "cron symlink" script has worked perfectly on my web based, kitz history interface, beyond expectation even converted the existing png & txt files in the associated history folders from full size to symlinks, considerable saving of space on the disk with the avoidance of file duplication, again, many thanks for sharing. Is there any significance in the two identical SNRM lines in your code ? Struggling to understand the format of the last line of code, can you elaborate further or point to reference ? Sorry the duplication maybe my fault. I think I initially tried deleting lines you did not use so may have deleted argin from SNRMargin but I cannot remember exactly now sorry. -L follows symbolic links so you get information on the target files unless the link is broken when it gives you the information on the symlink. John, thanks for the clarification, I was aware that the cron code lines had been edited to suit my particular history preferences but as the duplicate line was also in jelv's published code thought it may be deliberate or significant, will remove or comment out one of the lines in my script. Thanks to both for the detailed explanation of the code final command line. Whoops - it's a duplicate that should be removed (not that is doing any harm). It stems from the SNRMperbanddownstream and SNRperbandupstream which I combined. Firstly many thanks @kitz for providing these pages for the DSLStats data. 
I am new on the forum and have been playing for the last day or so on getting it working with my data. I just wanted to share a change I made to the history.php page, so that the drop-down choices do not revert back to the default choices, and stay on the currently viewed folder and graph file. It makes it a little more intuitive when reviewing the graph for me. I hope this is the correct place to share. Script updated and tested on my system, working perfectly and certainly worth the effort, many thanks for sharing. From previous experience on my system, RPi 3B running DSLStats and uploading webserver files to web server, if edits are made to web site, server *.php files only they can be overwriten back to original by the corresponding file on the RPi at the next scheduled DSLStats upload, not sure if this would apply to the history.php file but has certainly happened to other *.php files I have edited on the web server in the past. Probably best to edit at source and then copy / replace the file on the web server. Absolutely, it kind of boils down to the release medium. The source needs to be synchronised with changes. I don't know how savvy you guys are at using sites such as github.com or bitbucket.org where you can freely host your code as an owner, but others can push 'merge' (read change) requests to your codebase which you can choose to accept or deny? You can then mark a 'release' at a certain point in the development phase that can be downloaded as a zip, at a point you think merged changes are stable or significantly different enough to merit a new release tag. I'm using Kitz custom interface on a raspberry pi and it works great but I need to change the default apache port from port 80. I've updated /etc/apache2/sites-enabled/000-default.conf and /etc/apache2/ports.conf and I get the linestats OK but lose the page formatting, any ideas please? Not good is it. Interestingly when the line was busy downloading yesterday afternoon the packet loss reduced. 
I get the linestats OK but lose the page formatting, any ideas please? Have you set up custom folders? The .css file should be in the same folder as index.htm etc.
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

import itertools
import logging
import os
import unittest
import warnings
from abc import ABC, ABCMeta, abstractmethod
from collections import defaultdict
from contextlib import contextmanager
from tempfile import mkdtemp
from textwrap import dedent
from typing import Any, Dict, Iterable, List, Optional, Sequence, Type, TypeVar, Union, cast

from pants.base.build_root import BuildRoot
from pants.base.cmd_line_spec_parser import CmdLineSpecParser
from pants.base.exceptions import TaskError
from pants.base.specs import AddressSpec, AddressSpecs, FilesystemSpecs, Specs
from pants.build_graph.address import Address, BuildFileAddress
from pants.build_graph.build_configuration import BuildConfiguration
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.target import Target as TargetV1
from pants.engine.fs import GlobMatchErrorBehavior, PathGlobs, PathGlobsAndRoot, Snapshot
from pants.engine.internals.scheduler import SchedulerSession
from pants.engine.legacy.graph import HydratedField
from pants.engine.legacy.structs import SourceGlobs, SourcesField
from pants.engine.rules import RootRule
from pants.engine.selectors import Params
from pants.engine.target import Target
from pants.init.engine_initializer import EngineInitializer
from pants.init.util import clean_global_runtime_state
from pants.option.global_options import BuildFileImportsBehavior
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.source.source_root import SourceRootConfig
from pants.source.wrapped_globs import EagerFilesetWithSpec
from pants.subsystem.subsystem import Subsystem
from pants.task.goal_options_mixin import GoalOptionsMixin
from pants.testutil.base.context_utils import create_context_from_options
from pants.testutil.engine.util import init_native
from pants.testutil.option.fakes import create_options_for_optionables
from pants.testutil.subsystem import util as subsystem_util
from pants.util.collections import assert_single_element
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import (
    recursive_dirname,
    relative_symlink,
    safe_file_dump,
    safe_mkdir,
    safe_mkdtemp,
    safe_open,
    safe_rmtree,
)
from pants.util.memo import memoized_method
from pants.util.meta import classproperty


class AbstractTestGenerator(ABC):
    """A mixin that facilitates test generation at runtime."""

    @classmethod
    @abstractmethod
    def generate_tests(cls):
        """Generate tests for a given class.

        This should be called against the composing class in its defining module, e.g.

          class ThingTest(TestGenerator):
            ...

          ThingTest.generate_tests()
        """

    @classmethod
    def add_test(cls, method_name, method):
        """A classmethod that adds dynamic test methods to a given class.

        :param string method_name: The name of the test method (e.g. `test_thing_x`).
        :param callable method: A callable representing the method. This should take a 'self'
                                argument as its first parameter for instance method binding.
        """
        # Refuse to clobber an existing attribute, and enforce pytest/unittest
        # discovery naming so the generated test actually runs.
        assert not hasattr(
            cls, method_name
        ), f"a test with name `{method_name}` already exists on `{cls.__name__}`!"
        assert method_name.startswith("test_"), f"{method_name} is not a valid test name!"
        setattr(cls, method_name, method)


class TestBase(unittest.TestCase, metaclass=ABCMeta):
    """A baseclass useful for tests requiring a temporary buildroot.

    :API: public
    """

    # Lazily-initialized engine state; created on first access via _init_engine()
    # and torn down by _reset_engine() between tests.
    _scheduler: Optional[SchedulerSession] = None
    _build_graph = None
    _address_mapper = None

    def build_path(self, relpath):
        """Returns the canonical BUILD file path for the given relative build path.

        :API: public
        """
        if os.path.basename(relpath).startswith("BUILD"):
            return relpath
        else:
            return os.path.join(relpath, "BUILD")

    def create_dir(self, relpath):
        """Creates a directory under the buildroot.

        :API: public

        relpath: The relative path to the directory from the build root.
        """
        path = os.path.join(self.build_root, relpath)
        safe_mkdir(path)
        self.invalidate_for(relpath)
        return path

    def create_workdir_dir(self, relpath):
        """Creates a directory under the work directory.

        :API: public

        relpath: The relative path to the directory from the work directory.
        """
        path = os.path.join(self.pants_workdir, relpath)
        safe_mkdir(path)
        self.invalidate_for(relpath)
        return path

    def invalidate_for(self, *relpaths):
        """Invalidates all files from the relpath, recursively up to the root.

        Many python operations implicitly create parent directories, so we assume that touching a
        file located below directories that do not currently exist will result in their creation.
        """
        # No-op until the engine has been initialized for this test.
        if self._scheduler is None:
            return
        files = {f for relpath in relpaths for f in recursive_dirname(relpath)}
        return self._scheduler.invalidate_files(files)

    def create_link(self, relsrc, reldst):
        """Creates a symlink within the buildroot.

        :API: public

        relsrc: A relative path for the source of the link.
        reldst: A relative path for the destination of the link.
        """
        src = os.path.join(self.build_root, relsrc)
        dst = os.path.join(self.build_root, reldst)
        relative_symlink(src, dst)
        self.invalidate_for(reldst)

    def create_file(self, relpath, contents="", mode="w"):
        """Writes to a file under the buildroot.

        :API: public

        relpath: The relative path to the file from the build root.
        contents: A string containing the contents of the file - '' by default.
        mode: The mode to write to the file in - over-write by default.
        """
        path = os.path.join(self.build_root, relpath)
        with safe_open(path, mode=mode) as fp:
            fp.write(contents)
        self.invalidate_for(relpath)
        return path

    def create_files(self, path, files):
        """Writes to a file under the buildroot with contents same as file name.

        :API: public

        path: The relative path to the file from the build root.
        files: List of file names.
        """
        for f in files:
            self.create_file(os.path.join(path, f), contents=f)

    def create_workdir_file(self, relpath, contents="", mode="w"):
        """Writes to a file under the work directory.

        :API: public

        relpath: The relative path to the file from the work directory.
        contents: A string containing the contents of the file - '' by default.
        mode: The mode to write to the file in - over-write by default.
        """
        path = os.path.join(self.pants_workdir, relpath)
        with safe_open(path, mode=mode) as fp:
            fp.write(contents)
        return path

    def add_to_build_file(self, relpath, target):
        """Adds the given target specification to the BUILD file at relpath.

        :API: public

        relpath: The relative path to the BUILD file from the build root.
        target: A string containing the target definition as it would appear in a BUILD file.
        """
        # Append mode so multiple targets can accumulate in one BUILD file.
        self.create_file(self.build_path(relpath), target, mode="a")

    def make_target(
        self,
        spec="",
        target_type=TargetV1,
        dependencies=None,
        derived_from=None,
        synthetic=False,
        make_missing_sources=True,
        **kwargs,
    ):
        """Creates a target and injects it into the test's build graph.

        :API: public

        :param string spec: The target address spec that locates this target.
        :param type target_type: The concrete target subclass to create this new target from.
        :param list dependencies: A list of target instances this new target depends on.
        :param derived_from: The target this new target was derived from.
        :type derived_from: :class:`pants.build_graph.target.Target`
        """
        self._init_target_subsystem()

        address = Address.parse(spec)

        if make_missing_sources and "sources" in kwargs:
            # Materialize each non-glob source entry as an empty file so hydration succeeds.
            for source in kwargs["sources"]:
                if "*" not in source:
                    self.create_file(os.path.join(address.spec_path, source), mode="a", contents="")
            kwargs["sources"] = self.sources_for(kwargs["sources"], address.spec_path)

        target = target_type(
            name=address.target_name, address=address, build_graph=self.build_graph, **kwargs
        )
        dependencies = dependencies or []

        self.build_graph.apply_injectables([target])
        self.build_graph.inject_target(
            target,
            dependencies=[dep.address for dep in dependencies],
            derived_from=derived_from,
            synthetic=synthetic,
        )

        # TODO(John Sirois): This re-creates a little bit too much work done by the BuildGraph.
        # Fixup the BuildGraph to deal with non BuildFileAddresses better and just leverage it.
        traversables = [target.compute_dependency_address_specs(payload=target.payload)]
        for dependency_spec in itertools.chain(*traversables):
            dependency_address = Address.parse(dependency_spec, relative_to=address.spec_path)
            dependency_target = self.build_graph.get_target(dependency_address)
            if not dependency_target:
                raise ValueError(
                    "Tests must make targets for dependency specs ahead of them "
                    "being traversed, {} tried to traverse {} which does not exist.".format(
                        target, dependency_address
                    )
                )
            if dependency_target not in target.dependencies:
                self.build_graph.inject_dependency(
                    dependent=target.address, dependency=dependency_address
                )
                target.mark_transitive_invalidation_hash_dirty()

        return target

    def sources_for(
        self, package_relative_path_globs: List[str], package_dir: str = "",
    ) -> EagerFilesetWithSpec:
        """Hydrates the given source globs via the engine into an EagerFilesetWithSpec."""
        sources_field = SourcesField(
            address=BuildFileAddress(
                rel_path=os.path.join(package_dir, "BUILD"), target_name="_bogus_target_for_test",
            ),
            arg="sources",
            source_globs=SourceGlobs(*package_relative_path_globs),
        )
        field = self.scheduler.product_request(HydratedField, [sources_field])[0]
        return cast(EagerFilesetWithSpec, field.value)

    @classmethod
    def alias_groups(cls):
        """
        :API: public
        """
        return BuildFileAliases(targets={"target": TargetV1})

    @classmethod
    def rules(cls):
        # Required for sources_for:
        return [RootRule(SourcesField)]

    @classmethod
    def target_types(cls) -> Sequence[Type[Target]]:
        return ()

    @classmethod
    def build_config(cls):
        # Assemble the aliases, rules and target types that this test class declares.
        build_config = BuildConfiguration()
        build_config.register_aliases(cls.alias_groups())
        build_config.register_rules(cls.rules())
        build_config.register_target_types(cls.target_types())
        return build_config

    def setUp(self):
        """
        :API: public
        """
        super().setUp()
        # Avoid resetting the Runtracker here, as that is specific to fork'd process cleanup.
        clean_global_runtime_state(reset_subsystem=True)

        self.addCleanup(self._reset_engine)

        safe_mkdir(self.build_root, clean=True)
        safe_mkdir(self.pants_workdir)
        self.addCleanup(safe_rmtree, self.build_root)

        BuildRoot().path = self.build_root
        self.addCleanup(BuildRoot().reset)

        self.subprocess_dir = os.path.join(self.build_root, ".pids")

        self.options = defaultdict(dict)  # scope -> key-value mapping.
        self.options[""] = {
            "pants_workdir": self.pants_workdir,
            "pants_supportdir": os.path.join(self.build_root, "build-support"),
            "pants_distdir": os.path.join(self.build_root, "dist"),
            "pants_configdir": os.path.join(self.build_root, "config"),
            "pants_subprocessdir": self.subprocess_dir,
            "cache_key_gen_version": "0-test",
        }
        self.options["cache"] = {
            "read_from": [],
            "write_to": [],
        }

        self._build_configuration = self.build_config()
        self._inited_target = False
        subsystem_util.init_subsystem(TargetV1.TagAssignments)

    def buildroot_files(self, relpath=None):
        """Returns the set of all files under the test build root.

        :API: public

        :param string relpath: If supplied, only collect files from this subtree.
        :returns: All file paths found.
        :rtype: set
        """

        def scan():
            # Yield each file path relative to the build root.
            for root, dirs, files in os.walk(os.path.join(self.build_root, relpath or "")):
                for f in files:
                    yield os.path.relpath(os.path.join(root, f), self.build_root)

        return set(scan())

    def _reset_engine(self):
        """Clears build-graph and scheduler file state between tests (cleanup hook)."""
        if self._scheduler is not None:
            self._build_graph.reset()
            self._scheduler.invalidate_all_files()

    @contextmanager
    def isolated_local_store(self):
        """Temporarily use an anonymous, empty Store for the Scheduler.

        In most cases we re-use a Store across all tests, since `file` and `directory` entries are
        content addressed, and `process` entries are intended to have strong cache keys. But when
        dealing with non-referentially transparent `process` executions, it can sometimes be
        necessary to avoid this cache.
        """
        # Drop any existing scheduler so _init_engine builds one against the fresh store.
        self._scheduler = None
        local_store_dir = os.path.realpath(safe_mkdtemp())
        self._init_engine(local_store_dir=local_store_dir)
        try:
            yield
        finally:
            self._scheduler = None
            safe_rmtree(local_store_dir)

    @property
    def build_root(self):
        return self._build_root()

    @property
    def pants_workdir(self):
        return self._pants_workdir()

    @memoized_method
    def _build_root(self):
        # One temp dir per test instance; realpath avoids symlinked-tmpdir surprises.
        return os.path.realpath(mkdtemp(suffix="_BUILD_ROOT"))

    @memoized_method
    def _pants_workdir(self):
        return os.path.join(self._build_root(), ".pants.d")

    def _init_engine(self, local_store_dir: Optional[str] = None) -> None:
        """Idempotently creates the scheduler session, build graph and address mapper."""
        if self._scheduler is not None:
            return

        options_bootstrapper = OptionsBootstrapper.create(args=["--pants-config-files=[]"])
        local_store_dir = (
            local_store_dir
            or options_bootstrapper.bootstrap_options.for_global_scope().local_store_dir
        )

        # NB: This uses the long form of initialization because it needs to directly specify
        # `cls.alias_groups` rather than having them be provided by bootstrap options.
        graph_session = EngineInitializer.setup_legacy_graph_extended(
            pants_ignore_patterns=[],
            use_gitignore=False,
            local_store_dir=local_store_dir,
            build_file_prelude_globs=(),
            build_file_imports_behavior=BuildFileImportsBehavior.error,
            glob_match_error_behavior=GlobMatchErrorBehavior.error,
            native=init_native(),
            options_bootstrapper=options_bootstrapper,
            build_root=self.build_root,
            build_configuration=self.build_config(),
            build_ignore_patterns=None,
        ).new_session(zipkin_trace_v2=False, build_id="buildid_for_test")
        self._scheduler = graph_session.scheduler_session
        self._build_graph, self._address_mapper = graph_session.create_build_graph(
            Specs(address_specs=AddressSpecs([]), filesystem_specs=FilesystemSpecs([])),
            self._build_root(),
        )

    @property
    def scheduler(self) -> SchedulerSession:
        # Lazily initialize the engine on first access.
        if self._scheduler is None:
            self._init_engine()
            self.post_scheduler_init()
        return cast(SchedulerSession, self._scheduler)

    def post_scheduler_init(self):
        """Run after initializing the Scheduler, it will have the same lifetime."""
        pass

    @property
    def address_mapper(self):
        if self._address_mapper is None:
            self._init_engine()
        return self._address_mapper

    @property
    def build_graph(self):
        if self._build_graph is None:
            self._init_engine()
        return self._build_graph

    def reset_build_graph(self, reset_build_files=False, delete_build_files=False):
        """Start over with a fresh build graph with no targets in it."""
        if delete_build_files or reset_build_files:
            files = [f for f in self.buildroot_files() if os.path.basename(f) == "BUILD"]
            if delete_build_files:
                for f in files:
                    os.remove(os.path.join(self.build_root, f))
            self.invalidate_for(*files)
        if self._build_graph is not None:
            self._build_graph.reset()

    _P = TypeVar("_P")

    def request_single_product(
        self, product_type: Type["TestBase._P"], subject: Union[Params, Any]
    ) -> "TestBase._P":
        """Asks the engine for exactly one product of the given type for the subject."""
        result = assert_single_element(self.scheduler.product_request(product_type, [subject]))
        return cast(TestBase._P, result)

    def set_options_for_scope(self, scope, **kwargs):
        # Accumulates option overrides; consumed later by context().
        self.options[scope].update(kwargs)

    def context(
        self,
        for_task_types=None,
        for_subsystems=None,
        options=None,
        target_roots=None,
        console_outstream=None,
        workspace=None,
        scheduler=None,
        address_mapper=None,
        **kwargs,
    ):
        """
        :API: public

        :param dict **kwargs: keyword arguments passed in to `create_options_for_optionables`.
        """
        # Many tests use source root functionality via the SourceRootConfig.global_instance().
        # (typically accessed via Target.target_base), so we always set it up, for convenience.
        for_subsystems = set(for_subsystems or ())
        for subsystem in for_subsystems:
            if subsystem.options_scope is None:
                raise TaskError(
                    "You must set a scope on your subsystem type before using it in tests."
                )

        optionables = {SourceRootConfig} | self._build_configuration.optionables() | for_subsystems

        for_task_types = for_task_types or ()
        for task_type in for_task_types:
            scope = task_type.options_scope
            if scope is None:
                raise TaskError("You must set a scope on your task type before using it in tests.")
            optionables.add(task_type)
            # If task is expected to inherit goal-level options, register those directly on the task,
            # by subclassing the goal options registrar and settings its scope to the task scope.
            if issubclass(task_type, GoalOptionsMixin):
                subclass_name = "test_{}_{}_{}".format(
                    task_type.__name__,
                    task_type.goal_options_registrar_cls.options_scope,
                    task_type.options_scope,
                )
                optionables.add(
                    type(
                        subclass_name,
                        (task_type.goal_options_registrar_cls,),
                        {"options_scope": task_type.options_scope},
                    )
                )

        # Now expand to all deps.
        all_optionables = set()
        for optionable in optionables:
            all_optionables.update(si.optionable_cls for si in optionable.known_scope_infos())

        # Now default the option values and override with any caller-specified values.
        # TODO(benjy): Get rid of the options arg, and require tests to call set_options.
        options = options.copy() if options else {}
        for s, opts in self.options.items():
            scoped_opts = options.setdefault(s, {})
            scoped_opts.update(opts)

        fake_options = create_options_for_optionables(all_optionables, options=options, **kwargs)

        Subsystem.reset(reset_options=True)
        Subsystem.set_options(fake_options)

        scheduler = scheduler or self.scheduler

        address_mapper = address_mapper or self.address_mapper

        context = create_context_from_options(
            fake_options,
            target_roots=target_roots,
            build_graph=self.build_graph,
            build_configuration=self._build_configuration,
            address_mapper=address_mapper,
            console_outstream=console_outstream,
            workspace=workspace,
            scheduler=scheduler,
        )
        return context

    def tearDown(self):
        """
        :API: public
        """
        super().tearDown()
        Subsystem.reset()

    @classproperty
    def subsystems(cls):
        """Initialize these subsystems when running your test.

        If your test instantiates a target type that depends on any subsystems, those subsystems need to
        be initialized in your test. You can override this property to return the necessary subsystem
        classes.

        :rtype: list of type objects, all subclasses of Subsystem
        """
        return TargetV1.subsystems()

    def _init_target_subsystem(self):
        # Initialize target subsystems at most once per test.
        if not self._inited_target:
            subsystem_util.init_subsystems(self.subsystems)
            self._inited_target = True

    def target(self, spec):
        """Resolves the given target address to a V1 Target object.

        :API: public

        address: The BUILD target address to resolve.

        Returns the corresponding V1 Target or else None if the address does not point to a defined
        Target.
        """
        self._init_target_subsystem()

        address = Address.parse(spec)
        self.build_graph.inject_address_closure(address)
        return self.build_graph.get_target(address)

    def targets(self, address_spec):
        """Resolves a target spec to one or more V1 Target objects.

        :API: public

        spec: Either BUILD target address or else a target glob using the siblings ':' or
              descendants '::' suffixes.

        Returns the set of all Targets found.
        """
        address_spec = CmdLineSpecParser(self.build_root).parse_spec(address_spec)
        assert isinstance(address_spec, AddressSpec)
        targets = []
        for address in self.build_graph.inject_address_specs_closure([address_spec]):
            targets.append(self.build_graph.get_target(address))
        return targets

    def create_library(
        self,
        *,
        path: str,
        target_type: str,
        name: str,
        sources: Optional[List[str]] = None,
        java_sources: Optional[List[str]] = None,
        provides: Optional[str] = None,
        dependencies: Optional[List[str]] = None,
        requirements: Optional[str] = None,
    ):
        """Creates a library target of given type at the BUILD file at path with sources.

        :API: public

        path: The relative path to the BUILD file from the build root.
        target_type: valid pants target type.
        name: Name of the library target.
        sources: List of source file at the path relative to path.
        java_sources: List of java sources.
        provides: Provides with a format consistent with what should be rendered in the resulting
            BUILD file, eg:
            "artifact(org='org.pantsbuild.example', name='hello-greet', repo=public)"
        dependencies: List of dependencies: [':protobuf-2.4.1']
        requirements: Python requirements with a format consistent with what should be in the
            resulting build file, eg: "[python_requirement(foo==1.0.0)]"
        """
        if sources:
            self.create_files(path, sources)
        sources_str = f"sources={repr(sources)}," if sources is not None else ""
        if java_sources is not None:
            formatted_java_sources = ",".join(f'"{str_target}"' for str_target in java_sources)
            java_sources_str = f"java_sources=[{formatted_java_sources}],"
        else:
            java_sources_str = ""
        provides_str = f"provides={provides}," if provides is not None else ""
        dependencies_str = f"dependencies={dependencies}," if dependencies is not None else ""
        requirements_str = f"requirements={requirements}," if requirements is not None else ""
        self.add_to_build_file(
            path,
            dedent(
                f"""
                {target_type}(name='{name}',
                  {sources_str}
                  {java_sources_str}
                  {provides_str}
                  {dependencies_str}
                  {requirements_str}
                )
                """
            ),
        )
        return self.target(f"{path}:{name}")

    def create_resources(self, path, name, *sources):
        """
        :API: public
        """
        return self.create_library(path=path, target_type="resources", name=name, sources=sources,)

    def assertUnorderedPrefixEqual(self, expected, actual_iter):
        """Consumes len(expected) items from the given iter, and asserts that they match, unordered.

        :API: public
        """
        actual = list(itertools.islice(actual_iter, len(expected)))
        self.assertEqual(sorted(expected), sorted(actual))

    def assertPrefixEqual(self, expected, actual_iter):
        """Consumes len(expected) items from the given iter, and asserts that they match, in order.

        :API: public
        """
        self.assertEqual(expected, list(itertools.islice(actual_iter, len(expected))))

    def assertInFile(self, string, file_path):
        """Verifies that a string appears in a file.

        :API: public
        """
        with open(file_path, "r") as f:
            content = f.read()
            self.assertIn(string, content, f'"{string}" is not in the file {f.name}:\n{content}')

    @contextmanager
    def assertRaisesWithMessage(self, exception_type, error_text):
        """Verifies that an exception message is equal to `error_text`.

        :param type exception_type: The exception type which is expected to be raised within the body.
        :param str error_text: Text that the exception message should match exactly with
                               `self.assertEqual()`.
        :API: public
        """
        with self.assertRaises(exception_type) as cm:
            yield cm
        self.assertEqual(error_text, str(cm.exception))

    @contextmanager
    def assertRaisesWithMessageContaining(self, exception_type, error_text):
        """Verifies that the string `error_text` appears in an exception message.

        :param type exception_type: The exception type which is expected to be raised within the body.
        :param str error_text: Text that the exception message should contain with `self.assertIn()`.
        :API: public
        """
        with self.assertRaises(exception_type) as cm:
            yield cm
        self.assertIn(error_text, str(cm.exception))

    @contextmanager
    def assertDoesNotRaise(self, exc_class: Type[BaseException] = Exception):
        """Verifies that the block does not raise an exception of the specified type.

        :API: public
        """
        try:
            yield
        except exc_class as e:
            raise AssertionError(f"section should not have raised, but did: {e}") from e

    def get_bootstrap_options(self, cli_options=()):
        """Retrieves bootstrap options.

        :param cli_options: An iterable of CLI flags to pass as arguments to `OptionsBootstrapper`.
        """
        args = tuple(["--pants-config-files=[]"]) + tuple(cli_options)
        return OptionsBootstrapper.create(args=args).bootstrap_options.for_global_scope()

    def make_snapshot(self, files: Dict[str, Union[str, bytes]]) -> Snapshot:
        """Makes a snapshot from a map of file name to file content."""
        with temporary_dir() as temp_dir:
            for file_name, content in files.items():
                mode = "wb" if isinstance(content, bytes) else "w"
                safe_file_dump(os.path.join(temp_dir, file_name), content, mode=mode)
            return cast(
                Snapshot,
                self.scheduler.capture_snapshots((PathGlobsAndRoot(PathGlobs(("**",)), temp_dir),))[
                    0
                ],
            )

    def make_snapshot_of_empty_files(self, files: Iterable[str]) -> Snapshot:
        """Makes a snapshot with empty content for each file.

        This is a convenience around `TestBase.make_snapshot`, which allows specifying the content
        for each file.
        """
        return self.make_snapshot({fp: "" for fp in files})

    class LoggingRecorder:
        """Simple logging handler to record warnings."""

        def __init__(self):
            self._records = []
            self.level = logging.DEBUG

        def handle(self, record):
            # Called by the logging framework for every emitted record.
            self._records.append(record)

        def _messages_for_level(self, levelname):
            return [
                f"{record.name}: {record.getMessage()}"
                for record in self._records
                if record.levelname == levelname
            ]

        def infos(self):
            return self._messages_for_level("INFO")

        def warnings(self):
            return self._messages_for_level("WARNING")

        def errors(self):
            return self._messages_for_level("ERROR")

    @contextmanager
    def captured_logging(self, level=None):
        """Captures log records emitted within the block into a LoggingRecorder."""
        root_logger = logging.getLogger()

        old_level = root_logger.level
        root_logger.setLevel(level or logging.NOTSET)

        handler = self.LoggingRecorder()
        root_logger.addHandler(handler)
        try:
            yield handler
        finally:
            # Restore the root logger exactly as it was before the block ran.
            root_logger.setLevel(old_level)
            root_logger.removeHandler(handler)

    @contextmanager
    def warnings_catcher(self):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            yield w

    def assertWarning(self, w, category, warning_text):
        # Expects exactly one warning to have been recorded by warnings_catcher().
        single_warning = assert_single_element(w)
        self.assertEqual(single_warning.category, category)
        warning_message = single_warning.message
        self.assertEqual(warning_text, str(warning_message))

    def retrieve_single_product_at_target_base(self, product_mapping, target):
        """Returns the single product registered for `target` under its single base dir."""
        mapping_for_target = product_mapping.get(target)
        single_base_dir = assert_single_element(list(mapping_for_target.keys()))
        single_product = assert_single_element(mapping_for_target[single_base_dir])
        return single_product

    def populate_target_dict(self, target_map):
        """Return a dict containing targets with files generated according to `target_map`.

        The keys of `target_map` are target address strings, while the values of `target_map` should be
        a dict which contains keyword arguments fed into `self.make_target()`, along with a few special
        keys. Special keys are:

        - 'key': used to access the target in the returned dict. Defaults to the target address spec.
        - 'filemap': creates files at the specified relative paths to the target.

        An `OrderedDict` of 2-tuples must be used with the targets topologically ordered, if
        they have dependencies on each other. Note that dependency cycles are not currently supported
        with this method.

        :param target_map: Dict mapping each target address to generate -> kwargs for
                           `self.make_target()`, along with a 'key' and optionally a 'filemap'
                           argument.
        :return: Dict mapping the required 'key' argument -> target instance for each element of
                 `target_map`.
        :rtype: dict
        """
        target_dict = {}

        # Create a target from each specification and insert it into `target_dict`.
        for address_spec, target_kwargs in target_map.items():
            unprocessed_kwargs = target_kwargs.copy()

            target_base = Address.parse(address_spec).spec_path

            # Populate the target's owned files from the specification.
            filemap = unprocessed_kwargs.pop("filemap", {})
            for rel_path, content in filemap.items():
                buildroot_path = os.path.join(target_base, rel_path)
                self.create_file(buildroot_path, content)

            # Ensure any dependencies exist in the target dict (`target_map` must then be an
            # OrderedDict).
            # The 'key' is used to access the target in `target_dict`, and defaults to `target_spec`.
            target_address = Address.parse(address_spec)
            key = unprocessed_kwargs.pop("key", target_address.target_name)
            dep_targets = []
            for dep_spec in unprocessed_kwargs.pop("dependencies", []):
                existing_tgt_key = target_map[dep_spec]["key"]
                dep_targets.append(target_dict[existing_tgt_key])

            # Register the generated target.
            generated_target = self.make_target(
                spec=address_spec, dependencies=dep_targets, **unprocessed_kwargs
            )
            target_dict[key] = generated_target

        return target_dict
In addition, a driver can have his or her license automatically suspended for a certain period of time for certain convictions. Those reasons include a conviction for homicide by vehicle, any felony in the commission of which a motor vehicle is used, using a motor vehicle in fleeing or attempting to elude an officer, fraudulent application for a license or fictitious use of a license, hit and run or leaving the scene of an accident, racing, operating a motor vehicle with a revoked, canceled, or suspended registration, felony forgery relating to an Identification document, refusal to take a chemical test in conjunction with an arrest for DUI, conviction for driving without insurance, conviction for driving while license is suspended, failure to appear in court or respond to a traffic citation, any violation of the Georgia Control Substance Act, a conviction for driving under the influence of alcohol, or drugs or accumulating 15 points within 24 months under the point system, including violations committed out of state. A driver can, however, apply for a limited driving permit while his or her license is suspended. The driver can only drive to and from work, to get medical treatment or prescription drugs, attend class, attend support group or treatment meetings, or attend driver education classes while being issued a temporary driving permit. If the driver violates the permit, however, he or she could face an additional six-month suspension of their license. If you received a speeding ticket or were cited for other moving violations, you could benefit from discussing your options with a traffic ticket defense attorney prior to taking any action. Drivers often fail to realize how quickly his or her driving record can be blemished by unexpected citations and how costly auto insurance surcharges and increases in annual premiums can be. Although paying a traffic fine may appear to be the easiest way out, it may not be the smartest thing for you to do. 
Many people do this without understanding the collateral consequences a speeding ticket, particularly anything greater than 14 mph, can have on your driving record and insurance rates. I talked with a friend the other day who mentioned a relative of hers had two speeding tickets within a year and simply paid both as quickly as possible thinking it to be the best way to handle the infractions. The friend further told me the relative’s insurance premium had doubled as a result of the tickets. This is a disastrous scenario for anyone. If ticketed for speeding or any other traffic offense, you must contact an experienced attorney who can help you avoid similar pitfalls. Dove Law Firm, LLC is located in Ocilla and serves residents throughout South Georgia. From the beginning to the end, we deliver excellent legal services for residents of Ocilla, Fitzgerald, Tifton, Douglas, Moultrie, Hazlehurst, Abbeville, Nashville, Adel, Cordele, McRae, Sylvester, and Ashburn.
# Definition lookup for the Bicycle Repair Man ("bike") refactoring library.
# Given a source coordinate, these routines resolve the name or attribute at
# that point to the place(s) where it is defined.
# NOTE(review): Python 2 only — relies on the deprecated `compiler` AST
# package, `print >>` syntax and generator `.next()`.
from __future__ import generators
from bike.query.common import Match, MatchFinder, \
     getScopeForLine, indexToCoordinates, \
     translateSourceCoordsIntoASTNode, scanScopeForMatches, \
     isAMethod, convertNodeToMatchObject, walkLinesContainingStrings
from bike.parsing.parserutils import generateLogicalLines,\
     generateLogicalLinesAndLineNumbers, \
     splitLogicalLines, makeLineParseable
import compiler
from compiler.ast import Getattr, Name, AssName, AssAttr
from bike.parsing.fastparserast import getRoot, Package, Class, \
     Module, Function, Instance
import re
from bike.query.getTypeOf import getTypeOfExpr, UnfoundType, \
     isWordInLine, resolveImportedModuleOrPackage
from bike.parsing import visitor
from bike.parsing.visitor import walkAndGenerate
from bike.parsing.parserutils import makeLineParseable,splitLogicalLines
from bike.parsing.newstuff import getSourceNodesContainingRegex
from bike.parsing.load import getSourceNode
from bike import log

class CantFindDefinitionException:
    # Sentinel exception type; declared but not raised in this module.
    pass

def findAllPossibleDefinitionsByCoords(filepath,lineno,col):
    """Generator: yield Match objects for every plausible definition of the
    identifier at (lineno, col) in filepath.

    Yields the scope-resolved definition first (if any); for attribute
    references that could not be resolved with full confidence it falls back
    to a python-path-wide scan for same-named methods (confidence 50).
    """
    #try:
    node = translateSourceCoordsIntoASTNode(filepath,lineno,col)
    #except:
    #    import traceback
    #    traceback.print_exc()
    if node is None:
        # NOTE(review): string exception — legal only in very old Python 2.
        raise "selected node type not supported"
    scope = getScopeForLine(getSourceNode(filepath),lineno)
    match = findDefinitionFromASTNode(scope,node)
    if match is not None:
        yield match
    # Fall back to a global method-name scan for unresolved attributes.
    if isinstance(node,Getattr) and (match is None or match.confidence != 100):
        root = getRoot()
        name = node.attrname
        for match in scanPythonPathForMatchingMethodNames(name,filepath):
            yield match
    print >>log.progress,"done"

def findDefinitionFromASTNode(scope,node):
    """Resolve `node` (a compiler AST node) to a Match for its definition,
    searching outward from `scope`, or return None.

    Resolution order for plain names: child scopes, then assignments and
    function arguments in the scope's own lines, then imports, then the
    enclosing (parent) scope — stopping at module level.
    """
    assert node is not None
    if isinstance(node,Name) or isinstance(node,AssName):
        while 1:
            # try scope children
            childscope = scope.getChild(node.name)
            if childscope is not None:
                return convertNodeToMatchObject(childscope,100)
            if isinstance(scope,Package):
                # a package's names live in its __init__ module
                scope = scope.getChild("__init__")
            # try arguments and assignments
            match = scanScopeAST(scope,node.name,
                                 AssignmentAndFnArgsSearcher(node.name))
            if match is not None:
                return match
            # try imports
            match = searchImportedModulesForDefinition(scope,node)
            if match is not None:
                return match
            if not isinstance(scope,Module):
                # try parent scope
                scope = scope.getParent()
            else:
                break
        assert isinstance(scope,Module)
    elif isinstance(node,Getattr) or isinstance(node,AssAttr):
        # attribute reference: resolve the type of the object expression
        # first, then look the attribute up on that type.
        exprtype = getTypeOfExpr(scope,node.expr)
        if not (exprtype is None or isinstance(exprtype,UnfoundType)):
            if isinstance(exprtype,Instance):
                exprtype = exprtype.getType()
                match = findDefinitionOfAttributeFromASTNode(exprtype,
                                                             node.attrname)
            else:
                match = findDefinitionFromASTNode(exprtype,
                                                  Name(node.attrname))
            if match is not None:
                return match
    elif isinstance(node,compiler.ast.Function) or \
         isinstance(node,compiler.ast.Class):
        # the selected node is itself a def/class statement
        if isAMethod(scope,node):
            match = findDefinitionOfAttributeFromASTNode(scope,
                                                         node.name)
        else:
            match = findDefinitionFromASTNode(scope,Name(node.name))
        if match is not None:
            return match
    # last resort: if the expression's own type can be pinned down to a
    # class/function (not an instance), treat that as the definition.
    type = getTypeOfExpr(scope,node)
    if type is not None and (not isinstance(type,UnfoundType)) and \
           (not isinstance(type,Instance)):
        return convertNodeToMatchObject(type,100)
    else:
        return None

def findDefinitionOfAttributeFromASTNode(type,name):
    """Find where attribute `name` is defined on class `type`.

    Checks method names first (confidence 100), then scans method bodies for
    self-attribute assignments. Returns None if nothing matches.
    """
    assert isinstance(type,Class)
    attrfinder = AttrbuteDefnFinder([type],name)
    # first scan the method names:
    for child in type.getChildNodes():
        if child.name == name:
            return convertNodeToMatchObject(child,100)
    # then scan the method source for attribues
    for child in type.getChildNodes():
        if isinstance(child,Function):
            try:
                return scanScopeForMatches(child.module.getSourceNode(),
                                           child, attrfinder,
                                           name).next()
            except StopIteration:
                continue

class AttrbuteDefnFinder(MatchFinder):
    """MatchFinder that matches assignments to a target attribute
    (e.g. `self.foo = ...`) on instances of the target classes.
    (Name is [sic] — kept for compatibility.)"""
    def __init__(self,targetClasses,targetAttribute):
        self.targetClasses = targetClasses
        self.targetAttributeName = targetAttribute

    def visitAssAttr(self, node):
        # visit the object expression first so word positions stay in sync
        for c in node.getChildNodes():
            self.visit(c)
        if node.attrname == self.targetAttributeName:
            exprtype = getTypeOfExpr(self.scope,node.expr)
            # only count it if the assignment target is an instance of one
            # of the classes we are searching for
            if isinstance(exprtype,Instance) and \
                   exprtype.getType() in self.targetClasses:
                self.appendMatch(self.targetAttributeName)
            #else:
            #    self.appendMatch(self.targetAttributeName,50)
        self.popWordsUpTo(node.attrname)


def searchImportedModulesForDefinition(scope,node):
    """Scan the scope's import statements; if `node` (a Name) is imported
    (directly or via `from x import *`), resolve it in the source module.
    Returns a Match or None.
    """
    lines = scope.module.getSourceNode().getLines()
    for lineno in scope.getImportLineNumbers():
        logicalline = getLogicalLine(lines,lineno)
        logicalline = makeLineParseable(logicalline)
        ast = compiler.parse(logicalline)
        class ImportVisitor:
            # visitor over a single parsed import statement
            def __init__(self,node):
                self.target = node
                self.match = None
                assert isinstance(self.target,Name), \
                       "Getattr not supported"

            def visitFrom(self, node):
                module = resolveImportedModuleOrPackage(scope,node.modname)
                if module is None:  # couldn't find module
                    return

                if node.names[0][0] == '*':   # e.g. from foo import *
                    match = findDefinitionFromASTNode(module,self.target)
                    if match is not None:
                        self.match = match
                    return

                for name, alias in node.names:
                    # unaliased import of exactly the target name
                    if alias is None and name == self.target.name:
                        match = findDefinitionFromASTNode(module,self.target)
                        if match is not None:
                            self.match = match
                            return

        match = visitor.walk(ast, ImportVisitor(node)).match
        if match:
            return match
        # loop

def getLogicalLine(lines,lineno):
    # Return the logical (continuation-joined) line starting at `lineno`
    # (1-based) from the raw physical lines.
    return generateLogicalLines(lines[lineno-1:]).next()


class AssignmentAndFnArgsSearcher(MatchFinder):
    """MatchFinder that records the word-index of the target name where it is
    bound by an assignment or appears as a function argument."""
    def __init__(self,name):
        self.targetname = name
        self.match = None

    def visitAssName(self, node):
        if node.name == self.targetname:
            idx = self.getNextIndexOfWord(self.targetname)
            self.match = idx
            return

    def visitFunction(self, node):
        self.popWordsUpTo(node.name)
        # walk the argument list, visiting default-value expressions too
        for arg, default in self.zipArgs(node.argnames, node.defaults):
            if arg == self.targetname:
                idx = self.getNextIndexOfWord(self.targetname)
                self.match = idx
                return
            self.popWordsUpTo(arg)
            if default is not None:
                self.visit(default)
        self.visit(node.code)

    def getMatch(self):
        return self.match

# scans for lines containing keyword, and then runs the visitor over
# the parsed AST for that line
def scanScopeAST(scope,keyword,matchfinder):
    """Return a Match for the first line of `scope` (excluding child scopes)
    where `matchfinder` reports a hit for `keyword`, or None."""
    lines = scope.generateLinesNotIncludingThoseBelongingToChildScopes()
    match = None
    for line,linenum in generateLogicalLinesAndLineNumbers(lines):
        if isWordInLine(keyword, line):
            # massage the logical line so it parses in isolation
            doctoredline = makeLineParseable(line)
            ast = compiler.parse(doctoredline)
            matchfinder.reset(line)
            match = visitor.walk(ast,matchfinder).getMatch()
            if match is not None:
                # convert the string index into (column, extra-line) coords
                column,yoffset = indexToCoordinates(line,match)
                m = createMatch(scope,linenum + yoffset,column)
                return m
    return None

def createMatch(scope,lineno,x):
    # Build a confidence-100 Match at the given coordinates in scope's module.
    m = Match()
    m.sourcenode = scope.module.getSourceNode()
    m.filename = m.sourcenode.filename
    m.lineno = lineno
    m.colno = x
    m.confidence = 100
    return m

# scan for methods globally (from perspective of 'perspectiveFilename')
def scanPythonPathForMatchingMethodNames(name, contextFilename):
    """Generator: yield confidence-50 Matches for any method named `name` in
    any source file on the python path (relative to `contextFilename`)."""
    class MethodFinder:
        def __init__(self,srcnode):
            self.matches = []
            self.srcnode = srcnode

        def visitFunction(self,node):
            # re-resolve via the scope tree; only count class methods
            node = getScopeForLine(self.srcnode, self.lineno)
            if isinstance(node.getParent(),Class):
                if node.name == name:
                    self.matches.append(convertNodeToMatchObject(node,50))

    for srcnode in getSourceNodesContainingRegex(name,contextFilename):
        m = MethodFinder(srcnode)
        walkLinesContainingStrings(srcnode.fastparseroot,m,[name])
        for match in m.matches:
            yield match

def getIndexOfWord(line,targetword):
    # Character offset of the first whole-word occurrence of `targetword`
    # in `line` (splitting on word boundaries, not substrings).
    words = re.split("(\w+)", line)
    idx = 0
    for word in words:
        if word == targetword:
            break
        idx += len(word)
    return idx
A circular has been issued making the Grade 5 Scholarship non-compulsory in future. The Ministry of Education issued the Circular on the advice of President Maithripala Sirisena. Education Minister Akila Viraj Kariyawasam said that the Circular was issued considering recommendations submitted by a committee appointed to look into reforming the Grade 5 Scholarship Examination.
# Copyright 2014, Doug Wiegley, A10 Networks. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import absolute_import from __future__ import unicode_literals try: import unittest2 as unittest except ImportError: import unittest from acos_client import client import responses HOSTNAME = 'fake_a10' BASE_URL = "https://{}:443/services/rest/v2.1/?format=json&method=".format(HOSTNAME) AUTH_URL = "{}authenticate".format(BASE_URL) HA_URL = '{}ha.sync_config&session_id={}'.format(BASE_URL, 'foobar') class TestHighAvailability(unittest.TestCase): def setUp(self): self.client = client.Client(HOSTNAME, '21', 'fake_username', 'fake_password') @responses.activate def test_high_availability_sync(self): responses.add(responses.POST, AUTH_URL, json={'session_id': 'foobar'}) responses.add(responses.POST, HA_URL) resp = self.client.ha.sync('192.168.2.254', 'fake_username', 'fake_password') self.assertIsNone(resp) self.assertEqual(len(responses.calls), 2) self.assertEqual(responses.calls[1].request.method, responses.POST) self.assertEqual(responses.calls[1].request.url, HA_URL)
These sales terms and conditions apply to Art Andersen ApS (hereinafter “AA”) and a given buyer relative to all offers, orders and deliveries, unless otherwise agreed in writing. Other conditions, including the buyer’s eventual purchasing conditions, do not apply, unless expressly agreed in writing. The buyer’s specification of particular terms in an order etc. is not regarded as a derogation of these conditions, unless AA has accepted such specification in writing. AA is entitled to amend these terms and conditions at any time. Offers proposed by AA are valid for 30 days from the date hereof. Agreements between a buyer and AA are only considered concluded when AA has submitted an order confirmation. Any objections from a buyer to the content of an order must be in writing and be received by AA no later than 3 business days after the date of the order confirmation. Cancellation or modification of an order can only be done with AA’s written approval. The buyer must refund AA any costs or losses at a cancellation or a change, at the minimum an amount equivalent to 25% of the agreed DKK price + VAT. AA is entitled to change the materials and design of the product offered, provided that the product continues to meet the usual standard and the specifications defined by the buyer. Normally recognized standard and typical industrial tolerances apply for non-tolerance measurements and dimensions. AA’s advice on the product’s usability to the buyer is for guidance only and represents no assurance of the product’s suitability for the intended use, unless otherwise agreed. AA’s prices are subject to the addition of current VAT, taxes, duties, fees, shipping, etc. Delivery is exclusive of installation and commissioning, unless otherwise specifically agreed. AA is entitled to adjust the purchase price if AA’s total costs associated with the manufacture of the goods sold have increased due to unforeseeable circumstances, such as price increases in raw materials, electricity, taxes etc.
Unless otherwise agreed, the purchase price is due for payment 30 days after the invoice date. For orders of less than DKK 5,000 AA reserves the right to charge a handling fee of DKK 200. AA reserves the right to change the agreed terms of payment if the purchaser’s ability to pay deteriorates after conclusion of the order. If the buyer does not pay the purchase price in due time, AA may from the due date add an interest rate of 2% per commenced month. The buyer is not entitled to withhold any part of the purchase price as security for fulfilling potential counterclaims for other supplies, and such a detention will be considered a breach of the agreement. AA reserves the ownership of any delivery until payment has been made in full. Delivery times indicated are approximate and non-binding, unless otherwise expressly agreed. Delivery time is determined assuming that all of the buyer’s specifications are available at the latest at AA’s submission of the order confirmation. Otherwise, the delivery time is counted from the time when such specifications are received from the buyer. On full or partial prepayment, delayed prepayment will cause a corresponding postponement of the delivery time. In the absence of another specific indication in an order the delivery place is AA’s address, and delivery will take place Ex Works (INCOTERMS). All drawings and technical descriptions regarding the product or its manufacture, which are or will be handed over to the buyer, remain AA’s property unless otherwise agreed. Such material must not without AA’s written permission be used for purposes other than assembly, mounting, operation and maintenance of the supplied goods, and must not be copied or otherwise communicated to a third party. On new material AA provides a warranty of 12 months from the date of delivery against errors and defects in materials and construction. During the warranty period AA shall remedy cited defects free of charge, cf. clause 10.
AA’s duty to remedy is applicable when goods are in Denmark. AA’s duty to remedy does not apply to defects caused by natural wear and tear, abuse, improper operation, faulty maintenance, accidental damages or other circumstances for which the buyer bears the responsibility or risk. The buyer shall bear all costs of transportation and freight related to the returning of goods to AA. Warranty is void if the agreed payment conditions are not met, or if the buyer makes changes or repairs to the material or uses non-original parts within the warranty period. Immediately upon receiving a delivery the buyer shall inspect the goods to state any deficiencies. The buyer must provide a written notice specifying the defect (including differences in quantity) directly to AA without undue delay, after the defect is or should have been discovered. Failure to communicate within the time limits will result in a lapse of the buyer’s rights in respect of the defect. In case of complaints the buyer is not entitled to dispose of the product or return it to AA without AA’s written approval. If the buyer has complained about the product, and it turns out that there is no defect for which AA is responsible, AA must be compensated for the work and the costs the complaint has incurred on AA. Complaints about information and specifications in invoices must be made in writing within 8 days after receipt of the invoice. AA’s liability for defects is limited to replacement or repair. The buyer may rescind the contract if the deficiency is not remedied within reasonable time. If the agreement is terminated as a result of the defect, the buyer is entitled to a refund of the price paid, against delivering it back to AA. Any additional requirements, including extra costs, operational profit losses or other losses as a result of the defective delivery cannot be made against AA.
If AA’s service consists of processing the buyer’s products, AA’s liability is limited to an amount equivalent to the agreed payment for the processing, excluding VAT. If AA or its subcontractors is prevented from fulfilling their obligations as a result of war, revolution, government restrictions on imports or exports, exceptional natural or meteorological phenomena, labor conflicts, fire or the like that are beyond AA’s reasonable control, AA’s obligations are suspended until the unusual situation is rectified. If the force majeure situation applies for more than 30 days, the agreement may be cancelled by AA or the buyer, without any of the parties demanding compensation from the other party. In case of personal injury AA is liable in accordance with the Danish Product Liability Act. In case of damage on a product which, by its nature, is intended for commercial use, AA is liable under the following rules: AA is not liable for damage to real or personal property that occurs while the product is in the buyer’s possession. AA is only liable for damage to real and personal property if the damage is caused by negligence on the part of AA. AA is not liable for damage to products manufactured by the buyer or products in which the parts from AA have been integrated. AA is not liable for loss of profit or other indirect losses. AA’s maximum liability for product liability amounts to DKK 2.5 million per event or series of events arising from the same delivery. To the extent that AA may incur product liability towards third parties the buyer is obliged to indemnify AA to the same extent that AA’s liability is limited under this provision. Any disagreement or dispute between the parties on the supplied product or these terms and conditions will be determined under Danish law by the Copenhagen City Court as the first instance.
import os import sys import matplotlib.pyplot as plt sys.path.append("../src") from Chain import Chain from plot_lik import plot_lik_2D base_folder = "./chains" chain = "planck_WP" chains = [Chain(os.path.join(base_folder, chain))] params=["H0", "omega_b"] labels=[r"$H_0$", r"$\omega_b$"] fig, axarr = plt.subplots(2,2) axes_locations = { "profile": axarr[0,1], "mean": axarr[1,0], "marginal": axarr[1,1]} for mode, axes in axes_locations.items(): ax, options = plot_lik_2D(mode, chains, params=params, labels=labels, format = "-loglik", dpi=200, fontsize_labels=14, fontsize_ticks=8, save=0, axes=axes ) axes.set_title(mode.title(), fontdict={'fontsize':16}) # Text: text = ("Chain:\n %s\n\n"%[c.name() for c in chains] + "Parameters:\n%s"%(params)) axarr[0,0].set_axis_off() axarr[0,0].text(0, 1, text, weight="bold", verticalalignment="top") # Plot plt.tight_layout() plt.savefig("summary.png", **options) plt.show() plt.close()
Paint your nails in any artistic fashion! Choose from our selective polish supply or bring your own! ​Our vision was to create a relaxing environment with the best team to pamper your every need. We targeted easily accessible locations that make it easy for you to take time for a little relaxation despite your busy lifestyle. We offer services to help you improve your appearance and promote your health and well being. Beyond our vast array of nail services, pedicures and massages, we also offer eyebrow threading, eyebrow tinting, eyelash extensions, permanent make up, waxing and tanning. Nail Talk & Tan is your beauty destination and we have a location near you. Our licensed Nail Technicians, Massage Therapists and Skin Care Specialists want the best for you. We will always recommend the best treatments for you and advise you on your personal selections.
"""The error checking chain is a list of status word (sw1, sw2) error check strategies. __author__ = "http://www.gemalto.com" Copyright 2001-2012 gemalto Author: Jean-Daniel Aussel, mailto:jean-daniel.aussel@gemalto.com This file is part of pyscard. pyscard is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. pyscard is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with pyscard; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA """ from sys import exc_info class ErrorCheckingChain(object): """The error checking chain is a list of response apdu status word (sw1, sw2) error check strategies. Each strategy in the chain is called until an error is detected. A L{smartcard.sw.SWException} exception is raised when an error is detected. No exception is raised if no error is detected. Implementation derived from Bruce Eckel, Thinking in Python. The L{ErrorCheckingChain} implements the Chain Of Responsibility design pattern. """ def __init__(self, chain, strategy): """constructor. Appends a strategy to the L{ErrorCheckingChain} chain.""" self.strategy = strategy self.chain = chain self.chain.append(self) self.excludes = [] def next(self): """Returns next error checking strategy.""" # Where this link is in the chain: location = self.chain.index(self) if not self.end(): return self.chain[location + 1] def addFilterException(self, exClass): """Add an exception filter to the error checking chain. @param exClass: the exception to exclude, e.g. 
L{smartcard.sw.SWExceptions.WarningProcessingException} A filtered exception will not be raised when the sw1,sw2 conditions that would raise the excption are met. """ self.excludes.append(exClass) if self.end(): return self.next().addFilterException(exClass) def end(self): """Returns True if this is the end of the error checking strategy chain.""" return (self.chain.index(self) + 1 >= len(self.chain)) def __call__(self, data, sw1, sw2): """Called to test data, sw1 and sw2 for error on the chain.""" try: self.strategy(data, sw1, sw2) except tuple(self.excludes) as exc: # The following addtional filter may look redundant, it isn't. # It checks that type(exc) is *equal* to any of self.excludes, # rather than equal-or-subclass to any of self.excludes. # This maintains backward compatibility with the behaviour of # pyscard <= 1.6.16. # if exception is filtered, return for exception in self.excludes: if exception == exc_info()[0]: return # otherwise reraise exception raise # if not done, call next strategy if self.end(): return return self.next()(data, sw1, sw2)
New Member-in-Training, Affiliate-in-Training, and Affiliate, Allied Health Student member applications will be submitted to the Membership Committee for approval. Once approved an invoice will be sent to you for your membership fees. Payment can be made by Visa or Mastercard online, or by cheque. If applying for the Retired Membership category, please click here to view the application form and instructions.
""" Given a string s and a dictionary of words dict, determine if s can be segmented into a space-separated sequence of one or more dictionary words. For example, given s = "leetcode", dict = ["leet", "code"]. Return true because "leetcode" can be segmented as "leet code". """ __author__ = 'Danyang' class Solution: def wordBreak_TLE(self, s, dict): """ TLE dfs O(n^2) Algorithm: DFS. The reason is that DFS repeatedly calculate whether a certain part of string can be segmented. Therefore we can use dynamic programming. :param s: a string :param dict: a set of string :return: a boolean """ string_builder = "" if s=="": return True # greedy for i in range(len(s)): string_builder += s[i] if string_builder in dict: try: if self.wordBreak_TLE(s[i+1:], dict): return True else: continue except IndexError: return True return False def wordBreak(self, s, dict): """ __ __________ ___ __ ______ ______ .__ __. _______. | | | ____\ \ / / | | / | / __ \ | \ | | / | | | | |__ \ V / | | | ,----'| | | | | \| | | (----` | | | __| > < | | | | | | | | | . ` | \ \ | `----.| |____ / . \ | | | `----.| `--' | | |\ | .----) | |_______||_______/__/ \__\ |__| \______| \______/ |__| \__| |_______/ Dynamic programming The dynamic solution can tell us whether the string can be broken to words, but can not tell us what words the string is broken to. O(n*m) Google On Campus Presentation, demonstration questions. 
4 Sep 2014, Nanyang Technological University, Singapore dp[i] rolling dp (rather than using 2D dp[i, j] dp[i] means s[:i] can be made up of sequence of lexicons - l e e t c o d e T F F F T F F F T Lexicons = {the, theta, table, down, there, bled, own} - t h e t a b l e d o w n t h e r e T F F T F T F F T T F F T F F F F T :param s: a string :param dict: a set of string :return: a boolean """ dp = [False] * (len(s)+1) dp[0] = True # dummy for i in range(len(dp)): # [0, len(s)+1) # continue from matched condition if dp[i]: for word in dict: try: # trivial if dp[i+len(word)]==True: continue # main if s[i:i+len(word)]==word: # test whether [i, i+len) can construct a word. THE BEAUTY OF HALF OPEN dp[i+len(word)] = True # record the checking except IndexError: continue return dp[-1] if __name__=="__main__": assert Solution().wordBreak("aaaaaaa", ["aaaa", "aaa"])==True
A small inn just outside of town, hotel new oh dog, a place where adventurers come to rest their tired bodies. The inn owner’s fur is as wonderful and fluffy as always. Lilith the inn’s poster girl wants to go to school and look for treasure in dungeons? A new fluffy inn comedy by Kai Yoshino starts now! Ch.009 - Mister fenrir became tiny! Copyrights and trademarks for the manga, and other promotional materials are held by their respective owners and their use is allowed under the fair use clause of the Copyright Law. © 2018 MangaFox. Current Time is GMT 23:57 下午.
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.

# Jinja2 template helpers for product visibility and cross-sell queries.
# The @contextfunction helpers read the current request (shop/customer/user)
# from the Jinja2 render context.

from jinja2.utils import contextfunction

from shuup.core.models import (
    AttributeVisibility, Product, ProductAttribute, ProductCrossSell,
    ProductCrossSellType, Supplier
)
from shuup.utils.text import force_ascii


def get_visible_attributes(product):
    # Queryset of this product's attributes flagged for product-page display.
    return ProductAttribute.objects.filter(
        product=product,
        attribute__visibility_mode=AttributeVisibility.SHOW_ON_PRODUCT_PAGE
    )


# Deprecated, see `get_product_cross_sells()`
@contextfunction
def get_products_bought_with(context, product, count=5):
    """Return up to `count` COMPUTED cross-sell products visible to the
    current user, heaviest weight first."""
    # Over-fetch (count * 4) candidates since some will fail visibility.
    related_product_cross_sells = (
        ProductCrossSell.objects
        .filter(product1=product, type=ProductCrossSellType.COMPUTED)
        .order_by("-weight")[:(count * 4)])
    products = []
    for cross_sell in related_product_cross_sells:
        product2 = cross_sell.product2
        if product2.is_visible_to_user(context["request"].user) and product2.is_list_visible():
            products.append(product2)
        if len(products) >= count:
            break
    return products


@contextfunction
def is_visible(context, product):
    """True iff `product` has no visibility errors for the current customer
    in the current shop."""
    request = context["request"]
    shop_product = product.get_shop_instance(shop=request.shop)
    # any yielded error means "not visible"; we only need the first one
    for error in shop_product.get_visibility_errors(customer=request.customer):  # pragma: no branch
        return False
    return True


@contextfunction
def get_product_cross_sells(
        context, product, relation_type=ProductCrossSellType.RELATED,
        count=4, orderable_only=True):
    """Return up to `count` cross-sell products of `relation_type`, lightest
    weight first, optionally restricted to currently orderable products.

    `relation_type` may be a ProductCrossSellType or its name as a string
    (see map_relation_type).
    """
    request = context["request"]
    rtype = map_relation_type(relation_type)
    # Over-fetch candidates; list() pins the weight-ordered id sequence,
    # which is reused below for final ordering.
    related_product_ids = list((
        ProductCrossSell.objects
        .filter(product1=product, type=rtype)
        .order_by("weight")[:(count * 4)]).values_list("product2_id", flat=True)
    )

    related_products = []
    # NOTE(review): the loop variable shadows the `product` parameter; the
    # parameter is not used again afterwards, so behavior is unaffected.
    for product in Product.objects.filter(id__in=related_product_ids):
        shop_product = product.get_shop_instance(request.shop)
        if orderable_only:
            # accept the product if *any* supplier can currently fulfil it
            for supplier in Supplier.objects.all():
                if shop_product.is_orderable(
                        supplier, request.customer, shop_product.minimum_purchase_quantity):
                    related_products.append(product)
                    break
        elif shop_product.is_visible(request.customer):
            related_products.append(product)

    # Order related products by weight. Related product ids is in weight order.
    # If same related product is linked twice to product then lowest weight stands.
    related_products.sort(key=lambda prod: list(related_product_ids).index(prod.id))
    return related_products[:count]


def map_relation_type(relation_type):
    """
    Map relation type to enum value.

    :type relation_type: ProductCrossSellType|str
    :rtype: ProductCrossSellType
    :raises: `LookupError` if unknown string is given
    """
    if isinstance(relation_type, ProductCrossSellType):
        return relation_type
    # strings are matched case-insensitively against the enum member names
    attr_name = force_ascii(relation_type).upper()
    try:
        return getattr(ProductCrossSellType, attr_name)
    except AttributeError:
        raise LookupError('Unknown ProductCrossSellType %r' % (relation_type,))
Population of Outines was 147 inhabitants in 2007. You will find below a series of charts and statistical curves of population census of the town of Outines. Population of Outines was 134 inhabitants in 1999, 128 inhabitants in 1990, 164 inhabitants in 1982, 204 inhabitants in 1975 and 223 inhabitants in 1968. Area of Outines is 15,42 km² and has a population density of 9,53 inhabs/km². Must see: housing of Outines, the road map of Outines, the photos of Outines, the map of Outines. Population of Outines was 147 inhabitants in 2007, 134 inhabitants in 1999, 128 inhabitants in 1990, 164 inhabitants in 1982, 204 inhabitants in 1975 and 223 inhabitants in 1968. This population Census of the town of Outines was made without duplicated data, which means that each Outines resident that have ties to another municipality is counted only once for the two municipalities. This curve shows the history of the population of Outines from 1968 to 2007 in cubic interpolation. This provides more precisely the population of the municipality of Outines the years where no census has been taken. - From 1982 and 1990 : 11 births and 25 deaths. - From 1975 and 1982 : 11 births and 19 deaths. - From 1968 and 1975 : 17 births and 19 deaths.
""" This module is responsible for doing all the authentication. Adapted from the Google API Documentation. """ from __future__ import print_function import os import httplib2 import apiclient import oauth2client try: import argparse flags = argparse.ArgumentParser( parents=[oauth2client.tools.argparser]).parse_args() except ImportError: flags = None SCOPES = 'https://www.googleapis.com/auth/drive' CLIENT_SECRET_FILE = 'client_secret.json' # Enter your project name here!! APPLICATION_NAME = 'API Project' def get_credentials(): """Gets valid user credentials from storage. If nothing has been stored, or if the stored credentials are invalid, the OAuth2 flow is completed to obtain the new credentials. Returns: Credentials, the obtained credential. """ home_dir = os.path.expanduser('~') credential_dir = os.path.join(home_dir, '.gdrive-credentials-cache') if not os.path.exists(credential_dir): os.makedirs(credential_dir) credential_path = os.path.join(credential_dir, 'gdrive-credentials.json') store = oauth2client.file.Storage(credential_path) credentials = store.get() if not credentials or credentials.invalid: flow = oauth2client.client.flow_from_clientsecrets( CLIENT_SECRET_FILE, SCOPES) flow.user_agent = APPLICATION_NAME if flags: credentials = oauth2client.tools.run_flow(flow, store, flags) else: # Needed only for compatibility with Python 2.6 credentials = oauth2client.tools.run(flow, store) print('Storing credentials to ' + credential_path) return credentials credentials = get_credentials() http = credentials.authorize(httplib2.Http()) file_service = apiclient.discovery.build('drive', 'v3', http=http).files()
ACS are recruiting for an MOJ/First Response Team Manager for a Law firm in Liverpool. Up-to-date technical knowledge and experience in dealing with multiple orthopaedic injuries, psychiatric injuries and cases up to the value of £25000.00. Either a Graduate or close to completion of LPC is desirable. Whilst following guidelines set by the firm, taking detailed information from clients in relation to their claim, to enable you to assess and set up the claim or decline. Obtaining all necessary information needed from the client such as insurance details, accident circumstances and injury/ vehicle damage information. Managing incoming work and providing financial projections. Liaising with sources of work and ensuring regular financial reviews are conducted. To complete and submit the Claim Notification Forms to the Defendant Law Firm via email and post. Arrange medical and rehabilitation appointments. Transferring cases to either the liability admitted or liability disputed departments. Inputting client’s instructions and all other correspondence onto the Case Management System. Ensuring all required initial forms are completed and returned. Promoting Best Practice and Company Values. Training and development needs of staff. A salary of £21,000.00 is on offer with progression opportunities for the right person. The normal working hours will be Monday to Friday 9.00am to 5.00pm, with 2 late shifts where the working hours are 11.00am – 7.00pm.
"""Project-wide logging setup: colored stdout handler, rotating file handler,
a custom SUCCESS level, and thin wrapper functions (debug/info/success/...)
that prefix each message with the caller's filename and line number."""
import sys
import os.path
import logging
import colorlog
import inspect
from logging.handlers import RotatingFileHandler

# make external modules only log above warning and upper
logging.getLogger("paramiko").setLevel(logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)

# define root logging strategy
root = logging.getLogger()
root.setLevel(logging.DEBUG)

####################
# define new log level for SUCCESS
# Placed between INFO (20) and WARNING (30) so normal filtering still applies.
SUCCESS = logging.INFO + 1
logging.addLevelName( SUCCESS, 'SUCCESS')

####################
# log on stdout
stdout_formatter = colorlog.ColoredFormatter(
    "%(asctime)s - %(log_color)s%(levelname)-7s%(reset)s %(message)s",
    datefmt="%H:%M:%S",
    reset=True,
    log_colors={
        'DEBUG': 'white',
        'INFO': 'white',
        'SUCCESS': 'green',
        'WARNING': 'yellow',
        'ERROR': 'white,bg_red',
        'CRITICAL': 'white,bg_red',
    },
    secondary_log_colors={},
    style='%'
)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
ch.setFormatter(stdout_formatter)
root.addHandler(ch)

####################
# also log in a dedicated log file (full date, no color)
file_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fh = RotatingFileHandler('figaro_deploy.log', maxBytes=10000000, backupCount=5)
fh.setLevel(logging.DEBUG)
fh.setFormatter(file_formatter)
root.addHandler(fh)


def __get_log_msg(txt):
    '''Get filename and line number where the log occurs.

    Relies on a fixed call depth: two frames up from here is the wrapper
    (debug/info/...), so ``f_back.f_back`` is the wrapper's caller.
    '''
    frame = inspect.currentframe().f_back.f_back
    # NOTE(review): this climbs one EXTRA frame when possible, so the reported
    # location is the caller's caller rather than the direct caller of
    # debug()/info()/... — presumably to skip another wrapper layer elsewhere
    # in this project; confirm against the actual call sites.
    if frame and frame.f_back:
        frame = frame.f_back
    func = frame.f_code
    return "[%s:%s] %s" % (os.path.basename(func.co_filename), frame.f_lineno, txt)


def debug(msg):
    '''Log *msg* at DEBUG level with caller location prefix.'''
    logging.debug(__get_log_msg(msg))


def info(msg):
    '''Log *msg* at INFO level with caller location prefix.'''
    logging.info(__get_log_msg(msg))


def success(msg):
    '''Log *msg* at the custom SUCCESS level with caller location prefix.'''
    logging.log(SUCCESS, __get_log_msg(msg))


def warning(msg):
    '''Log *msg* at WARNING level with caller location prefix.'''
    logging.warning(__get_log_msg(msg))


def error(msg, exit_on_error = True):
    '''Log *msg* at ERROR level; by default terminate the process (exit code 1).'''
    logging.error(__get_log_msg(msg))
    if exit_on_error:
        exit(1)


def critical(msg):
    '''Log *msg* at CRITICAL level and always terminate the process (exit code 1).'''
    logging.critical(__get_log_msg(msg))
    exit(1)
Can you explain in more detail what you mean by: “I even tried signing out of AxCrypt and it is still decrypted”? It would be great if you could include a screenshot showing how you open the encrypted file even though AxCrypt is signed out. My guess is that it’s some form of misunderstanding about the mechanisms, or that the file is open in the application, and AxCrypt can’t remove the temporary decrypted copy because of that.
#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# -*- coding: utf-8 -*-

"""
PyCOMPSs Mathematical Library: Algebra: Mean
============================================
    This file contains the arithmetic mean algorithm.
"""

from pycompss.api.task import task
from pycompss.functions.reduce import merge_reduce


def _list_lenght(l):
    """Recursively count the scalar elements of a (possibly nested) list.

    :param l: List, possibly containing nested lists
    :return: Total number of non-list elements
    """
    if l:
        if not isinstance(l[0], list):
            return 1 + _list_lenght(l[1:])
        return _list_lenght(l[0]) + _list_lenght(l[1:])
    return 0


@task(returns=float)
def _mean(data, n):
    """Task: partial mean contribution of one chunk.

    Each chunk contributes sum(chunk) / n where *n* is the TOTAL element
    count, so adding all partial results yields the global mean.

    :param data: List of elements (one chunk)
    :param n: Total number of elements across all chunks
    :return: Partial mean contribution of this chunk
    """
    return sum(data) / float(n)


@task(returns=float)
def reduce_add(x, y):
    """Task: add two partial results (reduction operator for merge_reduce).

    Bug fix: mean() referenced ``reduce_add`` but it was never defined nor
    imported, so calling mean() raised a NameError at runtime.

    :param x: First partial result
    :param y: Second partial result
    :return: x + y
    """
    return x + y


def mean(data, wait=False):
    """
    Arithmetic mean.

    :param data: chunked data (list of chunks/lists)
    :param wait: if we want to wait for result. Default False
    :return: mean of data (a future object unless wait is True)
    """
    n = _list_lenght(data)
    result = merge_reduce(reduce_add, [_mean(x, n) for x in data])
    if wait:
        # Imported lazily so the module can be loaded outside a COMPSs runtime.
        from pycompss.api.api import compss_wait_on
        result = compss_wait_on(result)
    return result
I am so excited to share with you all my first ever vlog for missaLAnnis.com on my YouTube Channel! The video shows how to do a super simple fresh-faced “no makeup” makeup look using 5 easy products, and it only takes about 5 minutes to do! This look can be used by anyone, especially someone who feels like a beginner when it comes to makeup. No crazy techniques or complicated tools, mostly just your hands and a couple brushes 🙂 I explain all of the steps in the video, and have included links to all of the products here below! Also don’t mind my arms flailing all over the place 😉 Let me know what you think! Special thanks to KARL HERRMANN for helping me make this video! Even a makeup moron like me could understand what you showed. BRAVO! I am definitely going to take your advice! Looking forward to your next video.
#!/usr/bin/python
"""As usual, we eat chapters from stdin and output xhtml files, this time
with nice CSS and no tables.  We don't copy the images to the relevant
places since that's not really amenable to parallelization (which is too
much fun)."""
import os

import pkg_resources
import pyratemp

import common

# Compiled once at import time; reused for every chapter.
chapter_template = pyratemp.Template(
    string=pkg_resources.resource_string(__name__, "html_chapter_template.xhtml"))

if __name__ == "__main__":
    chapters = common.get_chapters_from_stdin()
    if not os.path.isdir("html_mirror"):
        os.mkdir("html_mirror")
    for introonly_chapter in chapters.chapter:
        chapter = common.full_chapter_from_introonly(introonly_chapter)
        chapter_html = chapter_template(chapter=chapter)
        output_file_name = os.path.join(
            "html_mirror", common.chapter_to_internal_name(chapter))
        # Bug fix: we write UTF-8 encoded *bytes*, so the file must be opened
        # in binary mode — mode="w" raises TypeError on Python 3 and relied on
        # platform newline translation on Python 2.
        with open(output_file_name, mode="wb") as xhtml_file:
            xhtml_file.write(chapter_html.encode('utf-8'))
NORTON, MA- Ranked first in the country by the National Soccer Coaches Association of America (NSCAA), the Wheaton College women's soccer team held on to defeat MIT 1-0 in New England Women's and Men's Athletic Conference (NEWMAC) play Tuesday. The Lyons move to 14-0 overall and 5-0 in league play, while the Engineers fall to 4-5-3 and 1-3-1 in the NEWMAC. Senior co-captain Erin Duffy tucked a perfectly placed penalty kick past MIT goalie Katherine Vater in just the ninth minute of play to put the Lyons on top for good, as both teams squandered scoring opportunities as the game proceeded. Perhaps MIT's best chance came later in the first half as freshman forward Amy Ludlum corralled a deflection and blasted a shot directly at Wheaton junior goalkeeper Emily Lipsky. The ball squirted under the netminder, coming within inches from crossing the end line, before senior co-captain Christy Schneider cleared it from harm's way. Sophomore Andrea Staid also narrowly missed an equalizer for the Engineers with two minutes remaining in the half. The second half opened with six consecutive corner kicks for the Lyons, but the hosts failed to capitalize, as MIT deflected shot after shot out of bounds. The opportunity for an insurance goal came again just minutes later, as Vater blocked numerous Wheaton shots in a logjam in front of the Tech goal, but the ball eventually rolled harmlessly out of bounds for a goal kick. With 37 seconds remaining in the game, the Engineers made one final push for the game-tying goal as Ludlum took a well-placed through ball into the box before lifting it above the crossbar, all but ending MIT's chances at tying the match. The shot count was 7-7 at halftime, but Wheaton's defense controlled most of the second half, allowing MIT only three shots in the stanza, bringing the total count to 26-10 in favor of the Lyons. The Blue and White also took a 17-0 advantage in corner kicks.
Lipsky made three of the home team's four saves, combining with freshman goalkeeper Stacey Kronenberg (Belchertown, MA/Belchertown) to notch Wheaton's 12th shutout of the year, while Vater recorded nine saves for the Engineers in the loss.
import numpy as np
from PIL import Image
# Bug fix: piApproxMonteCarlo() calls random(), which was never imported
# (only randint was), raising a NameError.
from random import randint, random
from math import sqrt, factorial


################### A ####################

def perm(l):
    """Return all permutations of list *l* as a list of lists.

    The empty list has exactly one permutation: [[]].
    """
    if not l:
        return [l]
    res = []
    for i in range(len(l)):
        rest = l[:i] + l[i + 1:]
        for tail in perm(rest):
            # l[i:i+1] keeps the picked element wrapped in a list.
            res.append(l[i:i + 1] + tail)
    return res


################### B ####################

def drawPascalMod(n, d):
    """Render an n x n picture of Pascal's triangle modulo d as 'pascal.png'.

    Each residue class 0..d-1 is assigned a random grayscale color.
    """
    # Bug fix: randint(0, 256) could produce 256, which is outside the valid
    # 0..255 RGB channel range; the upper bound must be 255.
    pallete = [(randint(0, 255),) * 3 for _ in range(d)]
    img = Image.new('RGB', (n, n))
    px = img.load()
    A = np.full((n, n), 1, int)
    for j in range(2, n):
        for i in range(1, j):
            # Pascal's rule, reduced mod d.
            A[i, j] = (A[i - 1, j - 1] + A[i, j - 1]) % d
            px[i, j] = pallete[A[i, j]]
    img.save('pascal.png')


################### C ####################

def fact(n):
    """Iterative factorial of n (n! for n >= 0)."""
    res = 1
    for i in range(2, n + 1):
        res = res * i
    return res


def powerInt(x, y):
    """Integer power x**y (y >= 0) by binary exponentiation."""
    res = 1
    for bit in bin(y)[2:]:
        res *= res
        if bit == '1':
            res *= x
    return res


def nroot(x, n, eps):
    """Approximate the n-th root of x (x > 0) by Newton's method.

    Iterates until the update step is no larger than eps.
    """
    g = x / n
    # Bug fix: the original condition `while abs(dx) > eps or not dx` looped
    # forever once dx became exactly 0 (i.e. when g hit the exact root).  A
    # do-while shape runs at least once and stops on any small-enough step.
    while True:
        dx = (1 / n) * (x / powerInt(g, n - 1) - g)
        g = g + dx
        if abs(dx) <= eps:
            break
    return g


def exp(x, k):
    """Taylor-series approximation of e**x using k+1 terms."""
    res = 0
    for i in range(k + 1):
        res += powerInt(x, i) / fact(i)
    return res


def _atanh_series(y, k):
    """Partial sum of the atanh series: sum y^(2i+1)/(2i+1), i = 0..k."""
    res = 0
    for i in range(k + 1):
        res += powerInt(y, 2 * i + 1) / (2 * i + 1)
    return res


def loge(x, k):
    """Natural logarithm of x > 0 via the atanh series with k+1 terms.

    The argument is first scaled into [1, 10) by a power of ten for fast
    convergence, then the dropped factor is added back.
    """
    n = len(str(int(x)))
    a = x / powerInt(10, n - 1)  # a in [1, 10) for x >= 1
    res = 2 * _atanh_series((a - 1) / (a + 1), k)
    # Bug fix: the original returned ln(a) only, silently discarding the
    # (n-1)*ln(10) contribution of the scaling (e.g. loge(100, k) gave 0).
    # ln(10) = 2*atanh(9/11), computed with the same series.
    return res + (n - 1) * 2 * _atanh_series(9 / 11, k)


def powerFractionApprox(x, y, prec):
    """Approximate x**y by a rational exponent: x**(n/d) = (x**n)**(1/d)."""
    n = int(y * prec)
    d = prec
    return nroot(powerInt(x, n), d, 0.00001)


def powerExpApprox(x, y, prec):
    """Approximate x**y via exp(y * ln(x)) with prec series terms."""
    return exp(y * loge(x, 100), prec)


################### D ####################

def piApproxGL(k):
    """Gregory-Leibniz series: 4 * sum (-1)^i / (2i+1), i = 0..k."""
    pi = 0
    sign = 1
    for i in range(k + 1):
        pi += sign / (2 * i + 1)
        sign *= -1
    return 4 * pi


def piApproxArch(k):
    """Archimedes-style iteration on circumscribed/inscribed perimeters."""
    a = 2 * sqrt(3)
    b = 3
    for _ in range(k + 1):
        a = 2 * a * b / (a + b)
        b = sqrt(a * b)
    return a


def piApproxMonteCarlo(k):
    """Monte Carlo estimate: fraction of k random points inside the unit
    quarter-circle, times 4."""
    count = 0
    for _ in range(k):
        x = random()
        y = random()
        if x ** 2 + y ** 2 < 1:
            count += 1
    return 4 * count / k


##########################################

if __name__ == '__main__':
    # print(piApproxMonteCarlo(10000))
    # print(piApproxArch(10000))
    # print(piApproxGL(10000))
    # print(powerExpApprox(2, 2.2, 33))
    # print(powerFractionApprox(2, 2.2, 100))
    # print(nroot(676786786, 7878, 0.00000001))
    # print(powerInt(12, 2))
    # print(perm([1, 2, 3, 4]), len(perm([1, 2, 3, 4])))
    drawPascalMod(30, 5)
SAN JOSE, CA - February 25, 2013 - OCZ Technology Group, Inc. (Nasdaq:OCZ), a leading provider of high-performance solid-state drives (SSDs) for computing devices and systems, today announced that it will preview a variety of enterprise storage solutions at next week's CeBIT 2013 conference in Hannover, Germany. As a renowned global forum, CeBIT represents a great opportunity for attendees to be the first to see and experience the latest innovations in solid-state storage from an industry leader in enterprise SSDs, virtualization, and caching software. OCZ offers a complete suite of storage solutions that address VMware, Linux, and SQL Server platforms, and invites IT decision-makers who are evaluating or implementing solid-state storage in the data center to visit the Company's exhibit in Hall 2, Stand E43, from March 5th through 9th. OCZ will unveil the next-generation ZD-XL SQL Accelerator, a culmination of enterprise hardware and software converging as one tightly integrated and optimized solution. The ZD-XL Accelerator addresses SQL Server database applications to not only ensure that the data for this implementation is right, relevant, and readily available on SSD flash when the SQL Server needs it, but also that the data is accessed with the highest possible I/O performance. For simple deployment and ease of use, this tightly integrated, optimized solution features 'implementation wizards' to guide DBAs so they can optimally manage data cached to the flash. While showcasing the ZD-XL solution OCZ will invite enterprise customers to become beta testers for this exciting solution. Also included in OCZ's exhibition at CeBIT will be demonstrations to preview the upcoming VXL 1.3 Virtualization Software and LXL Acceleration Software with OCZ's innovative Direct Pass Caching Technology, which not only addresses VMware but is also optimized for Linux applications. 
OCZ is one of the few SSD providers with a robust portfolio of virtualization and caching software that combine the power of flash acceleration with the power of storage virtualization. This enables multiple virtual server loads to run concurrently on a single physical host, not only increasing CPU and memory resource utilization, but also simplifying deployment, high availability (HA), and maintenance of the server loads. The next-generation of workstation PCI Express (PCIe)-based SSDs will also be available soon as part of the Company's award-winning Vector Series. These drives reside directly on the PCIe bus and will support four PCIe Gen2 lanes providing lower latency to data, faster file transfers and system boot-ups, expanded storage capacities, and an even quicker, more responsive experience over the already blazingly fast SATA 3.0-based Vector Series. The Vector PCIe Series will feature an advanced suite of flash management tools that deliver enhanced drive endurance and data integrity, making it ideally suited for power computing, content creation, and workstation applications. Join OCZ Technology at CeBIT 2013, Hannover, Germany, March 5th through 9th, Hall 2, Stand E43 from 9:00am to 6:00pm (local time).