text
stringlengths
29
850k
""" 2015 September 30 Shane Bussmann Find the whale. Model the whale as a rectangle with aspect ratio = 3.0. """ import numpy as np from skimage.color import rgb2gray, rgb2hsv from scipy.ndimage import gaussian_filter def xy_rotate(x, y, x0, y0, phi): phirad = np.deg2rad(phi) xnew = (x - x0) * np.cos(phirad) + (y - y0) * np.sin(phirad) ynew = (y - y0) * np.cos(phirad) - (x - x0) * np.sin(phirad) return (xnew,ynew) def ellipse_2d(x, y, par): (xnew,ynew) = xy_rotate(x, y, par[2], par[3], par[5]) r_ell_sq = ((xnew**2)*par[4] + (ynew**2)/par[4]) / np.abs(par[1])**2 ellipse = r_ell_sq.copy() ellipse[:] = 0. inside = r_ell_sq < 1 ellipse[inside] = par[0] #import matplotlib.pyplot as plt #plt.imshow(r_ell_sq, origin='lower', vmax=10*par[1]) #plt.colorbar() #plt.contour(ellipse) #plt.show() return ellipse def whale_2d(x, y, par): # the head and body of the whale e1 = ellipse_2d(x, y, par) ## the tail of the whale #r1 = par[1] / 3. #q1 = 0.5 #b1 = r1 * np.sqrt(q1) #a0 = par[1] / np.sqrt(par[4]) #d = a0 + b1 #dx = d * np.cos(par[5]) #dy = d * np.sin(par[5]) #x1 = par[2] - dx #y1 = par[3] - dy #phi1 = par[5] - 90. 
#par2 = [par[0], r1, x1, y1, q1, phi1] #e2 = ellipse_2d(x, y, par2) #import matplotlib.pyplot as plt #plt.contour(e1) #plt.contour(e2) #plt.show() #import pdb; pdb.set_trace() #print(par) #print(par2) return e1# + e2 def color(im): diff = 2 * im[:, :, 0] - im[:, :, 1] - im[:, :, 2] invdiff = diff.max() / diff uhoh = invdiff * 0 != 0 invdiff[uhoh] = 0 invdiff = gaussian_filter(invdiff, 20) return invdiff def lumin(im): diff = im[:, :, 0] + im[:, :, 1] + im[:, :, 2] return diff def colorlumin(im): #diff = rgb2hsv(im) #diff = diff[:, :, 0]# im = np.array(im).astype('float') diff = 2 * im[:, :, 0] - im[:, :, 1] - im[:, :, 2] print(np.median(diff)) imcolor = diff - np.median(diff) colorthresh = np.percentile(imcolor, 97) print("Found color threshold of " + str(colorthresh)) #invdiff = diff.max() / diff #uhoh = invdiff * 0 != 0 #invdiff[uhoh] = 0 #invdiff = gaussian_filter(diff, 2) #import matplotlib.pyplot as plt #plt.hist(imcolor.flatten(), bins=100) #plt.show() #import pdb; pdb.set_trace() diff = rgb2gray(im) imlumin = diff.copy() imlumin /= imlumin.max() #plt.imshow(imlumin) #plt.colorbar() #plt.show() # mask regions with a strong wave signature waveindex = imlumin > 0.9 imcolor[waveindex] = imcolor.min() #plt.imshow(imcolor) #plt.colorbar() #plt.show() # first guess at whale region #import matplotlib.pyplot as plt #plt.imshow(imcolor) #plt.colorbar() #plt.show() hicol = imcolor >= colorthresh locol = imcolor < colorthresh imcolor[hicol] = 10#np.abs(colorthresh) #locol = imcolor < colorthresh imcolor[locol] = 0 #plt.imshow(imcolor) #plt.colorbar() #plt.show() #print(smallim.mean()) return (imcolor, imlumin, colorthresh)
There’s another “win a house” competition on the block: this one (left) is a Grade II* listed cottage in the Cotswolds. The 350-year-old stone property, in Broadway village, is a 20-minute drive from Cheltenham and Stratford-upon-Avon. It comes with three bedrooms, solid oak furniture and a walled garden. The competition, organised by the cottage’s owners, runs between now and 30 September 2009. The winner will snag a four-week holiday at the cottage; the holiday can be taken in one go, or spread across multiple visits. The entry fee is £20, and each entrant must answer three multiple choice questions correctly. For further information and to enter, visit the competition website: Win a holiday in the Cotswolds. …So is the competition to win a pad in Majorca, by the look of it. And the Devon house, in case you’re wondering, is still in limbo.
# -*- coding: utf-8 -*-
"""
################################################################################
# Copyright (c) 2010, Ilgar Mashayev
#
# E-mail: pyzimbra@lab.az
# Website: http://github.com/ilgarm/pyzimbra
################################################################################
# This file is part of pyzimbra.
#
# Pyzimbra is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyzimbra is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyzimbra.  If not, see <http://www.gnu.org/licenses/>.
################################################################################
@author: ilgar
"""
from test.util import load_test_properties


class BaseTest(object):
    """Common fixture base: exposes the test account settings loaded by
    setUp() as read/write properties backed by underscore-prefixed slots."""

    # --------------------------------------------------------------- properties
    @property
    def domain(self):
        return self._domain

    @domain.setter
    def domain(self, value):
        self._domain = value

    @property
    def hostname(self):
        return self._hostname

    @hostname.setter
    def hostname(self, value):
        self._hostname = value

    @property
    def domain_key(self):
        return self._domain_key

    @domain_key.setter
    def domain_key(self, value):
        self._domain_key = value

    @property
    def username(self):
        return self._username

    @username.setter
    def username(self, value):
        self._username = value

    @property
    def account_name(self):
        return self._account_name

    @account_name.setter
    def account_name(self, value):
        self._account_name = value

    @property
    def account_id(self):
        return self._account_id

    @account_id.setter
    def account_id(self, value):
        self._account_id = value

    @property
    def password(self):
        return self._password

    @password.setter
    def password(self, value):
        self._password = value

    @property
    def token(self):
        return self._token

    @token.setter
    def token(self, value):
        self._token = value

    @property
    def session_id(self):
        return self._session_id

    @session_id.setter
    def session_id(self, value):
        self._session_id = value

    # ------------------------------------------------------------------ unbound
    def setUp(self):
        # populate the properties above from the test configuration
        load_test_properties(self)

    def tearDown(self):
        pass
Detached house located in a hamlet, the Llaro Farmhouse, at 5 km from the sea. Ground floor: living room with kitchenette (sofa), 2 bedrooms (1 double bed) (2 single beds), shower room, toilet. Enclosed grounds with flowers (barbecue, garden furniture, swing, private parking).
# Django settings for skeleton project. import os, datetime, socket PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__)) def abspath(*args): """convert relative paths to absolute paths relative to PROJECT_ROOT""" return os.path.join(PROJECT_ROOT, *args) DEBUG = False TEMPLATE_DEBUG = DEBUG ADMINS = ( # ('Your Name', 'your_email@example.com'), ) MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'sideloader', 'USER': 'postgres', 'PASSWORD': '', 'HOST': 'localhost', 'PORT': '', } } # Hosts/domain names that are valid for this site; required if DEBUG is False # See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts ALLOWED_HOSTS = ['*'] # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # In a Windows environment this must be set to your system time zone. TIME_ZONE = 'UTC' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/var/www/example.com/media/" MEDIA_ROOT = abspath('media') # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://example.com/media/", "http://media.example.com/" MEDIA_URL = '/media/' # Absolute path to the directory static files should be collected to. 
# Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/var/www/example.com/static/" STATIC_ROOT = abspath('static') # URL prefix for static files. # Example: "http://example.com/static/", "http://static.example.com/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. # Leaving this intentionally blank because you have to generate one yourself. SECRET_KEY = 'please-change-me' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', # Uncomment the next line for simple clickjacking protection: # 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'skeleton.urls' # Python dotted path to the WSGI application used by Django's runserver. WSGI_APPLICATION = 'skeleton.wsgi.application' TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. 
# Don't forget to use absolute paths, not relative paths. abspath('templates'), ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', # Uncomment the next line to enable the admin: 'django.contrib.admin', # Uncomment the next line to enable admin documentation: # 'django.contrib.admindocs', 'gunicorn', 'raven.contrib.django.raven_compat', 'social.apps.django_app.default', 'crispy_forms', 'sideloader', ) # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } } # Celery configuration options BROKER_URL = 'redis://localhost:6379/0' # Defer email sending to Celery, except if we're in debug mode, # then just print the emails to stdout for debugging. LOGIN_REDIRECT_URL = '/' SOCIAL_AUTH_NEW_USER_REDIRECT_URL = '/' CRISPY_TEMPLATE_PACK = 'bootstrap3' SESSION_COOKIE_AGE = 1209600 SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer' SIDELOADER_DOMAIN = socket.getfqdn() SIDELOADER_FROM = 'Sideloader <no-reply@%s>' % SIDELOADER_DOMAIN SIDELOADER_PACKAGEURL = "http://%s/packages" % SIDELOADER_DOMAIN SLACK_TOKEN = None SLACK_CHANNEL = '' SLACK_HOST = 'foo.slack.com' try: from local_settings import * except ImportError: pass
To register for your maternity and childcare stay at our Family Birthing Suites, please complete the following information and click submit. Please bring your Insurance Card and Picture ID with you on the day of admission. After clicking the send button at the bottom of this page, a screen will appear confirming that your registration has been received. It will also provide you with next steps.
"""Bottle-based HTTP server exposing a pypot robot's REST interface to the
Snap! visual programming environment, plus helpers for installing and
patching the bundled Snap! project files."""

import os
import shutil
import bottle
import socket
import re
import logging

from ast import literal_eval as make_tuple

from ..utils.appdirs import user_data_dir
from .server import AbstractServer
from .httpserver import EnableCors

logger = logging.getLogger(__name__)


def get_snap_user_projects_directory():
    """Return the per-user Snap! projects directory, creating it if needed."""
    snap_user_projects_directory = user_data_dir('pypot', 'SnapRobotServer')
    if not os.path.exists(snap_user_projects_directory):
        os.makedirs(snap_user_projects_directory)
    return snap_user_projects_directory


def find_local_ip():
    """Return the local IP used to reach the internet.

    Opens a UDP socket "towards" 8.8.8.8 (no traffic is sent) and reads the
    socket's own address.
    # see here: http://stackoverflow.com/questions/166506/
    """
    return [(s.connect(('8.8.8.8', 80)), s.getsockname()[0], s.close())
            for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]


def set_snap_server_variables(host, port, snap_extension='.xml', path=None):
    """ Allow to change dynamically port and host variable in xml Snap! project file

    Rewrites the ``host`` and ``port`` <variable> elements of every Snap!
    project file found in *path* (or this module's directory when *path* is
    None).  Temporarily chdirs into the target directory and restores the
    previous working directory afterwards.
    """
    localdir = os.getcwd()
    if path is None:
        os.chdir(os.path.dirname(os.path.realpath(__file__)))
    else:
        os.chdir(path)
    xml_files = [f for f in os.listdir('.') if f.endswith(snap_extension)]
    for filename in xml_files:
        with open(filename, 'r') as xf:
            xml = xf.read()
        with open(filename, 'w') as xf:
            xml = re.sub(r'''<variable name="host"><l>[\s\S]*?<\/l><\/variable>''',
                         '''<variable name="host"><l>{}</l></variable>'''.format(host), xml)
            xml = re.sub(r'''<variable name="port"><l>[\s\S]*?<\/l><\/variable>''',
                         '''<variable name="port"><l>{}</l></variable>'''.format(port), xml)
            xf.write(xml)
    os.chdir(localdir)


class SnapRobotServer(AbstractServer):
    """HTTP server whose routes map URL fragments onto the restful robot
    abstraction (``self.restfull_robot``) provided by AbstractServer."""

    def __init__(self, robot, host, port, quiet=True):
        AbstractServer.__init__(self, robot, host, port)
        self.quiet = quiet

        self.app = bottle.Bottle()
        self.app.install(EnableCors())

        rr = self.restfull_robot

        # Copy Snap files from the system directory to the user directory.
        # This avoids permission issues when pypot was installed by pip into
        # an admin-owned directory.
        snap_system_projects_directory = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), 'snap_projects')
        xml_files = [os.path.join(snap_system_projects_directory, f)
                     for f in os.listdir(snap_system_projects_directory)
                     if f.endswith('.xml')]
        for xml_file in xml_files:
            dst = os.path.join(get_snap_user_projects_directory(),
                               os.path.basename(xml_file))
            logger.warning('Src: {}, Dest {}'.format(xml_file, dst))
            shutil.copyfile(xml_file, dst)

        # Patch the copied projects so they point at this server.
        set_snap_server_variables(find_local_ip(), port,
                                  path=get_snap_user_projects_directory())

        @self.app.get('/motors/<alias>')
        def get_motors(alias):
            # '/'-separated list of motor names for the given alias
            return '/'.join(rr.get_motors_list(alias))

        @self.app.get('/motor/<motor>/get/<register>')
        def get_motor_register(motor, register):
            return str(rr.get_motor_register_value(motor, register))

        @self.app.get('/motors/get/positions')
        def get_motors_positions():
            get_pos = lambda m: rr.get_motor_register_value(
                m, 'present_position')
            # NOTE(review): the '/'-joined value below is dead — it is
            # immediately overwritten by the ';'-joined one.
            msg = '/'.join('{}'.format(get_pos(m))
                           for m in rr.get_motors_list())
            msg = ';'.join('{}'.format(get_pos(m))
                           for m in rr.get_motors_list())
            return msg

        @self.app.get('/motors/alias')
        def get_robot_aliases():
            return '/'.join('{}'.format(alias) for alias in rr.get_motors_alias())

        @self.app.get('/motors/set/goto/<motors_position_duration>')
        def set_motors_goto(motors_position_duration):
            """ Allow many motor position settings with a single HTTP request.
                Be careful: with many motors, it could exceed the GET max
                length of your web browser.
                Format: "name:position:duration;name:position:duration;..."
            """
            for m_settings in motors_position_duration.split(';'):
                settings = m_settings.split(':')
                rr.set_goto_position_for_motor(settings[0],
                                               float(settings[1]),
                                               float(settings[2]))
            return 'Done!'

        @self.app.get('/motors/set/registers/<motors_register_value>')
        def set_motors_registers(motors_register_value):
            """ Allow many motor register settings with a single HTTP request.
                Be careful: with many motors, it could exceed the GET max
                length of your web browser.
                Format: "name:register:value;..." (value parsed by literal_eval)
            """
            for m_settings in motors_register_value.split(';'):
                settings = m_settings.split(':')
                rr.set_motor_register_value(settings[0], settings[1],
                                            make_tuple(settings[2]))
            return 'Done!'

        # TODO : delete ?
        @self.app.get('/motors/set/positions/<positions>')
        def set_motors_positions(positions):
            # ';'-separated float positions, applied in motor-list order.
            # NOTE(review): positions[:-1] drops the last char — presumably a
            # trailing ';' is expected; confirm against the Snap! caller.
            positions = map(lambda s: float(s), positions[:-1].split(';'))
            for m, p in zip(rr.get_motors_list(), positions):
                rr.set_motor_register_value(m, 'goal_position', p)
            return 'Done!'

        @self.app.get('/motor/<motor>/set/<register>/<value>')
        def set_reg(motor, register, value):
            rr.set_motor_register_value(motor, register, float(value))
            return 'Done!'

        @self.app.get('/motor/<motor>/goto/<position>/<duration>')
        def set_goto(motor, position, duration):
            rr.set_goto_position_for_motor(
                motor, float(position), float(duration))
            return 'Done!'

        @self.app.get('/snap-blocks.xml')
        def get_pypot_snap_blocks():
            # serve the (host/port-patched) Snap! blocks project
            with open(os.path.join(get_snap_user_projects_directory(),
                                   'pypot-snap-blocks.xml')) as f:
                return f.read()

        @self.app.get('/ip')
        def get_ip():
            # NOTE(review): uses hostname resolution, which may differ from
            # find_local_ip() on multi-homed machines.
            return socket.gethostbyname(socket.gethostname())

        @self.app.get('/reset-simulation')
        def reset_simulation():
            if hasattr(robot, 'reset_simulation'):
                robot.reset_simulation()
            return 'Done!'

        @self.app.get('/primitives')
        def get_primitives():
            return '/'.join(rr.get_primitives_list())

        @self.app.get('/primitives/running')
        def get_running_primitives():
            return '/'.join(rr.get_running_primitives_list())

        @self.app.get('/primitive/<primitive>/start')
        def start_primitive(primitive):
            rr.start_primitive(primitive)
            return 'Done!'

        @self.app.get('/primitive/<primitive>/stop')
        def stop_primitive(primitive):
            rr.stop_primitive(primitive)
            return 'Done!'

        @self.app.get('/primitive/<primitive>/pause')
        def pause_primitive(primitive):
            rr.pause_primitive(primitive)
            return 'Done!'

        @self.app.get('/primitive/<primitive>/resume')
        def resume_primitive(primitive):
            rr.resume_primitive(primitive)
            return 'Done!'

        @self.app.get('/primitive/<primitive>/properties')
        def get_primitive_properties_list(primitive):
            return '/'.join(rr.get_primitive_properties_list(primitive))

        @self.app.get('/primitive/<primitive>/get/<property>')
        def get_primitive_property(primitive, property):
            return rr.get_primitive_property(primitive, property)

        @self.app.get('/primitive/<primitive>/set/<property>/<value>')
        def set_primitive_property(primitive, property, value):
            return rr.set_primitive_property(primitive, property, value)

        @self.app.get('/primitive/<primitive>/methodes')
        def get_primitive_methodes_list(primitive):
            return '/'.join(rr.get_primitive_methods_list(primitive))

        @self.app.get('/primitive/<primitive>/call/<method>/<args>')
        def call_primitive_methode(primitive, method, args):
            # args format: "key:value;key:value;..."
            kwargs = dict(item.split(":") for item in args.split(";"))
            return rr._call_primitive_method(primitive, method, **kwargs)

        # Hacks (no restfull) to record movements
        @self.app.get('/primitive/MoveRecorder/<move_name>/start')
        def start_move_recorder(move_name):
            rr.start_move_recorder(move_name)
            return 'Done!'

        @self.app.get('/primitive/MoveRecorder/<move_name>/stop')
        def stop_move_recorder(move_name):
            rr.stop_move_recorder(move_name)
            return 'Done!'

        @self.app.get('/primitive/MoveRecorder/<move_name>/attach/<motors>')
        def attach_move_recorder(move_name, motors):
            rr.attach_move_recorder(move_name, motors.split(';'))
            return 'Done!'

        @self.app.get('/primitive/MoveRecorder/<move_name>/get_motors')
        def get_move_recorder_motors(move_name):
            motors = rr.get_move_recorder_motors(move_name)
            return '/'.join(motors) if motors is not None else 'None'

        # Obsolete ?
        @self.app.get('/primitive/MoveRecorder/<move_name>/start/<motors>')
        def start_move_recorder_with_motors(move_name, motors):
            # NOTE(review): everything after this raise is unreachable —
            # the route is effectively disabled.
            raise DeprecationWarning
            rr.start_move_recorder(move_name, motors.split(';'))
            return 'Done!'

        @self.app.get('/primitive/MoveRecorder/<move_name>/remove')
        def remove_move_record(move_name):
            rr.remove_move_record(move_name)
            return 'Done!'

        @self.app.get('/primitive/MoveRecorder')
        def get_available_records():
            return '/'.join(rr.get_available_record_list())

        @self.app.get('/primitive/MovePlayer')
        def get_available_records2():
            # same payload as /primitive/MoveRecorder
            return '/'.join(rr.get_available_record_list())

        @self.app.get('/primitive/MovePlayer/<move_name>/start')
        def start_move_player(move_name):
            return str(rr.start_move_player(move_name))

        @self.app.get('/primitive/MovePlayer/<move_name>/start/<move_speed>')
        def start_move_player_with_speed(move_name, move_speed):
            return str(rr.start_move_player(move_name, float(move_speed)))

        @self.app.get('/primitive/MovePlayer/<move_name>/start/<move_speed>/backwards')
        def start_move_player_backwards_with_speed(move_name, move_speed):
            return str(rr.start_move_player(move_name, float(move_speed),
                                            backwards=True))

        @self.app.get('/primitive/MovePlayer/<move_name>/stop')
        def stop_move_player(move_name):
            rr.stop_primitive('_{}_player'.format(move_name))
            return 'Done!'

    def run(self):
        """Start serving (blocking) on the configured host/port."""
        bottle.run(self.app, host=self.host, port=self.port, quiet=self.quiet)
I'm going to be in NYC (54th and 6th) from superbowl sunday until thursday morning for work. I was hoping to find a welcoming place to play once or twice in the evening when I was there. Where would you suggest I go? If it helps, my rating from 3+ years ago was in the low 1600s though people have said I'm playing above that now (not sure how true it is). I will be on my own so I'll have to find people to play. Join Table Tennis Daily, get a subway card, link up with UpsidedownCarl and Co to visit some clubs, and record the mayhem. Just google "table tennis clubs in nyc" and a number of them will show up. If you drive I suggest going to Westchester TTC in Pleasantville, the friendliest people and best facilities. I liked Lily Yip's club, it's professional and the league is competitive for all. Westchester was my home club back when they were Rivertowns Table Tennis Club and I was a member there for quite a while. Great people and place. I just don't want to drive up there (or take the train) from midtown and back on a weeknight after work hours. It just takes too much time. Part of the reason why I asked is to make sure I go somewhere where I'll get to play with a variety of people. You never know which places have a good turnout during the week. Take train to Queens (Flushing), there are couple of clubs within walking distance from the last stop (I went to Robert Chen's one). It was good. That's a bit too far considering the timing. I figure I can't head out anywhere until maybe 6pm and want to be back by 11-11:30 so I'll be good for work in the morning. I don't want to spend more time commuting than playing. Here you will have a lot of different people to play matches with. I think it is usually too crowded to actually spend time training so you warm up for 2 minutes and play matches. You could also try SPIN NYC but it can be pricey and unless you prearrange to play with someone, I am not sure how successful you will be finding someone. 
Depending on when you go, there might not be anyone to play. It is usually best 3-6pm or 9-10pm on weekdays or 1-4pm on weekends. You can also post on their facebook page and ask to see if anyone would like to play. Here you can actually spend a lot of time on a table training or playing matches. If you are willing to go to Flushing then NYISC or Robert Chen's clubs are also good options however they are quite a trip from midtown. Thanks, this was the type of thing I was looking for. I was thinking WCTTC and didn't notice NYTTA. I can get to either of them in under 30 minutes which is a huge plus. I was under the impression that for SPIN you had to arrange to play someone there. I have had so many successful visits to NYC and TT clubs with my TT friends who live there. I was in NYC in Nov 2018 and visited Robert Chen's club and did a team tourney at NYISC. If you get friendly with Carl, he will get you into SPiN for free, he teaches yoga to the staff. Come visit me in Brooklyn! So I ended up at NYTTA on Monday evening. It wasn't crowded so I played almost non-stop. Strangely I didn't play too many people but there was good competition and fun was had by all. Because of the setup, I played 1 guy like 4 matches in a row because nobody was waiting. They were good matches, but looking back I should have tried to play more people to get more variety. A number of people thought I was rated around 1800 (I'm not, though it's been a while since I played a tournament), so I'm pretty happy even though I didn't feel like I played my best. I've clearly been playing in the same small club too long as I had a lot of trouble returning some serves. There's only a couple of guys with good serves at my club so that's probably the hardest thing for me to work on. Who operates the club? Is it Ernesto Ebuen? That's what I read, but he wasn't there that night.
import numpy as np
from pychemia.utils.mathematics import lcm, shortest_triple_set
from pychemia import Structure
import itertools


class StructureMatch:

    def __init__(self, structure1, structure2):
        """
        Creates a structure match between 2 structures

        The structures will be changed to match their number of atoms and the
        order of atoms inside such that the distances between atoms in
        equivalent positions on both structures is minimized.

        :param structure1: (Structure)
        :param structure2: (Structure)
        """
        assert (isinstance(structure1, Structure))
        assert (isinstance(structure2, Structure))
        assert structure1.is_perfect
        assert structure2.is_perfect
        # work on copies so the caller's structures are never mutated
        self.structure1 = structure1.copy()
        self.structure2 = structure2.copy()
        self.base_lattice = self.structure1.lattice

    def match_size(self):
        """Build supercells of both structures so they contain the same
        number of atoms (the lcm of their composition gcds)."""
        assert self.structure1.is_crystal
        assert self.structure2.is_crystal

        gcd1 = self.structure1.get_composition().gcd
        gcd2 = self.structure2.get_composition().gcd

        sts = np.array(shortest_triple_set(lcm(gcd1, gcd2) / gcd1)).astype(int)
        # assign the largest multiplier to the shortest lattice vector
        supercell_multiples = sts[self.structure1.lattice.lengths.argsort()[::-1]]
        self.structure1 = self.structure1.supercell(supercell_multiples)

        # BUGFIX: .astype(int) was missing here (present for structure1),
        # which passed float multiples to supercell()
        sts = np.array(shortest_triple_set(lcm(gcd1, gcd2) / gcd2)).astype(int)
        supercell_multiples = sts[self.structure2.lattice.lengths.argsort()[::-1]]
        self.structure2 = self.structure2.supercell(supercell_multiples)

    def match_shape(self):
        """Bring both structures to canonical form; they must then share the
        same ordered list of chemical symbols."""
        self.structure1.canonical_form()
        self.structure2.canonical_form()
        assert (self.structure1.symbols == self.structure2.symbols)

    def match_atoms(self):
        """Reorder the sites of structure2 (per species) so each site pairs
        with the nearest equivalent site of structure1.

        For small cells (< 7 atoms) all permutations are searched; otherwise
        only 2- and 4-element swaps are considered to keep the search
        tractable.
        """
        if self.structure1.natom != self.structure2.natom:
            raise ValueError('Match the size first')
        best = {}
        for specie in self.structure1.species:
            selection = np.array(self.structure1.symbols) == specie
            distance_matrix, close_images = self.base_lattice.minimal_distances(
                self.structure1.reduced[selection],
                self.structure2.reduced[selection])
            min_trace = 1E10
            best[specie] = None
            if self.structure1.natom < 7:
                # exhaustive search over all site permutations
                for i in itertools.permutations(range(len(distance_matrix))):
                    if distance_matrix[:, np.array(i)].trace() < min_trace:
                        min_trace = distance_matrix[:, np.array(i)].trace()
                        best[specie] = i
            else:
                # Only consider permutations of 2 positions
                if len(distance_matrix) > 1:
                    for ipar in itertools.permutations(range(len(distance_matrix)), 2):
                        i = list(range(len(distance_matrix)))
                        i[ipar[0]] = ipar[1]
                        i[ipar[1]] = ipar[0]
                        if distance_matrix[:, np.array(i)].trace() < min_trace:
                            min_trace = distance_matrix[:, np.array(i)].trace()
                            best[specie] = i
                    # ...and double swaps (two disjoint transpositions)
                    for ipar in itertools.permutations(range(len(distance_matrix)), 4):
                        i = list(range(len(distance_matrix)))
                        i[ipar[0]] = ipar[1]
                        i[ipar[1]] = ipar[0]
                        i[ipar[2]] = ipar[3]
                        i[ipar[3]] = ipar[2]
                        if distance_matrix[:, np.array(i)].trace() < min_trace:
                            min_trace = distance_matrix[:, np.array(i)].trace()
                            best[specie] = i
                else:
                    best[specie] = [0]
            print('For specie %s best permutation is %s' % (specie, str(best[specie])))

        # stitch the per-species permutations into one global site permutation;
        # assumes sites of each species are contiguous (canonical form)
        best_permutation = np.zeros(self.structure1.natom, dtype=int)
        index = 0
        while index < self.structure1.natom:
            specie = self.structure1.symbols[index]
            selection = np.array(self.structure1.symbols) == specie
            best_permutation[selection] = index + np.array(best[specie])
            index += len(best[specie])
        self.structure2.sort_sites_using_list(best_permutation)

    def reduced_displacement(self):
        """Return the (nsites, 3) array of reduced-coordinate displacements
        from structure1 to structure2, using the closest periodic image."""
        assert (self.structure1.symbols == self.structure2.symbols)
        assert (self.structure1.nsites == self.structure2.nsites)
        assert (self.structure1.natom == self.structure2.natom)
        ret = np.zeros((self.structure1.nsites, 3))
        distance_matrix, close_images = self.base_lattice.minimal_distances(
            self.structure1.reduced, self.structure2.reduced)
        for i in range(self.structure1.nsites):
            x1 = self.structure1.reduced[i]
            x2 = self.structure2.reduced[i] + close_images[i, i]
            ret[i] = x2 - x1
        return ret

    def cell_displacement(self):
        """Return the matrix mapping structure2's cell onto structure1's."""
        return np.dot(self.structure1.cell, np.linalg.inv(self.structure2.cell))

    def cartesian_distances(self):
        """Return per-site Cartesian distances between matched sites, using
        the metric tensor of the base lattice."""
        rd = self.reduced_displacement()
        ret = np.zeros(self.structure1.nsites)
        for i in range(self.structure1.nsites):
            ret[i] = np.dot(np.dot(rd[i], self.base_lattice.metric), rd[i])
            ret[i] = np.sqrt(ret[i])
        return ret
Are you in the process of relocating from Texas to Washington? There are many things you need to consider. If one of them is auto shipping, it is important that you do your research and find the most reliable company that offers auto transport services in Texas that fit your needs and will meet your expectations. There are a few things to keep in mind and one of them is cost. The cost can fluctuate greatly from one car transport company to another. You’d be surprised at how much you can save by looking into Texas to Washington seasonal discounts and specials. To get the most accurate Texas to Washington auto shipping estimate it is important to have as much information as possible when filling out a vehicle shipping quote or speaking to an auto moving representative. Your estimate is based not only on the distance of travel between Texas and Washington, but also the specifications of your vehicle. Another important factor to consider when looking for a shipping company is finding one that insures your vehicle or offers any type of recovery plan. You never want to use a company that doesn’t offer coverage for your vehicle in the event that something should happen to it. Be sure to also check if the Better Business Bureau (BBB) accredits the company and feel free to check their reviews. Not only can you find reviews on the BBB website but you can also find reviews on the company website as well as on yelp.com and yellowpages.com, just to name a few. Reading shipping reviews allows you to get insight on how other parties feel about the potential company and the shipping services they provide. Auto Transport Group has been shipping vehicles to and from Texas to Washington for well over 10 years and we must say, we've gotten pretty good at it! Our experts will handle your shipping needs with high quality care. When you ship through us your vehicle will be FULLY insured and this adds an extra layer of protection for your peace of mind.
We offer door to door service and both open AND enclosed carrier auto moving! Time is of the essence and when you ship through us we can assure you that your vehicle will be on time, every time with our expertise in customer care. We offer exclusive and even expedited vehicle shipping services so that your vehicle arrives when you need it to. To top it off, your vehicle can be tracked at any point in time of the shipping process. Just call one of our Texas to Washington representatives to find the best shipping solution for YOU! - Remember, here at Auto Transport Group we're here to serve you and give you the quality care you deserve when shipping your vehicle. Please fill out the quote form above to get a hassle free quote that is customized just for you! Call to speak with an expert that can walk you through the process and help you choose the shipping service that's right for you.
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright &copy; 2018 ISIS Rutherford Appleton Laboratory UKRI,
#   NScD Oak Ridge National Laboratory, European Spallation Source,
#   Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import os

from isis_powder.abstract_inst import AbstractInst
from isis_powder.routines import absorb_corrections, common, instrument_settings
from isis_powder.hrpd_routines import hrpd_advanced_config, hrpd_algs, hrpd_param_mapping

import mantid.simpleapi as mantid

# A bug on the instrument when recording historic NeXus files (< 2015) caused
# corrupted data. Use raw files for now until sufficient time has passed and old
# data is unlikely to be reanalysed.
RAW_DATA_EXT = '.raw'

# Constants for masking the sharp peaks produced by the proton prompt pulse,
# which repeats every PROMPT_PULSE_INTERVAL microseconds (see
# _mask_prompt_pulses below).
PROMPT_PULSE_INTERVAL = 20000.0
PROMPT_PULSE_RIGHT_WIDTH = 140.0
PROMPT_PULSE_LEFT_WIDTH = 30.0


class HRPD(AbstractInst):
    """ISIS HRPD powder-diffraction instrument object.

    Thin instrument-specific layer over AbstractInst: it holds the
    HRPD parameter mapping / advanced config, and implements the
    hooks the shared focusing and vanadium-creation routines call.
    """

    def __init__(self, **kwargs):
        # Merge user kwargs with the HRPD parameter mapping and the
        # advanced (rarely changed) configuration defaults.
        self._inst_settings = instrument_settings.InstrumentSettings(
            param_map=hrpd_param_mapping.attr_mapping, kwargs=kwargs,
            adv_conf_dict=hrpd_advanced_config.get_all_adv_variables())

        super(HRPD, self).__init__(user_name=self._inst_settings.user_name,
                                   calibration_dir=self._inst_settings.calibration_dir,
                                   output_dir=self._inst_settings.output_dir,
                                   inst_prefix="HRPD")

        # Cache of run-details objects keyed by a fingerprint of the run
        # string + file extension, filled lazily by _get_run_details.
        self._cached_run_details = {}
        self._sample_details = None

    def focus(self, **kwargs):
        """Focus the run(s) given in kwargs using the shared _focus routine.

        The TOF window must be switched first so that window-dependent
        advanced settings are in place before the remaining kwargs are
        applied.
        """
        self._switch_tof_window_inst_settings(kwargs.get("window"))
        self._inst_settings.update_attributes(kwargs=kwargs)
        return self._focus(
            run_number_string=self._inst_settings.run_number,
            do_van_normalisation=self._inst_settings.do_van_norm,
            do_absorb_corrections=self._inst_settings.do_absorb_corrections)

    def create_vanadium(self, **kwargs):
        """Create a vanadium calibration for the configured run range."""
        self._switch_tof_window_inst_settings(kwargs.get("window"))
        self._inst_settings.update_attributes(kwargs=kwargs)

        return self._create_vanadium(run_number_string=self._inst_settings.run_in_range,
                                     do_absorb_corrections=self._inst_settings.do_absorb_corrections)

    def set_sample_details(self, **kwargs):
        """Store the sample details object passed via the 'sample' kwarg.

        Raises a descriptive error (via dictionary_key_helper) when the
        kwarg is missing; the stored object is later consumed by
        _apply_absorb_corrections.
        """
        kwarg_name = "sample"
        sample_details_obj = common.dictionary_key_helper(
            dictionary=kwargs, key=kwarg_name,
            exception_msg="The argument containing sample details was not found. Please"
                          " set the following argument: {}".format(kwarg_name))
        self._sample_details = sample_details_obj

    def mask_prompt_pulses_if_necessary(self, ws_list):
        """Mask prompt-pulse bins in every workspace of ws_list (in place)."""
        for ws in ws_list:
            self._mask_prompt_pulses(ws)

    def should_subtract_empty_inst(self):
        # Whether the empty-instrument run should be subtracted during focusing.
        return self._inst_settings.subtract_empty_inst

    def create_solid_angle_corrections(self, vanadium, run_details):
        """
        Creates the solid angle corrections from a vanadium run, only applicable on HRPD
        otherwise return None.

        The correction (solid angle x integrated detector efficiency) is saved
        as a NeXus file named "sac<splined name>" in the vanadium directory;
        nothing is returned.

        :param vanadium: The vanadium workspace used to create this
        :param run_details: the run details object whose run number / van path are used
        """
        settings = self._inst_settings
        if not settings.do_solid_angle:
            return
        solid_angle = mantid.SolidAngle(InputWorkspace=vanadium)
        solid_angle = mantid.Scale(InputWorkspace=solid_angle, Factor=100, Operation='Multiply')

        # Detector efficiency = vanadium counts / solid angle, integrated in
        # wavelength over the (optional) configured range.
        eff = mantid.Divide(LHSWorkspace=vanadium, RHSWorkspace=solid_angle)
        eff = mantid.ConvertUnits(InputWorkspace=eff, Target='Wavelength')
        integration_range = settings.eff_integration_range
        # use full range if no range is supplied
        integration_range = integration_range if integration_range is not None else (None, None)
        eff = mantid.Integration(InputWorkspace=eff,
                                 RangeLower=integration_range[0],
                                 RangeUpper=integration_range[1])

        correction = mantid.Multiply(LHSWorkspace=solid_angle, RHSWorkspace=eff)
        # 1e-5 rescales the product back after the x100 Scale above;
        # presumably chosen to keep the saved correction near unity — the
        # exact normalisation convention is not visible here.
        correction = mantid.Scale(InputWorkspace=correction, Factor=1e-5,
                                  Operation='Multiply')
        name = "sac" + common.generate_splined_name(run_details.run_number, [])
        path = run_details.van_paths
        mantid.SaveNexus(InputWorkspace=correction, Filename=os.path.join(path, name))
        common.remove_intermediate_workspace(eff)
        common.remove_intermediate_workspace(correction)

    def get_solid_angle_corrections(self, vanadium, run_details):
        """Load a previously saved solid-angle correction, or raise if absent."""
        if not self._inst_settings.do_solid_angle:
            return
        name = "sac" + common.generate_splined_name(vanadium, [])
        path = run_details.van_paths
        try:
            solid_angle = mantid.Load(Filename=os.path.join(path, name))
            return solid_angle
        except ValueError:
            raise RuntimeError("Could not find " + os.path.join(path, name) + " please run create_vanadium with "
                               "\"do_solid_angle_corrections=True\"")

    def _generate_input_file_name(self, run_number, file_ext=""):
        """
        Generates a name which Mantid uses within Load to find the file.
        :param run_number: The run number to convert into a valid format for Mantid
        :param file_ext: An optional file extension to add to force a particular format
        :return: A filename that will allow Mantid to find the correct run for that instrument.
        """
        if not file_ext:
            # Default to .raw — see RAW_DATA_EXT note about corrupted NeXus files.
            file_ext = RAW_DATA_EXT
        return self._generate_inst_filename(run_number=run_number, file_ext=file_ext)

    def _apply_absorb_corrections(self, run_details, ws_to_correct):
        """Dispatch absorption corrections by sample kind.

        Vanadium runs use the vanadium-specific routine; otherwise the
        stored sample details select slab vs cylinder geometry.
        Raises RuntimeError when sample details were never set.
        """
        if self._is_vanadium:
            return hrpd_algs.calculate_van_absorb_corrections(
                ws_to_correct=ws_to_correct, multiple_scattering=self._inst_settings.multiple_scattering)
        elif self._sample_details is None:
            raise RuntimeError("Absorption corrections cannot be run without sample details."
                               " Please set sample details using set_sample before running absorption corrections.")
        elif self._sample_details.shape_type() == "slab":
            return hrpd_algs.calculate_slab_absorb_corrections(ws_to_correct=ws_to_correct,
                                                               sample_details_obj=self._sample_details)
        else:
            return absorb_corrections.run_cylinder_absorb_corrections(
                ws_to_correct=ws_to_correct,
                multiple_scattering=self._inst_settings.multiple_scattering,
                sample_details_obj=self._sample_details,
                is_vanadium=self._is_vanadium)

    def _crop_banks_to_user_tof(self, focused_banks):
        # Per-bank TOF cropping using the user-configured crop list.
        return common.crop_banks_using_crop_list(focused_banks, self._inst_settings.tof_cropping_values)

    def _crop_van_to_expected_tof_range(self, van_ws_to_crop):
        # Vanadium is cropped to its own (first, last) TOF window.
        return common.crop_in_tof(ws_to_crop=van_ws_to_crop,
                                  x_min=self._inst_settings.van_tof_cropping[0],
                                  x_max=self._inst_settings.van_tof_cropping[-1])

    def _get_instrument_bin_widths(self):
        return self._inst_settings.focused_bin_widths

    def _get_run_details(self, run_number_string):
        """Return (and cache) the run-details object for a run string."""
        run_number_string_key = self._generate_run_details_fingerprint(run_number_string,
                                                                       self._inst_settings.file_extension)
        if run_number_string_key in self._cached_run_details:
            return self._cached_run_details[run_number_string_key]

        self._cached_run_details[run_number_string_key] = hrpd_algs.get_run_details(
            run_number_string=run_number_string, inst_settings=self._inst_settings,
            is_vanadium=self._is_vanadium)

        return self._cached_run_details[run_number_string_key]

    def _mask_prompt_pulses(self, ws):
        """
        HRPD has a long flight path from the moderator resulting
        in sharp peaks from the proton pulse that maintain their
        sharp resolution. Here we mask these pulses out that occur
        at 20ms intervals.

        :param ws: The workspace containing the pulses. It is masked in place.
        """
        # The number of pulse can vary depending on the data range
        # Compute number of pulses that occur at each 20ms interval.
        x_data = ws.readX(0)
        pulse_min = int(round(x_data[0]) / PROMPT_PULSE_INTERVAL) + 1
        pulse_max = int(round(x_data[-1]) / PROMPT_PULSE_INTERVAL) + 1
        for i in range(pulse_min, pulse_max):
            centre = PROMPT_PULSE_INTERVAL * float(i)
            # Mask an asymmetric window around each pulse centre.
            mantid.MaskBins(InputWorkspace=ws, OutputWorkspace=ws,
                            XMin=centre - PROMPT_PULSE_LEFT_WIDTH,
                            XMax=centre + PROMPT_PULSE_RIGHT_WIDTH)

    def _switch_tof_window_inst_settings(self, tof_window):
        # Swap in the advanced-config values for the requested TOF window.
        self._inst_settings.update_attributes(
            advanced_config=hrpd_advanced_config.get_tof_window_dict(tof_window=tof_window))
For more information about Aeryon Defense, contact us here and stop by the Aeryon Defense booth 514 at the AUVSI Unmanned Systems show February 6-8 in National Harbor, MD. Aeryon Labs, Inc. is the world’s leading provider of high-performance, small Unmanned Aircraft Systems (sUAS) and software for military, public safety, and commercial customers. Aeryon’s battle-tested sUAS set the standard for real-time, secure, aerial intelligence across a wide range of mission-critical applications from covert tactical missions, to search and rescue operations, to commercial inspections and more. Aeryon aircraft are deployed with over 20 militaries and in daily use with customers in over 30 countries. Visit www.aeryon.com to learn more. Aeryon Defense USA, Inc. is an approved U.S. defense contractor delivering the next-generation of high-performance, small Unmanned Aircraft Systems (sUAS) and software to U.S. military forces and government agencies. Building off Aeryon’s battle-tested sUAS, Aeryon Defense partners with the Department of Defense to develop mission-specific solutions for specialized payloads, enhanced command and control, greater network interoperability and more. Based in Denver, Colorado, the Aeryon Defense team has the military UAS experts and resources needed to deliver innovative, multi-role sUAS that empower the modern warfighter. Visit aeryondefense.us to learn more.
#!/usr/bin/python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TR-069 has mandatory attribute names that don't comply with policy
#pylint: disable-msg=C6409

"""Implementation of tr-140 Storage Services objects."""

__author__ = 'dgentry@google.com (Denton Gentry)'

import ctypes
import fcntl
import os
import os.path
import re
import subprocess
import tr.core
import tr.tr140_v1_1
import tr.x_catawampus_storage_1_0

BASESTORAGE = tr.x_catawampus_storage_1_0.X_CATAWAMPUS_ORG_Storage_v1_0.StorageService


class MtdEccStats(ctypes.Structure):
  """<mtd/mtd-abi.h> struct mtd_ecc_stats."""

  _fields_ = [('corrected', ctypes.c_uint32),
              ('failed', ctypes.c_uint32),
              ('badblocks', ctypes.c_uint32),
              ('bbtblocks', ctypes.c_uint32)]


def _GetMtdStats(mtddev):
  """Return the MtdEccStats for the given mtd device.

  Arguments:
    mtddev: the string path to the device, ex: '/dev/mtd14'
  Raises:
    IOError: if the ioctl fails.
  Returns:
    an MtdEccStats.
  """
  ECCGETSTATS = 0x40104d12  # ECCGETSTATS _IOR('M', 18, struct mtd_ecc_stats)
  with open(mtddev, 'r') as f:
    ecc = MtdEccStats()
    if fcntl.ioctl(f, ECCGETSTATS, ctypes.addressof(ecc)) != 0:
      raise IOError('ECCGETSTATS failed')
    return ecc


# Unit tests can override these
GETMTDSTATS = _GetMtdStats
PROC_FILESYSTEMS = '/proc/filesystems'
PROC_MOUNTS = '/proc/mounts'
SLASHDEV = '/dev/'
SMARTCTL = '/usr/sbin/smartctl'
STATVFS = os.statvfs
SYS_BLOCK = '/sys/block/'
SYS_UBI = '/sys/class/ubi/'


def _FsType(fstype):
  """Map a kernel filesystem name to its tr-140 enumeration value.

  Unknown filesystems are reported as a vendor extension.
  """
  supported = {'vfat': 'FAT32', 'ext2': 'ext2', 'ext3': 'ext3',
               'ext4': 'ext4', 'msdos': 'FAT32', 'xfs': 'xfs',
               'reiserfs': 'REISER'}
  if fstype in supported:
    return supported[fstype]
  else:
    return 'X_CATAWAMPUS-ORG_' + fstype


def _IsSillyFilesystem(fstype):
  """Filesystems which are not interesting to export to the ACS."""
  SILLY = frozenset(['devtmpfs', 'proc', 'sysfs', 'usbfs', 'devpts',
                     'rpc_pipefs', 'autofs', 'nfsd', 'binfmt_misc', 'fuseblk'])
  return fstype in SILLY


def _GetFieldFromOutput(prefix, output, default=''):
  """Search output for line of the form 'Foo: Bar', return 'Bar'."""
  # Raw string: '\s' / '\S' in a plain literal are invalid escape sequences.
  field_re = re.compile(prefix + r'\s*(\S+)')
  for line in output.splitlines():
    result = field_re.search(line)
    if result is not None:
      return result.group(1).strip()
  return default


def _ReadOneLine(filename, default):
  """Read one line from a file. Return default if anything fails."""
  try:
    # 'with' guarantees the descriptor is closed; the previous version
    # leaked one fd per call.
    with open(filename, 'r') as f:
      return f.readline().strip()
  except IOError:
    return default


def IntFromFile(filename):
  """Read one line from a file and return an int, or zero if an error occurs."""
  try:
    buf = _ReadOneLine(filename, '0')
    return int(buf)
  except ValueError:
    return 0


class LogicalVolumeLinux26(BASESTORAGE.LogicalVolume):
  """Implementation of tr-140 StorageService.LogicalVolume for Linux FS."""

  def __init__(self, rootpath, fstype):
    BASESTORAGE.LogicalVolume.__init__(self)
    self.rootpath = rootpath
    self.fstype = fstype
    self.Unexport('Alias')
    self.Unexport('Encrypted')
    self.Unexport('ThresholdReached')
    self.Unexport('PhysicalReference')
    self.FolderList = {}
    self.ThresholdLimit = 0

  @property
  def Name(self):
    return self.rootpath

  @property
  def Status(self):
    return 'Online'

  @property
  def Enable(self):
    return True

  @property
  def FileSystem(self):
    return self.fstype

  # TODO(dgentry) need @sessioncache decorator
  def _GetStatVfs(self):
    return STATVFS(self.rootpath)

  @property
  def Capacity(self):
    """Total capacity in megabytes."""
    vfs = self._GetStatVfs()
    return int(vfs.f_blocks * vfs.f_bsize / 1024 / 1024)

  @property
  def ThresholdReached(self):
    vfs = self._GetStatVfs()
    require = self.ThresholdLimit * 1024 * 1024
    avail = vfs.f_bavail * vfs.f_bsize
    return True if avail < require else False

  @property
  def UsedSpace(self):
    """Used space in megabytes."""
    vfs = self._GetStatVfs()
    b_used = vfs.f_blocks - vfs.f_bavail
    return int(b_used * vfs.f_bsize / 1024 / 1024)

  @property
  def X_CATAWAMPUS_ORG_ReadOnly(self):
    ST_RDONLY = 0x0001
    vfs = self._GetStatVfs()
    return True if vfs.f_flag & ST_RDONLY else False

  @property
  def FolderNumberOfEntries(self):
    return len(self.FolderList)


class PhysicalMediumDiskLinux26(BASESTORAGE.PhysicalMedium):
  """tr-140 PhysicalMedium implementation for non-removable disks."""

  CONNECTION_TYPES = frozenset(
      ['USB 1.1', 'USB 2.0', 'IEEE1394', 'IEEE1394b', 'IDE', 'EIDE',
       'ATA/33', 'ATA/66', 'ATA/100', 'ATA/133', 'SATA/150', 'SATA/300',
       'SCSI-1', 'Fast SCSI', 'Fast-Wide SCSI', 'Ultra SCSI',
       'Ultra Wide SCSI', 'Ultra2 SCSI', 'Ultra2 Wide SCSI', 'Ultra3 SCSI',
       'Ultra-320 SCSI', 'Ultra-640 SCSI', 'SSA', 'SSA-40', 'Fibre Channel'])

  def __init__(self, dev, conn_type=None):
    BASESTORAGE.PhysicalMedium.__init__(self)
    self.dev = dev
    self.name = dev
    self.Unexport('Alias')
    # TODO(dgentry) read SMART attribute for PowerOnHours
    self.Unexport('Uptime')
    # TODO(dgentry) What does 'Standby' or 'Offline' mean?
    self.Unexport('Status')

    if conn_type is None:
      # transport is really, really hard to infer programatically.
      # If platform code doesn't provide it, don't try to guess.
      self.Unexport('ConnectionType')
    else:
      # Provide a hint to the platform code: use a valid enumerated string,
      # or define a vendor extension. Don't just make something up.
      # BUGFIX: the old check compared the 1-char slice conn_type[0:1]
      # against the 2-char string 'X_', which could never be True, so
      # legitimate vendor extensions failed the assert.
      assert conn_type.startswith('X_') or conn_type in self.CONNECTION_TYPES
      self.conn_type = conn_type

  # TODO(dgentry) need @sessioncache decorator
  def _GetSmartctlOutput(self):
    """Return smartctl info and health output."""
    dev = SLASHDEV + self.dev
    smart = subprocess.Popen([SMARTCTL, '--info', '--health', dev],
                             stdout=subprocess.PIPE)
    out, _ = smart.communicate(None)
    return out

  def GetName(self):
    return self.name

  def SetName(self, value):
    self.name = value

  Name = property(GetName, SetName, None, 'PhysicalMedium.Name')

  @property
  def Vendor(self):
    filename = SYS_BLOCK + '/' + self.dev + '/device/vendor'
    vendor = _ReadOneLine(filename=filename, default='')
    # /sys/block/?da/device/vendor is often 'ATA'. Not useful.
    return '' if vendor == 'ATA' else vendor

  @property
  def Model(self):
    filename = SYS_BLOCK + '/' + self.dev + '/device/model'
    return _ReadOneLine(filename=filename, default='')

  @property
  def SerialNumber(self):
    return _GetFieldFromOutput(prefix='Serial Number:',
                               output=self._GetSmartctlOutput(),
                               default='')

  @property
  def FirmwareVersion(self):
    return _GetFieldFromOutput(prefix='Firmware Version:',
                               output=self._GetSmartctlOutput(),
                               default='')

  @property
  def ConnectionType(self):
    return self.conn_type

  @property
  def Removable(self):
    return False

  @property
  def Capacity(self):
    """Return capacity in Megabytes."""
    filename = SYS_BLOCK + '/' + self.dev + '/size'
    size = _ReadOneLine(filename=filename, default='0')
    try:
      # TODO(dgentry) Do 4k sector drives populate size in 512 byte blocks?
      return int(size) * 512 / 1048576
    except ValueError:
      return 0

  @property
  def SMARTCapable(self):
    capable = _GetFieldFromOutput(prefix='SMART support is: Enab',
                                  output=self._GetSmartctlOutput(),
                                  default=None)
    return True if capable else False

  @property
  def Health(self):
    health = _GetFieldFromOutput(
        prefix='SMART overall-health self-assessment test result:',
        output=self._GetSmartctlOutput(),
        default='')
    if health == 'PASSED':
      return 'OK'
    elif health.find('FAIL') >= 0:
      return 'Failing'
    else:
      return 'Error'

  @property
  def HotSwappable(self):
    filename = SYS_BLOCK + '/' + self.dev + '/removable'
    removable = _ReadOneLine(filename=filename, default='0').strip()
    return False if removable == '0' else True


class FlashSubVolUbiLinux26(BASESTORAGE.X_CATAWAMPUS_ORG_FlashMedia.SubVolume):
  """Catawampus Storage Flash SubVolume implementation for UBI volumes."""

  def __init__(self, ubivol):
    BASESTORAGE.X_CATAWAMPUS_ORG_FlashMedia.SubVolume.__init__(self)
    self.ubivol = ubivol

  @property
  def DataMBytes(self):
    bytesiz = IntFromFile(os.path.join(SYS_UBI, self.ubivol, 'data_bytes'))
    return int(bytesiz / 1024 / 1024)

  @property
  def Name(self):
    return _ReadOneLine(os.path.join(SYS_UBI, self.ubivol, 'name'),
                        self.ubivol)

  @property
  def Status(self):
    corr = IntFromFile(os.path.join(SYS_UBI, self.ubivol, 'corrupted'))
    return 'OK' if corr == 0 else 'Corrupted'


class FlashMediumUbiLinux26(BASESTORAGE.X_CATAWAMPUS_ORG_FlashMedia):
  """Catawampus Storage FlashMedium implementation for UBI volumes."""

  def __init__(self, ubiname):
    BASESTORAGE.X_CATAWAMPUS_ORG_FlashMedia.__init__(self)
    self.ubiname = ubiname
    self.SubVolumeList = {}
    num = 0
    # Probe up to 128 subvolumes named '<ubiname>_<i>' under sysfs; gaps in
    # the numbering are tolerated (missing entries raise OSError on stat).
    for i in range(128):
      subvolname = ubiname + '_' + str(i)
      try:
        if os.stat(os.path.join(SYS_UBI, self.ubiname, subvolname)):
          self.SubVolumeList[str(num)] = FlashSubVolUbiLinux26(subvolname)
          num += 1
      except OSError:
        pass

  @property
  def BadEraseBlocks(self):
    return IntFromFile(os.path.join(SYS_UBI, self.ubiname, 'bad_peb_count'))

  @property
  def CorrectedErrors(self):
    mtdnum = IntFromFile(os.path.join(SYS_UBI, self.ubiname, 'mtd_num'))
    ecc = GETMTDSTATS(os.path.join(SLASHDEV, 'mtd' + str(mtdnum)))
    return ecc.corrected

  @property
  def EraseBlockSize(self):
    return IntFromFile(os.path.join(SYS_UBI, self.ubiname, 'eraseblock_size'))

  @property
  def IOSize(self):
    return IntFromFile(os.path.join(SYS_UBI, self.ubiname, 'min_io_size'))

  @property
  def MaxEraseCount(self):
    return IntFromFile(os.path.join(SYS_UBI, self.ubiname, 'max_ec'))

  @property
  def SubVolumeNumberOfEntries(self):
    return len(self.SubVolumeList)

  @property
  def Name(self):
    return self.ubiname

  @property
  def ReservedEraseBlocks(self):
    return IntFromFile(os.path.join(SYS_UBI, self.ubiname, 'reserved_for_bad'))

  @property
  def TotalEraseBlocks(self):
    return IntFromFile(os.path.join(SYS_UBI, self.ubiname,
                                    'total_eraseblocks'))

  @property
  def UncorrectedErrors(self):
    mtdnum = IntFromFile(os.path.join(SYS_UBI, self.ubiname, 'mtd_num'))
    ecc = GETMTDSTATS(os.path.join(SLASHDEV, 'mtd' + str(mtdnum)))
    return ecc.failed


class CapabilitiesNoneLinux26(BASESTORAGE.Capabilities):
  """Trivial tr-140 StorageService.Capabilities, all False."""

  def __init__(self):
    BASESTORAGE.Capabilities.__init__(self)

  @property
  def FTPCapable(self):
    return False

  @property
  def HTTPCapable(self):
    return False

  @property
  def HTTPSCapable(self):
    return False

  @property
  def HTTPWritable(self):
    return False

  @property
  def SFTPCapable(self):
    return False

  @property
  def SupportedFileSystemTypes(self):
    """Returns possible filesystems.

    Parses /proc/filesystems, omit any defined as uninteresting in
    _IsSillyFileSystem(), and return the rest.

    Returns:
      a string of comma-separated filesystem types.
    """
    fslist = set()
    # 'with' closes the file; the previous version leaked the handle.
    with open(PROC_FILESYSTEMS) as f:
      for line in f:
        if line.find('nodev') >= 0:
          # rule of thumb to skip internal, non-interesting filesystems
          continue
        fstype = line.strip()
        if _IsSillyFilesystem(fstype):
          continue
        fslist.add(_FsType(fstype))
    return ','.join(sorted(fslist, key=str.lower))

  @property
  def SupportedNetworkProtocols(self):
    return ''

  @property
  def SupportedRaidTypes(self):
    return ''

  @property
  def VolumeEncryptionCapable(self):
    return False


class StorageServiceLinux26(BASESTORAGE):
  """Implements a basic tr-140 for Linux 2.6-ish systems.

  This class implements no network file services, it only exports
  the LogicalVolume information.
  """

  def __init__(self):
    BASESTORAGE.__init__(self)
    self.Capabilities = CapabilitiesNoneLinux26()
    self.Unexport('Alias')
    self.Unexport(objects='NetInfo')
    self.Unexport(objects='NetworkServer')
    self.Unexport(objects='FTPServer')
    self.Unexport(objects='SFTPServer')
    self.Unexport(objects='HTTPServer')
    self.Unexport(objects='HTTPSServer')
    self.PhysicalMediumList = {}
    self.StorageArrayList = {}
    # LogicalVolumes are discovered on demand by re-reading /proc/mounts.
    self.LogicalVolumeList = tr.core.AutoDict(
        'LogicalVolumeList', iteritems=self.IterLogicalVolumes,
        getitem=self.GetLogicalVolumeByIndex)
    self.UserAccountList = {}
    self.UserGroupList = {}
    self.X_CATAWAMPUS_ORG_FlashMediaList = {}

  @property
  def Enable(self):
    # TODO(dgentry): tr-140 says this is supposed to be writable
    return True

  @property
  def PhysicalMediumNumberOfEntries(self):
    return len(self.PhysicalMediumList)

  @property
  def StorageArrayNumberOfEntries(self):
    return len(self.StorageArrayList)

  @property
  def LogicalVolumeNumberOfEntries(self):
    return len(self.LogicalVolumeList)

  @property
  def UserAccountNumberOfEntries(self):
    return len(self.UserAccountList)

  @property
  def UserGroupNumberOfEntries(self):
    return len(self.UserGroupList)

  @property
  def X_CATAWAMPUS_ORG_FlashMediaNumberOfEntries(self):
    return len(self.X_CATAWAMPUS_ORG_FlashMediaList)

  def _ParseProcMounts(self):
    """Return list of (mount point, filesystem type) tuples."""
    mounts = dict()
    try:
      f = open(PROC_MOUNTS)
    except IOError:
      return []
    # 'with f' closes the handle once parsing finishes (was leaked before).
    with f:
      for line in f:
        fields = line.split()
        # ex: /dev/mtdblock9 / squashfs ro,relatime 0 0
        if len(fields) < 6:
          continue
        fsname = fields[0]
        mountpoint = fields[1]
        fstype = fields[2]
        if fsname == 'none' or _IsSillyFilesystem(fstype):
          continue
        mounts[mountpoint] = _FsType(fstype)
    return sorted(mounts.items())

  def GetLogicalVolume(self, fstuple):
    """Get an LogicalVolume object for a mounted filesystem."""
    (mountpoint, fstype) = fstuple
    return LogicalVolumeLinux26(mountpoint, fstype)

  def IterLogicalVolumes(self):
    """Retrieves a list of all mounted filesystems."""
    fstuples = self._ParseProcMounts()
    for idx, fstuple in enumerate(fstuples):
      yield idx, self.GetLogicalVolume(fstuple)

  def GetLogicalVolumeByIndex(self, index):
    fstuples = self._ParseProcMounts()
    if index >= len(fstuples):
      raise IndexError('No such object LogicalVolume.{0}'.format(index))
    return self.GetLogicalVolume(fstuples[index])


def main():
  pass

if __name__ == '__main__':
  main()
Long range relative positioning of offshore support vessels and drilling rigs is more reliable and effective with a new sensor unit. Kongsberg Maritime introduced the XPR 100 microwave-based solution for DP applications that require long-range relative positioning in 2018 to enhance safety and reliability even in harsh weather. For this innovation, Kongsberg was awarded Offshore Support Journal’s Dynamic Positioning Award during a gala dinner presentation in London. This award, which is sponsored by Damen Shipyards, recognises the developer of an innovative DP product or system, or contractor responsible for an especially innovative application of DP on a project. It was presented to Kongsberg during the Annual Offshore Support Journal Conference in London. "This award recognises what we have done in developing sensors for DP operations for years to come," said Kongsberg Maritime's Vidar Bjorkedal. Kongsberg’s XPR 100 provides precision readings, with high bearing accuracy, up to a distance of 10 km from a target, which introduces a new level of versatility for diverse DP operations. It is compact and lightweight, has no moving parts and operates in the most extreme weather conditions. It operates in the 9.2-9.3 GHz band and each lightweight panel has an opening angle of 100°. XPR 100 can utilise several sensor panels, which can be deployed on suitable locations on the vessel dependent on the construction and operation. This provides an extended operational area and avoids blind angles. Its performance and operation can be configured and monitored using application software, which enables the sensors to interface with remote systems via serial lines or an Ethernet-based network. XPR 100 is designed to fill the need specified by IMO for DP-classed vessels. XPR 100 has an in-built system for testing, which means it can be verified before arriving at the location, reducing the risk of costly vessel DP system downtime. 
Kongsberg said XPR 100 features a highly intuitive human-machine interface that enables operators to assess the quality of vessel positioning rapidly and effectively during operations. Operators can select between a set of colour palettes and night display for operations under different light conditions. Kongsberg won the award against stiff competition from the International Marine Contractors Association (IMCA) and ABB Marine. IMCA was nominated for its DP station keeping events database and detailed case studies to ensure that lessons learned from industry incidents and accidents can be disseminated throughout the industry. ABB was nominated for its Ability Marine Pilot Control DP system. This fulfils the same role and functionality as a traditional DP system, while allowing for simplified operation through a touchscreen-based interface.
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy

import numpy as np

from marshmallow import ValidationError

from polyaxon.polyflow import (
    V1HpChoice,
    V1HpGeomSpace,
    V1HpLinSpace,
    V1HpLogNormal,
    V1HpLogSpace,
    V1HpLogUniform,
    V1HpNormal,
    V1HpPChoice,
    V1HpQLogNormal,
    V1HpQLogUniform,
    V1HpQNormal,
    V1HpQUniform,
    V1HpRange,
    V1HpUniform,
)
from polyaxon.polyflow.matrix.params import pchoice
from polyaxon.polytune.matrix import dist


def space_sample(value, size, rand_generator):
    """Draw `size` samples from the discrete set `value`.

    A size of 1 is normalised to None so a scalar (not a 1-element
    array) is returned.  Falls back to a single random pick when
    `choice` rejects the input (e.g. `value` holds non-1-D entries).
    """
    size = None if size == 1 else size
    rand_generator = rand_generator or np.random
    try:
        return rand_generator.choice(value, size=size)
    except ValueError:
        idx = rand_generator.randint(0, len(value))
        return value[idx]


def dist_sample(fct, value, size, rand_generator):
    """Sample from distribution function `fct` using kwargs in `value`.

    `value` is deep-copied before the `size`/`rand_generator` keys are
    injected, so the caller's dict is never mutated.
    """
    size = None if size == 1 else size
    rand_generator = rand_generator or np.random
    value = copy.deepcopy(value)
    value["size"] = size
    value["rand_generator"] = rand_generator
    return fct(**value)


def get_length(matrix):
    """Return the number of discrete values in a matrix space.

    Only meaningful for enumerable spaces (choice / pchoice / range /
    linspace / logspace / geomspace); continuous distributions raise
    ValidationError.  NOTE(review): an unrecognised IDENTIFIER falls
    through and returns None implicitly.
    """
    if matrix.IDENTIFIER == V1HpChoice.IDENTIFIER:
        return len(matrix.value)

    if matrix.IDENTIFIER == V1HpPChoice.IDENTIFIER:
        return len(matrix.value)

    if matrix.IDENTIFIER == V1HpRange.IDENTIFIER:
        return len(np.arange(**matrix.value))

    if matrix.IDENTIFIER == V1HpLinSpace.IDENTIFIER:
        return len(np.linspace(**matrix.value))

    if matrix.IDENTIFIER == V1HpLogSpace.IDENTIFIER:
        return len(np.logspace(**matrix.value))

    if matrix.IDENTIFIER == V1HpGeomSpace.IDENTIFIER:
        return len(np.geomspace(**matrix.value))

    if matrix.IDENTIFIER in {
        V1HpUniform.IDENTIFIER,
        V1HpQUniform.IDENTIFIER,
        V1HpLogUniform.IDENTIFIER,
        V1HpQLogUniform.IDENTIFIER,
        V1HpNormal.IDENTIFIER,
        V1HpQNormal.IDENTIFIER,
        V1HpLogNormal.IDENTIFIER,
        V1HpQLogNormal.IDENTIFIER,
    }:
        raise ValidationError("Distribution should not call `length`")


def get_min(matrix):
    """Return the lower bound of the space, or None when undefined.

    None is returned for categorical choices, pchoice, and all normal /
    log-uniform style distributions where a hard minimum is not defined.
    """
    if matrix.IDENTIFIER == V1HpChoice.IDENTIFIER:
        if matrix.is_categorical:
            return None
        return min(to_numpy(matrix))

    if matrix.IDENTIFIER == V1HpPChoice.IDENTIFIER:
        return None

    if matrix.IDENTIFIER in {
        V1HpRange.IDENTIFIER,
        V1HpLinSpace.IDENTIFIER,
        V1HpLogSpace.IDENTIFIER,
        V1HpGeomSpace.IDENTIFIER,
    }:
        return matrix.value.get("start")

    if matrix.IDENTIFIER == V1HpUniform.IDENTIFIER:
        return matrix.value.get("low")

    if matrix.IDENTIFIER in {
        V1HpQUniform.IDENTIFIER,
        V1HpLogUniform.IDENTIFIER,
        V1HpQLogUniform.IDENTIFIER,
        V1HpNormal.IDENTIFIER,
        V1HpQNormal.IDENTIFIER,
        V1HpLogNormal.IDENTIFIER,
        V1HpQLogNormal.IDENTIFIER,
    }:
        return None


def get_max(matrix):
    """Return the upper bound of the space, or None when undefined.

    Mirrors get_min: bounded spaces report their 'stop' / 'high' value,
    everything else returns None.
    """
    if matrix.IDENTIFIER == V1HpChoice.IDENTIFIER:
        if matrix.is_categorical:
            return None
        return max(to_numpy(matrix))

    if matrix.IDENTIFIER == V1HpPChoice.IDENTIFIER:
        return None

    if matrix.IDENTIFIER in {
        V1HpRange.IDENTIFIER,
        V1HpLinSpace.IDENTIFIER,
        V1HpLogSpace.IDENTIFIER,
        V1HpGeomSpace.IDENTIFIER,
    }:
        return matrix.value.get("stop")

    if matrix.IDENTIFIER == V1HpUniform.IDENTIFIER:
        return matrix.value.get("high")

    if matrix.IDENTIFIER in {
        V1HpQUniform.IDENTIFIER,
        V1HpLogUniform.IDENTIFIER,
        V1HpQLogUniform.IDENTIFIER,
        V1HpNormal.IDENTIFIER,
        V1HpQNormal.IDENTIFIER,
        V1HpLogNormal.IDENTIFIER,
        V1HpQLogNormal.IDENTIFIER,
    }:
        return None


def to_numpy(matrix):
    """Materialise an enumerable space as a sequence / numpy array.

    Distributions (including pchoice) cannot be enumerated and raise
    ValidationError directing callers to `sample` instead.
    """
    if matrix.IDENTIFIER == V1HpChoice.IDENTIFIER:
        return matrix.value

    if matrix.IDENTIFIER == V1HpPChoice.IDENTIFIER:
        raise ValidationError(
            "Distribution should not call `to_numpy`, "
            "instead it should call `sample`."
        )

    if matrix.IDENTIFIER == V1HpRange.IDENTIFIER:
        return np.arange(**matrix.value)

    if matrix.IDENTIFIER == V1HpLinSpace.IDENTIFIER:
        return np.linspace(**matrix.value)

    if matrix.IDENTIFIER == V1HpLogSpace.IDENTIFIER:
        return np.logspace(**matrix.value)

    if matrix.IDENTIFIER == V1HpGeomSpace.IDENTIFIER:
        return np.geomspace(**matrix.value)

    if matrix.IDENTIFIER in {
        V1HpUniform.IDENTIFIER,
        V1HpQUniform.IDENTIFIER,
        V1HpLogUniform.IDENTIFIER,
        V1HpQLogUniform.IDENTIFIER,
        V1HpNormal.IDENTIFIER,
        V1HpQNormal.IDENTIFIER,
        V1HpLogNormal.IDENTIFIER,
        V1HpQLogNormal.IDENTIFIER,
    }:
        raise ValidationError(
            "Distribution should not call `to_numpy`, "
            "instead it should call `sample`."
        )


def sample(matrix, size=None, rand_generator=None):
    """Draw `size` samples from any matrix space.

    Enumerable spaces are materialised via to_numpy and sampled with
    space_sample; continuous distributions dispatch to the matching
    helper in `dist`.  NOTE(review): an unrecognised IDENTIFIER falls
    through and returns None implicitly.
    """
    size = None if size == 1 else size

    if matrix.IDENTIFIER == V1HpChoice.IDENTIFIER:
        return space_sample(
            value=to_numpy(matrix), size=size, rand_generator=rand_generator
        )

    if matrix.IDENTIFIER == V1HpPChoice.IDENTIFIER:
        return pchoice(values=matrix.value, size=size, rand_generator=rand_generator)

    if matrix.IDENTIFIER == V1HpRange.IDENTIFIER:
        return space_sample(
            value=to_numpy(matrix), size=size, rand_generator=rand_generator
        )

    if matrix.IDENTIFIER == V1HpLinSpace.IDENTIFIER:
        return space_sample(
            value=to_numpy(matrix), size=size, rand_generator=rand_generator
        )

    if matrix.IDENTIFIER == V1HpLogSpace.IDENTIFIER:
        return space_sample(
            value=to_numpy(matrix), size=size, rand_generator=rand_generator
        )

    if matrix.IDENTIFIER == V1HpGeomSpace.IDENTIFIER:
        return space_sample(
            value=to_numpy(matrix), size=size, rand_generator=rand_generator
        )

    if matrix.IDENTIFIER == V1HpUniform.IDENTIFIER:
        return dist_sample(dist.uniform, matrix.value, size, rand_generator)

    if matrix.IDENTIFIER == V1HpQUniform.IDENTIFIER:
        return dist_sample(dist.quniform, matrix.value, size, rand_generator)

    if matrix.IDENTIFIER == V1HpLogUniform.IDENTIFIER:
        return dist_sample(dist.loguniform, matrix.value, size, rand_generator)

    if matrix.IDENTIFIER == V1HpQLogUniform.IDENTIFIER:
        return dist_sample(dist.qloguniform, matrix.value, size, rand_generator)

    if matrix.IDENTIFIER == V1HpNormal.IDENTIFIER:
        return dist_sample(dist.normal, matrix.value, size, rand_generator)

    if matrix.IDENTIFIER == V1HpQNormal.IDENTIFIER:
        return dist_sample(dist.qnormal, matrix.value, size, rand_generator)

    if matrix.IDENTIFIER == V1HpLogNormal.IDENTIFIER:
        return dist_sample(dist.lognormal, matrix.value, size, rand_generator)

    if matrix.IDENTIFIER == V1HpQLogNormal.IDENTIFIER:
        return dist_sample(dist.qlognormal, matrix.value, size, rand_generator)
Would you love a “smooth & delectable treat for your taste buds,” that also happens to be marijuana-infused? We thought so! Dixie Elixirs’ Krispy Kraken chocolate bar is amazing. The mixture of crispy rice and 34% cacao milk chocolate makes for a delectable treat – with benefits: The Kraken comes with THC. The two THC potentcies available are 84mg and 180mg. The Kraken tastes like a high-end chocolate bar that any chocolate connoisseur would be delighted to consume. Within 60 minutes, the Kraken’s THC begin to take effect and can provide up to a few hours of a calming body high that will have you chillin’ and singin’ Bob Marley tunes. As always, Dixie Elixirs has patients in mind: The Kraken breaks into 7mg pieces to provide reliable and optimal dosing, and it has a unique child-proof package. This yummy chocolate bar will be loved by anyone lucky enough to try it! Contact your local dispensary to see if they carry Dixie Elixirs’ chocolate bars.
def dot(a, b):
    """Component-wise (Hadamard) product of two colors.

    :param RaytracerColor a: color triple
    :param RaytracerColor b: color triple
    :returns: RaytracerColor with each channel multiplied component-wise
    """
    return RaytracerColor(a.r * b.r, a.g * b.g, a.b * b.b)


class RaytracerColor:
    '''
        represents an RGB color with float components, nominally in [0, 1]
    '''
    def __init__(self, r=0.0, g=0.0, b=0.0):
        '''
        :param float r: red color component between 0 and 1
        :param float g: green color component between 0 and 1
        :param float b: blue color component between 0 and 1
        '''
        self._r = r
        self._g = g
        self._b = b

    @property
    def r(self):
        return self._r

    @property
    def g(self):
        return self._g

    @property
    def b(self):
        return self._b

    def __add__(self, a):
        '''
        Component-wise addition

        :param RaytracerColor a: color triple to add
        :returns: new RaytracerColor, channel-wise sum of self and a
        '''
        return RaytracerColor(self.r + a.r, self.g + a.g, self.b + a.b)

    def __mul__(self, a):
        '''
        Scalar multiplication

        :param float a: scalar
        :returns: new RaytracerColor with each channel scaled by a
        '''
        return RaytracerColor(self.r * a, self.g * a, self.b * a)

    def __imul__(self, a):
        # In-place scalar multiplication; mutates this instance.
        self._r *= a
        self._g *= a
        self._b *= a
        return self

    def __eq__(self, a):
        # BUGFIX: __ne__ was defined without __eq__, so `==` fell back to
        # identity comparison while `!=` compared component values. Define
        # both consistently in terms of value equality.
        return self._r == a.r and self._g == a.g and self._b == a.b

    def __ne__(self, a):
        return not self.__eq__(a)

    def __hash__(self):
        # Defining __eq__ would otherwise set __hash__ to None; keep
        # instances hashable, consistent with value equality.
        return hash((self._r, self._g, self._b))

    def get_color(self):
        # Return the color as an (r, g, b) tuple of 0-255 ints.
        return (int(self._r*255), int(self._g*255), int(self._b*255))

    @property
    def sanitized(self):
        # A copy with every component clamped into [0, 1].
        return RaytracerColor(min(max(0.0, self._r), 1),
                              min(max(0.0, self._g), 1),
                              min(max(0.0, self._b), 1))
My current project is focused on the development of novel statistical tools for the analysis of epigenetic data, namely DNA methylation. A key research problem within this area is the estimation of cell composition in individual mixed samples, for the purposes of adjustment in Epigenome-wide Association Studies (EWAS). My current research in this area has focussed on supervised and unsupervised approaches to address this problem. This work forms part of an active collaboration with the Institute for Health and Biomedical Innovation (IHBI) and the Wellcome Trust Centre for Human Genetics at the University of Oxford. My previous research has been conducted in collaboration with the Cooperative Research Centre for Spatial Information http://www.crcsi.com.au . During this project, I developed spatial modelling approaches for the analysis of health services data, with a focus on the prediction of service-specific catchments and the representation of excess spatial variation in utilisation rates. White, N. M., Mengersen, K. L. (2015) Predicting health program participation: a gravity-based, hierarchical modelling approach. Journal of the Royal Statistical Society: Series C (Applied Statistics), 65(1), 145-166. Baker, J., White, N., & Mengersen, K. (2015). Spatial modelling of type II diabetes outcomes: a systematic review of approaches used. Royal Society Open Science, 2(6), 140460. Cramb, S. M., Baade, P. D., White, N. M., Ryan, L. M., & Mengersen, K. L. (2015). Inferring lung cancer risk factor patterns through joint Bayesian spatio-temporal analysis. Cancer epidemiology, 39(3), 430-439. Wiemers, P., Marney, L., Muller, R., Brandon, M., Kuchu, P., Kuhlar, K., Uchime, C., Kang, D., White, N., Greenup, R., Fraser, J., Yadav, S., Tam, R. (2014) Cardiac Surgery in Indigenous Australians–How Wide is ‘The Gap’?. Heart, Lung and Circulation, 23(3), 265-272. Wiemers, P., Marney, L., White, N., Hustig, A., Tan, W., Cheng, C. S., Kang, D., Yadav, S., Fraser, J., Tam, R.
(2014) Midterm Results of Coronary Artery Bypass Grafting in an Australian Indigenous Population. Heart, Lung and Circulation, 23: e34-e35. Baker, J., White, N., Mengersen, K. (2014). Missing in space: an evaluation of imputation methods for missing data in spatial analysis of risk factors for type II diabetes. International journal of health geographics 13: 47. White, N., Johnson, H., Silburn, P. Rousseau, J., Mengersen, K. (2013) Hidden Markov models for complex stochastic processes: A case study in electrophysiology. In Case Studies in Bayesian Statistical Modelling and Analysis (Eds. Alston, C., Mengersen, K. and Pettitt, A.) Wiley Series in Probability and Statistics. Rolfe, M., White, N. and Chen, C. (2013) Latent class models in medicine. In Case Studies in Bayesian Statistical Modelling and Analysis (Eds. Alston, C., Mengersen, K. and Pettitt, A.) Wiley Series in Probability and Statistics. Earnest, A., Cramb, S. White, N. (2013) Disease mapping using Bayesian hierarchical models. In Case Studies in Bayesian Statistical Modelling and Analysis (Eds. Alston, C., Mengersen, K. and Pettitt, A.) Wiley Series in Probability and Statistics. White, N., Johnson, H., Silburn, P., Mellick, G., Dissanayaka, N., Mengersen, K. (2012) Probabilistic subgroup identification using Bayesian finite mixture modelling: A case study in Parkinson’s disease phenotype identification. Statistical Methods in Medical Research, 21: 563-583. White, N., Johnson, H., Silburn, P., Mengersen, K. (2012) Dirichlet Process mixture models for unsupervised clustering of symptoms in Parkinson’s disease. Journal of Applied Statistics, 39: 2363-2377. White, N., Benton, M., Lea R., Griffiths, L., Mengersen, K. Cellular heterogeneity in DNA methylation: A new approach for estimation of cellular proportions in whole blood. Invited talk at B3 2015: Big Biology and Bioinformatics.
import requests


class Solus_Enduser_API:
    """Thin client for the SolusVM end-user (client) API.

    Every call issues an HTTPS GET against ``/api/client/command.php`` on the
    SolusVM master and parses the tag-delimited response into a flat dict.
    """

    def __init__(self, url, api_hash, api_key):
        # url: hostname of the SolusVM master (no scheme prefix).
        # api_hash / api_key: per-server API credentials.
        self.url = url
        self.api_hash = api_hash
        self.api_key = api_key
        # Base query parameters; NOTE: sQuery mutates this dict in place,
        # adding/overwriting the 'action' key on every non-extra call.
        self.values = ({'rdtype': 'json', 'hash': self.api_hash, 'key': self.api_key})

    def to_json(self, data):
        """Parse the API's ``<key>value</key>...`` response into a dict.

        Splits on tag boundaries and pairs up alternating tokens as
        key/value. Despite the name this parses tag-delimited text, not JSON.
        """
        # Insert a marker between adjacent tags so the split below keeps
        # tag names and values as separate tokens.
        data = data.replace('><', '>...<')
        data = data.split('>')
        result = []
        for i in data:
            i = i.replace('<', '')
            i = i.replace('...', '')
            # Drop closing-tag fragments ('/key' -> '').
            i = i.split('/')[0]
            result.append(i)
        # NOTE(review): drops the last token when the count is even --
        # presumably to keep the key/value pairing below aligned; confirm.
        if len(result) % 2 == 0:
            result.pop()
        # Pair alternating tokens as {key: value}.
        result = {result[i]: result[i+1] for i in range(0, len(result) - 1, 2)}
        return result

    def sQuery(self, url, api_hash, api_key, values, action, extra=''):
        """Perform one API request and return the raw response body.

        With no ``extra``, ``values`` is updated in place with the auth
        parameters plus the requested ``action`` and sent as query params.
        """
        if not extra:
            values.update({'rdtype': 'json', 'hash': api_hash, 'key': api_key, 'action': action})
            response = requests.get('https://'+url+'/api/client/command.php', params=values, timeout=50)
        else:
            # NOTE(review): this branch ignores ``values`` and ``action`` and
            # always requests action=info -- confirm that is intended.
            response = requests.get('https://'+url+'/api/client/command.php?key=' + api_key + "&hash=" + api_hash + "&action=info&" + extra, timeout=50)
        return response.text

    def get_status(self):
        # Issue the 'status' action and return the parsed response.
        data = self.sQuery(self.url, self.api_hash, self.api_key, self.values, action='status')
        return self.to_json(data)

    def get_info(self):
        # Issue the 'info' action and return the parsed response.
        data = self.sQuery(self.url, self.api_hash, self.api_key, self.values, action='info')
        return self.to_json(data)

    def get_full_info(self):
        # 'info' action with extra flags asking for IPs, disk, memory and
        # bandwidth details.
        extra = 'ipaddr=true&hdd=true&mem=true&bw=true'
        data = self.sQuery(self.url, self.api_hash, self.api_key, self.values, action='info', extra=extra)
        return self.to_json(data)

    def server_reboot(self):
        # Issue the 'reboot' action and return the parsed response.
        data = self.sQuery(self.url, self.api_hash, self.api_key, self.values, action='reboot')
        return self.to_json(data)

    def server_shutdown(self):
        # Issue the 'shutdown' action and return the parsed response.
        data = self.sQuery(self.url, self.api_hash, self.api_key, self.values, action='shutdown')
        return self.to_json(data)

    def server_boot(self):
        # Issue the 'boot' action and return the parsed response.
        data = self.sQuery(self.url, self.api_hash, self.api_key, self.values, action='boot')
        return self.to_json(data)
Where you live can influence greatly the educational outcomes of your children. Some education observers go so far as to say: “The quality of education is determined by your postal code.” In school systems with strict student attendance zones, it is, for all intents and purposes, the iron law of public education. Not to say your mailing address and which school your child goes to determines all. Students, whatever their background, can overcome significant disadvantages. Former U.S. president Barack Obama, for one, urged parents to empower children rather than encourage excuses. How a school district identifies struggling schools and how it responds is what matters. Simply using family income to explain away differences in performance and outcomes isn’t good enough. Neither is ignoring those stark realities. It only serves to reinforce the ingrained assumption, contribute to lowered academic expectations, and possibly influence school leadership, student behaviour standards, teacher attitudes, and parent-school relations. While there are risks involved in comparing school performance, parents and the public are entitled to know more about how students in our public schools are performing in relation to socio-economic factors influencing student success. The Halifax Regional Centre for Education (formerly the Halifax regional school board) has 47,770 students in 135 schools. Student achievement and attainment results over the past decade, from 2008-09 to 2015-16, have been published in school-by-school community reports and, when aggregated, provide clear evidence of how schools are performing in Halifax region. Former school board superintendent Carole Olsen introduced the existing accountability system in 2008-09 along with a new “good schools to great schools” mission that set a far more specific goal: “Every student will learn; every school will improve” within five years. 
Following the release of aggregated board-wide data, the school board produced school-by-school accountability reports, made freely available to not only the school advisory councils, but to all parents in each school. School-by-school reporting was critical to the whole project. “Knowing how each school is doing is the first important step in making sure resources and support reach the schools — and the students — that need them the most,” Olsen declared. The school year 2008-09 provided the benchmark for the board and for this series of articles taking stock of student achievement and school-by-school performance over the past decade. The first set of student results in reading and math demonstrated that Halifax-area student scores were comparable to other Canadian school systems, but there was room for improvement. In Grade 2 reading, the system-wide target was that 77 per cent of all students would meet established board standards. Only 25 out of some 91 schools (27.5 per cent) met or exceeded the established target. While Grade 2 and Grade 5 mathematics students performed better, problems surfaced at the Grade 8 level, where two out of three schools (67.5 per cent) failed to meet the board’s standard, struggling with measurement, whole number operations and problem-solving. In September 2012, Olsen was appointed the province’s deputy minister of education. The robust commitment to school-by-school improvement and demonstrably improved standards in reading and mathematics faltered and the school community reports, appended routinely as PDFs to school websites, attracted little attention. Twenty of its 84 elementary schools were identified as struggling and designated as priority schools requiring more attention, resources, and extra support programs to close the student achievement gap. 
The focus changed once again, when the 2017-18 provincial results in Grade 6 math and literacy revealed that struggling students came disproportionately from marginalized communities. So a school improvement project focused on schools drawing from lower-income neighbourhoods was transformed into one addressing differences along ethno-racial lines. As this series continues on Thursday, we will see how an in-depth comparison of school-by-school performance over the past decade runs smack up against “postal code education” assumptions, raises critical questions and yields some startling results. Paul W. Bennett is director of Schoolhouse Institute, Halifax and author of Raising the Bar and Closing the Gap: Schools, Income and Student Success.
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import subprocess
import os
import tempfile
import sys
import time
import signal

import yaml

argp = argparse.ArgumentParser(
    description='Runs a DNS server for LB interop tests')
argp.add_argument('-l', '--grpclb_ips', default=None, type=str,
                  help='Comma-separated list of IP addresses of balancers')
argp.add_argument('-f', '--fallback_ips', default=None, type=str,
                  help='Comma-separated list of IP addresses of fallback servers')
argp.add_argument('-c', '--cause_no_error_no_data_for_balancer_a_record',
                  default=False, action='store_const', const=True,
                  help=('Used for testing the case in which the grpclb '
                        'balancer A record lookup results in a DNS NOERROR response '
                        'but with no ANSWER section i.e. no addresses'))
args = argp.parse_args()

# Build one A record per balancer IP.
# ROBUSTNESS FIX: tolerate the flag being omitted (default None) instead of
# crashing on None.split(',').
balancer_records = []
grpclb_ips = (args.grpclb_ips or '').split(',')
if grpclb_ips[0]:
    for ip in grpclb_ips:
        balancer_records.append({
            'TTL': '2100',
            'data': ip,
            'type': 'A',
        })

# Build one A record per fallback-server IP (same None guard as above).
fallback_records = []
fallback_ips = (args.fallback_ips or '').split(',')
if fallback_ips[0]:
    for ip in fallback_ips:
        fallback_records.append({
            'TTL': '2100',
            'data': ip,
            'type': 'A',
        })

# The SRV record points gRPC clients at the balancer; the bare 'server'
# A records are the fallback path.
records_config_yaml = {
    'resolver_tests_common_zone_name': 'test.google.fr.',
    'resolver_component_tests': [{
        'records': {
            '_grpclb._tcp.server': [
                {
                    'TTL': '2100',
                    'data': '0 0 12000 balancer',
                    'type': 'SRV'
                },
            ],
            'balancer': balancer_records,
            'server': fallback_records,
        }
    }]
}
if args.cause_no_error_no_data_for_balancer_a_record:
    balancer_records = records_config_yaml[
        'resolver_component_tests'][0]['records']['balancer']
    assert not balancer_records
    # Insert a TXT record at the balancer.test.google.fr. domain.
    # This TXT record won't actually be resolved or used by gRPC clients;
    # inserting this record is just a way get the balancer.test.google.fr.
    # A record queries to return NOERROR DNS responses that also have no
    # ANSWER section, in order to simulate this failure case.
    balancer_records.append({
        'TTL': '2100',
        'data': 'arbitrary string that wont actually be resolved',
        'type': 'TXT',
    })

# Generate the actual DNS server records config file.
# SECURITY FIX: tempfile.mktemp() is race-prone (the name can be claimed by
# another process before we open it); mkstemp() atomically creates the file
# and hands back an open descriptor.
records_config_fd, records_config_path = tempfile.mkstemp()
with os.fdopen(records_config_fd, 'w') as records_config_generated:
    records_config_generated.write(yaml.dump(records_config_yaml))

# Echo the generated config to stderr for debugging test runs.
with open(records_config_path, 'r') as records_config_generated:
    sys.stderr.write('===== DNS server records config: =====\n')
    sys.stderr.write(records_config_generated.read())
    sys.stderr.write('======================================\n')

# Run the DNS server
# Note that we need to add the extra
# A record for metadata.google.internal in order for compute engine
# OAuth creds and ALTS creds to work.
# TODO(apolcyn): should metadata.google.internal always resolve
# to 169.254.169.254?
subprocess.check_output([
    '/var/local/git/grpc/test/cpp/naming/utils/dns_server.py',
    '--port=53',
    '--records_config_path', records_config_path,
    '--add_a_record=metadata.google.internal:169.254.169.254',
])
The Arizona Grand Resort & Spa is conveniently located less than 10 minutes from Sky Harbor International Airport in Phoenix, Arizona. The resort is easily accessible via AZ-153 and AZ-143 and numerous other major freeways. Please consult our turn-by-turn directions below when mapping your travel route. Take the AZ-153 S towards I-10/AZ-143. Turn SLIGHT LEFT onto EAST UNIVERSITY DR. Turn RIGHT onto AZ-143 S/HOHOKAM EXPWY. AZ-143 S turns into 48th ST. Take 48th St. to BASELINE RD. Turn LEFT onto BASELINE RD. Turn RIGHT onto S. Arizona Grand Parkway. Take US-60 W towards PHOENIX. Take the I-10 E exit on the left towards TUCSON. Turn RIGHT onto W BASELINE RD. Turn LEFT onto S. Arizona Grand Parkway. Exit the Car Rental Center and turn left onto Sky Harbor Circle South. Turn RIGHT onto Buckeye Road. Stay in the right lane and turn right onto the I-10 East Freeway. Stay on I-10 East until exit 155, Baseline Road. Turn RIGHT onto Baseline Road. Turn LEFT onto S. Arizona Grand Parkway Phoenix.
"""Timeout mechanism.""" from client import exceptions import threading import traceback def timed(timeout, fn, args=(), kargs={}): """Evaluates expr in the given frame. PARAMETERS: fn -- function; Python function to be evaluated args -- tuple; positional arguments for fn kargs -- dict; keyword arguments for fn timeout -- int; number of seconds before timer interrupt RETURN: Result of calling fn(*args, **kargs). RAISES: Timeout -- if thread takes longer than timeout to execute Error -- if calling fn raises an error, raise it """ submission = __ReturningThread(fn, args, kargs) submission.start() submission.join(timeout) if submission.is_alive(): raise exceptions.Timeout(timeout) if submission.error is not None: raise submission.error return submission.result class __ReturningThread(threading.Thread): """Creates a daemon Thread with a result variable.""" def __init__(self, fn, args, kargs): super().__init__() self.daemon = True self.result = None self.error = None self.fn = fn self.args = args self.kargs = kargs def run(self): try: self.result = self.fn(*self.args, **self.kargs) except Exception as e: e._message = traceback.format_exc(limit=2) self.error = e
Write a 350-word essay describing the accounting equation. Provide examples that show how the components of the accounting equation affect each other and how transactions affect the accounting equation. Complete problem set P4-2A in Ch. 4 of Financial Accounting. Write a 350-word summary on one or two of the media segments you watched this week. Write a 1,050- to 1,400-word paper in which you research a publicly traded corporation that has exhibited global business strategies. Summarize and include the organization’s financial statements. · How are the corporation’s debt securities reported on the financial statements? · How are the corporation’s stock investments reported on the financial statements? · Why would the corporation invest in stocks and debt securities? · What are the corporation’s relative risks and rewards of equity versus debt securities? · What is the difference between equity and debt securities? Use the organization’s financial statements to determine its financial health. Identify examples from the organization’s financial statements to justify the team’s responses. Write a 1250-word paper to summarize the main points in this class. •Your ability to feel confident moving on to more Accounting classes. 1) Which of the following statements is true? 3) Which of the following financial statements is divided into major categories of operating, investing, and financing activities? 12) On July 1 the Fisher Shoe Store paid $15,000 to Acme Realty for 6 months rent beginning July 1. Prepaid Rent was debited for the full amount. 13) Use the following data to determine the total dollar amount of assets to be classified as current assets. 17) Which of the following statements is true with respect to financial statement reporting for all cases when a company changes from one acceptable accounting method to another? 18) Which of the following would be considered a change in accounting principle? 
20) A very small company would have the most difficulty in implementing which of the following internal control activities? 23) Which of the following items on a bank reconciliation would require an adjusting entry on the company’s books? 25) Why do pension and mutual funds invest in debt and equity securities? 26) Which of the following is a debt security? 30) If a parent company has two wholly owned subsidiaries, how many legal and economic entities are there from the viewpoint of the shareholders of the parent company?
import pkg_resources
from requests_aws4auth import AWS4Auth
from elasticsearch import Elasticsearch, RequestsHttpConnection
import json


class Resource:
    """Singleton holder for a shared AWS-authenticated Elasticsearch client."""

    # Singleton instance and its lazily created Elasticsearch connection.
    __instance = None
    __elasticsearch = None

    def __new__(cls, val):
        # Classic singleton: the first construction wins; every later call
        # returns the same object.
        # NOTE(review): ``val`` is stored but never read in this chunk --
        # confirm what callers rely on it for.
        if Resource.__instance is None:
            Resource.__instance = object.__new__(cls)
        Resource.__instance.val = val
        return Resource.__instance

    def connect(self):
        """Return the shared Elasticsearch client, creating it on first use."""
        # Reuse the cached connection when it already exists.
        if self.__elasticsearch is not None:
            return self.__elasticsearch
        # Credentials and host are read from the packaged
        # config/properties.json resource.
        config_package = 'config'
        file_path = 'properties.json'
        properties = pkg_resources.resource_string(config_package, file_path)
        configuration = json.loads(properties)
        # SigV4-sign every request for the AWS-hosted Elasticsearch ('es')
        # service.
        awsauth = AWS4Auth(configuration['access_key'],
                           configuration['secret_key'],
                           configuration['region'], 'es')
        # NOTE: this assignment caches on the instance, shadowing the class
        # attribute of the same name; fine here because the class is a
        # singleton.
        self.__elasticsearch = Elasticsearch(
            hosts=[{'host': configuration['host'], 'port': 443}],
            http_auth=awsauth,
            use_ssl=True,
            verify_certs=True,
            connection_class=RequestsHttpConnection
        )
        return self.__elasticsearch
At Ignite 2018, Microsoft provided a great perspective about how Modern Intranets (using SharePoint Communication Sites) and Collaboration features are becoming a major collaboration platform for many companies, and the investments Microsoft is making to make them better. Some of these cool features were mentioned during the event. We will take a look at many of them in this blog and how we could use it to make Modern Intranets, Modern Team Sites and Microsoft Teams work even better and seamlessly together. If you are looking for a strategy or starter guide for building Modern Intranet using Communication sites, check here. Another reason for writing this blog is that earlier I had posted some of these postings from Microsoft updates in my LinkedIn and Twitter feed but found that information was too scattered and hard to collate, so I thought of combining it all here. Please feel free to use whatever you find useful. 1. New Page designs – It is possible to select different page designs when adding a page, which adds a lot of variety, and additionally provides prebuilt content besides the standard page sections. 2. Audience targeting – Audience targeting provides personalised content to users and can be set on a unified group, active directory group or user. 3. Conversations (New Yammer) web part – This is going to be an updated version of the current Yammer web part. 7. Web part Connections (Dynamic data) – Not exactly clear how this will work yet, but assuming it will be the same as the old web part connections (but better) to be able to pass a property from existing to another web part. 9. Column formatting from edit properties – Now we can add column formatting on the edit pane of the list instead of writing JSON (at least for the simple ones) for it. 10. Custom web parts on list edit web part views – We can have custom web parts on list views, so no need to create custom pages to have custom views for lists. 12.
Modern Teams linked libraries now visible on SharePoint – This allows users to know the content is also accessible through Teams. 13. SharePoint news feed into Microsoft Teams – Now we can publish SharePoint news to a Teams Channel allowing users to view the news content in teams itself. 15. Stream Mobile App and Live Streaming video from SharePoint – With the Stream mobile app to stream videos and keep for offline viewing. Also there is an additional feature to live stream videos from the portal. 17. File Plans, New Labels and Label analytics – File plans allow to manage department, categories, identifiers etc at scale. New labels coming up such as Immutable Record labels and sensitivity labels applied to a site. Finally we will have label analytics to look at usage reports. Above, we saw some of the important and highlighting updates from Ignite 2018. There will be another blog about updates about latest SharePoint Admin Center and Security and Compliance updates, not covered here.
import time

from django.conf import settings

import requests

from grants.sync.helpers import is_txn_done_recently, record_contribution_activity, txn_already_used

# Auth header sent with every ViewBlock API request.
headers = {
    "X-APIKEY" : settings.VIEW_BLOCK_API_KEY
}

# ViewBlock reports ZIL amounts as integers scaled by 10**DECIMALS.
DECIMALS = 12


def find_txn_on_zil_explorer(contribution):
    """Find the on-chain ZIL transaction matching ``contribution``.

    Scans the grant payout address's transactions on ViewBlock and returns
    the hash of the first incoming txn that matches the contributor, the
    exact amount, and the recency/uniqueness checks. Returns None when the
    contribution is not a native-ZIL one or no match is found.
    """
    subscription = contribution.subscription
    grant = subscription.grant
    token_symbol = subscription.token_symbol
    # Only native ZIL contributions on the ZIL tenant are handled here.
    if subscription.tenant != 'ZIL':
        return None
    if token_symbol != 'ZIL':
        return None
    to_address = grant.zil_payout_address
    from_address = subscription.contributor_address
    amount = subscription.amount_per_period
    url = f'https://api.viewblock.io/v1/zilliqa/addresses/{to_address}/txs?network=mainnet'
    response = requests.get(url, headers=headers).json()
    if len(response):
        for txn in response:
            # Match sender/receiver (lower-cased), direction, the exact
            # amount converted from base units, recency (ViewBlock
            # timestamps are in milliseconds), and that this txn hash has
            # not already been attributed to another contribution.
            if (
                txn['from'] == from_address.lower() and
                txn['to'] == to_address.lower() and
                txn['direction'] == 'in' and
                float(txn['value']) / 10 ** DECIMALS == float(amount) and
                is_txn_done_recently(txn['timestamp']/1000) and
                not txn_already_used(txn['hash'], token_symbol)
            ):
                return txn['hash']
    return None


def get_zil_txn_status(txnid, network='mainnet'):
    """Return mining status for a ZIL txn hash, or None for missing/placeholder ids."""
    if not txnid or txnid == "0x0":
        return None
    url = f'https://api.viewblock.io/v1/zilliqa/txs/{txnid}?network={network}'
    view_block_response = requests.get(url, headers=headers).json()
    if view_block_response:
        response = {
            'blockHeight': int(view_block_response['blockHeight']),
            'receiptSuccess': view_block_response['receiptSuccess']
        }
        # A successful receipt is treated as "mined".
        if response['receiptSuccess']:
            response['has_mined'] = True
        else:
            response['has_mined'] = False
        return response
    return None


def sync_zil_payout(contribution):
    """Reconcile a ZIL contribution against the chain.

    Phase 1: if no txn hash is recorded yet, try to discover it on the
    explorer. Phase 2: once a hash is known, mark the contribution
    successful after the txn has been mined.
    """
    time.sleep(0.5)  # to avoid ViewBlock rate limits
    if not contribution.tx_id or contribution.tx_id == '0x0':
        txn = find_txn_on_zil_explorer(contribution)
        if txn:
            contribution.tx_id = txn
            contribution.save()
    if contribution.tx_id and contribution.tx_id != '0x0':
        txn_status = get_zil_txn_status(contribution.tx_id)
        if txn_status and txn_status.get('has_mined'):
            contribution.success = True
            contribution.tx_cleared = True
            contribution.checkout_type = 'zil_std'
            record_contribution_activity(contribution)
            contribution.save()
Reading the blogs of lcamtuf and Chris Evans is really what got me interested in browser security, so I’m always on the lookout for novel cross-domain data theft vectors. Today I’m going to go into the discovery and exploitation of such a bug: A timing attack on Firefox’s document.elementFromPoint and document.caretPositionFromPoint implementations. I was looking at ways to automatically exploit another bug that required user interaction when I noticed elementFromPoint and caretPositionFromPoint on the MDN. Curious as to how they behaved with frames, I did a little testing. caretPositionFromPoint(x,y), however, was returning elements from the page on cbc.ca! But there was a small snag: I couldn’t actually access the CaretPosition’s offsetNode from JS without getting a security exception. It seems that Firefox noticed that offsetNode was being set to an element from a cross-origin document, and wrapped the CaretPosition object so that I couldn’t access any of its members from my document. Great. However, I found I could access offsetNode when it was set to null. offsetNode seems to be set to null when the topmost element at a given point is a button, and that includes scrollbar thumbs. That’s great for us, because knowing the size and location of the frame’s scrollbar thumb tells us how large the framed document is, and also allows us to leak which elements exist on the page. The vertical scrollbar thumb has obviously moved, so we know that an element with an id of Create_a_local_Certificate_Signing_Request_(CSR) exists in the framed document. Knowing the page’s size and whether certain elements are present is nice, but I wanted more. I remembered Paul Stone’s excellent paper about timing attacks on browser renderers and figured a timing attack might help us here. caretPositionFromPoint has to do hit testing on the document to determine what the topmost element is at a given point, and I figured that’s not likely to be a constant time operation. 
It was also clear that hit testing was being performed on cross-origin frame contents, since caretPositionFromPoint was returning elements from them. I guessed that the time it took for a caretPositionFromPoint(x,y) call to return would leak information about the element at (x,y). To test my theory I made a script that runs caretPositionFromPoint(x,y) on a given point 50 times, then stores the median time that the call took to complete. Using the median is important so we can eliminate timing differences due to unrelated factors, like CPU load at the time of the call. // window. getter is slow, apparently. // Run caretPositionFromPoint() NUM_SAMPLES times and store runtime for each call. You can see a number of things in the timing data: the bounding boxes of individual elements, how the lines of text wrap, the position of the bullets in the list, etc. It also seems that even though elementFromPoint doesn’t return elements from the framed document, it still descends into it for its hit testing, so it’s vulnerable to the same timing attack as caretPositionFromPoint. So we can infer quite a bit about the framed document from the timing information, but can we actually steal text from it? Maybe, with a lot of work, depending on the page’s styling. I’d hoped that caretPositionFromPoint’s real purpose (determining what character index the caret should be at for a given point) would yield large enough timing differences to leak the width of individual characters, but that didn’t seem to be the case. Since we can tell how wide a line of text is, we can extract text using a similar method to sirdarckcat’s. First we measure how long the line is, then we make the iframe more narrow to force the text to wrap, then we subtract the new width of the line from the old width, giving us the width of the word that just wrapped.
Since most sites use variable-width fonts (“O” and “i” are different widths on this blog, for example,) many small words have distinct widths that make them easy to pick out. With longer words, there may be a number of valid words with that width, however an attacker may be able to determine what word fits best using the context of the surrounding words. Note that since we need to force text wrapping to get these measurements, it’s harder to steal text from fixed-width pages, or pages that display a horizontal scrollbar instead of wrapping text (like view-source: URIs.) Pages that use fixed-width fonts are also more difficult to analyze because characters do not have distinct widths, we can only determine the number of characters in a word. Note that the last Firefox version these actually work in is 26, if you want to try them out you’ll have to find a download for it. Judging from Robert O’Callahan’s fix, it looks like Firefox was using a general hit testing function that descended cross-document for both elementFromPoint and caretPositionFromPoint. The fix was to disable cross-document descent in the hit testing function when called by either elementFromPoint or caretPositionFromPoint.
import unittest

from troposphere import Retain
from troposphere.logs import LogGroup, Destination


class TestLogs(unittest.TestCase):
    """Unit tests for the troposphere CloudFormation Logs resources."""

    def test_loggroup_deletionpolicy_is_preserved(self):
        # DeletionPolicy is a resource attribute (not a property); make sure
        # it survives serialization to a dict.
        log_group = LogGroup(
            "LogGroupWithDeletionPolicy",
            DeletionPolicy=Retain
        )
        self.assertIn('DeletionPolicy', log_group.to_dict())

    def test_loggroup_retention(self):
        # 7 is a valid retention period; both int and numeric-string forms
        # must be accepted by the validator.
        for days in [7, "7"]:
            LogGroup(
                "LogGroupWithDeletionPolicy",
                RetentionInDays=days,
            )
        # 6 is not a valid retention period; the validator must reject it
        # in both forms.
        for days in [6, "6"]:
            with self.assertRaises(ValueError):
                LogGroup(
                    "LogGroupWithDeletionPolicy",
                    RetentionInDays=days,
                )

    def test_log_destination(self):
        # A fully specified Destination serializes with both its Type and
        # its Properties sections.
        log_destination = Destination(
            'MyLogDestination',
            DestinationName='destination-name',
            RoleArn='role-arn',
            TargetArn='target-arn',
            DestinationPolicy='destination-policy'
        )
        log_destination_json = log_destination.to_dict()
        self.assertIn('Type', log_destination_json)
        self.assertIn('Properties', log_destination_json)


if __name__ == '__main__':
    unittest.main()
LOW KLM'S, only 97,000km's with service history/log books, 2010 Mitsubishi ASX in a 5 Speed Manual. Air conditioning, power steering, p/winds, cd Player, Cruise Control. September 2018 Rego. Drives well. Come in today and test drive the vehicle you desire. EASY FINANCE AVAILABLE with same day approvals. WARRANTY AVAILABLE, periods available between 1-5 years, covering both parts and labour. 24/7 ROADSIDE ASSISTANCE Australia wide available. VEHICLE DELIVERY available Australia wide. OPEN 7 DAYS a week, from 8am-6pm Monday to Saturday, and 9am-5pm on Sunday. TRADE INS welcome for all vehicles in stock. Cash for cars available. Over 150 cars in stock at 2 great locations. Visit our website now via hotspotautos.com.au Or phone: 02 9682 4000, or call 0450 921 234 for all after hours inquiries/inspections.
import numpy as np def bad_pix(evtdata, fpm='A'): """Do some basic filtering on known bad pixels. Parameters ---------- evtdata: FITS data class This should be an hdu.data structure from a NuSTAR FITS file. fpm: {"FPMA" | "FPMB"} Which FPM you're filtering on. Assumes A if not set. Returns ------- goodinds: iterable Index of evtdata that passes the filtering. """ # Hot pixel filters # FPMA or FPMB if fpm.find('B') == -1 : pix_filter = np.invert( ( (evtdata['DET_ID'] == 2) & (evtdata['RAWX'] == 16) & (evtdata['RAWY'] == 5) | (evtdata['DET_ID'] == 2) & (evtdata['RAWX'] == 24) & (evtdata['RAWY'] == 22) | (evtdata['DET_ID'] == 2) & (evtdata['RAWX'] == 27) & (evtdata['RAWY'] == 6) | (evtdata['DET_ID'] == 2) & (evtdata['RAWX'] == 27) & (evtdata['RAWY'] == 21) | (evtdata['DET_ID'] == 3) & (evtdata['RAWX'] == 22) & (evtdata['RAWY'] == 1) | (evtdata['DET_ID'] == 3) & (evtdata['RAWX'] == 15) & (evtdata['RAWY'] == 3) | (evtdata['DET_ID'] == 3) & (evtdata['RAWX'] == 5) & (evtdata['RAWY'] == 5) | (evtdata['DET_ID'] == 3) & (evtdata['RAWX'] == 22) & (evtdata['RAWY'] == 7) | (evtdata['DET_ID'] == 3) & (evtdata['RAWX'] == 16) & (evtdata['RAWY'] == 11) | (evtdata['DET_ID'] == 3) & (evtdata['RAWX'] == 18) & (evtdata['RAWY'] == 3) | (evtdata['DET_ID'] == 3) & (evtdata['RAWX'] == 24) & (evtdata['RAWY'] == 4) | (evtdata['DET_ID'] == 3) & (evtdata['RAWX'] == 25) & (evtdata['RAWY'] == 5) ) ) else: pix_filter = np.invert( ( (evtdata['DET_ID'] == 0) & (evtdata['RAWX'] == 24) & (evtdata['RAWY'] == 24)) ) inds = (pix_filter).nonzero() goodinds=inds[0] return goodinds def by_energy(evtdata, energy_low=2.5, energy_high=10.): """ Apply energy filtering to the data. Parameters ---------- evtdata: FITS data class This should be an hdu.data structure from a NuSTAR FITS file. energy_low: float Low-side energy bound for the map you want to produce (in keV). Defaults to 2.5 keV. energy_high: float High-side energy bound for the map you want to produce (in keV). Defaults to 10 keV. 
""" pilow = (energy_low - 1.6) / 0.04 pihigh = (energy_high - 1.6) / 0.04 pi_filter = ( ( evtdata['PI']>pilow ) & ( evtdata['PI']<pihigh)) inds = (pi_filter).nonzero() goodinds=inds[0] return goodinds def gradezero(evtdata): """ Only accept counts with GRADE==0. Parameters ---------- evtdata: FITS data class This should be an hdu.data structure from a NuSTAR FITS file. Returns ------- goodinds: iterable Index of evtdata that passes the filtering. """ # Grade filter grade_filter = ( evtdata['GRADE'] == 0) inds = (grade_filter).nonzero() goodinds = inds[0] return goodinds def event_filter(evtdata, fpm='FPMA', energy_low=2.5, energy_high=10): """ All in one filter module. By default applies an energy cut, selects only events with grade == 0, and removes known hot pixel. Note that this module returns a cleaned eventlist rather than the indices to the cleaned events. Parameters ---------- evtdata: FITS data structure This should be an hdu.data structure from a NuSTAR FITS file. fpm: {"FPMA" | "FPMB"} Which FPM you're filtering on. Defaults to FPMA. energy_low: float Low-side energy bound for the map you want to produce (in keV). Defaults to 2.5 keV. energy_high: float High-side energy bound for the map you want to produce (in keV). Defaults to 10 keV. Returns ------- cleanevt: FITS data class. This is the subset of evtdata that pass the data selection cuts. """ goodinds = bad_pix(evtdata, fpm=fpm) evt_badfilter = evtdata[goodinds] goodinds = by_energy(evt_badfilter, energy_low=energy_low, energy_high = energy_high) evt_energy = evt_badfilter[goodinds] goodinds = gradezero(evt_energy) cleanevt = evt_energy[goodinds] return cleanevt
From the altar of God! With those itching for a fight. Blowing themselves up for a cause! Is far from a coward! But heck of a brave!
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import absolute_import

from mom.builtins import b

from pyoauth.oauth1.client import Client
from pyoauth.oauth1.client.mixins import OAuthMixin


class TwitterClient(Client):
    """
    OAuth 1.0 client preconfigured with Twitter's endpoint URIs.
    """

    _TEMP_URI = b("https://api.twitter.com/oauth/request_token")
    _TOKEN_URI = b("https://api.twitter.com/oauth/access_token")
    _AUTHORIZATION_URI = b("https://api.twitter.com/oauth/authorize")
    _AUTHENTICATION_URI = b("https://api.twitter.com/oauth/authenticate")

    def __init__(self, http_client, client_credentials,
                 use_authorization_header=True, strict=False):
        """Wire the fixed Twitter URIs into the generic OAuth 1.0 client."""
        super(TwitterClient, self).__init__(
            http_client,
            client_credentials,
            self._TEMP_URI,
            self._TOKEN_URI,
            self._AUTHORIZATION_URI,
            self._AUTHENTICATION_URI,
            use_authorization_header=use_authorization_header,
            strict=strict)


class TwitterMixin(OAuthMixin):
    """
    OAuth handler mixin. Use with an HttpAdapterMixin for your framework.
    """
    pass
trends that will reign over this upcoming season. FFT covers latest fashion trends in denims. The PowerPoint PPT presentation: "Denim Fashion Trends" is the property of its rightful owner.
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see
# <http://www.gnu.org/licenses/>.


"""
Utility module for discovering the differences between two directory
trees

:author: Christopher O'Brien <obriencj@gmail.com>
:license: LGPL
"""


from filecmp import dircmp
from fnmatch import fnmatch
from os import makedirs, walk
from os.path import exists, isdir, join, relpath
from shutil import copy


LEFT = "left only"
RIGHT = "right only"
DIFF = "changed"
SAME = "same"
BOTH = SAME  # meh, synonyms


def fnmatches(entry, *pattern_list):
    """
    returns true if entry matches any of the glob patterns, false
    otherwise
    """

    for pattern in pattern_list:
        if pattern and fnmatch(entry, pattern):
            return True
    return False


def makedirsp(dirname):
    """
    create dirname if it doesn't exist
    """

    if dirname and not exists(dirname):
        makedirs(dirname)


def copydir(orig, dest):
    """
    copies directory orig to dest. Returns a list of tuples of
    (source, destination) file paths which were copied from orig to
    dest
    """

    copied = list()

    makedirsp(dest)

    for root, dirs, files in walk(orig):
        for d in dirs:
            # Mirror the directory tree relative to orig.
            # BUGFIX: previously only the bare directory name was joined
            # onto dest, so nested directories were created at the wrong
            # depth and copies of deep trees failed.
            makedirsp(join(dest, relpath(join(root, d), orig)))

        for f in files:
            root_f = join(root, f)
            dest_f = join(dest, relpath(root_f, orig))
            copy(root_f, dest_f)
            copied.append((root_f, dest_f))

    return copied


def compare(left, right):
    """
    generator emiting pairs indicating the contents of the left and
    right directories. The pairs are in the form of (difference,
    filename) where difference is one of the LEFT, RIGHT, DIFF, or
    BOTH constants. This generator recursively walks both trees.
    """

    dc = dircmp(left, right, ignore=[])
    return _gen_from_dircmp(dc, left, right)


def _gen_from_dircmp(dc, lpath, rpath):
    """
    do the work of comparing the dircmp
    """

    # entries present only on the left; recurse into directories so
    # every file underneath is reported individually
    left_only = dc.left_only
    left_only.sort()

    for f in left_only:
        fp = join(dc.left, f)
        if isdir(fp):
            for r, _ds, fs in walk(fp):
                r = relpath(r, lpath)
                # renamed from f to fn: the outer loop variable was
                # being shadowed here
                for fn in fs:
                    yield (LEFT, join(r, fn))
        else:
            yield (LEFT, relpath(fp, lpath))

    right_only = dc.right_only
    right_only.sort()

    for f in right_only:
        fp = join(dc.right, f)
        if isdir(fp):
            for r, _ds, fs in walk(fp):
                r = relpath(r, rpath)
                for fn in fs:
                    yield (RIGHT, join(r, fn))
        else:
            yield (RIGHT, relpath(fp, rpath))

    diff_files = dc.diff_files
    diff_files.sort()

    for f in diff_files:
        yield (DIFF, join(relpath(dc.right, rpath), f))

    same_files = dc.same_files
    same_files.sort()

    for f in same_files:
        yield (BOTH, join(relpath(dc.left, lpath), f))

    # BUGFIX: sorting dircmp instances directly raises TypeError on
    # Python 3 (no ordering defined); sort by subdirectory name instead,
    # which is clearly the intended deterministic order.
    for _name, sub in sorted(dc.subdirs.items()):
        for event in _gen_from_dircmp(sub, lpath, rpath):
            yield event


def collect_compare(left, right):
    """
    returns a tuple of four lists describing the file paths that have
    been (in order) added, removed, altered, or left the same
    """

    return collect_compare_into(left, right, [], [], [], [])


def collect_compare_into(left, right, added, removed, altered, same):
    """
    collect the results of compare into the given lists (or None if
    you do not wish to collect results of that type). Returns a tuple
    of (added, removed, altered, same)
    """

    for event, filename in compare(left, right):
        if event == LEFT:
            group = removed
        elif event == RIGHT:
            group = added
        elif event == DIFF:
            group = altered
        elif event == BOTH:
            group = same
        else:
            assert False

        if group is not None:
            group.append(filename)

    return added, removed, altered, same


class ClosingContext(object):
    # pylint: disable=R0903
    # too few public methods (none)

    """
    A simple context manager which is created with an object instance,
    and will return that instance from __enter__ and call the close
    method on the instance in __exit__
    """

    def __init__(self, managed):
        self.managed = managed

    def __enter__(self):
        return self.managed

    def __exit__(self, exc_type, _exc_val, _exc_tb):
        managed = self.managed
        self.managed = None

        if managed is not None and hasattr(managed, "close"):
            managed.close()

        return exc_type is None


def closing(managed):
    """
    If the managed object already provides its own context management
    via the __enter__ and __exit__ methods, it is returned unchanged.
    However, if the instance does not, a ClosingContext will be
    created to wrap it. When the ClosingContext exits, it will call
    managed.close()
    """

    if managed is None or hasattr(managed, "__enter__"):
        return managed
    else:
        return ClosingContext(managed)


#
# The end.
I am taking a holiday break and expect to post again on Sunday, June 5. This fact has not stopped aggressive enforcement of the GMO industry's intellectual property rights which involves threats and lawsuits designed to intimidate not just those supposedly in violation of crop patents, but the entire farming community even when the cases involve contamination by adjacent farms and passing vehicles containing GMO seeds. Here's the message: To avoid lawsuits that threaten to take away your farming livelihood, you might as well sign up to buy our seeds because contamination by us or our farmer customers will be no defense in court. Thus a farmer whose field contains seed or plants originating from seed spilled into them, or blown as seed, in swaths from a neighbour's land or even growing from germination by pollen carried into his field from elsewhere by insects, birds, or by the wind, may own the seed or plants on his land even if he did not set about to plant them. He does not, however, own the right to the use of the patented gene, or of the seed or plant containing the patented gene or cell. I am reminded of King Henry's conversation with his counterpart King Philip of France in the play Lion in Winter. Philip is insisting that his sister, Alais, be wedded to Henry's son, as previously agreed by Henry and Philip's father, the now deceased King Louis. It's that or the return of the Vexin, a key county north of Paris given to England in exchange for the betrothal. Philip: It's their wedding or the Vexin back. Those are the terms you made with Louis. Henry: True, but academic, lad. The Vexin's mine. Henry: It's got my troops all over it. That makes it mine. Just substitute "crops" for "troops," and you'll see an age-old strategy at work. I am also reminded of Hitler's remilitarization of the Rhineland and the Anschluss, his occupation of Austria. Once his troops were on the ground, nobody wanted to challenge him. 
The contamination strategy solves two perceived problems for the industry. First, the industry attempted to include GMO plants as acceptable in the original National Organic Program of the U.S. Department of Agriculture. But the outcry was so great from activists that GMOs were taken out of the standards. One way, however, to overcome this resistance is through contamination. By forcing food regulators to accept GMO contamination in organic food as inevitable, the GMO industry is paving the way for eventual capitulation by the organic community and conventional growers as well. The industry wants to propagate the attitude that nothing can be done to stop it. Second, although Europe has long had labeling requirements for GMO foods, in the United States the industry has so far been able to prevent enactment of any such requirement. The response from food activists has been to launch a campaign for voluntary labeling of non-GMO foods and that now has the GMO industry on the defensive. But, what better way to undermine such an effort than to contaminate conventional and organic crops? What would change the calculus of the GMO industry? Perhaps it would change if some of the contamination suits (mostly outside the United States) were to result in huge verdicts, ones large enough to be financially ruinous to the industry. Nothing like that, however, is on the horizon. In the meantime, we can all look forward to the involuntary consumption of genetically modified food ingredients against our will. The GMO industry tells us that they want consumers to have a choice, that GMO foods should "coexist" with conventional and organic foods. Yet, they oppose labeling. Meanwhile, the equivalent of the GMO industry's panzer corps is moving into our farm fields and from there into our kitchens. We may soon regret this creeping annexation of our dinner tables. Once the invasion of GMO genes around the world is complete, we may find it harder to roll back than Hitler's armies. 
Would vested interests starve the world? In his latest book entitled Bottleneck sociologist and ecologist William Catton Jr. explains in detail why he believes human society is destined for a major dieoff, a "bottleneck" from which few survivors will emerge. One cause, he says, is an array of vested interests who manipulate the media and the power structure, oblivious to the consequences of their actions. Many would say that this is business-as-usual. After all, what do we expect when governments are thoroughly dominated by the industries they are supposed to regulate? As a result, we may say, a few more people will be maimed or killed or maybe just ripped off than would otherwise be the case. But, would such interests be so crazy as to persist in their manipulations when faced with compelling evidence that suggests their actions could result in widespread starvation? France, Germany, Italy, and Slovenia have severely restricted or banned this class of pesticides. Ironically, Germany is home to Bayer, one of the largest manufacturers of neonicotinoids, a company which continues to profit from hefty sales abroad. In the aftermath of the bans and restrictions, bee populations have quickly recovered. Naturally, this is not absolute proof that the bans generated the revival. But as the evidence continued to mount that neonicotinoids are strongly implicated in CCD, these European countries applied the so-called precautionary principle. Better to be safe than sorry when it comes to something as critical as food, and honeybees are pollinators for as much as a third of the world's food supply. Other nations have been slow to act because of pressure from the agricultural chemicals industry. The industry's hue and cry is that there is no definitive proof that neonicotinoids are a central cause of CCD. But, of course, the industry has the burden of proof backwards. 
If the industry is going to put one-third of the world's food supply at risk, then it ought to prove that its products are harmless. That would cost money, lots of money, and it would mean that many new chemicals with expensive development costs might never be approved. Naturally, the industry wants the burden of proof to fall on government and university scientists spending public money to prove a pesticide is dangerous. Nice arrangement! For the industry, that is. A more recent revelation is that glyphosate, the world's most widely used herbicide, may be setting us up for a major crop failure worldwide. Sold primarily under the trade name Roundup, the herbicide has been central to chemical and seed giant Monsanto's strategy to lock-in alfalfa, corn, cotton, canola, soybean, and sugar beet growers who must buy the company's genetically engineered and patent-protected seeds every year from Monsanto if they want to reseed their fields with herbicide-proof crops. Now a leaked private letter from an agricultural researcher to the secretary of agriculture seeking funds to research possible connections between the herbicide and increased levels of plant and animal disease has called into question the safety of this herbicide. Apparently, glyphosate promotes what is now being called Sudden Death Syndrome in plants by making them more susceptible to soil-borne diseases. This might not be so urgent an issue if it were relegated to crops that were of minor importance in the food supply or if the size of the genetically engineered crop were small. But neither is the case. Keep in mind that some 80 percent of all calories consumed by humans originate as grains or oilseeds. (A significant portion of these, of course, is used as feed for dairy and meat production.) In 2010 in the United States, the world's major grain and oilseed exporter, 90 percent of the soybean crop was Roundup Ready (i.e. glyphosate-resistant) as was 70 percent of the corn. 
For the world the numbers were lower but considerable: 77 percent for soybeans and 26 percent for corn. A major decline in yields of these crops could certainly result in sky-high food prices and therefore hunger and starvation for many of the poorest in the world. One would think that authorities would be rushing to determine whether such dangers exist and how severe they are. But while many agricultural governmental agencies are aware of the concerns, little is being done. Perhaps it will take a major harvest catastrophe to convince policymakers that the dangers are real. By then, of course, it will be too late for many. But, at least the agricultural chemical interests will be pleased that their political and financial muscle extended profits right up to the moment when it became clear to everyone why the harvest failed. In lieu of my weekly piece I'm posting this interview with Max Keiser, host of the financial talk show On the Edge. The interview aired May 6th and is available in two parts from YouTube below. Can dictators solve our problems? Recently in my state under the auspices of a new law, a small, financially troubled city was the first to be completely taken over by a so-called emergency financial manager appointed by the governor. The extraordinary powers given to this manager under the law allowed him to strip all governing powers from elected officials and the boards appointed by them. This manager is now the de facto dictator of Benton Harbor, Michigan. This event is not, however, the beginning of a trend toward greater centralization of political, economic and social control. We are, in fact, well along that path which is a response to the inscrutable problems that our complex society faces--a society so complex that no one really knows how to govern it. 
As Joseph Tainter, author of The Collapse of Complex Societies, explains, complex societies at first find that complexity solves problems such as food storage, transport, border security, and the maintenance of social order. But as those societies become ever more complex, the returns on increased complexity diminish and then finally turn negative. Many thinkers have noticed that our society may have already passed the point of diminishing returns and that we are now experiencing negative returns on complexity. Economist Herman Daly says we long ago reached the point of "uneconomic growth." (PDF) The costs of growth now exceed the benefits. William Catton Jr. said much the same thing in his ecological classic entitled Overshoot. Human society now consumes resources at a rate far beyond the Earth's long-term carrying capacity. A recent dramatic example of the impenetrable complexity we live with was the financial crash of 2008, an event that policymakers seemed helpless to halt. Their response was to ask for extraordinary powers to inject money into the banking system, to take over large enterprises, and to run huge fiscal deficits to boost the world's economy. Central banks engaged in what some people believe is illegal activity to shore up flagging financial firms. Give us dictatorial powers, government officials said, or everything will fall apart. When there wasn't time to get authorization, those government officials simply did what they thought was necessary to stop the financial haemorrhaging. I don't want to rehash the wisdom of these acts of financial desperation. What I'm interested in is the notion the world has become too complex and fast-moving for democratic governance. Tainter explains that increased complexity calls for increasingly complex systems of people and machines to manage that complexity. The United States and its allies have given a highly complex and powerful military the task of winning the so-called "War on Terror." 
Setting aside the fact that terror is a tactic, not a defined enemy, consider whether that military has made progress in that multi-front conflict in the decade since it was announced by President George W. Bush. Today, Afghanistan remains a lawless and corrupt state. Pakistan has become a haven for those who oppose American power. Iraq--which was never a haven for terrorists--has now become one, full of rebels who are largely an indigenous religious minority dissatisfied with the outcome of the war. And, the persistent request from George Bush and now his successor, Barack Obama, more or less amounts to this: "Give me extraordinary powers to detain people and fight wars. Too much interference from elected officials will hamper our efforts. Basic constitutional protections against search and seizure and against imprisonment without trial need to be overridden." But the so-called "War on Terror" is really many conflicts involving local grievances in countries which have strategic importance to the United States and its allies for resource or geopolitical reasons. Lax oversight by elected officials and concentration of power in the hands of generals and civilian managers has not resolved these conflicts. When the usual processes of democracy fail often enough to resolve perceived difficulties, people sometimes choose to relinquish their voice in society's decisions in exchange for order and stability. We may, however, be approaching an era where no such tradeoff exists. A dictator running an ungovernable system may be no better at achieving stability than a democratically elected body with all its messy procedures. There is an alternative. Simplify the systems we live under. That, of course, means challenging the existing power structure. Energy constraints may soon do the challenging for us and force us to simplify systems that will have trouble surviving declines in energy inputs such as multi-national corporations and large centralized states.
I saw Tainter in person not too long ago at a conference. He was asked if any complex society has ever voluntarily reduced its complexity, meaning before events forced it to. He couldn't think of any. That tells me we can look forward to more governments claiming the need for additional extraordinary powers to deal with seemingly intractable problems that, in all likelihood, cannot be solved by continuing with the hypercomplex arrangements that now govern our world. As our difficulties increase, a new crop of dictators or quasi-dictators in various realms of our society will emerge, offering to solve our problems. Increasingly, I think we will let them try.
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import math
import sys

import numpy as np


class FixedPointNumber(object):
    """Fixed-point encoding of an int or float.

    A scalar is stored as an integer ``encoding`` plus an ``exponent``
    such that the represented value is ``encoding * BASE ** -exponent``,
    reduced modulo ``n``.  Encodings in ``[0, max_int]`` are positive,
    encodings in ``[n - max_int, n)`` represent negatives, and the band
    in between signals overflow.
    """

    BASE = 16
    LOG2_BASE = math.log(BASE, 2)
    FLOAT_MANTISSA_BITS = sys.float_info.mant_dig

    # Default modulus of the encoding ring.
    Q = 293973345475167247070445277780365744413

    def __init__(self, encoding, exponent, n=None, max_int=None):
        if n is None:
            # Default ring: usable magnitude is a third of the modulus.
            n = self.Q
            max_int = self.Q // 3 - 1
        self.n = n
        self.max_int = max_int
        self.encoding = encoding
        self.exponent = exponent

    @classmethod
    def encode(cls, scalar, n=None, max_int=None, precision=None, max_exponent=None):
        """Return an encoding of an int or float."""
        # Flush tiny magnitudes to zero up front; otherwise the later
        # arithmetic can hit "OverflowError: int too large to convert
        # to float".
        if np.abs(scalar) < 1e-200:
            scalar = 0

        if n is None:
            n = cls.Q
            max_int = cls.Q // 3 - 1

        if precision is None:
            # Pick an exponent that captures the full precision of the
            # input's type.
            if isinstance(scalar, (int, np.int16, np.int32, np.int64)):
                exponent = 0
            elif isinstance(scalar, (float, np.float16, np.float32, np.float64)):
                flt_exponent = math.frexp(scalar)[1]
                lsb_exponent = cls.FLOAT_MANTISSA_BITS - flt_exponent
                exponent = math.floor(lsb_exponent / cls.LOG2_BASE)
            else:
                raise TypeError("Don't know the precision of type %s."
                                % type(scalar))
        else:
            exponent = math.floor(math.log(precision, cls.BASE))

        if max_exponent is not None:
            exponent = max(max_exponent, exponent)

        int_fixpoint = int(round(scalar * pow(cls.BASE, exponent)))

        if abs(int_fixpoint) > max_int:
            raise ValueError('Integer needs to be within +/- %d but got %d'
                             % (max_int, int_fixpoint))

        return cls(int_fixpoint % n, exponent, n, max_int)

    def decode(self):
        """Return the decoded plaintext value."""
        if self.encoding >= self.n:
            # Should be mod n
            raise ValueError('Attempted to decode corrupted number')
        elif self.encoding <= self.max_int:
            # Positive band
            mantissa = self.encoding
        elif self.encoding >= self.n - self.max_int:
            # Negative band
            mantissa = self.encoding - self.n
        else:
            raise OverflowError('Overflow detected in decode number')

        return mantissa * pow(self.BASE, -self.exponent)

    def increase_exponent_to(self, new_exponent):
        """Return an equal FixedPointNumber re-expressed with a larger exponent."""
        if new_exponent < self.exponent:
            raise ValueError('New exponent %i should be greater than'
                             'old exponent %i' % (new_exponent, self.exponent))

        factor = pow(self.BASE, new_exponent - self.exponent)
        new_encoding = self.encoding * factor % self.n
        return FixedPointNumber(new_encoding, new_exponent, self.n, self.max_int)

    def __align_exponent(self, x, y):
        """Return (x, y) rescaled so both operands share the larger exponent."""
        if x.exponent < y.exponent:
            x = x.increase_exponent_to(y.exponent)
        elif x.exponent > y.exponent:
            y = y.increase_exponent_to(x.exponent)
        return x, y

    def __truncate(self, a):
        """Re-encode via decode() to shed accumulated exponent growth."""
        return FixedPointNumber.encode(a.decode())

    @staticmethod
    def __as_scalar(other):
        """Decode FixedPointNumber operands; pass plain scalars through."""
        return other.decode() if isinstance(other, FixedPointNumber) else other

    def __add__(self, other):
        if isinstance(other, FixedPointNumber):
            return self.__add_fixpointnumber(other)
        return self.__add_scalar(other)

    def __radd__(self, other):
        return self.__add__(other)

    def __sub__(self, other):
        if isinstance(other, FixedPointNumber):
            return self.__sub_fixpointnumber(other)
        return self.__sub_scalar(other)

    def __rsub__(self, other):
        # other - self == -(self - other)
        negated = -1 * self.__sub__(other).decode()
        return self.encode(negated)

    def __mul__(self, other):
        if isinstance(other, FixedPointNumber):
            return self.__mul_fixpointnumber(other)
        return self.__mul_scalar(other)

    def __rmul__(self, other):
        return self.__mul__(other)

    def __truediv__(self, other):
        return self.__mul__(1 / self.__as_scalar(other))

    def __rtruediv__(self, other):
        # other / self == 1 / (self / other)
        return FixedPointNumber.encode(1.0 / self.__truediv__(other).decode())

    def __lt__(self, other):
        return self.decode() < self.__as_scalar(other)

    def __gt__(self, other):
        return self.decode() > self.__as_scalar(other)

    def __le__(self, other):
        return self.decode() <= self.__as_scalar(other)

    def __ge__(self, other):
        return self.decode() >= self.__as_scalar(other)

    def __eq__(self, other):
        return self.decode() == self.__as_scalar(other)

    def __ne__(self, other):
        return self.decode() != self.__as_scalar(other)

    def __add_fixpointnumber(self, other):
        x, y = self.__align_exponent(self, other)
        # NOTE(review): reduces modulo the default Q rather than self.n,
        # and does not propagate n/max_int to the result -- mirrors the
        # original code; only exact when the default modulus is in use.
        return FixedPointNumber((x.encoding + y.encoding) % self.Q, x.exponent)

    def __add_scalar(self, scalar):
        return self.__add_fixpointnumber(self.encode(scalar))

    def __sub_fixpointnumber(self, other):
        return self.__add_scalar(-1 * other.decode())

    def __sub_scalar(self, scalar):
        return self.__add_scalar(-1 * scalar)

    def __mul_fixpointnumber(self, other):
        product = FixedPointNumber(
            (self.encoding * other.encoding) % self.Q,
            self.exponent + other.exponent)
        # Truncate so the exponent does not grow without bound.
        return self.__truncate(product)

    def __mul_scalar(self, scalar):
        return self.__mul_fixpointnumber(self.encode(scalar))
Get a 7 gram sample pack of BHO and SAVE!!! Each sample pack comes with 7 individual grams of BHO. Now you can try a variety of different strains and see what works best for you. Please make a note in your order as to which strains you would like to receive or we will choose at random.
# This file is part of pyRFXtrx, a Python library to communicate with # the RFXtrx family of devices from http://www.rfxcom.com/ # See https://github.com/woudt/pyRFXtrx for the latest version. # # Copyright (C) 2012 Edwin Woudt <edwin@woudt.nl> # # pyRFXtrx is free software: you can redistribute it and/or modify it # under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # pyRFXtrx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with pyRFXtrx. See the file COPYING.txt in the distribution. # If not, see <http://www.gnu.org/licenses/>. """ This module provides low level packet parsing and generation code for the RFXtrx. 
""" # pylint: disable=C0302,R0902,R0903,R0911,R0913 def parse(data): """ Parse a packet from a bytearray """ if data[0] == 0: # null length packet - sometimes happens on initialization return None if data[1] == 0x01: pkt = Status() pkt.load_receive(data) return pkt if data[1] == 0x10: pkt = Lighting1() pkt.load_receive(data) return pkt if data[1] == 0x11: pkt = Lighting2() pkt.load_receive(data) return pkt if data[1] == 0x12: pkt = Lighting3() pkt.load_receive(data) return pkt if data[1] == 0x13: pkt = Lighting4() pkt.load_receive(data) return pkt if data[1] == 0x14: pkt = Lighting5() pkt.load_receive(data) return pkt if data[1] == 0x15: pkt = Lighting6() pkt.load_receive(data) return pkt if data[1] == 0x50: pkt = Temp() pkt.load_receive(data) return pkt if data[1] == 0x52: pkt = TempHumid() pkt.load_receive(data) return pkt if data[1] == 0x54: pkt = TempHumidBaro() pkt.load_receive(data) return pkt if data[1] == 0x55: pkt = RainGauge() pkt.load_receive(data) return pkt if data[1] == 0x56: pkt = Wind() pkt.load_receive(data) return pkt ############################################################################### # Packet class ############################################################################### class Packet(object): """ Abstract superclass for all low level packets """ _UNKNOWN_TYPE = "Unknown type ({0:#04x}/{1:#04x})" _UNKNOWN_CMND = "Unknown command ({0:#04x})" def __init__(self): """Constructor""" self.data = None self.packetlength = None self.packettype = None self.subtype = None self.seqnbr = None self.rssi = None self.rssi_byte = None self.type_string = None self.id_string = None def has_value(self, datatype): """Return True if the sensor supports the given data type. sensor.has_value(RFXCOM_TEMPERATURE) is identical to calling sensor.has_temperature(). """ return hasattr(self, dataype) def value(self, datatype): """Return the :class:`SensorValue` for the given data type. 
sensor.value(RFXCOM_TEMPERATURE) is identical to calling sensor.temperature(). """ return getattr(self, datatype, None) def __getattr__(self, name): typename = name.replace("has_", "", 1) if not name == typename: return lambda: self.has_value(datatype) raise AttributeError(name) def __eq__(self, other): if not isinstance(other, Sensor): return False return self.id_string == other.id_string def __repr__(self): return(self.id_string + ": " + str(self.temperature)) ############################################################################### # Status class ############################################################################### def _decode_flags(v, words): words = words.split() s = set() for w in words: if v % 2: s.add(w) v//= 2 return s class Status(Packet): """ Data class for the Status packet type """ TYPES = { 0x50: '310MHz', 0x51: '315MHz', 0x53: '433.92MHz', 0x55: '868.00MHz', 0x56: '868.00MHz FSK', 0x57: '868.30MHz', 0x58: '868.30MHz FSK', 0x59: '868.35MHz', 0x5A: '868.35MHz FSK', 0x5B: '868.95MHz' } """ Mapping of numeric subtype values to strings, used in type_string """ def __str__(self): return ("Status [subtype={0}, firmware={1}, devices={2}]") \ .format(self.type_string, self.firmware_version, self.devices) def __init__(self): """Constructor""" super(Status, self).__init__() self.tranceiver_type = None self.firmware_version = None self.devices = None def load_receive(self, data): """Load data from a bytearray""" self.data = data self.packetlength = data[0] self.packettype = data[1] self.tranceiver_type = data[5] self.firmware_version = data[6] devs = set() devs.update(_decode_flags(data[7] / 0x80, 'undecoded')) devs.update(_decode_flags(data[8], 'mertik lightwarerf hideki lacrosse fs20 proguard')) devs.update(_decode_flags(data[9], 'x10 arc ac homeeasy ikeakoppla oregon ati visonic')) self.devices = sorted(devs) self._set_strings() def _set_strings(self): """Translate loaded numeric values into convenience strings""" if self.tranceiver_type in 
self.TYPES: self.type_string = self.TYPES[self.tranceiver_type] else: #Degrade nicely for yet unknown subtypes self.type_string = 'Unknown' ############################################################################### # Lighting1 class ############################################################################### class Lighting1(Packet): """ Data class for the Lighting1 packet type """ TYPES = {0x00: 'X10 lighting', 0x01: 'ARC', 0x02: 'ELRO AB400D', 0x03: 'Waveman', 0x04: 'Chacon EMW200', 0x05: 'IMPULS', 0x06: 'RisingSun', 0x07: 'Philips SBC', 0x08: 'Energenie', } """ Mapping of numeric subtype values to strings, used in type_string """ ALIAS_TYPES = {'KlikAanKlikUit code wheel': 0x01, 'NEXA code wheel': 0x01, 'CHACON code wheel': 0x01, 'HomeEasy code wheel': 0x01, 'Proove': 0x01, 'DomiaLite': 0x01, 'InterTechno': 0x01, 'AB600': 0x01, } """ Mapping of subtype aliases to the corresponding subtype value """ HOUSECODES = {0x41: 'A', 0x42: 'B', 0x43: 'C', 0x44: 'D', 0x45: 'E', 0x46: 'F', 0x47: 'G', 0x48: 'H', 0x49: 'I', 0x4A: 'J', 0x4B: 'K', 0x4C: 'L', 0x4D: 'M', 0x4E: 'N', 0x4F: 'O', 0x50: 'P'} """ Mapping of housecode numeric values to strings, used in id_string """ COMMANDS = {0x00: 'Off', 0x01: 'On', 0x02: 'Dim', 0x03: 'Bright', 0x05: 'All/group Off', 0x06: 'All/group On', 0x07: 'Chime', 0xFF: 'Illegal command'} """ Mapping of command numeric values to strings, used for cmnd_string """ def __str__(self): return ("Lighting1 [subtype={0}, seqnbr={1}, id={2}, cmnd={3}, " + "rssi={4}]") \ .format(self.type_string, self.seqnbr, self.id_string, self.cmnd_string, self.rssi) def __init__(self): """Constructor""" super(Lighting1, self).__init__() self.housecode = None self.unitcode = None self.cmnd = None self.cmnd_string = None def parse_id(self, subtype, id_string): """Parse a string id into individual components""" try: self.packettype = 0x10 self.subtype = subtype hcode = id_string[0:1] for hcode_num in self.HOUSECODES: if self.HOUSECODES[hcode_num] == hcode: 
self.housecode = hcode_num self.unitcode = int(id_string[1:]) self._set_strings() except: raise ValueError("Invalid id_string") if self.id_string != id_string: raise ValueError("Invalid id_string") def load_receive(self, data): """Load data from a bytearray""" self.data = data self.packetlength = data[0] self.packettype = data[1] self.subtype = data[2] self.seqnbr = data[3] self.housecode = data[4] self.unitcode = data[5] self.cmnd = data[6] self.rssi_byte = data[7] self.rssi = self.rssi_byte >> 4 self._set_strings() def set_transmit(self, subtype, seqnbr, housecode, unitcode, cmnd): """Load data from individual data fields""" self.packetlength = 7 self.packettype = 0x10 self.subtype = subtype self.seqnbr = seqnbr self.housecode = housecode self.unitcode = unitcode self.cmnd = cmnd self.rssi_byte = 0 self.rssi = 0 self.data = bytearray([self.packetlength, self.packettype, self.subtype, self.seqnbr, self.housecode, self.unitcode, self.cmnd, self.rssi_byte]) self._set_strings() def _set_strings(self): """Translate loaded numeric values into convenience strings""" self.id_string = self.HOUSECODES[self.housecode] + str(self.unitcode) if self.subtype in self.TYPES: self.type_string = self.TYPES[self.subtype] else: #Degrade nicely for yet unknown subtypes self.type_string = self._UNKNOWN_TYPE.format(self.packettype, self.subtype) if self.cmnd is not None: if self.cmnd in self.COMMANDS: self.cmnd_string = self.COMMANDS[self.cmnd] else: self.cmnd_string = self._UNKNOWN_CMND.format(self.cmnd) ############################################################################### # Lighting2 class ############################################################################### class Lighting2(Packet): """ Data class for the Lighting2 packet type """ TYPES = {0x00: 'AC', 0x01: 'HomeEasy EU', 0x02: 'ANSLUT', } """ Mapping of numeric subtype values to strings, used in type_string """ ALIAS_TYPES = {'KlikAanKlikUit automatic': 0x00, 'NEXA automatic': 0x00, 'CHACON autometic': 0x00, 
'HomeEasy UK': 0x00, } """ Mapping of subtype aliases to the corresponding subtype value """ COMMANDS = {0x00: 'Off', 0x01: 'On', 0x02: 'Set level', 0x03: 'Group off', 0x04: 'Group on', 0x05: 'Set group level', } """ Mapping of command numeric values to strings, used for cmnd_string """ def __str__(self): return ("Lighting2 [subtype={0}, seqnbr={1}, id={2}, cmnd={3}, " + "level={4}, rssi={5}]") \ .format(self.type_string, self.seqnbr, self.id_string, self.cmnd_string, self.level, self.rssi) def __init__(self): """Constructor""" super(Lighting2, self).__init__() self.id1 = None self.id2 = None self.id3 = None self.id4 = None self.id_combined = None self.unitcode = None self.cmnd = None self.level = None self.cmnd_string = None def parse_id(self, subtype, id_string): """Parse a string id into individual components""" try: self.packettype = 0x11 self.subtype = subtype self.id_combined = int(id_string[:7], 16) self.id1 = self.id_combined >> 24 self.id2 = self.id_combined >> 16 & 0xff self.id3 = self.id_combined >> 8 & 0xff self.id4 = self.id_combined & 0xff self.unitcode = int(id_string[8:]) self._set_strings() except: raise ValueError("Invalid id_string") if self.id_string != id_string: raise ValueError("Invalid id_string") def load_receive(self, data): """Load data from a bytearray""" self.data = data self.packetlength = data[0] self.packettype = data[1] self.subtype = data[2] self.seqnbr = data[3] self.id1 = data[4] self.id2 = data[5] self.id3 = data[6] self.id4 = data[7] self.id_combined = (self.id1 << 24) + (self.id2 << 16) \ + (self.id3 << 8) + self.id4 self.unitcode = data[8] self.cmnd = data[9] self.level = data[10] self.rssi_byte = data[11] self.rssi = self.rssi_byte >> 4 self._set_strings() def set_transmit(self, subtype, seqnbr, id_combined, unitcode, cmnd, level): """Load data from individual data fields""" self.packetlength = 0x0b self.packettype = 0x11 self.subtype = subtype self.seqnbr = seqnbr self.id_combined = id_combined self.id1 = id_combined >> 24 
self.id2 = id_combined >> 16 & 0xff self.id3 = id_combined >> 8 & 0xff self.id4 = id_combined & 0xff self.unitcode = unitcode self.cmnd = cmnd self.level = level self.rssi_byte = 0 self.rssi = 0 self.data = bytearray([self.packetlength, self.packettype, self.subtype, self.seqnbr, self.id1, self.id2, self.id3, self.id4, self.unitcode, self.cmnd, self.level, self.rssi_byte]) self._set_strings() def _set_strings(self): """Translate loaded numeric values into convenience strings""" self.id_string = "{0:07x}:{1}".format(self.id_combined, self.unitcode) if self.subtype in self.TYPES: self.type_string = self.TYPES[self.subtype] else: #Degrade nicely for yet unknown subtypes self.type_string = self._UNKNOWN_TYPE.format(self.packettype, self.subtype) if self.cmnd is not None: if self.cmnd in self.COMMANDS: self.cmnd_string = self.COMMANDS[self.cmnd] else: self.cmnd_string = self._UNKNOWN_CMND.format(self.cmnd) ############################################################################### # Lighting3 class ############################################################################### class Lighting3(Packet): """ Data class for the Lighting3 packet type """ TYPES = {0x00: 'Ikea Koppla', } """ Mapping of numeric subtype values to strings, used in type_string """ COMMANDS = {0x00: 'Bright', 0x08: 'Dim', 0x10: 'On', 0x11: 'Level 1', 0x12: 'Level 2', 0x13: 'Level 3', 0x14: 'Level 4', 0x15: 'Level 5', 0x16: 'Level 6', 0x17: 'Level 7', 0x18: 'Level 8', 0x19: 'Level 9', 0x1a: 'Off', 0x1c: 'Program', } """ Mapping of command numeric values to strings, used for cmnd_string """ def __str__(self): return ("Lighting3 [subtype={0}, seqnbr={1}, id={2}, cmnd={3}, " + "battery={4}, rssi={5}]") \ .format(self.type_string, self.seqnbr, self.id_string, self.cmnd_string, self.battery, self.rssi) def __init__(self): """Constructor""" super(Lighting3, self).__init__() self.system = None self.channel1 = None self.channel2 = None self.channel = None self.cmnd = None self.battery = None 
self.cmnd_string = None def parse_id(self, subtype, id_string): """Parse a string id into individual components""" try: self.packettype = 0x12 self.subtype = subtype self.system = int(id_string[:1], 16) self.channel = int(id_string[2:], 16) self.channel1 = self.channel & 0xff self.channel2 = self.channel >> 8 self._set_strings() except: raise ValueError("Invalid id_string") if self.id_string != id_string: raise ValueError("Invalid id_string") def load_receive(self, data): """Load data from a bytearray""" self.data = data self.packetlength = data[0] self.packettype = data[1] self.subtype = data[2] self.seqnbr = data[3] self.system = data[4] self.channel1 = data[5] self.channel2 = data[6] self.channel = (self.channel2 << 8) + self.channel1 self.cmnd = data[7] self.rssi_byte = data[8] self.battery = self.rssi_byte & 0x0f self.rssi = self.rssi_byte >> 4 self._set_strings() def set_transmit(self, subtype, seqnbr, system, channel, cmnd): """Load data from individual data fields""" self.packetlength = 0x08 self.packettype = 0x12 self.subtype = subtype self.seqnbr = seqnbr self.system = system self.channel = channel self.channel1 = channel & 0xff self.channel2 = channel >> 8 self.cmnd = cmnd self.rssi_byte = 0 self.battery = 0 self.rssi = 0 self.data = bytearray([self.packetlength, self.packettype, self.subtype, self.seqnbr, self.system, self.channel1, self.channel2, self.cmnd, self.rssi_byte]) self._set_strings() def _set_strings(self): """Translate loaded numeric values into convenience strings""" self.id_string = "{0:1x}:{1:03x}".format(self.system, self.channel) if self.subtype in self.TYPES: self.type_string = self.TYPES[self.subtype] else: #Degrade nicely for yet unknown subtypes self.type_string = self._UNKNOWN_TYPE.format(self.packettype, self.subtype) if self.cmnd is not None: if self.cmnd in self.COMMANDS: self.cmnd_string = self.COMMANDS[self.cmnd] else: self.cmnd_string = self._UNKNOWN_CMND.format(self.cmnd) 
############################################################################### # Lighting4 class ############################################################################### class Lighting4(Packet): """ Data class for the Lighting4 packet type """ TYPES = {0x00: 'PT2262', } """ Mapping of numeric subtype values to strings, used in type_string """ def __str__(self): return ("Lighting4 [subtype={0}, seqnbr={1}, cmd={2}, pulse={3}, " + "rssi={4}]") \ .format(self.type_string, self.seqnbr, self.id_string, self.pulse, self.rssi) def __init__(self): """Constructor""" super(Lighting4, self).__init__() self.cmd1 = None self.cmd2 = None self.cmd3 = None self.cmd = None self.pulsehigh = None self.pulselow = None self.pulse = None def parse_id(self, subtype, id_string): """Parse a string id into individual components""" try: self.packettype = 0x13 self.subtype = subtype self.cmd = int(id_string, 16) self.cmd1 = self.cmd >> 16 self.cmd2 = (self.cmd >> 8) & 0xff self.cmd3 = self.cmd & 0xff self._set_strings() except: raise ValueError("Invalid id_string") if self.id_string != id_string: raise ValueError("Invalid id_string") def load_receive(self, data): """Load data from a bytearray""" self.data = data self.packetlength = data[0] self.packettype = data[1] self.subtype = data[2] self.seqnbr = data[3] self.cmd1 = data[4] self.cmd2 = data[5] self.cmd3 = data[6] self.cmd = (self.cmd1 << 16) + (self.cmd2 << 8) + self.cmd3 self.pulsehigh = data[7] self.pulselow = data[8] self.pulse = (self.pulsehigh << 8) + self.pulselow self.rssi_byte = data[9] self.rssi = self.rssi_byte >> 4 self._set_strings() def set_transmit(self, subtype, seqnbr, cmd, pulse): """Load data from individual data fields""" self.packetlength = 0x09 self.packettype = 0x13 self.subtype = subtype self.seqnbr = seqnbr self.cmd = cmd self.cmd1 = self.cmd >> 16 self.cmd2 = (self.cmd >> 8) & 0xff self.cmd3 = self.cmd & 0xff self.pulse = pulse self.pulsehigh = self.pulse >> 8 self.pulselow = self.pulse & 0xff 
self.rssi_byte = 0 self.rssi = 0 self.data = bytearray([self.packetlength, self.packettype, self.subtype, self.seqnbr, self.cmd1, self.cmd2, self.cmd3, self.pulsehigh, self.pulselow, self.rssi_byte]) self._set_strings() def _set_strings(self): """Translate loaded numeric values into convenience strings""" self.id_string = "{0:06x}".format(self.cmd) if self.subtype in self.TYPES: self.type_string = self.TYPES[self.subtype] else: #Degrade nicely for yet unknown subtypes self.type_string = self._UNKNOWN_TYPE.format(self.packettype, self.subtype) ############################################################################### # Lighting5 class ############################################################################### class Lighting5(Packet): """ Data class for the Lighting5 packet type """ TYPES = {0x00: 'LightwaveRF, Siemens', 0x01: 'EMW100 GAO/Everflourish', 0x02: 'BBSB new types', 0x03: 'MDREMOTE LED dimmer', 0x04: 'Conrad RSL2', } """ Mapping of numeric subtype values to strings, used in type_string """ ALIAS_TYPES = {'LightwaveRF': 0x00, 'Siemens': 0x00, 'EMW100 GAO': 0x01, 'Everflourish': 0x01, } """ Mapping of subtype aliases to the corresponding subtype value """ COMMANDS_00 = {0x00: 'Off', 0x01: 'On', 0x02: 'Group off', 0x03: 'Mood1', 0x04: 'Mood2', 0x05: 'Mood3', 0x06: 'Mood4', 0x07: 'Mood5', 0x0a: 'Unlock', 0x0b: 'Lock', 0x0c: 'All lock', 0x0d: 'Close (inline relay)', 0x0e: 'Stop (inline relay)', 0x0f: 'Open (inline relay)', 0x10: 'Set level', } """ Mapping of command numeric values to strings, used for cmnd_string """ COMMANDS_01 = {0x00: 'Off', 0x01: 'On', 0x02: 'Learn', } """ Mapping of command numeric values to strings, used for cmnd_string """ COMMANDS_02_04 = {0x00: 'Off', 0x01: 'On', 0x02: 'Group off', 0x03: 'Group on', } """ Mapping of command numeric values to strings, used for cmnd_string """ COMMANDS_03 = {0x00: 'Power', 0x01: 'Light', 0x02: 'Bright', 0x03: 'Dim', 0x04: '100%', 0x05: '50%', 0x06: '25%', 0x07: 'Mode+', 0x08: 'Speed-', 0x09: 
'Speed+', 0x0a: 'Mode-', } """ Mapping of command numeric values to strings, used for cmnd_string """ COMMANDS_XX = {0x00: 'Off', 0x01: 'On', } """ Mapping of command numeric values to strings, used for cmnd_string """ def __str__(self): return ("Lighting5 [subtype={0}, seqnbr={1}, id={2}, cmnd={3}, " + "level={4}, rssi={5}]") \ .format(self.type_string, self.seqnbr, self.id_string, self.cmnd_string, self.level, self.rssi) def __init__(self): """Constructor""" super(Lighting5, self).__init__() self.id1 = None self.id2 = None self.id3 = None self.id_combined = None self.unitcode = None self.cmnd = None self.level = None self.cmnd_string = None def parse_id(self, subtype, id_string): """Parse a string id into individual components""" try: self.packettype = 0x14 self.subtype = subtype self.id_combined = int(id_string[:6], 16) self.id1 = self.id_combined >> 16 self.id2 = self.id_combined >> 8 & 0xff self.id3 = self.id_combined & 0xff self.unitcode = int(id_string[7:]) self._set_strings() except: raise ValueError("Invalid id_string") if self.id_string != id_string: raise ValueError("Invalid id_string") def load_receive(self, data): """Load data from a bytearray""" self.data = data self.packetlength = data[0] self.packettype = data[1] self.subtype = data[2] self.seqnbr = data[3] self.id1 = data[4] self.id2 = data[5] self.id3 = data[6] self.id_combined = (self.id1 << 16) + (self.id2 << 8) + self.id3 self.unitcode = data[7] self.cmnd = data[8] self.level = data[9] self.rssi_byte = data[10] self.rssi = self.rssi_byte >> 4 self._set_strings() def set_transmit(self, subtype, seqnbr, id_combined, unitcode, cmnd, level): """Load data from individual data fields""" self.packetlength = 0x0a self.packettype = 0x14 self.subtype = subtype self.seqnbr = seqnbr self.id_combined = id_combined self.id1 = id_combined >> 16 self.id2 = id_combined >> 8 & 0xff self.id3 = id_combined & 0xff self.unitcode = unitcode self.cmnd = cmnd self.level = level self.rssi_byte = 0 self.rssi = 0 
self.data = bytearray([self.packetlength, self.packettype, self.subtype, self.seqnbr, self.id1, self.id2, self.id3, self.unitcode, self.cmnd, self.level, self.rssi_byte]) self._set_strings() def _set_strings(self): """Translate loaded numeric values into convenience strings""" self.id_string = "{0:06x}:{1}".format(self.id_combined, self.unitcode) if self.subtype in self.TYPES: self.type_string = self.TYPES[self.subtype] else: #Degrade nicely for yet unknown subtypes self.type_string = self._UNKNOWN_TYPE.format(self.packettype, self.subtype) if self.cmnd is not None: if self.subtype == 0x00 and self.cmnd in self.COMMANDS_00: self.cmnd_string = self.COMMANDS_00[self.cmnd] elif self.subtype == 0x01 and self.cmnd in self.COMMANDS_01: self.cmnd_string = self.COMMANDS_01[self.cmnd] elif self.subtype == 0x02 and self.cmnd in self.COMMANDS_02_04: self.cmnd_string = self.COMMANDS_02_04[self.cmnd] elif self.subtype == 0x03 and self.cmnd in self.COMMANDS_03: self.cmnd_string = self.COMMANDS_03[self.cmnd] elif self.subtype == 0x04 and self.cmnd in self.COMMANDS_02_04: self.cmnd_string = self.COMMANDS_02_04[self.cmnd] elif self.subtype >= 0x05 and self.cmnd in self.COMMANDS_XX: self.cmnd_string = self.COMMANDS_XX[self.cmnd] else: self.cmnd_string = self._UNKNOWN_CMND.format(self.cmnd) ############################################################################### # Lighting6 class ############################################################################### class Lighting6(Packet): """ Data class for the Lighting6 packet type """ TYPES = {0x00: 'Blyss', } """ Mapping of numeric subtype values to strings, used in type_string """ COMMANDS = {0x00: 'On', 0x01: 'Off', 0x02: 'Group on', 0x03: 'Group off', } """ Mapping of command numeric values to strings, used for cmnd_string """ def __str__(self): return ("Lighting6 [subtype={0}, seqnbr={1}, id={2}, cmnd={3}, " + "cmndseqnbr={4}, rssi={5}]") \ .format(self.type_string, self.seqnbr, self.id_string, self.cmnd_string, 
self.cmndseqnbr, self.rssi) def __init__(self): """Constructor""" super(Lighting6, self).__init__() self.id1 = None self.id2 = None self.id_combined = None self.groupcode = None self.unitcode = None self.cmnd = None self.cmndseqnbr = None self.rfu = None self.level = None self.cmnd_string = None def parse_id(self, subtype, id_string): """Parse a string id into individual components""" try: self.packettype = 0x15 self.subtype = subtype self.id_combined = int(id_string[:4], 16) self.id1 = self.id_combined >> 8 & 0xff self.id2 = self.id_combined & 0xff self.groupcode = ord(id_string[5]) self.unitcode = int(id_string[6:]) self._set_strings() except: raise ValueError("Invalid id_string") if self.id_string != id_string: raise ValueError("Invalid id_string") def load_receive(self, data): """Load data from a bytearray""" self.data = data self.packetlength = data[0] self.packettype = data[1] self.subtype = data[2] self.seqnbr = data[3] self.id1 = data[4] self.id2 = data[5] self.id_combined = (self.id1 << 8) + self.id2 self.groupcode = data[6] self.unitcode = data[7] self.cmnd = data[8] self.cmndseqnbr = data[9] self.rfu = data[10] self.rssi_byte = data[11] self.rssi = self.rssi_byte >> 4 self._set_strings() def set_transmit(self, subtype, seqnbr, id_combined, groupcode, unitcode, cmnd, cmndseqnbr): """Load data from individual data fields""" self.packetlength = 0x0b self.packettype = 0x15 self.subtype = subtype self.seqnbr = seqnbr self.id_combined = id_combined self.id1 = id_combined >> 8 & 0xff self.id2 = id_combined & 0xff self.groupcode = groupcode self.unitcode = unitcode self.cmnd = cmnd self.cmndseqnbr = cmndseqnbr self.rfu = 0 self.rssi_byte = 0 self.rssi = 0 self.data = bytearray([self.packetlength, self.packettype, self.subtype, self.seqnbr, self.id1, self.id2, self.groupcode, self.unitcode, self.cmnd, self.cmndseqnbr, self.rfu, self.rssi_byte]) self._set_strings() def _set_strings(self): """Translate loaded numeric values into convenience strings""" 
self.id_string = "{0:04x}:{1}{2}".format(self.id_combined, chr(self.groupcode), self.unitcode) if self.subtype in self.TYPES: self.type_string = self.TYPES[self.subtype] else: #Degrade nicely for yet unknown subtypes self.type_string = self._UNKNOWN_TYPE.format(self.packettype, self.subtype) if self.cmnd is not None: if self.cmnd in self.COMMANDS: self.cmnd_string = self.COMMANDS[self.cmnd] else: self.cmnd_string = self._UNKNOWN_CMND.format(self.cmnd) ############################################################################### # SensorPacket class ############################################################################### class SensorPacket(Packet): """ Abstract superclass for all sensor related packets """ HUMIDITY_TYPES = {0x00: 'dry', 0x01: 'comfort', 0x02: 'normal', 0x03: 'wet', -1: 'unknown humidity'} """ Mapping of humidity types to string """ FORECAST_TYPES = {0x00: 'no forecast available', 0x01: 'sunny', 0x02: 'partly cloudy', 0x03: 'cloudy', 0x04: 'rain', -1: 'unknown forecast'} """ Mapping of forecast types to string """ def __init__(self): """Constructor""" super(SensorPacket, self).__init__() ############################################################################### # Temp class ############################################################################### class Temp(SensorPacket): """ Data class for the Temp1 packet type """ TYPES = {0x01: 'THR128/138, THC138', 0x02: 'THC238/268,THN132,THWR288,THRN122,THN122,AW129/131', 0x03: 'THWR800', 0x04: 'RTHN318', 0x05: 'La Crosse TX2, TX3, TX4, TX17', 0x06: 'TS15C', 0x07: 'Viking 02811', 0x08: 'La Crosse WS2300', 0x09: 'RUBiCSON', 0x0a: 'TFA 30.3133', } """ Mapping of numeric subtype values to strings, used in type_string """ def __str__(self): return ("Temp [subtype={0}, seqnbr={1}, id={2}, temp={3}, " + "battery={4}, rssi={5}]") \ .format(self.type_string, self.seqnbr, self.id_string, self.temp, self.battery, self.rssi) def __init__(self): """Constructor""" super(Temp, self).__init__() self.id1 = 
None self.id2 = None self.temphigh = None self.templow = None self.temp = None self.battery = None def load_receive(self, data): """Load data from a bytearray""" self.data = data self.packetlength = data[0] self.packettype = data[1] self.subtype = data[2] self.seqnbr = data[3] self.id1 = data[4] self.id2 = data[5] self.temphigh = data[6] self.templow = data[7] self.temp = float(((self.temphigh & 0x7f) << 8) + self.templow) / 10 if self.temphigh >= 0x80: self.temp = -self.temp self.rssi_byte = data[8] self.battery = self.rssi_byte & 0x0f self.rssi = self.rssi_byte >> 4 self._set_strings() def _set_strings(self): """Translate loaded numeric values into convenience strings""" self.id_string = "{0:02x}:{1:02x}".format(self.id1, self.id2) if self.subtype in self.TYPES: self.type_string = self.TYPES[self.subtype] else: #Degrade nicely for yet unknown subtypes self.type_string = self._UNKNOWN_TYPE.format(self.packettype, self.subtype) ############################################################################### # Humid class ############################################################################### class Humid(SensorPacket): """ Data class for the Humid packet type """ TYPES = {0x01: 'LaCrosse TX3', 0x02: 'LaCrosse WS2300', } """ Mapping of numeric subtype values to strings, used in type_string """ def __str__(self): return ("Humid [subtype={0}, seqnbr={1}, id={2}, " + "humidity={3}, humidity_status={4}, battery={5}, rssi={6}]") \ .format(self.type_string, self.seqnbr, self.id_string, self.humidity, self.humidity_status, self.battery, self.rssi) def __init__(self): """Constructor""" super(Humid, self).__init__() self.id1 = None self.id2 = None self.humidity = None self.humidity_status = None self.humidity_status_string = None self.battery = None def load_receive(self, data): """Load data from a bytearray""" self.data = data self.packetlength = data[0] self.packettype = data[1] self.subtype = data[2] self.seqnbr = data[3] self.id1 = data[4] self.id2 = data[5] 
self.humidity = data[6] self.humidity_status = data[7] self.rssi_byte = data[8] self.battery = self.rssi_byte & 0x0f self.rssi = self.rssi_byte >> 4 self._set_strings() def _set_strings(self): """Translate loaded numeric values into convenience strings""" self.id_string = "{0:02x}:{1:02x}".format(self.id1, self.id2) if self.subtype in self.TYPES: self.type_string = self.TYPES[self.subtype] else: #Degrade nicely for yet unknown subtypes self.type_string = self._UNKNOWN_TYPE.format(self.packettype, self.subtype) if self.humidity_status in self.HUMIDITY_TYPES: self.humidity_status_string = \ self.HUMIDITY_TYPES[self.humidity_status] else: self.humidity_status_string = self.HUMIDITY_TYPES[-1] ############################################################################### # TempHumid class ############################################################################### class TempHumid(SensorPacket): """ Data class for the TempHumid packet type """ TYPES = {0x01: 'THGN122/123, THGN132, THGR122/228/238/268', 0x02: 'THGR810, THGN800', 0x03: 'RTGR328', 0x04: 'THGR328', 0x05: 'WTGR800', 0x06: 'THGR918/928, THGRN228, THGN500', 0x07: 'TFA TS34C, Cresta', 0x08: 'WT260,WT260H,WT440H,WT450,WT450H', 0x09: 'Viking 02035,02038', 0x0a: 'Rubicson', } """ Mapping of numeric subtype values to strings, used in type_string """ def __str__(self): return ("TempHumid [subtype={0}, seqnbr={1}, id={2}, temp={3}, " + "humidity={4}, humidity_status={5}, battery={6}, rssi={7}]") \ .format(self.type_string, self.seqnbr, self.id_string, self.temp, self.humidity, self.humidity_status, self.battery, self.rssi) def __init__(self): """Constructor""" super(TempHumid, self).__init__() self.id1 = None self.id2 = None self.temphigh = None self.templow = None self.temp = None self.humidity = None self.humidity_status = None self.humidity_status_string = None self.battery = None def load_receive(self, data): """Load data from a bytearray""" self.data = data self.packetlength = data[0] self.packettype = 
data[1] self.subtype = data[2] self.seqnbr = data[3] self.id1 = data[4] self.id2 = data[5] self.temphigh = data[6] self.templow = data[7] self.temp = float(((self.temphigh & 0x7f) << 8) + self.templow) / 10 if self.temphigh >= 0x80: self.temp = -self.temp self.humidity = data[8] self.humidity_status = data[9] self.rssi_byte = data[10] self.battery = self.rssi_byte & 0x0f self.rssi = self.rssi_byte >> 4 self._set_strings() def _set_strings(self): """Translate loaded numeric values into convenience strings""" self.id_string = "{0:02x}:{1:02x}".format(self.id1, self.id2) if self.subtype in self.TYPES: self.type_string = self.TYPES[self.subtype] else: #Degrade nicely for yet unknown subtypes self.type_string = self._UNKNOWN_TYPE.format(self.packettype, self.subtype) if self.humidity_status in self.HUMIDITY_TYPES: self.humidity_status_string = \ self.HUMIDITY_TYPES[self.humidity_status] else: self.humidity_status_string = self.HUMIDITY_TYPES[-1] ############################################################################### # Baro class ############################################################################### class Baro(SensorPacket): """ Data class for the Baro packet type """ TYPES = {} """ Mapping of numeric subtype values to strings, used in type_string """ def __str__(self): return ("Baro [subtype={0}, seqnbr={1}, id={2}, baro={3}, " + "forecast={4}, battery={5}, rssi={6}]") \ .format(self.type_string, self.seqnbr, self.id_string, self.baro, self.forecast, self.battery, self.rssi) def __init__(self): """Constructor""" super(Baro, self).__init__() self.id1 = None self.id2 = None self.baro1 = None self.baro2 = None self.baro = None self.forecast = None self.forecast_string = None self.battery = None def load_receive(self, data): """Load data from a bytearray""" self.data = data self.packetlength = data[0] self.packettype = data[1] self.subtype = data[2] self.seqnbr = data[3] self.id1 = data[4] self.id2 = data[5] self.baro1 = data[6] self.baro2 = data[7] 
self.baro = (self.baro1 << 8) + self.baro2 self.forecast = data[8] self.rssi_byte = data[9] self.battery = self.rssi_byte & 0x0f self.rssi = self.rssi_byte >> 4 self._set_strings() def _set_strings(self): """Translate loaded numeric values into convenience strings""" self.id_string = "{0:02x}:{1:02x}".format(self.id1, self.id2) if self.subtype in self.TYPES: self.type_string = self.TYPES[self.subtype] else: #Degrade nicely for yet unknown subtypes self.type_string = self._UNKNOWN_TYPE.format(self.packettype, self.subtype) if self.forecast in self.FORECAST_TYPES: self.forecast_string = self.FORECAST_TYPES[self.forecast] else: self.forecast_string = self.FORECAST_TYPES[-1] ############################################################################### # TempHumidBaro class ############################################################################### class TempHumidBaro(SensorPacket): """ Data class for the TempHumidBaro packet type """ TYPES = {0x01: 'BTHR918', 0x02: 'BTHR918N, BTHR968', } """ Mapping of numeric subtype values to strings, used in type_string """ def __str__(self): return ("TempHumidBaro [subtype={0}, seqnbr={1}, id={2}, temp={3}, " + "humidity={4}, humidity_status={5}, baro={6}, forecast={7}, " + "battery={8}, rssi={9}]") \ .format(self.type_string, self.seqnbr, self.id_string, self.temp, self.humidity, self.humidity_status, self.baro, self.forecast, self.battery, self.rssi) def __init__(self): """Constructor""" super(TempHumidBaro, self).__init__() self.id1 = None self.id2 = None self.temphigh = None self.templow = None self.temp = None self.humidity = None self.humidity_status = None self.humidity_status_string = None self.baro1 = None self.baro2 = None self.baro = None self.forecast = None self.forecast_string = None self.battery = None def load_receive(self, data): """Load data from a bytearray""" self.data = data self.packetlength = data[0] self.packettype = data[1] self.subtype = data[2] self.seqnbr = data[3] self.id1 = data[4] self.id2 = 
data[5] self.temphigh = data[6] self.templow = data[7] self.temp = float(((self.temphigh & 0x7f) << 8) + self.templow) / 10 if self.temphigh >= 0x80: self.temp = -self.temp self.humidity = data[8] self.humidity_status = data[9] self.baro1 = data[10] self.baro2 = data[11] self.baro = (self.baro1 << 8) + self.baro2 self.forecast = data[12] self.rssi_byte = data[13] self.battery = self.rssi_byte & 0x0f self.rssi = self.rssi_byte >> 4 self._set_strings() def _set_strings(self): """Translate loaded numeric values into convenience strings""" self.id_string = "{0:02x}:{1:02x}".format(self.id1, self.id2) if self.subtype in self.TYPES: self.type_string = self.TYPES[self.subtype] else: #Degrade nicely for yet unknown subtypes self.type_string = self._UNKNOWN_TYPE.format(self.packettype, self.subtype) if self.humidity_status in self.HUMIDITY_TYPES: self.humidity_status_string = \ self.HUMIDITY_TYPES[self.humidity_status] else: self.humidity_status_string = self.HUMIDITY_TYPES[-1] if self.forecast in self.FORECAST_TYPES: self.forecast_string = self.FORECAST_TYPES[self.forecast] else: self.forecast_string = self.FORECAST_TYPES[-1] ############################################################################### # Rain class ############################################################################### class Rain(SensorPacket): TYPES = { 0x01: "RGR126/682/918", 0x02: "PCR800", 0x03: "TFA", 0x04: "UPM RG700", 0x05: "WS2300", 0x06: "La Crosse TX5" } def __str__(self): return ("Rain [subtype={0}, seqnbr={1}, id={2}, rainrate={3}, " + "raintotal={4}, battery={5}, rssi={6}]") \ .format(self.type_string, self.seqnbr, self.id_string, self.rainrate, self.raintotal, self.battery, self.rssi) def __init__(self): """Constructor""" super(Rain, self).__init__() self.id1 = None self.id2 = None self.rainrate1 = None self.rainrate2 = None self.rainrate = None self.raintotal1 = None self.raintotal2 = None self.raintotal3 = None self.raintotal = None self.battery = None def load_receive(self, 
data): """Load data from a bytearray""" self.data = data self.packetlength = data[0] self.packettype = data[1] self.subtype = data[2] self.seqnbr = data[3] self.id1 = data[4] self.id2 = data[5] self.rainrate1 = data[6] self.rainrate2 = data[7] self.rainrate = (self.rainrate1 << 8) + self.rainrate2 if self.subtype == 2: self.rainrate = float(self.rainrate) / 100 self.raintotal1 = data[8] self.raintotal2 = data[9] self.raintotal3 = data[10] self.raintotal = float((self.raintotal1 << 16) + (self.raintotal2 << 8) + self.raintotal3) / 10 self.rssi_byte = data[11] self.battery = self.rssi_byte & 0x0f self.rssi = self.rssi_byte >> 4 self._set_strings() def _set_strings(self): """Translate loaded numeric values into convenience strings""" self.id_string = "{0:02x}:{1:02x}".format(self.id1, self.id2) if self.subtype in self.TYPES: self.type_string = self.TYPES[self.subtype] else: #Degrade nicely for yet unknown subtypes self.type_string = self._UNKNOWN_TYPE.format(self.packettype, self.subtype) ############################################################################### # Wind class ############################################################################### class Wind(SensorPacket): """ Data class for the Wind packet type """ TYPES = {0x01: 'WTGR800', 0x02: 'WGR800', 0x03: 'STR918, WGR918, WGR928', 0x04: 'TFA', 0x05: 'UPM WDS500', 0x06: 'WS2300', } """ Mapping of numeric subtype values to strings, used in type_string """ def __str__(self): return ("Wind [subtype={0}, seqnbr={1}, id={2}, direction={3}, " + "average_speed={4}, gust={5}, temperature={6}, chill={7}, " + "battery={8}, rssi={9}]") \ .format(self.type_string, self.seqnbr, self.id_string, self.direction, self.average_speed, self.gust, self.temperature, self.chill, self.battery, self.rssi) def __init__(self): """Constructor""" super(Wind, self).__init__() self.id1 = None self.id2 = None self.direction = None self.average_speed = None self.gust = None self.temperature = None self.chill = None self.battery = 
None self.rssi = None def load_receive(self, data): """Load data from a bytearray""" self.data = data self.packetlength = data[0] self.packettype = data[1] self.subtype = data[2] self.seqnbr = data[3] self.id1 = data[4] self.id2 = data[5] self.direction = data[6] * 256 + data[7] self.average_speed = data[8] * 256.0 + data[9] / 10.0 self.gust = data[10] * 256.0 + data[11] / 10.0 self.temperature = (-1 * (data[12] >> 7)) * ( (data[12] & 0x7f) * 256.0 + data[13]) / 10.0 self.chill = (-1 * (data[14] >> 7)) * ( (data[14] & 0x7f) * 256.0 + data[15]) / 10.0 if self.subtype == 0x03: self.battery = data[16] + 1 * 10 else: self.rssi_byte = data[16] self.battery = self.rssi_byte & 0x0f self.rssi = self.rssi_byte >> 4 self._set_strings() def _set_strings(self): """Translate loaded numeric values into convenience strings""" self.id_string = "{0:02x}:{1:02x}".format(self.id1, self.id2) if self.subtype in self.TYPES: self.type_string = self.TYPES[self.subtype] else: #Degrade nicely for yet unknown subtypes self.type_string = self._UNKNOWN_TYPE.format(self.packettype, self.subtype)
This is an introductory sewing kit for children, a beautiful gift for those who have an interest in fashion and learning to sew. The Friends Sewing Kit contains 4 pictures for children to sew, through the ready-punched holes. Contains: 4 picture cards, 4 lengths of wool, 2 needle threaders, 1 round-ended needle, and an instruction booklet.
import math
import pickle
import sys

import numpy as np
import numba as nb

from visnav.algo import tools
from visnav.iotools import objloader
from visnav.settings import *

# Example argument sets:
# data/ryugu+tex-d1-80k.obj data/ryugu+tex-d1-16k.obj data/ryugu+tex-d1-16k.nsm
# data/ryugu+tex-d1-80k.obj data/ryugu+tex-d1-4k.obj data/ryugu+tex-d1-4k.nsm
# data/ryugu+tex-d1-80k.obj data/ryugu+tex-d1-1k.obj data/ryugu+tex-d1-1k.nsm
# data/ryugu+tex-d2-80k.obj data/ryugu+tex-d2-16k.obj data/ryugu+tex-d2-16k.nsm
# data/ryugu+tex-d2-80k.obj data/ryugu+tex-d2-4k.obj data/ryugu+tex-d2-4k.nsm
# data/ryugu+tex-d2-80k.obj data/ryugu+tex-d2-1k.obj data/ryugu+tex-d2-1k.nsm
# data/67p+tex-80k.obj data/67p+tex-1k.obj data/67p+tex-1k.nsm
# data/67p+tex-80k.obj data/67p+tex-4k.obj data/67p+tex-4k.nsm
# data/67p+tex-80k.obj data/67p+tex-16k.obj data/67p+tex-16k.nsm

if __name__ == '__main__':
    if False:
        # Manual sanity check of poly_line_intersect; never runs normally.
        res = tools.poly_line_intersect(((0, 0, 1), (0, 1, 1), (1, 0, 1)),
                                        ((0, 0, 0), (.3, .7, 1)))
        print('%s' % res)
        quit()

    assert len(sys.argv) == 4, \
        'USAGE: %s [full-res-model] [target-model] [output]' % sys.argv[0]

    full_res_model = os.path.join(BASE_DIR, sys.argv[1])
    infile = os.path.join(BASE_DIR, sys.argv[2])
    outfile = os.path.join(BASE_DIR, sys.argv[3])
    sc = 1000  # bennu in meters, ryugu & 67P in km

    # Load the full-resolution and decimated shape models.
    obj_fr = objloader.ShapeModel(fname=full_res_model)
    obj = objloader.ShapeModel(fname=infile)

    # Measure the deviation of every full-res vertex from the low-res model.
    timer = tools.Stopwatch()
    timer.start()
    devs = tools.point_cloud_vs_model_err(np.array(obj_fr.vertices), obj)
    timer.stop()
    # doesnt work: tools.intersections.parallel_diagnostics(level=4)

    # Robust statistics: median plus percentile spreads of |dev - median|.
    p50 = np.median(devs)
    p68, p95, p99 = np.percentile(np.abs(devs - p50), (68, 95, 99.7))

    # Mean/std computed after discarding deviations beyond the 95th pctile.
    inliers = np.abs(devs - p50) < p95
    clean_devs = devs[inliers]
    dev_mean = np.mean(clean_devs)
    dev_std = np.std(clean_devs)

    stats = (
        timer.elapsed / len(obj_fr.vertices),
        dev_mean, p50,
        dev_std * 1, p68,
        dev_std * 2, p95,
        dev_std * 3, p99,
    )
    print('\n\n(%.2fms/vx) dev mean %.6fm/%.6fm, std %.6fm/%.6fm, '
          '2s %.6fm/%.6fm, 3s %.6fm/%.6fm'
          % tuple(map(lambda x: sc * x, stats)))

    with open(outfile, 'wb') as fh:
        pickle.dump((obj.as_dict(), dev_mean), fh, -1)
Renault defied its partner firm, Nissan, and the French government on Tuesday night as its board decided to retain Carlos Ghosn as its chairman and chief executive in spite of his shock arrest by Japanese prosecutors. Renault’s board promoted Thierry Bolloré to deputy chief executive to head the company, but said the appointment was only made on a “temporary basis” in Ghosn’s absence. Ghosn, the head of the Renault, Nissan and Mitsubishi car manufacturing alliance, is chairman and chief executive of Renault. He is also chairman of Nissan and Mitsubishi. The actions of Renault’s board stand in stark contrast to those of its partners in the alliance, both of which quickly said they would remove Ghosn from their boards after Nissan said he had understated his income on financial statements. Renault asked Nissan to share all of the evidence against Ghosn “on the basis of the principles of transparence, trust and mutual respect set forth in the Alliance Charter”. Bolloré had written on Tuesday morning that Ghosn still had the “full support” of senior executives in a memo to staff published on an internal website. Nissan said Ghosn was exposed by a whistleblower in a development that shocked the automotive industry. A highly influential figure in the sector in recent decades, the French-Brazilian executive forged an alliance between France’s Renault and Japan’s Nissan and Mitsubishi that created the world’s second largest car manufacturer. Osamu Masuko, Mitsubishi’s chief executive, said on Tuesday he thought the three companies in the alliance would appoint separate new chairs because there was no executive of Ghosn’s calibre who could take on the combined role. “I don’t think there is anyone else on earth like Ghosn who could run Renault, Nissan and Mitsubishi,” Masuko told reporters in Tokyo. The split between the alliance partners will raise questions about whether the companies will continue to pursue closer integration. 
The Financial Times reported on Tuesday that Nissan board members had tried to block a plan, recently pursued by Ghosn, to merge fully. Renault said it remained “particularly focused on the consolidation of the Renault Nissan Mitsubishi alliance”. Ghosn’s downfall was expected to trigger a battle between Japan and France for control of the alliance, which has an unorthodox governance structure. Renault owns 43.4% of Nissan, while Nissan owns 15% of Renault and 34% of Mitsubishi. Hiroto Saikawa, Nissan’s chief executive, has previously expressed his opposition to a full merger between his firm and Renault. Saikawa appeared to distance himself from Ghosn’s reign in a press conference announcing his arrest on Monday. Bolloré is perceived by analysts as a capable replacement for Ghosn at Renault, although it remains unclear if he will lead the alliance. Bolloré, a 55-year-old French national, started his career in 1990 at tyre maker, Michelin. He joined Renault in 2012, and was appointed chief operating officer in February this year. Shares in Nissan plunged in Tokyo on Tuesday, ending down almost 5.5% and pulling the Nikkei index to a three-week low. Renault shares fell another 1.2%, after losing more than 8% on Monday. Japanese prosecutors confirmed on Tuesday that they are investigating whether Ghosn and another senior Nissan executive, Greg Kelly, understated Ghosn’s income over five years from 2010. They are accused of reporting only half of his actual salary of almost 10bn yen (£69m) during the period. Ghosn is also accused of personal use of company assets. Japan’s public broadcaster NHK reported that Nissan paid “huge sums” to buy luxury houses for Ghosn in four cities around the world. The properties, in Rio de Janeiro, Beirut, Paris and Amsterdam, were acquired “without any legitimate business reason”, it claimed.
#!/usr/bin/env python
import os
import sys

import connect_client

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Shortcut: `python setup.py publish` builds and uploads a source dist.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()

packages = [
    'connect_client',
    'connect_client.tests',
]

requires = [
    'shams==0.0.2',
    'Django>=1.6',
]

with open('README.md') as f:
    readme = f.read()
with open('LICENSE') as f:
    license_text = f.read()

setup(
    name='connect_client',
    version=connect_client.__version__,
    description='Heroku Connect client django app',
    long_description=readme,
    author='David Gouldin',
    author_email='dgouldin@heroku.com',
    url='https://github.com/heroku/django-heroku-connect',
    packages=packages,
    package_data={'': ['LICENSE']},
    package_dir={'connect_client': 'connect_client'},
    include_package_data=True,
    install_requires=requires,
    license=license_text,
    zip_safe=False,
    classifiers=(
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
    ),
)
This is a snippet from the original CML Pro dossier published on 5-11-2018. ROKU beat earnings estimates, but the earnings results were the smaller story. Roku (NASDAQ:ROKU) is on the verge of becoming the operating system of streaming video - and that is a massive opportunity. ROKU is a Spotlight Top Pick and we cover the broad, far-reaching bullish thesis in the Top Pick dossier The Tech Gem Looking to Dominate Streaming Video. ROKU is building a business based on users and if we measured them as a cable provider, as of right now, they would be the third largest cable provider in the country behind just Comcast and AT&T -- that's how many people and how much content they serve, already. Further, the number of accounts for ROKU rose 47% while Comcast and AT&T are essentially flat. All of this is driven by the large secular shift by consumers to streaming video and away from linear TV. And then straight to SVOD revenue forecasts. The various SVOD content providers are in a war - to buy content, to buy users, to keep users, to differentiate. Netflix, Amazon Prime Video and Hulu are at war with each other, as they are with other over the top (OTT) video services like those coming from Apple, Google (YouTube), Disney, and many others. That battle doesn't interest us - what we are after is the operating system, the guts, that will house all of it. And this is where Roku exists. Each of these over the top (OTT) content providers is available with Roku hardware or software. The idea behind the business is to grow scale -- to grow active accounts, and to become the operating system of streaming TV. Yes, their goal, their future, is to be what Microsoft was to PCs and what Apple is to smartphones -- the platform, the operating system, for the booming industry that is Streaming Video on Demand (SVOD). Now, let's turn to the earnings results, and the information we gathered from the call that goes well beyond the numbers.
* Revenue: $136.6 million (up 36% in the year-ago period) vs. estimates of $127.5 million. * EPS: -$0.07 vs estimates of -$0.16. The real story, though, goes well beyond these headline numbers. Remember, the company is purposefully lowering prices on its hardware to grow its user base and platform reach. * Roku TV is the #1 licensed TV operating system in the U.S. * One in four smart TVs sold in the U.S. were Roku TVs up from one in five in 2017. Today, Roku offers one of the broadest selections of direct-to-consumer services, including Sling TV, DirecTV Now, PlayStation Vue, Hulu Live, YouTube TV, fuboTV and Philo. So, the business model then is based on platform revenue from accounts, of which about two-thirds is ad revenue. The only way for that to work is if (1) the user base balloons and (2) the user base is more engaged. Both are happening at staggering rates. We believe virtually every TV OEM will eventually need to license a TV OS, as consumers shift to smart TVs with 4K displays, and as OEMs focus on both cutting costs and boosting customer satisfaction. * Active accounts rose 47% year over year to 20.8 million at quarter end. * The company saw 2.8 hours of streaming hours per active account during the quarter. * Streaming Hours: Roku streamed 5.1 billion hours of content in the quarter, up 56% from the prior year. * Average Revenue Per User (ARPU) rose 50% year over year to $15.07. * Platform revenue rose 106% year over year to $75.1 million. * Gross profit rose 62% year over year to $63.1 million. * Platform gross profit increased 90% year over year. But there is even more going on -- and it's gigantic. The company said that nearly half of its roughly 21 million active users have cut the cord or have never had a traditional pay TV subscription. Stop for a moment and think about that. That means any kind of marketing that is supposed to come through TV ads simply cannot be reached through linear TV -- it must go through ROKU. 
The CEO went further to say that "according to Nielsen, 10% of 18 to 34-year-olds in the U.S. are only reachable on the Roku platform in the living room." And there's more. A television advertisement, as we all have seen, is broad and can be wildly unfocused. For example, perhaps we watch TV and see an ad for a new Ford truck -- what percentage of the viewing population has an interest in a truck? The answer is, we don't know. The ad companies use Nielsen ratings and demographic data and then the stations try to sell their ads to the right "type" of advertiser. Every advertisement on ROKU to its users is custom for that individual user. That's right. Not only are tens of millions of people totally unreachable by traditional TV, even the ones that are, if they are on ROKU, they get personalized ads. That reminds me of two other companies that did this to create two of the six largest companies in the world: Google and Facebook. * Facebook users spend 35 minutes on average on the platform, although the United States is higher, perhaps closer to 50 minutes. * YouTube users spend upwards of 40 minutes a day on the platform. * ROKU saw 2.8 hours of streaming hours per active account during the quarter. Yes -- Facebook and YouTube combined have about half the total daily viewership per user as ROKU. And just to make sure that math really holds up, we did some back of the envelope calculations. - ROKU streamed 5,100,000,000 hours of content. - ROKU ended the quarter with 21,000,000 active users. That comes out to 243 hours per active user per quarter. - Now take that and divide it by the 90 days in a quarter and we get... 2.7 hours per day per user. The company reported 2.8 hours, but we have come close enough to verifying that the numbers play out. Of course, the hours spent on ROKU aren't exactly the same as hours spent on Facebook, or YouTube, but my goodness, this company is saying out loud, with actual data, it is becoming a behemoth.
The Roku Channel is now a top 15 channel on Roku devices based on hours streamed - and the #3 free ad-supported channel on the Roku platform. In Q1, we expanded the content syndicated from channel partners and added more movies and TV shows from Lionsgate, MGM, Sony Pictures Entertainment, Warner Bros. and other studios. Recently, we announced the addition of live news from ABC News, Cheddar, People TV and others. Owning a stock for the sole purpose of a takeover possibility is, in our opinion, a terrible idea. Ownership in an asset that is "supposed" to appreciate must be based on a bullish narrative that stands on its own. But, we are also starting to see a narrative where this firm may become a takeover candidate. For example -- Netflix has no ecosystem at all -- in one fell swoop it could have one, with an advertising business. Facebook, which has no hedge against its own platform, could suddenly challenge YouTube with a powerful content and ad business and jump into living rooms, after being relegated to just the smartphone. There are other examples -- but if ROKU keeps growing at this pace, and the other metrics continue to support this idea of ROKU fulfilling its promise to become the leading operating system for streaming TVs (like Apple has become for smartphones, like Microsoft became for PCs), then it's quite possible it becomes a takeover target. The author is long shares of ROKU at the time of writing and publication.
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import six

from pyarrow._parquet import (ParquetReader, FileMetaData,  # noqa
                              RowGroupMetaData, Schema, ParquetWriter)
import pyarrow._parquet as _parquet  # noqa
from pyarrow.table import concat_tables

# Sidecar files that are not data files in a Parquet dataset directory.
EXCLUDED_PARQUET_PATHS = {'_metadata', '_common_metadata', '_SUCCESS'}


class ParquetFile(object):
    """
    Open a Parquet binary file for reading

    Parameters
    ----------
    source : str or pyarrow.io.NativeFile
        Readable source. For passing Python file objects or byte buffers,
        see pyarrow.io.PythonFileInterface or pyarrow.io.BufferReader.
    metadata : ParquetFileMetadata, default None
        Use existing metadata object, rather than reading from file.
    """
    def __init__(self, source, metadata=None):
        self.reader = ParquetReader()
        self.reader.open(source, metadata=metadata)

    @property
    def metadata(self):
        return self.reader.metadata

    @property
    def schema(self):
        return self.metadata.schema

    def read(self, nrows=None, columns=None, nthreads=1):
        """
        Read a Table from Parquet format

        Parameters
        ----------
        columns: list
            If not None, only these columns will be read from the file.
        nthreads : int, default 1
            Number of columns to read in parallel. If > 1, requires that the
            underlying file source is threadsafe

        Returns
        -------
        pyarrow.table.Table
            Content of the file as a table (of columns)
        """
        if nrows is not None:
            raise NotImplementedError("nrows argument")

        if columns is None:
            column_indices = None
        else:
            # Translate column names to positional indices for the reader.
            column_indices = [self.reader.column_name_idx(name)
                              for name in columns]
        return self.reader.read(column_indices=column_indices,
                                nthreads=nthreads)


def read_table(source, columns=None, nthreads=1, metadata=None):
    """
    Read a Table from Parquet format

    Parameters
    ----------
    source: str or pyarrow.io.NativeFile
        Location of Parquet dataset. If a string passed, can be a single file
        name or directory name. For passing Python file objects or byte
        buffers, see pyarrow.io.PythonFileInterface or
        pyarrow.io.BufferReader.
    columns: list
        If not None, only these columns will be read from the file.
    nthreads : int, default 1
        Number of columns to read in parallel. Requires that the underlying
        file source is threadsafe
    metadata : FileMetaData
        If separately computed

    Returns
    -------
    pyarrow.Table
        Content of the file as a table (of columns)
    """
    # Imported lazily to avoid a circular import at module load time.
    from pyarrow.filesystem import LocalFilesystem

    if isinstance(source, six.string_types):
        fs = LocalFilesystem.get_instance()
        if fs.isdir(source):
            # Directory: read the whole dataset through the filesystem layer.
            return fs.read_parquet(source, columns=columns,
                                   metadata=metadata)

    pf = ParquetFile(source, metadata=metadata)
    return pf.read(columns=columns, nthreads=nthreads)


def read_multiple_files(paths, columns=None, filesystem=None, nthreads=1,
                        metadata=None, schema=None):
    """
    Read multiple Parquet files as a single pyarrow.Table

    Parameters
    ----------
    paths : List[str]
        List of file paths
    columns : List[str]
        Names of columns to read from the file
    filesystem : Filesystem, default None
        If nothing passed, paths assumed to be found in the local on-disk
        filesystem
    nthreads : int, default 1
        Number of columns to read in parallel. Requires that the underlying
        file source is threadsafe
    metadata : pyarrow.parquet.FileMetaData
        Use metadata obtained elsewhere to validate file schemas
    schema : pyarrow.parquet.Schema
        Use schema obtained elsewhere to validate file schemas. Alternative
        to metadata parameter

    Returns
    -------
    pyarrow.Table
        Content of the file as a table (of columns)
    """
    if filesystem is None:
        def open_file(path, meta=None):
            return ParquetFile(path, metadata=meta)
    else:
        def open_file(path, meta=None):
            return ParquetFile(filesystem.open(path, mode='rb'),
                               metadata=meta)

    if len(paths) == 0:
        raise ValueError('Must pass at least one file path')

    # Establish the reference schema to validate every file against.
    if metadata is None and schema is None:
        schema = open_file(paths[0]).schema
    elif schema is None:
        schema = metadata.schema

    # Verify schemas are all equal
    all_file_metadata = []
    for path in paths:
        file_metadata = open_file(path).metadata
        if not schema.equals(file_metadata.schema):
            raise ValueError('Schema in {0} was different. {1!s} vs {2!s}'
                             .format(path, file_metadata.schema, schema))
        all_file_metadata.append(file_metadata)

    # Read the tables
    tables = []
    for path, path_metadata in zip(paths, all_file_metadata):
        reader = open_file(path, meta=path_metadata)
        table = reader.read(columns=columns, nthreads=nthreads)
        tables.append(table)

    all_data = concat_tables(tables)
    return all_data


def write_table(table, sink, chunk_size=None, version='1.0',
                use_dictionary=True, compression='snappy'):
    """
    Write a Table to Parquet format

    Parameters
    ----------
    table : pyarrow.Table
    sink: string or pyarrow.io.NativeFile
    chunk_size : int
        The maximum number of rows in each Parquet RowGroup. As a default,
        we will write a single RowGroup per file.
    version : {"1.0", "2.0"}, default "1.0"
        The Parquet format version, defaults to 1.0
    use_dictionary : bool or list
        Specify if we should use dictionary encoding in general or only for
        some columns.
    compression : str or dict
        Specify the compression codec, either on a general basis or
        per-column.
    """
    writer = ParquetWriter(sink, use_dictionary=use_dictionary,
                           compression=compression, version=version)
    writer.write_table(table, row_group_size=chunk_size)
Zoomerang & SurveyMonkey, Together At Last. Advanced features & customization for power users. All the essentials to get you started for FREE.
from __future__ import absolute_import

import logging
import os
from email.parser import FeedParser

from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name

from pip._internal.cli.base_command import Command
from pip._internal.cli.status_codes import ERROR, SUCCESS

logger = logging.getLogger(__name__)


class ShowCommand(Command):
    """
    Show information about one or more installed packages.

    The output is in RFC-compliant mail header format.
    """
    name = 'show'
    usage = """
      %prog [options] <package> ..."""
    summary = 'Show information about installed packages.'
    ignore_require_venv = True

    def __init__(self, *args, **kw):
        super(ShowCommand, self).__init__(*args, **kw)
        self.cmd_opts.add_option(
            '-f', '--files',
            dest='files',
            action='store_true',
            default=False,
            help='Show the full list of installed files for each package.')

        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options, args):
        if not args:
            logger.warning('ERROR: Please provide a package name or names.')
            return ERROR
        query = args

        results = search_packages_info(query)
        if not print_results(
                results, list_files=options.files, verbose=options.verbose):
            return ERROR
        return SUCCESS


def search_packages_info(query):
    """
    Gather details from installed distributions. Print distribution name,
    version, location, and installed files. Installed files requires a
    pip generated 'installed-files.txt' in the distributions '.egg-info'
    directory.
    """
    installed = {}
    for dist in pkg_resources.working_set:
        installed[canonicalize_name(dist.project_name)] = dist

    query_names = [canonicalize_name(name) for name in query]

    for dist in [installed[pkg] for pkg in query_names if pkg in installed]:
        package = {
            'name': dist.project_name,
            'version': dist.version,
            'location': dist.location,
            'requires': [dep.project_name for dep in dist.requires()],
        }
        file_list = None
        metadata = None
        if isinstance(dist, pkg_resources.DistInfoDistribution):
            # RECORDs should be part of .dist-info metadatas
            if dist.has_metadata('RECORD'):
                lines = dist.get_metadata_lines('RECORD')
                paths = [line.split(',')[0] for line in lines]
                paths = [os.path.join(dist.location, p) for p in paths]
                file_list = [os.path.relpath(p, dist.location) for p in paths]

            if dist.has_metadata('METADATA'):
                metadata = dist.get_metadata('METADATA')
        else:
            # Otherwise use pip's log for .egg-info's
            if dist.has_metadata('installed-files.txt'):
                paths = dist.get_metadata_lines('installed-files.txt')
                paths = [os.path.join(dist.egg_info, p) for p in paths]
                file_list = [os.path.relpath(p, dist.location) for p in paths]

            if dist.has_metadata('PKG-INFO'):
                metadata = dist.get_metadata('PKG-INFO')

        if dist.has_metadata('entry_points.txt'):
            entry_points = dist.get_metadata_lines('entry_points.txt')
            package['entry_points'] = entry_points

        if dist.has_metadata('INSTALLER'):
            for line in dist.get_metadata_lines('INSTALLER'):
                if line.strip():
                    package['installer'] = line.strip()
                    break

        # @todo: Should pkg_resources.Distribution have a
        # `get_pkg_info` method?
        feed_parser = FeedParser()
        feed_parser.feed(metadata)
        pkg_info_dict = feed_parser.close()
        for key in ('metadata-version', 'summary',
                    'home-page', 'author', 'author-email', 'license'):
            package[key] = pkg_info_dict.get(key)

        # It looks like FeedParser cannot deal with repeated headers
        classifiers = []
        for line in metadata.splitlines():
            if line.startswith('Classifier: '):
                classifiers.append(line[len('Classifier: '):])
        package['classifiers'] = classifiers

        if file_list:
            package['files'] = sorted(file_list)
        yield package


def print_results(distributions, list_files=False, verbose=False):
    """
    Print the informations from installed distributions found.
    """
    results_printed = False
    for i, dist in enumerate(distributions):
        results_printed = True
        if i > 0:
            logger.info("---")

        name = dist.get('name', '')
        required_by = [
            pkg.project_name for pkg in pkg_resources.working_set
            if name in [required.name for required in pkg.requires()]
        ]

        logger.info("Name: %s", name)
        logger.info("Version: %s", dist.get('version', ''))
        logger.info("Summary: %s", dist.get('summary', ''))
        logger.info("Home-page: %s", dist.get('home-page', ''))
        logger.info("Author: %s", dist.get('author', ''))
        logger.info("Author-email: %s", dist.get('author-email', ''))
        logger.info("License: %s", dist.get('license', ''))
        logger.info("Location: %s", dist.get('location', ''))
        logger.info("Requires: %s", ', '.join(dist.get('requires', [])))
        logger.info("Required-by: %s", ', '.join(required_by))

        if verbose:
            logger.info("Metadata-Version: %s",
                        dist.get('metadata-version', ''))
            logger.info("Installer: %s", dist.get('installer', ''))
            logger.info("Classifiers:")
            for classifier in dist.get('classifiers', []):
                logger.info("  %s", classifier)
            logger.info("Entry-points:")
            for entry in dist.get('entry_points', []):
                logger.info("  %s", entry.strip())
        if list_files:
            logger.info("Files:")
            for line in dist.get('files', []):
                logger.info("  %s", line.strip())
            if "files" not in dist:
                logger.info("Cannot locate installed-files.txt")
    return results_printed
There are a lot of us out there who are trying to make sure that our homes have everything that they need when it comes to taking care of privacy. But, there are many of us who just aren’t sure what we need to do when it comes to putting in something like a chain link fence Tampa. When you talk to fencing pros, you will find that they have a lot of tips that you can utilize in order to make sure that your fence is the best that it can be. Think about the fence that you want to get. Why do you want it? Do you want to have a privacy area that no one can see? Or would it be better for you to go ahead and get a fence that is just for your furry friends to go outside and hang out in? No matter what you may be trying to look for, there are plenty of options. You just have to make sure that you know what you need it for before you really do anything else with it. Take a look at the big picture and make sure that you can get everything that you need to take care of your goals. Not only is that going to help you to stay on top of things, but you will discover that there are a lot of ways to work toward everything that you need. Getting a good fence installation expert on your side to take care of your details is not only going to help you, but it will also guarantee that the job is going to be done correctly the first time. Take a look at the big picture, talk to the pros, and get a fence that makes sense for you.
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-04-25 06:50
from __future__ import unicode_literals

from django.db import migrations
from django.conf import settings


class Migration(migrations.Migration):
    """Create the CAMPUSonline final-thesis integration objects.

    Sets up a multicorn foreign table over ``F_ABSCHLUSSARBEIT``, a
    materialized view exposing it under ``campusonline_finalthesis``, and
    the indexes used for common lookups.

    ``ops`` holds (forward, reverse) SQL pairs; they are flattened into a
    single ``RunSQL`` below, with reverse statements applied in the
    opposite order so dependent objects are dropped first.
    """

    ops = [
        (
            '''
            CREATE FOREIGN TABLE "campusonline"."abschlussarbeiten" (
                ID numeric,
                STUDIENBEZEICHNUNG varchar,
                LETZTE_AENDERUNG timestamp,
                AUTOR_ID numeric,
                VORNAME_AUTOR varchar,
                NACHNAME_AUTOR varchar,
                AUTOREN varchar,
                TITEL varchar,
                KURZFASSUNG text,
                SEITEN_ANZAHL numeric,
                BETREUER_ID numeric,
                BETREUER varchar,
                ERSCHEINUNGSJAHR numeric,
                LINK varchar,
                WERK_TYP numeric,
                PUBLIZIERT varchar,
                ORGANISATION numeric,
                ORGANISATIONS_ID numeric
            )
            SERVER sqlalchemy
            OPTIONS (
                tablename 'F_ABSCHLUSSARBEIT',
                db_url '{}'
            );
            '''.format(settings.MULTICORN.get('campusonline')),
            '''
            DROP FOREIGN TABLE IF EXISTS "campusonline"."abschlussarbeiten";
            ''',
        ),
        (
            # Raw string: the SQL regex ';\s*' must reach PostgreSQL with a
            # literal backslash-s. In a non-raw literal '\s' is an invalid
            # escape sequence (DeprecationWarning, and a SyntaxWarning/error
            # on newer Python versions).
            r'''
            CREATE MATERIALIZED VIEW "public"."campusonline_finalthesis" AS
            SELECT
                aa.ID::integer AS id,
                aa.STUDIENBEZEICHNUNG AS study_designation,
                aa.LETZTE_AENDERUNG::timestamptz AS modified,
                s.STUD_NR::integer AS author_id,
                regexp_split_to_array(trim(both ' ' from aa.AUTOREN), ';\s*') AS authors,
                aa.TITEL AS title,
                aa.KURZFASSUNG AS abstract,
                aa.SEITEN_ANZAHL::integer AS pages,
                p.PERS_NR::integer AS tutor_id,
                aa.ERSCHEINUNGSJAHR::integer AS year,
                aa.LINK AS url,
                aa.PUBLIZIERT AS category,
                o.NR::integer AS organization_id
            FROM
                "campusonline"."abschlussarbeiten" aa
                INNER JOIN "campusonline"."stud" s ON aa.AUTOR_ID::integer = s.STUD_NR::integer
                INNER JOIN "campusonline"."personen" p ON aa.BETREUER_ID::integer = p.PERS_NR::integer
                INNER JOIN "campusonline"."organisationen" o ON aa.ORGANISATIONS_ID::integer = o.NR::integer
            WITH DATA;
            ''',
            '''
            DROP MATERIALIZED VIEW IF EXISTS "public"."campusonline_finalthesis";
            ''',
        ),
        (
            '''
            CREATE INDEX campusonline_finalthesis_id_idx
            ON "public"."campusonline_finalthesis" ("id");
            ''',
            '''
            DROP INDEX IF EXISTS campusonline_finalthesis_id_idx;
            ''',
        ),
        (
            '''
            CREATE INDEX campusonline_finalthesis_modified_idx
            ON "public"."campusonline_finalthesis" ("modified");
            ''',
            '''
            DROP INDEX IF EXISTS campusonline_finalthesis_modified_idx;
            ''',
        ),
        (
            '''
            CREATE INDEX campusonline_finalthesis_author_id_idx
            ON "public"."campusonline_finalthesis" ("author_id");
            ''',
            '''
            DROP INDEX IF EXISTS campusonline_finalthesis_author_id_idx;
            ''',
        ),
        (
            '''
            CREATE INDEX campusonline_finalthesis_tutor_id_idx
            ON "public"."campusonline_finalthesis" ("tutor_id");
            ''',
            '''
            DROP INDEX IF EXISTS campusonline_finalthesis_tutor_id_idx;
            ''',
        ),
        (
            '''
            CREATE INDEX campusonline_finalthesis_year_idx
            ON "public"."campusonline_finalthesis" ("year");
            ''',
            '''
            DROP INDEX IF EXISTS campusonline_finalthesis_year_idx;
            ''',
        ),
        (
            '''
            CREATE INDEX campusonline_finalthesis_organization_id_idx
            ON "public"."campusonline_finalthesis" ("organization_id");
            ''',
            '''
            DROP INDEX IF EXISTS campusonline_finalthesis_organization_id_idx;
            ''',
        ),
    ]

    dependencies = [
        ('campusonline', '0046_student_username'),
    ]

    operations = [
        migrations.RunSQL(
            [forward for forward, reverse in ops],
            [reverse for forward, reverse in reversed(ops)]
        )
    ]
If you are interested in becoming a nurse, it can be very helpful to join a nursing organization. These groups can provide you with support, networking opportunities and a mentor to help you on your career path. You don't have to wait until you graduate to join one of these organizations. A number of groups offer membership to students, and many provide scholarships and grants exclusively to members. Here are some organizations that you may want to consider joining if you are interested in becoming a nurse. HOSA is an organization designed to help children and young adults who are interested in entering the health care profession achieve their goals. The organization is open to individuals who are in middle and high school as well as those who are attending college. People who are already established in the field may join as mentors. The advantages of becoming a member of HOSA are the ability to interact with people who are already in the health care field, attend conferences with other like-minded individuals and compete for scholarships and awards. Founded in 1952, the National Student Nurses' Association was established to assist students enrolled in nursing programs at a variety of levels. The organization boasts more than 60,000 members in the United States, Guam, Puerto Rico and the U.S. Virgin Islands. Along with offering career assistance and mentoring, the NSNA provides a variety of scholarships to members; more than $330,000 in scholarships is awarded each year through the General Scholarship and Promise of Nursing programs. The American Association of Nurse Practitioners is an organization that has more than 205,000 members and is focused on advocating for nurse practitioners of all specialties. Membership is available for students and professionals, and students are given a variety of tools and opportunities to help them with their education and career. 
Student members have the ability to apply for scholarships and grants, and they are given access to a student loan refinancing service as well. Members are also able to use the organization's career service center and are given advance notice of job postings. According to the American Nurses Association, the organization represents the country's 3.4 million nurses, and it promotes safe and ethical work environments and advocates on a number of health care related issues. Membership is available to both professional nurses and students, and students receive a discount on the normal cost of membership. The benefits of belonging to the ANA include the ability to network with other nurses as well as access to the ANA's Leadership Institute and its Career Center. Professionals can also take advantage of savings on liability insurance. Founded in 1893, the National League for Nursing was the first nursing organization established in the United States, and it currently boasts more than 40,000 members. The NLN offers a variety of benefits to its members, including the ability to network with other established professionals and access to research grants, testing services and numerous opportunities related to professional development. It also provides accreditation services and is active in working to shape public policy to improve education and working environments for nurses. Membership is available to established nurses as well as graduate students enrolled in post-baccalaureate work. Whether someone is an established nurse or is still in school, there are a number of benefits to be gained from joining a nursing organization. While there is normally a cost associated with membership, career and education opportunities can more than make up for membership fees.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
import functools
import time

import msgpack
import msgpack.exceptions
import redis

from pyramid import viewderivers
from pyramid.interfaces import ISession, ISessionFactory
from zope.interface import implementer

import warehouse.utils.otp as otp
import warehouse.utils.webauthn as webauthn
from warehouse.cache.http import add_vary
from warehouse.utils import crypto
from warehouse.utils.msgpack import object_encode


def _invalid_method(method):
    # Wrap a dict method so that calling it raises via _error_message();
    # used below to make every operation on InvalidSession an error.
    @functools.wraps(method)
    def wrapped(self, *args, **kwargs):
        self._error_message()

    return wrapped


@implementer(ISession)
class InvalidSession(dict):
    """Placeholder session installed for views without uses_session=True.

    Every dict operation, attribute access, and the ``created`` property
    raises a RuntimeError pointing at the missing view option, so that
    accidental session use fails loudly instead of silently creating a
    session.
    """

    __contains__ = _invalid_method(dict.__contains__)
    __delitem__ = _invalid_method(dict.__delitem__)
    __getitem__ = _invalid_method(dict.__getitem__)
    __iter__ = _invalid_method(dict.__iter__)
    __len__ = _invalid_method(dict.__len__)
    __setitem__ = _invalid_method(dict.__setitem__)
    clear = _invalid_method(dict.clear)
    copy = _invalid_method(dict.copy)
    fromkeys = _invalid_method(dict.fromkeys)
    get = _invalid_method(dict.get)
    items = _invalid_method(dict.items)
    keys = _invalid_method(dict.keys)
    pop = _invalid_method(dict.pop)
    popitem = _invalid_method(dict.popitem)
    setdefault = _invalid_method(dict.setdefault)
    update = _invalid_method(dict.update)
    values = _invalid_method(dict.values)

    def _error_message(self):
        raise RuntimeError(
            "Cannot use request.session in a view without uses_session=True."
        )

    def __getattr__(self, name):
        self._error_message()

    @property
    def created(self):
        self._error_message()


def _changed_method(method):
    # Wrap a dict mutator so that it marks the session dirty (self.changed())
    # before delegating to the real dict implementation.
    @functools.wraps(method)
    def wrapped(self, *args, **kwargs):
        self.changed()
        return method(self, *args, **kwargs)

    return wrapped


@implementer(ISession)
class Session(dict):
    """Server-side session stored as a dict, persisted externally by
    SessionFactory. Tracks its own dirty state (``_changed``) so the
    factory knows when to write it back.
    """

    # Time before a user is asked to re-authenticate for sensitive actions.
    time_to_reauth = 30 * 60  # 30 minutes

    # Reserved keys used internally within the session data.
    _csrf_token_key = "_csrf_token"
    _flash_key = "_flash_messages"
    _totp_secret_key = "_totp_secret"
    _webauthn_challenge_key = "_webauthn_challenge"
    _reauth_timestamp_key = "_reauth_timestamp"

    # A number of our methods need to be decorated so that they also call
    # self.changed()
    __delitem__ = _changed_method(dict.__delitem__)
    __setitem__ = _changed_method(dict.__setitem__)
    clear = _changed_method(dict.clear)
    pop = _changed_method(dict.pop)
    popitem = _changed_method(dict.popitem)
    setdefault = _changed_method(dict.setdefault)
    update = _changed_method(dict.update)

    def __init__(self, data=None, session_id=None, new=True):
        # Brand new sessions don't have any data, so we'll just create an empty
        # dictionary for them.
        if data is None:
            data = {}

        # Initialize our actual dictionary here.
        super().__init__(data)

        # We need to track the state of our Session.
        self._sid = session_id
        self._changed = False
        self.new = new
        self.created = int(time.time())

        # We'll track all of the IDs that have been invalidated here
        self.invalidated = set()

    @property
    def sid(self):
        # Session id is generated lazily so brand-new, never-saved sessions
        # don't burn a token.
        if self._sid is None:
            self._sid = crypto.random_token()
        return self._sid

    def changed(self):
        self._changed = True

    def invalidate(self):
        """Drop all data and detach from the stored session id; the old id
        is remembered so the factory can delete it from the backing store.
        """
        self.clear()
        self.new = True
        self.created = int(time.time())
        self._changed = False

        # If the current session id isn't None we'll want to record it as one
        # of the ones that have been invalidated.
        if self._sid is not None:
            self.invalidated.add(self._sid)
            self._sid = None

    def should_save(self):
        return self._changed

    def record_auth_timestamp(self):
        # Stored as a POSIX timestamp; compared in needs_reauthentication().
        self[self._reauth_timestamp_key] = datetime.datetime.now().timestamp()
        self.changed()

    def needs_reauthentication(self):
        # Missing timestamp defaults to 0, i.e. "reauthenticate".
        reauth_timestamp = self.get(self._reauth_timestamp_key, 0)
        current_time = datetime.datetime.now().timestamp()

        return current_time - reauth_timestamp >= self.time_to_reauth

    # Flash Messages Methods
    def _get_flash_queue_key(self, queue):
        return ".".join(filter(None, [self._flash_key, queue]))

    def flash(self, msg, queue="", allow_duplicate=True):
        queue_key = self._get_flash_queue_key(queue)

        # If we're not allowing duplicates check if this message is already
        # in the queue, and if it is just return immediately.
        # NOTE(review): this indexes self[queue_key] directly, which raises
        # KeyError if the queue does not exist yet when allow_duplicate is
        # False — confirm callers never hit that combination first.
        if not allow_duplicate and msg in self[queue_key]:
            return

        self.setdefault(queue_key, []).append(msg)

    def peek_flash(self, queue=""):
        return self.get(self._get_flash_queue_key(queue), [])

    def pop_flash(self, queue=""):
        queue_key = self._get_flash_queue_key(queue)
        messages = self.get(queue_key, [])
        self.pop(queue_key, None)
        return messages

    # CSRF Methods
    def new_csrf_token(self):
        self[self._csrf_token_key] = crypto.random_token()
        return self[self._csrf_token_key]

    def get_csrf_token(self):
        token = self.get(self._csrf_token_key)
        if token is None:
            token = self.new_csrf_token()
        return token

    def get_totp_secret(self):
        # Generated lazily and cached in the session for the 2FA setup flow.
        totp_secret = self.get(self._totp_secret_key)
        if totp_secret is None:
            totp_secret = self[self._totp_secret_key] = otp.generate_totp_secret()

        return totp_secret

    def clear_totp_secret(self):
        self[self._totp_secret_key] = None

    def get_webauthn_challenge(self):
        # Generated lazily and cached in the session for WebAuthn ceremonies.
        webauthn_challenge = self.get(self._webauthn_challenge_key)
        if webauthn_challenge is None:
            self[self._webauthn_challenge_key] = webauthn.generate_webauthn_challenge()
            webauthn_challenge = self[self._webauthn_challenge_key]

        return webauthn_challenge

    def clear_webauthn_challenge(self):
        self[self._webauthn_challenge_key] = None
@implementer(ISessionFactory)
class SessionFactory:
    """Pyramid session factory backed by Redis.

    Session data is stored in Redis keyed by a random session id; only the
    (signed) session id travels in the cookie. Serialization is msgpack.
    """

    cookie_name = "session_id"
    # Sessions (cookie and Redis entry) expire after this many seconds.
    max_age = 12 * 60 * 60  # 12 hours

    def __init__(self, secret, url):
        self.redis = redis.StrictRedis.from_url(url)
        # Timestamped signature lets us enforce max_age on the cookie value.
        self.signer = crypto.TimestampSigner(secret, salt="session")

    def __call__(self, request):
        return self._process_request(request)

    def _redis_key(self, session_id):
        return "warehouse/session/data/{}".format(session_id)

    def _process_request(self, request):
        # Register a callback with the request so we can save the session once
        # it's finished.
        request.add_response_callback(self._process_response)

        # Load our session ID from the request.
        session_id = request.cookies.get(self.cookie_name)

        # If we do not have a session ID then we'll just use a new empty
        # session.
        if session_id is None:
            return Session()

        # Check to make sure we have a valid session id
        try:
            session_id = self.signer.unsign(session_id, max_age=self.max_age)
            session_id = session_id.decode("utf8")
        except crypto.BadSignature:
            return Session()

        # Fetch the serialized data from redis
        bdata = self.redis.get(self._redis_key(session_id))

        # If the session didn't exist in redis, we'll give the user a new
        # session.
        if bdata is None:
            return Session()

        # De-serialize our session data
        try:
            data = msgpack.unpackb(bdata, raw=False, use_list=True)
        except (msgpack.exceptions.UnpackException, msgpack.exceptions.ExtraData):
            # If the session data was invalid we'll give the user a new session
            return Session()

        # If we were able to load existing session data, load it into a
        # Session class
        session = Session(data, session_id, False)

        return session

    def _process_response(self, request, response):
        # If the request has an InvalidSession, then the view can't have
        # accessed the session, and we can just skip all of this anyways.
        if isinstance(request.session, InvalidSession):
            return

        # Check to see if the session has been marked to be deleted, if it has
        # been then we'll delete it, and tell our response to delete the
        # session cookie as well.
        if request.session.invalidated:
            for session_id in request.session.invalidated:
                self.redis.delete(self._redis_key(session_id))

            # Only drop the cookie if no replacement session will be saved
            # below (invalidate-then-repopulate keeps a cookie).
            if not request.session.should_save():
                response.delete_cookie(self.cookie_name)

        # Check to see if the session has been marked to be saved, generally
        # this means that the session data has been modified and thus we need
        # to store the new data.
        if request.session.should_save():
            # Save our session in Redis
            self.redis.setex(
                self._redis_key(request.session.sid),
                self.max_age,
                msgpack.packb(
                    request.session, default=object_encode, use_bin_type=True
                ),
            )

            # Send our session cookie to the client
            response.set_cookie(
                self.cookie_name,
                self.signer.sign(request.session.sid.encode("utf8")),
                max_age=self.max_age,
                httponly=True,
                secure=request.scheme == "https",
                samesite=b"lax",
            )


def session_view(view, info):
    """View deriver: give views with uses_session=True a Vary: Cookie
    header, and replace request.session with InvalidSession for all other
    (non-exception) views so accidental use fails loudly.
    """
    if info.options.get("uses_session"):
        # If we're using the session, then we'll just return the original view
        # with a small wrapper around it to ensure that it has a Vary: Cookie
        # header.
        return add_vary("Cookie")(view)
    elif info.exception_only:
        return view
    else:
        # If we're not using the session on this view, then we'll wrap the view
        # with a wrapper that just ensures that the session cannot be used.
        @functools.wraps(view)
        def wrapped(context, request):
            # This whole method is a little bit of an odd duck, we want to make
            # sure that we don't actually *access* request.session, because
            # doing so triggers the machinery to create a new session. So
            # instead we will dig into the request object __dict__ to
            # effectively do the same thing, just without triggering an access
            # on request.session.

            # Save the original session so that we can restore it once the
            # inner views have been called.
            nothing = object()
            original_session = request.__dict__.get("session", nothing)

            # This particular view hasn't been set to allow access to the
            # session, so we'll just assign an InvalidSession to
            # request.session
            request.__dict__["session"] = InvalidSession()

            try:
                # Invoke the real view
                return view(context, request)
            finally:
                # Restore the original session so that things like
                # pyramid_debugtoolbar can access it.
                if original_session is nothing:
                    del request.__dict__["session"]
                else:
                    request.__dict__["session"] = original_session

        return wrapped


session_view.options = {"uses_session"}


def includeme(config):
    """Register the Redis session factory and the session view deriver."""
    config.set_session_factory(
        SessionFactory(
            config.registry.settings["sessions.secret"],
            config.registry.settings["sessions.url"],
        )
    )

    config.add_view_deriver(session_view, over="csrf_view", under=viewderivers.INGRESS)
Green moong matar kebab! Hara moong and matar kebabs are a perfect snack and starter recipe for entertaining a large crowd. These are simple yet nutritious fritters. Green moong is packed with protein and low on carbs; it is one of the best vegetarian superfoods and provides a good source of protein as well. Malai Cham Cham! With a twist... I always fancy dhaba-style cooking because of its rustic looks and bursting flavors. This mixed vegetable dish is one of a kind. A bit time-consuming, but the end result is worth all the effort.
# coding=utf-8
import unittest

"""2. Add Two Numbers

https://leetcode.com/problems/add-two-numbers/description/

You are given two **non-empty** linked lists representing two non-negative
integers. The digits are stored in **reverse order** and each of their nodes
contain a single digit. Add the two numbers and return it as a linked list.

You may assume the two numbers do not contain any leading zero, except the
number 0 itself.

**Example**

    **Input:** (2 -> 4 -> 3) + (5 -> 6 -> 4)
    **Output:** 7 -> 0 -> 8
    **Explanation:** 342 + 465 = 807.

Similar Questions:
  Multiply Strings (multiply-strings)
  Add Binary (add-binary)
  Sum of Two Integers (sum-of-two-integers)
  Add Strings (add-strings)
  Add Two Numbers II (add-two-numbers-ii)
"""

# Definition for singly-linked list.
# class ListNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.next = None
from link import ListNode, to_node, compare


class Solution(unittest.TestCase):
    def addTwoNumbers(self, l1, l2):
        """Add two numbers stored as reversed-digit linked lists.

        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """

        def toint(node):
            # Digits are least-significant first, so scale each digit by
            # the running power of ten while walking the list.
            val, p, level = 0, node, 1
            while p:
                val += p.val * level
                level *= 10
                p = p.next
            return val

        def tolist(n):
            # Build the result list least-significant digit first. Floor
            # division (//) is required here: on Python 3 plain / yields a
            # float, so `n /= 10` never reaches 0 exactly and produces
            # garbage digits; on Python 2 // is identical for ints.
            head = ListNode(0)
            p = head
            while n > 0:
                p.next = ListNode(n % 10)
                p = p.next
                n //= 10
            # For n == 0 nothing was appended; fall back to the zero node.
            return head.next or head

        return tolist(toint(l1) + toint(l2))

    def test(self):
        self.assertTrue(compare(
            self.addTwoNumbers(to_node([0]), to_node([0])), to_node([0])))
        self.assertTrue(compare(
            self.addTwoNumbers(to_node([2, 4, 3]), to_node([5, 6, 4])),
            to_node([7, 0, 8])))


if __name__ == "__main__":
    unittest.main()
A consultancy agreement is an agreement between a company (usually referred to as the “principal”) and a service entity (usually referred to as the “consultant”). The consultant agrees to provide a service to the principal for a fee. The consultant may agree to provide a one-off service for a set fee, or continuing services to the principal for a predetermined length of time for a fee. The LVDox™ Free Consultancy Agreement is a very basic consultancy agreement. You may wish to add additional provisions relating to the rights, responsibilities and obligations of the consultant and principal, particularly if an agreement has been reached on particular issues.
""" Argus websocket handler and event handler """ import os from re import sub from json import dumps from tornado import ioloop from tornado import websocket from watchdog.events import RegexMatchingEventHandler from watchdog.observers import Observer from settings import ARGUS_ROOT active_handlers = {} active_observers = {} def define_options(enable=[], disable=[]): """ Define the options for the subscribed events. Valid options: - CRfile: file created - CRdir: directory created - MDfile: file modified - MDdir: directory modified - MVfile: file moved - MVdir: directory moved - DLfile: file deleted - DLdir: directory deleted - all: for disable only. Disables all options. By default all options are enabled. If all options are disabled, 'enable' options are applied. If all options are not disabled, 'disable' options are disabled. """ default_options = [ 'CRfile', 'CRdir', 'MDfile', 'MDdir', 'MVfile', 'MVdir', 'DLfile', 'DLdir' ] if disable == enable == []: return default_options elif 'all' in disable: return list(set(enable) & set(default_options)) else: return list(set(default_options) - set(disable)) class Argus(RegexMatchingEventHandler): def __init__(self, web_socket, root, options, *args, **kwargs): super(Argus, self).__init__(*args, **kwargs) self.websockets = [web_socket] self.root = root self.options = options def write_msg(self, message): for wbsocket in self.websockets: wbsocket.write_message(message) def on_created(self, event): is_directory = event.is_directory if ( (is_directory and 'CRdir' in self.options) or (not is_directory and 'CRfile' in self.options) ): self.write_msg( dumps( { 'event_type': 'created', 'is_directory': event.is_directory, 'src_path': sub(self.root, '', event.src_path) } ) ) def on_modified(self, event): is_directory = event.is_directory if ( (is_directory and 'MDdir' in self.options) or (not is_directory and 'MDfile' in self.options) ): self.write_msg( dumps( { 'event_type': 'modified', 'is_directory': event.is_directory, 
'src_path': sub(self.root, '', event.src_path) } ) ) def on_deleted(self, event): is_directory = event.is_directory if ( (is_directory and 'DLdir' in self.options) or (not is_directory and 'DLfile' in self.options) ): self.write_msg( dumps( { 'event_type': 'deleted', 'is_directory': event.is_directory, 'src_path': sub(self.root, '', event.src_path) } ) ) def on_moved(self, event): is_directory = event.is_directory if ( (is_directory and 'MVdir' in self.options) or (not is_directory and 'MVfile' in self.options) ): self.write_msg( dumps( { 'event_type': 'moved', 'is_directory': event.is_directory, 'src_path': sub(self.root, '', event.src_path), 'dest_path': sub(self.root, '', event.dest_path) } ) ) def add_socket(self, wbsocket): self.websockets.append(wbsocket) def remove_socket(self, wbsocket): if wbsocket in self.websockets: self.websockets.remove(wbsocket) class ArgusWebSocketHandler(websocket.WebSocketHandler): def __init__(self, *args, **kwargs): super(ArgusWebSocketHandler, self).__init__(*args, **kwargs) self.started_observer = False self.observer = None self.path = None self.args = [] self.kwargs = {} def check_origin(self, origin): return True def initiation_handler(self): """ Observers are unique per watched path. If an observer already exists for the requested path, the new web socket is added in the observer's sockets via the handler. In order to achieve this, both the handler and the observer objects are stored in a global dict. 
""" self.path = os.path.join(ARGUS_ROOT, self.kwargs.get('path')) if not os.path.exists(self.path): self.write_message('Path does not exist.') self.close() return if self.path in active_observers: event_handler = active_handlers[self.path] event_handler.add_socket(self) self.observer = active_observers[self.path] self.started_observer = True else: enable = self.get_arguments('enable', strip=True) disable = self.get_arguments('disable', strip=True) options = define_options(enable, disable) if options == []: return event_handler = Argus( web_socket=self, root=self.path, options=options, case_sensitive=True ) self.observer = Observer() self.observer.schedule( event_handler, path=self.path, recursive=True ) print '- Starting fs observer for path {}'.format(self.path) try: self.observer.start() except OSError: self.write_message('Cannot start observer') self.close() return active_handlers[self.path] = event_handler active_observers[self.path] = self.observer self.started_observer = True def open(self, *args, **kwargs): self.args = args self.kwargs = kwargs self.callback = ioloop.PeriodicCallback(lambda: self.ping(''), 60000) self.callback.start() self.initiation_handler() def on_message(self, message): pass def data_received(self, chunk): pass def on_close(self): self.callback.stop() if self.started_observer: event_handler = active_handlers[self.path] event_handler.remove_socket(self) if event_handler.websockets == []: print '- Stopping fs observer' self.observer.stop() del active_observers[self.path] del active_handlers[self.path]
Chances are you’ve heard of TRX, and its popularity has skyrocketed in the last few years. Designed by a Navy SEAL commander, TRX was made to be a highly portable and durable suspension training system you can take with you absolutely anywhere you find yourself – and the TRX Tactical Gym is the toughest and lightest one they’ve ever made. In addition to the regular TRX system (which, in the Tactical Gym, comes in a subtle military-inspired khaki), the Tactical Gym version comes with rubber handles as well as a TRX Door Anchor and TRX Xtender. It also includes the FORCE Super App, with their 12-week tactical conditioning program, and a tactical mesh bag for storing it in. TRX won’t be making you into a Greek god anytime soon, but thanks to its ultra-portability and versatility, you can get in an intense, dynamic and challenging bodyweight workout absolutely anywhere you go – your home, the hotel, the park. It packs down small and fits into your bag or suitcase, and you can do literally hundreds of different exercises and routines. With one of these in your kit, there really are no excuses for missing a workout.
"""Cpuset manipulation command """ from __future__ import unicode_literals from builtins import str __copyright__ = """ Copyright (C) 2007-2010 Novell Inc. Copyright (C) 2013-2018 SUSE Author: Alex Tsariounov <tsariounov@gmail.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """ import sys, os, logging, time from optparse import OptionParser, make_option from cpuset import config from cpuset import cset from cpuset.util import * from cpuset.commands.common import * try: from cpuset.commands import proc except SyntaxError: raise except: pass global log log = logging.getLogger('set') help = 'create, modify and destroy cpusets' usage = """%prog [options] [cpuset name] This command is used to create, modify, and destroy cpusets. Cpusets form a tree-like structure rooted at the root cpuset which always includes all system CPUs and all system memory nodes. A cpuset is an organizational unit that defines a group of CPUs and a group of memory nodes where a process or thread (i.e. task) is allowed to run on. For non-NUMA machines, the memory node is always 0 (zero) and cannot be set to anything else. For NUMA machines, the memory node can be set to a similar specification as the CPU definition and will tie those memory nodes to that cpuset. You will usually want the memory nodes that belong to the CPUs defined to be in the same cpuset. A cpuset can have exclusive right to the CPUs defined in it. 
This means that only this cpuset can own these CPUs. Similarly, a cpuset can have exclusive right to the memory nodes defined in it. This means that only this cpuset can own these memory nodes. Cpusets can be specified by name or by path; however, care should be taken when specifying by name if the name is not unique. This tool will generally not let you do destructive things to non-unique cpuset names. Cpusets are uniquely specified by path. The path starts at where the cpusets filesystem is mounted so you generally do not have to know where that is. For example, so specify a cpuset that is called "two" which is a subset of "one" which in turn is a subset of the root cpuset, use the path "/one/two" regardless of where the cpusets filesystem is mounted. When specifying CPUs, a so-called CPUSPEC is used. The CPUSPEC will accept a comma-separated list of CPUs and inclusive range specifications. For example, --cpu=1,3,5-7 will assign CPU1, CPU3, CPU5, CPU6, and CPU7 to the specified cpuset. Note that cpusets follow certain rules. For example, children can only include CPUs that the parents already have. If you do not follow those rules, the kernel cpuset subsystem will not let you create that cpuset. For example, if you create a cpuset that contains CPU3, and then attempt to create a child of that cpuset with a CPU other than 3, you will get an error, and the cpuset will not be active. The error is somewhat cryptic in that it is usually a "Permission denied" error. Memory nodes are specified with a MEMSPEC in a similar way to the CPUSPEC. For example, --mem=1,3-6 will assign MEM1, MEM3, MEM4, MEM5, and MEM6 to the specified cpuset. Note that if you attempt to create or modify a cpuset with a memory node specification that is not valid, you may get a cryptic error message, "No space left on device", and the modification will not be allowed. When you destroy a cpuset, then the tasks running in that set are moved to the parent of that cpuset. 
If this is not what you want, then manually move those tasks to the
cpuset of your choice with the 'cset proc' command (see
'cset proc --help' for more information).

EXAMPLES

Create a cpuset with the default memory specification:

    # cset set --cpu=2,4,6-8 --set=new_set

This command creates a cpuset called "new_set" located off the root
cpuset which holds CPUS 2,4,6,7,8 and node 0 (interleaved) memory.
Note that --set is optional, and you can just specify the name for
the new cpuset after all arguments.

Create a cpuset that specifies both CPUs and memory nodes:

    # cset set --cpu=3 --mem=3 /rad/set_one

Note that this command uses the full path method to specify the name
of the new cpuset "/rad/set_one". It also names the new cpuset
implicitly (i.e. no --set option, although you can use that if you
want to). If the "set_one" name is unique, you can subsequently
refer to is just by that. Memory node 3 is assigned to this cpuset
as well as CPU 3.

The above commands will create the new cpusets, or if they already
exist, they will modify them to the new specifications."""

# Module-level verbosity flag, raised via -v/--verbose (additive).
verbose = 0

# optparse option definitions for the "cset set" subcommand.
options = [make_option('-l', '--list',
                       help = 'list the named cpuset(s); recursive list if also -r',
                       action = 'store_true'),
           make_option('-c', '--cpu',
                       help = 'create or modify cpuset in the specified '
                              'cpuset with CPUSPEC specification',
                       metavar = 'CPUSPEC'),
           make_option('-m', '--mem',
                       help = 'specify which memory nodes to assign '
                              'to the created or modified cpuset (optional)',
                       metavar = 'MEMSPEC'),
           make_option('-n', '--newname',
                       help = 'rename cpuset specified with --set to NEWNAME'),
           make_option('-d', '--destroy',
                       help = 'destroy specified cpuset',
                       action = 'store_true'),
           make_option('-s', '--set',
                       metavar = 'CPUSET',
                       help = 'specify cpuset'),
           make_option('-r', '--recurse',
                       help = 'do things recursively, use with --list and --destroy',
                       action = 'store_true'),
           make_option('--force',
                       help = 'force recursive deletion even if processes are running '
                              'in those cpusets (they will be moved to parent cpusets)',
                       action = 'store_true'),
           make_option('-x', '--usehex',
                       help = 'use hexadecimal value for CPUSPEC and MEMSPEC when '
                              'listing cpusets',
                       action = 'store_true'),
           make_option('-v', '--verbose',
                       help = 'prints more detailed output, additive',
                       action = 'count'),
           make_option('--cpu_exclusive',
                       help = 'mark this cpuset as owning its CPUs exclusively',
                       action = 'store_true'),
           make_option('--mem_exclusive',
                       help = 'mark this cpuset as owning its MEMs exclusively',
                       action = 'store_true'),
          ]


def func(parser, options, args):
    """Entry point for the "cset set" subcommand: dispatch to the list,
    create/modify, rename or destroy operation based on the options."""
    log.debug("entering func, options=%s, args=%s", options, args)

    global verbose
    if options.verbose:
        verbose = options.verbose

    # Re-read the cpuset filesystem so we operate on current state.
    cset.rescan()

    if options.list:
        if options.set:
            list_sets(options.set, options.recurse, options.usehex)
            return
        if len(args):
            list_sets(args, options.recurse, options.usehex)
        else:
            list_sets('root', options.recurse, options.usehex)
        return

    if options.cpu or options.mem:
        # create or modify cpuset
        create_from_options(options, args)
        return

    if options.newname:
        rename_set(options, args)
        return

    if options.destroy:
        if options.set:
            destroy_sets(options.set, options.recurse, options.force)
        else:
            destroy_sets(args, options.recurse, options.force)
        return

    if options.cpu_exclusive or options.mem_exclusive:
        # FIXME: modification of existing cpusets for exclusivity
        log.info("Modification of cpu_exclusive and mem_exclusive not implemented.")
        return

    # default behavior if no options specified is list
    log.debug('no options set, default is listing cpusets')
    if options.set:
        list_sets(options.set, options.recurse, options.usehex)
    elif len(args):
        list_sets(args, options.recurse, options.usehex)
    else:
        list_sets('root', options.recurse, options.usehex)


def list_sets(tset, recurse=None, usehex=False):
    """list cpusets specified in tset as cpuset or list of cpusets, recurse if true"""
    log.debug('entering list_sets, tset=%s recurse=%s', tset, recurse)
    sl = []
    # tset may be a single name/CpuSet or a list thereof; normalize to a list.
    if isinstance(tset, list):
        for s in tset:
            sl.extend(cset.find_sets(s))
    else:
        sl.extend(cset.find_sets(tset))
    log.debug('total unique sets in passed tset: %d', len(sl))
    sl2 = []
    for s in sl:
        sl2.append(s)
        if len(s.subsets) > 0:
            # Always show the immediate children; walk deeper only with -r.
            sl2.extend(s.subsets)
            if recurse:
                for node in s.subsets:
                    for nd in cset.walk_set(node):
                        sl2.append(nd)
    sl = sl2
    if config.mread:
        # Machine-readable mode brackets the output with marker lines.
        pl = ['cpuset_list_start']
    else:
        pl = ['']
    pl.extend(set_header(' '))
    for s in sl:
        if verbose:
            # No width limit in verbose mode; 78 columns otherwise.
            pl.append(set_details(s,' ', None, usehex))
        else:
            pl.append(set_details(s,' ', 78, usehex))
    if config.mread:
        pl.append('cpuset_list_end')
    log.info("\n".join(pl))


def destroy_sets(sets, recurse=False, force=False):
    """destroy cpusets in list of sets, recurse if true, if force destroy if tasks running"""
    log.debug('enter destroy_sets, sets=%s, force=%s', sets, force)
    nl = []
    if isinstance(sets, list):
        nl.extend(sets)
    else:
        nl.append(sets)
    # check that sets passed are ok, will raise if one is bad
    sl2 = []
    for s in nl:
        st = cset.unique_set(s)
        sl2.append(st)
        if len(st.subsets) > 0:
            if not recurse:
                raise CpusetException(
                    'cpuset "%s" has subsets, delete them first, or use --recurse'
                    % st.path)
            elif not force:
                raise CpusetException(
                    'cpuset "%s" has subsets, use --force to destroy' % st.path)
            sl2.extend(st.subsets)
            for node in st.subsets:
                for nd in cset.walk_set(node):
                    sl2.append(nd)
    # ok, good to go
    if recurse:
        # Delete deepest-first so children go before their parents.
        sl2.reverse()
    for s in sl2:
        s = cset.unique_set(s)
        # skip the root set!!! or you'll have problems...
        if s.path == '/':
            continue
        log.info('--> processing cpuset "%s", moving %s tasks to parent "%s"...',
                 s.name, len(s.tasks), s.parent.path)
        proc.move(s, s.parent)
        log.info('--> deleting cpuset "%s"', s.path)
        destroy(s)
    log.info('done')


def destroy(name):
    """destroy one cpuset by name as cset or string"""
    log.debug('entering destroy, name=%s', name)
    if isstr(name):
        set = cset.unique_set(name)
    elif not isinstance(name, cset.CpuSet):
        raise CpusetException(
            "passed name=%s, which is not a string or CpuSet" % name)
    else:
        set = name
    tsks = set.tasks
    if len(tsks) > 0:
        # wait for tasks, sometimes it takes a little while to
        # have them leave the set
        ii = 0
        while len(tsks)>0:
            log.debug('%i tasks still running in set %s, waiting interval %s...',
                      len(tsks), set.name, ii+1)
            time.sleep(0.5)
            tsks = set.tasks
            ii += 1
            if (ii) > 6:
                # try it for 3 seconds, bail if tasks still there
                raise CpusetException(
                    "trying to destroy cpuset %s with tasks running: %s" %
                    (set.path, set.tasks))
    log.debug("tasks expired, deleting set %s" % set.path)
    # Removing the cpuset directory is what actually destroys the cpuset.
    os.rmdir(cset.CpuSet.basepath+set.path)
    # fixme: perhaps reparsing the all the sets is not so efficient...
    cset.rescan()


def rename_set(options, args):
    """rename cpuset as specified in options and args lists"""
    log.debug('entering rename_set, options=%s args=%s', options, args)
    # figure out target cpuset name, if --set not used, use first arg
    name = options.newname
    if options.set:
        tset = cset.unique_set(options.set)
    elif len(args) > 0:
        tset = cset.unique_set(args[0])
    else:
        raise CpusetException('desired cpuset not specified')
    path = tset.path[0:tset.path.rfind('/')+1]
    log.debug('target set="%s", path="%s", name="%s"', tset.path, path, name)
    try:
        # The rename must stay within the same parent path; also make sure
        # no cpuset of the new name already exists.
        if name.find('/') == -1:
            chk = cset.unique_set(path+name)
        else:
            if name[0:name.rfind('/')+1] != path:
                raise CpusetException('desired name cannot have different path')
            chk = cset.unique_set(name)
        raise CpusetException('cpuset "'+chk.path+'" already exists')
    except CpusetNotFound:
        # Good: the destination name is free.
        pass
    except:
        raise
    if name.rfind('/') != -1:
        name = name[name.rfind('/')+1:]
    log.info('--> renaming "%s" to "%s"', cset.CpuSet.basepath+tset.path, name)
    os.rename(cset.CpuSet.basepath+tset.path, cset.CpuSet.basepath+path+name)
    cset.rescan()


def create_from_options(options, args):
    """create cpuset as specified by options and args lists"""
    log.debug('entering create_from_options, options=%s args=%s', options, args)
    # figure out target cpuset name, if --set not used, use first arg
    if options.set:
        tset = options.set
    elif len(args) > 0:
        tset = args[0]
    else:
        raise CpusetException('cpuset not specified')
    cspec = None
    mspec = None
    cx = None
    mx = None
    if options.cpu:
        cset.cpuspec_check(options.cpu)
        cspec = options.cpu
    if options.mem:
        cset.memspec_check(options.mem)
        mspec = options.mem
    if options.cpu_exclusive:
        cx = options.cpu_exclusive
    if options.mem_exclusive:
        mx = options.mem_exclusive
    try:
        create(tset, cspec, mspec, cx, mx)
        if not mspec:
            modify(tset, memspec='0')  # always need at least this
        log.info('--> created cpuset "%s"', tset)
    except CpusetExists:
        # Creating an existing cpuset means: modify it in place.
        modify(tset, cspec, mspec, cx, mx)
        log.info('--> modified cpuset "%s"', tset)
    active(tset)


def create(name, cpuspec, memspec, cx, mx):
    """create one cpuset by name, cpuspec, memspec, cpu and mem exclusive flags"""
    log.debug('entering create, name=%s cpuspec=%s memspec=%s cx=%s mx=%s',
              name, cpuspec, memspec, cx, mx)
    try:
        cset.unique_set(name)
    except CpusetNotFound:
        # Good: the name is free, we can create it below.
        pass
    except:
        raise CpusetException('cpuset "%s" not unique, please specify by path' % name)
    else:
        raise CpusetExists('attempt to create already existing set: "%s"' % name)
    # FIXME: check if name is a path here
    os.mkdir(cset.CpuSet.basepath+'/'+name)
    # fixme: perhaps reparsing the all the sets is not so efficient...
    cset.rescan()
    log.debug('created new cpuset "%s"', name)
    modify(name, cpuspec, memspec, cx, mx)


def modify(name, cpuspec=None, memspec=None, cx=None, mx=None):
    """modify one cpuset by name, cpuspec, memspec, cpu and mem exclusive flags"""
    log.debug('entering modify, name=%s cpuspec=%s memspec=%s cx=%s mx=%s',
              name, cpuspec, memspec, cx, mx)
    if isstr(name):
        nset = cset.unique_set(name)
    elif not isinstance(name, cset.CpuSet):
        raise CpusetException(
            "passed name=%s, which is not a string or CpuSet" % name)
    else:
        nset = name
    log.debug('modifying cpuset "%s"', nset.name)
    # Only touch the properties that were actually specified.
    if cpuspec:
        nset.cpus = cpuspec
    if memspec:
        nset.mems = memspec
    if cx:
        nset.cpu_exclusive = cx
    if mx:
        nset.mem_exclusive = mx


def active(name):
    """check that cpuset by name or cset is ready to be used"""
    log.debug("entering active, name=%s", name)
    if isstr(name):
        set = cset.unique_set(name)
    elif not isinstance(name, cset.CpuSet):
        raise CpusetException("passing bogus name=%s" % name)
    else:
        set = name
    # A cpuset is unusable until it has both cpus and mems assigned.
    if set.cpus == '':
        raise CpusetException('"%s" cpuset not active, no cpus defined' % set.path)
    if set.mems == '':
        raise CpusetException('"%s" cpuset not active, no mems defined' % set.path)


def set_header(indent=None):
    """return list of cpuset output header"""
    if indent:
        istr = indent
    else:
        istr = ''
    l = []
    # Column widths match the rjust() calls in set_details().
    #           '123456789-123456789-123456789-123456789-123456789-123456789-'
    l.append(istr + '        Name       CPUs-X    MEMs-X Tasks Subs Path')
    l.append(istr + '------------ ---------- - ------- - ----- ---- ----------')
    return l


def set_details(name, indent=None, width=None, usehex=False):
    """return string of cpuset details"""
    # width=None means "no limit"; internally 0 encodes that.
    if width == None:
        width = 0
    if isstr(name):
        set = cset.unique_set(name)
    elif not isinstance(name, cset.CpuSet):
        raise CpusetException("passing bogus set=%s" % name)
    else:
        set = name
    l = []
    l.append(set.name.rjust(12))
    cs = set.cpus
    if cs == '':
        cs = '*****'
    elif usehex:
        cs = cset.cpuspec_to_hex(cs)
    l.append(cs.rjust(10))
    if set.cpu_exclusive:
        l.append('y')
    else:
        l.append('n')
    cs = set.mems
    if cs == '':
        cs = '*****'
    elif usehex:
        cs = cset.cpuspec_to_hex(cs)
    l.append(cs.rjust(7))
    if set.mem_exclusive:
        l.append('y')
    else:
        l.append('n')
    l.append(str(len(set.tasks)).rjust(5))
    l.append(str(len(set.subsets)).rjust(4))
    if config.mread:
        # Machine-readable: semicolon-separated, whitespace stripped.
        l.append(set.path)
        l2 = []
        for line in l:
            l2.append(line.strip())
        return ';'.join(l2)
    out = ' '.join(l) + ' '
    tst = out + set.path
    if width != 0 and len(tst) > width:
        # Path too long for the line: elide the middle with "..." so the
        # row still fits in the requested width.
        target = width - len(out)
        patha = set.path[:len(set.path)//2-3]
        pathb = set.path[len(set.path)//2:]
        patha = patha[:target//2-3]
        pathb = pathb[-target//2:]
        out += patha + '...' + pathb
    else:
        out = tst
    if indent:
        istr = indent
    else:
        istr = ''
    return istr + out
Edith Swinney Huebner, 91, of Boyd, died Tuesday, Nov. 27, 2018, in Boyd. Funeral is 11 a.m. Tuesday, Dec. 4, at Hawkins Funeral Home in Boyd with the Rev. Robert Turk officiating. Burial will follow at Boyd Cemetery. Edith was born July 4, 1927, in Decatur to Tom and Gladys (Johnson) Swinney. She married Kenneth Charles Huebner June 7, 1945, in Boyd. Edith retired from General Dynamics after 37 years of service as a chief clerk. She was a member of the First United Methodist Church in Boyd. She was preceded in death by her husband of 68 years; and sister Barbara Luster. She is survived by her daughter, Diann Williams of Surry, Va.; one grandchild; and sister Tommie Gillespie of Boyd. Memorial donations may be made to the First United Methodist Church Boyd.
from __future__ import unicode_literals

from django.utils.encoding import python_2_unicode_compatible
from django.conf import settings
from django.db import models
from django.core.urlresolvers import reverse
from django.db.models.signals import pre_save
from django.utils import timezone
from django.utils.text import slugify


class CareerManager(models.Manager):
    """Manager exposing only publicly visible Career rows."""

    def active(self, *args, **kwargs):
        # A posting is active when it is not a draft and its publish date
        # is not in the future.
        return super(CareerManager, self).filter(draft = False).filter(published_at__lte = timezone.now())


@python_2_unicode_compatible
class Career(models.Model):
    """A job posting: role, location, offer text and publication state."""

    FULLTIME = 'Full-time'
    PARTTIME = 'Part-time'
    INTERNSHIP = 'Internship'
    RESEARCH = 'Research'
    ROLE_CATEGORY_CHOICES = (
        (FULLTIME, 'Full-time'),
        (PARTTIME, 'Part-time'),
        (INTERNSHIP, 'Internship'),
        (RESEARCH, 'Research'),
    )
    role_category = models.CharField(
        max_length=12,
        choices=ROLE_CATEGORY_CHOICES,
        default=FULLTIME,
    )

    # Role (human-readable job name; also the source of the slug)
    role = models.CharField(max_length = 120)
    # Location
    city = models.CharField(max_length=255)
    # Plain text and urlify slug
    career_slug = models.SlugField(unique = True)
    career_offer_title = models.CharField(max_length=255, default="")
    career_offer_description = models.TextField(default="")
    career_experience = models.TextField(default="")
    career_terms = models.TextField(default="")
    # Time and meta staff
    draft = models.BooleanField(default = False)
    published_at = models.DateField(auto_now = False, auto_now_add = False)
    updated = models.DateTimeField(auto_now = True, auto_now_add = False)
    timestamp = models.DateTimeField(auto_now = False, auto_now_add = True)

    objects = CareerManager()

    # __unicode__ kept for Python 2 callers; python_2_unicode_compatible
    # regenerates it from __str__ on Python 2 anyway.
    def __unicode__(self):
        return self.role

    def __str__(self):
        return self.role

    def get_absolute_url(self):
        return reverse('careers:detail', kwargs = {'slug':self.career_slug})

    class Meta:
        ordering = ["-timestamp", "-updated"]


def create_slug(instance, new_slug = None):
    """Return a slug for *instance* that is unique among Career rows.

    BUG FIX: the original slugified ``instance.title``, but Career has no
    ``title`` field (AttributeError on first save); the human-readable
    name is ``role``.
    """
    career_slug = slugify(instance.role)
    if new_slug is not None:
        career_slug = new_slug
    qs = Career.objects.filter(career_slug = career_slug).order_by("-id")
    exists = qs.exists()
    if exists:
        # Disambiguate with the id of the newest clashing row and retry.
        new_slug = "%s-%s" %(career_slug, qs.first().id)
        # BUG FIX: the recursive call passed ``slug=new_slug``, but this
        # function's keyword parameter is ``new_slug`` (TypeError).
        return create_slug(instance, new_slug = new_slug)
    return career_slug


def pre_save_post_receiver(sender, instance, *args, **kwargs):
    # Fill in the slug exactly once, just before the first save.
    if not instance.career_slug:
        instance.career_slug = create_slug(instance)


pre_save.connect(pre_save_post_receiver, sender = Career)
my opinion about the results I read as far as some of the music winners go. Winners are in bold. hits the stores this year, the results may be different next year. has the most crossover appeal out of the list. Although Paramore is cool in my book, I personally voted for Green Day on this one. 21st Century Breakdown was my favorite rock album last year. This Town came out on top, so my vote was Run This Town. a very positive TV show on MTV, Road to Redemption.
# dirtool.py - diff tool for directories
# Copyright (C) 2018 Ingo Ruhnke <grumbel@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from typing import TYPE_CHECKING, Callable, Any

from dirtools.util import numeric_sort_key

from dirtools.fileview.file_info import FileInfo

if TYPE_CHECKING:
    from dirtools.fileview.file_collection import FileCollection  # noqa: F401


class Sorter:
    """Sort configuration for a file view: the key function, the sort
    direction, and whether directories are grouped ahead of files."""

    def __init__(self) -> None:
        # Default key: case-insensitive natural ("numeric") ordering on
        # the file's basename.
        self.key_func: Callable[[FileInfo], Any] = \
            lambda fi: numeric_sort_key(fi.basename().lower())
        self.reverse = False
        self.directories_first = True

    def set_directories_first(self, v: bool) -> None:
        """Group directories ahead of regular files when *v* is true."""
        self.directories_first = v

    def set_sort_reversed(self, rev: bool) -> None:
        """Sort in descending order when *rev* is true."""
        self.reverse = rev

    def set_key_func(self, key_func: Callable[[FileInfo], Any]) -> None:
        """Replace the sort key function."""
        self.key_func = key_func

    def get_key_func(self) -> Callable[[FileInfo], Any]:
        """Return the effective key function, honouring directories_first."""
        if not self.directories_first:
            return self.key_func
        # Tuple key: False (directory) orders before True (file); the
        # configured key function breaks ties within each group.
        return lambda fi: (not fi.isdir(), self.key_func(fi))

    # def apply(self, file_collection: 'FileCollection') -> None:
    #     file_collection.sort(self.get_key_func(), reverse=self.reverse)


# EOF #
However, there is some good news because the developers behind the Spark email app have announced that the app is now launching on Android. Spark was previously only available on iOS devices, but it looks like it is now finding its way to Android handsets where some of the features of the app should offer up an Inbox-like experience for Android users. Some of Spark’s features include smart notifications, where the app will only notify users of new email from someone it deems to be “important” to you. This means that you won’t get notified all the time whenever an email comes in, thus alerting you only when it thinks there is an email that you should tend to. It can also focus on emails that are important by putting them at the top, and also gives users the ability to batch-delete other emails, such as newsletters, deals, and so on. While it won’t be a 100% replica of Inbox in terms of features, if you were looking for a smarter email app, it could be worth checking out. The app is free to download via the Google Play Store but there will also be additional features that can be unlocked by paying money. Filed in Cellphones. Read more about Android, Apps, Google and Inbox.
"""Test XmlTestRunner functionality for junitxml. :Author: Duncan Findlay <duncan@duncf.ca> """ import xml.dom.minidom try: from cStringIO import StringIO except ImportError: from io import StringIO try: import unittest2 as unittest except ImportError: import unittest import junitxml.runner # Old versions of unittest don't have these "fancy" types of results. _FANCY_UNITTEST = (hasattr(unittest, 'skip') and hasattr(unittest, 'expectedFailure')) class TestXMLTestRunner(unittest.TestCase): class DummyTestCase(unittest.TestCase): def test_pass(self): pass def test_fail(self): self.fail() def test_error(self): raise Exception() if _FANCY_UNITTEST: @unittest.skip('skipped') def test_skip(self): pass @unittest.expectedFailure def test_xfail(self): self.fail('all is good') @unittest.expectedFailure def test_unexpected_success(self): pass def _run_runner(self, test_suite): xml_out = StringIO() console = StringIO() runner = junitxml.runner.JUnitXmlTestRunner( xml_stream=xml_out, txt_stream=console) result = runner.run(test_suite) return (result, xml_out, console) def test_xml_output(self): """Tests that runner properly gives XML output.""" test_suite = unittest.TestLoader().loadTestsFromTestCase( self.DummyTestCase) result, xml_out, console = self._run_runner(test_suite) num_tests = test_suite.countTestCases() # Make sure the XML output looks correct. value = xml_out.getvalue() document = xml.dom.minidom.parseString(value) self.assertEqual(document.documentElement.tagName, 'testsuite') self.assertEqual(document.documentElement.getAttribute('tests'), str(num_tests)) def test_console_output_fail(self): """Tests that failure is reported properly on stderr.""" test_suite = unittest.TestLoader().loadTestsFromTestCase( self.DummyTestCase) result, xml_out, console = self._run_runner(test_suite) num_tests = test_suite.countTestCases() # Make sure the console output looks correct. 
value = console.getvalue() self.assertTrue('Ran %d tests in ' % (num_tests,) in value, 'Output was:\n%s' % (value,)) self.assertTrue('FAILED (failures=1' in value, 'Output was:\n%s' % (value,)) self.assertTrue('errors=1' in value, 'Output was:\n%s' % (value,)) if _FANCY_UNITTEST: self.assertTrue('expected failures=1' in value, 'Output was:\n%s' % (value,)) self.assertTrue('skipped=1' in value, 'Output was:\n%s' % (value,)) self.assertTrue('unexpected successes=1' in value, 'Output was:\n%s' % (value,)) def test_console_output_ok(self): """Tests that success is reported properly on stderr.""" test_suite = unittest.TestSuite() test_suite.addTest(self.DummyTestCase('test_pass')) result, xml_out, console = self._run_runner(test_suite) value = console.getvalue() self.assertTrue('Ran 1 test in ' in value, 'Output was:\n%s' % (value,)) self.assertTrue('OK\n' in value, 'Output was:\n%s' % (value,))
Although wind power is used worldwide in a variety of settings, it is commonly overlooked as a viable, alternative energy source. This is mainly due to the cost of implementing large scale systems. There are also a lot of wind energy facts that are commonly overlooked when it comes to understanding wind power as a science. These facts, which when more widely understood and accepted, could push it into a more mainstream alternative. Unfortunately, unlike with solar power, the long term vision of seeing the ongoing generation of power from a system like this rarely outweighs the up front financial commitment that is required to begin a construction project of a large scale wind power system. One commonly overlooked fact is that wind power is simply a form of solar energy (see How Wind Power Works). Like solar power, there is always a constant supply of wind. As long as the sun rises and sets, there will be the heating and cooling of the earth’s surface which cause variances in air pressures leading to wind. Like solar power, it is merely a potential energy that if left un-harnessed or captured simply comes and goes. But the important thing is that is always comes and always goes regardless of the price of oil, the state of the economy or what country you live in. The Department of Energy conducted a survey in 2005 around wind energy. Their findings showed that out of all the electricity produced in the world, wind energy accounted for only 1% of the total. Germany leads the world in this technology and is the largest producer of wind energy, followed by Denmark then the United States. This statistic clearly shows that we have merely scratched the surface of the capabilities of wind energy. As the renewable energy advocates continue to push for the use of alternative energy sources, wind energy will be standing by waiting to grow to an eventual massive source around the globe. When we look at the United States in 3rd, it should be known that the U.S. 
gave birth to wind energy technology. If you add all the wind power systems throughout the US, there is a potential to generate over 20,000 mega watts. In 2008 the projections were to generate 48 billion kilowatt-hours. This enormous number still accounts for just over 1% of the total U.S. electricity supply. When you compare this to the averages consumed by typical households, this amount has the potential to power over 4.5 million homes. On the flip side, the actual potential that could be generated from wind is estimated at more than 2x what is being generated currently. Even though the level of wind power falls just over 1% currently, it has been the fastest growing form of alternative energy nationwide. As the acceptance of this technology grows, large wind farms will most likely be more prevalent. Although these wind farms do tend to require a large up-front capital outlay to get started, they eventually become one of the least expensive ways to generate electricity and one of the least labor-intensive systems to maintain. Offshore wind farms have the potential to power an entire country. First however, the acceptance in the minds of people must take root and like all new technologies it must travel through the cycles of innovation, critical mass, exponential growth before it becomes commonplace. Until there is a wider acceptance and realization of this phenomenal energy source, there is always going to be the underlying worry in the hearts of people centered around rising fuel costs and all the environmental and physical problems that fossil fuel use creates. When wind energy is looked at on a wider scale and the idea of off shore wind farms and residential wind turbine technology (see Homemade Wind Turbines) is accepted, these concerns may be minimized and eventually eliminated. Until then, we as individuals can do our part with residential solar (see Home Solar Power) and wind technology and set the example to our neighbors.
'''
testcode2
---------

A framework for regression testing numerical programs.

:copyright: (c) 2012 James Spencer.
:license: modified BSD; see LICENSE for more details.
'''

import glob
import os
import pipes
import shutil
import subprocess
import sys

# YAML extraction is optional; record whether PyYAML is importable.
try:
    import yaml
    _HAVE_YAML = True
except ImportError:
    _HAVE_YAML = False

import testcode2.dir_lock as dir_lock
import testcode2.exceptions as exceptions
import testcode2.queues as queues
import testcode2.compatibility as compat
import testcode2.util as util
import testcode2.validation as validation

# Single process-wide lock serialising per-directory file operations.
DIR_LOCK = dir_lock.DirLock()

# Do not change! Bad things will happen...
_FILESTEM_TUPLE = (
    ('test', 'test.out'),
    ('error', 'test.err'),
    ('benchmark', 'benchmark.out'),
)
_FILESTEM_DICT = dict( _FILESTEM_TUPLE )
# We can change FILESTEM if needed.
# However, this should only be done to compare two sets of test output or two
# sets of benchmarks.
# Bad things will happen if tests are run without the default FILESTEM!
FILESTEM = dict( _FILESTEM_TUPLE )


class TestProgram:
    '''Store and access information about the program being tested.'''

    def __init__(self, name, exe, test_id, benchmark, **kwargs):
        # Set sane defaults (mostly null) for keyword arguments.
        self.name = name

        # Running
        self.exe = exe
        self.test_id = test_id
        # "tc.*" tokens in templates are substituted at command-build time.
        self.run_cmd_template = ('tc.program tc.args tc.input > '
                                 'tc.output 2> tc.error')
        self.launch_parallel = 'mpirun -np tc.nprocs'
        self.submit_pattern = 'testcode.run_cmd'

        # dummy job with default settings (e.g tolerance)
        self.default_test_settings = None

        # Analysis
        self.benchmark = benchmark
        self.ignore_fields = []
        self.data_tag = None
        self.extract_cmd_template = 'tc.extract tc.args tc.file'
        self.extract_program = None
        self.extract_args = ''
        self.extract_fmt = 'table'
        self.skip_cmd_template = 'tc.skip tc.args tc.test'
        self.skip_program = None
        self.skip_args = ''
        self.verify = False

        # Info
        self.vcs = None

        # Set values passed in as keyword options.
        for (attr, val) in kwargs.items():
            setattr(self, attr, val)

        # If using an external verification program, then set the default
        # extract command template.
        if self.verify and 'extract_cmd_template' not in kwargs:
            self.extract_cmd_template = 'tc.extract tc.args tc.test tc.bench'

        # Can we actually extract the data?
        if self.extract_fmt == 'yaml' and not _HAVE_YAML:
            err = 'YAML data format cannot be used: PyYAML is not installed.'
            raise exceptions.TestCodeError(err)

    def run_cmd(self, input_file, args, nprocs=0):
        '''Create run command.'''
        output_file = util.testcode_filename(FILESTEM['test'], self.test_id,
                                             input_file, args)
        error_file = util.testcode_filename(FILESTEM['error'], self.test_id,
                                            input_file, args)

        # Need to escape filenames for passing them to the shell.
        exe = pipes.quote(self.exe)
        output_file = pipes.quote(output_file)
        error_file = pipes.quote(error_file)

        cmd = self.run_cmd_template.replace('tc.program', exe)
        # input_file/args may be non-strings (e.g. None) to mean "absent".
        if type(input_file) is str:
            input_file = pipes.quote(input_file)
            cmd = cmd.replace('tc.input', input_file)
        else:
            cmd = cmd.replace('tc.input', '')
        if type(args) is str:
            cmd = cmd.replace('tc.args', args)
        else:
            cmd = cmd.replace('tc.args', '')
        cmd = cmd.replace('tc.output', output_file)
        cmd = cmd.replace('tc.error', error_file)
        if nprocs > 0 and self.launch_parallel:
            # Prefix with the parallel launcher (e.g. mpirun) only when
            # a positive process count was requested.
            cmd = '%s %s' % (self.launch_parallel, cmd)
        cmd = cmd.replace('tc.nprocs', str(nprocs))
        return cmd

    def extract_cmd(self, path, input_file, args):
        '''Create extraction command(s).'''
        test_file = util.testcode_filename(FILESTEM['test'], self.test_id,
                                           input_file, args)
        bench_file = self.select_benchmark_file(path, input_file, args)
        cmd = self.extract_cmd_template
        cmd = cmd.replace('tc.extract', pipes.quote(self.extract_program))
        cmd = cmd.replace('tc.args', self.extract_args)
        if self.verify:
            # Single command to compare benchmark and test outputs.
            cmd = cmd.replace('tc.test', pipes.quote(test_file))
            cmd = cmd.replace('tc.bench', pipes.quote(bench_file))
            return (cmd,)
        else:
            # Need to return commands to extract data from the test and
            # benchmark outputs.
            test_cmd = cmd.replace('tc.file', pipes.quote(test_file))
            bench_cmd = cmd.replace('tc.file', pipes.quote(bench_file))
            return (bench_cmd, test_cmd)

    def skip_cmd(self, input_file, args):
        '''Create skip command.'''
        test_file = util.testcode_filename(FILESTEM['test'], self.test_id,
                                           input_file, args)
        cmd = self.skip_cmd_template
        cmd = cmd.replace('tc.skip', pipes.quote(self.skip_program))
        cmd = cmd.replace('tc.args', self.skip_args)
        cmd = cmd.replace('tc.test', pipes.quote(test_file))
        return cmd

    def select_benchmark_file(self, path, input_file, args):
        '''Find the first benchmark file out of all benchmark IDs which exists.'''
        benchmark = None
        benchmarks = []
        for bench_id in self.benchmark:
            benchfile = util.testcode_filename(FILESTEM['benchmark'], bench_id,
                                               input_file, args)
            benchmarks.append(benchfile)
            if os.path.exists(os.path.join(path, benchfile)):
                benchmark = benchfile
                break
        if not benchmark:
            err = 'No benchmark found in %s. Checked for: %s.'
            raise exceptions.TestCodeError(err % (path, ', '.join(benchmarks)))
        return benchmark


class Test:
    '''Store and execute a test.'''

    def __init__(self, name, test_program, path, **kwargs):
        self.name = name

        # program
        self.test_program = test_program

        # running
        self.path = path
        self.inputs_args = None
        self.output = None
        self.nprocs = 0
        self.min_nprocs = 0
        self.max_nprocs = compat.maxint
        self.submit_template = None
        # Run jobs in this concurrently rather than consecutively?
        # Only used when setting tests up in testcode2.config: if true then
        # each pair of input file and arguments are assigned to a different
        # Test object rather than a single Test object.
        self.run_concurrent = False

        # Analysis
        self.default_tolerance = None
        self.tolerances = {}

        # Set values passed in as keyword options.
for (attr, val) in kwargs.items(): setattr(self, attr, val) if not self.inputs_args: self.inputs_args = [('', '')] self.status = dict( (inp_arg, None) for inp_arg in self.inputs_args ) # 'Decorate' functions which require a directory lock in order for file # access to be thread-safe. # As we use the in_dir decorator, which requires knowledge of the test # directory (a per-instance property), we cannot use the @decorator # syntactic sugar. Fortunately we can still modify them at # initialisation time. Thank you python for closures! self.start_job = DIR_LOCK.in_dir(self.path)(self._start_job) self.move_output_to_test_output = DIR_LOCK.in_dir(self.path)( self._move_output_to_test_output) self.move_old_output_files = DIR_LOCK.in_dir(self.path)( self._move_old_output_files) self.verify_job = DIR_LOCK.in_dir(self.path)(self._verify_job) self.skip_job = DIR_LOCK.in_dir(self.path)(self._skip_job) def __hash__(self): return hash(self.path) def __eq__(self, other): if not isinstance(other, self.__class__): return False else: # Compare values we care about... cmp_vals = ['test_program', 'path', 'inputs_args', 'output', 'nprocs', 'min_nprocs', 'max_nprocs', 'submit_template', 'default_tolerance', 'tolerances', 'status'] comparison = tuple(getattr(other, cmp_val) == getattr(self, cmp_val) for cmp_val in cmp_vals) return compat.compat_all(comparison) def run_test(self, verbose=1, cluster_queue=None, rundir=None): '''Run all jobs in test.''' try: # Construct tests. test_cmds = [] test_files = [] for (test_input, test_arg) in self.inputs_args: if (test_input and not os.path.exists(os.path.join(self.path,test_input))): err = 'Input file does not exist: %s' % (test_input,) raise exceptions.RunError(err) test_cmds.append(self.test_program.run_cmd(test_input, test_arg, self.nprocs)) test_files.append(util.testcode_filename(FILESTEM['test'], self.test_program.test_id, test_input, test_arg)) # Move files matching output pattern out of the way. 
            # (NOTE(review): this excerpt starts mid-way through _run_test;
            # the enclosing try: opens earlier in the method, above this view.)
            self.move_old_output_files(verbose)
            # Run tests one-at-a-time locally or submit job in single submit
            # file to a queueing system.
            if cluster_queue:
                if self.output:
                    for (ind, test) in enumerate(test_cmds):
                        # Don't quote self.output if it contains any wildcards
                        # (assume the user set it up correctly!)
                        out = self.output
                        if not compat.compat_any(wild in self.output
                                for wild in ['*', '?', '[', '{']):
                            out = pipes.quote(self.output)
                        # Append a mv so a queueing-system run leaves output
                        # under the same name a local run would.
                        test_cmds[ind] = '%s; mv %s %s' % (test_cmds[ind],
                                out, pipes.quote(test_files[ind]))
                # Single submission: all test commands go into one script.
                test_cmds = ['\n'.join(test_cmds)]
            for (ind, test) in enumerate(test_cmds):
                job = self.start_job(test, cluster_queue, verbose)
                job.wait()
                # Analyse tests as they finish.
                if cluster_queue:
                    # Did all of them at once.
                    for (test_input, test_arg) in self.inputs_args:
                        self.verify_job(test_input, test_arg, verbose, rundir)
                else:
                    # Did one job at a time.
                    (test_input, test_arg) = self.inputs_args[ind]
                    err = []
                    if self.output:
                        try:
                            self.move_output_to_test_output(test_files[ind])
                        except exceptions.RunError:
                            err.append(sys.exc_info()[1])
                    status = validation.Status()
                    if job.returncode != 0:
                        err.insert(0, 'Error running job. Return code: %i'
                                   % job.returncode)
                        # A non-zero return code may mean "skip this test":
                        # ask the user-supplied skip program.
                        (status, msg) = self.skip_job(test_input, test_arg,
                                                      verbose)
                    if status.skipped():
                        self._update_status(status, (test_input, test_arg))
                        if verbose > 0 and verbose < 3:
                            sys.stdout.write(
                                util.info_line(self.path, test_input,
                                               test_arg, rundir)
                                )
                        status.print_status(msg, verbose)
                    elif err:
                        # re-raise first error we hit.
                        raise exceptions.RunError(err[0])
                    else:
                        self.verify_job(test_input, test_arg, verbose, rundir)
        except exceptions.RunError:
            err = sys.exc_info()[1]
            if verbose > 2:
                err = 'Test(s) in %s failed.\n%s' % (self.path, err)
            status = validation.Status([False])
            self._update_status(status, (test_input, test_arg))
            if verbose > 0 and verbose < 3:
                info_line = util.info_line(self.path, test_input, test_arg,
                                           rundir)
                sys.stdout.write(info_line)
            status.print_status(err, verbose)
            # Shouldn't run remaining tests after such a catastrophic failure.
            # Mark all remaining tests as skipped so the user knows that they
            # weren't run.
            err = 'Previous test in %s caused a system failure.' % (self.path)
            status = validation.Status(name='skipped')
            for ((test_input, test_arg), stat) in self.status.items():
                if not self.status[(test_input,test_arg)]:
                    self._update_status(status, (test_input, test_arg))
                    if verbose > 2:
                        cmd = self.test_program.run_cmd(test_input, test_arg,
                                                        self.nprocs)
                        print('Test using %s in %s' % (cmd, self.path))
                    elif verbose > 0:
                        info_line = util.info_line(self.path, test_input,
                                                   test_arg, rundir)
                        sys.stdout.write(info_line)
                    status.print_status(err, verbose)

    def _start_job(self, cmd, cluster_queue=None, verbose=1):
        '''Start test running. Requires directory lock.

        IMPORTANT: use self.start_job rather than self._start_job if using
        multiple threads.

        Decorated to start_job, which acquires directory lock and enters
        self.path first, during initialisation.'''
        if cluster_queue:
            # Submit via the queueing system using the user's template file.
            tp_ptr = self.test_program
            submit_file = '%s.%s' % (os.path.basename(self.submit_template),
                                     tp_ptr.test_id)
            job = queues.ClusterQueueJob(submit_file, system=cluster_queue)
            job.create_submit_file(tp_ptr.submit_pattern, cmd,
                                   self.submit_template)
            if verbose > 2:
                print('Submitting tests using %s (template submit file) in %s'
                      % (self.submit_template, self.path))
            job.start_job()
        else:
            # Run locally via subprocess.
            if verbose > 2:
                print('Running test using %s in %s\n' % (cmd, self.path))
            try:
                job = subprocess.Popen(cmd, shell=True)
            except OSError:
                # slightly odd syntax in order to be compatible with python 2.5
                # and python 2.6/3
                err = 'Execution of test failed: %s' % (sys.exc_info()[1],)
                raise exceptions.RunError(err)
        # Return either Popen object or ClusterQueueJob object. Both have
        # a wait method which returns only once job has finished.
        return job

    def _move_output_to_test_output(self, test_files_out):
        '''Move output to the testcode output file. Requires directory lock.

        This is used when a program writes its results to a file matched by
        the output pattern rather than to STDOUT.

        IMPORTANT: use self.move_output_to_test_output rather than
        self._move_output_to_test_output if using multiple threads.

        Decorated to move_output_to_test_output, which acquires the directory
        lock and enters self.path.
        '''
        # self.output might be a glob which works with e.g.
        #   mv self.output test_files[ind]
        # if self.output matches only one file. Reproduce that
        # here so that running tests through the queueing system
        # and running tests locally have the same behaviour.
        out_files = glob.glob(self.output)
        if len(out_files) == 1:
            shutil.move(out_files[0], test_files_out)
        else:
            err = ('Output pattern (%s) matches %s files (%s).'
                   % (self.output, len(out_files), out_files))
            raise exceptions.RunError(err)

    def _move_old_output_files(self, verbose=1):
        '''Move pre-existing files matching the output pattern out of the way.
        Requires directory lock.

        This prevents stale output from a previous run being mistaken for the
        output of the current run.

        IMPORTANT: use self.move_old_output_files rather than
        self._move_old_output_files if using multiple threads.

        Decorated to move_old_output_files, which acquires the directory
        lock and enters self.path.
        '''
        if self.output:
            old_out_files = glob.glob(self.output)
            if old_out_files:
                out_dir = 'test.prev.output.%s' % (self.test_program.test_id)
                if verbose > 2:
                    print('WARNING: found existing files matching output '
                          'pattern: %s.' % self.output)
                    print('WARNING: moving existing output files (%s) to %s.\n'
                          % (', '.join(old_out_files), out_dir))
                if not os.path.exists(out_dir):
                    os.mkdir(out_dir)
                for out_file in old_out_files:
                    shutil.move(out_file, out_dir)

    def _verify_job(self, input_file, args, verbose=1, rundir=None):
        '''Check job against benchmark.

        Assume function is executed in self.path.

        IMPORTANT: use self.verify_job rather than self._verify_job if using
        multiple threads.

        Decorated to verify_job, which acquires directory lock and enters
        self.path first, during initialisation.'''
        # We already have DIR_LOCK, so use _skip_job instead of skip_job.
        (status, msg) = self._skip_job(input_file, args, verbose)
        try:
            if self.test_program.verify and not status.skipped():
                # User-supplied verification script decides pass/fail.
                (status, msg) = self.verify_job_external(input_file, args,
                                                         verbose)
            elif not status.skipped():
                # Compare extracted data against the benchmark values.
                (bench_out, test_out) = self.extract_data(input_file, args,
                                                          verbose)
                (comparable, status, msg) = validation.compare_data(
                        bench_out, test_out, self.default_tolerance,
                        self.tolerances, self.test_program.ignore_fields)
                if verbose > 2:
                    # Include data tables in output.
                    if comparable:
                        # Combine test and benchmark dictionaries.
                        data_table = util.pretty_print_table(
                                ['benchmark', 'test'],
                                [bench_out, test_out])
                    else:
                        # Print dictionaries separately--couldn't even compare
                        # them!
                        data_table = '\n'.join((
                            util.pretty_print_table(['benchmark'],
                                                    [bench_out]),
                            util.pretty_print_table(['test '],
                                                    [test_out])))
                    if msg.strip():
                        # join data table with error message from
                        # validation.compare_data.
                        msg = '\n'.join((msg, data_table))
                    else:
                        msg = data_table
        except (exceptions.AnalysisError, exceptions.TestCodeError):
            if msg.strip():
                msg = '%s\n%s' % (msg, sys.exc_info()[1])
            else:
                msg = sys.exc_info()[1]
            status = validation.Status([False])
        self._update_status(status, (input_file, args))
        if verbose > 0 and verbose < 3:
            info_line = util.info_line(self.path, input_file, args, rundir)
            sys.stdout.write(info_line)
        status.print_status(msg, verbose)
        return (status, msg)

    def _skip_job(self, input_file, args, verbose=1):
        '''Run user-supplied command to check if test should be skipped.

        IMPORTANT: use self.skip_job rather than self._skip_job if using
        multiple threads.

        Decorated to skip_job, which acquires directory lock and enters
        self.path first, during initialisation.'''
        status = validation.Status()
        if self.test_program.skip_program:
            cmd = self.test_program.skip_cmd(input_file, args)
            try:
                if verbose > 2:
                    print('Testing whether to skip test using %s in %s.'
                          % (cmd, self.path))
                skip_popen = subprocess.Popen(cmd, shell=True,
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                skip_popen.wait()
                if skip_popen.returncode == 0:
                    # skip this test
                    status = validation.Status(name='skipped')
            except OSError:
                # slightly odd syntax in order to be compatible with python
                # 2.5 and python 2.6/3
                if verbose > 2:
                    print('Test to skip test: %s' % (sys.exc_info()[1],))
        return (status, '')

    def verify_job_external(self, input_file, args, verbose=1):
        '''Run user-supplied verifier script.

        Assume function is executed in self.path.'''
        # NOTE(review): the command comes from extract_cmd here --
        # presumably it doubles as the verifier command; confirm.
        verify_cmd, = self.test_program.extract_cmd(self.path, input_file,
                                                    args)
        try:
            if verbose > 2:
                print('Analysing test using %s in %s.'
                      % (verify_cmd, self.path))
            # NOTE(review): wait() before communicate() with PIPEs can
            # deadlock on large output (see the comment in extract_data);
            # confirm verifier output is small.
            verify_popen = subprocess.Popen(verify_cmd, shell=True,
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            verify_popen.wait()
        except OSError:
            # slightly odd syntax in order to be compatible with python 2.5
            # and python 2.6/3
            err = 'Analysis of test failed: %s' % (sys.exc_info()[1],)
            raise exceptions.AnalysisError(err)
        output = verify_popen.communicate()[0].decode('utf-8')
        if verbose < 2:
            # Suppress output. (hackhack)
            output = ''
        if verify_popen.returncode == 0:
            return (validation.Status([True]), output)
        else:
            return (validation.Status([False]), output)

    def extract_data(self, input_file, args, verbose=1):
        '''Extract data from output file.

        Assume function is executed in self.path.'''
        tp_ptr = self.test_program
        if tp_ptr.data_tag:
            # Using internal data extraction function.
            data_files = [
                tp_ptr.select_benchmark_file(self.path, input_file, args),
                util.testcode_filename(FILESTEM['test'], tp_ptr.test_id,
                                       input_file, args),
            ]
            if verbose > 2:
                print('Analysing output using data_tag %s in %s on files %s.'
                      % (tp_ptr.data_tag, self.path,
                         ' and '.join(data_files)))
            outputs = [util.extract_tagged_data(tp_ptr.data_tag, dfile)
                       for dfile in data_files]
        else:
            # Using external data extraction script.
            # Get extraction commands.
            extract_cmds = tp_ptr.extract_cmd(self.path, input_file, args)
            # Extract data.
            outputs = []
            for cmd in extract_cmds:
                try:
                    if verbose > 2:
                        print('Analysing output using %s in %s.'
                              % (cmd, self.path))
                    # Samuel Ponce: Popen.wait() creates deadlock if the data is too large
                    # See documented issue for example in:
                    # https://docs.python.org/2/library/subprocess.html#subprocess.Popen.returncode
                    #
                    # Previous code that create deadlock:
                    #extract_popen = subprocess.Popen(cmd, shell=True,
                    #    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    #extract_popen.wait()
                    #
                    # New code (this might not be the best but work for me):
                    extract_popen = subprocess.Popen(cmd, bufsize=1,
                            shell=True, stdin=open(os.devnull),
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    lines = []
                    # NOTE(review): on Python 3 stdout yields bytes, so the
                    # '' sentinel never matches (loop would not terminate)
                    # and the ''.join below would fail -- this path looks
                    # Python 2 only; confirm the supported Python versions.
                    for line in iter(extract_popen.stdout.readline, ''):
                        #print line,
                        lines.append(line)
                except OSError:
                    # slightly odd syntax in order to be compatible with python
                    # 2.5 and python 2.6/3
                    err = 'Analysing output failed: %s' % (sys.exc_info()[1],)
                    raise exceptions.AnalysisError(err)
                # Convert data string from extract command to dictionary format.
                # SP: Because of the above change, the test below cannot be done:
                #if extract_popen.returncode != 0:
                #    err = extract_popen.communicate()[1].decode('utf-8')
                #    err = 'Analysing output failed: %s' % (err)
                #    raise exceptions.AnalysisError(err)
                #data_string = extract_popen.communicate()[0].decode('utf-8')
                data_string = ''.join(lines)
                if self.test_program.extract_fmt == 'table':
                    outputs.append(util.dict_table_string(data_string))
                elif self.test_program.extract_fmt == 'yaml':
                    outputs.append({})
                    # convert values to be in a tuple so the format matches
                    # that from dict_table_string.
                    # ensure all keys are strings so they can be sorted
                    # (different data types cause problems!)
                    for (key, val) in yaml.safe_load(data_string).items():
                        if isinstance(val, list):
                            outputs[-1][str(key)] = tuple(val)
                        else:
                            outputs[-1][str(key)] = tuple((val,))
        return tuple(outputs)

    def create_new_benchmarks(self, benchmark, copy_files_since=None,
            copy_files_path='testcode_data'):
        '''Copy the test files to benchmark files.'''
        oldcwd = os.getcwd()
        os.chdir(self.path)
        test_files = []
        for (inp, arg) in self.inputs_args:
            test_file = util.testcode_filename(FILESTEM['test'],
                    self.test_program.test_id, inp, arg)
            err_file = util.testcode_filename(FILESTEM['error'],
                    self.test_program.test_id, inp, arg)
            bench_file = util.testcode_filename(_FILESTEM_DICT['benchmark'],
                    benchmark, inp, arg)
            test_files.extend((test_file, err_file, bench_file))
            shutil.copy(test_file, bench_file)
        if copy_files_since:
            if not os.path.isdir(copy_files_path):
                os.mkdir(copy_files_path)
            if os.path.isdir(copy_files_path):
                for data_file in glob.glob('*'):
                    # os.stat(...)[-2] is st_mtime: copy data files modified
                    # since copy_files_since, except testcode's own files.
                    if (os.path.isfile(data_file) and
                            os.stat(data_file)[-2] >= copy_files_since and
                            data_file not in test_files):
                        bench_data_file = os.path.join(copy_files_path,
                                data_file)
                        # shutil.copy can't overwrite files so remove old ones
                        # with the same name.
                        if os.path.exists(bench_data_file):
                            os.unlink(bench_data_file)
                        shutil.copy(data_file, bench_data_file)
        os.chdir(oldcwd)

    def _update_status(self, status, inp_arg):
        '''Update self.status with success of a test.'''
        if status:
            self.status[inp_arg] = status
        else:
            # Something went wrong. Store a Status failed object.
            self.status[inp_arg] = validation.Status([False])

    def get_status(self):
        '''Get number of passed and number of ran tasks.'''
        # If there's an object (other than None/False) in the corresponding
        # dict entry in self.status, then that test must have ran (albeit not
        # necessarily successfully!).
        status = {}
        status['passed'] = sum(True for stat in self.status.values()
                               if stat and stat.passed())
        status['warning'] = sum(True for stat in self.status.values()
                                if stat and stat.warning())
        status['skipped'] = sum(True for stat in self.status.values()
                                if stat and stat.skipped())
        status['failed'] = sum(True for stat in self.status.values()
                               if stat and stat.failed())
        status['unknown'] = sum(True for stat in self.status.values()
                                if stat and stat.unknown())
        status['ran'] = sum(True for stat in self.status.values() if stat)
        return status
This is a sponsored post written by me on behalf of Citrus Lane. If you are like me you have a few (or a lot) of gifts left to check off your list. Don’t worry! I’m still sharing lots of great ideas over the next week or two! Here is a unique idea that can last all year long. Maybe you’ll want to get this one yourself! Have you heard of Citrus Lane? It is a company that sends boxes monthly full of fun and unique products geared toward children ages 0-5. These may include toys, snacks, books, and/or personal care products. I was excited to receive my own box to test out and share with my children. Here’s what my box looked like. I really liked the packaging so I took lots of pictures. There was even a punch out activity on the bottom of the box. And these are the contents. I am a music teacher to kids, so the rhythm sticks immediately made their way to my rhythm box where they will get lots of use. My kids were really excited by this light-up octopus and decided it should be a present for our new baby when it comes. It can glow in green or blue light and scatters stars across the ceiling. We arm wrestled over these organic baked Potato chips and I won. No, I shared, but what can I say, if the pregnant woman wants to eat something, you’d probably better let her. I need to find some more of these! My 1st grader loved reading the stories in the Ladybug magazine. They were interesting to her and she could read them to her younger brother. There was also a coupon for a FREE digital subscription to the magazine, or info on how to subscribe to the printed version. We enjoyed discovering each of our fun surprises. It would be really fun to have a surprise each month, and not just on Christmas! Something to look forward to throughout the year. Especially since my birthday is in December, too. If you are less enthusiastic about surprises and want to pick what you are getting, you’ll want to check out the newly launched Citrus Lane Shop. 
It is full of beautiful, high-quality toys and children’s product. I was impressed with the educational value of the toys they carry, and they all appeal to my design aesthetic! Here is some merry news, just in time for Christmas!! One of you will be getting a $25.00 voucher to the Citrus Lane Shop! Leave a comment stating what you would purchase from the Citrus Lane shop if you won this voucher. BONUS entry: Tweet about this giveaway using the hashtag, #CitrusLane, and leave the URL to the tweet in the comments. Hurry! Giveaway ends Dec. 15! This is a sponsored post written by me on behalf of Citrus Lane. New customers who purchase a subscription will receive $10.00 off their first month with code BLOGPOST. Expires 12/31/2013. Offer Terms: Discount may only be applied to subscriptions purchased by new Citrus Lane customers. It cannot be applied to gift subscriptions. Subscriptions automatically renew to full retail price, $25 per month. You may cancel anytime. For more information, visit http://www.citruslane.com. The oatmeal colored Teddy Bear would be perfect for the grandbaby due in February! Thanks so much for the opportunity! I would love to order a box geared toward a newborn. I love the decorative blocks. I would order the Octopus light. My girl would LOVE that thing! She’s been begging for a similar light for a while now. I think my preschooler would love the ukelele. I love the octopus light super cute.
#!/usr/bin/env python
"""WiFi health checker: ping the gateway and reach the internet; restart
the wlan interface if either check fails, then join the ZeroTier network."""

import configparser
import netifaces
import os
import re
import requests
import sh
import socket
import sys
import time
import logging

from time import sleep

# Static network configuration shipped on the boot partition.
NETWORK_PATH = '/boot/network.ini'

logging.basicConfig(level=logging.INFO, format='%(message)s')


def get_default_gw():
    """Return the IPv4 address of the default gateway."""
    gws = netifaces.gateways()
    return gws['default'][netifaces.AF_INET][0]


def ping_test(host):
    """Ping *host* 10 times; return True if packet loss is <= 60%."""
    ping = sh.ping('-q', '-c', 10, host, _ok_code=[0, 1])
    # BUG FIX: sh exposes stdout as bytes on Python 3; decode before
    # applying a str regex.
    stdout = ping.stdout.decode('utf-8', 'replace')
    packet_loss = re.findall(r'(\d+)% packet loss', stdout)[0]
    if int(packet_loss) > 60:
        logging.error('Unable to ping gateway.')
        return False
    else:
        return True


def http_test(host):
    """Return True if an HTTP HEAD request to *host* gets a 2xx/3xx reply."""
    try:
        r = requests.head(host, allow_redirects=True, verify=False)
        if 200 <= r.status_code < 400:
            return True
        else:
            logging.error('Unable to reach Cast server.')
            return False
    except Exception as e:
        logging.error('http_test failed: {}'.format(str(e)))
        return False


def restart_networking():
    """Restart the system networking service."""
    networking = sh.Command('/etc/init.d/networking')
    networking('restart')


def restart_interface(interface):
    """Force *interface* down, then restart networking to bring it back."""
    logging.info('Restarting network interface.')
    ifdown = sh.Command('/sbin/ifdown')
    ifdown('--force', interface)
    restart_networking()


def is_static(config, interface):
    """Return truthy if *interface* has a full static configuration
    (ip, netmask and gateway all present)."""
    ip = config.get(interface, 'ip', fallback=False)
    netmask = config.get(interface, 'netmask', fallback=False)
    gateway = config.get(interface, 'gateway', fallback=False)
    return ip and netmask and gateway


def bring_up_interface(interface):
    """Restart *interface* until it obtains an IP; retry up to 10 times
    with a 15s pause between attempts. Return True on success."""
    retry_limit = 10
    retries = 0
    while retries < retry_limit:
        restart_interface(interface)
        if has_ip(interface):
            return True
        else:
            retries += 1
            time.sleep(15)
    logging.error('Unable to bring up network interface.')
    return False


def bring_down_interface(interface):
    """Force *interface* down."""
    logging.info('Bringing down interface %s', interface)
    ifdown = sh.Command('/sbin/ifdown')
    ifdown('--force', interface)


def has_ip(interface):
    """Return True if *interface* has a valid IPv4 address assigned."""
    try:
        ips = netifaces.ifaddresses(interface)
    except ValueError:
        logging.error('Interface does not exist.')
        return False
    for k in ips.keys():
        ip = ips[k][0].get('addr', False)
        if ip:
            try:
                # Non-IPv4 entries (e.g. link-layer MAC addresses) fail
                # inet_aton and are skipped.
                socket.inet_aton(ip)
                return True
            except socket.error:
                pass
    return False


def get_active_iface(config, prefix):
    """Return the first '<prefix>N' (N in 0..9) that has a config section,
    or False if none is configured."""
    for n in range(10):
        iface = '{}{}'.format(prefix, n)
        if config.has_section(iface):
            return iface
    return False


def join_zerotier_network():
    """Join the ZeroTier management network."""
    os.system('/usr/sbin/zerotier-cli join 17d709436cf23366')


if __name__ == '__main__':
    config = configparser.RawConfigParser()
    config.read(NETWORK_PATH)

    wifi_iface = get_active_iface(config, 'wlan')
    if wifi_iface:
        logging.info('Found wifi interface {}'.format(wifi_iface))
        reaches_internet = http_test('http://example.com')
        can_ping_gw = ping_test(get_default_gw())
        if reaches_internet and can_ping_gw:
            logging.info('WiFi interface is healthy.')
        else:
            # BUG FIX: this block previously referenced the undefined name
            # 'can_ping_cw' (NameError) and paired the log messages with
            # inverted conditions.
            if not reaches_internet and not can_ping_gw:
                logging.error('Unable to connect to internet and gateway')
            elif not can_ping_gw:
                logging.error('Unable to connect to gateway')
            else:
                logging.error('Unable to connect to the internet')
            logging.info('Restarting {}'.format(wifi_iface))
            wifi_is_healthy = bring_up_interface(wifi_iface)
            if wifi_is_healthy:
                logging.info('WiFi is healthy again!')
            else:
                logging.error('WiFi still isn\'t healthy')

    join_zerotier_network()
CCBoot supports diskless booting of client PCs which have dual NICs. Note: CCBoot does not support booting from a wireless LAN. 1) The client PC has a hard disk drive installed with an operating system, and also has two network cards. Notes: In newer versions of CCBoot, you do not need to disable the second NIC before installing CCBoot Client. 2) Install CCBoot Client, click "Install Client", then press the “Yes” button to start installation (see Figure 1-1). 3) Now, a pop-up dialog box will open and ask which NIC to use for diskless booting of the client (as in Figure 1-2). 4) Click the drop-down and choose the NIC you want to use to boot the client from (as in Figure 1-3). 5) Click “OK” to continue with the CCBoot Client installation. 6) Once installation is complete, fill in the Upload Image fields and click the “Upload Image” button to upload the image (as in Figure 1-4). Note: If you want to make use of both NICs, you need to set a static IP on the main NIC used for booting and leave the second NIC on DHCP. This way, you can use both NICs for internet access or for other networking purposes.
#!/usr/bin/env python3
"""Minimal HTTP-like server that accepts POSTed Python source and stores it.

Clients POST a rock-paper-scissors bot as the request body with a Basic
Authorization header; the body is saved to SRCPATH/<user>_<token>.py.
"""

import sys
import asyncio
import threading
import base64
import os

# Directory where submitted sources are stored.
SRCPATH = './src/'


class KWBServerProtocol(asyncio.Protocol):
    """Handles one POST request per connection and saves its body."""

    def connection_made(self, transport):
        """Remember the transport and reset per-connection parse state."""
        print('Connected: {}'.format(transport.get_extra_info('peername')))
        self.transport = transport
        self.method = None          # request line, parsed from first chunk
        self.authorization = None   # decoded 'user:token' string
        self.content_length = None  # declared body length in bytes
        self.message = str()        # accumulated request body

    def data_received(self, data):
        """Parse headers on the first chunk, then accumulate the body.

        Replies '400 Bad Request' and closes on malformed input; replies
        '202 Accepted' and writes the body to disk once the declared
        Content-Length has been received in full.
        """
        transport = self.transport
        message = data.decode('utf-8')

        # Header check (first chunk of the connection only).
        if self.method is None:
            self.method = message.split('\r\n')[0]
            if not self.method.startswith('POST / HTTP/1.1'):
                transport.write('400 Bad Request\r\n'.encode('ascii'))
                transport.close()
                # BUG FIX: previously fell through and kept parsing the
                # rejected request; with content_length still None the
                # comparison below then raised TypeError.
                return

            # Header parsing.
            headers = message.split('\r\n\r\n')[0]
            for header in headers.split('\r\n'):
                if header.startswith('Authorization'):
                    self.authorization = header.split(' ')[-1]
                    self.authorization = \
                        base64.b64decode(self.authorization)
                    self.authorization = self.authorization.decode('utf-8')
                    print('Authorizing Info.: {}'.format(
                        self.authorization))
                elif header.startswith('Content-Length'):
                    self.content_length = int(header.split(' ')[-1])
                    if self.content_length > 10240:
                        transport.write(
                            '400 Bad Request\r\n'.encode('ascii'))
                        transport.close()
                        return  # BUG FIX: stop processing oversized request
            if None in (self.method, self.authorization,
                        self.content_length):
                transport.write('400 Bad Request\r\n'.encode('ascii'))
                transport.close()
                return  # BUG FIX: stop processing incomplete headers
            # Keep only the body part of this first chunk. BUG FIX:
            # partition() preserves any '\r\n\r\n' sequences inside the
            # body itself; ''.join(split(...)[1:]) silently dropped them.
            message = message.partition('\r\n\r\n')[2]

        # Accumulate body data.
        self.message = self.message + message
        if self.content_length < len(self.message.encode('utf-8')):
            transport.write('400 Bad Request\r\n'.encode('ascii'))
            transport.close()
            return  # BUG FIX: do not fall through after rejecting
        if self.content_length == len(self.message.encode('utf-8')):
            if 'import random' in self.message:
                transport.write(
                    '400 Bad Request: import random\r\n'.encode('ascii'))
                transport.close()
            else:
                transport.write('202 Accepted\r\n'.encode('ascii'))
                transport.write(self.message.encode('utf-8'))
                # NOTE(review): user-controlled values end up in the file
                # name; ensure 'user:token' cannot contain path separators.
                filename = (SRCPATH + self.authorization.split(':')[0] +
                            '_' + self.authorization.split(':')[1] + '.py')
                with open(filename, 'w') as received_file:
                    received_file.write(self.message)
                transport.close()

    def eof_received(self):
        """Close the connection when the peer signals EOF."""
        self.transport.close()

    def connection_lost(self, exc):
        pass


def main():
    """Create the source directory and serve on port 8080 until Ctrl-C."""
    # Create the directory that uploaded sources are written into.
    os.makedirs(SRCPATH, exist_ok=True)

    loop = asyncio.get_event_loop()
    coro = loop.create_server(KWBServerProtocol, '', 8080,
                              reuse_address=True, reuse_port=True)
    print('가위바위보 서버가 시작되었습니다.')
    server = loop.run_until_complete(coro)
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        print('가위바위보 서버가 종료되었습니다.')
        pass

    server.close()
    loop.run_until_complete(server.wait_closed())
    loop.close()


if __name__ == '__main__':
    main()
At PACC our experience is matched by the range of our tools. Whether your project is relatively small or massive, we have the tools to ensure that the job gets done right. PACC owns heavy equipment and reusable materials used for all of our jobs.
import re from thefuck.utils import for_app commands = ('ssh', 'scp') @for_app(*commands) def match(command): if not command.script: return False if not command.script.startswith(commands): return False patterns = ( r'WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED!', r'WARNING: POSSIBLE DNS SPOOFING DETECTED!', r"Warning: the \S+ host key for '([^']+)' differs from the key for the IP address '([^']+)'", ) return any(re.findall(pattern, command.stderr) for pattern in patterns) def get_new_command(command): return command.script def side_effect(old_cmd, command): offending_pattern = re.compile( r'(?:Offending (?:key for IP|\S+ key)|Matching host key) in ([^:]+):(\d+)', re.MULTILINE) offending = offending_pattern.findall(old_cmd.stderr) for filepath, lineno in offending: with open(filepath, 'r') as fh: lines = fh.readlines() del lines[int(lineno) - 1] with open(filepath, 'w') as fh: fh.writelines(lines)
As a sophomore in high school, I remember looking around the classroom and observing the small group of students I would be spending the rest of the year learning geometry with. Being more of a serious student who also constantly craved conversation, I found instant relief in the discovery that the two boys who sat behind me also enjoyed a good joke between the breaks in our oh-so-intelligent and serious teacher’s lessons. The best part about these two boys was that they too were serious about acing every assignment and test we received. Except one of the boys was especially impressive. He always had a smile on his face and his presence exuded respect, maturity, and optimism. Little did I know that he had been battling cancer for years. Tim Vorenkamp, an alumnus of JSerra Catholic High School and a sophomore at the University of California at Berkeley, was diagnosed with cancer at age 13 while he was playing for the USA Volleyball Boys Junior National Championship division. He had noticed a lump growing on his leg. He was soon diagnosed with a very rare form of cancer called Synovial Cell Sarcoma. Only 500 people are diagnosed with it annually. Despite receiving the news that it was already in stage three, Vorenkamp recalls smiling because he knew it at least wasn’t in stage four. A recent article by the Orange County Register about Vorenkamp states, “He spoke about his past goals – continuing to compete for the USA Volleyball junior national program, getting a major college scholarship, maybe making the U.S. Olympic team and having a pro career, plus a college education and a career in business or sports. Then he spoke of his new goals.” On January 10, 2016, Tim passed away at age 18. A few months previous, he had started a non-profit foundation called the Live For Others Foundation. This foundation was created to raise awareness of Synovial Cell Sarcoma as well as to fund further research on finding the cure. For Tim, his passing is just a new journey with Jesus.
©2018 Live for Others Foundation. All rights reserved.
""" Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com> This file is part of RockStor. RockStor is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. RockStor is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ from datetime import datetime from django.utils.timezone import utc from django.conf import settings from storageadmin.models import (Share, Disk, Snapshot, SFTP) from smart_manager.models import ShareUsage from fs.btrfs import (mount_share, mount_snap, is_share_mounted, is_mounted, umount_root, shares_info, share_usage, snaps_info, qgroup_create, update_quota) from storageadmin.util import handle_exception import logging logger = logging.getLogger(__name__) def helper_mount_share(share, mnt_pt=None): if (not is_share_mounted(share.name)): if(mnt_pt is None): mnt_pt = ('%s%s' % (settings.MNT_PT, share.name)) mount_share(share, mnt_pt) def validate_share(sname, request): try: return Share.objects.get(name=sname) except: e_msg = ('Share with name: %s does not exist' % sname) handle_exception(Exception(e_msg), request) def sftp_snap_toggle(share, mount=True): for snap in Snapshot.objects.filter(share=share, uvisible=True): mnt_pt = ('%s/%s/%s/.%s' % (settings.SFTP_MNT_ROOT, share.owner, share.name, snap.name)) if (mount and not is_mounted(mnt_pt)): mount_snap(share, snap.name, mnt_pt) elif (is_mounted(mnt_pt) and not mount): umount_root(mnt_pt) def toggle_sftp_visibility(share, snap_name, on=True): if (not SFTP.objects.filter(share=share).exists()): return mnt_pt = ('%s/%s/%s/.%s' 
% (settings.SFTP_MNT_ROOT, share.owner, share.name, snap_name)) if (on): if (not is_mounted(mnt_pt)): mount_snap(share, snap_name, mnt_pt) else: umount_root(mnt_pt) def import_shares(pool, request): disk = Disk.objects.filter(pool=pool)[0].name shares = [s.name for s in Share.objects.filter(pool=pool)] shares_d = shares_info(pool) for s in shares: if (s not in shares_d): Share.objects.get(pool=pool, name=s).delete() for s in shares_d: if (s in shares): share = Share.objects.get(name=s) share.qgroup = shares_d[s] rusage, eusage = share_usage(pool, share.qgroup) ts = datetime.utcnow().replace(tzinfo=utc) if (rusage != share.rusage or eusage != share.eusage): share.rusage = rusage share.eusage = eusage su = ShareUsage(name=s, r_usage=rusage, e_usage=eusage, ts=ts) su.save() else: try: su = ShareUsage.objects.filter(name=s).latest('id') su.ts = ts su.count += 1 except ShareUsage.DoesNotExist: su = ShareUsage(name=s, r_usage=rusage, e_usage=eusage, ts=ts) finally: su.save() share.save() continue try: cshare = Share.objects.get(name=s) cshares_d = shares_info('%s%s' % (settings.MNT_PT, cshare.pool.name)) if (s in cshares_d): e_msg = ('Another pool(%s) has a Share with this same ' 'name(%s) as this pool(%s). This configuration is not supported.' 
' You can delete one of them manually with this command: ' 'btrfs subvol delete %s[pool name]/%s' % (cshare.pool.name, s, pool.name, settings.MNT_PT, s)) handle_exception(Exception(e_msg), request) else: cshare.pool = pool cshare.qgroup = shares_d[s] cshare.size = pool.size cshare.subvol_name = s cshare.rusage, cshare.eusage = share_usage(pool, cshare.qgroup) cshare.save() except Share.DoesNotExist: pqid = qgroup_create(pool) update_quota(pool, pqid, pool.size * 1024) nso = Share(pool=pool, qgroup=shares_d[s], pqgroup=pqid, name=s, size=pool.size, subvol_name=s) nso.save() mount_share(nso, '%s%s' % (settings.MNT_PT, s)) def import_snapshots(share): snaps_d = snaps_info('%s%s' % (settings.MNT_PT, share.pool.name), share.name) disk = Disk.objects.filter(pool=share.pool)[0].name snaps = [s.name for s in Snapshot.objects.filter(share=share)] for s in snaps: if (s not in snaps_d): Snapshot.objects.get(share=share,name=s).delete() for s in snaps_d: if (s in snaps): so = Snapshot.objects.get(share=share, name=s) else: so = Snapshot(share=share, name=s, real_name=s, writable=snaps_d[s][1], qgroup=snaps_d[s][0]) rusage, eusage = share_usage(share.pool, snaps_d[s][0]) ts = datetime.utcnow().replace(tzinfo=utc) if (rusage != so.rusage or eusage != so.eusage): so.rusage = rusage so.eusage = eusage su = ShareUsage(name=s, r_usage=rusage, e_usage=eusage, ts=ts) su.save() else: try: su = ShareUsage.objects.filter(name=s).latest('id') su.ts = ts su.count += 1 except ShareUsage.DoesNotExist: su = ShareUsage(name=s, r_usage=rusage, e_usage=eusage, ts=ts) finally: su.save() so.save()
Want to open a business in Ireland? We can help! Translation/Interpretation services (our staff members cover 5 languages: English, Italian, French, Spanish and Portuguese). Our team collaborates with the best accountancy firms in the city and we are also available for professional translation services if required.
import random as random_lib
import copy

from opsbro.evaluater import export_evaluater_function

FUNCTION_GROUP = 'random'


@export_evaluater_function(function_group=FUNCTION_GROUP)
def random():
    """**random()** -> Returns a random float between 0 and 1

<code>
    Example:
        random()
    Returns:
        0.6988342144113194
</code>
    """
    # Delegate to the stdlib generator; it is imported as `random_lib`
    # because this exported function deliberately takes the name `random`.
    return random_lib.random()


@export_evaluater_function(function_group=FUNCTION_GROUP)
def randomint_between(int_start, int_end):
    """**randomint_between(int_start, int_end)** -> Returns a random int between the start and the end

<code>
    Example:
        randomint_between(1, 100)
    Returns:
        69
</code>
    """
    # Both endpoints are inclusive, matching random.randint semantics.
    return random_lib.randint(int_start, int_end)


@export_evaluater_function(function_group=FUNCTION_GROUP)
def shuffle(list):
    """**shuffle(list)** -> Return a copy of the list shuffled randomly

<code>
    Example:
        shuffle([ 1, 2, 3, 4 ])
    Returns:
        [ 3, 1, 4, 2 ]
</code>
    """
    # random.shuffle works in place, so shuffle a shallow copy to leave the
    # caller's list untouched.  (The parameter name `list` is part of the
    # exported interface and is kept even though it shadows the builtin.)
    shuffled = copy.copy(list)
    random_lib.shuffle(shuffled)
    return shuffled
This video was part of the Brent Walker series that was broadcast in the United States on PBS. It is one of the more fascinating entries in the series, with some intriguing innovations as well as a few ideas that fall flat. The high point is surely the ghost scene, with deceased Murgatroyds enjoying tea and biscuits while they harass Sir Ruthven. The ghosts do not all revive from portraits—a few are memorialized as busts or statues, a clever twist that I've seen copied in a couple of stage productions. The first act is generally less appealing. A particularly irritating bit occurs in "When sailing o'er life's ocean wide," in which the picture suddenly cuts to the three singers apparently floating in a barrel in the middle of the ocean. This is video technology at its least appealing, where the director is calling attention to his own cleverness instead of just telling the story. The choice of an over-the-hill Vincent Price as Sir Despard is controversial. Some find it an inspired bit of type-casting, while others lament his lack of a passable singing voice. I lean toward the former, but there is admittedly no consensus on the issue. J. Donald Smith notes that "his singing is not bad in the ensemble numbers (and his diction is perfect in the 'Matter' trio) but very weak as a soloist. He obviously needed someone to set the pitch for him." On the other hand, I haven't found anyone who particularly likes Keith Michell in any role whatsoever. Overall, this is a Ruddigore we're fortunate to have. It will continue to spark debate, but it's a production all should see. The production has numerous cuts, including, "I shipped d'ye see," "The battle's roar is over," "Away, remorse," the second verse of the Madrigal, three out of four verses of "Oh, happy the lily," and the second verse of "There grew a little flower," not to mention incidental dialogue cuts. 
Even so, it was a tight fit into PBS's two hour slot, as the credits roll during the Act II finale (the only Walker video in which this happens). I understand that "I shipped d'ye see" was included in the version broadcast on the BBC (although not in the home video version), and it is pictured in the Darlene Geis book, The Gilbert and Sullivan Operas, which was issued as a companion to the series. 2002 Acorn Media DVD AMP-5386 Available only in a 10-disc boxed set (cat. AMP-5483) including the entire Brent Walker series, excepting Trial and Cox and Box.
# -*- coding: utf-8 -*- __all__ = ['Media', 'MessageEntity', 'DownloadableMedia', 'PhotoSize', 'Audio', 'Animation', 'Document', 'Sticker', 'Video', 'Voice', 'VideoNote', 'Contact', 'Location', 'Venue', 'UserProfilePhotos', 'File', 'ChatPhoto', 'Game'] from luckydonaldUtils.encoding import unicode_type, to_unicode as u from luckydonaldUtils.exceptions import assert_type_or_raise from . import Receivable from . import Result __author__ = 'luckydonald' class Media(Receivable): pass # end Media class MessageEntity(Result): """ This object represents one special entity in a text message. For example, hashtags, usernames, URLs, etc. https://core.telegram.org/bots/api#messageentity Parameters: :param type: Type of the entity. Can be "mention" (@username), "hashtag" (#hashtag), "cashtag" ($USD), "bot_command" (/start@jobs_bot), "url" (https://telegram.org), "email" (do-not-reply@telegram.org), "phone_number" (+1-212-555-0123), "bold" (bold text), "italic" (italic text), "underline" (underlined text), "strikethrough" (strikethrough text), "code" (monowidth string), "pre" (monowidth block), "text_link" (for clickable text URLs), "text_mention" (for users without usernames) :type type: str|unicode :param offset: Offset in UTF-16 code units to the start of the entity :type offset: int :param length: Length of the entity in UTF-16 code units :type length: int Optional keyword parameters: :param url: Optional. For "text_link" only, url that will be opened after user taps on the text :type url: str|unicode :param user: Optional. For "text_mention" only, the mentioned user :type user: pytgbot.api_types.receivable.peer.User :param language: Optional. For "pre" only, the programming language of the entity text :type language: str|unicode :param _raw: Optional. Original data this object was generated from. Could be `None`. 
:type _raw: None | dict """ def __init__(self, type, offset, length, url=None, user=None, language=None, _raw=None): """ This object represents one special entity in a text message. For example, hashtags, usernames, URLs, etc. https://core.telegram.org/bots/api#messageentity Parameters: :param type: Type of the entity. Can be "mention" (@username), "hashtag" (#hashtag), "cashtag" ($USD), "bot_command" (/start@jobs_bot), "url" (https://telegram.org), "email" (do-not-reply@telegram.org), "phone_number" (+1-212-555-0123), "bold" (bold text), "italic" (italic text), "underline" (underlined text), "strikethrough" (strikethrough text), "code" (monowidth string), "pre" (monowidth block), "text_link" (for clickable text URLs), "text_mention" (for users without usernames) :type type: str|unicode :param offset: Offset in UTF-16 code units to the start of the entity :type offset: int :param length: Length of the entity in UTF-16 code units :type length: int Optional keyword parameters: :param url: Optional. For "text_link" only, url that will be opened after user taps on the text :type url: str|unicode :param user: Optional. For "text_mention" only, the mentioned user :type user: pytgbot.api_types.receivable.peer.User :param language: Optional. For "pre" only, the programming language of the entity text :type language: str|unicode :param _raw: Optional. Original data this object was generated from. Could be `None`. 
:type _raw: None | dict """ super(MessageEntity, self).__init__() from .peer import User assert_type_or_raise(type, unicode_type, parameter_name="type") self.type = type assert_type_or_raise(offset, int, parameter_name="offset") self.offset = offset assert_type_or_raise(length, int, parameter_name="length") self.length = length assert_type_or_raise(url, None, unicode_type, parameter_name="url") self.url = url assert_type_or_raise(user, None, User, parameter_name="user") self.user = user assert_type_or_raise(language, None, unicode_type, parameter_name="language") self.language = language self._raw = _raw # end def __init__ def to_array(self): """ Serializes this MessageEntity to a dictionary. :return: dictionary representation of this object. :rtype: dict """ array = super(MessageEntity, self).to_array() array['type'] = u(self.type) # py2: type unicode, py3: type str array['offset'] = int(self.offset) # type int array['length'] = int(self.length) # type int if self.url is not None: array['url'] = u(self.url) # py2: type unicode, py3: type str if self.user is not None: array['user'] = self.user.to_array() # type User if self.language is not None: array['language'] = u(self.language) # py2: type unicode, py3: type str return array # end def to_array @staticmethod def validate_array(array): """ Builds a new array with valid values for the MessageEntity constructor. 
:return: new array with valid values :rtype: dict """ assert_type_or_raise(array, dict, parameter_name="array") from .peer import User data = Result.validate_array(array) data['type'] = u(array.get('type')) data['offset'] = int(array.get('offset')) data['length'] = int(array.get('length')) data['url'] = u(array.get('url')) if array.get('url') is not None else None data['user'] = User.from_array(array.get('user')) if array.get('user') is not None else None data['language'] = u(array.get('language')) if array.get('language') is not None else None return data # end def validate_array @staticmethod def from_array(array): """ Deserialize a new MessageEntity from a given dictionary. :return: new MessageEntity instance. :rtype: MessageEntity """ if not array: # None or {} return None # end if data = MessageEntity.validate_array(array) data['_raw'] = array return MessageEntity(**data) # end def from_array def __str__(self): """ Implements `str(messageentity_instance)` """ return "MessageEntity(type={self.type!r}, offset={self.offset!r}, length={self.length!r}, url={self.url!r}, user={self.user!r}, language={self.language!r})".format(self=self) # end def __str__ def __repr__(self): """ Implements `repr(messageentity_instance)` """ if self._raw: return "MessageEntity.from_array({self._raw})".format(self=self) # end if return "MessageEntity(type={self.type!r}, offset={self.offset!r}, length={self.length!r}, url={self.url!r}, user={self.user!r}, language={self.language!r})".format(self=self) # end def __repr__ def __contains__(self, key): """ Implements `"key" in messageentity_instance` """ return ( key in ["type", "offset", "length", "url", "user", "language"] and hasattr(self, key) and bool(getattr(self, key, None)) ) # end def __contains__ # end class MessageEntity class DownloadableMedia(Media): @staticmethod def validate_array(array): """ Subclass for all :class:`Media` which has a :py:attr:`file_id` and optionally a :py:attr:`file_size` :param array: a array to parse 
:type array: dict :return: a dict with file_id and file_size extracted from the array :rtype: dict """ data = Media.from_array(array) data["file_id"] = array.get("file_id") data["file_size"] = array.get("file_size") # can be None return data # end class DownloadableMedia class PhotoSize(Result): """ This object represents one size of a photo or a file / sticker thumbnail. https://core.telegram.org/bots/api#photosize Parameters: :param file_id: Identifier for this file, which can be used to download or reuse the file :type file_id: str|unicode :param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file. :type file_unique_id: str|unicode :param width: Photo width :type width: int :param height: Photo height :type height: int Optional keyword parameters: :param file_size: Optional. File size :type file_size: int :param _raw: Optional. Original data this object was generated from. Could be `None`. :type _raw: None | dict """ def __init__(self, file_id, file_unique_id, width, height, file_size=None, _raw=None): """ This object represents one size of a photo or a file / sticker thumbnail. https://core.telegram.org/bots/api#photosize Parameters: :param file_id: Identifier for this file, which can be used to download or reuse the file :type file_id: str|unicode :param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file. :type file_unique_id: str|unicode :param width: Photo width :type width: int :param height: Photo height :type height: int Optional keyword parameters: :param file_size: Optional. File size :type file_size: int :param _raw: Optional. Original data this object was generated from. Could be `None`. 
:type _raw: None | dict """ super(PhotoSize, self).__init__() assert_type_or_raise(file_id, unicode_type, parameter_name="file_id") self.file_id = file_id assert_type_or_raise(file_unique_id, unicode_type, parameter_name="file_unique_id") self.file_unique_id = file_unique_id assert_type_or_raise(width, int, parameter_name="width") self.width = width assert_type_or_raise(height, int, parameter_name="height") self.height = height assert_type_or_raise(file_size, None, int, parameter_name="file_size") self.file_size = file_size self._raw = _raw # end def __init__ def to_array(self): """ Serializes this PhotoSize to a dictionary. :return: dictionary representation of this object. :rtype: dict """ array = super(PhotoSize, self).to_array() array['file_id'] = u(self.file_id) # py2: type unicode, py3: type str array['file_unique_id'] = u(self.file_unique_id) # py2: type unicode, py3: type str array['width'] = int(self.width) # type int array['height'] = int(self.height) # type int if self.file_size is not None: array['file_size'] = int(self.file_size) # type int return array # end def to_array @staticmethod def validate_array(array): """ Builds a new array with valid values for the PhotoSize constructor. :return: new array with valid values :rtype: dict """ assert_type_or_raise(array, dict, parameter_name="array") data = Result.validate_array(array) data['file_id'] = u(array.get('file_id')) data['file_unique_id'] = u(array.get('file_unique_id')) data['width'] = int(array.get('width')) data['height'] = int(array.get('height')) data['file_size'] = int(array.get('file_size')) if array.get('file_size') is not None else None return data # end def validate_array @staticmethod def from_array(array): """ Deserialize a new PhotoSize from a given dictionary. :return: new PhotoSize instance. 
:rtype: PhotoSize """ if not array: # None or {} return None # end if data = PhotoSize.validate_array(array) data['_raw'] = array return PhotoSize(**data) # end def from_array def __str__(self): """ Implements `str(photosize_instance)` """ return "PhotoSize(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, width={self.width!r}, height={self.height!r}, file_size={self.file_size!r})".format(self=self) # end def __str__ def __repr__(self): """ Implements `repr(photosize_instance)` """ if self._raw: return "PhotoSize.from_array({self._raw})".format(self=self) # end if return "PhotoSize(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, width={self.width!r}, height={self.height!r}, file_size={self.file_size!r})".format(self=self) # end def __repr__ def __contains__(self, key): """ Implements `"key" in photosize_instance` """ return ( key in ["file_id", "file_unique_id", "width", "height", "file_size"] and hasattr(self, key) and bool(getattr(self, key, None)) ) # end def __contains__ # end class PhotoSize class Animation(Media): """ This object represents an animation file (GIF or H.264/MPEG-4 AVC video without sound). https://core.telegram.org/bots/api#animation Parameters: :param file_id: Identifier for this file, which can be used to download or reuse the file :type file_id: str|unicode :param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file. :type file_unique_id: str|unicode :param width: Video width as defined by sender :type width: int :param height: Video height as defined by sender :type height: int :param duration: Duration of the video in seconds as defined by sender :type duration: int Optional keyword parameters: :param thumb: Optional. Animation thumbnail as defined by sender :type thumb: pytgbot.api_types.receivable.media.PhotoSize :param file_name: Optional. 
Original animation filename as defined by sender :type file_name: str|unicode :param mime_type: Optional. MIME type of the file as defined by sender :type mime_type: str|unicode :param file_size: Optional. File size :type file_size: int :param _raw: Optional. Original data this object was generated from. Could be `None`. :type _raw: None | dict """ def __init__(self, file_id, file_unique_id, width, height, duration, thumb=None, file_name=None, mime_type=None, file_size=None, _raw=None): """ This object represents an animation file (GIF or H.264/MPEG-4 AVC video without sound). https://core.telegram.org/bots/api#animation Parameters: :param file_id: Identifier for this file, which can be used to download or reuse the file :type file_id: str|unicode :param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file. :type file_unique_id: str|unicode :param width: Video width as defined by sender :type width: int :param height: Video height as defined by sender :type height: int :param duration: Duration of the video in seconds as defined by sender :type duration: int Optional keyword parameters: :param thumb: Optional. Animation thumbnail as defined by sender :type thumb: pytgbot.api_types.receivable.media.PhotoSize :param file_name: Optional. Original animation filename as defined by sender :type file_name: str|unicode :param mime_type: Optional. MIME type of the file as defined by sender :type mime_type: str|unicode :param file_size: Optional. File size :type file_size: int :param _raw: Optional. Original data this object was generated from. Could be `None`. 
:type _raw: None | dict """ super(Animation, self).__init__() assert_type_or_raise(file_id, unicode_type, parameter_name="file_id") self.file_id = file_id assert_type_or_raise(file_unique_id, unicode_type, parameter_name="file_unique_id") self.file_unique_id = file_unique_id assert_type_or_raise(width, int, parameter_name="width") self.width = width assert_type_or_raise(height, int, parameter_name="height") self.height = height assert_type_or_raise(duration, int, parameter_name="duration") self.duration = duration assert_type_or_raise(thumb, None, PhotoSize, parameter_name="thumb") self.thumb = thumb assert_type_or_raise(file_name, None, unicode_type, parameter_name="file_name") self.file_name = file_name assert_type_or_raise(mime_type, None, unicode_type, parameter_name="mime_type") self.mime_type = mime_type assert_type_or_raise(file_size, None, int, parameter_name="file_size") self.file_size = file_size self._raw = _raw # end def __init__ def to_array(self): """ Serializes this Animation to a dictionary. :return: dictionary representation of this object. :rtype: dict """ array = super(Animation, self).to_array() array['file_id'] = u(self.file_id) # py2: type unicode, py3: type str array['file_unique_id'] = u(self.file_unique_id) # py2: type unicode, py3: type str array['width'] = int(self.width) # type int array['height'] = int(self.height) # type int array['duration'] = int(self.duration) # type int if self.thumb is not None: array['thumb'] = self.thumb.to_array() # type PhotoSize if self.file_name is not None: array['file_name'] = u(self.file_name) # py2: type unicode, py3: type str if self.mime_type is not None: array['mime_type'] = u(self.mime_type) # py2: type unicode, py3: type str if self.file_size is not None: array['file_size'] = int(self.file_size) # type int return array # end def to_array @staticmethod def validate_array(array): """ Builds a new array with valid values for the Animation constructor. 
:return: new array with valid values :rtype: dict """ assert_type_or_raise(array, dict, parameter_name="array") data = Media.validate_array(array) data['file_id'] = u(array.get('file_id')) data['file_unique_id'] = u(array.get('file_unique_id')) data['width'] = int(array.get('width')) data['height'] = int(array.get('height')) data['duration'] = int(array.get('duration')) data['thumb'] = PhotoSize.from_array(array.get('thumb')) if array.get('thumb') is not None else None data['file_name'] = u(array.get('file_name')) if array.get('file_name') is not None else None data['mime_type'] = u(array.get('mime_type')) if array.get('mime_type') is not None else None data['file_size'] = int(array.get('file_size')) if array.get('file_size') is not None else None return data # end def validate_array @staticmethod def from_array(array): """ Deserialize a new Animation from a given dictionary. :return: new Animation instance. :rtype: Animation """ if not array: # None or {} return None # end if data = Animation.validate_array(array) data['_raw'] = array return Animation(**data) # end def from_array def __str__(self): """ Implements `str(animation_instance)` """ return "Animation(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, width={self.width!r}, height={self.height!r}, duration={self.duration!r}, thumb={self.thumb!r}, file_name={self.file_name!r}, mime_type={self.mime_type!r}, file_size={self.file_size!r})".format(self=self) # end def __str__ def __repr__(self): """ Implements `repr(animation_instance)` """ if self._raw: return "Animation.from_array({self._raw})".format(self=self) # end if return "Animation(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, width={self.width!r}, height={self.height!r}, duration={self.duration!r}, thumb={self.thumb!r}, file_name={self.file_name!r}, mime_type={self.mime_type!r}, file_size={self.file_size!r})".format(self=self) # end def __repr__ def __contains__(self, key): """ Implements `"key" in 
animation_instance` """ return ( key in ["file_id", "file_unique_id", "width", "height", "duration", "thumb", "file_name", "mime_type", "file_size"] and hasattr(self, key) and bool(getattr(self, key, None)) ) # end def __contains__ # end class Animation class Audio(Media): """ This object represents an audio file to be treated as music by the Telegram clients. https://core.telegram.org/bots/api#audio Parameters: :param file_id: Identifier for this file, which can be used to download or reuse the file :type file_id: str|unicode :param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file. :type file_unique_id: str|unicode :param duration: Duration of the audio in seconds as defined by sender :type duration: int Optional keyword parameters: :param performer: Optional. Performer of the audio as defined by sender or by audio tags :type performer: str|unicode :param title: Optional. Title of the audio as defined by sender or by audio tags :type title: str|unicode :param mime_type: Optional. MIME type of the file as defined by sender :type mime_type: str|unicode :param file_size: Optional. File size :type file_size: int :param thumb: Optional. Thumbnail of the album cover to which the music file belongs :type thumb: pytgbot.api_types.receivable.media.PhotoSize :param _raw: Optional. Original data this object was generated from. Could be `None`. :type _raw: None | dict """ def __init__(self, file_id, file_unique_id, duration, performer=None, title=None, mime_type=None, file_size=None, thumb=None, _raw=None): """ This object represents an audio file to be treated as music by the Telegram clients. 
https://core.telegram.org/bots/api#audio Parameters: :param file_id: Identifier for this file, which can be used to download or reuse the file :type file_id: str|unicode :param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file. :type file_unique_id: str|unicode :param duration: Duration of the audio in seconds as defined by sender :type duration: int Optional keyword parameters: :param performer: Optional. Performer of the audio as defined by sender or by audio tags :type performer: str|unicode :param title: Optional. Title of the audio as defined by sender or by audio tags :type title: str|unicode :param mime_type: Optional. MIME type of the file as defined by sender :type mime_type: str|unicode :param file_size: Optional. File size :type file_size: int :param thumb: Optional. Thumbnail of the album cover to which the music file belongs :type thumb: pytgbot.api_types.receivable.media.PhotoSize :param _raw: Optional. Original data this object was generated from. Could be `None`. 
:type _raw: None | dict """ super(Audio, self).__init__() assert_type_or_raise(file_id, unicode_type, parameter_name="file_id") self.file_id = file_id assert_type_or_raise(file_unique_id, unicode_type, parameter_name="file_unique_id") self.file_unique_id = file_unique_id assert_type_or_raise(duration, int, parameter_name="duration") self.duration = duration assert_type_or_raise(performer, None, unicode_type, parameter_name="performer") self.performer = performer assert_type_or_raise(title, None, unicode_type, parameter_name="title") self.title = title assert_type_or_raise(mime_type, None, unicode_type, parameter_name="mime_type") self.mime_type = mime_type assert_type_or_raise(file_size, None, int, parameter_name="file_size") self.file_size = file_size assert_type_or_raise(thumb, None, PhotoSize, parameter_name="thumb") self.thumb = thumb self._raw = _raw # end def __init__ def to_array(self): """ Serializes this Audio to a dictionary. :return: dictionary representation of this object. :rtype: dict """ array = super(Audio, self).to_array() array['file_id'] = u(self.file_id) # py2: type unicode, py3: type str array['file_unique_id'] = u(self.file_unique_id) # py2: type unicode, py3: type str array['duration'] = int(self.duration) # type int if self.performer is not None: array['performer'] = u(self.performer) # py2: type unicode, py3: type str if self.title is not None: array['title'] = u(self.title) # py2: type unicode, py3: type str if self.mime_type is not None: array['mime_type'] = u(self.mime_type) # py2: type unicode, py3: type str if self.file_size is not None: array['file_size'] = int(self.file_size) # type int if self.thumb is not None: array['thumb'] = self.thumb.to_array() # type PhotoSize return array # end def to_array @staticmethod def validate_array(array): """ Builds a new array with valid values for the Audio constructor. 
:return: new array with valid values :rtype: dict """ assert_type_or_raise(array, dict, parameter_name="array") data = Media.validate_array(array) data['file_id'] = u(array.get('file_id')) data['file_unique_id'] = u(array.get('file_unique_id')) data['duration'] = int(array.get('duration')) data['performer'] = u(array.get('performer')) if array.get('performer') is not None else None data['title'] = u(array.get('title')) if array.get('title') is not None else None data['mime_type'] = u(array.get('mime_type')) if array.get('mime_type') is not None else None data['file_size'] = int(array.get('file_size')) if array.get('file_size') is not None else None data['thumb'] = PhotoSize.from_array(array.get('thumb')) if array.get('thumb') is not None else None return data # end def validate_array @staticmethod def from_array(array): """ Deserialize a new Audio from a given dictionary. :return: new Audio instance. :rtype: Audio """ if not array: # None or {} return None # end if data = Audio.validate_array(array) data['_raw'] = array return Audio(**data) # end def from_array def __str__(self): """ Implements `str(audio_instance)` """ return "Audio(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, duration={self.duration!r}, performer={self.performer!r}, title={self.title!r}, mime_type={self.mime_type!r}, file_size={self.file_size!r}, thumb={self.thumb!r})".format(self=self) # end def __str__ def __repr__(self): """ Implements `repr(audio_instance)` """ if self._raw: return "Audio.from_array({self._raw})".format(self=self) # end if return "Audio(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, duration={self.duration!r}, performer={self.performer!r}, title={self.title!r}, mime_type={self.mime_type!r}, file_size={self.file_size!r}, thumb={self.thumb!r})".format(self=self) # end def __repr__ def __contains__(self, key): """ Implements `"key" in audio_instance` """ return ( key in ["file_id", "file_unique_id", "duration", "performer", "title", 
"mime_type", "file_size", "thumb"] and hasattr(self, key) and bool(getattr(self, key, None)) ) # end def __contains__ # end class Audio class Document(Media): """ This object represents a general file (as opposed to photos, voice messages and audio files). https://core.telegram.org/bots/api#document Parameters: :param file_id: Identifier for this file, which can be used to download or reuse the file :type file_id: str|unicode :param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file. :type file_unique_id: str|unicode Optional keyword parameters: :param thumb: Optional. Document thumbnail as defined by sender :type thumb: pytgbot.api_types.receivable.media.PhotoSize :param file_name: Optional. Original filename as defined by sender :type file_name: str|unicode :param mime_type: Optional. MIME type of the file as defined by sender :type mime_type: str|unicode :param file_size: Optional. File size :type file_size: int :param _raw: Optional. Original data this object was generated from. Could be `None`. :type _raw: None | dict """ def __init__(self, file_id, file_unique_id, thumb=None, file_name=None, mime_type=None, file_size=None, _raw=None): """ This object represents a general file (as opposed to photos, voice messages and audio files). https://core.telegram.org/bots/api#document Parameters: :param file_id: Identifier for this file, which can be used to download or reuse the file :type file_id: str|unicode :param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file. :type file_unique_id: str|unicode Optional keyword parameters: :param thumb: Optional. Document thumbnail as defined by sender :type thumb: pytgbot.api_types.receivable.media.PhotoSize :param file_name: Optional. 
Original filename as defined by sender :type file_name: str|unicode :param mime_type: Optional. MIME type of the file as defined by sender :type mime_type: str|unicode :param file_size: Optional. File size :type file_size: int :param _raw: Optional. Original data this object was generated from. Could be `None`. :type _raw: None | dict """ super(Document, self).__init__() assert_type_or_raise(file_id, unicode_type, parameter_name="file_id") self.file_id = file_id assert_type_or_raise(file_unique_id, unicode_type, parameter_name="file_unique_id") self.file_unique_id = file_unique_id assert_type_or_raise(thumb, None, PhotoSize, parameter_name="thumb") self.thumb = thumb assert_type_or_raise(file_name, None, unicode_type, parameter_name="file_name") self.file_name = file_name assert_type_or_raise(mime_type, None, unicode_type, parameter_name="mime_type") self.mime_type = mime_type assert_type_or_raise(file_size, None, int, parameter_name="file_size") self.file_size = file_size self._raw = _raw # end def __init__ def to_array(self): """ Serializes this Document to a dictionary. :return: dictionary representation of this object. :rtype: dict """ array = super(Document, self).to_array() array['file_id'] = u(self.file_id) # py2: type unicode, py3: type str array['file_unique_id'] = u(self.file_unique_id) # py2: type unicode, py3: type str if self.thumb is not None: array['thumb'] = self.thumb.to_array() # type PhotoSize if self.file_name is not None: array['file_name'] = u(self.file_name) # py2: type unicode, py3: type str if self.mime_type is not None: array['mime_type'] = u(self.mime_type) # py2: type unicode, py3: type str if self.file_size is not None: array['file_size'] = int(self.file_size) # type int return array # end def to_array @staticmethod def validate_array(array): """ Builds a new array with valid values for the Document constructor. 
:return: new array with valid values :rtype: dict """ assert_type_or_raise(array, dict, parameter_name="array") data = Media.validate_array(array) data['file_id'] = u(array.get('file_id')) data['file_unique_id'] = u(array.get('file_unique_id')) data['thumb'] = PhotoSize.from_array(array.get('thumb')) if array.get('thumb') is not None else None data['file_name'] = u(array.get('file_name')) if array.get('file_name') is not None else None data['mime_type'] = u(array.get('mime_type')) if array.get('mime_type') is not None else None data['file_size'] = int(array.get('file_size')) if array.get('file_size') is not None else None return data # end def validate_array @staticmethod def from_array(array): """ Deserialize a new Document from a given dictionary. :return: new Document instance. :rtype: Document """ if not array: # None or {} return None # end if data = Document.validate_array(array) data['_raw'] = array return Document(**data) # end def from_array def __str__(self): """ Implements `str(document_instance)` """ return "Document(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, thumb={self.thumb!r}, file_name={self.file_name!r}, mime_type={self.mime_type!r}, file_size={self.file_size!r})".format(self=self) # end def __str__ def __repr__(self): """ Implements `repr(document_instance)` """ if self._raw: return "Document.from_array({self._raw})".format(self=self) # end if return "Document(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, thumb={self.thumb!r}, file_name={self.file_name!r}, mime_type={self.mime_type!r}, file_size={self.file_size!r})".format(self=self) # end def __repr__ def __contains__(self, key): """ Implements `"key" in document_instance` """ return ( key in ["file_id", "file_unique_id", "thumb", "file_name", "mime_type", "file_size"] and hasattr(self, key) and bool(getattr(self, key, None)) ) # end def __contains__ # end class Document class Video(Media): """ This object represents a video file. 

    https://core.telegram.org/bots/api#video


    Parameters:

    :param file_id: Identifier for this file, which can be used to download or reuse the file
    :type  file_id: str|unicode

    :param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
    :type  file_unique_id: str|unicode

    :param width: Video width as defined by sender
    :type  width: int

    :param height: Video height as defined by sender
    :type  height: int

    :param duration: Duration of the video in seconds as defined by sender
    :type  duration: int


    Optional keyword parameters:

    :param thumb: Optional. Video thumbnail
    :type  thumb: pytgbot.api_types.receivable.media.PhotoSize

    :param mime_type: Optional. Mime type of a file as defined by sender
    :type  mime_type: str|unicode

    :param file_size: Optional. File size
    :type  file_size: int

    :param _raw: Optional. Original data this object was generated from. Could be `None`.
    :type  _raw: None | dict
    """

    def __init__(self, file_id, file_unique_id, width, height, duration, thumb=None, mime_type=None, file_size=None, _raw=None):
        """
        This object represents a video file.

        https://core.telegram.org/bots/api#video


        Parameters:

        :param file_id: Identifier for this file, which can be used to download or reuse the file
        :type  file_id: str|unicode

        :param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
        :type  file_unique_id: str|unicode

        :param width: Video width as defined by sender
        :type  width: int

        :param height: Video height as defined by sender
        :type  height: int

        :param duration: Duration of the video in seconds as defined by sender
        :type  duration: int


        Optional keyword parameters:

        :param thumb: Optional. Video thumbnail
        :type  thumb: pytgbot.api_types.receivable.media.PhotoSize

        :param mime_type: Optional. Mime type of a file as defined by sender
        :type  mime_type: str|unicode

        :param file_size: Optional. File size
        :type  file_size: int

        :param _raw: Optional. Original data this object was generated from. Could be `None`.
        :type  _raw: None | dict
        """
        super(Video, self).__init__()
        # required fields are validated strictly; optional ones also accept None
        assert_type_or_raise(file_id, unicode_type, parameter_name="file_id")
        self.file_id = file_id
        assert_type_or_raise(file_unique_id, unicode_type, parameter_name="file_unique_id")
        self.file_unique_id = file_unique_id
        assert_type_or_raise(width, int, parameter_name="width")
        self.width = width
        assert_type_or_raise(height, int, parameter_name="height")
        self.height = height
        assert_type_or_raise(duration, int, parameter_name="duration")
        self.duration = duration
        assert_type_or_raise(thumb, None, PhotoSize, parameter_name="thumb")
        self.thumb = thumb
        assert_type_or_raise(mime_type, None, unicode_type, parameter_name="mime_type")
        self.mime_type = mime_type
        assert_type_or_raise(file_size, None, int, parameter_name="file_size")
        self.file_size = file_size
        self._raw = _raw
    # end def __init__

    def to_array(self):
        """
        Serializes this Video to a dictionary.

        :return: dictionary representation of this object.
        :rtype: dict
        """
        array = super(Video, self).to_array()
        # required fields
        array['file_id'] = u(self.file_id)  # py2: type unicode, py3: type str
        array['file_unique_id'] = u(self.file_unique_id)  # py2: type unicode, py3: type str
        array['width'] = int(self.width)  # type int
        array['height'] = int(self.height)  # type int
        array['duration'] = int(self.duration)  # type int
        # optional fields are only written when set
        if self.thumb is not None:
            array['thumb'] = self.thumb.to_array()  # type PhotoSize
        if self.mime_type is not None:
            array['mime_type'] = u(self.mime_type)  # py2: type unicode, py3: type str
        if self.file_size is not None:
            array['file_size'] = int(self.file_size)  # type int
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the Video constructor.

        :return: new array with valid values
        :rtype: dict
        """
        assert_type_or_raise(array, dict, parameter_name="array")
        # start from the keys the base class extracts, then add our own
        data = Media.validate_array(array)
        data['file_id'] = u(array.get('file_id'))
        data['file_unique_id'] = u(array.get('file_unique_id'))
        data['width'] = int(array.get('width'))
        data['height'] = int(array.get('height'))
        data['duration'] = int(array.get('duration'))
        data['thumb'] = PhotoSize.from_array(array.get('thumb')) if array.get('thumb') is not None else None
        data['mime_type'] = u(array.get('mime_type')) if array.get('mime_type') is not None else None
        data['file_size'] = int(array.get('file_size')) if array.get('file_size') is not None else None
        return data
    # end def validate_array

    @staticmethod
    def from_array(array):
        """
        Deserialize a new Video from a given dictionary.

        :return: new Video instance.
        :rtype: Video
        """
        if not array:  # None or {}
            return None
        # end if
        data = Video.validate_array(array)
        data['_raw'] = array  # keep the raw input for __repr__ round-tripping
        return Video(**data)
    # end def from_array

    def __str__(self):
        """
        Implements `str(video_instance)`
        """
        return "Video(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, width={self.width!r}, height={self.height!r}, duration={self.duration!r}, thumb={self.thumb!r}, mime_type={self.mime_type!r}, file_size={self.file_size!r})".format(self=self)
    # end def __str__

    def __repr__(self):
        """
        Implements `repr(video_instance)`
        """
        if self._raw:
            return "Video.from_array({self._raw})".format(self=self)
        # end if
        return "Video(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, width={self.width!r}, height={self.height!r}, duration={self.duration!r}, thumb={self.thumb!r}, mime_type={self.mime_type!r}, file_size={self.file_size!r})".format(self=self)
    # end def __repr__

    def __contains__(self, key):
        """
        Implements `"key" in video_instance`
        """
        # a key is "contained" only when it names a known field set to a truthy value
        return (
            key in ["file_id", "file_unique_id", "width", "height", "duration", "thumb", "mime_type", "file_size"]
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )
    # end def __contains__
# end class Video


class VideoNote(Media):
    """
    This object represents a video message (available in Telegram apps as of v.4.0).

    https://core.telegram.org/bots/api#videonote


    Parameters:

    :param file_id: Identifier for this file, which can be used to download or reuse the file
    :type  file_id: str|unicode

    :param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
    :type  file_unique_id: str|unicode

    :param length: Video width and height (diameter of the video message) as defined by sender
    :type  length: int

    :param duration: Duration of the video in seconds as defined by sender
    :type  duration: int


    Optional keyword parameters:

    :param thumb: Optional. Video thumbnail
    :type  thumb: pytgbot.api_types.receivable.media.PhotoSize

    :param file_size: Optional. File size
    :type  file_size: int

    :param _raw: Optional. Original data this object was generated from. Could be `None`.
    :type  _raw: None | dict
    """

    def __init__(self, file_id, file_unique_id, length, duration, thumb=None, file_size=None, _raw=None):
        """
        This object represents a video message (available in Telegram apps as of v.4.0).

        https://core.telegram.org/bots/api#videonote


        Parameters:

        :param file_id: Identifier for this file, which can be used to download or reuse the file
        :type  file_id: str|unicode

        :param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
        :type  file_unique_id: str|unicode

        :param length: Video width and height (diameter of the video message) as defined by sender
        :type  length: int

        :param duration: Duration of the video in seconds as defined by sender
        :type  duration: int


        Optional keyword parameters:

        :param thumb: Optional. Video thumbnail
        :type  thumb: pytgbot.api_types.receivable.media.PhotoSize

        :param file_size: Optional. File size
        :type  file_size: int

        :param _raw: Optional.
        Original data this object was generated from. Could be `None`.
        :type  _raw: None | dict
        """
        super(VideoNote, self).__init__()
        # required fields are validated strictly; optional ones also accept None
        assert_type_or_raise(file_id, unicode_type, parameter_name="file_id")
        self.file_id = file_id
        assert_type_or_raise(file_unique_id, unicode_type, parameter_name="file_unique_id")
        self.file_unique_id = file_unique_id
        assert_type_or_raise(length, int, parameter_name="length")
        self.length = length
        assert_type_or_raise(duration, int, parameter_name="duration")
        self.duration = duration
        assert_type_or_raise(thumb, None, PhotoSize, parameter_name="thumb")
        self.thumb = thumb
        assert_type_or_raise(file_size, None, int, parameter_name="file_size")
        self.file_size = file_size
        self._raw = _raw
    # end def __init__

    def to_array(self):
        """
        Serializes this VideoNote to a dictionary.

        :return: dictionary representation of this object.
        :rtype: dict
        """
        array = super(VideoNote, self).to_array()
        # required fields
        array['file_id'] = u(self.file_id)  # py2: type unicode, py3: type str
        array['file_unique_id'] = u(self.file_unique_id)  # py2: type unicode, py3: type str
        array['length'] = int(self.length)  # type int
        array['duration'] = int(self.duration)  # type int
        # optional fields are only written when set
        if self.thumb is not None:
            array['thumb'] = self.thumb.to_array()  # type PhotoSize
        if self.file_size is not None:
            array['file_size'] = int(self.file_size)  # type int
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the VideoNote constructor.

        :return: new array with valid values
        :rtype: dict
        """
        assert_type_or_raise(array, dict, parameter_name="array")
        # start from the keys the base class extracts, then add our own
        data = Media.validate_array(array)
        data['file_id'] = u(array.get('file_id'))
        data['file_unique_id'] = u(array.get('file_unique_id'))
        data['length'] = int(array.get('length'))
        data['duration'] = int(array.get('duration'))
        data['thumb'] = PhotoSize.from_array(array.get('thumb')) if array.get('thumb') is not None else None
        data['file_size'] = int(array.get('file_size')) if array.get('file_size') is not None else None
        return data
    # end def validate_array

    @staticmethod
    def from_array(array):
        """
        Deserialize a new VideoNote from a given dictionary.

        :return: new VideoNote instance.
        :rtype: VideoNote
        """
        if not array:  # None or {}
            return None
        # end if
        data = VideoNote.validate_array(array)
        data['_raw'] = array  # keep the raw input for __repr__ round-tripping
        return VideoNote(**data)
    # end def from_array

    def __str__(self):
        """
        Implements `str(videonote_instance)`
        """
        return "VideoNote(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, length={self.length!r}, duration={self.duration!r}, thumb={self.thumb!r}, file_size={self.file_size!r})".format(self=self)
    # end def __str__

    def __repr__(self):
        """
        Implements `repr(videonote_instance)`
        """
        if self._raw:
            return "VideoNote.from_array({self._raw})".format(self=self)
        # end if
        return "VideoNote(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, length={self.length!r}, duration={self.duration!r}, thumb={self.thumb!r}, file_size={self.file_size!r})".format(self=self)
    # end def __repr__

    def __contains__(self, key):
        """
        Implements `"key" in videonote_instance`
        """
        # a key is "contained" only when it names a known field set to a truthy value
        return (
            key in ["file_id", "file_unique_id", "length", "duration", "thumb", "file_size"]
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )
    # end def __contains__
# end class VideoNote


class Voice(Media):
    """
    This object represents a voice note.

    https://core.telegram.org/bots/api#voice


    Parameters:

    :param file_id: Identifier for this file, which can be used to download or reuse the file
    :type  file_id: str|unicode

    :param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
    :type  file_unique_id: str|unicode

    :param duration: Duration of the audio in seconds as defined by sender
    :type  duration: int


    Optional keyword parameters:

    :param mime_type: Optional. MIME type of the file as defined by sender
    :type  mime_type: str|unicode

    :param file_size: Optional. File size
    :type  file_size: int

    :param _raw: Optional. Original data this object was generated from. Could be `None`.
    :type  _raw: None | dict
    """

    def __init__(self, file_id, file_unique_id, duration, mime_type=None, file_size=None, _raw=None):
        """
        This object represents a voice note.

        https://core.telegram.org/bots/api#voice


        Parameters:

        :param file_id: Identifier for this file, which can be used to download or reuse the file
        :type  file_id: str|unicode

        :param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
        :type  file_unique_id: str|unicode

        :param duration: Duration of the audio in seconds as defined by sender
        :type  duration: int


        Optional keyword parameters:

        :param mime_type: Optional. MIME type of the file as defined by sender
        :type  mime_type: str|unicode

        :param file_size: Optional. File size
        :type  file_size: int

        :param _raw: Optional. Original data this object was generated from. Could be `None`.
        :type  _raw: None | dict
        """
        super(Voice, self).__init__()
        # required fields are validated strictly; optional ones also accept None
        assert_type_or_raise(file_id, unicode_type, parameter_name="file_id")
        self.file_id = file_id
        assert_type_or_raise(file_unique_id, unicode_type, parameter_name="file_unique_id")
        self.file_unique_id = file_unique_id
        assert_type_or_raise(duration, int, parameter_name="duration")
        self.duration = duration
        assert_type_or_raise(mime_type, None, unicode_type, parameter_name="mime_type")
        self.mime_type = mime_type
        assert_type_or_raise(file_size, None, int, parameter_name="file_size")
        self.file_size = file_size
        self._raw = _raw
    # end def __init__

    def to_array(self):
        """
        Serializes this Voice to a dictionary.

        :return: dictionary representation of this object.
        :rtype: dict
        """
        array = super(Voice, self).to_array()
        # required fields
        array['file_id'] = u(self.file_id)  # py2: type unicode, py3: type str
        array['file_unique_id'] = u(self.file_unique_id)  # py2: type unicode, py3: type str
        array['duration'] = int(self.duration)  # type int
        # optional fields are only written when set
        if self.mime_type is not None:
            array['mime_type'] = u(self.mime_type)  # py2: type unicode, py3: type str
        if self.file_size is not None:
            array['file_size'] = int(self.file_size)  # type int
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the Voice constructor.

        :return: new array with valid values
        :rtype: dict
        """
        assert_type_or_raise(array, dict, parameter_name="array")
        # start from the keys the base class extracts, then add our own
        data = Media.validate_array(array)
        data['file_id'] = u(array.get('file_id'))
        data['file_unique_id'] = u(array.get('file_unique_id'))
        data['duration'] = int(array.get('duration'))
        data['mime_type'] = u(array.get('mime_type')) if array.get('mime_type') is not None else None
        data['file_size'] = int(array.get('file_size')) if array.get('file_size') is not None else None
        return data
    # end def validate_array

    @staticmethod
    def from_array(array):
        """
        Deserialize a new Voice from a given dictionary.

        :return: new Voice instance.
        :rtype: Voice
        """
        if not array:  # None or {}
            return None
        # end if
        data = Voice.validate_array(array)
        data['_raw'] = array  # keep the raw input for __repr__ round-tripping
        return Voice(**data)
    # end def from_array

    def __str__(self):
        """
        Implements `str(voice_instance)`
        """
        return "Voice(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, duration={self.duration!r}, mime_type={self.mime_type!r}, file_size={self.file_size!r})".format(self=self)
    # end def __str__

    def __repr__(self):
        """
        Implements `repr(voice_instance)`
        """
        if self._raw:
            return "Voice.from_array({self._raw})".format(self=self)
        # end if
        return "Voice(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, duration={self.duration!r}, mime_type={self.mime_type!r}, file_size={self.file_size!r})".format(self=self)
    # end def __repr__

    def __contains__(self, key):
        """
        Implements `"key" in voice_instance`
        """
        # a key is "contained" only when it names a known field set to a truthy value
        return (
            key in ["file_id", "file_unique_id", "duration", "mime_type", "file_size"]
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )
    # end def __contains__
# end class Voice


class Contact(Media):
    """
    This object represents a phone contact.

    https://core.telegram.org/bots/api#contact


    Parameters:

    :param phone_number: Contact's phone number
    :type  phone_number: str|unicode

    :param first_name: Contact's first name
    :type  first_name: str|unicode


    Optional keyword parameters:

    :param last_name: Optional. Contact's last name
    :type  last_name: str|unicode

    :param user_id: Optional. Contact's user identifier in Telegram
    :type  user_id: int

    :param vcard: Optional. Additional data about the contact in the form of a vCard
    :type  vcard: str|unicode

    :param _raw: Optional. Original data this object was generated from. Could be `None`.
    :type  _raw: None | dict
    """

    def __init__(self, phone_number, first_name, last_name=None, user_id=None, vcard=None, _raw=None):
        """
        This object represents a phone contact.

        https://core.telegram.org/bots/api#contact


        Parameters:

        :param phone_number: Contact's phone number
        :type  phone_number: str|unicode

        :param first_name: Contact's first name
        :type  first_name: str|unicode


        Optional keyword parameters:

        :param last_name: Optional. Contact's last name
        :type  last_name: str|unicode

        :param user_id: Optional. Contact's user identifier in Telegram
        :type  user_id: int

        :param vcard: Optional. Additional data about the contact in the form of a vCard
        :type  vcard: str|unicode

        :param _raw: Optional. Original data this object was generated from. Could be `None`.
        :type  _raw: None | dict
        """
        super(Contact, self).__init__()
        # required fields are validated strictly; optional ones also accept None
        assert_type_or_raise(phone_number, unicode_type, parameter_name="phone_number")
        self.phone_number = phone_number
        assert_type_or_raise(first_name, unicode_type, parameter_name="first_name")
        self.first_name = first_name
        assert_type_or_raise(last_name, None, unicode_type, parameter_name="last_name")
        self.last_name = last_name
        assert_type_or_raise(user_id, None, int, parameter_name="user_id")
        self.user_id = user_id
        assert_type_or_raise(vcard, None, unicode_type, parameter_name="vcard")
        self.vcard = vcard
        self._raw = _raw
    # end def __init__

    def to_array(self):
        """
        Serializes this Contact to a dictionary.

        :return: dictionary representation of this object.
        :rtype: dict
        """
        array = super(Contact, self).to_array()
        # required fields
        array['phone_number'] = u(self.phone_number)  # py2: type unicode, py3: type str
        array['first_name'] = u(self.first_name)  # py2: type unicode, py3: type str
        # optional fields are only written when set
        if self.last_name is not None:
            array['last_name'] = u(self.last_name)  # py2: type unicode, py3: type str
        if self.user_id is not None:
            array['user_id'] = int(self.user_id)  # type int
        if self.vcard is not None:
            array['vcard'] = u(self.vcard)  # py2: type unicode, py3: type str
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the Contact constructor.

        :return: new array with valid values
        :rtype: dict
        """
        assert_type_or_raise(array, dict, parameter_name="array")
        # start from the keys the base class extracts, then add our own
        data = Media.validate_array(array)
        data['phone_number'] = u(array.get('phone_number'))
        data['first_name'] = u(array.get('first_name'))
        data['last_name'] = u(array.get('last_name')) if array.get('last_name') is not None else None
        data['user_id'] = int(array.get('user_id')) if array.get('user_id') is not None else None
        data['vcard'] = u(array.get('vcard')) if array.get('vcard') is not None else None
        return data
    # end def validate_array

    @staticmethod
    def from_array(array):
        """
        Deserialize a new Contact from a given dictionary.

        :return: new Contact instance.
        :rtype: Contact
        """
        if not array:  # None or {}
            return None
        # end if
        data = Contact.validate_array(array)
        data['_raw'] = array  # keep the raw input for __repr__ round-tripping
        return Contact(**data)
    # end def from_array

    def __str__(self):
        """
        Implements `str(contact_instance)`
        """
        return "Contact(phone_number={self.phone_number!r}, first_name={self.first_name!r}, last_name={self.last_name!r}, user_id={self.user_id!r}, vcard={self.vcard!r})".format(self=self)
    # end def __str__

    def __repr__(self):
        """
        Implements `repr(contact_instance)`
        """
        if self._raw:
            return "Contact.from_array({self._raw})".format(self=self)
        # end if
        return "Contact(phone_number={self.phone_number!r}, first_name={self.first_name!r}, last_name={self.last_name!r}, user_id={self.user_id!r}, vcard={self.vcard!r})".format(self=self)
    # end def __repr__

    def __contains__(self, key):
        """
        Implements `"key" in contact_instance`
        """
        # a key is "contained" only when it names a known field set to a truthy value
        return (
            key in ["phone_number", "first_name", "last_name", "user_id", "vcard"]
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )
    # end def __contains__
# end class Contact


class Dice(Media):
    """
    This object represents an animated emoji that displays a random value.

    https://core.telegram.org/bots/api#dice


    Parameters:

    :param emoji: Emoji on which the dice throw animation is based
    :type  emoji: str|unicode

    :param value: Value of the dice, 1-6 for "🎲" and "🎯" base emoji, 1-5 for "🏀" base emoji
    :type  value: int


    Optional keyword parameters:

    :param _raw: Optional. Original data this object was generated from. Could be `None`.
    :type  _raw: None | dict
    """

    def __init__(self, emoji, value, _raw=None):
        """
        This object represents an animated emoji that displays a random value.

        https://core.telegram.org/bots/api#dice


        Parameters:

        :param emoji: Emoji on which the dice throw animation is based
        :type  emoji: str|unicode

        :param value: Value of the dice, 1-6 for "🎲" and "🎯" base emoji, 1-5 for "🏀" base emoji
        :type  value: int


        Optional keyword parameters:

        :param _raw: Optional. Original data this object was generated from. Could be `None`.
        :type  _raw: None | dict
        """
        super(Dice, self).__init__()
        # both fields are required
        assert_type_or_raise(emoji, unicode_type, parameter_name="emoji")
        self.emoji = emoji
        assert_type_or_raise(value, int, parameter_name="value")
        self.value = value
        self._raw = _raw
    # end def __init__

    def to_array(self):
        """
        Serializes this Dice to a dictionary.

        :return: dictionary representation of this object.
        :rtype: dict
        """
        array = super(Dice, self).to_array()
        array['emoji'] = u(self.emoji)  # py2: type unicode, py3: type str
        array['value'] = int(self.value)  # type int
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the Dice constructor.

        :return: new array with valid values
        :rtype: dict
        """
        assert_type_or_raise(array, dict, parameter_name="array")
        # start from the keys the base class extracts, then add our own
        data = Media.validate_array(array)
        data['emoji'] = u(array.get('emoji'))
        data['value'] = int(array.get('value'))
        return data
    # end def validate_array

    @staticmethod
    def from_array(array):
        """
        Deserialize a new Dice from a given dictionary.

        :return: new Dice instance.
        :rtype: Dice
        """
        if not array:  # None or {}
            return None
        # end if
        data = Dice.validate_array(array)
        data['_raw'] = array  # keep the raw input for __repr__ round-tripping
        return Dice(**data)
    # end def from_array

    def __str__(self):
        """
        Implements `str(dice_instance)`
        """
        return "Dice(emoji={self.emoji!r}, value={self.value!r})".format(self=self)
    # end def __str__

    def __repr__(self):
        """
        Implements `repr(dice_instance)`
        """
        if self._raw:
            return "Dice.from_array({self._raw})".format(self=self)
        # end if
        return "Dice(emoji={self.emoji!r}, value={self.value!r})".format(self=self)
    # end def __repr__

    def __contains__(self, key):
        """
        Implements `"key" in dice_instance`
        """
        # a key is "contained" only when it names a known field set to a truthy value
        return (
            key in ["emoji", "value"]
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )
    # end def __contains__
# end class Dice


class PollOption(Receivable):
    """
    This object contains information about one answer option in a poll.

    https://core.telegram.org/bots/api#polloption


    Parameters:

    :param text: Option text, 1-100 characters
    :type  text: str|unicode

    :param voter_count: Number of users that voted for this option
    :type  voter_count: int


    Optional keyword parameters:

    :param _raw: Optional. Original data this object was generated from. Could be `None`.
    :type  _raw: None | dict
    """

    def __init__(self, text, voter_count, _raw=None):
        """
        This object contains information about one answer option in a poll.

        https://core.telegram.org/bots/api#polloption


        Parameters:

        :param text: Option text, 1-100 characters
        :type  text: str|unicode

        :param voter_count: Number of users that voted for this option
        :type  voter_count: int


        Optional keyword parameters:

        :param _raw: Optional. Original data this object was generated from. Could be `None`.
        :type  _raw: None | dict
        """
        super(PollOption, self).__init__()
        # both fields are required
        assert_type_or_raise(text, unicode_type, parameter_name="text")
        self.text = text
        assert_type_or_raise(voter_count, int, parameter_name="voter_count")
        self.voter_count = voter_count
        self._raw = _raw
    # end def __init__

    def to_array(self):
        """
        Serializes this PollOption to a dictionary.

        :return: dictionary representation of this object.
        :rtype: dict
        """
        array = super(PollOption, self).to_array()
        array['text'] = u(self.text)  # py2: type unicode, py3: type str
        array['voter_count'] = int(self.voter_count)  # type int
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the PollOption constructor.

        :return: new array with valid values
        :rtype: dict
        """
        assert_type_or_raise(array, dict, parameter_name="array")
        # start from the keys the base class extracts, then add our own
        data = Receivable.validate_array(array)
        data['text'] = u(array.get('text'))
        data['voter_count'] = int(array.get('voter_count'))
        return data
    # end def validate_array

    @staticmethod
    def from_array(array):
        """
        Deserialize a new PollOption from a given dictionary.

        :return: new PollOption instance.
        :rtype: PollOption
        """
        if not array:  # None or {}
            return None
        # end if
        data = PollOption.validate_array(array)
        data['_raw'] = array  # keep the raw input for __repr__ round-tripping
        return PollOption(**data)
    # end def from_array

    def __str__(self):
        """
        Implements `str(polloption_instance)`
        """
        return "PollOption(text={self.text!r}, voter_count={self.voter_count!r})".format(self=self)
    # end def __str__

    def __repr__(self):
        """
        Implements `repr(polloption_instance)`
        """
        if self._raw:
            return "PollOption.from_array({self._raw})".format(self=self)
        # end if
        return "PollOption(text={self.text!r}, voter_count={self.voter_count!r})".format(self=self)
    # end def __repr__

    def __contains__(self, key):
        """
        Implements `"key" in polloption_instance`
        """
        # a key is "contained" only when it names a known field set to a truthy value
        return (
            key in ["text", "voter_count"]
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )
    # end def __contains__
# end class PollOption


class PollAnswer(Receivable):
    """
    This object represents an answer of a user in a non-anonymous poll.

    https://core.telegram.org/bots/api#pollanswer


    Parameters:

    :param poll_id: Unique poll identifier
    :type  poll_id: str|unicode

    :param user: The user, who changed the answer to the poll
    :type  user: pytgbot.api_types.receivable.peer.User

    :param option_ids: 0-based identifiers of answer options, chosen by the user. May be empty if the user retracted their vote.
    :type  option_ids: list of int


    Optional keyword parameters:

    :param _raw: Optional. Original data this object was generated from. Could be `None`.
    :type  _raw: None | dict
    """

    def __init__(self, poll_id, user, option_ids, _raw=None):
        """
        This object represents an answer of a user in a non-anonymous poll.

        https://core.telegram.org/bots/api#pollanswer


        Parameters:

        :param poll_id: Unique poll identifier
        :type  poll_id: str|unicode

        :param user: The user, who changed the answer to the poll
        :type  user: pytgbot.api_types.receivable.peer.User

        :param option_ids: 0-based identifiers of answer options, chosen by the user. May be empty if the user retracted their vote.
        :type  option_ids: list of int


        Optional keyword parameters:

        :param _raw: Optional. Original data this object was generated from. Could be `None`.
        :type  _raw: None | dict
        """
        super(PollAnswer, self).__init__()
        # NOTE(review): local import — presumably deferred to avoid an import cycle with .peer; confirm.
        from .peer import User
        assert_type_or_raise(poll_id, unicode_type, parameter_name="poll_id")
        self.poll_id = poll_id
        assert_type_or_raise(user, User, parameter_name="user")
        self.user = user
        assert_type_or_raise(option_ids, list, parameter_name="option_ids")
        self.option_ids = option_ids
        self._raw = _raw
    # end def __init__

    def to_array(self):
        """
        Serializes this PollAnswer to a dictionary.

        :return: dictionary representation of this object.
        :rtype: dict
        """
        array = super(PollAnswer, self).to_array()
        array['poll_id'] = u(self.poll_id)  # py2: type unicode, py3: type str
        array['user'] = self.user.to_array()  # type User
        array['option_ids'] = self._as_array(self.option_ids)  # type list of int
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the PollAnswer constructor.

        :return: new array with valid values
        :rtype: dict
        """
        assert_type_or_raise(array, dict, parameter_name="array")
        # NOTE(review): local import — presumably deferred to avoid an import cycle with .peer; confirm.
        from .peer import User
        # start from the keys the base class extracts, then add our own
        data = Receivable.validate_array(array)
        data['poll_id'] = u(array.get('poll_id'))
        data['user'] = User.from_array(array.get('user'))
        data['option_ids'] = PollAnswer._builtin_from_array_list(required_type=int, value=array.get('option_ids'), list_level=1)
        return data
    # end def validate_array

    @staticmethod
    def from_array(array):
        """
        Deserialize a new PollAnswer from a given dictionary.

        :return: new PollAnswer instance.
        :rtype: PollAnswer
        """
        if not array:  # None or {}
            return None
        # end if
        data = PollAnswer.validate_array(array)
        data['_raw'] = array  # keep the raw input for __repr__ round-tripping
        return PollAnswer(**data)
    # end def from_array

    def __str__(self):
        """
        Implements `str(pollanswer_instance)`
        """
        return "PollAnswer(poll_id={self.poll_id!r}, user={self.user!r}, option_ids={self.option_ids!r})".format(self=self)
    # end def __str__

    def __repr__(self):
        """
        Implements `repr(pollanswer_instance)`
        """
        if self._raw:
            return "PollAnswer.from_array({self._raw})".format(self=self)
        # end if
        return "PollAnswer(poll_id={self.poll_id!r}, user={self.user!r}, option_ids={self.option_ids!r})".format(self=self)
    # end def __repr__

    def __contains__(self, key):
        """
        Implements `"key" in pollanswer_instance`
        """
        # a key is "contained" only when it names a known field set to a truthy value
        return (
            key in ["poll_id", "user", "option_ids"]
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )
    # end def __contains__
# end class PollAnswer


class Poll(Media):
    """
    This object contains information about a poll.

    https://core.telegram.org/bots/api#poll


    Parameters:

    :param id: Unique poll identifier
    :type  id: str|unicode

    :param question: Poll question, 1-255 characters
    :type  question: str|unicode

    :param options: List of poll options
    :type  options: list of pytgbot.api_types.receivable.media.PollOption

    :param total_voter_count: Total number of users that voted in the poll
    :type  total_voter_count: int

    :param is_closed: True, if the poll is closed
    :type  is_closed: bool

    :param is_anonymous: True, if the poll is anonymous
    :type  is_anonymous: bool

    :param type: Poll type, currently can be "regular" or "quiz"
    :type  type: str|unicode

    :param allows_multiple_answers: True, if the poll allows multiple answers
    :type  allows_multiple_answers: bool


    Optional keyword parameters:

    :param correct_option_id: Optional. 0-based identifier of the correct answer option. Available only for polls in the quiz mode, which are closed, or was sent (not forwarded) by the bot or to the private chat with the bot.
    :type  correct_option_id: int

    :param explanation: Optional. Text that is shown when a user chooses an incorrect answer or taps on the lamp icon in a quiz-style poll, 0-200 characters
    :type  explanation: str|unicode

    :param explanation_entities: Optional. Special entities like usernames, URLs, bot commands, etc. that appear in the explanation
    :type  explanation_entities: list of pytgbot.api_types.receivable.media.MessageEntity

    :param open_period: Optional. Amount of time in seconds the poll will be active after creation
    :type  open_period: int

    :param close_date: Optional. Point in time (Unix timestamp) when the poll will be automatically closed
    :type  close_date: int

    :param _raw: Optional. Original data this object was generated from. Could be `None`.
    :type  _raw: None | dict
    """

    def __init__(self, id, question, options, total_voter_count, is_closed, is_anonymous, type, allows_multiple_answers, correct_option_id=None, explanation=None, explanation_entities=None, open_period=None, close_date=None, _raw=None):
        """
        This object contains information about a poll.

        https://core.telegram.org/bots/api#poll


        Parameters:

        :param id: Unique poll identifier
        :type  id: str|unicode

        :param question: Poll question, 1-255 characters
        :type  question: str|unicode

        :param options: List of poll options
        :type  options: list of pytgbot.api_types.receivable.media.PollOption

        :param total_voter_count: Total number of users that voted in the poll
        :type  total_voter_count: int

        :param is_closed: True, if the poll is closed
        :type  is_closed: bool

        :param is_anonymous: True, if the poll is anonymous
        :type  is_anonymous: bool

        :param type: Poll type, currently can be "regular" or "quiz"
        :type  type: str|unicode

        :param allows_multiple_answers: True, if the poll allows multiple answers
        :type  allows_multiple_answers: bool


        Optional keyword parameters:

        :param correct_option_id: Optional. 0-based identifier of the correct answer option. Available only for polls in the quiz mode, which are closed, or was sent (not forwarded) by the bot or to the private chat with the bot.
        :type  correct_option_id: int

        :param explanation: Optional. Text that is shown when a user chooses an incorrect answer or taps on the lamp icon in a quiz-style poll, 0-200 characters
        :type  explanation: str|unicode

        :param explanation_entities: Optional. Special entities like usernames, URLs, bot commands, etc. that appear in the explanation
        :type  explanation_entities: list of pytgbot.api_types.receivable.media.MessageEntity

        :param open_period: Optional. Amount of time in seconds the poll will be active after creation
        :type  open_period: int

        :param close_date: Optional. Point in time (Unix timestamp) when the poll will be automatically closed
        :type  close_date: int

        :param _raw: Optional. Original data this object was generated from. Could be `None`.
        :type  _raw: None | dict
        """
        super(Poll, self).__init__()
        # required fields are validated strictly; optional ones also accept None
        assert_type_or_raise(id, unicode_type, parameter_name="id")
        self.id = id
        assert_type_or_raise(question, unicode_type, parameter_name="question")
        self.question = question
        assert_type_or_raise(options, list, parameter_name="options")
        self.options = options
        assert_type_or_raise(total_voter_count, int, parameter_name="total_voter_count")
        self.total_voter_count = total_voter_count
        assert_type_or_raise(is_closed, bool, parameter_name="is_closed")
        self.is_closed = is_closed
        assert_type_or_raise(is_anonymous, bool, parameter_name="is_anonymous")
        self.is_anonymous = is_anonymous
        assert_type_or_raise(type, unicode_type, parameter_name="type")
        self.type = type
        assert_type_or_raise(allows_multiple_answers, bool, parameter_name="allows_multiple_answers")
        self.allows_multiple_answers = allows_multiple_answers
        assert_type_or_raise(correct_option_id, None, int, parameter_name="correct_option_id")
        self.correct_option_id = correct_option_id
        assert_type_or_raise(explanation, None, unicode_type, parameter_name="explanation")
        self.explanation = explanation
        assert_type_or_raise(explanation_entities, None, list, parameter_name="explanation_entities")
        self.explanation_entities = explanation_entities
        assert_type_or_raise(open_period, None, int, parameter_name="open_period")
        self.open_period = open_period
        assert_type_or_raise(close_date, None, int, parameter_name="close_date")
        self.close_date = close_date
        self._raw = _raw
    # end def __init__

    def to_array(self):
        """
        Serializes this Poll to a dictionary.

        :return: dictionary representation of this object.
        :rtype: dict
        """
        array = super(Poll, self).to_array()
        # required fields
        array['id'] = u(self.id)  # py2: type unicode, py3: type str
        array['question'] = u(self.question)  # py2: type unicode, py3: type str
        array['options'] = self._as_array(self.options)  # type list of PollOption
        array['total_voter_count'] = int(self.total_voter_count)  # type int
        array['is_closed'] = bool(self.is_closed)  # type bool
        array['is_anonymous'] = bool(self.is_anonymous)  # type bool
        array['type'] = u(self.type)  # py2: type unicode, py3: type str
        array['allows_multiple_answers'] = bool(self.allows_multiple_answers)  # type bool
        # optional fields are only written when set
        if self.correct_option_id is not None:
            array['correct_option_id'] = int(self.correct_option_id)  # type int
        if self.explanation is not None:
            array['explanation'] = u(self.explanation)  # py2: type unicode, py3: type str
        if self.explanation_entities is not None:
            array['explanation_entities'] = self._as_array(self.explanation_entities)  # type list of MessageEntity
        if self.open_period is not None:
            array['open_period'] = int(self.open_period)  # type int
        if self.close_date is not None:
            array['close_date'] = int(self.close_date)  # type int
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the Poll constructor.
:return: new array with valid values :rtype: dict """ assert_type_or_raise(array, dict, parameter_name="array") data = Media.validate_array(array) data['id'] = u(array.get('id')) data['question'] = u(array.get('question')) data['options'] = PollOption.from_array_list(array.get('options'), list_level=1) data['total_voter_count'] = int(array.get('total_voter_count')) data['is_closed'] = bool(array.get('is_closed')) data['is_anonymous'] = bool(array.get('is_anonymous')) data['type'] = u(array.get('type')) data['allows_multiple_answers'] = bool(array.get('allows_multiple_answers')) data['correct_option_id'] = int(array.get('correct_option_id')) if array.get('correct_option_id') is not None else None data['explanation'] = u(array.get('explanation')) if array.get('explanation') is not None else None data['explanation_entities'] = MessageEntity.from_array_list(array.get('explanation_entities'), list_level=1) if array.get('explanation_entities') is not None else None data['open_period'] = int(array.get('open_period')) if array.get('open_period') is not None else None data['close_date'] = int(array.get('close_date')) if array.get('close_date') is not None else None return data # end def validate_array @staticmethod def from_array(array): """ Deserialize a new Poll from a given dictionary. :return: new Poll instance. 
:rtype: Poll """ if not array: # None or {} return None # end if data = Poll.validate_array(array) data['_raw'] = array return Poll(**data) # end def from_array def __str__(self): """ Implements `str(poll_instance)` """ return "Poll(id={self.id!r}, question={self.question!r}, options={self.options!r}, total_voter_count={self.total_voter_count!r}, is_closed={self.is_closed!r}, is_anonymous={self.is_anonymous!r}, type={self.type!r}, allows_multiple_answers={self.allows_multiple_answers!r}, correct_option_id={self.correct_option_id!r}, explanation={self.explanation!r}, explanation_entities={self.explanation_entities!r}, open_period={self.open_period!r}, close_date={self.close_date!r})".format(self=self) # end def __str__ def __repr__(self): """ Implements `repr(poll_instance)` """ if self._raw: return "Poll.from_array({self._raw})".format(self=self) # end if return "Poll(id={self.id!r}, question={self.question!r}, options={self.options!r}, total_voter_count={self.total_voter_count!r}, is_closed={self.is_closed!r}, is_anonymous={self.is_anonymous!r}, type={self.type!r}, allows_multiple_answers={self.allows_multiple_answers!r}, correct_option_id={self.correct_option_id!r}, explanation={self.explanation!r}, explanation_entities={self.explanation_entities!r}, open_period={self.open_period!r}, close_date={self.close_date!r})".format(self=self) # end def __repr__ def __contains__(self, key): """ Implements `"key" in poll_instance` """ return ( key in ["id", "question", "options", "total_voter_count", "is_closed", "is_anonymous", "type", "allows_multiple_answers", "correct_option_id", "explanation", "explanation_entities", "open_period", "close_date"] and hasattr(self, key) and bool(getattr(self, key, None)) ) # end def __contains__ # end class Poll class Location(Media): """ This object represents a point on the map. 
    https://core.telegram.org/bots/api#location


    Parameters:

    :param longitude: Longitude as defined by sender
    :type longitude: float

    :param latitude: Latitude as defined by sender
    :type latitude: float


    Optional keyword parameters:

    :param _raw: Optional. Original data this object was generated from. Could be `None`.
    :type _raw: None | dict
    """

    def __init__(self, longitude, latitude, _raw=None):
        """
        This object represents a point on the map.

        https://core.telegram.org/bots/api#location


        Parameters:

        :param longitude: Longitude as defined by sender
        :type longitude: float

        :param latitude: Latitude as defined by sender
        :type latitude: float


        Optional keyword parameters:

        :param _raw: Optional. Original data this object was generated from. Could be `None`.
        :type _raw: None | dict
        """
        super(Location, self).__init__()
        # NOTE(review): a strict float is required here; an int longitude/latitude
        # would be rejected by assert_type_or_raise (validate_array coerces first).
        assert_type_or_raise(longitude, float, parameter_name="longitude")
        self.longitude = longitude
        assert_type_or_raise(latitude, float, parameter_name="latitude")
        self.latitude = latitude
        self._raw = _raw
    # end def __init__

    def to_array(self):
        """
        Serializes this Location to a dictionary.

        :return: dictionary representation of this object.
        :rtype: dict
        """
        array = super(Location, self).to_array()
        array['longitude'] = float(self.longitude)  # type float
        array['latitude'] = float(self.latitude)  # type float
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the Location constructor.

        :return: new array with valid values
        :rtype: dict
        """
        assert_type_or_raise(array, dict, parameter_name="array")
        data = Media.validate_array(array)
        data['longitude'] = float(array.get('longitude'))
        data['latitude'] = float(array.get('latitude'))
        return data
    # end def validate_array

    @staticmethod
    def from_array(array):
        """
        Deserialize a new Location from a given dictionary.

        :return: new Location instance.
        :rtype: Location
        """
        if not array:  # None or {}
            return None
        # end if

        data = Location.validate_array(array)
        data['_raw'] = array
        return Location(**data)
    # end def from_array

    def __str__(self):
        """
        Implements `str(location_instance)`
        """
        return "Location(longitude={self.longitude!r}, latitude={self.latitude!r})".format(self=self)
    # end def __str__

    def __repr__(self):
        """
        Implements `repr(location_instance)`
        """
        if self._raw:
            return "Location.from_array({self._raw})".format(self=self)
        # end if
        return "Location(longitude={self.longitude!r}, latitude={self.latitude!r})".format(self=self)
    # end def __repr__

    def __contains__(self, key):
        """
        Implements `"key" in location_instance`
        """
        # NOTE(review): set-but-falsy fields (e.g. longitude == 0.0) report as "not in".
        return (
            key in ["longitude", "latitude"]
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )
    # end def __contains__
# end class Location


class Venue(Media):
    """
    This object represents a venue.

    https://core.telegram.org/bots/api#venue


    Parameters:

    :param location: Venue location
    :type location: pytgbot.api_types.receivable.media.Location

    :param title: Name of the venue
    :type title: str|unicode

    :param address: Address of the venue
    :type address: str|unicode


    Optional keyword parameters:

    :param foursquare_id: Optional. Foursquare identifier of the venue
    :type foursquare_id: str|unicode

    :param foursquare_type: Optional. Foursquare type of the venue. (For example, "arts_entertainment/default", "arts_entertainment/aquarium" or "food/icecream".)
    :type foursquare_type: str|unicode

    :param _raw: Optional. Original data this object was generated from. Could be `None`.
    :type _raw: None | dict
    """

    def __init__(self, location, title, address, foursquare_id=None, foursquare_type=None, _raw=None):
        """
        This object represents a venue.

        https://core.telegram.org/bots/api#venue


        Parameters:

        :param location: Venue location
        :type location: pytgbot.api_types.receivable.media.Location

        :param title: Name of the venue
        :type title: str|unicode

        :param address: Address of the venue
        :type address: str|unicode


        Optional keyword parameters:

        :param foursquare_id: Optional. Foursquare identifier of the venue
        :type foursquare_id: str|unicode

        :param foursquare_type: Optional. Foursquare type of the venue. (For example, "arts_entertainment/default", "arts_entertainment/aquarium" or "food/icecream".)
        :type foursquare_type: str|unicode

        :param _raw: Optional. Original data this object was generated from. Could be `None`.
        :type _raw: None | dict
        """
        super(Venue, self).__init__()
        assert_type_or_raise(location, Location, parameter_name="location")
        self.location = location
        assert_type_or_raise(title, unicode_type, parameter_name="title")
        self.title = title
        assert_type_or_raise(address, unicode_type, parameter_name="address")
        self.address = address
        assert_type_or_raise(foursquare_id, None, unicode_type, parameter_name="foursquare_id")
        self.foursquare_id = foursquare_id
        assert_type_or_raise(foursquare_type, None, unicode_type, parameter_name="foursquare_type")
        self.foursquare_type = foursquare_type
        self._raw = _raw
    # end def __init__

    def to_array(self):
        """
        Serializes this Venue to a dictionary.

        :return: dictionary representation of this object.
        :rtype: dict
        """
        array = super(Venue, self).to_array()
        # nested Location is serialized recursively
        array['location'] = self.location.to_array()  # type Location
        array['title'] = u(self.title)  # py2: type unicode, py3: type str
        array['address'] = u(self.address)  # py2: type unicode, py3: type str
        if self.foursquare_id is not None:
            array['foursquare_id'] = u(self.foursquare_id)  # py2: type unicode, py3: type str
        if self.foursquare_type is not None:
            array['foursquare_type'] = u(self.foursquare_type)  # py2: type unicode, py3: type str
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the Venue constructor.

        :return: new array with valid values
        :rtype: dict
        """
        assert_type_or_raise(array, dict, parameter_name="array")
        data = Media.validate_array(array)
        data['location'] = Location.from_array(array.get('location'))
        data['title'] = u(array.get('title'))
        data['address'] = u(array.get('address'))
        data['foursquare_id'] = u(array.get('foursquare_id')) if array.get('foursquare_id') is not None else None
        data['foursquare_type'] = u(array.get('foursquare_type')) if array.get('foursquare_type') is not None else None
        return data
    # end def validate_array

    @staticmethod
    def from_array(array):
        """
        Deserialize a new Venue from a given dictionary.

        :return: new Venue instance.
        :rtype: Venue
        """
        if not array:  # None or {}
            return None
        # end if

        data = Venue.validate_array(array)
        data['_raw'] = array
        return Venue(**data)
    # end def from_array

    def __str__(self):
        """
        Implements `str(venue_instance)`
        """
        return "Venue(location={self.location!r}, title={self.title!r}, address={self.address!r}, foursquare_id={self.foursquare_id!r}, foursquare_type={self.foursquare_type!r})".format(self=self)
    # end def __str__

    def __repr__(self):
        """
        Implements `repr(venue_instance)`
        """
        if self._raw:
            return "Venue.from_array({self._raw})".format(self=self)
        # end if
        return "Venue(location={self.location!r}, title={self.title!r}, address={self.address!r}, foursquare_id={self.foursquare_id!r}, foursquare_type={self.foursquare_type!r})".format(self=self)
    # end def __repr__

    def __contains__(self, key):
        """
        Implements `"key" in venue_instance`
        """
        return (
            key in ["location", "title", "address", "foursquare_id", "foursquare_type"]
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )
    # end def __contains__
# end class Venue


class UserProfilePhotos(Result):
    """
    This object represent a user's profile pictures.

    https://core.telegram.org/bots/api#userprofilephotos


    Parameters:

    :param total_count: Total number of profile pictures the target user has
    :type total_count: int

    :param photos: Requested profile pictures (in up to 4 sizes each)
    :type photos: list of list of pytgbot.api_types.receivable.media.PhotoSize


    Optional keyword parameters:

    :param _raw: Optional. Original data this object was generated from. Could be `None`.
    :type _raw: None | dict
    """

    def __init__(self, total_count, photos, _raw=None):
        """
        This object represent a user's profile pictures.
        https://core.telegram.org/bots/api#userprofilephotos


        Parameters:

        :param total_count: Total number of profile pictures the target user has
        :type total_count: int

        :param photos: Requested profile pictures (in up to 4 sizes each)
        :type photos: list of list of pytgbot.api_types.receivable.media.PhotoSize


        Optional keyword parameters:

        :param _raw: Optional. Original data this object was generated from. Could be `None`.
        :type _raw: None | dict
        """
        super(UserProfilePhotos, self).__init__()
        assert_type_or_raise(total_count, int, parameter_name="total_count")
        self.total_count = total_count
        # 'photos' is a list of lists (each photo in up to 4 sizes)
        assert_type_or_raise(photos, list, parameter_name="photos")
        self.photos = photos
        self._raw = _raw
    # end def __init__

    def to_array(self):
        """
        Serializes this UserProfilePhotos to a dictionary.

        :return: dictionary representation of this object.
        :rtype: dict
        """
        array = super(UserProfilePhotos, self).to_array()
        array['total_count'] = int(self.total_count)  # type int
        array['photos'] = self._as_array(self.photos)  # type list of list of PhotoSize
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the UserProfilePhotos constructor.

        :return: new array with valid values
        :rtype: dict
        """
        assert_type_or_raise(array, dict, parameter_name="array")
        data = Result.validate_array(array)
        data['total_count'] = int(array.get('total_count'))
        # list_level=2: the payload is a list of lists of PhotoSize dicts
        data['photos'] = PhotoSize.from_array_list(array.get('photos'), list_level=2)
        return data
    # end def validate_array

    @staticmethod
    def from_array(array):
        """
        Deserialize a new UserProfilePhotos from a given dictionary.

        :return: new UserProfilePhotos instance.
        :rtype: UserProfilePhotos
        """
        if not array:  # None or {}
            return None
        # end if

        data = UserProfilePhotos.validate_array(array)
        data['_raw'] = array
        return UserProfilePhotos(**data)
    # end def from_array

    def __str__(self):
        """
        Implements `str(userprofilephotos_instance)`
        """
        return "UserProfilePhotos(total_count={self.total_count!r}, photos={self.photos!r})".format(self=self)
    # end def __str__

    def __repr__(self):
        """
        Implements `repr(userprofilephotos_instance)`
        """
        if self._raw:
            return "UserProfilePhotos.from_array({self._raw})".format(self=self)
        # end if
        return "UserProfilePhotos(total_count={self.total_count!r}, photos={self.photos!r})".format(self=self)
    # end def __repr__

    def __contains__(self, key):
        """
        Implements `"key" in userprofilephotos_instance`
        """
        return (
            key in ["total_count", "photos"]
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )
    # end def __contains__
# end class UserProfilePhotos


class File(Receivable):
    """
    This object represents a file ready to be downloaded. The file can be downloaded via the link https://api.telegram.org/file/bot<token>/<file_path>. It is guaranteed that the link will be valid for at least 1 hour. When the link expires, a new one can be requested by calling getFile. Maximum file size to download is 20 MB

    https://core.telegram.org/bots/api#file


    Parameters:

    :param file_id: Identifier for this file, which can be used to download or reuse the file
    :type file_id: str|unicode

    :param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
    :type file_unique_id: str|unicode


    Optional keyword parameters:

    :param file_size: Optional. File size, if known
    :type file_size: int

    :param file_path: Optional. File path. Use https://api.telegram.org/file/bot<token>/<file_path> to get the file.
    :type file_path: str|unicode

    :param _raw: Optional. Original data this object was generated from. Could be `None`.
    :type _raw: None | dict
    """

    def __init__(self, file_id, file_unique_id, file_size=None, file_path=None, _raw=None):
        """
        This object represents a file ready to be downloaded. The file can be downloaded via the link https://api.telegram.org/file/bot<token>/<file_path>. It is guaranteed that the link will be valid for at least 1 hour. When the link expires, a new one can be requested by calling getFile. Maximum file size to download is 20 MB

        https://core.telegram.org/bots/api#file


        Parameters:

        :param file_id: Identifier for this file, which can be used to download or reuse the file
        :type file_id: str|unicode

        :param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
        :type file_unique_id: str|unicode


        Optional keyword parameters:

        :param file_size: Optional. File size, if known
        :type file_size: int

        :param file_path: Optional. File path. Use https://api.telegram.org/file/bot<token>/<file_path> to get the file.
        :type file_path: str|unicode

        :param _raw: Optional. Original data this object was generated from. Could be `None`.
        :type _raw: None | dict
        """
        super(File, self).__init__()
        assert_type_or_raise(file_id, unicode_type, parameter_name="file_id")
        self.file_id = file_id
        assert_type_or_raise(file_unique_id, unicode_type, parameter_name="file_unique_id")
        self.file_unique_id = file_unique_id
        assert_type_or_raise(file_size, None, int, parameter_name="file_size")
        self.file_size = file_size
        assert_type_or_raise(file_path, None, unicode_type, parameter_name="file_path")
        self.file_path = file_path
        self._raw = _raw
    # end def __init__

    def get_download_url(self, token):
        """
        Creates a url to download the file.

        Note: Contains the secret API key, so you should not share this url!

        :param token: API key
        :type token: str

        :return: url
        :rtype: str
        """
        # NOTE(review): no guard for self.file_path being None — if file_path was
        # never set, the resulting url contains the literal text "None". Verify
        # callers always obtain this object via getFile first.
        return "https://api.telegram.org/file/bot{token}/{file_path}".format(token=token, file_path=self.file_path)
    # end def get_download_url

    def to_array(self):
        """
        Serializes this File to a dictionary.

        :return: dictionary representation of this object.
        :rtype: dict
        """
        array = super(File, self).to_array()
        array['file_id'] = u(self.file_id)  # py2: type unicode, py3: type str
        array['file_unique_id'] = u(self.file_unique_id)  # py2: type unicode, py3: type str
        if self.file_size is not None:
            array['file_size'] = int(self.file_size)  # type int
        if self.file_path is not None:
            array['file_path'] = u(self.file_path)  # py2: type unicode, py3: type str
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the File constructor.

        :return: new array with valid values
        :rtype: dict
        """
        assert_type_or_raise(array, dict, parameter_name="array")
        data = Receivable.validate_array(array)
        data['file_id'] = u(array.get('file_id'))
        data['file_unique_id'] = u(array.get('file_unique_id'))
        data['file_size'] = int(array.get('file_size')) if array.get('file_size') is not None else None
        data['file_path'] = u(array.get('file_path')) if array.get('file_path') is not None else None
        return data
    # end def validate_array

    @staticmethod
    def from_array(array):
        """
        Deserialize a new File from a given dictionary.

        :return: new File instance.
        :rtype: File
        """
        if not array:  # None or {}
            return None
        # end if

        data = File.validate_array(array)
        data['_raw'] = array
        return File(**data)
    # end def from_array

    def __str__(self):
        """
        Implements `str(file_instance)`
        """
        return "File(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, file_size={self.file_size!r}, file_path={self.file_path!r})".format(self=self)
    # end def __str__

    def __repr__(self):
        """
        Implements `repr(file_instance)`
        """
        if self._raw:
            return "File.from_array({self._raw})".format(self=self)
        # end if
        return "File(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, file_size={self.file_size!r}, file_path={self.file_path!r})".format(self=self)
    # end def __repr__

    def __contains__(self, key):
        """
        Implements `"key" in file_instance`
        """
        return (
            key in ["file_id", "file_unique_id", "file_size", "file_path"]
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )
    # end def __contains__
# end class File


class ChatPhoto(Result):
    """
    This object represents a chat photo.

    https://core.telegram.org/bots/api#chatphoto


    Parameters:

    :param small_file_id: File identifier of small (160x160) chat photo. This file_id can be used only for photo download and only for as long as the photo is not changed.
    :type small_file_id: str|unicode

    :param small_file_unique_id: Unique file identifier of small (160x160) chat photo, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
    :type small_file_unique_id: str|unicode

    :param big_file_id: File identifier of big (640x640) chat photo. This file_id can be used only for photo download and only for as long as the photo is not changed.
    :type big_file_id: str|unicode

    :param big_file_unique_id: Unique file identifier of big (640x640) chat photo, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
    :type big_file_unique_id: str|unicode


    Optional keyword parameters:

    :param _raw: Optional.
    Original data this object was generated from. Could be `None`.
    :type _raw: None | dict
    """

    def __init__(self, small_file_id, small_file_unique_id, big_file_id, big_file_unique_id, _raw=None):
        """
        This object represents a chat photo.

        https://core.telegram.org/bots/api#chatphoto


        Parameters:

        :param small_file_id: File identifier of small (160x160) chat photo. This file_id can be used only for photo download and only for as long as the photo is not changed.
        :type small_file_id: str|unicode

        :param small_file_unique_id: Unique file identifier of small (160x160) chat photo, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
        :type small_file_unique_id: str|unicode

        :param big_file_id: File identifier of big (640x640) chat photo. This file_id can be used only for photo download and only for as long as the photo is not changed.
        :type big_file_id: str|unicode

        :param big_file_unique_id: Unique file identifier of big (640x640) chat photo, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
        :type big_file_unique_id: str|unicode


        Optional keyword parameters:

        :param _raw: Optional. Original data this object was generated from. Could be `None`.
        :type _raw: None | dict
        """
        super(ChatPhoto, self).__init__()
        assert_type_or_raise(small_file_id, unicode_type, parameter_name="small_file_id")
        self.small_file_id = small_file_id
        assert_type_or_raise(small_file_unique_id, unicode_type, parameter_name="small_file_unique_id")
        self.small_file_unique_id = small_file_unique_id
        assert_type_or_raise(big_file_id, unicode_type, parameter_name="big_file_id")
        self.big_file_id = big_file_id
        assert_type_or_raise(big_file_unique_id, unicode_type, parameter_name="big_file_unique_id")
        self.big_file_unique_id = big_file_unique_id
        self._raw = _raw
    # end def __init__

    def to_array(self):
        """
        Serializes this ChatPhoto to a dictionary.

        :return: dictionary representation of this object.
        :rtype: dict
        """
        array = super(ChatPhoto, self).to_array()
        array['small_file_id'] = u(self.small_file_id)  # py2: type unicode, py3: type str
        array['small_file_unique_id'] = u(self.small_file_unique_id)  # py2: type unicode, py3: type str
        array['big_file_id'] = u(self.big_file_id)  # py2: type unicode, py3: type str
        array['big_file_unique_id'] = u(self.big_file_unique_id)  # py2: type unicode, py3: type str
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the ChatPhoto constructor.

        :return: new array with valid values
        :rtype: dict
        """
        assert_type_or_raise(array, dict, parameter_name="array")
        data = Result.validate_array(array)
        data['small_file_id'] = u(array.get('small_file_id'))
        data['small_file_unique_id'] = u(array.get('small_file_unique_id'))
        data['big_file_id'] = u(array.get('big_file_id'))
        data['big_file_unique_id'] = u(array.get('big_file_unique_id'))
        return data
    # end def validate_array

    @staticmethod
    def from_array(array):
        """
        Deserialize a new ChatPhoto from a given dictionary.

        :return: new ChatPhoto instance.
        :rtype: ChatPhoto
        """
        if not array:  # None or {}
            return None
        # end if

        data = ChatPhoto.validate_array(array)
        data['_raw'] = array
        return ChatPhoto(**data)
    # end def from_array

    def __str__(self):
        """
        Implements `str(chatphoto_instance)`
        """
        return "ChatPhoto(small_file_id={self.small_file_id!r}, small_file_unique_id={self.small_file_unique_id!r}, big_file_id={self.big_file_id!r}, big_file_unique_id={self.big_file_unique_id!r})".format(self=self)
    # end def __str__

    def __repr__(self):
        """
        Implements `repr(chatphoto_instance)`
        """
        if self._raw:
            return "ChatPhoto.from_array({self._raw})".format(self=self)
        # end if
        return "ChatPhoto(small_file_id={self.small_file_id!r}, small_file_unique_id={self.small_file_unique_id!r}, big_file_id={self.big_file_id!r}, big_file_unique_id={self.big_file_unique_id!r})".format(self=self)
    # end def __repr__

    def __contains__(self, key):
        """
        Implements `"key" in chatphoto_instance`
        """
        return (
            key in ["small_file_id", "small_file_unique_id", "big_file_id", "big_file_unique_id"]
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )
    # end def __contains__
# end class ChatPhoto


class Sticker(Media):
    """
    This object represents a sticker.

    https://core.telegram.org/bots/api#sticker


    Parameters:

    :param file_id: Identifier for this file, which can be used to download or reuse the file
    :type file_id: str|unicode

    :param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
    :type file_unique_id: str|unicode

    :param width: Sticker width
    :type width: int

    :param height: Sticker height
    :type height: int

    :param is_animated: True, if the sticker is animated
    :type is_animated: bool


    Optional keyword parameters:

    :param thumb: Optional. Sticker thumbnail in the .WEBP or .JPG format
    :type thumb: pytgbot.api_types.receivable.media.PhotoSize

    :param emoji: Optional. Emoji associated with the sticker
    :type emoji: str|unicode

    :param set_name: Optional.
    Name of the sticker set to which the sticker belongs
    :type set_name: str|unicode

    :param mask_position: Optional. For mask stickers, the position where the mask should be placed
    :type mask_position: pytgbot.api_types.receivable.stickers.MaskPosition

    :param file_size: Optional. File size
    :type file_size: int

    :param _raw: Optional. Original data this object was generated from. Could be `None`.
    :type _raw: None | dict
    """

    def __init__(self, file_id, file_unique_id, width, height, is_animated, thumb=None, emoji=None, set_name=None, mask_position=None, file_size=None, _raw=None):
        """
        This object represents a sticker.

        https://core.telegram.org/bots/api#sticker


        Parameters:

        :param file_id: Identifier for this file, which can be used to download or reuse the file
        :type file_id: str|unicode

        :param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
        :type file_unique_id: str|unicode

        :param width: Sticker width
        :type width: int

        :param height: Sticker height
        :type height: int

        :param is_animated: True, if the sticker is animated
        :type is_animated: bool


        Optional keyword parameters:

        :param thumb: Optional. Sticker thumbnail in the .WEBP or .JPG format
        :type thumb: pytgbot.api_types.receivable.media.PhotoSize

        :param emoji: Optional. Emoji associated with the sticker
        :type emoji: str|unicode

        :param set_name: Optional. Name of the sticker set to which the sticker belongs
        :type set_name: str|unicode

        :param mask_position: Optional. For mask stickers, the position where the mask should be placed
        :type mask_position: pytgbot.api_types.receivable.stickers.MaskPosition

        :param file_size: Optional. File size
        :type file_size: int

        :param _raw: Optional. Original data this object was generated from. Could be `None`.
        :type _raw: None | dict
        """
        super(Sticker, self).__init__()
        # deferred import — presumably avoids a circular dependency with .stickers; verify
        from .stickers import MaskPosition
        assert_type_or_raise(file_id, unicode_type, parameter_name="file_id")
        self.file_id = file_id
        assert_type_or_raise(file_unique_id, unicode_type, parameter_name="file_unique_id")
        self.file_unique_id = file_unique_id
        assert_type_or_raise(width, int, parameter_name="width")
        self.width = width
        assert_type_or_raise(height, int, parameter_name="height")
        self.height = height
        assert_type_or_raise(is_animated, bool, parameter_name="is_animated")
        self.is_animated = is_animated
        assert_type_or_raise(thumb, None, PhotoSize, parameter_name="thumb")
        self.thumb = thumb
        assert_type_or_raise(emoji, None, unicode_type, parameter_name="emoji")
        self.emoji = emoji
        assert_type_or_raise(set_name, None, unicode_type, parameter_name="set_name")
        self.set_name = set_name
        assert_type_or_raise(mask_position, None, MaskPosition, parameter_name="mask_position")
        self.mask_position = mask_position
        assert_type_or_raise(file_size, None, int, parameter_name="file_size")
        self.file_size = file_size
        self._raw = _raw
    # end def __init__

    def to_array(self):
        """
        Serializes this Sticker to a dictionary.

        :return: dictionary representation of this object.
        :rtype: dict
        """
        array = super(Sticker, self).to_array()
        array['file_id'] = u(self.file_id)  # py2: type unicode, py3: type str
        array['file_unique_id'] = u(self.file_unique_id)  # py2: type unicode, py3: type str
        array['width'] = int(self.width)  # type int
        array['height'] = int(self.height)  # type int
        array['is_animated'] = bool(self.is_animated)  # type bool
        if self.thumb is not None:
            array['thumb'] = self.thumb.to_array()  # type PhotoSize
        if self.emoji is not None:
            array['emoji'] = u(self.emoji)  # py2: type unicode, py3: type str
        if self.set_name is not None:
            array['set_name'] = u(self.set_name)  # py2: type unicode, py3: type str
        if self.mask_position is not None:
            array['mask_position'] = self.mask_position.to_array()  # type MaskPosition
        if self.file_size is not None:
            array['file_size'] = int(self.file_size)  # type int
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the Sticker constructor.

        :return: new array with valid values
        :rtype: dict
        """
        assert_type_or_raise(array, dict, parameter_name="array")
        # deferred import — presumably avoids a circular dependency with .stickers; verify
        from .stickers import MaskPosition
        data = Media.validate_array(array)
        data['file_id'] = u(array.get('file_id'))
        data['file_unique_id'] = u(array.get('file_unique_id'))
        data['width'] = int(array.get('width'))
        data['height'] = int(array.get('height'))
        data['is_animated'] = bool(array.get('is_animated'))
        data['thumb'] = PhotoSize.from_array(array.get('thumb')) if array.get('thumb') is not None else None
        data['emoji'] = u(array.get('emoji')) if array.get('emoji') is not None else None
        data['set_name'] = u(array.get('set_name')) if array.get('set_name') is not None else None
        data['mask_position'] = MaskPosition.from_array(array.get('mask_position')) if array.get('mask_position') is not None else None
        data['file_size'] = int(array.get('file_size')) if array.get('file_size') is not None else None
        return data
    # end def validate_array

    @staticmethod
    def from_array(array):
        """
        Deserialize a new Sticker from a given dictionary.

        :return: new Sticker instance.
        :rtype: Sticker
        """
        if not array:  # None or {}
            return None
        # end if

        data = Sticker.validate_array(array)
        data['_raw'] = array
        return Sticker(**data)
    # end def from_array

    def __str__(self):
        """
        Implements `str(sticker_instance)`
        """
        return "Sticker(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, width={self.width!r}, height={self.height!r}, is_animated={self.is_animated!r}, thumb={self.thumb!r}, emoji={self.emoji!r}, set_name={self.set_name!r}, mask_position={self.mask_position!r}, file_size={self.file_size!r})".format(self=self)
    # end def __str__

    def __repr__(self):
        """
        Implements `repr(sticker_instance)`
        """
        if self._raw:
            return "Sticker.from_array({self._raw})".format(self=self)
        # end if
        return "Sticker(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, width={self.width!r}, height={self.height!r}, is_animated={self.is_animated!r}, thumb={self.thumb!r}, emoji={self.emoji!r}, set_name={self.set_name!r}, mask_position={self.mask_position!r}, file_size={self.file_size!r})".format(self=self)
    # end def __repr__

    def __contains__(self, key):
        """
        Implements `"key" in sticker_instance`
        """
        return (
            key in ["file_id", "file_unique_id", "width", "height", "is_animated", "thumb", "emoji", "set_name", "mask_position", "file_size"]
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )
    # end def __contains__
# end class Sticker


class Game(Media):
    """
    This object represents a game. Use BotFather to create and edit games, their short names will act as unique identifiers.

    https://core.telegram.org/bots/api#game


    Parameters:

    :param title: Title of the game
    :type title: str|unicode

    :param description: Description of the game
    :type description: str|unicode

    :param photo: Photo that will be displayed in the game message in chats.
    :type photo: list of pytgbot.api_types.receivable.media.PhotoSize


    Optional keyword parameters:

    :param text: Optional.
Brief description of the game or high scores included in the game message. Can be automatically edited to include current high scores for the game when the bot calls setGameScore, or manually edited using editMessageText. 0-4096 characters. :type text: str|unicode :param text_entities: Optional. Special entities that appear in text, such as usernames, URLs, bot commands, etc. :type text_entities: list of pytgbot.api_types.receivable.media.MessageEntity :param animation: Optional. Animation that will be displayed in the game message in chats. Upload via BotFather :type animation: pytgbot.api_types.receivable.media.Animation :param _raw: Optional. Original data this object was generated from. Could be `None`. :type _raw: None | dict """ def __init__(self, title, description, photo, text=None, text_entities=None, animation=None, _raw=None): """ This object represents a game. Use BotFather to create and edit games, their short names will act as unique identifiers. https://core.telegram.org/bots/api#game Parameters: :param title: Title of the game :type title: str|unicode :param description: Description of the game :type description: str|unicode :param photo: Photo that will be displayed in the game message in chats. :type photo: list of pytgbot.api_types.receivable.media.PhotoSize Optional keyword parameters: :param text: Optional. Brief description of the game or high scores included in the game message. Can be automatically edited to include current high scores for the game when the bot calls setGameScore, or manually edited using editMessageText. 0-4096 characters. :type text: str|unicode :param text_entities: Optional. Special entities that appear in text, such as usernames, URLs, bot commands, etc. :type text_entities: list of pytgbot.api_types.receivable.media.MessageEntity :param animation: Optional. Animation that will be displayed in the game message in chats. 
Upload via BotFather :type animation: pytgbot.api_types.receivable.media.Animation :param _raw: Optional. Original data this object was generated from. Could be `None`. :type _raw: None | dict """ super(Game, self).__init__() assert_type_or_raise(title, unicode_type, parameter_name="title") self.title = title assert_type_or_raise(description, unicode_type, parameter_name="description") self.description = description assert_type_or_raise(photo, list, parameter_name="photo") self.photo = photo assert_type_or_raise(text, None, unicode_type, parameter_name="text") self.text = text assert_type_or_raise(text_entities, None, list, parameter_name="text_entities") self.text_entities = text_entities assert_type_or_raise(animation, None, Animation, parameter_name="animation") self.animation = animation self._raw = _raw # end def __init__ def to_array(self): """ Serializes this Game to a dictionary. :return: dictionary representation of this object. :rtype: dict """ array = super(Game, self).to_array() array['title'] = u(self.title) # py2: type unicode, py3: type str array['description'] = u(self.description) # py2: type unicode, py3: type str array['photo'] = self._as_array(self.photo) # type list of PhotoSize if self.text is not None: array['text'] = u(self.text) # py2: type unicode, py3: type str if self.text_entities is not None: array['text_entities'] = self._as_array(self.text_entities) # type list of MessageEntity if self.animation is not None: array['animation'] = self.animation.to_array() # type Animation return array # end def to_array @staticmethod def validate_array(array): """ Builds a new array with valid values for the Game constructor. 
:return: new array with valid values :rtype: dict """ assert_type_or_raise(array, dict, parameter_name="array") data = Media.validate_array(array) data['title'] = u(array.get('title')) data['description'] = u(array.get('description')) data['photo'] = PhotoSize.from_array_list(array.get('photo'), list_level=1) data['text'] = u(array.get('text')) if array.get('text') is not None else None data['text_entities'] = MessageEntity.from_array_list(array.get('text_entities'), list_level=1) if array.get('text_entities') is not None else None data['animation'] = Animation.from_array(array.get('animation')) if array.get('animation') is not None else None return data # end def validate_array @staticmethod def from_array(array): """ Deserialize a new Game from a given dictionary. :return: new Game instance. :rtype: Game """ if not array: # None or {} return None # end if data = Game.validate_array(array) data['_raw'] = array return Game(**data) # end def from_array def __str__(self): """ Implements `str(game_instance)` """ return "Game(title={self.title!r}, description={self.description!r}, photo={self.photo!r}, text={self.text!r}, text_entities={self.text_entities!r}, animation={self.animation!r})".format(self=self) # end def __str__ def __repr__(self): """ Implements `repr(game_instance)` """ if self._raw: return "Game.from_array({self._raw})".format(self=self) # end if return "Game(title={self.title!r}, description={self.description!r}, photo={self.photo!r}, text={self.text!r}, text_entities={self.text_entities!r}, animation={self.animation!r})".format(self=self) # end def __repr__ def __contains__(self, key): """ Implements `"key" in game_instance` """ return ( key in ["title", "description", "photo", "text", "text_entities", "animation"] and hasattr(self, key) and bool(getattr(self, key, None)) ) # end def __contains__ # end class Game
Carolina Moon digital sheet music. Contains printable sheet music plus an interactive, downloadable digital sheet music file. The Arrangement Details Tab gives you detailed information about this particular arrangement of Carolina Moon - not necessarily the song. There are no reviews written for Carolina Moon.
""" :py:mod:`rootpy.ROOT` ===================== This module is intended to be a drop-in replacement for ordinary PyROOT imports by mimicking PyROOT's interface. If you find a case where it is not, please report an issue to the rootpy developers. Both ROOT and rootpy classes can be accessed in a harmonized way through this module. This means you can take advantage of rootpy classes automatically by replacing ``import ROOT`` with ``import rootpy.ROOT as ROOT`` or ``from rootpy import ROOT`` in your code, while maintaining backward compatibility with existing use of ROOT's classes. ROOT classes are automatically "asrootpy'd" *after* the constructor in ROOT has been called: .. sourcecode:: python >>> import rootpy.ROOT as ROOT >>> h = ROOT.TH1F('name', 'title', 10, 0, 1) >>> h Hist('name') >>> h.TYPE 'F' Also access rootpy classes under this same module without needing to remember where to import them from in rootpy: .. sourcecode:: python >>> import rootpy.ROOT as ROOT >>> h = ROOT.Hist(10, 0, 1, name='name', type='F') >>> h Hist('name') >>> h.TYPE 'F' Plain old ROOT can still be accessed through the ``R`` property: .. sourcecode:: python >>> from rootpy import ROOT >>> ROOT.R.TFile <class 'ROOT.TFile'> """ from __future__ import absolute_import from copy import copy import ROOT from . import asrootpy, lookup_rootpy, ROOT_VERSION from . 
import QROOT, stl from .utils.module_facade import Facade __all__ = [] def proxy_global(name, no_expand_macro=False, fname='func', args=()): """ Used to automatically asrootpy ROOT's thread local variables """ if no_expand_macro: # pragma: no cover # handle older ROOT versions without _ExpandMacroFunction wrapping @property def gSomething_no_func(self): glob = self(getattr(ROOT, name)) # create a fake func() that just returns self def func(): return glob glob.func = func return glob return gSomething_no_func @property def gSomething(self): obj_func = getattr(getattr(ROOT, name), fname) try: obj = obj_func(*args) except ReferenceError: # null pointer return None # asrootpy return self(obj) return gSomething @Facade(__name__, expose_internal=False) class Module(object): __version__ = ROOT_VERSION def __call__(self, arg, after_init=False): return asrootpy(arg, warn=False, after_init=after_init) def __getattr__(self, what): try: # check ROOT result = self(getattr(ROOT, what), after_init=True) except AttributeError: # check rootpy result = lookup_rootpy(what) if result is None: raise AttributeError( 'ROOT does not have the attribute `{0}` ' 'and rootpy does not contain the class `{0}`'.format(what)) return result try: # Memoize setattr(self, what, result) except AttributeError: # Oops... Oh well. I tried. 
pass return result @property def R(self): return ROOT gPad = proxy_global("gPad", fname='GetPad' if ROOT_VERSION >= (6, 9, 2) else 'func', args=(0,) if ROOT_VERSION >= (6, 9, 2) else ()) gVirtualX = proxy_global("gVirtualX") if ROOT_VERSION < (5, 32, 0): # pragma: no cover gDirectory = proxy_global("gDirectory", no_expand_macro=True) gFile = proxy_global("gFile", no_expand_macro=True) gInterpreter = proxy_global("gInterpreter", no_expand_macro=True) else: gDirectory = proxy_global("gDirectory", fname='CurrentDirectory' if ROOT_VERSION >= (6, 9, 2) else 'func') gFile = proxy_global("gFile", fname='CurrentFile' if ROOT_VERSION >= (6, 9, 2) else 'func') gInterpreter = proxy_global("gInterpreter", no_expand_macro=ROOT_VERSION >= (6, 9, 2)) # use the smart template STL types from rootpy.stl instead for t in QROOT.std.stlclasses: locals()[t] = getattr(stl, t) del t
The Trimble TMT ServiceConnect module connects fleet shops using TMT Fleet Maintenance to more than 4,500 medium- and heavy-duty service locations, including many OEM dealers, throughout North America. The module is powered by Decisiv Service Relationship Management technology and provides more seamless collaboration between fleets and heavy-duty repair shops, according to Trimble. Fleet managers can schedule repair and maintenance activities; track service status; record service, parts replaced and labor; and create invoices for the service performed. They can see the status of all equipment being worked on across service locations within the new module, and it enables fleet shops to communicate directly with all service providers across their networks to schedule and approve repair work to be completed.
""" Django JSON Field. This extends Django Model Fields to store JSON as a field-type. """ #TODO - Move this to utils or another application. This is tangential to reporting and useful for other things. from django.db import models try: import json as simplejson except ImportError: from django.utils import simplejson from django.core.serializers.json import DjangoJSONEncoder import logging class JSONFieldDescriptor(object): def __init__(self, field, datatype=dict): """ Create a JSONFieldDescriptor :param field: The field to create the descriptor for. :param datatype: The datatype of the descriptor. """ self.field = field self.datatype = datatype def __get__(self, instance=None, owner=None): if instance is None: raise AttributeError( "The '%s' attribute can only be accessed from %s instances." % (self.field.name, owner.__name__)) if not hasattr(instance, self.field.get_cache_name()): data = instance.__dict__.get(self.field.attname, self.datatype()) if not isinstance(data, self.datatype): data = self.field.loads(data) if data is None: data = self.datatype() setattr(instance, self.field.get_cache_name(), data) return getattr(instance, self.field.get_cache_name()) def __set__(self, instance, value): if not isinstance(value, (self.datatype, basestring)): value = self.datatype(value) instance.__dict__[self.field.attname] = value try: delattr(instance, self.field.get_cache_name()) except AttributeError: pass class JSONField(models.TextField): """ A field for storing JSON-encoded data. The data is accessible as standard Python data types and is transparently encoded/decoded to/from a JSON string in the database. """ serialize_to_string = True descriptor_class = JSONFieldDescriptor def __init__(self, verbose_name=None, name=None, encoder=DjangoJSONEncoder(), decoder=simplejson.JSONDecoder(), datatype=dict, **kwargs): """ Create a new JSONField :param verbose_name: The verbose name of the field :param name: The short name of the field. 
:param encoder: The encoder used to turn native datatypes into JSON. :param decoder: The decoder used to turn JSON into native datatypes. :param datatype: The native datatype to store. :param kwargs: Other arguments to pass to parent constructor. """ blank = kwargs.pop('blank', True) models.TextField.__init__(self, verbose_name, name, blank=blank, **kwargs) self.encoder = encoder self.decoder = decoder self.datatype = datatype #TODO - Is this used anywhere? If not, let's remove it. def db_type(self, connection=None): """ Returns the database type. Overrides django.db.models.Field's db_type. :param connection: The database connection - defaults to none. :return: The database type. Always returns the string 'text'. """ return "text" def contribute_to_class(self, cls, name): """ Overrides django.db.models.Field's contribute to class to handle descriptors. :param cls: The class to contribute to. :param name: The name. """ super(JSONField, self).contribute_to_class(cls, name) setattr(cls, self.name, self.descriptor_class(self, self.datatype)) def pre_save(self, model_instance, add): "Returns field's value just before saving. If a descriptor, get's that instead of value from object." descriptor = getattr(model_instance, self.attname) if isinstance(descriptor, self.datatype): return descriptor return self.field.value_from_object(model_instance) def get_db_prep_save(self, value, *args, **kwargs): if not isinstance(value, basestring): value = self.dumps(value) return super(JSONField, self).get_db_prep_save(value, *args, **kwargs) def value_to_string(self, obj): """ Turns the value to a JSON string. :param obj: An object. :return: A string. """ return self.dumps(self.value_from_object(obj)) def dumps(self, data): """ Encodes data and dumps. :param data: A value. :return: An encoded string. """ return self.encoder.encode(data) def loads(self, val): """ :param val: A JSON encoddd string. 
:return: A dict with data from val """ try: val = self.decoder.decode(val)#, encoding=settings.DEFAULT_CHARSET) # XXX We need to investigate why this is happening once we have # a solid repro case. if isinstance(val, basestring): logging.warning("JSONField decode error. Expected dictionary, " "got string for input '%s'" % val) # For whatever reason, we may have gotten back val = self.decoder.decode(val)#, encoding=settings.DEFAULT_CHARSET) except ValueError: val = None return val def south_field_triple(self): """ Returns a suitable description of this field for South." :return: A tuple of field_class, args and kwargs from South's introspector. """ # We'll just introspect the _actual_ field. from south.modelsinspector import introspector field_class = "django.db.models.fields.TextField" args, kwargs = introspector(self) # That's our definition! return (field_class, args, kwargs)
Holland continues her Viking saga (following The Soul Thief and The Witches' Kitchen) with an uneven but entertaining adventure tale. After their war band is defeated in a legendary clash at Hjorunga Bay, cousins Conn and Raef Corbansson, who narrowly escaped the carnage, find themselves in the frozen north of Scandinavia. As free warriors, they volunteer for an expedition to seize the port city of Chersonese in the heart of the Greek Sea. The journey is long and arduous, Chersonese is a more formidable target than expected, and as Conn and Raef discover, their new allies are treacherous. Holland's recreation of the expedition is imaginative and creditable, and her characters—especially introspective Raef and impulsive Conn—are sharply drawn and authentic. The action ebbs and flows, the plot is occasionally opaque and the proliferation of obscure names and places can be daunting, but the novelty of Vikings out of their traditional milieu keeps the pages turning.
import logging
import os
import shutil
import subprocess
import sys

from mecoshark.processor.baseprocessor import BaseProcessor
from mecoshark.resultparser.sourcemeterparser import SourcemeterParser


class JavaProcessor(BaseProcessor):
    """
    Implements :class:`~mecoshark.processor.baseprocessor.BaseProcessor` for
    Java.

    Analysis strategies are tried in order: Maven (``pom.xml``), Ant
    (``build.xml``), then a plain directory-based analysis as the fallback.
    """

    @property
    def supported_languages(self):
        """
        See: :func:`~mecoshark.processor.baseprocessor.BaseProcessor.supported_languages`
        """
        return ['java']

    @property
    def enabled(self):
        """
        See: :func:`~mecoshark.processor.baseprocessor.BaseProcessor.enabled`
        """
        return True

    @property
    def threshold(self):
        """
        See: :func:`~mecoshark.processor.baseprocessor.BaseProcessor.threshold`
        """
        return 0.4

    def __init__(self, output_path, input_path):
        super().__init__(output_path, input_path)
        self.logger = logging.getLogger("processor")

    def execute_sourcemeter(self):
        """
        Executes sourcemeter for the java language.

        Tries Maven first (if ``pom.xml`` exists), then Ant (if ``build.xml``
        exists), and finally falls back to a directory-based analysis.

        BUGFIX: ``failure_happened`` previously started as ``False`` and was
        only set when a build-tool attempt failed.  When no ``pom.xml``
        existed, the Ant branch (guarded by ``and failure_happened``) and the
        directory fallback (``if failure_happened``) were both skipped, so no
        analysis ran at all.  It now starts as ``True`` ("nothing has
        succeeded yet") and is cleared on success.
        """
        # Clean output directory
        shutil.rmtree(os.path.join(self.output_path, self.projectname), True)

        template_path = os.path.dirname(os.path.realpath(__file__)) + '/../../templates'

        # True until one of the build-system based analyses produces output.
        failure_happened = True

        # try maven
        if os.path.exists(os.path.join(self.input_path, 'pom.xml')):
            self.logger.info("Trying out maven...")
            self.prepare_template(os.path.join(template_path, 'build-maven.sh'))
            self.prepare_template(os.path.join(template_path, 'analyze-maven.sh'))
            try:
                subprocess.run(os.path.join(self.output_path, 'analyze-maven.sh'), shell=True)
            except Exception:
                # Fall through to the next strategy instead of aborting the
                # whole process (a previous revision called sys.exit(1) here,
                # making Maven failures fatal while Ant failures were not).
                self.logger.warning("Maven analysis failed; trying next strategy.")

            if self.is_output_produced():
                failure_happened = False
            else:
                shutil.rmtree(os.path.join(self.output_path, self.projectname), True)

        # try ant
        if failure_happened and os.path.exists(os.path.join(self.input_path, 'build.xml')):
            self.logger.info("Trying out ant...")
            self.prepare_template(os.path.join(template_path, 'build-ant.sh'))
            self.prepare_template(os.path.join(template_path, 'analyze-ant.sh'))
            try:
                subprocess.run(os.path.join(self.output_path, 'analyze-ant.sh'), shell=True)
            except Exception:
                self.logger.warning("Ant analysis failed; trying next strategy.")

            if self.is_output_produced():
                failure_happened = False
            else:
                shutil.rmtree(os.path.join(self.output_path, self.projectname), True)

        # use directory based analysis otherwise
        if failure_happened:
            self.logger.info("Trying out directory analysis for java...")
            self.prepare_template(os.path.join(template_path, 'analyze-dir.sh'))

            # The analyze-dir.sh template expects paths without a trailing
            # slash.
            if self.input_path.endswith("/"):
                self.input_path = self.input_path[:-1]
            if self.output_path.endswith("/"):
                self.output_path = self.output_path[:-1]

            try:
                subprocess.run(os.path.join(self.output_path, 'analyze-dir.sh'), shell=True)
            except Exception:
                self.logger.warning("Directory analysis failed.")

            if not self.is_output_produced():
                self.logger.error('Problem in using mecoshark! No output was produced!')

    def is_output_produced(self):
        """
        Checks if output was produced for the process.

        A run counts as successful when the SourceMeter result directory
        contains exactly 12 CSV files.

        :return: boolean
        """
        output_path = os.path.join(self.output_path, self.projectname, 'java')

        if not os.path.exists(output_path):
            return False

        output_path = os.path.join(output_path, os.listdir(output_path)[0])
        number_of_files = len([name for name in os.listdir(output_path)
                               if name.endswith('.csv')])
        return number_of_files == 12

    def process(self, revision, url, options):
        """
        See: :func:`~mecoshark.processor.baseprocessor.BaseProcessor.process`

        Processes the given revision. First executes sourcemeter with given
        options, then it creates the parser to store the data.

        :param revision: revision
        :param url: url of the project that is analyzed
        :param options: options for execution
        """
        self.execute_sourcemeter()

        meco_path = os.path.join(self.output_path, self.projectname, 'java')
        output_path = os.path.join(meco_path, os.listdir(meco_path)[0])

        parser = SourcemeterParser(output_path, self.input_path, url, revision)
        parser.store_data()

        # delete directory
        shutil.rmtree(os.path.join(self.output_path, self.projectname), True)
When you hear Homeowner Associations, you probably immediately conjure up images of old ladies with blue hair, sitting around all day, tattling on their neighbors for breaking the rules. Like buzzards waiting for something to die, they spy on you between their blinds and the minute you hang something from your balcony, even for a minute, they’re on the phone, getting you in trouble. Okay, that may be a bit of a dramatization on my part. Homeowner Associations do actually have inherent worth. But what is that worth? Have you ever wondered what are the benefits (and real drawbacks) of an HOA? If you ever find yourself or a friend in the market to purchase a home or condo with an HOA, trust me, you’ll want to know these little tidbits. They can weigh heavily on your ultimate decision to purchase. Here are our top 3 benefits and drawbacks of a Homeowner Association (and their fees). Thank you so very, very much for all the guidance and many little extras that you provided. You always go above and beyond our expectations.
__version__ = '0.2.1'

from itertools import chain

import numpy as np
import pyglet
from Polygon import Polygon, setDataStyle, STYLE_NUMPY
from Polygon.Utils import pointList as point_list

setDataStyle(STYLE_NUMPY)


class Shape:
    """Graphical polygon primitive for use with `pyglet`_.

    Alternative constructor methods:

    - |Shape.circle|
    - |Shape.rectangle|
    - |Shape.regular_polygon|
    - |Shape.from_dict|

    Parameters
    ----------
    vertices : array-like or |Polygon|.
        If a |Polygon| is passed, its points will be used.
        Otherwise, `vertices` should be a sequence of `[x, y]` locations or
        an array with x and y columns.
    color : str or 3-tuple of int, optional
        Color, in R, G, B format.
        Alternatively, a key that refers to an element of `colors`.
    velocity : array-like
        Speed and direction of motion, in [dx_dt, dy_dt] format.
    angular_velocity : float
        Speed of angular motion, in counter-clockwise radians per second.
    colors : dict of tuple, optional
        Named colors, defined as R, G, B tuples.
        Useful for easily switching between a set of colors.

    Attributes
    ----------
    poly : |Polygon|
        Associated |Polygon| object.
    vertices : |array|
        An array of points, with x and y columns. Read-only.
    center : |array|
        The centroid of the shape.
        Setting center calls |Shape.translate|.
    position : |array|
        Alias for `center`.
    radius : |array|
        Mean distance from each point to the center.
        Setting radius calls |Shape.scale|.
    color : str or tuple of int
        The current color, in R, G, B format if `colors` was not passed.
        Otherwise, the current color is represented as a key in `colors`.
    colors : dict of tuple
        Named colors.
    velocity : |array|
        Speed and direction of linear motion.
    angular_velocity : float
        Speed of angular motion, in counter-clockwise radians per second.
    enabled : bool
        If False, the shape will not be drawn.

    """
    def __init__(self, vertices, color=(255, 255, 255), velocity=(0, 0),
                 angular_velocity=0, colors=None):
        if isinstance(vertices, Polygon):
            self.poly = vertices
        else:
            self.poly = Polygon(vertices)

        self.colors = colors
        self._color = 'primary'
        if colors:
            self.color = color
        else:
            self.colors = {'primary': color}

        self.velocity = np.asarray(velocity)
        self.angular_velocity = angular_velocity

        # Construct vertex_list.
        self._vertex_list = self._get_vertex_list()
        self.enabled = True

    @classmethod
    def regular_polygon(cls, center, radius, n_vertices, start_angle=0,
                        **kwargs):
        """Construct a regular polygon.

        Parameters
        ----------
        center : array-like
        radius : float
        n_vertices : int
        start_angle : float, optional
            Where to put the first point, relative to `center`,
            in radians counter-clockwise starting from the horizontal axis.
        kwargs
            Other keyword arguments are passed to the |Shape| constructor.

        """
        angles = (np.arange(n_vertices) * 2 * np.pi / n_vertices) + start_angle
        return cls(center + radius * np.array([np.cos(angles),
                                               np.sin(angles)]).T,
                   **kwargs)

    @classmethod
    def circle(cls, center, radius, n_vertices=50, **kwargs):
        """Construct a circle.

        Parameters
        ----------
        center : array-like
        radius : float
        n_vertices : int, optional
            Number of points to draw.
            Decrease for performance, increase for appearance.
        kwargs
            Other keyword arguments are passed to the |Shape| constructor.

        """
        return cls.regular_polygon(center, radius, n_vertices, **kwargs)

    @classmethod
    def rectangle(cls, vertices, **kwargs):
        """Shortcut for creating a rectangle aligned with the screen axes
        from only two corners.

        Parameters
        ----------
        vertices : array-like
            An array containing the ``[x, y]`` positions of two corners.
        kwargs
            Other keyword arguments are passed to the |Shape| constructor.

        """
        bottom_left, top_right = vertices
        top_left = [bottom_left[0], top_right[1]]
        bottom_right = [top_right[0], bottom_left[1]]
        return cls([bottom_left, bottom_right, top_right, top_left], **kwargs)

    @classmethod
    def from_dict(cls, spec):
        """Create a |Shape| from a dictionary specification.

        Parameters
        ----------
        spec : dict
            A dictionary with either the fields ``'center'`` and ``'radius'``
            (for a circle), ``'center'``, ``'radius'``, and ``'n_vertices'``
            (for a regular polygon), or ``'vertices'``.
            If only two vertices are given, they are assumed to be lower left
            and top right corners of a rectangle.
            Other fields are interpreted as keyword arguments.

        """
        spec = spec.copy()
        center = spec.pop('center', None)
        radius = spec.pop('radius', None)
        if center and radius:
            return cls.circle(center, radius, **spec)

        vertices = spec.pop('vertices')
        if len(vertices) == 2:
            return cls.rectangle(vertices, **spec)

        return cls(vertices, **spec)

    @property
    def vertices(self):
        return np.asarray(point_list(self.poly))

    @property
    def color(self):
        if len(self.colors) == 1:
            return self.colors[self._color]
        else:
            return self._color

    @color.setter
    def color(self, value):
        if value in self.colors:
            self._color = value
        else:
            self.colors[self._color] = value

    @property
    def _kwargs(self):
        """Keyword arguments for recreating the Shape from the vertices.

        """
        # BUGFIX: angular_velocity was previously dropped here, so shapes
        # produced by the arithmetic operators (e.g. __add__, __mul__) lost
        # their angular motion while keeping their linear velocity.
        return dict(color=self.color, velocity=self.velocity,
                    angular_velocity=self.angular_velocity,
                    colors=self.colors)

    @property
    def center(self):
        return np.asarray(self.poly.center())

    @center.setter
    def center(self, value):
        self.translate(np.asarray(value) - self.center)

    @property
    def radius(self):
        return np.linalg.norm(self.vertices - self.center, axis=1).mean()

    @radius.setter
    def radius(self, value):
        self.scale(value / self.radius)

    @property
    def _gl_vertices(self):
        return list(chain(self.center, *point_list(self.poly)))

    @property
    def _gl_colors(self):
        return (len(self) + 1) * self.colors[self._color]

    def distance_to(self, point):
        """Distance from center to arbitrary point.

        Parameters
        ----------
        point : array-like

        Returns
        -------
        float

        """
        return np.linalg.norm(self.center - point)

    def scale(self, factor, center=None):
        """Resize the shape by a proportion (e.g., 1 is unchanged), in-place.

        Parameters
        ----------
        factor : float or array-like
            If a scalar, the same factor will be applied in the x and y
            dimensions.
        center : array-like, optional
            Point around which to perform the scaling.
            If not passed, the center of the shape is used.

        """
        factor = np.asarray(factor)
        if len(factor.shape):
            args = list(factor)
        else:
            args = [factor, factor]
        if center is not None:
            args.extend(center)
        self.poly.scale(*args)
        return self

    def translate(self, vector):
        """Translate the shape along a vector, in-place.

        Parameters
        ----------
        vector : array-like

        """
        self.poly.shift(*vector)
        # Return self for chaining, consistent with scale/rotate/flip_y
        # (previously returned None).
        return self

    def rotate(self, angle, center=None):
        """Rotate the shape, in-place.

        Parameters
        ----------
        angle : float
            Angle to rotate, in radians counter-clockwise.
        center : array-like, optional
            Point about which to rotate.
            If not passed, the center of the shape will be used.

        """
        args = [angle]
        if center is not None:
            args.extend(center)
        self.poly.rotate(*args)
        return self

    def flip_x(self, center=None):
        """Flip the shape in the x direction, in-place.

        Parameters
        ----------
        center : array-like, optional
            Point about which to flip.
            If not passed, the center of the shape will be used.

        """
        if center is None:
            self.poly.flip()
        else:
            self.poly.flip(center[0])
        # Return self for chaining, consistent with flip_y (previously
        # returned None).
        return self

    def flip_y(self, center=None):
        """Flip the shape in the y direction, in-place.

        Parameters
        ----------
        center : array-like, optional
            Point about which to flip.
            If not passed, the center of the shape will be used.

        """
        if center is None:
            self.poly.flop()
        else:
            self.poly.flop(center[1])
        return self

    def flip(self, angle, center=None):
        """Flip the shape in an arbitrary direction.

        Parameters
        ----------
        angle : array-like
            The angle, in radians counter-clockwise from the horizontal axis,
            defining the angle about which to flip the shape (of a line
            through `center`).
        center : array-like, optional
            The point about which to flip.
            If not passed, the center of the shape will be used.

        """
        return self.rotate(-angle, center=center).flip_y(center=center).rotate(angle, center=center)

    def _get_vertex_list(self):
        # Triangle fan around the centroid: vertex 0 is the center, the rest
        # are the polygon's points; the final triangle wraps back to point 1.
        indices = []
        for i in range(1, len(self) + 1):
            indices.extend([0, i, i + 1])
        indices[-1] = 1
        return pyglet.graphics.vertex_list_indexed(
            len(self) + 1, indices,
            ('v2f', self._gl_vertices),
            ('c3B', self._gl_colors))

    def draw(self):
        """Draw the shape in the current OpenGL context.

        """
        if self.enabled:
            self._vertex_list.colors = self._gl_colors
            self._vertex_list.vertices = self._gl_vertices
            self._vertex_list.draw(pyglet.gl.GL_TRIANGLES)

    def update(self, dt):
        """Update the shape's position by moving it forward according to its
        velocity.

        Parameters
        ----------
        dt : float

        """
        self.translate(dt * self.velocity)
        self.rotate(dt * self.angular_velocity)

    def enable(self, enabled):
        """Set whether the shape should be drawn.

        Parameters
        ----------
        enabled : bool

        """
        self.enabled = enabled
        return self

    def overlaps(self, other):
        """Check if two shapes overlap.

        Parameters
        ----------
        other : |Shape|

        Returns
        -------
        bool

        """
        return bool(self.poly.overlaps(other.poly))

    def covers(self, other):
        """Check if the shape completely covers another shape.

        Parameters
        ----------
        other : |Shape|

        Returns
        -------
        bool

        """
        return bool(self.poly.covers(other.poly))

    def __repr__(self):
        kwarg_strs = []
        for arg, value in self._kwargs.items():
            if isinstance(value, str):
                value_str = "'{}'".format(value)
            elif isinstance(value, np.ndarray):
                value_str = '[{}, {}]'.format(*value)
            else:
                value_str = str(value)
            kwarg_strs.append(arg + '=' + value_str)
        kwargs = ',\n' + ', '.join(kwarg_strs)

        return '{cls}({points}{kwargs})'.format(
            cls=type(self).__name__,
            points='[{}]'.format(',\n'.join('[{}, {}]'.format(x, y)
                                            for x, y in self.vertices)),
            kwargs=kwargs,
        )

    def __eq__(self, other):
        if isinstance(other, Shape):
            if len(self) != len(other):
                return False
            return (np.all(np.isclose(np.sort(self.vertices, axis=0),
                                      np.sort(other.vertices, axis=0))) and
                    self.colors == other.colors and
                    self.color == other.color and
                    np.all(np.isclose(self.velocity, other.velocity)))
        else:
            return False

    def __bool__(self):
        return True

    def __getitem__(self, item):
        return self.vertices[item]

    def __len__(self):
        return self.poly.nPoints()

    def __add__(self, other):
        if isinstance(other, Shape):
            return type(self)(self.poly + other.poly)
        return type(self)(self.vertices + other, **self._kwargs)

    __radd__ = __add__

    def __sub__(self, other):
        if isinstance(other, Shape):
            return type(self)(self.poly - other.poly)
        return type(self)(self.vertices - other, **self._kwargs)

    def __mul__(self, other):
        return type(self)(self.vertices * other, **self._kwargs)

    def __rmul__(self, other):
        return type(self)(other * self.vertices, **self._kwargs)

    def __truediv__(self, other):
        return type(self)(self.vertices / other, **self._kwargs)

    __div__ = __truediv__

    def __xor__(self, other):
        return type(self)(self.poly ^ other.poly, **self._kwargs)

    def __and__(self, other):
        return type(self)(self.poly & other.poly, **self._kwargs)

    def __or__(self, other):
        return type(self)(self.poly | other.poly, **self._kwargs)

    def __iadd__(self, other):
        self.translate(other)
        return self

    def __isub__(self, other):
        self.translate(-np.asarray(other))
        return self

    def __imul__(self, other):
        if isinstance(other, int) or isinstance(other, float):
            self.poly.scale(other, other)
        elif len(other) == 2:
            self.poly.scale(*other)
        return self

    def __itruediv__(self, other):
        if isinstance(other, int) or isinstance(other, float):
            self.poly.scale(1/other, 1/other)
        elif len(other) == 2:
            self.poly.scale(1/other[0], 1/other[1])
        return self

    __idiv__ = __itruediv__

    position = center
Pros / The base station has an above-average battery life. Cons / This service charges activation and equipment-delivery fees. Verdict / Philips Lifeline is one of the best-known names in the medical alert community and provides reliable service. This fall alert system is designed as wearable fall detection with a neck pendant that automatically detects if you take a fall. The necklace is waterproof so you can wear it in the shower or bath. The fall detection sensor has a long battery life and does not require recharging. Along with the fall detection pendant, the Philips Lifeline AutoAlert system comes with a communicator intercom and access to 24-hour assistance via its U.S.-based response center, which can provide you with emergency assistance when you need it. The fall detection pendant also includes a help button, which you push in the event of an emergency. Fall detection technology is designed to work anywhere in your home that is within range of the base station communicator. However, the range may vary depending on the construction of your home. In our hands-on range tests, Philips AutoAlert performed above average. The company lists the maximum range at 400 feet; however, we found the pendant communicated with the base station at up to 625 feet in our maximum range field test. In our indoor range tests, the pendant had a reach of 160 feet. We also tested the battery life of the base units to see how long they would continue to monitor your activity in the event of a power outage. Philips AutoAlert's base unit lasted for 28 hours without an additional power source, which is above average in our tests. The AutoAlert is a subscription-based system. Philips Lifeline's options all come with a monthly fee, which varies depending on the service you choose. However, Philips is one of the only companies on our lineup that charges fees for activation and for equipment delivery.
If you want to add more to your medical alert system with fall detection, Philips gives you the option of adding premium features. For a fee, you can choose add-ons such as a lockbox or a voice extender. If you need to contact Philips customer service, you can do so via email or telephone. Philips is also one of the few services on our lineup to offer assistance via live chat. Philips scored an 80 percent in our customer service tests. Representatives were helpful and respectful during our phone interactions; however, some responses were slow over email. The Philips Lifeline fall detection sensor can be there for you in the event of a fall when no one else is. The system automatically sends an alert to emergency services so you can have help on the way in no time. You can simply wear the pendant 24 hours a day to give you and your family members peace of mind.
#!/usr/bin/env python # -*- coding: UTF-8 -*- import wx import wx.lib.mixins.listctrl as listmix import sqlite3 cfgFile_g=".tresor2.settings" # ########################################################### # Die Verschluesselungs-Klasse # braucht: python-pycrypto - Cryptographic modules for Python import struct import random import hashlib from Crypto.Cipher import AES import base64 class DedeCrypt(): # Setzt das Passwort und liefert einen verschluesselten # Hash-Wert dieses Passworts zurueck. def PasswortEinstellen(self, password): self.key=hashlib.sha256(password).digest() return(self.verschluesseln(base64.b64encode(self.key))) # Liefert True, wenn "passwordhash" auf das via PasswortEinstellen # eingestellte Passwort passt. Sonst False. def PasswortPruefen(self, passwordhash): try: tk=base64.b64decode(self.entschluesseln(passwordhash)) except TypeError: return(False) if tk==self.key: return(True) return(False) # Liefert die verschluesselte Version der Liste "lst" def ListeVerschluesseln(self, lst): return(self.verschluesseln(self.ListePacken(lst))) # Liefert die entschluesselte Version von "txt" als Liste def ListeEntschluesseln(self, txt): return(self.ListeEntpacken(self.entschluesseln(txt))) # Liefert die verschluesselte Version des Strings "textu" def verschluesseln(self, textu): iv=self.__RandomString(16) encryptor=AES.new(self.key, AES.MODE_ECB, iv) return(base64.b64encode(iv + encryptor.encrypt(self.__String16(textu)))) # Liefert die entschluesselte Version von "textv" def entschluesseln(self, textv): c1=base64.b64decode(textv) iv=c1[:16] decryptor=AES.new(self.key, AES.MODE_ECB, iv) c2=c1[16:] try: c3=decryptor.decrypt(c2) except ValueError: return("<error>") return(self.__StringAuspacken(c3)) # Liefert einen String mit zufaelligen Zeichen der Laenge "laenge" def __RandomString(self, laenge): return(''.join(chr(random.randint(0, 0xFF)) for i in range(laenge))) # Liefert soviele zufaellige Zeichen, wie noetig sind, um "text" # damit zu einer ganzzahlig durch 
    # 16 teilbaren Laenge aufzufuellen
    # (i.e. returns as many random characters as needed to pad "text"
    # to a length evenly divisible by 16 — the AES block size.)
    def __Laenge16(self, text):
        if len(text)%16==0:
            return("")
        return(self.__RandomString(16-len(text)%16))

    # Returns "text" with a 2-byte little-endian length prefix, padded
    # with random characters to a multiple of 16 bytes.
    def __String16(self, text):
        r=struct.pack('<h', len(text))+text
        return(r+self.__Laenge16(r))

    # Unpacks a string packed with __String16 back to its original form;
    # returns "<error>" if the decoded length is implausible.
    def __StringAuspacken(self, text):
        l=struct.unpack('<h', text[:2])[0]
        if l<0:
            return("<error>")
        return(text[2:l+2])

    # Returns the contents of "liste" as one packed string: each element
    # is preceded by its 2-byte little-endian length.
    def ListePacken(self, liste):
        s=""
        for i in liste:
            s+=struct.pack("<h", len(i))
            s+=i
        return(s)

    # Returns the list corresponding to the packed string "strg"
    # (inverse of ListePacken).
    def ListeEntpacken(self, strg):
        p=0
        lst=[]
        while p<len(strg):
            l=struct.unpack("<h", strg[p:p+2])[0]
            lst.append(strg[p+2:p+2+l])
            p+=2+l
        return(lst)

# ###########################################################
# The top-level frame for the program.
class TresorGUI(wx.Frame):
    def __init__(self, parent, pos=wx.DefaultPosition, size=wx.DefaultSize):
        wx.Frame.__init__(self, None, wx.ID_ANY, "Passwort-Verwaltung", pos=pos, size=size)
        self.parent=parent
        # The Tresor panel builds and owns the whole UI.
        Tresor(self)

# ###########################################################
# listmix.ColumnSorterMixin requires a ListCtrl subclass like this.
class MeinListCtrl(wx.ListCtrl):
    def __init__(self, parent, ID=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize, style=0):
        wx.ListCtrl.__init__(self, parent, ID, pos, size, style)

# ###########################################################
# The actual GUI panel.
class Tresor(wx.Panel, listmix.ColumnSorterMixin):

    # ###########################################################
    # Required by listmix.ColumnSorterMixin.
    def GetListCtrl(self):
        return self.liste

    def OnColClick(self, event):
        event.Skip()

    # ###########################################################
    # Initialisiert Variablen und laedt das Settings-File.
# Ist im Settings-File ein DB-Name enthalten, wird diese # DB geoeffnet. def __init__(self, parent): wx.Panel.__init__(self, parent, -1, style=wx.WANTS_CHARS) self.parent=parent # [0]=dienst, [1]=userid, [2]=password, [3]=kommentar, [4]=datum, [5]=ID self.dDataMap={} # display self.sDataMap={} # sort self.nachDBID={} # Key ist DB-ID self.suchstring="" # Genutzt von OnCharEvent self.cltimer=None # Genutzt von OnCharEvent self.dbname="" # Init ueber SettingsFile self.show_pwd=False # Init ueber SettingsFile self.font=None # Init ueber SettingsFile self.offeneDB=False # wird in DBoeffnen ggf. auf True gesetzt self.tresor=DedeCrypt() self.SettingsFileLaden() self.FensterAufbauen() self.MenueAufbauen() self.MenueUpdate() self.mview.Check(302, self.show_pwd) if self.dbname!="": wx.CallLater(100, self.DBoeffnen) # etwas Zeit geben, um das Fenster aufzubauen # ########################################################### # Laedt Daten aus dem Settings-File. Wenn das File nicht # existiert, werden Defaultwerte eingestellt. # Aufruf aus: __init__ def SettingsFileLaden(self): fc=wx.FileConfig(localFilename=cfgFile_g) self.dbname=fc.Read("dbname") self.show_pwd=bool(fc.ReadInt("show_pwd")) fs=fc.ReadInt("font_size") ff=fc.ReadInt("font_family") fy=fc.ReadInt("font_style") fw=fc.ReadInt("font_weight") fu=fc.ReadInt("font_underline") fa=fc.Read( "font_face") if fa=="": self.font=wx.Font(12, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL) else: self.font=wx.Font(fs, ff, fy, fw, fu, fa) # ########################################################### # Erstellt das Haupt-Control # Zusaetzlich zu den drei sichtbaren Spalten existiert eine # vierte Spalte, in der sie jeweilige DB-ID steht. 
    #
    # (Creates the main list control; besides the three visible columns
    # there is a fourth, zero-width column holding the DB row ID.)
    # Called from: __init__
    def FensterAufbauen(self):
        self.liste=MeinListCtrl(self, style=wx.LC_REPORT|wx.BORDER_SUNKEN|wx.LC_SORT_ASCENDING|wx.LC_SINGLE_SEL)
        self.liste.SetFont(self.font)
        self.liste.Bind(wx.EVT_CHAR, self.OnCharEvent)
        zb=7  # approximate pixel width of one character
        self.liste.InsertColumn(0, 'Dienst',   width=20*zb)
        self.liste.InsertColumn(1, 'Username', width=20*zb)
        self.liste.InsertColumn(2, 'Passwort', width=20*zb)
        # Hidden column carrying the DB row ID for each entry.
        self.liste.InsertColumn(3, 'ID', width=0)
        # ColumnSorterMixin sorts on itemDataMap; 3 = number of sortable columns.
        self.itemDataMap=self.sDataMap
        listmix.ColumnSorterMixin.__init__(self, 3)
        self.liste.Bind(wx.EVT_LIST_COL_CLICK, self.OnColClick)
        self.liste.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.OnRowDClick)
        self.liste.Bind(wx.EVT_CONTEXT_MENU, self.OnContextMenu)
        topsizer=wx.BoxSizer(wx.VERTICAL)
        # NOTE(review): lbsizer is built and the list added to it, but it is
        # never attached to anything — looks like leftover layout code.
        lbsizer= wx.BoxSizer(wx.HORIZONTAL)
        lbsizer.Add( self.liste, 1, wx.ALL|wx.EXPAND, 5)
        topsizer.Add(self.liste, 1, wx.ALL|wx.EXPAND, 5)
        self.liste.SetToolTip(wx.ToolTip('Doppelklick oeffnet den aktuellen Eintrag zum aendern'))
        self.SetSizer(topsizer)

    # ###########################################################
    # Creates the menu bar and the status bar, and wires every menu ID
    # to its handler.
    # Called from: __init__
    def MenueAufbauen(self):
        self.menubar=wx.MenuBar()
        self.mfile=wx.Menu()
        self.mfile.Append(101, '&Neue Datenbank', 'Legt eine neue Datenbank an')
        self.mfile.Append(102, '&Oeffnen', 'Oeffnet eine Datenbank')
        self.mfile.AppendSeparator()
        self.mfile.Append(105, '&Abgleichen (unfertig)', 'Importiert Änderungen aus einer weiteren Datenbank')
        self.mfile.AppendSeparator()
        self.mfile.Append(104, '&Speichern', 'Speichert die Programm-Einstellungen')
        self.mfile.AppendSeparator()
        self.mfile.Append(103, '&Beenden', 'Beendet das Programm')
        self.medit=wx.Menu()
        self.medit.Append(201, '&neuer Eintrag\tIns', 'Erstellt einen neuen Eintrag')
        self.medit.Append(202, 'Eintrag &aendern\tEnter', 'Oeffnet den aktuellen Eintrag zum Aendern')
        self.medit.Append(203, 'Eintrag &loeschen\tDel', 'Loescht den aktuellen Eintrag')
        self.medit.AppendSeparator()
        self.medit.Append(204, '&Username kopieren\tCtrl-N', 'Kopiert den aktuellen Username ins Clipboard')
        self.medit.Append(205, '&Passwort kopieren\tCtrl-P', 'Kopiert das aktuelle Passwort ins Clipboard')
        self.mview=wx.Menu()
        self.mview.Append(301, '&Font', 'Erlaubt die Auswahl einer anderen Schriftart')
        self.mview.AppendSeparator()
        # Last argument True makes 302 a checkable menu item.
        self.mview.Append(302, '&Passworte anzeigen', 'Schaltet die Anzeige der Passwoerter um', True)
        self.mhelp=wx.Menu()
        self.mhelp.Append(401, '&Ueber', 'Zeigt eine Versions-Info an')
        self.menubar.Append(self.mfile, '&Datei')
        self.menubar.Append(self.medit, 'B&earbeiten')
        self.menubar.Append(self.mview, '&Ansicht')
        self.menubar.Append(self.mhelp, '&Hilfe')
        self.parent.SetMenuBar(self.menubar)
        # Two status fields: filename (stretching) and search string (50px).
        self.parent.CreateStatusBar(2)
        self.parent.SetStatusWidths([-1, 50])
        self.parent.Bind(wx.EVT_MENU, self.NeueDBGewaehlt,          id=101)
        self.parent.Bind(wx.EVT_MENU, self.OeffnenGewaehlt,         id=102)
        self.parent.Bind(wx.EVT_MENU, self.SpeichernGewaehlt,       id=104)
        self.parent.Bind(wx.EVT_MENU, self.BeendenGewaehlt,         id=103)
        self.parent.Bind(wx.EVT_MENU, self.ImportDBGewaehlt,        id=105)
        self.parent.Bind(wx.EVT_MENU, self.neuerEintragGewaehlt,    id=201)
        self.parent.Bind(wx.EVT_MENU, self.EintragAendernGewaehlt,  id=202)
        self.parent.Bind(wx.EVT_MENU, self.EintragLoeschenGewaehlt, id=203)
        self.parent.Bind(wx.EVT_MENU, self.UsernameKopierenGewaehlt, id=204)
        self.parent.Bind(wx.EVT_MENU, self.PasswortKopierenGewaehlt, id=205)
        self.parent.Bind(wx.EVT_MENU, self.FontGewaehlt,            id=301)
        self.parent.Bind(wx.EVT_MENU, self.PasswortAnzeigenGewaehlt, id=302)
        self.parent.Bind(wx.EVT_MENU, self.UeberGewaehlt,           id=401)

    # ###########################################################
    # Setzt den Enabled-Status des Edit-Menues entspr.
"self.offeneDB" # Aufruf aus: MenueAufbauen, NeueDBGewaehlt, OeffnenGewaehlt, DBoeffnen def MenueUpdate(self): for i in range(201, 206): self.medit.Enable(i, self.offeneDB) self.mfile.Enable(105, self.offeneDB) if self.offeneDB==False: self.parent.SetStatusText("", 0) else: self.parent.SetStatusText(self.dbname, 0) # ########################################################### # Das Edit-Menue wird auch als Kontext-Menue dargestellt def OnContextMenu(self, event): self.liste.PopupMenu(self.medit) # ########################################################### # Menue: Neue DB # Fragt einen DB-Namen an, erstellt und initialisiert die DB # und oeffnet sie danach (das Passwort wird beim Oeffnen # abgefragt und mit der DB verknuepft). # # Aufruf aus: <Menue> def NeueDBGewaehlt(self, event): dlg=wx.FileDialog(self, message="neue DB", defaultDir=".", defaultFile="tresor2.sqlite", \ wildcard="DBs|*.sqlite|alle|*", style=wx.FD_SAVE) if dlg.ShowModal()!=wx.ID_OK: dlg.Destroy() return self.dbname=dlg.GetPath() dlg.Destroy() self.offeneDB=False self.MenueUpdate() self.liste.DeleteAllItems() self.connection=sqlite3.connect(self.dbname) self.cursor=self.connection.cursor() self.cursor.execute('CREATE TABLE UIDPWD' \ ' (ID INTEGER NOT NULL PRIMARY KEY,' \ ' daten VARCHAR)') self.cursor.execute('CREATE TABLE UIDPWDbackup' \ ' (ID INTEGER NOT NULL PRIMARY KEY,' \ ' daten VARCHAR,' \ ' backup DATE)') self.cursor.execute('CREATE TABLE pwdtest' \ ' (ID INTEGER PRIMARY KEY NOT NULL,' \ ' pwdhash VARCHAR)') self.connection.commit() fc=wx.FileConfig(localFilename=cfgFile_g) fc.Write("dbname", self.dbname) fc.Flush() self.DBoeffnen(db_frisch_angelegt=True) # ########################################################### # Menue: Oeffnen # Fragt einen DB-Namen an und oeffnet die DB mit diesem Namen. 
# # Aufruf aus: <Menue> def OeffnenGewaehlt(self, event): dlg=wx.FileDialog(self, message="DB oeffnen", defaultDir=".", defaultFile="tresor2.sqlite", \ wildcard="DBs|*.sqlite|alle|*", style=wx.FD_OPEN|wx.FD_FILE_MUST_EXIST) if dlg.ShowModal()!=wx.ID_OK: dlg.Destroy() return self.dbname=dlg.GetPath() dlg.Destroy() self.offeneDB=False self.MenueUpdate() self.liste.DeleteAllItems() self.DBoeffnen() # ########################################################### # Menue: Abgleichen # def ImportDBGewaehlt(self, event): if self.offeneDB==False: wx.MessageBox("Es ist noch keine Datenbank geladen!", "Fehler", wx.OK|wx.ICON_ERROR) return # self.cursor.execute('SELECT ID, daten, backup FROM UIDPWDbackup') # c=self.cursor.fetchall() # for i in c: # d=self.tresor.ListeEntschluesseln(i[1]) # print i[2], d dlg=wx.FileDialog(self, message="DB oeffnen", defaultDir=".", defaultFile="tresor2.sqlite", \ wildcard="DBs|*.sqlite|alle|*", style=wx.FD_OPEN|wx.FD_FILE_MUST_EXIST) if dlg.ShowModal()!=wx.ID_OK: dlg.Destroy() return dbname=dlg.GetPath() dlg.Destroy() tresor=DedeCrypt() dlg=wx.PasswordEntryDialog(self, "Bitte Passwort angeben", dbname) if dlg.ShowModal()!=wx.ID_OK: dlg.Destroy() return(False) pw=tresor.PasswortEinstellen(dlg.GetValue()) dlg.Destroy() connection=sqlite3.connect(dbname) cursor=connection.cursor() cursor.execute('SELECT pwdhash FROM pwdtest') c=cursor.fetchone() if tresor.PasswortPruefen(c[0])==False: wx.MessageBox("Passwort scheint falsch zu sein!", "Fehler", wx.OK|wx.ICON_ERROR) return(False) dDataMap={} cursor.execute('SELECT daten, ID FROM UIDPWD') c=cursor.fetchone() index=0 while c!=None: d=tresor.ListeEntschluesseln(c[0]) td=(d[0], d[1], d[2], d[3], d[4], str(c[1])) dDataMap.update({index : td}) index+=1 c=cursor.fetchone() for i in dDataMap.values(): found=False for j in self.dDataMap.values(): if i[5]==j[5]: if i[0]!=j[0] or i[0]!=j[0] or i[2]!=j[2] or i[3]!=j[3] or i[4]!=j[4]: print "\nÄnderung\n", i, "\n", j found=True else: found=True # Sätze sind 
identisch # if i[0].lower()==j[0].lower() and i[1].lower()==j[1].lower(): # # Dienst und User sind identisch # found=True # if i[2]!=j[2] or i[3]!=j[3] or i[4]!=j[4]: # print "\nÄnderung\n", i, "\n", j if found==False: print "\nNeu\n", i # ########################################################### # Menue: Einstellungen speichern # Gespeichert werden: # die Position des Fensters auf dem Bildschirm, # die Fenster-Abmessungen, # die Font, # der Passwort-Anzeige-Modus und # der Datenbank-Name # Aufruf aus: <Menue> def SpeichernGewaehlt(self, event): fc=wx.FileConfig(localFilename=cfgFile_g) sp=self.parent.GetScreenPosition() ss=self.parent.GetSizeTuple() fc.WriteInt("pos_x", sp[0]) fc.WriteInt("pos_y", sp[1]) fc.WriteInt("size_x" , ss[0]) fc.WriteInt("size_y" , ss[1]) fc.WriteInt("font_size", self.font.GetPointSize()) fc.WriteInt("font_family", self.font.GetFamily()) fc.WriteInt("font_style", self.font.GetStyle()) fc.WriteInt("font_weight", self.font.GetWeight()) fc.WriteInt("font_underline", self.font.GetUnderlined()) fc.Write( "font_face", self.font.GetFaceName()) fc.WriteInt("show_pwd", int(self.mview.IsChecked(302))) fc.Write( "dbname", self.dbname) fc.Flush() # ########################################################### # Menue: Programm beenden # Aufruf aus: <Menue>, OnCharEvent def BeendenGewaehlt(self, event): self.parent.Close() # ########################################################### # Menue: neuer Eintrag # Ruft den Satz-Aenderungs-Dialog im Neu-Modus auf. # # Aufruf aus: <Menue> def neuerEintragGewaehlt(self, event): self.EinzelSatzAnzeigeOeffnen(-1) # ########################################################### # Menue: Eintrag aendern # Ruft den Satz-Aenderungs-Dialog fuer den selektierten # Eintrag auf. 
# # Aufruf aus: <Menue> def EintragAendernGewaehlt(self, event): s=self.liste.GetFirstSelected() if s<0: wx.MessageBox("Kein Satz ausgewaehlt", "Fehler", wx.OK|wx.ICON_ERROR) return self.EinzelSatzAnzeigeOeffnen(s) # ########################################################### # Doppelklick auf einem Satz der Liste -> Satz aendern. def OnRowDClick(self, event): self.EinzelSatzAnzeigeOeffnen(event.GetIndex()) # ########################################################### # Menue: Eintrag loeschen # Loescht den selektierten Eintrag nach Rueckfrage und aktualisiert # die DB, um danach alles neu aus der DB nach "self.liste" zu laden. # # Aufruf aus: <Menue> def EintragLoeschenGewaehlt(self, event): idx=self.liste.GetFirstSelected() if idx<0: wx.MessageBox("Kein Satz ausgewaehlt", "Fehler", wx.OK|wx.ICON_ERROR) return d= self.liste.GetItem(idx, 0).GetText() # der Dienst wird nur fuer die Rueckfrage gebraucht i=int(self.liste.GetItem(idx, 3).GetText()) # DB-ID aus self.liste dlg=wx.MessageDialog(self, "Soll der Dienst <"+d+"> wirklich geloescht werden?", \ "Frage", wx.OK|wx.CANCEL) if dlg.ShowModal()==wx.ID_OK: self.cursor.execute('INSERT INTO UIDPWDbackup (daten, backup)' \ ' SELECT daten, date("now")' \ ' FROM UIDPWD WHERE ID=?', (i, )) self.cursor.execute('DELETE FROM UIDPWD WHERE ID=?', (i, )) self.connection.commit() if self.DatenLaden()==True: # 1x -1 fuer Count-auf-Index-Umrechnung und # 1x -1, weil ja ein Satz geloescht wurde # Beim Loeschen des letzten Satzes wird also -1 uebergeben self.DatenAnzeigen(min((idx, self.liste.GetItemCount()-2))) # ########################################################### # Menue: Username kopieren # Aufruf aus: <Menue> def UsernameKopierenGewaehlt(self, event): idx=self.liste.GetFirstSelected() if idx<0: wx.MessageBox("Kein Satz ausgewaehlt", "Fehler", wx.OK|wx.ICON_ERROR) return self.copy2clipboard(self.liste.GetItem(idx, 1).GetText()) # ########################################################### # Menue: Passwort kopieren # Aufruf 
    # aus: <Menue>
    # (Menu: copy password — copies the real password of the selected row,
    # looked up via the hidden DB-ID column, since the visible column may
    # be masked with asterisks.)
    def PasswortKopierenGewaehlt(self, event):
        idx=self.liste.GetFirstSelected()
        if idx<0:
            wx.MessageBox("Kein Satz ausgewaehlt", "Fehler", wx.OK|wx.ICON_ERROR)
            return
        i=int(self.liste.GetItem(idx, 3).GetText())
        self.copy2clipboard(self.nachDBID[i][2])

    # ###########################################################
    # Menu: Select font
    # Called from: <menu>
    def FontGewaehlt(self, event):
        data=wx.FontData()
        data.SetInitialFont(self.font)
        dlg=wx.FontDialog(self, data)
        if dlg.ShowModal()==wx.ID_OK:
            data=dlg.GetFontData()
            self.font=data.GetChosenFont()
            self.liste.SetFont(self.font)
        dlg.Destroy()

    # ###########################################################
    # Menu: "Show passwords" toggled — redraw the list so that the
    # password column is masked/unmasked accordingly.
    # Called from: <menu>
    def PasswortAnzeigenGewaehlt(self, event):
        self.DatenAnzeigen()

    # ###########################################################
    # Menu: About
    # Called from: <menu>
    def UeberGewaehlt(self, event):
        info=wx.AboutDialogInfo()
        info.SetName("Passwort-Verwaltung")
        info.SetVersion("1.0")
        info.SetCopyright("D.A. (04/05.2012)")
        info.SetDescription("Ein kleines Programm zum Verwalten von UserID/Passwort-Relationen")
        info.SetLicence("Dieses Programm ist freie Software gemaess GNU General Public License")
        info.AddDeveloper("Detlev Ahlgrimm")
        wx.AboutBox(info)

    # ###########################################################
    # Copies "txt" into the system clipboard.
    def copy2clipboard(self, txt):
        if wx.TheClipboard.Open():
            do=wx.TextDataObject()
            do.SetText(txt)
            wx.TheClipboard.SetData(do)
            wx.TheClipboard.Close()
        else:
            wx.MessageBox("Kann Clipboard nicht oeffnen", "Fehler", wx.OK|wx.ICON_ERROR)

    # ###########################################################
    # Oeffnen der DB.
    # Bei Parameter "db_frisch_angelegt"==True wird der DB
    # nach Passwort-Abfrage das eingegebene Passwort zugewiesen.
    # Wurde der Parameter nicht oder mit False uebergeben, wird
    # ebenfalls das Passwort abgefragt, dieses dann aber gegen
    # die DB geprueft. Wenn es nicht passt, wird abgebrochen.
# Wenn es passt, wird der Datenbank-Inhalt ausgelesen und # entschluesselt ins Programm / die Anzeige geladen. # # Aufruf aus: __init__, NeueDBGewaehlt, OeffnenGewaehlt def DBoeffnen(self, db_frisch_angelegt=False): self.parent.SetStatusText("", 0) dlg=wx.PasswordEntryDialog(self, "Bitte Passwort angeben", self.dbname) if dlg.ShowModal()!=wx.ID_OK: dlg.Destroy() self.liste.SetFocus() return(False) pw=self.tresor.PasswortEinstellen(dlg.GetValue()) dlg.Destroy() self.connection=sqlite3.connect(self.dbname) self.cursor=self.connection.cursor() if db_frisch_angelegt==True: self.cursor.execute('INSERT INTO pwdtest (pwdhash) VALUES (?)', (pw, )) self.connection.commit() else: self.cursor.execute('SELECT pwdhash FROM pwdtest') c=self.cursor.fetchone() if self.tresor.PasswortPruefen(c[0])==False: wx.MessageBox("Passwort scheint falsch zu sein!", "Fehler", wx.OK|wx.ICON_ERROR) return(False) self.offeneDB=True self.MenueUpdate() if self.DatenLaden()==True: self.DatenAnzeigen() self.parent.SetStatusText(self.dbname, 0) return(True) return(False) # ########################################################### # Laedt den Inhalt der aktuellen/geoeffneten DB nach: # self.dDataMap, self.sDataMap und self.nachDBID # Wenn das Passwort nicht auf den DB-Inhalt passt (was aber # eigentlich nicht vorkommen sollte), wird abgebrochen und # "False" zurueckgeliefert. Ansonsten "True". 
# # Aufruf aus: DBoeffnen, EintragLoeschenGewaehlt, EinzelSatzAnzeigeOeffnen def DatenLaden(self): self.dDataMap={} # display self.sDataMap={} # sort self.nachDBID={} # nach DB-ID # c[0] c[1] self.cursor.execute('SELECT daten, ID FROM UIDPWD') c=self.cursor.fetchone() index=0 while c!=None: d=self.tresor.ListeEntschluesseln(c[0]) td=(d[0], d[1], d[2], d[3], d[4], str(c[1])) ts=(d[0].lower(), d[1].lower(), d[2], d[3], d[4], c[1]) self.dDataMap.update({index : td}) self.sDataMap.update({index : ts}) self.nachDBID.update({c[1] : td}) index+=1 c=self.cursor.fetchone() return(True) # ########################################################### # Stellt den Inhalt von self.dDataMap dar. Die Spalte "Passwort" # wird je nach Menue-Status ausge-X-t oder lesbar dargestellt. # Durch die Uebergabe von "select" wird erreicht, dass der entsprechende # Eintrag selektiert wird. Bei Uebergabe eines Integers wird es als # Index in der Liste interpretiert, bei String als Dienst-Name. # Wurde nichts uebergeben, wird die Selektierung aus dem alten # Listenzustand uebernommen. # Sortierung und sichtbarer Ausschnitt wird, wenn moeglich, nach # Neubefuellung wiederhergestellt. # # Aufruf aus: DBoeffnen, EintragLoeschenGewaehlt, # PasswortAnzeigenGewaehlt, EinzelSatzAnzeigeOeffnen def DatenAnzeigen(self, select=None): aktuelleSortierung=self.GetSortState() if aktuelleSortierung[0]==-1: # wenn noch keine Sortierung eingestellt ist... 
aktuelleSortierung=(0, 1) # ...dann einstellen auf: spalte=0, aufsteigend obersterSichtbarerIndex=self.liste.GetTopItem() if select==None: selektierterIndex=self.liste.GetFirstSelected() if selektierterIndex==-1: selektierterIndex=0 else: if type(select)==int: selektierterIndex=select else: selektierterIndex=None # Kenner fuer "nach Befuellung bestimmen" setzen self.liste.DeleteAllItems() self.itemDataMap=self.sDataMap items=self.dDataMap.items() index=0 for key, data in items: self.liste.InsertStringItem(index, data[0]) self.liste.SetStringItem(index, 1, data[1]) if self.mview.IsChecked(302)==True: self.liste.SetStringItem(index, 2, data[2]) else: self.liste.SetStringItem(index, 2, "*"*len(data[2])) self.liste.SetStringItem(index, 3, data[5]) self.liste.SetItemData(index, key) index+=1 # Sortierung restaurieren self.SortListItems(aktuelleSortierung[0], aktuelleSortierung[1]) # untersten Eintrag sichtbar machen self.liste.Focus(self.liste.GetItemCount()-1) # alten obersten Eintrag sichtbar machen self.liste.Focus(obersterSichtbarerIndex) # damit sollte wieder der urspruenglich sichtbare Bereich angezeigt sein if selektierterIndex==None: selektierterIndex=self.liste.FindItem(0, select) self.liste.Select(selektierterIndex) self.liste.EnsureVisible(selektierterIndex) self.liste.SetFocus() # ########################################################### # Verarbeitet Tastendruecke im ListCtrl. 
    # ###########################################################
    # Handles key presses in the ListCtrl: ESC quits, printable
    # characters feed an incremental search over column 0.
    def OnCharEvent(self, event):
        # key codes of the German umlauts / sharp s (chr() can't map them)
        t={196 : "Ä", 214 : "Ö", 220 : "Ü", 223 : "ß",
           228 : "ä", 246 : "ö", 252 : "ü"}
        key=event.GetKeyCode()
        ctrl=wx.GetKeyState(wx.WXK_CONTROL)
        if key==wx.WXK_ESCAPE:                              # ESC
            self.BeendenGewaehlt(event)
        elif ctrl==False and ((key>32 and key<128) or       # standard ASCII
              (key in [196, 214, 220, 223, 228, 246, 252])):  # umlaut
            if key>128:
                self.suchstring+=t[key]
            else:
                self.suchstring+=chr(key)
            # show the accumulated search string in the status bar
            self.parent.SetStatusText(self.suchstring, 1)
            # prefix search over the first column
            p=self.liste.FindItem(0, self.suchstring, True)
            if p>=0:
                self.liste.Select(p)
                self.liste.EnsureVisible(p)
            if self.cltimer!=None and self.cltimer.IsRunning():
                # timer already running -> extend it
                self.cltimer.Restart(1000)
            else:
                # timer not running yet -> start it
                self.cltimer=wx.CallLater(1000, self.MehrzeichenSucheTimerAbgelaufen)
        else:
            event.Skip()

    # ###########################################################
    # Resets the search string to the empty string after one second.
    def MehrzeichenSucheTimerAbgelaufen(self):
        self.suchstring=""
        self.parent.SetStatusText(self.suchstring, 1)

    # ###########################################################
    # Opens the single-record dialog and processes the data. If "idx"
    # is passed as -1 a new record is created; for "idx" >= 0 it is
    # interpreted as an index into "self.liste" and that record is
    # changed.
    # If changes were made, the DB is updated and afterwards everything
    # is reloaded from the DB into "self.liste".
    #
    # Called from: neuerEintragGewaehlt, EintragAendernGewaehlt, OnRowDClick
    def EinzelSatzAnzeigeOeffnen(self, idx):
        if idx<0:
            # create a new record: empty fields, date preset to today
            t="Konto erstellen"
            d=u=p=k=""
            dt=wx.DateTime.Now()
        else:
            # change an existing record: look it up via the DB-ID that is
            # shown in list column 3
            t="Konto aendern"
            i=int(self.liste.GetItem(idx, 3).GetText())
            d=self.nachDBID[i][0]
            u=self.nachDBID[i][1]
            p=self.nachDBID[i][2]
            k=self.nachDBID[i][3]
            jahr, monat, tag=self.nachDBID[i][4].split("-")
            # wx.DateTimeFromDMY expects a 0-based month
            dt=wx.DateTimeFromDMY(int(tag), int(monat)-1, int(jahr))
        dlg=EinzelSatz(self, t, self.dDataMap, d, u, p, k, dt)
        if dlg.ShowModal()!=wx.ID_OK:
            dlg.Destroy()
            return
        daten=dlg.GibDaten()
        dlg.Destroy()
        # normalize the widget values to UTF-8 byte strings before encryption
        daten[0]=str(daten[0].encode("utf8"))
        daten[1]=str(daten[1].encode("utf8"))
        daten[2]=str(daten[2].encode("utf8"))
        daten[3]=str(daten[3].encode("utf8"))
        daten[4]=str(daten[4].FormatISODate())
        d=self.tresor.ListeVerschluesseln(daten)
        if idx<0:
            self.cursor.execute('INSERT INTO UIDPWD (daten) VALUES (?)', (d, ))
            self.connection.commit()
        else:
            id=i
            # keep the previous version in the backup table before updating
            self.cursor.execute('INSERT INTO UIDPWDbackup (daten, backup)' \
                                ' SELECT daten, date("now")' \
                                ' FROM UIDPWD WHERE ID=?', (id, ))
            self.cursor.execute('UPDATE UIDPWD SET daten=? WHERE ID=?', (d, id))
            self.connection.commit()
        if self.DatenLaden()==True:
            # reselect the just-edited record by its service name
            self.DatenAnzeigen(daten[0])


# ###########################################################
# A dialog for changing one record.
#
# Input : initial values for the text fields and
#         dDataMap, so that before the dialog closes it can be
#         checked whether the content of "dienst" is unique
# Output: possibly clipboard content (username or password)
#         a list with the new values:
#         [dienst, username, password, kommentar, datum]
#
class EinzelSatz(wx.Dialog):
    def __init__(self, parent, title, dDataMap, dienst="", user="", passwd="",
                 komment="", datum=""):
        super(EinzelSatz, self).__init__(parent=parent, title=title)
        self.dDataMap=dDataMap

        self.diensttxt=   wx.StaticText( self, label="&Dienst:")
        self.dienst=      wx.TextCtrl(   self, wx.ID_ANY, size=(200, -1))
        self.usernametxt= wx.StaticText( self, label="&Benutzername:")
        self.username=    wx.TextCtrl(   self, wx.ID_ANY, size=(200, -1))
        self.passwordtxt= wx.StaticText( self, label="&Passwort:")
        self.password=    wx.TextCtrl(   self, wx.ID_ANY, size=(200, -1))
        self.generieren=  wx.Button(     self, wx.ID_ANY, "&Generieren")
        self.datumtxt=    wx.StaticText( self, label="&Datum:")
        self.datum=       wx.DatePickerCtrl(self, wx.ID_ANY)
        self.kommentartxt=wx.StaticText( self, label="&Kommentar:")
        self.kommentar=   wx.TextCtrl(   self, wx.ID_ANY, size=(450, 100),
                                         style=wx.TE_MULTILINE)
        self.ok=          wx.Button(     self, wx.ID_OK, "&OK")
        self.abbruch=     wx.Button(     self, wx.ID_CANCEL, "&Abbruch")

        self.dienst.SetValue(dienst)
        self.username.SetValue(user)
        self.password.SetValue(passwd)
        self.kommentar.SetValue(komment)
        self.datum.SetValue(datum)

        topsizer= wx.BoxSizer(wx.VERTICAL)
        gbsizer=  wx.GridBagSizer(2, 3)
        l4sizer=  wx.BoxSizer(wx.HORIZONTAL)

        #      size(x, y)  pos(y, x)  span(y, x)
        gbsizer.Add(self.diensttxt,   (0, 0),
                    flag=wx.ALIGN_CENTER_VERTICAL|wx.ALL, border=1)
        gbsizer.Add(self.dienst,      (0, 1),
                    flag=wx.ALIGN_CENTER_VERTICAL|wx.ALL, border=1)
        gbsizer.Add(self.usernametxt, (1, 0),
                    flag=wx.ALIGN_CENTER_VERTICAL|wx.ALL, border=1)
        gbsizer.Add(self.username,    (1, 1),
                    flag=wx.ALIGN_CENTER_VERTICAL|wx.ALL, border=1)
        gbsizer.Add(self.passwordtxt, (2, 0),
                    flag=wx.ALIGN_CENTER_VERTICAL|wx.ALL, border=1)
        gbsizer.Add(self.password,    (2, 1),
                    flag=wx.ALIGN_CENTER_VERTICAL|wx.ALL, border=1)
        gbsizer.Add(self.generieren,  (2, 2),
                    flag=wx.LEFT, border=10)
        gbsizer.Add(self.datumtxt,    (3, 0),
                    flag=wx.ALIGN_CENTER_VERTICAL|wx.ALL, border=1)
        gbsizer.Add(self.datum,       (3, 1),
                    flag=wx.ALIGN_CENTER_VERTICAL|wx.ALL, border=1)
        l4sizer.Add(self.ok,      0, wx.ALL, 1)
        l4sizer.Add(self.abbruch, 0, wx.ALL, 1)
        topsizer.Add(gbsizer,           0, wx.ALL, 5)
        topsizer.Add(self.kommentartxt, 0, wx.ALL, 5)
        topsizer.Add(self.kommentar,    0, wx.ALL, 5)
        topsizer.Add(l4sizer,           0, wx.ALL, 5)
        self.SetSizerAndFit(topsizer)

        self.generieren.Bind( wx.EVT_BUTTON,      self.GenerierenGewaehlt)
        self.ok.Bind(         wx.EVT_BUTTON,      self.OkGewaehlt)
        self.abbruch.Bind(    wx.EVT_BUTTON,      self.AbbruchGewaehlt)
        self.username.Bind(   wx.EVT_LEFT_DCLICK, self.username_dclick)
        self.password.Bind(   wx.EVT_LEFT_DCLICK, self.password_dclick)

        self.username.SetToolTip(wx.ToolTip('Doppelklick kopiert den Namen ins Clipboard'))
        self.password.SetToolTip(wx.ToolTip('Doppelklick kopiert den Namen ins Clipboard'))
        self.ok.SetDefault()
        self.dienst.SetFocus()

    # ###########################################################
    # Copies self.username to the clipboard
    def username_dclick(self, event):
        self.username.SetSelection(-1, -1)
        self.copy2clipboard(self.username.GetValue())

    # ###########################################################
    # Copies self.password to the clipboard
    def password_dclick(self, event):
        self.password.SetSelection(-1, -1)
        self.copy2clipboard(self.password.GetValue())

    # ###########################################################
    # Copies "txt" to the clipboard
    def copy2clipboard(self, txt):
        if wx.TheClipboard.Open():
            do=wx.TextDataObject()
            do.SetText(txt)
            wx.TheClipboard.SetData(do)
            wx.TheClipboard.Close()
        else:
            wx.MessageBox("Kann Clipboard nicht oeffnen", "Fehler",
                          wx.OK|wx.ICON_ERROR)

    # ###########################################################
    # Button "Generieren": open the password generator dialog
    def GenerierenGewaehlt(self, event):
        dlg=PasswortGenerator(self)
        if dlg.ShowModal()==wx.ID_OK:
            self.password.SetValue(dlg.GibPasswort())
        dlg.Destroy()

    # ###########################################################
    # Button Ok
    def OkGewaehlt(self, event):
        self.EndModal(wx.ID_OK)

    # ###########################################################
    # Button Abbruch (cancel)
    def AbbruchGewaehlt(self, event):
        self.EndModal(wx.ID_CANCEL)

    # ###########################################################
    # Returns the entered data as a list
    def GibDaten(self):
        return([self.dienst.GetValue(),   self.username.GetValue(), \
                self.password.GetValue(), self.kommentar.GetValue(), \
                self.datum.GetValue()])


# ###########################################################
# A dialog for generating passwords
# Input : none
# Output: a string with a password (or "")
class PasswortGenerator(wx.Dialog):
    def __init__(self, parent, id=wx.ID_ANY, title="Passwort-Erzeugung"):
        wx.Dialog.__init__(self, parent, id, title)

        sb=wx.StaticBox(self, -1, " dieses Passwort... ")
        c=["gross/klein", "nur klein", "nur gross"]
        self.buchstaben_jn=   wx.CheckBox(self, wx.ID_ANY, "...enthaelt &Buchstaben")
        self.buchstaben_typ=  wx.RadioBox(self, wx.ID_ANY, "", choices=c)
        self.ziffern_jn=      wx.CheckBox(self, wx.ID_ANY, "...enthaelt &Ziffern")
        self.sonderzeichen_jn=wx.CheckBox(self, wx.ID_ANY, "...enthaelt &Sonderzeichen")
        self.beginn_jn=       wx.CheckBox(self, wx.ID_ANY, "...beg&innt mit einem Buchstaben")
        self.buchstaben_jn.SetValue(True)
        self.ziffern_jn.SetValue(True)
        self.beginn_jn.SetValue(True)
        st1= wx.StaticText(self, wx.ID_ANY, "...hat eine &Laenge von:")
        st2= wx.StaticText(self, wx.ID_ANY, " bis:")
        st3= wx.StaticText(self, wx.ID_ANY, " Zeichen")
        self.laenge_u=wx.SpinCtrl(self, wx.ID_ANY, "", size=(50, -1),
                                  min=4, max=32, initial=8)
        self.laenge_o=wx.SpinCtrl(self, wx.ID_ANY, "", size=(50, -1),
                                  min=8, max=40, initial=10)
        st4= wx.StaticText(self, wx.ID_ANY, "&Passwort:")
        self.passwort=wx.TextCtrl(self, wx.ID_ANY, size=(200, -1))
        dummy= wx.StaticText(self, wx.ID_ANY, "", size=(100, -1))
        erzeugen_but= wx.Button(self, wx.ID_ANY, "&Erzeuge")
        self.ok_but=  wx.Button(self, wx.ID_OK, "&Ok")
        abbruch_but=  wx.Button(self, wx.ID_CANCEL, "&Abbruch")
        # OK stays disabled until a sufficiently long password exists
        self.ok_but.Disable()

        topsizer=wx.BoxSizer(wx.VERTICAL)
        sbsizer= wx.StaticBoxSizer(sb, wx.VERTICAL)
        l1sizer= wx.BoxSizer(wx.HORIZONTAL)
        l2sizer= wx.BoxSizer(wx.HORIZONTAL)
        l3sizer= wx.BoxSizer(wx.HORIZONTAL)
        l4sizer= wx.BoxSizer(wx.HORIZONTAL)
        l1sizer.Add(self.buchstaben_jn,  0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
        l1sizer.Add(self.buchstaben_typ, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
        sbsizer.Add(l1sizer, 0, wx.ALL, 0)
        sbsizer.Add(self.ziffern_jn,       0, wx.ALL, 5)
        sbsizer.Add(self.sonderzeichen_jn, 0, wx.ALL, 5)
        sbsizer.Add(self.beginn_jn,        0, wx.ALL, 5)
        l2sizer.Add(st1,           0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
        l2sizer.Add(self.laenge_u, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
        l2sizer.Add(st2,           0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
        l2sizer.Add(self.laenge_o, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
        l2sizer.Add(st3,           0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
        sbsizer.Add(l2sizer, 0, wx.ALL, 0)
        topsizer.Add(sbsizer, 0, wx.ALL, 0)
        l3sizer.Add(st4,           0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
        l3sizer.Add(self.passwort, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
        topsizer.Add(l3sizer, 0, wx.ALL, 0)
        l4sizer.Add(erzeugen_but, 0, wx.ALL, 5)
        l4sizer.Add(dummy,        0, wx.ALL, 5)
        l4sizer.Add(self.ok_but,  0, wx.ALL, 5)
        l4sizer.Add(abbruch_but,  0, wx.ALL, 5)
        topsizer.Add(l4sizer, 0, wx.ALL, 0)

        self.buchstaben_jn.Bind(wx.EVT_CHECKBOX, self.buchstaben_jn_wahl)
        self.laenge_u.Bind(     wx.EVT_SPINCTRL, self.laenge_u_wahl)
        erzeugen_but.Bind(      wx.EVT_BUTTON,   self.erzeugen_but_wahl)
        self.passwort.Bind(     wx.EVT_TEXT,     self.passwort_wahl)

        self.SetSizerAndFit(topsizer)
        erzeugen_but.SetFocus()

    # ###########################################################
    # Returns the password
    def GibPasswort(self):
        return(self.passwort.GetValue())

    # ###########################################################
    # Controls the enabled state of the letter-type radio box
    # according to the letters-yes/no checkbox
    def buchstaben_jn_wahl(self, event):
        if self.buchstaben_jn.GetValue()==False:
            self.buchstaben_typ.Disable()
            self.beginn_jn.Disable()
        else:
            self.buchstaben_typ.Enable()
            self.beginn_jn.Enable()

    # ###########################################################
    # Ensures that: laenge_u <= laenge_o (min length <= max length)
    def laenge_u_wahl(self, event):
        self.laenge_o.SetRange(self.laenge_u.GetValue(), 40)

    # ###########################################################
    # Button "Erzeugen" (generate) chosen
    def erzeugen_but_wahl(self, event):
        # first set up the individual character pools
        bg="ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        bk="abcdefghijklmnopqrstuvwxyz"
        z="0123456789"
        s="!$%&/(){}?#*+-,;.:<>"
        # then combine the pools into one overall pool according
        # to the dialog settings
        bm=""
        if self.buchstaben_jn.GetValue()==True:
            bt=self.buchstaben_typ.GetSelection()
            if bt==0:
                bm+=bg+bk
            elif bt==1:
                bm+=bk
            else:
                bm+=bg
        wm=bm
        if self.ziffern_jn.GetValue()==True:
            wm+=z
        if self.sonderzeichen_jn.GetValue()==True:
            wm+=s
        # "wm" now holds the overall character pool
        pl=random.randrange(self.laenge_u.GetValue(), self.laenge_o.GetValue()+1)
        # NOTE(review): random.sample picks without replacement, so no
        # character repeats within the password -- confirm this is intended.
        if self.beginn_jn.IsEnabled()==True and self.beginn_jn.GetValue()==True:
            # must start with a letter
            pwl=random.sample(bm, 1)
            pwl+=random.sample(wm, pl-1)
        else:
            pwl=random.sample(wm, pl)
        pw=""
        for pwc in pwl:
            pw+=pwc
        self.passwort.SetValue(pw)

    # ###########################################################
    # Change to the password field.
    # If the password has the configured minimum length, the OK
    # button is enabled. Otherwise it is greyed out.
    def passwort_wahl(self, event):
        if len(self.passwort.GetValue())>=self.laenge_u.GetValue():
            self.ok_but.Enable()
        else:
            self.ok_but.Disable()


# ###########################################################
# The starter
if __name__=='__main__':
    # restore the saved window position/size from the config file
    fc=wx.FileConfig(localFilename=cfgFile_g)
    spx=fc.ReadInt("pos_x", -1)
    spy=fc.ReadInt("pos_y", -1)
    ssx=fc.ReadInt("size_x", -1)
    ssy=fc.ReadInt("size_y", -1)
    sp=(spx, spy)   # (-1, -1) corresponds to wx.DefaultPosition
    ss=(ssx, ssy)   # (-1, -1) corresponds to wx.DefaultSize

    app=wx.App()
    # NOTE(review): this binds Show()'s bool return value, not the frame
    frame=TresorGUI(None, pos=sp, size=ss).Show()
    app.MainLoop()
(H) 03/18/19 3:00 PM Practice Evergreen Sports Center . (H) 03/19/19 5:00 PM Practice LHS Gym . (H) 03/20/19 7:00 PM Practice Evergreen Sports Center . (H) 03/21/19 3:00 PM Practice LHS Gym . (H) 03/22/19 3:00 PM Practice Evergreen Sports Center . (H) 03/25/19 7:00 PM Practice Evergreen Sports Center . (H) 03/26/19 5:00 PM Practice LHS Gym . (H) 03/27/19 3:00 PM Practice Daisy Bronson . (H) 03/28/19 5:00 PM Practice LHS Gym . (H) 03/29/19 3:00 PM Practice Evergreen Sports Center . (H) 04/01/19 7:00 PM Practice Daisy Bronson . (H) 04/02/19 3:00 PM Practice Evergreen Sports Center . (H) 04/03/19 4:00 PM vs Profile School Remich Park Postponed . (A) 04/05/19 4:30 PM vs Inter-lakes HS Postponed . (A) 04/08/19 4:00 PM vs Gilford HS Postponed . (H) 04/09/19 6:00 PM Practice Daisy Bronson . (A) 04/11/19 4:00 PM vs Prospect Mountain HS 5 - 3 . (H) 04/12/19 4:30 PM Practice Daisy Bronson . (A) 04/15/19 3:30 PM vs Moultonborough Academy Postponed . (A) 04/16/19 4:30 PM vs Inter-lakes HS 3 - 6 . (H) 04/17/19 4:00 PM vs White Mountains Regional H.S. Remich Park 3 - 6 . (H) 04/18/19 3:00 PM Practice Remich Park Courts . (H) 04/19/19 3:00 PM Practice Remich Park Courts . (A) 04/29/19 4:00 PM vs Gilford HS . (A) 04/30/19 4:00 PM vs White Mountains Regional H.S. . (H) 05/01/19 3:00 PM Practice Apthorp Common . (H) 05/02/19 3:00 PM Practice Apthorp Common . (H) 05/03/19 3:00 PM vs Profile School Remich Park . (A) 05/03/19 4:00 PM vs Profile School . (H) 05/06/19 4:30 PM vs Inter-lakes HS Remich Park . (H) 05/08/19 2:00 PM vs Berlin Senior HS Remich Park . (A) 05/08/19 4:00 PM vs Berlin Senior HS . (A) 05/09/19 3:30 PM vs Moultonborough Academy Remich Park . (H) 05/13/19 4:00 PM vs Gilford HS Remich Park . (H) 05/14/19 3:30 PM vs Bishop Brady HS Remich Park . (H) 05/15/19 4:00 PM vs Moultonborough Academy .
# Copyright 2016 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Raid virtual driver"""

import copy
import re
import os
import json
import jsonschema
from jsonschema import exceptions as json_schema_exc

from megautils.raid_ircu import mega
from megautils import exception


CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
RAID_CONFIG_SCHEMA = os.path.join(CURRENT_DIR, "raid_config_schema.json")


class VirtualDriver(object):
    """One virtual driver (IR volume) on a SAS IR controller adapter."""

    def __init__(self, adapter_id=None, id=None):
        self.adapter = adapter_id
        self.id = id
        # the attributes below are populated by _handle() from LIST output
        self.volume_id = ''
        self.pi_supported = ''
        self.status_of_volume = ''
        self.volume_wwid = ''
        self.raid_level = ''
        self.size = ''
        self.physical_hard_disks = None

    def __flush__(self):
        """Re-read this volume's attributes from the controller.

        :raises exception.InvalidParameterValue: if adapter or id is unset
        """
        if self.adapter == None or self.id == None:
            raise exception.InvalidParameterValue()
        # NOTE(review): 'grep -w 1000' looks suspicious -- presumably context
        # lines were intended (e.g. 'grep -A 1000'); confirm against the
        # consumer of this command string.
        cmd = '%s LIST| grep -w 1000 "IR volume %s"' % (self.adapter, self.id)
        ret = self._get_client().command(cmd)
        self._handle(ret, multi_vd=False)

    def _get_client(self):
        """Return a fresh controller-client wrapper."""
        return mega.Mega()

    def _handle(self, retstr, multi_vd=True):
        """Parse controller LIST output lines into VirtualDriver objects.

        :param retstr: iterable of output lines from the controller tool
        :param multi_vd: if True return a list of all parsed volumes,
                         otherwise return only the first one
        :return: list of VirtualDriver copies (or a single one if not
                 multi_vd and several headers are seen)
        """
        vds = []
        for line in retstr:
            if line.startswith('IR volume'):
                if not multi_vd and len(vds) > 0:
                    return vds[0]
                # BUGFIX: flush the previously accumulated volume *before*
                # overwriting self.id; the old code parsed the new id first
                # (twice), so the stored copy carried the wrong id.
                if self.id is not None and multi_vd:
                    vds.append(self.copy())
                self.id = int(line.split(' ')[-1])
            if line.startswith(' Volume ID'):
                offset = line.find(':')
                self.volume_id = int(line[offset + 1:].strip())
            elif line.startswith(' PI Supported'):
                offset = line.find(':')
                self.pi_supported = line[offset + 1:].strip()
            elif line.startswith(' Status of volume'):
                offset = line.find(':')
                self.status_of_volume = line[offset + 1:].strip()
            elif line.startswith(' Volume wwid'):
                offset = line.find(':')
                self.volume_wwid = line[offset + 1:].strip()
            elif line.startswith(' RAID level'):
                offset = line.find(':')
                # BUGFIX: the slice end used the undefined name 'delim',
                # raising NameError on every 'RAID level' line.
                self.raid_level = line[offset + 1:].strip()
            elif line.startswith(' Size'):
                offset = line.find(':')
                self.size = int(line[offset + 1:].strip())
            elif line.startswith(' Physical hard disks'):
                # header only -- the member disks follow on ' PHY' lines
                if not self.physical_hard_disks:
                    self.physical_hard_disks = []
            elif line.startswith(' PHY'):
                offset = line.find(':')
                self.physical_hard_disks.append(line[offset + 1:].strip())

        # flush the last (or only) accumulated volume
        if self.id is not None:
            vds.append(self.copy())
        return vds

    def copy(self):
        """Return a deep copy snapshot of this object."""
        return copy.deepcopy(self)

    def create(self, raid_level, disks):
        """
        Create a virtual driver with disks

        :param raid_level: raid level
        :param disks: lsi mega raid create disk schema ('enclosure:slot')
        :raises exception.InvalidDiskFormater: on malformed disk spec
        :raises exception.InvalidParameterValue: on unsupported raid level
        :raises exception.MegaCLIError: if no 'Created VD' line is returned
        """
        disk_formater = re.compile(r'^[0-9]+:[0-9]+$')
        for disk in disks:
            if not re.match(disk_formater, disk):
                raise exception.InvalidDiskFormater(disk=disk)

        # BUGFIX: was 'mega.mega.RAID_10', which raised AttributeError.
        if raid_level in [mega.RAID_0, mega.RAID_1, mega.RAID_10]:
            cmd = '%s CREATE %s MAX %s' % \
                (self.adapter,
                 mega.RAID_LEVEL_INPUT_MAPPING.get(raid_level),
                 ' '.join(disks))
        else:
            # BUGFIX: an unsupported level previously fell through and
            # crashed with NameError on the undefined 'cmd'.
            raise exception.InvalidParameterValue()
        ret = self._get_client().command(cmd)
        self.id = None
        # NOTE(review): elsewhere the command output is iterated directly;
        # confirm command() really returns a file-like with readlines().
        for line in ret.readlines():
            offset = line.find('Created VD')
            if offset < 0:
                continue
            self.id = line[offset + 11:]
            break
        if not self.id:
            raise exception.MegaCLIError()
        self.__flush__()

    def destroy(self):
        """
        Delete this raid
        :return:
        """
        self.__flush__()
        cmd = '%s DELETEVOLUME %s' % (self.adapter, self.volume_id)
        self._get_client().command(cmd)
        self.id = None

    def getall_virtual_drivers(self):
        """
        Get all virtual drivers
        :return: list of VirtualDriver objects
        :raises exception.InvalidParameterValue: if adapter is unset
        """
        if self.adapter == None:
            raise exception.InvalidParameterValue()
        cmd = '%s LIST' % self.adapter
        ret = self._get_client().command(cmd)
        return self._handle(ret, multi_vd=True)

    def set_boot_able(self):
        """
        Set current virtual driver bootable
        :return:
        """
        self.__flush__()
        cmd = '%s BOOTIR %s' % (self.adapter, self.volume_id)
        self._get_client().command(cmd)
Ever heard of Better World Shopper? No? It's a site that rates companies on their social and environmental responsibility. The goal is to encourage shoppers to vote with their wallets by selecting companies that are taking strides to create a world that thrives on human rights, animal protection, community involvement, social justice, and protecting the environment. Check it out at www.betterworldshopper.com. Scope out your favorite shopping spots and see how they were graded. By the way, you can download Better World Shopper as an app on your iPhone.
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import with_statement """ A class which handles loading the pricing files. """ import os.path from os.path import join as pjoin try: import simplejson as json JSONDecodeError = json.JSONDecodeError except ImportError: import json JSONDecodeError = ValueError from libcloud.utils.connection import get_response_object __all__ = [ 'get_pricing', 'get_size_price', 'set_pricing', 'clear_pricing_data', 'download_pricing_file' ] # Default URL to the pricing file DEFAULT_FILE_URL = 'https://git-wip-us.apache.org/repos/asf?p=libcloud.git;a=blob_plain;f=libcloud/data/pricing.json' # NOQA CURRENT_DIRECTORY = os.path.dirname(os.path.abspath(__file__)) DEFAULT_PRICING_FILE_PATH = pjoin(CURRENT_DIRECTORY, 'data/pricing.json') CUSTOM_PRICING_FILE_PATH = os.path.expanduser('~/.libcloud/pricing.json') # Pricing data cache PRICING_DATA = { 'compute': {}, 'storage': {} } VALID_PRICING_DRIVER_TYPES = ['compute', 'storage'] def get_pricing_file_path(file_path=None): if os.path.exists(CUSTOM_PRICING_FILE_PATH) and \ os.path.isfile(CUSTOM_PRICING_FILE_PATH): # Custom pricing file is available, use it return CUSTOM_PRICING_FILE_PATH return DEFAULT_PRICING_FILE_PATH def get_pricing(driver_type, driver_name, 
pricing_file_path=None): """ Return pricing for the provided driver. :type driver_type: ``str`` :param driver_type: Driver type ('compute' or 'storage') :type driver_name: ``str`` :param driver_name: Driver name :type pricing_file_path: ``str`` :param pricing_file_path: Custom path to a price file. If not provided it uses a default path. :rtype: ``dict`` :return: Dictionary with pricing where a key name is size ID and the value is a price. """ if driver_type not in VALID_PRICING_DRIVER_TYPES: raise AttributeError('Invalid driver type: %s', driver_type) if driver_name in PRICING_DATA[driver_type]: return PRICING_DATA[driver_type][driver_name] if not pricing_file_path: pricing_file_path = get_pricing_file_path(file_path=pricing_file_path) with open(pricing_file_path) as fp: content = fp.read() pricing_data = json.loads(content) size_pricing = pricing_data[driver_type][driver_name] for driver_type in VALID_PRICING_DRIVER_TYPES: # pylint: disable=maybe-no-member pricing = pricing_data.get(driver_type, None) if pricing: PRICING_DATA[driver_type] = pricing return size_pricing def set_pricing(driver_type, driver_name, pricing): """ Populate the driver pricing dictionary. :type driver_type: ``str`` :param driver_type: Driver type ('compute' or 'storage') :type driver_name: ``str`` :param driver_name: Driver name :type pricing: ``dict`` :param pricing: Dictionary where a key is a size ID and a value is a price. """ PRICING_DATA[driver_type][driver_name] = pricing def get_size_price(driver_type, driver_name, size_id): """ Return price for the provided size. :type driver_type: ``str`` :param driver_type: Driver type ('compute' or 'storage') :type driver_name: ``str`` :param driver_name: Driver name :type size_id: ``str`` or ``int`` :param size_id: Unique size ID (can be an integer or a string - depends on the driver) :rtype: ``float`` :return: Size price. 
""" pricing = get_pricing(driver_type=driver_type, driver_name=driver_name) price = float(pricing[size_id]) return price def invalidate_pricing_cache(): """ Invalidate pricing cache for all the drivers. """ PRICING_DATA['compute'] = {} PRICING_DATA['storage'] = {} def clear_pricing_data(): """ Invalidate pricing cache for all the drivers. Note: This method does the same thing as invalidate_pricing_cache and is here for backward compatibility reasons. """ invalidate_pricing_cache() def invalidate_module_pricing_cache(driver_type, driver_name): """ Invalidate the cache for the specified driver. :type driver_type: ``str`` :param driver_type: Driver type ('compute' or 'storage') :type driver_name: ``str`` :param driver_name: Driver name """ if driver_name in PRICING_DATA[driver_type]: del PRICING_DATA[driver_type][driver_name] def download_pricing_file(file_url=DEFAULT_FILE_URL, file_path=CUSTOM_PRICING_FILE_PATH): """ Download pricing file from the file_url and save it to file_path. :type file_url: ``str`` :param file_url: URL pointing to the pricing file. :type file_path: ``str`` :param file_path: Path where a download pricing file will be saved. 
""" dir_name = os.path.dirname(file_path) if not os.path.exists(dir_name): # Verify a valid path is provided msg = ('Can\'t write to %s, directory %s, doesn\'t exist' % (file_path, dir_name)) raise ValueError(msg) if os.path.exists(file_path) and os.path.isdir(file_path): msg = ('Can\'t write to %s file path because it\'s a' ' directory' % (file_path)) raise ValueError(msg) response = get_response_object(file_url) body = response.body # Verify pricing file is valid try: data = json.loads(body) except JSONDecodeError: msg = 'Provided URL doesn\'t contain valid pricing data' raise Exception(msg) # pylint: disable=maybe-no-member if not data.get('updated', None): msg = 'Provided URL doesn\'t contain valid pricing data' raise Exception(msg) # No need to stream it since file is small with open(file_path, 'w') as file_handle: file_handle.write(body)
Physical Therapist/Full time 40 hours Description Kindred Rehabilitation Services is the largest diversified provider of rehabilitation therapy in the country. Through RehabCare and Kindred Hospital Rehabilitation Services, we provide leading therapy to more than 2,000 sites of service across different settings in the care continuum and have been managing rehab for more than 30 years. We provide rehabilitation services, including physical, occupational and speech-language therapies to virtually every care setting including inpatient, outpatient, skilled nursing, home health, long-term acute care and assisted living. With locations across 47 states, we are certain to have a rehab job for you. Your career growth begins when you join an interdisciplinary team, where doctors, nurses, therapists and other experts work together to form individualized care plans for our patients and residents. Opportunities through our development programs, training seminars and university partnerships, not only allow for continual career growth but emphasize our commitment to investing in our employees and developing future healthcare leaders. The goal of our team is to focus on each patient as an individual to ensure that we are meeting their clinical needs and creating a fun and dynamic healing environment. Each employee's dedication is essential to meet and exceed the needs of each patient, resident and family we serve. Ranked as one of Fortune magazine's "Most Admired Healthcare Companies" for 8 years, Kindred welcomes you to join our team and build a career that touches lives. As a Physical Therapist / PT you will: Put your physical therapy skills to work where they're really needed - evaluate a patient's condition, develop a treatment plan, and help them get better, day by day. You'll also instruct the nursing staff and the patient's families on follow-through programs that build on the progress they've made.
Communicate patient progress or problems to supervisor and other team members; assist with patient scheduling and post charges daily to patient records. Document patient care in accordance with RehabCare, regulatory, licensing, payer and accrediting requirements. Instruct patient's family or nursing staff in follow-through programs. Maintain equipment and work area in a safe and clean condition. Make presentations to support marketing efforts, at team conferences and in-services. Handle job responsibilities in accordance with the Company's Code of Business Conduct, the Corporate Compliance Agreement, appropriate professional standard and applicable state/federal laws. physical therapist, physical therapy, physical therapy assistant, physical therapist therapy, physical therapy aide, home health physical therapist, Acute care, ADL, ADLs, ALF, Balance dysfunction, case management, case manager, clinical, clinic, clinical rotation, geriatric, geriatrics, Healthcare, health care, Homecare, home care, home health, homehealth, hospital, inpatient, inpatient PT, inpatient P.T., inpatient physical therapist, inpatient therapist, licensed therapist, licensed PT, licensed P.T., licensed physical therapist, modalities, neuro, neurological, ortho, ortho PT, ortho P.T., ortho therapist, ortho physical therapist, orthopedics, orthopedic, orthopedic PT, orthopedic P.T., orthopedic physical therapist, PT, P.T., physical therapist, physical therapy, DPT, doctorate PT, doctorate P.T., doctorate physical therapist, medical, medical care, therapist, therapy Care Manager, therapy case manager, therapy case management, therapy Job, therapy Jobs, PT job, PT jobs, P.T. job, P.T. 
jobs, physical therapy job, physical therapy jobs, physical therapist job, physical therapist jobs, Oasis, outpatient, outpatient clinic, out patient, outpatient rehab, outpatient PT, outpatient P.T., outpatient therapist, outpatient physical therapy, outpatient physical therapist, PRN, PDM, rehab, rehab PT, rehab P.T., rehab physical therapist, rehab setting, rehabilitative, therapeutic, safe strides, safe strides PT, safe strides P.T., safe strides physical therapist, skilled visit, SNF, sports med, sports medicine, sports med therapist, sports med physical therapist, sports medical physical therapist, subacute, sub acute, post acute, therapy, travel PT, travel P.T., travel therapist, travel physical therapist, traveling PT, traveling P.T., traveling physical therapist, rehabilitation services, rehabcare, hospital rehabilitation services, inpatient rehabilitation services, skilled rehabilitation services, IRF, HRS, SR, RHC, RHB #MON-HRS Qualifications As a Physical Therapist / PT you will have: Degree from an accredited Physical Therapy program. Minimum of one year physical therapy experience preferred. Current and unrestricted Physical Therapy license in the state where services are rendered. Current CPR certification. Strong organizational and communication skills. If you are a current Kindred/RehabCare employee Click Here .
""" Filename: plot_zonal_toa_breakdown.py Author: Damien Irving, irving.damien@gmail.com Description: """ # Import general Python modules import sys, os, pdb, glob import argparse import numpy import iris from iris.experimental.equalise_cubes import equalise_attributes import iris.plot as iplt import matplotlib.pyplot as plt from matplotlib import gridspec import seaborn seaborn.set_context('talk') # Import my modules cwd = os.getcwd() repo_dir = '/' for directory in cwd.split('/')[1:]: repo_dir = os.path.join(repo_dir, directory) if directory == 'ocean-analysis': break modules_dir = os.path.join(repo_dir, 'modules') sys.path.append(modules_dir) try: import general_io as gio import timeseries import convenient_universal as uconv import grids except ImportError: raise ImportError('Must run this script from anywhere within the ocean-analysis git repo') # Define functions aa_physics = {'CanESM2': 'p4', 'CCSM4': 'p10', 'CSIRO-Mk3-6-0': 'p4', 'GFDL-CM3': 'p1', 'GISS-E2-H': 'p107', 'GISS-E2-R': 'p107', 'NorESM1-M': 'p1'} def ensemble_grid(): """Make a dummy cube with desired grid.""" lat_values = numpy.arange(-89.5, 90, 1.0) latitude = iris.coords.DimCoord(lat_values, var_name='lat', standard_name='latitude', long_name='latitude', units='degrees_north', coord_system=iris.coord_systems.GeogCS(iris.fileformats.pp.EARTH_RADIUS)) dummy_data = numpy.zeros(len(lat_values)) new_cube = iris.cube.Cube(dummy_data, dim_coords_and_dims=[(latitude, 0)]) new_cube.coord('latitude').guess_bounds() return new_cube def ensemble_mean(cube_list): """Calculate the ensemble mean.""" if len(cube_list) > 1: equalise_attributes(cube_list) ensemble_cube = cube_list.merge_cube() ensemble_mean = ensemble_cube.collapsed('ensemble_member', iris.analysis.MEAN) else: ensemble_mean = cube_list[0] return ensemble_mean def calc_anomaly(cube): """Calculate the anomaly.""" anomaly = cube.copy() anomaly.data = anomaly.data - anomaly.data[0] anomaly = anomaly[-1, ::] anomaly.remove_coord('time') return 
anomaly def regrid(anomaly, ref_cube): """Regrid to reference cube, preserving the data sum""" lat_bounds = anomaly.coord('latitude').bounds lat_diffs = numpy.apply_along_axis(lambda x: x[1] - x[0], 1, lat_bounds) anomaly_scaled = anomaly / lat_diffs ref_points = [('latitude', ref_cube.coord('latitude').points)] anomaly_regridded = anomaly_scaled.interpolate(ref_points, iris.analysis.Linear()) ref_lat_bounds = ref_cube.coord('latitude').bounds ref_lat_diffs = numpy.apply_along_axis(lambda x: x[1] - x[0], 1, ref_lat_bounds) new_anomaly = anomaly_regridded * ref_lat_diffs return new_anomaly def get_data(infile, var, metadata_dict, time_constraint, ensemble_number, ref_cube=False): """Get data""" if infile: cube = iris.load_cube(infile[0], var & time_constraint) metadata_dict[infile[0]] = cube.attributes['history'] anomaly = calc_anomaly(cube) final_value = anomaly.data.sum() print(var, 'final global total:', final_value) if ref_cube: grid_match = ref_cube.coord('latitude') == cube.coord('latitude') if not grid_match: anomaly = regrid(anomaly, ref_cube) final_value = anomaly.data.sum() print(var, 'final global total (after regrid):', final_value) if ref_cube.standard_name: anomaly.replace_coord(ref_cube.coord('latitude')) else: if not anomaly.coord('latitude').has_bounds(): anomaly.coord('latitude').bounds = ref_cube.coord('latitude').bounds new_aux_coord = iris.coords.AuxCoord(ensemble_number, long_name='ensemble_member', units='no_unit') anomaly.add_aux_coord(new_aux_coord) else: cube = None anomaly = None final_value = None return cube, anomaly, metadata_dict def plot_breakdown(gs, rndt_anomaly, rsdt_anomaly, rsut_anomaly, rlut_anomaly, linewidth=None, decorate=True, ylim=True): """Plot netTOA and its component parts""" ax = plt.subplot(gs) plt.sca(ax) if decorate: labels = ['netTOA', 'rsdt', 'rsut', 'rlut'] else: labels = [None, None, None, None] iplt.plot(rndt_anomaly, color='black', label=labels[0], linewidth=linewidth) iplt.plot(rsdt_anomaly, color='yellow', 
label=labels[1], linewidth=linewidth) iplt.plot(rsut_anomaly * -1, color='orange', label=labels[2], linewidth=linewidth) iplt.plot(rlut_anomaly * -1, color='purple', label=labels[3], linewidth=linewidth) if ylim: ylower, yupper = ylim plt.ylim(ylower * 1e22, yupper * 1e22) if decorate: plt.ylabel('$J \; lat^{-1}$') plt.xlim(-90, 90) plt.axhline(y=0, color='0.5', linestyle='--') plt.legend() plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0), useMathText=True) ax.yaxis.major.formatter._useMathText = True def get_time_text(time_bounds): """Time text for plot title""" start_year = time_bounds[0].split('-')[0] end_year = time_bounds[-1].split('-')[0] time_text = '%s-%s' %(start_year, end_year) return time_text def main(inargs): """Run program""" nexp = len(inargs.experiments) fig = plt.figure(figsize=[11 * nexp, 14]) gs = gridspec.GridSpec(2, nexp) nmodels = len(inargs.models) ensemble_ref_cube = ensemble_grid() if nmodels > 1 else None var_list = ['rndt', 'rsdt', 'rsut', 'rlut'] plot_index = 0 time_constraint = gio.get_time_constraint(inargs.time) time_text = get_time_text(inargs.time) ensemble_dict = {} for experiment in inargs.experiments: data_dict = {} for var in var_list: data_dict[var] = iris.cube.CubeList([]) for index, model in enumerate(inargs.models): mip = 'r1i1' + aa_physics[model] if experiment == 'historicalMisc' else 'r1i1p1' dir_exp = experiment.split('-')[-1] file_exp = 'historical-' + experiment if experiment[0:3] == 'rcp' else experiment mydir = '/g/data/r87/dbi599/DRSv2/CMIP5/%s/%s/yr' %(model, dir_exp) rndt_file = glob.glob('%s/atmos/%s/rndt/latest/dedrifted/rndt-zonal-sum_Ayr_%s_%s_%s_cumsum-all.nc' %(mydir, mip, model, file_exp, mip)) rsdt_file = glob.glob('%s/atmos/%s/rsdt/latest/dedrifted/rsdt-zonal-sum_Ayr_%s_%s_%s_cumsum-all.nc' %(mydir, mip, model, file_exp, mip)) rsut_file = glob.glob('%s/atmos/%s/rsut/latest/dedrifted/rsut-zonal-sum_Ayr_%s_%s_%s_cumsum-all.nc' %(mydir, mip, model, file_exp, mip)) rlut_file = 
glob.glob('%s/atmos/%s/rlut/latest/dedrifted/rlut-zonal-sum_Ayr_%s_%s_%s_cumsum-all.nc' %(mydir, mip, model, file_exp, mip)) anomaly_dict = {} metadata_dict = {} rndt_cube, anomaly_dict['rndt'], metadata_dict = get_data(rndt_file, 'TOA Incoming Net Radiation', metadata_dict, time_constraint, index, ref_cube=ensemble_ref_cube) rsdt_cube, anomaly_dict['rsdt'], metadata_dict = get_data(rsdt_file, 'toa_incoming_shortwave_flux', metadata_dict, time_constraint, index, ref_cube=ensemble_ref_cube) rsut_cube, anomaly_dict['rsut'], metadata_dict = get_data(rsut_file, 'toa_outgoing_shortwave_flux', metadata_dict, time_constraint, index, ref_cube=ensemble_ref_cube) rlut_cube, anomaly_dict['rlut'], metadata_dict = get_data(rlut_file, 'toa_outgoing_longwave_flux', metadata_dict, time_constraint, index, ref_cube=ensemble_ref_cube) if nmodels > 1: plot_breakdown(gs[plot_index], anomaly_dict['rndt'], anomaly_dict['rsdt'], anomaly_dict['rsut'], anomaly_dict['rlut'], linewidth=0.3, decorate=False, ylim=inargs.ylim) for var in var_list: data_dict[var].append(anomaly_dict[var]) ensemble_dict[experiment] = {} for var in var_list: cube_list = iris.cube.CubeList(filter(None, data_dict[var])) ensemble_dict[experiment][var] = ensemble_mean(cube_list) linewidth = None if nmodels == 1 else 4.0 model_label = 'ensemble' if nmodels > 1 else inargs.models[0] experiment_label = 'historicalAA' if experiment == 'historicalMisc' else experiment plot_breakdown(gs[plot_index], ensemble_dict[experiment]['rndt'], ensemble_dict[experiment]['rsdt'], ensemble_dict[experiment]['rsut'], ensemble_dict[experiment]['rlut'], ylim=inargs.ylim) plt.title(experiment_label) plot_index = plot_index + 1 fig.suptitle('zonally integrated heat accumulation, ' + time_text, fontsize='large') dpi = inargs.dpi if inargs.dpi else plt.savefig.__globals__['rcParams']['figure.dpi'] print('dpi =', dpi) plt.savefig(inargs.outfile, bbox_inches='tight', dpi=dpi) gio.write_metadata(inargs.outfile, file_info=metadata_dict) if __name__ 
== '__main__': extra_info =""" author: Damien Irving, irving.damien@gmail.com """ description = 'Plot ensemble timeseries' parser = argparse.ArgumentParser(description=description, epilog=extra_info, argument_default=argparse.SUPPRESS, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument("outfile", type=str, help="name of output file. e.g. /g/data/r87/dbi599/figures/energy-check-zonal/energy-check-zonal_yr_model_experiment_mip_1861-2005.png") parser.add_argument("--models", type=str, nargs='*', help="models") parser.add_argument("--experiments", type=str, nargs='*', choices=('historical', 'historicalGHG', 'historicalMisc', 'historical-rcp85', 'rcp85'), help="experiments") parser.add_argument("--time", type=str, nargs=2, metavar=('START_DATE', 'END_DATE'), default=('1861-01-01', '2005-12-31'), help="Time period [default = 1861-2005]") parser.add_argument("--ylim", type=float, nargs=2, default=None, help="y limits for plots (x 10^22)") parser.add_argument("--dpi", type=float, default=None, help="Figure resolution in dots per square inch [default=auto]") args = parser.parse_args() main(args)
This is likely a driver problem. Have you installed the latest driver for your GPU? Drivers for NVidia GPUs can be found here: http://www.nvidia.com/Download/Find.aspx?lang=en-us. Also, please provide us with system information (CPU, GPU, RAM and OS) if the problem still persists after downloading the most recent driver. Not sure if it helps, but I was experiencing crashing at the same area. Made sure my drivers were up to date. Still got a crash. Lowered the graphics settings to normal: no crashing. Will experiment more later.
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Initial server communication to determine session parameters."""

from google.protobuf import message
import requests

from absl import logging

from tensorboard import version
from tensorboard.plugins.scalar import metadata as scalars_metadata
from tensorboard.uploader.proto import server_info_pb2


# Request timeout for communicating with remote server.
_REQUEST_TIMEOUT_SECONDS = 10

# Minimum interval between initiating write WriteScalar RPCs, if not specified
# by server_info, in milliseconds
_DEFAULT_MIN_SCALAR_REQUEST_INTERVAL = 5000

# Minimum interval between initiating write WriteTensor RPCs, if not specified
# by server_info, in milliseconds.
_DEFAULT_MIN_TENSOR_REQUEST_INTERVAL = 1000

# Minimum interval between initiating blob write RPC streams, if not specified
# by server_info, in milliseconds.
# This may differ from the above RPC rate limits, because blob streams
# are not batched, so sending a sequence of N blobs requires N streams, which
# could reasonably be sent more frequently.
_DEFAULT_MIN_BLOB_REQUEST_INTERVAL = 1000

# Maximum WriteScalar request size, if not specified by server_info, in bytes.
# The server-side limit is 4 MiB [1]; we should pad a bit to mitigate any errors
# in our bookkeeping. Currently, we pad a lot because WriteScalar is relatively
# slow and we would otherwise risk Deadline Exceeded errors.
#
# [1]: https://github.com/grpc/grpc/blob/e70d8582b4b0eedc45e3d25a57b58a08b94a9f4a/include/grpc/impl/codegen/grpc_types.h#L447  # pylint: disable=line-too-long
_DEFAULT_MAX_SCALAR_REQUEST_SIZE = 128 * (2 ** 10)  # 128KiB

# Maximum WriteTensor request size, if not specified by server_info, in bytes.
# The server-side limit is 4 MiB [1]; we should pad a bit to mitigate any errors
# in our bookkeeping. Currently, we pad a lot.
#
# [1]: https://github.com/grpc/grpc/blob/e70d8582b4b0eedc45e3d25a57b58a08b94a9f4a/include/grpc/impl/codegen/grpc_types.h#L447  # pylint: disable=line-too-long
_DEFAULT_MAX_TENSOR_REQUEST_SIZE = 512 * (2 ** 10)  # 512KiB

# Maximum WriteBlob request size, if not specified by server_info, in bytes.
# The server-side limit is 4 MiB [1]; we pad with a 256 KiB chunk to mitigate
# any errors in our bookkeeping.
#
# [1]: https://github.com/grpc/grpc/blob/e70d8582b4b0eedc45e3d25a57b58a08b94a9f4a/include/grpc/impl/codegen/grpc_types.h#L447  # pylint: disable=line-too-long
_DEFAULT_MAX_BLOB_REQUEST_SIZE = 4 * (2 ** 20) - 256 * (2 ** 10)  # 4MiB-256KiB

# Maximum blob size, if not specified by server_info, in bytes.
_DEFAULT_MAX_BLOB_SIZE = 10 * (2 ** 20)  # 10MiB

# Maximum tensor point size, if not specified by server_info, in bytes.
_DEFAULT_MAX_TENSOR_POINT_SIZE = 16 * (2 ** 10)  # 16KiB


def _server_info_request(upload_plugins):
    """Generates a ServerInfoRequest

    Args:
      upload_plugins: List of plugin names requested by the user and to be
        verified by the server.

    Returns:
      A `server_info_pb2.ServerInfoRequest` message.
    """
    request = server_info_pb2.ServerInfoRequest()
    request.version = version.VERSION
    request.plugin_specification.upload_plugins[:] = upload_plugins
    return request


def fetch_server_info(origin, upload_plugins):
    """Fetches server info from a remote server.

    Args:
      origin: The server with which to communicate. Should be a string
        like "https://tensorboard.dev", including protocol, host, and (if
        needed) port.
      upload_plugins: List of plugins names requested by the user and to be
        verified by the server.

    Returns:
      A `server_info_pb2.ServerInfoResponse` message.

    Raises:
      CommunicationError: Upon failure to connect to or successfully
        communicate with the remote server.
    """
    endpoint = "%s/api/uploader" % origin
    server_info_request = _server_info_request(upload_plugins)
    post_body = server_info_request.SerializeToString()
    logging.info("Requested server info: <%r>", server_info_request)
    try:
        response = requests.post(
            endpoint,
            data=post_body,
            timeout=_REQUEST_TIMEOUT_SECONDS,
            headers={"User-Agent": "tensorboard/%s" % version.VERSION},
        )
    except requests.RequestException as e:
        # Chain the cause so the underlying network error stays visible
        # in the traceback (PEP 3134).
        raise CommunicationError("Failed to connect to backend: %s" % e) from e
    if not response.ok:
        raise CommunicationError(
            "Non-OK status from backend (%d %s): %r"
            % (response.status_code, response.reason, response.content)
        )
    try:
        return server_info_pb2.ServerInfoResponse.FromString(response.content)
    except message.DecodeError as e:
        raise CommunicationError(
            "Corrupt response from backend (%s): %r" % (e, response.content)
        ) from e


def create_server_info(frontend_origin, api_endpoint, upload_plugins):
    """Manually creates server info given a frontend and backend.

    Args:
      frontend_origin: The origin of the TensorBoard.dev frontend, like
        "https://tensorboard.dev" or "http://localhost:8000".
      api_endpoint: As to `server_info_pb2.ApiServer.endpoint`.
      upload_plugins: List of plugin names requested by the user and to be
        verified by the server.

    Returns:
      A `server_info_pb2.ServerInfoResponse` message.
    """
    result = server_info_pb2.ServerInfoResponse()
    result.compatibility.verdict = server_info_pb2.VERDICT_OK
    result.api_server.endpoint = api_endpoint
    url_format = result.url_format
    # Wrap the placeholder in extra braces until it cannot collide with a
    # literal substring of the frontend origin.
    placeholder = "{{EID}}"
    while placeholder in frontend_origin:
        placeholder = "{%s}" % placeholder
    url_format.template = "%s/experiment/%s/" % (frontend_origin, placeholder)
    url_format.id_placeholder = placeholder
    result.plugin_control.allowed_plugins[:] = upload_plugins
    return result


def experiment_url(server_info, experiment_id):
    """Formats a URL that will resolve to the provided experiment.

    Args:
      server_info: A `server_info_pb2.ServerInfoResponse` message.
      experiment_id: A string; the ID of the experiment to link to.

    Returns:
      A URL resolving to the given experiment, as a string.
    """
    url_format = server_info.url_format
    return url_format.template.replace(url_format.id_placeholder, experiment_id)


def allowed_plugins(server_info):
    """Determines which plugins may upload data.

    This pulls from the `plugin_control` on the `server_info` when that
    submessage is set, else falls back to a default.

    Args:
      server_info: A `server_info_pb2.ServerInfoResponse` message.

    Returns:
      A `frozenset` of plugin names.
    """
    if server_info.HasField("plugin_control"):
        return frozenset(server_info.plugin_control.allowed_plugins)
    else:
        # Old server: gracefully degrade to scalars only, which have
        # been supported since launch. TODO(@wchargin): Promote this
        # branch to an error once we're confident that we won't roll
        # back to old server versions.
        return frozenset((scalars_metadata.PLUGIN_NAME,))


def upload_limits(server_info):
    """Returns UploadLimits, from server_info if possible, otherwise from
    defaults.

    NOTE(review): when `server_info.upload_limits` is set, the defaults
    below are written into that submessage in place, i.e. the caller's
    `server_info` is mutated — confirm this is intended before reuse.

    Args:
      server_info: A `server_info_pb2.ServerInfoResponse` message.

    Returns:
      An instance of UploadLimits.
    """
    if server_info.HasField("upload_limits"):
        upload_limits = server_info.upload_limits
    else:
        upload_limits = server_info_pb2.UploadLimits()

    # Fill any unset (zero-valued) field with its default.
    if not upload_limits.max_scalar_request_size:
        upload_limits.max_scalar_request_size = _DEFAULT_MAX_SCALAR_REQUEST_SIZE
    if not upload_limits.max_tensor_request_size:
        upload_limits.max_tensor_request_size = _DEFAULT_MAX_TENSOR_REQUEST_SIZE
    if not upload_limits.max_blob_request_size:
        upload_limits.max_blob_request_size = _DEFAULT_MAX_BLOB_REQUEST_SIZE
    if not upload_limits.min_scalar_request_interval:
        upload_limits.min_scalar_request_interval = (
            _DEFAULT_MIN_SCALAR_REQUEST_INTERVAL
        )
    if not upload_limits.min_tensor_request_interval:
        upload_limits.min_tensor_request_interval = (
            _DEFAULT_MIN_TENSOR_REQUEST_INTERVAL
        )
    if not upload_limits.min_blob_request_interval:
        upload_limits.min_blob_request_interval = (
            _DEFAULT_MIN_BLOB_REQUEST_INTERVAL
        )
    if not upload_limits.max_blob_size:
        upload_limits.max_blob_size = _DEFAULT_MAX_BLOB_SIZE
    if not upload_limits.max_tensor_point_size:
        upload_limits.max_tensor_point_size = _DEFAULT_MAX_TENSOR_POINT_SIZE

    return upload_limits


class CommunicationError(RuntimeError):
    """Raised upon failure to communicate with the server."""

    pass
Calling your web page after your current domain name may possibly appear totally obvious to a few of you, however you'll end up being amazed to understand that not really every website is branded after the domain even though the site owner is the owner of that web address. Naming a website after its domain name is important, for the easy rationale that whenever customers think of your website, they will likely think of it by name. If it turns out your name is as well your Domain name (ie, web url), they'll instantly understand where to go. By way of example, at any time someone look into myonlinecasino.com, they don't really need to ponder what url to put into their internet browser to make sure to reach it. Your website name is also your URL. What if you are not able to purchase the domain term of your choice? It certainly will depend on how well bent you are to this special brand name. For those who possess an active brand name that you're famous for, you will definitely probably not wish to ditch that name merely because you couldn't purchase the name. All things considered, it has taken you an awful lot of energy in addition to finances to create that label. If this describes the situation then one of your alternatives might be to find the proprietor of the name and then acquire it from him or her. Examina the "whois" data for the web site domain, and contact that man or woman shown to see if they may be eager to market it. You perhaps really should remember that many people are probably to choose to charge up a larger price as compared with you'll normally get any time purchasing completely new website domains (supposing they plan to market it to begin with). General Terms Or perhaps Brand Name Domains with extensions? I am certain that a variety of individuals appear to suspect that your domain name really should be some universal name just like "newcars.com" for anyone who is promoting automobiles. 
Observe, for instance, the amount of hard cash the common terms are being offered for. Although seriously, in case you were on the lookout to obtain a car, you without doubt already have got a few manufacturers in mind, and thus you're much more like to go for things such as generalmotors.com or simply toyota.com and not solely cars.com. For this reason, I personally state that a website address that matches your product is normally exceptionally great. The exact designation that you have to market your current product usually is the label that you would like for your website name, simply because that is generally the very first thing which regular people will try in their web browser. This unique idea furthermore helps it be very simple to memorize, and what ever that is quite easily remembered, would be much more likely to be tried out when compared to the difficult to understand domain. A domain name could have as many as 67 characters. You actually don't have to decide on for an unclear web site domain such as avab.com if what you actually mean is AcmeVideosAndBooks.com. Never the less, generally there seems to be some disagreement with regards to whether or not a long or shorter domain name is more desirable. Quite a few claim that smaller domains usually are much easier to memorize, less complicated to type and even noticeably less vunerable to errors: as an illustration, "getit.com" may be faster and easier to be aware of and less prone to typos compared with "connecttomywebsiteandobtainit.com". Others claim that a much longer web address is ordinarily easier on the people memory - to illustrate, "gaepw.com" is a combination of completely unrelated letters that will be problematic to memorize and type in the correct way, on the other hand as long as we enlarge it to its long style, "GetAnEconomicallyPricedWebsite.com", we tend to be much more inclined to never forget the website name. 
You can find real attestation that sustains these kinds of arguments. It can be significantly problematic to obtain shorter significant domain names. I have not verified, nevertheless I'm pretty sure that labels for example "getit.com" and "good.com" have already been bought. In the event that you actually manage to get a shorter website address though, the key is going to be to make certain it's a substantive combination of figures and not the incomprehensible "gaepw.com" in my contrived example in the previous paragraph. A number of major search engines genuinely give a reward to you actually for possessing a key phrase related to your current specific niche market in the domain names. The latter provide preference to search phrases that are usually positioned in your urls. So, one example is, in case you have a domain on free of charge C++ compilers using a domain name like freecpluspluscompilers.com, it may possibly fare better in a search for "free C++ compilers" when compared with the related pages on my additional web site, thefreecountry.com. Which would I go for? In the event that you also feel like me then you are going to constantly select a significant one, having said that I'm certainly not averse to longer terms. Regardless, I would almost certainly avoid very lengthy namings close to 67 letters. Apart from the understandable problem that regular people may be unable to remember this kind of an extended name, it could in addition turn out to be a difficult task inputting it as well as trying to fit it like a label on your current online site. It's simple to forget about the hyphens whenever typing a name. Several individuals are familiar with writing things for instance freecpluspluscompilers.com but not free-c-plus-plus-compilers.com. They will almost certainly skip the hyphens and turn out at your competitor's website.
When men and women highly recommended your web page to their associates orally, possessing hyphens in your current domain takes to a lot more potential errors in comparison with whenever the label will not have hyphens. For starters, exactly how do you reckon your trusty website visitors will certainly introduce to your website in the event that it is identified as "acme-books-and-videos.com"? These people may claim, "I been to Acme Book and Videos dot com yesterday. That was stunning." Their pals, recollecting that remark later, would probably input into their internet browsers "acmebooksandvideos.com". Oops. It really is just so tedious to key in a domain name using a lot of hyphens. Enough said. Search engines like Yahoo could recognize your search topics more suitable thus return your trusty old webpage far more positively in Yahoo and Google for all those good keywords found in your website name. The non-hyphenated one could not anymore be offered. At the least using this method, you obtain the web address that you would like. In my opinion, I would like to avoid hyphenated titles if I will be able to, nonetheless I suppose it truly depends on your own name and your scenario. It is not always unheard of to end up stuck using a totally different website name then exactly what you wanted, the web site domain name registrar may suggest alternate different forms of the company you typed in. By way of example, whenever you required website.com, and also it was gone (needless to say it is), it could endorse forms akin to:thewebsite.com, mywebsite.com, websites.com,and similar, if it turns out they were not by now taken also. The problem is certainly, should you accept them? My private viewpoint tends to be that in case a person take the "the..." and "my..." varieties of the web name, you must generally take into account to endorse your very own web page having the complete style of the domain name. 
Or else, people are most likely to forget about to put in the mandatory "the" or simply "my". For that reason, I usually advertise and market my web pages as "thesitewizard.com" and "thefreecountry.com" in their own whole web site domain forms, instead than just "Free Country" or maybe "Site Wizard" (without the article). On the other hand, I would probably certainly not take on the plural style of the website domain (eg, websites.com) in the event I cannot really additionally get "website.com", given that the happenstance of the visitor to your site negligence to enter the "s" inside of the webpage is undoubtedly quite big. Take into consideration the well known name tussle among etoys.com with etoy.com. A lot of men or women seeking to pay a visit to etoys.com have been evidently ready to etoy.com instead. Assuming it transpired for these people, it might happen to you as well. One frequent issue I come upon is actually out of consumers which are not able to purchase the ".com" web page of their choice, but find the ".net", ".org", ".biz", ".info" (etc) or possibly region-specific top level domains (TLDs) (like for example.uk,.de,.us,.eu, etc) out there. Is it really worth planning for any one of these? Nevertheless, if perhaps they are in (say) the Uk, they are not necessarily inclined to prefer to look at to obtain fast food from pizzaparlour.com, which usually would suggest an foreign business. You will definitely have better fortune getting in touch with it pizzaparlour.co.uk, ie, having a UK website. What if your web-site is actually a websites or just business endeavor that can certainly take advantage through an foreign audience? There are basically many schools of thought on all of this. I will have to basically state several basic versions. 
The primary school of thought can be on the premise that it is far better to have a domain name of your preference "myperfectdomain" regardless if it has got a TLD of ".net", ".org" or a few other country exact suffix, compared to to find yourself selecting an incomprehensible domain for the basic reason you find it difficult to obtain your very first alternative. Consequently many people would likely pay off for website names for instance "myperfectdomain.de" or "myperfectdomain.net" or whatsoever. As opposed to this is actually the premise that the instance you get hold of a location specific website address, A number of buyers may think that you really only do business with that particular country. An additional school of thought considers the various other suffixes (like ".net", ".org", ".biz", ".info", etc) to end up being relatively tolerable. For a few, the ".org" suffix actually details the specifics of the non-profit makeup of their business. As a result, as an example, the prominent Apache web server will be established at "apache.org". For this reason, folk that do that are likely to be sent to your trusty competitionweb site in case you do not as well get the ".com" domain name. Certainly, regardless if consumers really do not depend on their particular web browser to finish their writing, many just presume a ".com" suffix while they enter a web name, for that reason when your very own company is actually "Acme", they will only suppose your current website domain is definitely "acme.com" as an alternative to "acme.net" or even various other such domain name. Never make the wrong choice of intending to retrofit your website name for your current business or online page. My best initial business, thefreecountry.com did not initially start out with that good name, and thus I came across a great trouble (as well as lost website traffic) simply by the Website name changes. Do not make that misstep too.
from operator import attrgetter

from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.core.validators import RegexValidator
from django.db import models
from django.db.models import F, QuerySet
from django.db.models.expressions import RawSQL
from django.db.models.functions import Coalesce
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _

from judge.fulltext import SearchQuerySet
from judge.models.profile import Profile
from judge.models.runtime import Language
from judge.user_translations import ugettext as user_ugettext
from judge.utils.raw_sql import unique_together_left_join, RawSQLColumn

__all__ = ['ProblemGroup', 'ProblemType', 'Problem', 'ProblemTranslation', 'ProblemClarification',
           'TranslatedProblemQuerySet', 'TranslatedProblemForeignKeyQuerySet', 'License']


class ProblemType(models.Model):
    """A category a problem can be tagged with (a problem may have several types)."""
    # Short machine-readable identifier, e.g. used in URLs/admin lookups.
    name = models.CharField(max_length=20, verbose_name=_('problem category ID'), unique=True)
    # Human-readable display name.
    full_name = models.CharField(max_length=100, verbose_name=_('problem category name'))

    def __unicode__(self):
        # Python 2 style display method (this module predates __str__ usage).
        return self.full_name

    class Meta:
        ordering = ['full_name']
        verbose_name = _('problem type')
        verbose_name_plural = _('problem types')


class ProblemGroup(models.Model):
    """A group a problem belongs to (each problem has exactly one group)."""
    name = models.CharField(max_length=20, verbose_name=_('problem group ID'), unique=True)
    full_name = models.CharField(max_length=100, verbose_name=_('problem group name'))

    def __unicode__(self):
        return self.full_name

    class Meta:
        ordering = ['full_name']
        verbose_name = _('problem group')
        verbose_name_plural = _('problem groups')


class License(models.Model):
    """A content license that can be attached to a problem statement."""
    # Slug used in the license's URL; restricted to word chars, '-' and '.'.
    key = models.CharField(max_length=20, unique=True, verbose_name=_('key'),
                           validators=[RegexValidator(r'^[-\w.]+$', r'License key must be ^[-\w.]+$')])
    link = models.CharField(max_length=256, verbose_name=_('link'))
    name = models.CharField(max_length=256, verbose_name=_('full name'))
    display = models.CharField(max_length=256, blank=True, verbose_name=_('short name'),
                               help_text=_('Displayed on pages under this license'))
    icon = models.CharField(max_length=256, blank=True, verbose_name=_('icon'),
                            help_text=_('URL to the icon'))
    text = models.TextField(verbose_name=_('license text'))

    def __unicode__(self):
        return self.name

    def get_absolute_url(self):
        return reverse('license', args=(self.key,))

    class Meta:
        verbose_name = _('license')
        verbose_name_plural = _('licenses')


class TranslatedProblemQuerySet(SearchQuerySet):
    """Problem queryset with full-text search over code/name/description,
    plus an annotation helper for translated problem names."""

    def __init__(self, **kwargs):
        # Restrict full-text search to these three columns.
        super(TranslatedProblemQuerySet, self).__init__(('code', 'name', 'description'), **kwargs)

    def add_i18n_name(self, language):
        """Annotate each problem with ``i18n_name``: the translated name for
        *language* when a ProblemTranslation row exists, else the base name."""
        queryset = self._clone()
        # LEFT JOIN on the (problem, language) unique-together pair; returns the SQL alias.
        alias = unique_together_left_join(queryset, ProblemTranslation, 'problem', 'language', language)
        return queryset.annotate(i18n_name=Coalesce(RawSQL('%s.name' % alias, ()), F('name'),
                                                    output_field=models.CharField()))


class TranslatedProblemForeignKeyQuerySet(QuerySet):
    """Queryset mixin for models with a foreign key to Problem, adding the
    translated problem name under a caller-chosen annotation key."""

    def add_problem_i18n_name(self, key, language, name_field=None):
        """Annotate with the problem name translated to *language* as ``key``.

        name_field: existing field path to the untranslated name; required when
        Problem is not yet joined into the queryset (see comment below).
        """
        queryset = self._clone() if name_field is None else self.annotate(_name=F(name_field))
        alias = unique_together_left_join(queryset, ProblemTranslation, 'problem', 'language', language,
                                          parent_model=Problem)
        # You must specify name_field if Problem is not yet joined into the QuerySet.
        kwargs = {key: Coalesce(RawSQL('%s.name' % alias, ()),
                                F(name_field) if name_field else RawSQLColumn(Problem, 'name'),
                                output_field=models.CharField())}
        return queryset.annotate(**kwargs)


class Problem(models.Model):
    """A judge problem: statement, limits, scoring, access control and stats."""
    # Unique lowercase alphanumeric identifier used in URLs.
    code = models.CharField(max_length=20, verbose_name=_('problem code'), unique=True,
                            validators=[RegexValidator('^[a-z0-9]+$',
                                                       _('Problem code must be ^[a-z0-9]+$'))])
    name = models.CharField(max_length=100, verbose_name=_('problem name'), db_index=True)
    description = models.TextField(verbose_name=_('problem body'))
    # Publicly credited creators.
    authors = models.ManyToManyField(Profile, verbose_name=_('creators'), blank=True,
                                     related_name='authored_problems')
    # Can edit but are not publicly credited.
    curators = models.ManyToManyField(Profile, verbose_name=_('curators'), blank=True,
                                      related_name='curated_problems',
                                      help_text=_('These users will be able to edit a problem, '
                                                  'but not be publicly shown as an author.'))
    # Can view a private problem but not edit it.
    testers = models.ManyToManyField(Profile, verbose_name=_('testers'), blank=True,
                                     related_name='tested_problems',
                                     help_text=_(
                                         'These users will be able to view a private problem, but not edit it.'))
    types = models.ManyToManyField(ProblemType, verbose_name=_('problem types'))
    group = models.ForeignKey(ProblemGroup, verbose_name=_('problem group'))
    time_limit = models.FloatField(verbose_name=_('time limit'))
    memory_limit = models.IntegerField(verbose_name=_('memory limit'))
    # NOTE(review): no verbose_name/help_text; presumably "stop on first failed
    # test case" judging mode — confirm against the judge backend.
    short_circuit = models.BooleanField(default=False)
    points = models.FloatField(verbose_name=_('points'))
    partial = models.BooleanField(verbose_name=_('allows partial points'), default=False)
    allowed_languages = models.ManyToManyField(Language, verbose_name=_('allowed languages'))
    is_public = models.BooleanField(verbose_name=_('publicly visible'), db_index=True, default=False)
    is_manually_managed = models.BooleanField(verbose_name=_('manually managed'), db_index=True, default=False,
                                              help_text=_('Whether judges should be allowed to manage data or not'))
    date = models.DateTimeField(verbose_name=_('date of publishing'), null=True, blank=True, db_index=True,
                                help_text=_("Doesn't have magic ability to auto-publish due to backward compatibility"))
    banned_users = models.ManyToManyField(Profile, verbose_name=_('personae non gratae'), blank=True,
                                          help_text=_('Bans the selected users from submitting to this problem'))
    license = models.ForeignKey(License, null=True, blank=True, on_delete=models.SET_NULL)
    og_image = models.CharField(verbose_name=_('OpenGraph image'), max_length=150, blank=True)
    summary = models.TextField(blank=True, verbose_name=_('problem summary'),
                               help_text=_('Plain-text, shown in meta description tag, e.g. for social media.'))
    # Denormalized stats, refreshed by update_stats().
    user_count = models.IntegerField(verbose_name=_('amount of users'), default=0,
                                     help_text=_('The amount of users on the best solutions page.'))
    ac_rate = models.FloatField(verbose_name=_('rate of AC submissions'), default=0)

    objects = TranslatedProblemQuerySet.as_manager()
    tickets = GenericRelation('Ticket')

    def __init__(self, *args, **kwargs):
        super(Problem, self).__init__(*args, **kwargs)
        # Per-instance cache: language code -> translated name.
        self._translated_name_cache = {}
        self._i18n_name = None

    @cached_property
    def types_list(self):
        # NOTE(review): on Python 2 this is a list; on Python 3 `map` would be lazy.
        return map(user_ugettext, map(attrgetter('full_name'), self.types.all()))

    def languages_list(self):
        return self.allowed_languages.values_list('common_name', flat=True).distinct().order_by('common_name')

    def is_editor(self, profile):
        """True if *profile* is an author or curator of this problem."""
        return (self.authors.filter(id=profile.id) | self.curators.filter(id=profile.id)).exists()

    def is_editable_by(self, user):
        """True if *user* may edit this problem (global perm, public-problem perm, or editor)."""
        if not user.is_authenticated:
            return False
        # `and` binds tighter than `or`: edit_all always suffices; edit_public only for public problems.
        if user.has_perm('judge.edit_all_problem') or user.has_perm('judge.edit_public_problem') and self.is_public:
            return True
        return self.is_editor(user.profile)

    def is_accessible_by(self, user):
        """True if *user* may view this problem."""
        # All users can see public problems
        if self.is_public:
            return True
        # If the user can view all problems
        if user.has_perm('judge.see_private_problem'):
            return True
        # If the user authored the problem or is a curator
        if user.has_perm('judge.edit_own_problem') and self.is_editor(user.profile):
            return True
        # If the user is in a contest containing that problem or is a tester
        if user.is_authenticated:
            return (self.testers.filter(id=user.profile.id).exists() or
                    Problem.objects.filter(id=self.id, contest__users__user=user.profile).exists())
        else:
            return False

    def __unicode__(self):
        return self.name

    def get_absolute_url(self):
        return reverse('problem_detail', args=(self.code,))

    @cached_property
    def author_ids(self):
        return self.authors.values_list('id', flat=True)

    @cached_property
    def editor_ids(self):
        # QuerySet union via `|`: authors plus curators.
        return self.author_ids | self.curators.values_list('id', flat=True)

    @cached_property
    def tester_ids(self):
        return self.testers.values_list('id', flat=True)

    @cached_property
    def usable_common_names(self):
        return set(self.usable_languages.values_list('common_name', flat=True))

    @property
    def usable_languages(self):
        # Languages both allowed on this problem and served by an online judge.
        return self.allowed_languages.filter(judges__in=self.judges.filter(online=True)).distinct()

    def translated_name(self, language):
        """Return the problem name translated to *language*, caching per instance."""
        if language in self._translated_name_cache:
            return self._translated_name_cache[language]
        # Hits database despite prefetch_related.
        try:
            name = self.translations.filter(language=language).values_list('name', flat=True)[0]
        except IndexError:
            name = self.name
        self._translated_name_cache[language] = name
        return name

    @property
    def i18n_name(self):
        # NOTE(review): `_trans` looks like it is attached by a prefetch
        # elsewhere (e.g. Prefetch(..., to_attr='_trans')) — confirm at call sites.
        if self._i18n_name is None:
            self._i18n_name = self._trans[0].name if self._trans else self.name
        return self._i18n_name

    @i18n_name.setter
    def i18n_name(self, value):
        self._i18n_name = value

    @property
    def clarifications(self):
        return ProblemClarification.objects.filter(problem=self)

    def update_stats(self):
        """Recompute and persist user_count and ac_rate from submissions."""
        self.user_count = self.submission_set.filter(points__gt=0).values('user').distinct().count()
        submissions = self.submission_set.count()
        # Guard against division by zero when there are no submissions.
        self.ac_rate = 100.0 * self.submission_set.filter(result='AC').count() / submissions if submissions else 0
        self.save()

    update_stats.alters_data = True

    def _get_limits(self, key):
        """Collect per-language overrides of *key* ('time_limit' or 'memory_limit'),
        collapsing language variants that share a common name and the same limit."""
        limits = {limit['language_id']: (limit['language__name'], limit[key])
                  for limit in self.language_limits.values('language_id', 'language__name', key)}
        limit_ids = set(limits.keys())
        common = []
        # Python 2 iteritems(); common name -> set of language ids.
        for cn, ids in Language.get_common_name_map().iteritems():
            # Only collapse when every variant of this common name has an override.
            if ids - limit_ids:
                continue
            limit = set(limits[id][1] for id in ids)
            if len(limit) == 1:
                limit = next(iter(limit))
                common.append((cn, limit))
                for id in ids:
                    del limits[id]
        # Python 2: dict.values() returns a list, so `+` concatenates.
        limits = limits.values() + common
        limits.sort()
        return limits

    @property
    def language_time_limit(self):
        # Cached per problem under 'problem_tls:<id>'.
        key = 'problem_tls:%d' % self.id
        result = cache.get(key)
        if result is not None:
            return result
        result = self._get_limits('time_limit')
        cache.set(key, result)
        return result

    @property
    def language_memory_limit(self):
        # Cached per problem under 'problem_mls:<id>'.
        key = 'problem_mls:%d' % self.id
        result = cache.get(key)
        if result is not None:
            return result
        result = self._get_limits('memory_limit')
        cache.set(key, result)
        return result

    class Meta:
        permissions = (
            ('see_private_problem', 'See hidden problems'),
            ('edit_own_problem', 'Edit own problems'),
            ('edit_all_problem', 'Edit all problems'),
            ('edit_public_problem', 'Edit all public problems'),
            ('clone_problem', 'Clone problem'),
            ('change_public_visibility', 'Change is_public field'),
            ('change_manually_managed', 'Change is_manually_managed field'),
        )
        verbose_name = _('problem')
        verbose_name_plural = _('problems')


class ProblemTranslation(models.Model):
    """Translated name/description of a problem for one UI language."""
    problem = models.ForeignKey(Problem, verbose_name=_('problem'), related_name='translations')
    language = models.CharField(verbose_name=_('language'), max_length=7, choices=settings.LANGUAGES)
    name = models.CharField(verbose_name=_('translated name'), max_length=100, db_index=True)
    description = models.TextField(verbose_name=_('translated description'))

    class Meta:
        unique_together = ('problem', 'language')
        verbose_name = _('problem translation')
        verbose_name_plural = _('problem translations')


class ProblemClarification(models.Model):
    """A clarification note published for a problem."""
    problem = models.ForeignKey(Problem, verbose_name=_('clarified problem'))
    description = models.TextField(verbose_name=_('clarification body'))
    date = models.DateTimeField(verbose_name=_('clarification timestamp'), auto_now_add=True)


class LanguageLimit(models.Model):
    """Per-language override of a problem's time and memory limits."""
    problem = models.ForeignKey(Problem, verbose_name=_('problem'), related_name='language_limits')
    language = models.ForeignKey(Language, verbose_name=_('language'))
    time_limit = models.FloatField(verbose_name=_('time limit'))
    memory_limit = models.IntegerField(verbose_name=_('memory limit'))

    class Meta:
        unique_together = ('problem', 'language')
        verbose_name = _('language-specific resource limit')
        verbose_name_plural = _('language-specific resource limits')
Breasts are mostly fatty tissue, and athletes usually don’t have much in the way of body fat. As such, breast augmentation is a fairly common choice among female athletes who want the best of both worlds: an athletic physique and feminine curves. However, whether you visit our office in beautiful Scottsdale AZ for your breast augmentation or head somewhere else, you should know that breast augmentation for athletes can be a whole different ballgame. A great surgeon is like a great coach, so you need to choose the best available to get you where you want to be. You need to be clear with doctors about your fitness routine and what it means to you, and meet with more than just one. Then use their understanding and willingness to work with your lifestyle as criteria with which to make your decision. Dr. Rochlin is a Board Certified cosmetic surgeon and has experience working with active people. She can help you get the look you want, and not at the expense of your fitness routine. Once you decide on a surgeon, you’ll have to navigate a path to the right implant for you, your ideal body, and your activity level. For instance, larger implants can have an impact on your stride, if you are a runner, and the frequent movement can cause them to “sag” more than they might on a less athletic woman. Your incision placement can also have an effect on when you can resume certain activities. Anatomical, or teardrop shaped implants don’t move around when you do, so they won’t look as natural as round ones, and saline will appear less natural than silicone when you are moving. All of these are things you need to consider, and things you need to talk to your surgeon about so you know what to expect. You are going to have to limit your activity in the first month after your surgery. It is unavoidable, but your restrictions may be more flexible than a non-athlete’s. 
For instance, if you regularly run or do upper body exercises like push-ups, you may be able to do those activities in two weeks instead of four. But you need to do two things before you start your routine up again. The first is to make sure you really feel up to doing the activity. The recovery period after a major operation is not the time to push yourself. The second is to call your surgeon, presumably Dr. Rochlin. She knows all about your incision, your recovery speed, your implant type and its placement and she can give you an expert opinion about whether or not it’s safe to resume your regular routine. Physical exercise is a great way to stay fit and healthy, and Dr. Rochlin wants you to be able to meet your health and fitness goals while looking the way you want. There are special concerns that athletes have for cosmetic surgery, such as breast augmentation, that non-athletes wouldn’t even think of, like, “How will my implants look when I’m lifting weights?” Dr. Rochlin and her staff can answer these questions and help you meet your goals, so you can feel great and look great doing it.
from math import log, exp


class HMM:
    """Simple implementation of a Hidden Markov Model.

    All probabilities are stored internally in log space to avoid
    floating-point underflow on long observation sequences.

    Fixes over the previous revision:
    - The default ``initial_probability`` was all zeros, which crashed on
      ``log(0)``; it now defaults to a uniform distribution like the other
      parameters.
    - ``train`` built its gamma/xi tables by repeating ONE shared row for
      every time step, so each write aliased every other time step and the
      re-estimated parameters were wrong; independent rows are now built.
    - Debug prints and dead commented-out code were removed.
    """

    def __init__(self, state_num, observation_list, initial_probability=None,
                 transition_probability=None, observation_probability=None):
        """Build the model.

        Parameters
        ----------
        state_num : integer, number of hidden states.
        observation_list : array-like, the distinct observation symbols.
        initial_probability : array-like or None, P(first state); uniform if None.
        transition_probability : 2-D array-like or None, P(state j | state i); uniform if None.
        observation_probability : 2-D array-like or None, P(symbol o | state i); uniform if None.
        """
        self.state_num = state_num
        # Initial probability for choosing the first state.  Must not contain
        # zeros (they are converted with log below), so default to uniform.
        if not initial_probability:
            initial_probability = [1 / state_num for _ in range(state_num)]
        self._init_prob = initial_probability
        self._state = [i for i in range(state_num)]
        # Every state's transition probability.
        self._state_prob = [[(1 / self.state_num) for j in range(state_num)] for i in range(state_num)] \
            if not transition_probability else transition_probability
        self._ob_list = observation_list
        self._ob_num = len(observation_list)
        # Every state's observation (emission) probability.
        self._ob_prob = [[1 / self._ob_num for j in range(self._ob_num)] for i in range(self.state_num)] \
            if not observation_probability else observation_probability
        # Translate probability to log space.
        self._init_prob = [log(p) for p in self._init_prob]
        self._state_prob = [[log(p) for p in state] for state in self._state_prob]
        self._ob_prob = [[log(p) for p in state] for state in self._ob_prob]

    def forward(self, ob_list, time):
        """Use the forward algorithm to evaluate the probability of a given observation.

        Parameters
        ----------
        ob_list : array-like, Observation list.
        time : integer, Assign which time of observation list.

        Returns
        -------
        p : float, Probability of given observation.
        prob_list : array, Forward (log) probability in every time stamp.
        """
        if time > len(ob_list):
            raise IndexError("Time cannot be more than length of observation list.")
        ob_list = self._get_ob_index(ob_list)  # Transform observation to index
        # alpha[0][i] = pi[i] * b[i][o[0]]
        forward_prob = [self._initial_ob_prob(ob_list[0])]
        for t in range(1, time):
            forward_prob.append([])
            for j in range(self.state_num):
                # Sum over every previous state: alpha[t-1][i] * a[i][j]
                p = self._log_sum([forward_prob[t - 1][i] + self._state_prob[i][j]
                                   for i in range(self.state_num)])
                # Multiply by the emission probability of the current observation.
                forward_prob[t].append(p + self._ob_prob[j][ob_list[t]])
        return exp(self._log_sum(forward_prob[time - 1])), forward_prob

    def backward(self, ob_list, time):
        """Use the backward algorithm to evaluate the probability of a given observation.

        Parameters
        ----------
        ob_list : array-like, Observation list.
        time : integer, Assign which time of observation list.

        Returns
        -------
        p : float, Probability of given observation.
        prob_list : array, Backward (log) probability in every time stamp.
        """
        if time > len(ob_list):
            raise IndexError("Time cannot be more than length of observation list.")
        ob_list = self._get_ob_index(ob_list)  # Transform observation to index
        # beta at the final time step is 1 for every state.
        backward_prob = [[log(1) for i in range(self.state_num)] for t in range(time)]
        for t in range(time - 2, -1, -1):
            for i in range(self.state_num):
                # Sum over every following state: a[i][j] * b[j][o[t+1]] * beta[t+1][j]
                p = self._log_sum([backward_prob[t + 1][j] + self._state_prob[i][j] +
                                   self._ob_prob[j][ob_list[t + 1]]
                                   for j in range(self.state_num)])
                backward_prob[t][i] = p
        # Fold in the initial-state and first-emission probabilities to get P(O).
        return exp(self._log_sum([self._init_prob[i] + self._ob_prob[i][ob_list[0]] +
                                  backward_prob[0][i]
                                  for i in range(self.state_num)])), backward_prob

    def decode(self, ob_list, time):
        """Use the Viterbi algorithm to find the best state sequence for a given observation list.

        Parameters
        ----------
        ob_list : array-like, Observation list.
        time : integer, Assign which time of observation list.

        Returns
        -------
        state_seq : array, The best state sequence for given observation list.
        """
        if time > len(ob_list):
            raise IndexError("Time cannot be more than length of observation list.")
        ob_list = self._get_ob_index(ob_list)  # Transform observation to index
        # delta[0][i] = pi[i] * b[i][o[0]]
        max_prob = self._initial_ob_prob(ob_list[0])
        pre_prob = max_prob[:]
        path = [[i] for i in range(self.state_num)]
        for t in range(1, time):
            new_path = [[] for i in range(self.state_num)]
            for j in range(self.state_num):
                # Most likely previous state to transit to the present state.
                p, state = max([(pre_prob[i] + self._state_prob[i][j], i)
                                for i in range(self.state_num)])
                max_prob[j] = p + self._ob_prob[j][ob_list[t]]
                # Extend the most likely path to the present state.
                new_path[j] = path[state] + [j]
            pre_prob = max_prob[:]
            path = new_path
        # Pick the most likely final state and return its path.
        (prob, state) = max([(max_prob[i], i) for i in range(self.state_num)])
        return path[state]

    def train(self, data_sets):
        """Run one Baum-Welch (EM) re-estimation pass over *data_sets*.

        Updates the initial, transition and emission probabilities in place.

        Parameters
        ----------
        data_sets : array-like, An array of observation lists.  Each list
            should contain at least two observations so that transition
            statistics exist.

        Returns
        -------
        None
        """
        size = len(data_sets)
        all_state_prob = []     # gamma[l][t][i]: P(state i at time t | sample l)
        all_stateset_prob = []  # xi[l][t][i][j]: P(state i at t and state j at t+1 | sample l)
        for data in data_sets:
            time = len(data)
            # Build an INDEPENDENT row per time step.  The previous code used
            # `[row for t in range(time)]` with a single shared row, so every
            # write at one time step clobbered all others.
            state_prob = [[-1e10 for i in range(self.state_num)]
                          for t in range(time)]  # gamma
            state_set_prob = [[[-1e10 for j in range(self.state_num)]
                               for i in range(self.state_num)]
                              for t in range(time)]  # xi
            _, forward_prob = self.forward(data, time)
            _, backward_prob = self.backward(data, time)
            data = self._get_ob_index(data)
            for t, ob in enumerate(data):
                # p = sum_i alpha[t][i] * beta[t][i]
                p = self._log_sum([forward_prob[t][i] + backward_prob[t][i]
                                   for i in range(self.state_num)])
                # gamma[t][i] = alpha[t][i] * beta[t][i] / p
                for i in range(self.state_num):
                    state_prob[t][i] = forward_prob[t][i] + backward_prob[t][i] - p
                if t < time - 1:
                    # p = sum_{i,j} alpha[t][i] * a[i][j] * b[j][o[t+1]] * beta[t+1][j]
                    p = self._log_sum([forward_prob[t][i] + self._state_prob[i][j] +
                                       self._ob_prob[j][data[t + 1]] + backward_prob[t + 1][j]
                                       for i in range(self.state_num)
                                       for j in range(self.state_num)])
                    # xi[t][i][j] = alpha[t][i] * a[i][j] * b[j][o[t+1]] * beta[t+1][j] / p
                    for i in range(self.state_num):
                        for j in range(self.state_num):
                            state_set_prob[t][i][j] = (forward_prob[t][i] + self._state_prob[i][j] +
                                                       self._ob_prob[j][data[t + 1]] +
                                                       backward_prob[t + 1][j] - p)
            all_state_prob.append(state_prob)
            all_stateset_prob.append(state_set_prob)
        # pi[i] = average over samples of gamma[l][0][i]
        pi = [self._log_sum([all_state_prob[l][0][i] for l in range(size)]) - log(size)
              for i in range(self.state_num)]
        a = [[-1e10 for i in range(self.state_num)] for j in range(self.state_num)]
        b = [[-1e10 for o in range(self._ob_num)] for j in range(self.state_num)]
        # a[i][j] = expected i->j transitions / expected transitions out of i
        for i in range(self.state_num):
            p2 = self._log_sum([all_state_prob[l][t][i]
                                for l in range(size) for t in range(len(data_sets[l]) - 1)])
            for j in range(self.state_num):
                p1 = self._log_sum([all_stateset_prob[l][t][i][j]
                                    for l in range(size) for t in range(len(data_sets[l]) - 1)])
                a[i][j] = p1 - p2
        # b[i][o] = expected emissions of symbol o from state i / expected visits to state i
        for i in range(self.state_num):
            p = [-1e10 for o in range(self._ob_num)]
            p2 = self._log_sum([all_state_prob[l][t][i]
                                for l in range(size) for t in range(len(data_sets[l]))])
            for l in range(size):
                for t in range(len(data_sets[l])):
                    ob_ind = self._ob_list.index(data_sets[l][t])
                    p[ob_ind] = self._log_sum([p[ob_ind], all_state_prob[l][t][i]])
            for j in range(self._ob_num):
                b[i][j] = p[j] - p2
        self._init_prob = pi
        self._state_prob = a
        self._ob_prob = b

    def _get_ob_index(self, observation):
        """Translate observation symbols to their indexes in the symbol list."""
        return [self._ob_list.index(i) for i in observation]

    def _initial_ob_prob(self, ob_index):
        """Log probability of starting in each state and emitting the first observation."""
        return [self._init_prob[i] + self._ob_prob[i][ob_index] for i in range(self.state_num)]

    @staticmethod
    def _log_sum(sequence):
        """Numerically stable log(sum(exp(x) for x in sequence)).

        :param sequence: array-like, non-empty list of log-space values.
        :return: float, the log of the summed probabilities.
        """
        start = sequence[0]
        for value in sequence[1:]:
            # Keep the larger operand in `start` so exp(value - start) <= 1
            # and can never overflow.
            if start < value:
                start, value = value, start
            # log(e^s + e^v) = s + log(1 + e^(v - s))
            start += log(1 + exp(value - start))
        return start
Are you looking for wedding ideas for your big day in Cornwall? Our wedding fairs and open days give you fresh inspiration and the chance to ask those all-important questions. Why not come along to quiz our expert staff, take a tour of the venue and see what we offer? A whole host of the best Newquay wedding services and suppliers will also be present, whether you need the perfect wedding hair stylist or a trustworthy photographer for your day. Add entertainers, delicious canapés and drinks to sample and you have an unmissable event for anyone planning their dream Cornish wedding.